index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCqlQuery.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.List;
import org.apache.cassandra.thrift.Compression;
import org.apache.cassandra.thrift.InvalidRequestException;
import org.apache.cassandra.thrift.SchemaDisagreementException;
import org.apache.cassandra.thrift.TimedOutException;
import org.apache.cassandra.thrift.UnavailableException;
import org.apache.cassandra.thrift.Cassandra.Client;
import org.apache.thrift.TException;
import com.netflix.astyanax.serializers.StringSerializer;
public class ThriftCqlQuery<K, C> extends AbstractThriftCqlQuery<K, C> {
ThriftCqlQuery(ThriftColumnFamilyQueryImpl<K, C> cfQuery, String cql) {
super(cfQuery, cql);
}
@Override
protected org.apache.cassandra.thrift.CqlPreparedResult prepare_cql_query(Client client) throws InvalidRequestException, TException {
return client.prepare_cql_query(StringSerializer.get().toByteBuffer(cql), Compression.NONE);
}
@Override
protected org.apache.cassandra.thrift.CqlResult execute_prepared_cql_query(Client client, int id, List<ByteBuffer> values) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException {
return client.execute_prepared_cql_query(id, values);
}
@Override
protected org.apache.cassandra.thrift.CqlResult execute_cql_query(Client client) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException {
return client.execute_cql_query(
StringSerializer.get().toByteBuffer(cql),
useCompression ? Compression.GZIP : Compression.NONE);
}
}
| 8,000 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/AbstractThriftColumnMutationImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.Date;
import java.util.UUID;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.Clock;
import com.netflix.astyanax.ColumnMutation;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.clock.ConstantClock;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.BooleanSerializer;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.serializers.ByteSerializer;
import com.netflix.astyanax.serializers.BytesArraySerializer;
import com.netflix.astyanax.serializers.DateSerializer;
import com.netflix.astyanax.serializers.DoubleSerializer;
import com.netflix.astyanax.serializers.FloatSerializer;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.ShortSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.serializers.UUIDSerializer;
/**
 * Base class for single-column thrift mutations.  Holds the pre-serialized row key and
 * column name plus the write settings (clock, retry policy, consistency level), and funnels
 * every typed putValue() overload through the abstract {@link #insertValue(ByteBuffer, Integer)}.
 */
public abstract class AbstractThriftColumnMutationImpl implements ColumnMutation {

    // Serialized row key this mutation applies to.
    protected final ByteBuffer key;
    // Serialized name of the column being written or deleted.
    protected final ByteBuffer column;
    // Source of the mutation timestamp; replaced by withTimestamp().
    protected Clock clock;
    // Retry policy applied when the mutation executes.
    protected RetryPolicy retry;
    // Consistency level applied to the write.
    protected ConsistencyLevel writeConsistencyLevel;

    public AbstractThriftColumnMutationImpl(ByteBuffer key, ByteBuffer column, AstyanaxConfiguration config) {
        this.key = key;
        this.column = column;
        this.clock = config.getClock();
        this.retry = config.getRetryPolicy().duplicate();
        this.writeConsistencyLevel = config.getDefaultWriteConsistencyLevel();
    }

    @Override
    public ColumnMutation withRetryPolicy(RetryPolicy retry) {
        this.retry = retry;
        return this;
    }

    @Override
    public ColumnMutation setConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.writeConsistencyLevel = consistencyLevel;
        return this;
    }

    @Override
    public ColumnMutation withTimestamp(long timestamp) {
        // Pin the mutation to a caller-supplied timestamp instead of the configured clock.
        this.clock = new ConstantClock(timestamp);
        return this;
    }

    @Override
    public Execution<Void> putValue(String value, Integer ttl) {
        ByteBuffer bytes = StringSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(byte[] value, Integer ttl) {
        ByteBuffer bytes = BytesArraySerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(byte value, Integer ttl) {
        ByteBuffer bytes = ByteSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(short value, Integer ttl) {
        ByteBuffer bytes = ShortSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(int value, Integer ttl) {
        ByteBuffer bytes = IntegerSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(long value, Integer ttl) {
        ByteBuffer bytes = LongSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(boolean value, Integer ttl) {
        ByteBuffer bytes = BooleanSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(ByteBuffer value, Integer ttl) {
        ByteBuffer bytes = ByteBufferSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(Date value, Integer ttl) {
        ByteBuffer bytes = DateSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(float value, Integer ttl) {
        ByteBuffer bytes = FloatSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(double value, Integer ttl) {
        ByteBuffer bytes = DoubleSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putValue(UUID value, Integer ttl) {
        ByteBuffer bytes = UUIDSerializer.get().toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public <T> Execution<Void> putValue(T value, Serializer<T> serializer, Integer ttl) {
        // Caller supplies the serializer for arbitrary value types.
        ByteBuffer bytes = serializer.toByteBuffer(value);
        return insertValue(bytes, ttl);
    }

    @Override
    public Execution<Void> putEmptyColumn(Integer ttl) {
        // Writes a column whose value is zero bytes long.
        return insertValue(ThriftUtils.EMPTY_BYTE_BUFFER, ttl);
    }

    /** Subclasses perform the actual write of the serialized value. */
    protected abstract Execution<Void> insertValue(ByteBuffer value, Integer ttl);
}
| 8,001 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCqlStatement.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.Compression;
import org.apache.cassandra.thrift.Cassandra.Client;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.connectionpool.ConnectionContext;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlPreparedStatement;
import com.netflix.astyanax.cql.CqlStatementResult;
import com.netflix.astyanax.cql.CqlStatement;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.StringSerializer;
/**
 * {@link CqlStatement} implementation for the Cassandra 1.1 thrift CQL API.
 * The CQL text is serialized once by {@link #withCql(String)} and executed through
 * the keyspace's connection pool with the keyspace's default retry policy.
 */
class ThriftCqlStatement implements CqlStatement {
    private final ThriftKeyspaceImpl keyspace;
    private final RetryPolicy retry;
    private ByteBuffer query;
    private Compression compression = Compression.NONE;

    public ThriftCqlStatement(ThriftKeyspaceImpl keyspace) {
        this.keyspace = keyspace;
        this.retry = keyspace.getConfig().getRetryPolicy().duplicate();
    }

    @Override
    public OperationResult<CqlStatementResult> execute() throws ConnectionException {
        return keyspace.connectionPool.executeWithFailover(
            new AbstractKeyspaceOperationImpl<CqlStatementResult>(keyspace.tracerFactory.newTracer(
                    CassandraOperationType.CQL, null), null, keyspace.getKeyspaceName()) {
                @Override
                public CqlStatementResult internalExecute(Client client, ConnectionContext context) throws Exception {
                    return new ThriftCqlStatementResult(client.execute_cql_query(query, compression));
                }
            }, retry);
    }

    @Override
    public ListenableFuture<OperationResult<CqlStatementResult>> executeAsync() throws ConnectionException {
        throw new RuntimeException("Not supported yet");
    }

    @Override
    public CqlStatement withCql(String cql) {
        query = StringSerializer.get().toByteBuffer(cql);
        return this;
    }

    public CqlStatement withCompression(Boolean flag) {
        // Boolean.TRUE.equals() avoids the auto-unboxing NPE the old `if (flag)` had
        // when flag is null; null is treated the same as FALSE.
        compression = Boolean.TRUE.equals(flag) ? Compression.GZIP : Compression.NONE;
        return this;
    }

    @Override
    public CqlStatement withConsistencyLevel(ConsistencyLevel cl) {
        // Pre-CQL3 thrift has no per-statement consistency parameter.
        throw new IllegalStateException("Cannot set consistency level for Cassandra 1.1 thrift CQL api. Set consistency level directly in your CQL text");
    }

    @Override
    public CqlPreparedStatement asPreparedStatement() {
        throw new RuntimeException("Not supported yet");
    }
}
| 8,002 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftCqlSchema.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.Map.Entry;
import org.apache.cassandra.thrift.CqlMetadata;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.CqlSchema;
import com.netflix.astyanax.serializers.ComparatorType;
import com.netflix.astyanax.serializers.StringSerializer;
/**
 * Wraps the {@link CqlMetadata} section of a thrift CQL result, which maps column
 * names and values to their comparator type strings.
 *
 * NOTE(review): the constructor dumps the metadata to System.out — this looks like
 * leftover debug output rather than intentional logging; consider removing it or
 * routing it through a logger.  The stored {@code schema} field is never read back
 * within this class.
 */
public class ThriftCqlSchema implements CqlSchema {
    private final CqlMetadata schema;

    public ThriftCqlSchema(CqlMetadata schema) {
        this.schema = schema;
        // Default comparator types applied when a name/value has no specific entry.
        System.out.println("Name: " + schema.getDefault_name_type());
        System.out.println("Value: " + schema.getDefault_value_type());
        // Dump each column-name type using the serializer implied by its comparator string.
        for (Entry<ByteBuffer, String> type : schema.getName_types().entrySet()) {
            Serializer serializer = ComparatorType.valueOf(type.getValue().toUpperCase()).getSerializer();
            System.out.println("Name: " + type.getValue() + " = " + serializer.getString(type.getKey()));
        }
        // Dump each value type; a StringSerializer is used unconditionally here
        // (the comparator-based lookup is left commented out below).
        for (Entry<ByteBuffer, String> value : schema.getValue_types().entrySet()) {
            Serializer serializer = StringSerializer.get(); // ComparatorType.valueOf(value.getValue().toUpperCase()).getSerializer();
            System.out.println("Type: " + value.getValue() + " = " + ((value.getKey() == null) ? "null" : serializer.getString(value.getKey())));
        }
    }
}
| 8,003 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftKeyspaceImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.Cassandra.Client;
import org.apache.cassandra.thrift.CfDef;
import org.apache.cassandra.thrift.CounterColumn;
import org.apache.cassandra.thrift.KsDef;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.ColumnMutation;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.KeyspaceTracerFactory;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.SerializerPackage;
import com.netflix.astyanax.WriteAheadEntry;
import com.netflix.astyanax.WriteAheadLog;
import com.netflix.astyanax.connectionpool.ConnectionContext;
import com.netflix.astyanax.connectionpool.ConnectionPool;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.Operation;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.TokenRange;
import com.netflix.astyanax.connectionpool.exceptions.BadRequestException;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.IsDeadConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.connectionpool.exceptions.OperationException;
import com.netflix.astyanax.connectionpool.exceptions.SchemaDisagreementException;
import com.netflix.astyanax.connectionpool.impl.OperationResultImpl;
import com.netflix.astyanax.connectionpool.impl.TokenRangeImpl;
import com.netflix.astyanax.cql.CqlStatement;
import com.netflix.astyanax.ddl.ColumnFamilyDefinition;
import com.netflix.astyanax.ddl.KeyspaceDefinition;
import com.netflix.astyanax.ddl.SchemaChangeResult;
import com.netflix.astyanax.ddl.impl.SchemaChangeResponseImpl;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.partitioner.Partitioner;
import com.netflix.astyanax.query.ColumnFamilyQuery;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.retry.RunOnce;
import com.netflix.astyanax.serializers.SerializerPackageImpl;
import com.netflix.astyanax.serializers.UnknownComparatorException;
import com.netflix.astyanax.thrift.ddl.ThriftColumnFamilyDefinitionImpl;
import com.netflix.astyanax.thrift.ddl.ThriftKeyspaceDefinitionImpl;
/**
*
* @ThreadSafe
* Note that the same instances of this class can be used by multiple threads.
* Especially for reads and writes, the prepareQuery() and prepareMutationBatch() calls are thread safe
* but adding multiple mutations to the same mutation batch is NOT thread safe.
*
* Hence once a mutation batch is created by a thread, then that batch should be used by that thread only.
*
* @author elandau
*
*/
public final class ThriftKeyspaceImpl implements Keyspace {
private final static Logger LOG = LoggerFactory.getLogger(ThriftKeyspaceImpl.class);
final ConnectionPool<Cassandra.Client> connectionPool;
final AstyanaxConfiguration config;
final String ksName;
final ListeningExecutorService executor;
final KeyspaceTracerFactory tracerFactory;
final Cache<String, Object> cache;
final ThriftCqlFactory cqlStatementFactory;
private Host ddlHost = null;
private volatile Partitioner partitioner;
/**
 * Creates a keyspace facade bound to a single keyspace name.
 *
 * @param ksName        name of the keyspace all operations are scoped to
 * @param pool          connection pool supplying thrift clients
 * @param config        source of clock, retry policy, consistency levels and async executor
 * @param tracerFactory factory for per-operation tracers
 */
public ThriftKeyspaceImpl(
        String ksName,
        ConnectionPool<Cassandra.Client> pool,
        AstyanaxConfiguration config,
        final KeyspaceTracerFactory tracerFactory) {
    this.connectionPool = pool;
    this.config = config;
    this.ksName = ksName;
    // Wrap the configured executor so submissions return ListenableFutures.
    this.executor = MoreExecutors.listeningDecorator(config.getAsyncExecutor());
    this.tracerFactory = tracerFactory;
    // Short-lived cache used by describeRing(true) to avoid hammering describe_ring.
    this.cache = CacheBuilder.newBuilder().expireAfterWrite(10, TimeUnit.MINUTES).build();
    this.cqlStatementFactory = ThriftCqlFactoryResolver.createFactory(config);
}
/** Returns the name of the keyspace this instance operates on. */
@Override
public String getKeyspaceName() {
    return ksName;
}
/**
 * Creates a fresh mutation batch.  Batches are NOT thread safe (see class javadoc):
 * once created, a batch should only be used by the thread that created it.
 */
@Override
public MutationBatch prepareMutationBatch() {
    return new AbstractThriftMutationBatchImpl(config.getClock(), config.getDefaultWriteConsistencyLevel(), config.getRetryPolicy().duplicate()) {
        @Override
        public OperationResult<Void> execute() throws ConnectionException {
            // If a write-ahead log is configured, persist the mutation before sending
            // it so it can be replayed should the batch fail mid-flight.
            WriteAheadLog wal = getWriteAheadLog();
            WriteAheadEntry walEntry = null;
            if (wal != null) {
                walEntry = wal.createEntry();
                walEntry.writeMutation(this);
            }
            try {
                OperationResult<Void> result = executeOperation(
                        new AbstractKeyspaceOperationImpl<Void>(
                                tracerFactory.newTracer(useAtomicBatch() ? CassandraOperationType.ATOMIC_BATCH_MUTATE : CassandraOperationType.BATCH_MUTATE),
                                getPinnedHost(),
                                getKeyspaceName()) {
                            @Override
                            public Void internalExecute(Client client, ConnectionContext context) throws Exception {
                                // Mutation can be atomic or non-atomic.
                                // see http://www.datastax.com/dev/blog/atomic-batches-in-cassandra-1-2 for details on atomic batches
                                if (useAtomicBatch()) {
                                    client.atomic_batch_mutate(getMutationMap(),
                                            ThriftConverter.ToThriftConsistencyLevel(getConsistencyLevel()));
                                } else {
                                    client.batch_mutate(getMutationMap(),
                                            ThriftConverter.ToThriftConsistencyLevel(getConsistencyLevel()));
                                }
                                // Only clear local batch state after the server accepted it.
                                discardMutations();
                                return null;
                            }

                            @Override
                            public ByteBuffer getRowKey() {
                                // A single-row batch can be routed by its key; multi-row batches cannot.
                                if (getMutationMap().size() == 1)
                                    return getMutationMap().keySet().iterator().next();
                                else
                                    return null;
                            }
                        }, getRetryPolicy());
                // The batch was applied; its WAL entry is no longer needed.
                if (walEntry != null) {
                    wal.removeEntry(walEntry);
                }
                return result;
            }
            catch (ConnectionException exception) {
                // Rethrow as-is so it is not double-wrapped by the catch below.
                throw exception;
            }
            catch (Exception exception) {
                throw ThriftConverter.ToConnectionPoolException(exception);
            }
        }

        @Override
        public ListenableFuture<OperationResult<Void>> executeAsync() throws ConnectionException {
            // Run the synchronous execute() on the keyspace's async executor.
            return executor.submit(new Callable<OperationResult<Void>>() {
                @Override
                public OperationResult<Void> call() throws Exception {
                    return execute();
                }
            });
        }

        @Override
        public MutationBatch withCaching(boolean condition) {
            // This is a no-op when using Thrift
            return this;
        }
    };
}
/** Describes the full token ring with no datacenter or rack filtering. */
@Override
public List<TokenRange> describeRing() throws ConnectionException {
    final String anyDc = null;
    final String anyRack = null;
    return describeRing(anyDc, anyRack);
}
/** Describes the token ring restricted to one datacenter (no rack filter). */
@Override
public List<TokenRange> describeRing(final String dc) throws ConnectionException {
    final String anyRack = null;
    return describeRing(dc, anyRack);
}
/**
 * Describes the token ring, keeping only endpoints that match the given datacenter
 * and rack.  A null dc or rack means "match any".  Ranges with no surviving
 * endpoints are dropped entirely.
 */
@Override
public List<TokenRange> describeRing(final String dc, final String rack) throws ConnectionException {
    return executeOperation(
            new AbstractKeyspaceOperationImpl<List<TokenRange>>(tracerFactory
                    .newTracer(CassandraOperationType.DESCRIBE_RING), getKeyspaceName()) {
                @Override
                public List<TokenRange> internalExecute(Cassandra.Client client, ConnectionContext context) throws Exception {
                    List<org.apache.cassandra.thrift.TokenRange> thriftRanges = client.describe_ring(getKeyspaceName());
                    List<TokenRange> filtered = Lists.newArrayList();
                    for (org.apache.cassandra.thrift.TokenRange thriftRange : thriftRanges) {
                        List<String> endpoints = Lists.newArrayList();
                        for (org.apache.cassandra.thrift.EndpointDetails details : thriftRange.getEndpoint_details()) {
                            // Short-circuit order matches the original guard chain:
                            // the rack check only runs once the dc check passes.
                            if ((dc == null || details.getDatacenter().equals(dc))
                                    && (rack == null || details.getRack().equals(rack))) {
                                endpoints.add(details.getHost());
                            }
                        }
                        if (!endpoints.isEmpty()) {
                            filtered.add(new TokenRangeImpl(thriftRange.getStart_token(), thriftRange.getEnd_token(), endpoints));
                        }
                    }
                    return filtered;
                }
            }, getConfig().getRetryPolicy().duplicate()).getResult();
}
/**
 * Describes the token ring, optionally serving the result from a 10-minute cache
 * (see the cache built in the constructor) to avoid repeated describe_ring calls.
 */
@SuppressWarnings("unchecked")
@Override
public List<TokenRange> describeRing(boolean cached) throws ConnectionException {
    if (!cached) {
        return describeRing();
    }
    try {
        // Loader populates the cache entry on first access; later calls hit the cache.
        return (List<TokenRange>) cache.get(CassandraOperationType.DESCRIBE_RING.name(),
                new Callable<Object>() {
                    @Override
                    public Object call() throws Exception {
                        return describeRing();
                    }
                });
    }
    catch (ExecutionException e) {
        throw ThriftConverter.ToConnectionPoolException(e);
    }
}
/** Fetches this keyspace's schema definition, discarding the operation metadata. */
@Override
public KeyspaceDefinition describeKeyspace() throws ConnectionException {
    OperationResult<KeyspaceDefinition> result = internalDescribeKeyspace();
    return result.getResult();
}
/**
 * Same as {@link #describeKeyspace()} but exposes the full {@link OperationResult}
 * (answering host, latency) — used by createKeyspaceIfNotExists() to build its
 * no-op response.
 */
public OperationResult<KeyspaceDefinition> internalDescribeKeyspace() throws ConnectionException {
    return executeOperation(
            new AbstractKeyspaceOperationImpl<KeyspaceDefinition>(
                    tracerFactory.newTracer(CassandraOperationType.DESCRIBE_KEYSPACE), getKeyspaceName()) {
                @Override
                public KeyspaceDefinition internalExecute(Cassandra.Client client, ConnectionContext context) throws Exception {
                    return new ThriftKeyspaceDefinitionImpl(client.describe_keyspace(getKeyspaceName()));
                }
            }, getConfig().getRetryPolicy().duplicate());
}
/**
 * Returns the cluster's schema version map (schema id -> endpoints at that version).
 * Note this is a cluster-wide call and bypasses executeOperation's keyspace scoping.
 */
@Override
public Map<String, List<String>> describeSchemaVersions() throws ConnectionException {
    return connectionPool.executeWithFailover(
            new AbstractOperationImpl<Map<String, List<String>>>(
                    tracerFactory.newTracer(CassandraOperationType.DESCRIBE_SCHEMA_VERSION)) {
                @Override
                public Map<String, List<String>> internalExecute(Client client, ConnectionContext context) throws Exception {
                    return client.describe_schema_versions();
                }
            }, config.getRetryPolicy().duplicate()).getResult();
}
/**
 * Entry point for reads against the given column family.  Each call builds a new
 * query object seeded with the keyspace's default read consistency level and a
 * fresh copy of the retry policy, so queries are independent of each other.
 *
 * (Added the missing {@code @Override}: this implements Keyspace.prepareQuery and
 * every sibling interface method in this class carries the annotation.)
 */
@Override
public <K, C> ColumnFamilyQuery<K, C> prepareQuery(ColumnFamily<K, C> cf) {
    return new ThriftColumnFamilyQueryImpl<K, C>(
            executor,
            tracerFactory,
            this,
            connectionPool,
            cf,
            config.getDefaultReadConsistencyLevel(),
            config.getRetryPolicy().duplicate());
}
/**
 * Builds a single-column mutation for the given row/column.  The key and column name
 * are serialized once and stored in the base class; inside the nested operations the
 * bare {@code key}/{@code column} identifiers refer to those serialized base-class
 * fields (the method's own {@code column} parameter of type C is not captured).
 * Each returned Execution uses the base class's current retry policy and write
 * consistency level at execution time.
 */
@Override
public <K, C> ColumnMutation prepareColumnMutation(final ColumnFamily<K, C> columnFamily, final K rowKey, C column) {
    return new AbstractThriftColumnMutationImpl(
            columnFamily.getKeySerializer().toByteBuffer(rowKey),
            columnFamily.getColumnSerializer().toByteBuffer(column),
            config) {

        @Override
        public Execution<Void> incrementCounterColumn(final long amount) {
            // Counter increment via the thrift add() call.
            return new Execution<Void>() {
                @Override
                public OperationResult<Void> execute() throws ConnectionException {
                    return executeOperation(
                            new AbstractKeyspaceOperationImpl<Void>(
                                    tracerFactory.newTracer(CassandraOperationType.COUNTER_MUTATE),
                                    getKeyspaceName()) {
                                @Override
                                public Void internalExecute(Client client, ConnectionContext context) throws Exception {
                                    client.add(key, ThriftConverter.getColumnParent(columnFamily, null),
                                            new CounterColumn().setValue(amount).setName(column),
                                            ThriftConverter.ToThriftConsistencyLevel(writeConsistencyLevel));
                                    return null;
                                }

                                @Override
                                public ByteBuffer getRowKey() {
                                    return columnFamily.getKeySerializer()
                                            .toByteBuffer(rowKey);
                                }
                            }, retry);
                }

                @Override
                public ListenableFuture<OperationResult<Void>> executeAsync() throws ConnectionException {
                    // Delegate to the synchronous execute() on the keyspace's executor.
                    return executor.submit(new Callable<OperationResult<Void>>() {
                        @Override
                        public OperationResult<Void> call() throws Exception {
                            return execute();
                        }
                    });
                }
            };
        }

        @Override
        public Execution<Void> deleteColumn() {
            // Regular column delete; uses the configured clock for the tombstone timestamp.
            return new Execution<Void>() {
                @Override
                public OperationResult<Void> execute() throws ConnectionException {
                    return executeOperation(
                            new AbstractKeyspaceOperationImpl<Void>(
                                    tracerFactory.newTracer(CassandraOperationType.COLUMN_DELETE),
                                    getKeyspaceName()) {
                                @Override
                                public Void internalExecute(Client client, ConnectionContext context) throws Exception {
                                    client.remove(key, new org.apache.cassandra.thrift.ColumnPath()
                                            .setColumn_family(columnFamily.getName()).setColumn(column), config
                                            .getClock().getCurrentTime(), ThriftConverter
                                            .ToThriftConsistencyLevel(writeConsistencyLevel));
                                    return null;
                                }

                                @Override
                                public ByteBuffer getRowKey() {
                                    return columnFamily.getKeySerializer().toByteBuffer(rowKey);
                                }
                            }, retry);
                }

                @Override
                public ListenableFuture<OperationResult<Void>> executeAsync() throws ConnectionException {
                    return executor.submit(new Callable<OperationResult<Void>>() {
                        @Override
                        public OperationResult<Void> call() throws Exception {
                            return execute();
                        }
                    });
                }
            };
        }

        @Override
        public Execution<Void> insertValue(final ByteBuffer value, final Integer ttl) {
            // Column write; target of all the putValue() overloads in the base class.
            return new Execution<Void>() {
                @Override
                public OperationResult<Void> execute() throws ConnectionException {
                    return executeOperation(
                            new AbstractKeyspaceOperationImpl<Void>(
                                    tracerFactory.newTracer(CassandraOperationType.COLUMN_INSERT),
                                    getKeyspaceName()) {
                                @Override
                                public Void internalExecute(Client client, ConnectionContext context) throws Exception {
                                    org.apache.cassandra.thrift.Column c = new org.apache.cassandra.thrift.Column();
                                    c.setName(column).setValue(value).setTimestamp(clock.getCurrentTime());
                                    // TTL is optional; null means the column never expires.
                                    if (ttl != null) {
                                        c.setTtl(ttl);
                                    }
                                    client.insert(key, ThriftConverter.getColumnParent(columnFamily, null), c,
                                            ThriftConverter.ToThriftConsistencyLevel(writeConsistencyLevel));
                                    return null;
                                }

                                @Override
                                public ByteBuffer getRowKey() {
                                    return columnFamily.getKeySerializer().toByteBuffer(rowKey);
                                }
                            }, retry);
                }

                @Override
                public ListenableFuture<OperationResult<Void>> executeAsync() throws ConnectionException {
                    return executor.submit(new Callable<OperationResult<Void>>() {
                        @Override
                        public OperationResult<Void> call() throws Exception {
                            return execute();
                        }
                    });
                }
            };
        }

        @Override
        public Execution<Void> deleteCounterColumn() {
            // Counter delete uses remove_counter (no timestamp, unlike remove()).
            return new Execution<Void>() {
                @Override
                public OperationResult<Void> execute() throws ConnectionException {
                    return executeOperation(
                            new AbstractKeyspaceOperationImpl<Void>(
                                    tracerFactory.newTracer(CassandraOperationType.COLUMN_DELETE),
                                    getKeyspaceName()) {
                                @Override
                                public Void internalExecute(Client client, ConnectionContext context) throws Exception {
                                    client.remove_counter(key, new org.apache.cassandra.thrift.ColumnPath()
                                            .setColumn_family(columnFamily.getName()).setColumn(column),
                                            ThriftConverter.ToThriftConsistencyLevel(writeConsistencyLevel));
                                    return null;
                                }

                                @Override
                                public ByteBuffer getRowKey() {
                                    return columnFamily.getKeySerializer().toByteBuffer(rowKey);
                                }
                            }, retry);
                }

                @Override
                public ListenableFuture<OperationResult<Void>> executeAsync() throws ConnectionException {
                    return executor.submit(new Callable<OperationResult<Void>>() {
                        @Override
                        public OperationResult<Void> call() throws Exception {
                            return execute();
                        }
                    });
                }
            };
        }
    };
}
/** Returns the configuration this keyspace was built with. */
@Override
public AstyanaxConfiguration getConfig() {
    return config;
}
/**
 * Builds a serializer package from the live schema of the named column family.
 * Performs a describe_keyspace round trip on every call.
 */
@Override
public SerializerPackage getSerializerPackage(String cfName, boolean ignoreErrors)
        throws ConnectionException, UnknownComparatorException {
    ColumnFamilyDefinition cfDef = describeKeyspace().getColumnFamily(cfName);
    return new SerializerPackageImpl(cfDef, ignoreErrors);
}
/** Runs the test operation with a fresh copy of the configured retry policy. */
@Override
public OperationResult<Void> testOperation(final Operation<?, ?> operation) throws ConnectionException {
    RetryPolicy retryPolicy = config.getRetryPolicy().duplicate();
    return testOperation(operation, retryPolicy);
}
/**
 * Runs an arbitrary operation through the pool for testing purposes, honoring the
 * operation's pinned host.  Note the wrapped call invokes operation.execute(null, context),
 * so the operation under test must not rely on receiving a client instance.
 */
@Override
public OperationResult<Void> testOperation(final Operation<?, ?> operation, RetryPolicy retry)
        throws ConnectionException {
    return executeOperation(
            new AbstractKeyspaceOperationImpl<Void>(tracerFactory.newTracer(CassandraOperationType.TEST),
                    operation.getPinnedHost(), getKeyspaceName()) {
                @Override
                public Void internalExecute(Client client, ConnectionContext context) throws Exception {
                    operation.execute(null, context);
                    return null;
                }
            }, retry);
}
/** Exposes the underlying thrift connection pool. */
@Override
public ConnectionPool<Cassandra.Client> getConnectionPool() {
    return this.connectionPool;
}
/** Truncates by column family object; delegates to the by-name overload. */
@Override
public <K, C> OperationResult<Void> truncateColumnFamily(final ColumnFamily<K, C> columnFamily)
        throws OperationException, ConnectionException {
    String cfName = columnFamily.getName();
    return truncateColumnFamily(cfName);
}
/**
 * Truncates (deletes all rows of) the named column family, retried per the
 * configured retry policy.
 */
@Override
public OperationResult<Void> truncateColumnFamily(final String columnFamily) throws ConnectionException {
    return executeOperation(
            new AbstractKeyspaceOperationImpl<Void>(tracerFactory.newTracer(CassandraOperationType.TRUNCATE),
                    getKeyspaceName()) {
                @Override
                public Void internalExecute(Cassandra.Client client, ConnectionContext context) throws Exception {
                    client.truncate(columnFamily);
                    return null;
                }
            }, config.getRetryPolicy().duplicate());
}
/**
 * Central funnel for keyspace-scoped operations: runs the operation through the
 * connection pool's failover logic with the given retry policy.
 */
private <R> OperationResult<R> executeOperation(Operation<Cassandra.Client, R> operation, RetryPolicy retry)
        throws OperationException, ConnectionException {
    return connectionPool.executeWithFailover(operation, retry);
}
/**
 * Attempt to execute the DDL operation on the same host as previous DDL calls,
 * so schema changes flow through a single coordinator.  Makes at most two passes:
 * if the pinned host's connection is dead, the pin is cleared and the pool is free
 * to pick a different host on the second pass.  Synchronized to serialize DDL and
 * protect the {@code ddlHost} pin.
 *
 * @param operation DDL operation to run
 * @param retry     retry policy forwarded to the connection pool
 * @return the operation result; its host becomes the new pinned DDL host
 * @throws OperationException
 * @throws ConnectionException the last failure when both passes fail
 */
private synchronized <R> OperationResult<R> executeDdlOperation(AbstractOperationImpl<R> operation, RetryPolicy retry)
        throws OperationException, ConnectionException {
    ConnectionException lastException = null;
    for (int i = 0; i < 2; i++) {
        // Pin to the last successful DDL host; null lets the pool choose freely.
        operation.setPinnedHost(ddlHost);
        try {
            OperationResult<R> result = connectionPool.executeWithFailover(operation, retry);
            // Remember which host answered so the next DDL call targets it too.
            ddlHost = result.getHost();
            return result;
        }
        catch (ConnectionException e) {
            lastException = e;
            if (e instanceof IsDeadConnectionException) {
                // Pinned host is unreachable; unpin and let the retry pick a new one.
                ddlHost = null;
            }
        }
    }
    throw lastException;
}
/**
 * Returns the cluster's partitioner class name via describe_partitioner.
 */
@Override
public String describePartitioner() throws ConnectionException {
    return executeOperation(
            new AbstractOperationImpl<String>(
                    tracerFactory.newTracer(CassandraOperationType.DESCRIBE_PARTITIONER)) {
                @Override
                public String internalExecute(Client client, ConnectionContext context) throws Exception {
                    return client.describe_partitioner();
                }
            }, config.getRetryPolicy().duplicate()).getResult();
}
/** Creates a column family in this keyspace from the given options map. */
@Override
public OperationResult<SchemaChangeResult> createColumnFamily(final Map<String, Object> options) throws ConnectionException {
    CfDef thriftCfDef = toThriftColumnFamilyDefinition(options, null).getThriftColumnFamilyDefinition();
    return internalCreateColumnFamily(thriftCfDef);
}
/**
 * Creates the keyspace from the options map only if it does not already exist
 * (see the shared createKeyspaceIfNotExists(Callable) helper); returns a no-op
 * schema change result when it does.
 */
@Override
public OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(final Map<String, Object> options) throws ConnectionException {
    return createKeyspaceIfNotExists(new Callable<OperationResult<SchemaChangeResult>>() {
        @Override
        public OperationResult<SchemaChangeResult> call() throws Exception {
            return createKeyspace(options);
        }
    });
}
/**
 * Creates the keyspace along with the given column families in a single schema call.
 * The keyspace name is always taken from this instance, overriding any name in the
 * options map.  NOTE(review): ColumnFamily appears as a raw type here, mirroring the
 * interface signature.
 */
@Override
public OperationResult<SchemaChangeResult> createKeyspace(
        final Map<String, Object> options,
        final Map<ColumnFamily, Map<String, Object>> cfs) throws ConnectionException {
    ThriftKeyspaceDefinitionImpl ksDef = toThriftKeyspaceDefinition(options);
    // Attach one column family definition per entry before issuing the create.
    for (Entry<ColumnFamily, Map<String, Object>> cf : cfs.entrySet()) {
        ksDef.addColumnFamily(toThriftColumnFamilyDefinition(cf.getValue(), cf.getKey()));
    }
    ksDef.setName(getKeyspaceName());
    return internalCreateKeyspace(ksDef.getThriftKeyspaceDefinition());
}
    /**
     * Variant of {@link #createKeyspace(Map, Map)} that is a no-op when the
     * keyspace already exists.
     */
    @Override
    public OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(
            final Map<String, Object> options,
            final Map<ColumnFamily, Map<String, Object>> cfs) throws ConnectionException {
        return createKeyspaceIfNotExists(new Callable<OperationResult<SchemaChangeResult>>() {
            @Override
            public OperationResult<SchemaChangeResult> call() throws Exception {
                return createKeyspace(options, cfs);
            }
        });
    }
@Override
public OperationResult<SchemaChangeResult> createKeyspace(final Properties props) throws ConnectionException {
if (props.containsKey("name") && !props.get("name").equals(getKeyspaceName())) {
throw new BadRequestException(
String.format("'name' attribute must match keyspace name. Expected '%s' but got '%s'",
getKeyspaceName(), props.get("name")));
}
final KsDef ksDef;
try {
ksDef = ThriftUtils.getThriftObjectFromProperties(KsDef.class, props);
} catch (Exception e) {
throw new BadRequestException("Unable to convert props to keyspace definition");
}
return internalCreateKeyspace(ksDef);
}
    /**
     * Properties-based variant of create-if-not-exists; delegates to
     * {@link #createKeyspace(Properties)} only when the keyspace is absent.
     */
    @Override
    public OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(final Properties props) throws ConnectionException {
        return createKeyspaceIfNotExists(new Callable<OperationResult<SchemaChangeResult>>() {
            @Override
            public OperationResult<SchemaChangeResult> call() throws Exception {
                return createKeyspace(props);
            }
        });
    }
private OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(Callable<OperationResult<SchemaChangeResult>> createKeyspace) throws ConnectionException {
boolean shouldCreateKeyspace = false;
try {
OperationResult<KeyspaceDefinition> opResult = this.internalDescribeKeyspace();
if (opResult != null && opResult.getResult() != null) {
return new OperationResultImpl<SchemaChangeResult>(opResult.getHost(),
new SchemaChangeResponseImpl().setSchemaId("no-op"),
opResult.getLatency());
} else {
shouldCreateKeyspace = true;
}
} catch (BadRequestException e) {
if (e.isKeyspaceDoestNotExist()) {
shouldCreateKeyspace = true;
} else {
throw e;
}
}
if (shouldCreateKeyspace) {
try {
return createKeyspace.call();
} catch (ConnectionException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException(e);
}
} else {
throw new IllegalStateException();
}
}
    /**
     * Creates a column family, filling name/comparator/key-validator defaults
     * from the ColumnFamily's serializers unless overridden in options.
     */
    @Override
    public <K, C> OperationResult<SchemaChangeResult> createColumnFamily(final ColumnFamily<K, C> columnFamily, final Map<String, Object> options) throws ConnectionException {
        final CfDef cfDef = toThriftColumnFamilyDefinition(options, columnFamily).getThriftColumnFamilyDefinition();
        return internalCreateColumnFamily(cfDef);
    }
    /**
     * Updates a column family definition, deriving defaults from the
     * ColumnFamily's serializers in the same way as the create path.
     */
    @Override
    public <K, C> OperationResult<SchemaChangeResult> updateColumnFamily(final ColumnFamily<K, C> columnFamily, final Map<String, Object> options) throws ConnectionException {
        final CfDef cfDef = toThriftColumnFamilyDefinition(options, columnFamily).getThriftColumnFamilyDefinition();
        return internalUpdateColumnFamily(cfDef);
    }
    /**
     * Drops the named column family.  Runs once (no retry) on the pinned DDL
     * host after verifying the cluster has no pending schema disagreement.
     */
    @Override
    public OperationResult<SchemaChangeResult> dropColumnFamily(final String columnFamilyName) throws ConnectionException {
        return executeDdlOperation(
                new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
                        tracerFactory.newTracer(CassandraOperationType.DROP_COLUMN_FAMILY), getKeyspaceName()) {
                    @Override
                    public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
                        precheckSchemaAgreement(client);
                        return new SchemaChangeResponseImpl()
                                .setSchemaId(client.system_drop_column_family(columnFamilyName));
                    }
                }, RunOnce.get());
    }
    /**
     * Convenience overload: drops the column family by its registered name.
     */
    @Override
    public <K, C> OperationResult<SchemaChangeResult> dropColumnFamily(final ColumnFamily<K, C> columnFamily) throws ConnectionException {
        return dropColumnFamily(columnFamily.getName());
    }
    /**
     * Creates this keyspace (without column families) from an options map.
     */
    @Override
    public OperationResult<SchemaChangeResult> createKeyspace(final Map<String, Object> options) throws ConnectionException {
        final KsDef ksDef = toThriftKeyspaceDefinition(options).getThriftKeyspaceDefinition();
        return internalCreateKeyspace(ksDef);
    }
    /**
     * Updates this keyspace's definition from an options map.
     */
    @Override
    public OperationResult<SchemaChangeResult> updateKeyspace(final Map<String, Object> options) throws ConnectionException {
        final KsDef ksDef = toThriftKeyspaceDefinition(options).getThriftKeyspaceDefinition();
        return internalUpdateKeyspace(ksDef);
    }
    /**
     * Drops this entire keyspace.  Runs once (no retry) on the pinned DDL host
     * after verifying there is no pending schema disagreement.
     */
    @Override
    public OperationResult<SchemaChangeResult> dropKeyspace() throws ConnectionException {
        return executeDdlOperation(
                new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
                        tracerFactory.newTracer(CassandraOperationType.DROP_KEYSPACE), getKeyspaceName()) {
                    @Override
                    public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
                        precheckSchemaAgreement(client);
                        return new SchemaChangeResponseImpl()
                                .setSchemaId(client.system_drop_keyspace(getKeyspaceName()));
                    }
                }, RunOnce.get());
    }
    /**
     * Creates a new CQL statement bound to this keyspace.
     */
    @Override
    public CqlStatement prepareCqlStatement() {
        return this.cqlStatementFactory.createCqlStatement(this);
    }
    /**
     * Lazily resolves the cluster's partitioner (via describe_partitioner)
     * and caches it for subsequent calls.
     * NOTE(review): this is double-checked locking — it is only safe if the
     * 'partitioner' field is declared volatile; confirm at the field
     * declaration (not visible in this chunk).
     *
     * @throws NotFoundException if the reported partitioner name is unknown
     *         to the configuration
     */
    @Override
    public Partitioner getPartitioner() throws ConnectionException {
        if (partitioner == null) {
            synchronized(this) {
                if (partitioner == null) {
                    String partitionerName = this.describePartitioner();
                    try {
                        partitioner = config.getPartitioner(partitionerName);
                        LOG.info(String.format("Detected partitioner %s for keyspace %s", partitionerName, ksName));
                    } catch (Exception e) {
                        throw new NotFoundException("Unable to determine partitioner " + partitionerName, e);
                    }
                }
            }
        }
        return partitioner;
    }
    /**
     * Do a quick check to see if there is a schema disagreement. This is done as an extra precaution
     * to reduce the chances of putting the cluster into a bad state. This will not guarantee, however, that
     * by the time a schema change is made the cluster will be in the same state.
     * @param client thrift client used to query schema versions
     * @throws SchemaDisagreementException if more than one schema version is reported
     */
    private void precheckSchemaAgreement(Client client) throws Exception {
        Map<String, List<String>> schemas = client.describe_schema_versions();
        if (schemas.size() > 1) {
            throw new SchemaDisagreementException("Can't change schema due to pending schema agreement");
        }
    }
/**
* Convert a Map of options to an internal thrift column family definition
* @param options
*/
private ThriftColumnFamilyDefinitionImpl toThriftColumnFamilyDefinition(Map<String, Object> options, ColumnFamily columnFamily) {
ThriftColumnFamilyDefinitionImpl def = new ThriftColumnFamilyDefinitionImpl();
Map<String, Object> internalOptions = Maps.newHashMap();
if (options != null)
internalOptions.putAll(options);
internalOptions.put("keyspace", getKeyspaceName());
if (columnFamily != null) {
internalOptions.put("name", columnFamily.getName());
if (!internalOptions.containsKey("comparator_type"))
internalOptions.put("comparator_type", columnFamily.getColumnSerializer().getComparatorType().getTypeName());
if (!internalOptions.containsKey("key_validation_class"))
internalOptions.put("key_validation_class", columnFamily.getKeySerializer().getComparatorType().getTypeName());
if (columnFamily.getDefaultValueSerializer() != null && !internalOptions.containsKey("default_validation_class"))
internalOptions.put("default_validation_class", columnFamily.getDefaultValueSerializer().getComparatorType().getTypeName());
}
def.setFields(internalOptions);
return def;
}
    /**
     * Convert a Map of options to an internal thrift keyspace definition.
     * A "name" option, if present, must equal this keyspace's name and is
     * filled in automatically otherwise.
     * NOTE(review): a mismatch throws RuntimeException here but
     * BadRequestException in createKeyspace(Properties) — inconsistent; this
     * method has no throws clause, so a checked type cannot be used.
     * @param options keyspace-level options (replication settings, etc.)
     */
    private ThriftKeyspaceDefinitionImpl toThriftKeyspaceDefinition(final Map<String, Object> options) {
        ThriftKeyspaceDefinitionImpl def = new ThriftKeyspaceDefinitionImpl();
        Map<String, Object> internalOptions = Maps.newHashMap();
        if (options != null)
            internalOptions.putAll(options);
        if (internalOptions.containsKey("name") && !internalOptions.get("name").equals(getKeyspaceName())) {
            throw new RuntimeException(
                    String.format("'name' attribute must match keyspace name. Expected '%s' but got '%s'",
                            getKeyspaceName(), internalOptions.get("name")));
        }
        else {
            internalOptions.put("name", getKeyspaceName());
        }
        def.setFields(internalOptions);
        return def;
    }
    /**
     * Updates this keyspace from a flat Properties definition.  A "name"
     * property, if present, must match; the name is forced afterwards so the
     * update always targets this keyspace.
     */
    @Override
    public OperationResult<SchemaChangeResult> updateKeyspace(final Properties props) throws ConnectionException {
        if (props.containsKey("name") && !props.get("name").equals(getKeyspaceName())) {
            throw new RuntimeException(
                    String.format("'name' attribute must match keyspace name. Expected '%s' but got '%s'",
                            getKeyspaceName(), props.get("name")));
        }
        final KsDef ksDef;
        try {
            ksDef = ThriftUtils.getThriftObjectFromProperties(KsDef.class, props);
        } catch (Exception e) {
            throw new BadRequestException("Unable to convert properties to KsDef", e);
        }
        ksDef.setName(getKeyspaceName());
        return internalUpdateKeyspace(ksDef);
    }
public OperationResult<SchemaChangeResult> internalUpdateKeyspace(final KsDef ksDef) throws ConnectionException {
return connectionPool
.executeWithFailover(
new AbstractOperationImpl<SchemaChangeResult>(
tracerFactory.newTracer(CassandraOperationType.UPDATE_KEYSPACE)) {
@Override
public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
precheckSchemaAgreement(client);
return new SchemaChangeResponseImpl().setSchemaId(client.system_update_keyspace(ksDef));
}
}, RunOnce.get());
}
    /**
     * Creates a keyspace from a complete thrift KsDef, pinned to the DDL host.
     * An absent cf_defs list is normalized to an empty list because thrift
     * rejects a null list.
     */
    public OperationResult<SchemaChangeResult> internalCreateKeyspace(final KsDef ksDef) throws ConnectionException {
        if (ksDef.getCf_defs() == null)
            ksDef.setCf_defs(Lists.<CfDef>newArrayList());
        return executeDdlOperation(
                new AbstractOperationImpl<SchemaChangeResult>(
                        tracerFactory.newTracer(CassandraOperationType.ADD_KEYSPACE)) {
                    @Override
                    public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
                        precheckSchemaAgreement(client);
                        return new SchemaChangeResponseImpl().setSchemaId(client.system_add_keyspace(ksDef));
                    }
                }, RunOnce.get());
    }
    /**
     * Creates a column family from a flat Properties definition.  A
     * "keyspace" property, if present, must match this keyspace; the keyspace
     * is forced afterwards.
     * NOTE(review): the mismatch throws RuntimeException here but
     * BadRequestException in the keyspace equivalents — inconsistent.
     */
    @Override
    public OperationResult<SchemaChangeResult> createColumnFamily(final Properties props) throws ConnectionException {
        if (props.containsKey("keyspace") && !props.get("keyspace").equals(getKeyspaceName())) {
            throw new RuntimeException(
                    String.format("'keyspace' attribute must match keyspace name. Expected '%s' but got '%s'",
                            getKeyspaceName(), props.get("keyspace")));
        }
        CfDef cfDef;
        try {
            cfDef = ThriftUtils.getThriftObjectFromProperties(CfDef.class, props);
        } catch (Exception e) {
            throw new BadRequestException("Unable to convert properties to CfDef", e);
        }
        cfDef.setKeyspace(getKeyspaceName());
        return internalCreateColumnFamily(cfDef);
    }
    /**
     * Adds a column family from a thrift CfDef, pinned to the DDL host and run
     * once after the schema-agreement precheck.
     */
    private OperationResult<SchemaChangeResult> internalCreateColumnFamily(final CfDef cfDef) throws ConnectionException {
        return executeDdlOperation(new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
                tracerFactory.newTracer(CassandraOperationType.ADD_COLUMN_FAMILY), getKeyspaceName()) {
                    @Override
                    public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
                        precheckSchemaAgreement(client);
                        // Logs the full definition at INFO for schema-change auditing
                        LOG.info(cfDef.toString());
                        return new SchemaChangeResponseImpl().setSchemaId(client.system_add_column_family(cfDef));
                    }
                }, RunOnce.get());
    }
private OperationResult<SchemaChangeResult> internalUpdateColumnFamily(final CfDef cfDef) throws ConnectionException {
return executeDdlOperation(
new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
tracerFactory.newTracer(CassandraOperationType.ADD_COLUMN_FAMILY), getKeyspaceName()) {
@Override
public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
precheckSchemaAgreement(client);
return new SchemaChangeResponseImpl().setSchemaId(client.system_update_column_family(cfDef));
}
}, RunOnce.get());
}
    /**
     * Updates a column family from an options map.  A "keyspace" option, if
     * present, must match this keyspace.
     * NOTE(review): unlike the create/drop paths this does not go through
     * executeDdlOperation, so it is not pinned to the DDL host.
     */
    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(final Map<String, Object> options) throws ConnectionException {
        if (options.containsKey("keyspace") && !options.get("keyspace").equals(getKeyspaceName())) {
            throw new RuntimeException(
                    String.format("'keyspace' attribute must match keyspace name. Expected '%s' but got '%s'",
                            getKeyspaceName(), options.get("keyspace")));
        }
        return connectionPool
                .executeWithFailover(
                        new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
                                tracerFactory.newTracer(CassandraOperationType.UPDATE_COLUMN_FAMILY), getKeyspaceName()) {
                            @Override
                            public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
                                ThriftColumnFamilyDefinitionImpl def = new ThriftColumnFamilyDefinitionImpl();
                                def.setFields(options);
                                def.setKeyspace(getKeyspaceName());
                                return new SchemaChangeResponseImpl()
                                        .setSchemaId(client.system_update_column_family(def.getThriftColumnFamilyDefinition()));
                            }
                        }, RunOnce.get());
    }
@Override
public OperationResult<SchemaChangeResult> updateColumnFamily(final Properties props) throws ConnectionException {
if (props.containsKey("keyspace") && !props.get("keyspace").equals(getKeyspaceName())) {
throw new RuntimeException(
String.format("'keyspace' attribute must match keyspace name. Expected '%s' but got '%s'",
getKeyspaceName(), props.get("keyspace")));
}
return connectionPool
.executeWithFailover(
new AbstractKeyspaceOperationImpl<SchemaChangeResult>(
tracerFactory.newTracer(CassandraOperationType.ADD_COLUMN_FAMILY), getKeyspaceName()) {
@Override
public SchemaChangeResult internalExecute(Client client, ConnectionContext context) throws Exception {
CfDef def = ThriftUtils.getThriftObjectFromProperties(CfDef.class, props);
def.setKeyspace(getKeyspaceName());
return new SchemaChangeResponseImpl().setSchemaId(client.system_update_column_family(def));
}
}, RunOnce.get());
}
@Override
public Properties getKeyspaceProperties() throws ConnectionException {
KeyspaceDefinition ksDef = this.describeKeyspace();
if (ksDef == null)
throw new NotFoundException(String.format("Keyspace '%s' not found", getKeyspaceName()));
Properties props = new Properties();
ThriftKeyspaceDefinitionImpl thriftKsDef = (ThriftKeyspaceDefinitionImpl)ksDef;
try {
for (Entry<Object, Object> prop : thriftKsDef.getProperties().entrySet()) {
props.setProperty((String)prop.getKey(), (String) prop.getValue());
}
} catch (Exception e) {
LOG.error(String.format("Error fetching properties for keyspace '%s'", getKeyspaceName()));
}
return props;
}
@Override
public Properties getColumnFamilyProperties(String columnFamily) throws ConnectionException {
KeyspaceDefinition ksDef = this.describeKeyspace();
ColumnFamilyDefinition cfDef = ksDef.getColumnFamily(columnFamily);
if (cfDef == null)
throw new NotFoundException(String.format("Column family '%s' in keyspace '%s' not found", columnFamily, getKeyspaceName()));
Properties props = new Properties();
ThriftColumnFamilyDefinitionImpl thriftCfDef = (ThriftColumnFamilyDefinitionImpl)cfDef;
try {
for (Entry<Object, Object> prop : thriftCfDef.getProperties().entrySet()) {
props.setProperty((String)prop.getKey(), (String) prop.getValue());
}
} catch (Exception e) {
LOG.error("Error processing column family properties");
}
return props;
}
}
| 8,004 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ThriftAllRowsQueryImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.thrift;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.KeyRange;
import org.apache.cassandra.thrift.KeySlice;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.cassandra.thrift.Cassandra.Client;
import com.netflix.astyanax.shaded.org.apache.cassandra.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.ExceptionCallback;
import com.netflix.astyanax.RowCallback;
import com.netflix.astyanax.connectionpool.ConnectionContext;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.TokenRange;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.impl.OperationResultImpl;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.partitioner.Partitioner;
import com.netflix.astyanax.query.AllRowsQuery;
import com.netflix.astyanax.query.CheckpointManager;
import com.netflix.astyanax.shallows.EmptyCheckpointManager;
import com.netflix.astyanax.thrift.model.ThriftRowsSliceImpl;
/**
 * Thrift implementation of {@link AllRowsQuery}: iterates an entire column
 * family by walking the token ring in blocks of {@code blockSize} rows.
 * Rows can be consumed pull-style via {@link #execute()} (which returns a
 * lazily-paging {@code Rows} implementation) or push-style, in parallel per
 * token range, via {@link #executeWithCallback(RowCallback)}.
 */
public class ThriftAllRowsQueryImpl<K, C> implements AllRowsQuery<K, C> {
    private final static Logger LOG = LoggerFactory.getLogger(ThriftAllRowsQueryImpl.class);
    private final ThriftColumnFamilyQueryImpl<K,C> query;
    // Column selection; defaults to all columns of every row
    protected SlicePredicate predicate = new SlicePredicate().setSlice_range(ThriftUtils.createAllInclusiveSliceRange());
    // Tracks per-token-range progress; the default remembers nothing
    protected CheckpointManager checkpointManager = new EmptyCheckpointManager();
    protected ColumnFamily<K, C> columnFamily;
    private ExceptionCallback exceptionCallback;
    // Number of rows fetched per get_range_slices call
    private int blockSize = 100;
    // When true, each block re-fetches the previous block's last row so no row
    // is skipped across page boundaries; the duplicate is dropped client-side
    private boolean repeatLastToken = true;
    private Integer nThreads;
    private String startToken ;
    private String endToken ;
    // Tri-state: null means "derive from the predicate" (see shouldIgnoreEmptyRows)
    private Boolean includeEmptyRows;
    public ThriftAllRowsQueryImpl(ThriftColumnFamilyQueryImpl<K, C> query) {
        this.columnFamily = query.columnFamily;
        this.query = query;
    }
    /**
     * Fetches the next block of rows for the given key range, retrying
     * indefinitely on connection failure unless an ExceptionCallback is set.
     * With a callback: returning true retries, false stops iteration (an
     * empty list makes the caller's iterator report no more data).
     */
    protected List<org.apache.cassandra.thrift.KeySlice> getNextBlock(final KeyRange range) {
        ThriftKeyspaceImpl keyspace = query.keyspace;
        while (true) {
            try {
                return keyspace.connectionPool.executeWithFailover(
                        new AbstractKeyspaceOperationImpl<List<org.apache.cassandra.thrift.KeySlice>>(
                                keyspace.tracerFactory.newTracer(CassandraOperationType.GET_ROWS_RANGE, columnFamily),
                                query.pinnedHost, keyspace.getKeyspaceName()) {
                            @Override
                            public List<org.apache.cassandra.thrift.KeySlice> internalExecute(Client client, ConnectionContext context)
                                    throws Exception {
                                List<KeySlice> slice = client.get_range_slices(
                                        new ColumnParent().setColumn_family(columnFamily.getName()), predicate,
                                        range, ThriftConverter.ToThriftConsistencyLevel(query.consistencyLevel));
                                return slice;
                            }
                            @Override
                            public ByteBuffer getRowKey() {
                                if (range.getStart_key() != null)
                                    return range.start_key;
                                return null;
                            }
                        }, query.retry).getResult();
            }
            catch (ConnectionException e) {
                // Let exception callback handle this exception. If it
                // returns false then
                // we return an empty result which the iterator's
                // hasNext() to return false.
                // If no exception handler is provided then simply
                // return an empty set as if the
                // there is no more data
                if (this.getExceptionCallback() == null) {
                    throw new RuntimeException(e);
                }
                else {
                    if (!this.getExceptionCallback().onException(e)) {
                        return new ArrayList<org.apache.cassandra.thrift.KeySlice>();
                    }
                }
            }
        }
    }
    /**
     * Returns a lazily-paging Rows view over the whole column family; no
     * network call happens until the result is iterated.
     */
    @Override
    public OperationResult<Rows<K, C>> execute() throws ConnectionException {
        return new OperationResultImpl<Rows<K, C>>(Host.NO_HOST,
                new ThriftAllRowsImpl<K, C>(query.keyspace.getPartitioner(), this, columnFamily), 0);
    }
    @Override
    public ListenableFuture<OperationResult<Rows<K, C>>> executeAsync() throws ConnectionException {
        throw new UnsupportedOperationException("executeAsync not supported here. Use execute()");
    }
    /**
     * Decides whether empty (tombstone-only) rows are filtered out.  If
     * includeEmptyRows was set explicitly, honor it; otherwise keep empty
     * rows only when the predicate is a zero-count slice range (a keys-only
     * query), and filter them in every other case.
     */
    private boolean shouldIgnoreEmptyRows() {
        if (getIncludeEmptyRows() == null) {
            if (getPredicate().isSetSlice_range() && getPredicate().getSlice_range().getCount() == 0) {
                return false;
            }
        }
        else {
            return !getIncludeEmptyRows();
        }
        return true;
    }
    /**
     * Streams all rows to the callback, one task per token range, re-submitting
     * each task to the executor until its range is exhausted or an error is
     * latched.  Progress is checkpointed per range so a restart can resume.
     * NOTE(review): the InterruptedException from doneSignal.await() is only
     * logged; the interrupt flag is not restored via
     * Thread.currentThread().interrupt() — confirm whether that is intended.
     */
    @Override
    public void executeWithCallback(final RowCallback<K, C> callback) throws ConnectionException {
        final ThriftKeyspaceImpl keyspace = query.keyspace;
        final Partitioner partitioner = keyspace.getPartitioner();
        // First failure wins; a non-null value stops all range tasks
        final AtomicReference<ConnectionException> error = new AtomicReference<ConnectionException>();
        final boolean bIgnoreTombstones = shouldIgnoreEmptyRows();
        List<Pair<String, String>> ranges;
        if (this.getConcurrencyLevel() != null) {
            // Split the (possibly restricted) token range into nThreads slices,
            // resuming each slice from its checkpoint when one exists
            ranges = Lists.newArrayList();
            int nThreads = this.getConcurrencyLevel();
            List<TokenRange> tokens = partitioner.splitTokenRange(
                    startToken == null ? partitioner.getMinToken() : startToken,
                    endToken == null ? partitioner.getMaxToken() : endToken,
                    nThreads);
            for (TokenRange range : tokens) {
                try {
                    String currentToken = checkpointManager.getCheckpoint(range.getStartToken());
                    if (currentToken == null) {
                        currentToken = range.getStartToken();
                    }
                    else if (currentToken.equals(range.getEndToken())) {
                        // Range already completed in a previous run
                        continue;
                    }
                    ranges.add(Pair.create(currentToken, range.getEndToken()));
                } catch (Exception e) {
                    throw ThriftConverter.ToConnectionPoolException(e);
                }
            }
        }
        else {
            // One task per natural ring range.
            // NOTE(review): Lists.transform returns a lazy view; it is sized
            // once for the latch and iterated once below, so this is safe here
            ranges = Lists.transform(keyspace.describeRing(true), new Function<TokenRange, Pair<String, String>> () {
                @Override
                public Pair<String, String> apply(TokenRange input) {
                    return Pair.create(input.getStartToken(), input.getEndToken());
                }
            });
        }
        final CountDownLatch doneSignal = new CountDownLatch(ranges.size());
        for (final Pair<String, String> tokenPair : ranges) {
            // Prepare the range of tokens for this token range
            final KeyRange range = new KeyRange()
                    .setCount(getBlockSize())
                    .setStart_token(tokenPair.left)
                    .setEnd_token(tokenPair.right);
            query.executor.submit(new Callable<Void>() {
                private boolean firstBlock = true;
                @Override
                public Void call() throws Exception {
                    // Re-submit while there is more data and no latched error;
                    // otherwise signal this range as done
                    if (error.get() == null && internalRun()) {
                        query.executor.submit(this);
                    }
                    else {
                        doneSignal.countDown();
                    }
                    return null;
                }
                private boolean internalRun() throws Exception {
                    try {
                        // Get the next block
                        List<KeySlice> ks = keyspace.connectionPool.executeWithFailover(
                                new AbstractKeyspaceOperationImpl<List<KeySlice>>(keyspace.tracerFactory
                                        .newTracer(CassandraOperationType.GET_ROWS_RANGE,
                                                columnFamily), query.pinnedHost, keyspace
                                        .getKeyspaceName()) {
                                    @Override
                                    public List<KeySlice> internalExecute(Client client, ConnectionContext context)
                                            throws Exception {
                                        return client.get_range_slices(new ColumnParent()
                                                .setColumn_family(columnFamily.getName()),
                                                predicate, range, ThriftConverter
                                                        .ToThriftConsistencyLevel(query.consistencyLevel));
                                    }
                                    @Override
                                    public ByteBuffer getRowKey() {
                                        if (range.getStart_key() != null)
                                            return ByteBuffer.wrap(range.getStart_key());
                                        return null;
                                    }
                                }, query.retry.duplicate()).getResult();
                        // Notify the callback
                        if (!ks.isEmpty()) {
                            KeySlice lastRow = Iterables.getLast(ks);
                            // A full block implies there may be more data
                            boolean bContinue = (ks.size() == getBlockSize());
                            if (getRepeatLastToken()) {
                                // Every block after the first starts at the
                                // previous block's last row; drop the duplicate
                                if (firstBlock) {
                                    firstBlock = false;
                                }
                                else {
                                    ks.remove(0);
                                }
                            }
                            if (bIgnoreTombstones) {
                                // Filter out rows that have no columns (tombstones)
                                Iterator<KeySlice> iter = ks.iterator();
                                while (iter.hasNext()) {
                                    if (iter.next().getColumnsSize() == 0)
                                        iter.remove();
                                }
                            }
                            Rows<K, C> rows = new ThriftRowsSliceImpl<K, C>(ks, columnFamily
                                    .getKeySerializer(), columnFamily.getColumnSerializer());
                            try {
                                callback.success(rows);
                            }
                            catch (Throwable t) {
                                // A callback failure aborts the whole traversal
                                ConnectionException ce = ThriftConverter.ToConnectionPoolException(t);
                                error.set(ce);
                                return false;
                            }
                            if (bContinue) {
                                // Determine the start token for the next page
                                String token = partitioner.getTokenForKey(lastRow.bufferForKey()).toString();
                                checkpointManager.trackCheckpoint(tokenPair.left, token);
                                if (getRepeatLastToken()) {
                                    // Start one token before so the last row is re-fetched
                                    range.setStart_token(partitioner.getTokenMinusOne(token));
                                }
                                else {
                                    range.setStart_token(token);
                                }
                            }
                            else {
                                // Short block: range exhausted, checkpoint completion
                                checkpointManager.trackCheckpoint(tokenPair.left, tokenPair.right);
                                return false;
                            }
                        }
                        else {
                            // Empty block: range exhausted, checkpoint completion
                            checkpointManager.trackCheckpoint(tokenPair.left, tokenPair.right);
                            return false;
                        }
                    }
                    catch (Exception e) {
                        // Give the callback a chance to swallow the failure and retry
                        ConnectionException ce = ThriftConverter.ToConnectionPoolException(e);
                        if (!callback.failure(ce)) {
                            error.set(ce);
                            return false;
                        }
                    }
                    return true;
                }
            });
        }
        // Block until all threads finish
        try {
            doneSignal.await();
        }
        catch (InterruptedException e) {
            LOG.debug("Execution interrupted on get all rows for keyspace " + keyspace.getKeyspaceName());
        }
        if (error.get() != null) {
            throw error.get();
        }
    }
    // NOTE(review): missing @Override although declared by AllRowsQuery callers
    public AllRowsQuery<K, C> setExceptionCallback(ExceptionCallback cb) {
        exceptionCallback = cb;
        return this;
    }
    protected ExceptionCallback getExceptionCallback() {
        return this.exceptionCallback;
    }
    /** Deprecated-style alias for {@link #setConcurrencyLevel(int)}. */
    @Override
    public AllRowsQuery<K, C> setThreadCount(int numberOfThreads) {
        setConcurrencyLevel(numberOfThreads);
        return this;
    }
    @Override
    public AllRowsQuery<K, C> setConcurrencyLevel(int numberOfThreads) {
        this.nThreads = numberOfThreads;
        return this;
    }
    @Override
    public AllRowsQuery<K, C> setCheckpointManager(CheckpointManager manager) {
        this.checkpointManager = manager;
        return this;
    }
    /** Restricts the query to a fixed set of columns (clears any slice range). */
    @Override
    public AllRowsQuery<K, C> withColumnSlice(C... columns) {
        if (columns != null)
            predicate.setColumn_names(columnFamily.getColumnSerializer().toBytesList(Arrays.asList(columns)))
                    .setSlice_rangeIsSet(false);
        return this;
    }
    @Override
    public AllRowsQuery<K, C> withColumnSlice(Collection<C> columns) {
        if (columns != null)
            predicate.setColumn_names(columnFamily.getColumnSerializer().toBytesList(columns)).setSlice_rangeIsSet(
                    false);
        return this;
    }
    @Override
    public AllRowsQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count) {
        predicate.setSlice_range(ThriftUtils.createSliceRange(columnFamily.getColumnSerializer(), startColumn,
                endColumn, reversed, count));
        return this;
    }
    @Override
    public AllRowsQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int count) {
        predicate.setSlice_range(new SliceRange(startColumn, endColumn, reversed, count));
        return this;
    }
    /** Applies either the slice's fixed column list or its range form. */
    @Override
    public AllRowsQuery<K, C> withColumnSlice(ColumnSlice<C> slice) {
        if (slice.getColumns() != null) {
            predicate.setColumn_names(columnFamily.getColumnSerializer().toBytesList(slice.getColumns()))
                    .setSlice_rangeIsSet(false);
        }
        else {
            predicate.setSlice_range(ThriftUtils.createSliceRange(columnFamily.getColumnSerializer(),
                    slice.getStartColumn(), slice.getEndColumn(), slice.getReversed(), slice.getLimit()));
        }
        return this;
    }
    @Override
    public AllRowsQuery<K, C> withColumnRange(ByteBufferRange range) {
        predicate.setSlice_range(new SliceRange().setStart(range.getStart()).setFinish(range.getEnd())
                .setCount(range.getLimit()).setReversed(range.isReversed()));
        return this;
    }
    /** Alias for {@link #setRowLimit(int)}. */
    @Override
    public AllRowsQuery<K, C> setBlockSize(int blockSize) {
        return setRowLimit(blockSize);
    }
    @Override
    public AllRowsQuery<K, C> setRowLimit(int rowLimit) {
        this.blockSize = rowLimit;
        return this;
    }
    public int getBlockSize() {
        return blockSize;
    }
    @Override
    public AllRowsQuery<K, C> setRepeatLastToken(boolean repeatLastToken) {
        this.repeatLastToken = repeatLastToken;
        return this;
    }
    public boolean getRepeatLastToken() {
        return this.repeatLastToken;
    }
    protected Integer getConcurrencyLevel() {
        return this.nThreads;
    }
    public AllRowsQuery<K, C> setIncludeEmptyRows(boolean flag) {
        this.includeEmptyRows = flag;
        return this;
    }
    public String getStartToken() {
        return this.startToken;
    }
    public String getEndToken() {
        return this.endToken;
    }
    /** Restricts the traversal to a sub-range of the token ring. */
    @Override
    public AllRowsQuery<K, C> forTokenRange(BigInteger startToken, BigInteger endToken) {
        return forTokenRange(startToken.toString(), endToken.toString());
    }
    public AllRowsQuery<K, C> forTokenRange(String startToken, String endToken) {
        this.startToken = startToken;
        this.endToken = endToken;
        return this;
    }
    SlicePredicate getPredicate() {
        return predicate;
    }
    Boolean getIncludeEmptyRows() {
        return this.includeEmptyRows;
    }
}
| 8,005 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ddl/ThriftColumnFamilyDefinitionImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.ddl;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import org.apache.cassandra.thrift.CfDef;
import org.apache.cassandra.thrift.KsDef;
import org.apache.cassandra.thrift.CfDef._Fields;
import org.apache.cassandra.thrift.ColumnDef;
import org.apache.thrift.meta_data.FieldMetaData;
import com.google.common.collect.Maps;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.ddl.ColumnFamilyDefinition;
import com.netflix.astyanax.ddl.FieldMetadata;
import com.netflix.astyanax.thrift.ThriftTypes;
import com.netflix.astyanax.thrift.ThriftUtils;
public class ThriftColumnFamilyDefinitionImpl implements ColumnFamilyDefinition {
private final static Map<String, FieldMetadata> fieldsMetadata = Maps.newHashMap();
static {
for (Entry<_Fields, FieldMetaData> field : CfDef.metaDataMap.entrySet()) {
fieldsMetadata.put(
field.getValue().fieldName,
new FieldMetadata(
field.getKey().name(),
ThriftTypes.values()[field.getValue().valueMetaData.type].name(),
field.getValue().valueMetaData.isContainer()));
}
}
// Backing Thrift CfDef; every getter/setter below delegates straight to it.
private CfDef cfDef;
// Wraps a fresh, empty Thrift CfDef.
public ThriftColumnFamilyDefinitionImpl() {
this.cfDef = new CfDef();
}
// Wraps an existing Thrift CfDef (e.g. one read back from the cluster).
public ThriftColumnFamilyDefinitionImpl(CfDef cfDef) {
this.cfDef = cfDef;
}
// Exposes the raw Thrift struct for direct use with the Thrift API.
public CfDef getThriftColumnFamilyDefinition() {
return cfDef;
}
@Override
public ColumnFamilyDefinition setComment(String comment) {
cfDef.setComment(comment);
return this;
}
@Override
public String getComment() {
return cfDef.getComment();
}
@Override
public ColumnFamilyDefinition setKeyspace(String keyspace) {
cfDef.setKeyspace(keyspace);
return this;
}
@Override
public String getKeyspace() {
return cfDef.getKeyspace();
}
// The memtable_* tuning knobs were dropped from the Thrift API in
// Cassandra 1.0; these accessors intentionally fail fast instead of
// silently ignoring the value.
@Override
@Deprecated
public ColumnFamilyDefinition setMemtableFlushAfterMins(Integer value) {
throw new RuntimeException("API Remove in Cassandra 1.0");
}
@Override
@Deprecated
public Integer getMemtableFlushAfterMins() {
throw new RuntimeException("API Remove in Cassandra 1.0");
}
@Override
@Deprecated
public ColumnFamilyDefinition setMemtableOperationsInMillions(Double value) {
throw new RuntimeException("API Remove in Cassandra 1.0");
}
@Override
@Deprecated
public Double getMemtableOperationsInMillions() {
throw new RuntimeException("API Remove in Cassandra 1.0");
}
@Override
@Deprecated
public ColumnFamilyDefinition setMemtableThroughputInMb(Integer value) {
throw new RuntimeException("API Remove in Cassandra 1.0");
}
@Override
public ColumnFamilyDefinition setMergeShardsChance(Double value) {
cfDef.setMerge_shards_chance(value);
return this;
}
// Null values are silently ignored so callers can pass through unset config.
@Override
public ColumnFamilyDefinition setMinCompactionThreshold(Integer value) {
if (value != null)
cfDef.setMin_compaction_threshold(value);
return this;
}
@Override
public ColumnFamilyDefinition setName(String name) {
cfDef.setName(name);
return this;
}
public String getName() {
return cfDef.getName();
}
@Override
public ColumnFamilyDefinition setReadRepairChance(Double value) {
if (value != null)
cfDef.setRead_repair_chance(value);
return this;
}
@Override
public ColumnFamilyDefinition setReplicateOnWrite(Boolean value) {
if (value != null)
cfDef.setReplicate_on_write(value);
return this;
}
@Override
public ColumnFamilyDefinition setRowCacheProvider(String value) {
cfDef.setRow_cache_provider(value);
return this;
}
@Override
public ColumnFamilyDefinition setRowCacheSavePeriodInSeconds(Integer value) {
if (value != null)
cfDef.setRow_cache_save_period_in_seconds(value);
return this;
}
@Override
public ColumnFamilyDefinition setRowCacheSize(Double size) {
if (size != null)
cfDef.setRow_cache_size(size);
return this;
}
@Override
public ColumnFamilyDefinition setComparatorType(String value) {
cfDef.setComparator_type(value);
return this;
}
@Override
public String getComparatorType() {
return cfDef.getComparator_type();
}
@Override
public ColumnFamilyDefinition setDefaultValidationClass(String value) {
cfDef.setDefault_validation_class(value);
return this;
}
@Override
public String getDefaultValidationClass() {
return cfDef.getDefault_validation_class();
}
@Override
public ColumnFamilyDefinition setId(Integer id) {
cfDef.setId(id);
return this;
}
@Override
public Integer getId() {
return cfDef.getId();
}
@Override
public ColumnFamilyDefinition setKeyAlias(ByteBuffer alias) {
cfDef.setKey_alias(alias);
return this;
}
// Returns a fresh wrapper over the stored key_alias bytes, or null when unset.
@Override
public ByteBuffer getKeyAlias() {
if (cfDef.getKey_alias() == null)
return null;
return ByteBuffer.wrap(cfDef.getKey_alias());
}
@Override
public ColumnFamilyDefinition setKeyCacheSavePeriodInSeconds(Integer value) {
if (value != null)
cfDef.setKey_cache_save_period_in_seconds(value);
return this;
}
@Override
public Integer getKeyCacheSavePeriodInSeconds() {
return cfDef.getKey_cache_save_period_in_seconds();
}
@Override
public ColumnFamilyDefinition setKeyCacheSize(Double keyCacheSize) {
if (keyCacheSize != null)
cfDef.setKey_cache_size(keyCacheSize);
return this;
}
@Override
public ColumnFamilyDefinition setKeyValidationClass(String keyValidationClass) {
cfDef.setKey_validation_class(keyValidationClass);
return this;
}
@Override
public String getKeyValidationClass() {
return cfDef.getKey_validation_class();
}
// Lazily initializes the CfDef's column_metadata list on first add.
// Only ThriftColumnDefinitionImpl instances are accepted (cast below).
@Override
public ColumnFamilyDefinition addColumnDefinition(ColumnDefinition columnDef) {
List<ColumnDef> columns = cfDef.getColumn_metadata();
if (columns == null) {
columns = new ArrayList<ColumnDef>();
cfDef.setColumn_metadata(columns);
}
columns.add(((ThriftColumnDefinitionImpl) columnDef).getThriftColumnDefinition());
return this;
}
// Returns wrapper views over the Thrift column metadata; empty list when unset.
@Override
public List<ColumnDefinition> getColumnDefinitionList() {
List<ColumnDefinition> list = new ArrayList<ColumnDefinition>();
List<ColumnDef> cdefs = cfDef.getColumn_metadata();
if (cdefs != null) {
for (ColumnDef cdef : cdefs) {
list.add(new ThriftColumnDefinitionImpl(cdef));
}
}
return list;
}
@Override
public void clearColumnDefinitionList() {
cfDef.setColumn_metadata(new ArrayList<ColumnDef>());
}
// Removed from the Thrift API in Cassandra 1.0; fails fast.
@Override
@Deprecated
public Integer getMemtableThroughputInMb() {
throw new RuntimeException("API Remove in Cassandra 1.0");
}
@Override
public Double getMergeShardsChance() {
return cfDef.merge_shards_chance;
}
@Override
public Integer getMinCompactionThreshold() {
return cfDef.min_compaction_threshold;
}
@Override
public Double getReadRepairChance() {
return cfDef.read_repair_chance;
}
@Override
public Boolean getReplicateOnWrite() {
return cfDef.replicate_on_write;
}
@Override
public String getRowCacheProvider() {
return cfDef.row_cache_provider;
}
@Override
public Integer getRowCacheSavePeriodInSeconds() {
return cfDef.row_cache_save_period_in_seconds;
}
@Override
public Double getRowCacheSize() {
return cfDef.row_cache_size;
}
@Override
public Double getKeyCacheSize() {
return cfDef.key_cache_size;
}
// Field names come from the static Thrift-derived metadata map.
@Override
public Collection<String> getFieldNames() {
return fieldsMetadata.keySet();
}
// Reflective access to an arbitrary CfDef field by its Thrift _Fields name.
@Override
public Object getFieldValue(String name) {
return cfDef.getFieldValue(_Fields.valueOf(name));
}
// Reflective write of an arbitrary CfDef field by its Thrift _Fields name.
@Override
public ColumnFamilyDefinition setFieldValue(String name, Object value) {
cfDef.setFieldValue(_Fields.valueOf(name), value);
return this;
}
@Override
public ColumnDefinition makeColumnDefinition() {
return new ThriftColumnDefinitionImpl();
}
@Override
public Collection<FieldMetadata> getFieldsMetadata() {
return fieldsMetadata.values();
}
@Override
public ColumnFamilyDefinition setMaxCompactionThreshold(Integer value) {
if (value != null)
cfDef.setMax_compaction_threshold(value);
return this;
}
@Override
public Integer getMaxCompactionThreshold() {
return cfDef.getMax_compaction_threshold();
}
@Override
public ColumnFamilyDefinition setCompactionStrategy(String strategy) {
cfDef.setCompaction_strategy(strategy);
return this;
}
@Override
public String getCompactionStrategy() {
return cfDef.getCompaction_strategy();
}
@Override
public ColumnFamilyDefinition setCompactionStrategyOptions(Map<String, String> options) {
cfDef.setCompaction_strategy_options(options);
return this;
}
@Override
public Map<String, String> getCompactionStrategyOptions() {
return cfDef.getCompaction_strategy_options();
}
@Override
public ColumnFamilyDefinition setCompressionOptions(Map<String, String> options) {
cfDef.setCompression_options(options);
return this;
}
@Override
public Map<String, String> getCompressionOptions() {
return cfDef.getCompression_options();
}
@Override
public ColumnFamilyDefinition setBloomFilterFpChance(Double chance) {
if (chance != null)
cfDef.setBloom_filter_fp_chance(chance);
return this;
}
@Override
public Double getBloomFilterFpChance() {
return cfDef.getBloom_filter_fp_chance();
}
@Override
public ColumnFamilyDefinition setCaching(String caching) {
cfDef.setCaching(caching);
return this;
}
@Override
public String getCaching() {
return cfDef.getCaching();
}
@Override
public ColumnFamilyDefinition setLocalReadRepairChance(Double value) {
if (value != null)
cfDef.setDclocal_read_repair_chance(value);
return this;
}
@Override
public Double getLocalReadRepairChance() {
return cfDef.getDclocal_read_repair_chance();
}
@Override
public ColumnFamilyDefinition setGcGraceSeconds(Integer seconds) {
if (seconds != null)
cfDef.setGc_grace_seconds(seconds);
return this;
}
@Override
public Integer getGcGraceSeconds() {
return cfDef.getGc_grace_seconds();
}
// Bulk-applies options keyed by Thrift field name; unknown keys are ignored.
// "column_metadata" is special-cased: its value is assumed to be a Map of
// column name -> per-column option Map — TODO confirm against callers.
@Override
public void setFields(Map<String, Object> options) {
for (Entry<String, FieldMetadata> field : fieldsMetadata.entrySet()) {
String fieldName = field.getKey();
if (options.containsKey(fieldName)) {
if (fieldName.equals("column_metadata")) {
Map<String, Object> columns = (Map<String, Object>) options.get("column_metadata");
for (Entry<String, Object> column : columns.entrySet()) {
ThriftColumnDefinitionImpl columnDef = new ThriftColumnDefinitionImpl();
columnDef.setName(column.getKey().toString());
columnDef.setFields((Map<String, Object>) column.getValue());
this.addColumnDefinition(columnDef);
}
}
else {
setFieldValue(field.getValue().getName(), options.get(fieldName));
}
}
}
}
// Round-trips the full CfDef through flat Properties (see ThriftUtils).
@Override
public Properties getProperties() throws Exception {
return ThriftUtils.getPropertiesFromThrift(cfDef);
}
@Override
public void setProperties(Properties properties) throws Exception {
ThriftUtils.populateObjectFromProperties(cfDef, properties);
}
}
| 8,006 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ddl/ThriftKeyspaceDefinitionImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.ddl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import org.apache.cassandra.thrift.CfDef;
import org.apache.cassandra.thrift.KsDef;
import org.apache.cassandra.thrift.KsDef._Fields;
import org.apache.thrift.meta_data.FieldMetaData;
import com.google.common.collect.Maps;
import com.netflix.astyanax.ddl.ColumnFamilyDefinition;
import com.netflix.astyanax.ddl.FieldMetadata;
import com.netflix.astyanax.ddl.KeyspaceDefinition;
import com.netflix.astyanax.thrift.ThriftTypes;
import com.netflix.astyanax.thrift.ThriftUtils;
/**
 * {@link KeyspaceDefinition} implementation that delegates to a Thrift {@link KsDef}.
 * The wrapped KsDef is exposed via {@link #getThriftKeyspaceDefinition()} so it can
 * be handed directly to the Thrift API.
 */
public class ThriftKeyspaceDefinitionImpl implements KeyspaceDefinition {
    /** Field metadata derived once from the generated Thrift KsDef metadata map. */
    private final static Map<String, FieldMetadata> fieldsMetadata = Maps.newHashMap();
    static {
        for (Entry<_Fields, FieldMetaData> field : KsDef.metaDataMap.entrySet()) {
            fieldsMetadata.put(
                    field.getValue().fieldName,
                    new FieldMetadata(
                        field.getKey().name(),
                        ThriftTypes.values()[field.getValue().valueMetaData.type].name(),
                        field.getValue().valueMetaData.isContainer()));
        }
    }
    /** Backing Thrift keyspace definition; all accessors delegate to it. */
    protected KsDef ks_def;

    /** Creates a definition wrapping a fresh KsDef with an empty cf_defs list. */
    public ThriftKeyspaceDefinitionImpl() {
        ks_def = new KsDef();
        ks_def.setCf_defs(new ArrayList<CfDef>());
    }

    /** Wraps an existing KsDef (e.g. one returned by describe_keyspace). */
    public ThriftKeyspaceDefinitionImpl(KsDef ks_def) {
        this.ks_def = ks_def;
    }

    /** Exposes the raw Thrift struct for direct use with the Thrift API. */
    public KsDef getThriftKeyspaceDefinition() {
        return ks_def;
    }

    @Override
    public KeyspaceDefinition setName(String name) {
        ks_def.setName(name);
        return this;
    }

    @Override
    public String getName() {
        return ks_def.getName();
    }

    @Override
    public KeyspaceDefinition setStrategyClass(String strategyClass) {
        ks_def.setStrategy_class(strategyClass);
        return this;
    }

    @Override
    public String getStrategyClass() {
        return ks_def.getStrategy_class();
    }

    @Override
    public KeyspaceDefinition setStrategyOptions(Map<String, String> options) {
        ks_def.setStrategy_options(options);
        return this;
    }

    /**
     * Returns the live strategy-options map, lazily creating an empty map on the
     * KsDef if none is set. Mutations to the returned map are reflected in ks_def.
     */
    @Override
    public Map<String, String> getStrategyOptions() {
        if (ks_def.getStrategy_options() == null) {
            ks_def.strategy_options = new HashMap<String, String>();
        }
        return ks_def.getStrategy_options();
    }

    @Override
    public KeyspaceDefinition addStrategyOption(String name, String value) {
        getStrategyOptions().put(name, value);
        return this;
    }

    /**
     * Adds a column family to this keyspace. The CfDef is stamped with
     * column_type "Standard" and this keyspace's name before being appended.
     */
    @Override
    public KeyspaceDefinition addColumnFamily(ColumnFamilyDefinition cfDef) {
        if (ks_def.getCf_defs() == null) {
            ks_def.setCf_defs(new ArrayList<CfDef>());
        }
        CfDef thriftCfDef = ((ThriftColumnFamilyDefinitionImpl) cfDef).getThriftColumnFamilyDefinition();
        thriftCfDef.setColumn_type("Standard");
        thriftCfDef.setKeyspace(ks_def.getName());
        ks_def.getCf_defs().add(thriftCfDef);
        return this;
    }

    /** Returns wrapper views over all column families; empty list when cf_defs is unset. */
    @Override
    public List<ColumnFamilyDefinition> getColumnFamilyList() {
        List<ColumnFamilyDefinition> list = new ArrayList<ColumnFamilyDefinition>();
        List<CfDef> cfdefs = ks_def.getCf_defs();
        // Guard against a null cf_defs (possible for a KsDef built elsewhere);
        // previously this would throw a NullPointerException.
        if (cfdefs != null) {
            for (CfDef cfdef : cfdefs) {
                list.add(new ThriftColumnFamilyDefinitionImpl(cfdef));
            }
        }
        return list;
    }

    /**
     * Looks up a column family by name (case-insensitive).
     *
     * @return the matching definition, or null when not found or cf_defs is unset
     */
    @Override
    public ColumnFamilyDefinition getColumnFamily(String columnFamily) {
        List<CfDef> cfdefs = ks_def.getCf_defs();
        if (cfdefs != null) {
            for (CfDef cfdef : cfdefs) {
                if (cfdef.getName().equalsIgnoreCase(columnFamily)) {
                    return new ThriftColumnFamilyDefinitionImpl(cfdef);
                }
            }
        }
        return null;
    }

    @Override
    public Collection<String> getFieldNames() {
        return fieldsMetadata.keySet();
    }

    /** Reflective access to an arbitrary KsDef field by its Thrift _Fields name. */
    @Override
    public Object getFieldValue(String name) {
        return ks_def.getFieldValue(_Fields.valueOf(name));
    }

    @Override
    public KeyspaceDefinition setFieldValue(String name, Object value) {
        ks_def.setFieldValue(_Fields.valueOf(name), value);
        return this;
    }

    @Override
    public Collection<FieldMetadata> getFieldsMetadata() {
        return fieldsMetadata.values();
    }

    /** Bulk-applies options keyed by Thrift field name; unknown keys are ignored. */
    @Override
    public void setFields(Map<String, Object> options) {
        for (Entry<String, FieldMetadata> field : fieldsMetadata.entrySet()) {
            String fieldName = field.getKey();
            if (options.containsKey(fieldName)) {
                setFieldValue(field.getValue().getName(), options.get(fieldName));
            }
        }
    }

    /** Round-trips the full KsDef through flat Properties (see ThriftUtils). */
    @Override
    public Properties getProperties() throws Exception {
        return ThriftUtils.getPropertiesFromThrift(ks_def);
    }

    @Override
    public void setProperties(Properties properties) throws Exception {
        ThriftUtils.populateObjectFromProperties(ks_def, properties);
    }
}
| 8,007 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/ddl/ThriftColumnDefinitionImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.ddl;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.cassandra.thrift.ColumnDef;
import org.apache.cassandra.thrift.IndexType;
import org.apache.cassandra.thrift.ColumnDef._Fields;
import org.apache.thrift.meta_data.FieldMetaData;
import com.google.common.collect.Maps;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.ddl.FieldMetadata;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.thrift.ThriftTypes;
/**
 * {@link ColumnDefinition} implementation backed by a Thrift {@link ColumnDef}.
 * All accessors delegate directly to the wrapped struct, which is handed back
 * to the Thrift API via {@link #getThriftColumnDefinition()}.
 */
public class ThriftColumnDefinitionImpl implements ColumnDefinition {
    /** Field metadata derived once from the generated Thrift ColumnDef metadata map. */
    private final static Map<String, FieldMetadata> fieldsMetadata = Maps.newHashMap();
    /** Backing Thrift column definition. */
    private final ColumnDef columnDef;
    static {
        for (Entry<_Fields, FieldMetaData> field : ColumnDef.metaDataMap.entrySet()) {
            fieldsMetadata.put(
                    field.getValue().fieldName,
                    new FieldMetadata(
                        field.getKey().name(),
                        ThriftTypes.values()[field.getValue().valueMetaData.type].name(),
                        field.getValue().valueMetaData.isContainer()));
        }
    }

    public ThriftColumnDefinitionImpl() {
        this.columnDef = new ColumnDef();
    }

    public ThriftColumnDefinitionImpl(ColumnDef columnDef) {
        this.columnDef = columnDef;
    }

    ColumnDef getThriftColumnDefinition() {
        return columnDef;
    }

    @Override
    public Map<String, String> getOptions() {
        return columnDef.getIndex_options();
    }

    @Override
    public ColumnDefinition setOptions(Map<String, String> index_options ) {
        columnDef.setIndex_options(index_options);
        return this;
    }

    @Override
    public ColumnDefinition setName(String name) {
        // Encode via StringSerializer (UTF-8) so non-ASCII names round-trip with
        // getName(), which decodes with the same serializer. The previous
        // name.getBytes() used the JVM's platform-default charset.
        columnDef.setName(StringSerializer.get().toByteBuffer(name));
        return this;
    }

    @Override
    public ColumnDefinition setName(ByteBuffer name) {
        columnDef.setName(name.duplicate());
        return this;
    }

    @Override
    public ColumnDefinition setName(byte[] name) {
        columnDef.setName(name);
        return this;
    }

    @Override
    public ColumnDefinition setValidationClass(String value) {
        columnDef.setValidation_class(value);
        return this;
    }

    @Override
    public String getName() {
        return StringSerializer.get().fromByteBuffer(getRawName().duplicate());
    }

    @Override
    public String getValidationClass() {
        return columnDef.getValidation_class();
    }

    @Override
    public ByteBuffer getRawName() {
        return ByteBuffer.wrap(this.columnDef.getName());
    }

    @Override
    public String getIndexName() {
        return this.columnDef.getIndex_name();
    }

    /** @return the index type name, or null when no index is defined */
    @Override
    public String getIndexType() {
        return this.columnDef.getIndex_type() == null ? null : this.columnDef.getIndex_type().name();
    }

    @Override
    public boolean hasIndex() {
        return this.columnDef.getIndex_type() != null;
    }

    @Override
    public ColumnDefinition setIndex(String name, String type) {
        this.columnDef.setIndex_name(name);
        this.columnDef.setIndex_type(IndexType.valueOf(type));
        return this;
    }

    @Override
    public ColumnDefinition setKeysIndex(String name) {
        this.columnDef.setIndex_name(name);
        this.columnDef.setIndex_type(IndexType.KEYS);
        return this;
    }

    @Override
    public ColumnDefinition setKeysIndex() {
        this.columnDef.setIndex_type(IndexType.KEYS);
        return this;
    }

    @Override
    public ColumnDefinition setIndexWithType(String type) {
        this.columnDef.setIndex_type(IndexType.valueOf(type));
        return this;
    }

    /** Returns the named index option, or defaultValue when unset. */
    @Override
    public String getOption(String name, String defaultValue) {
        if (this.columnDef != null && this.columnDef.getIndex_options() != null) {
            String value = this.columnDef.getIndex_options().get(name);
            if (value != null)
                return value;
        }
        return defaultValue;
    }

    /** Sets an index option, lazily creating the options map; returns the prior value. */
    @Override
    public String setOption(String name, String value) {
        if (this.columnDef.getIndex_options() == null) {
            this.columnDef.setIndex_options(new HashMap<String, String>());
        }
        return this.columnDef.getIndex_options().put(name, value);
    }

    @Override
    public Collection<String> getFieldNames() {
        return fieldsMetadata.keySet();
    }

    /** Reflective access to an arbitrary ColumnDef field by its Thrift _Fields name. */
    @Override
    public Object getFieldValue(String name) {
        return columnDef.getFieldValue(_Fields.valueOf(name));
    }

    @Override
    public ColumnDefinition setFieldValue(String name, Object value) {
        columnDef.setFieldValue(_Fields.valueOf(name), value);
        return this;
    }

    @Override
    public Collection<FieldMetadata> getFieldsMetadata() {
        return fieldsMetadata.values();
    }

    /**
     * Bulk-applies options keyed by Thrift field name; "index_type" values are
     * converted from their string form to the IndexType enum.
     */
    @Override
    public ColumnDefinition setFields(Map<String, Object> options) {
        for (Entry<String, FieldMetadata> field : fieldsMetadata.entrySet()) {
            String fieldName = field.getKey();
            if (options.containsKey(fieldName)) {
                if ("index_type".equals(fieldName)) {
                    setFieldValue(field.getValue().getName(), IndexType.valueOf(options.get(fieldName).toString()));
                }
                else {
                    setFieldValue(field.getValue().getName(), options.get(fieldName));
                }
            }
        }
        return this;
    }
}
| 8,008 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftColumnListImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.util.Collection;
import java.util.Iterator;
import java.util.HashMap;
import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.AbstractColumnList;
import com.netflix.astyanax.model.Column;
/**
* Wrapper for a simple list of columns where each column has a scalar value.
*
* @author elandau
*
* @param <C>
*/
/**
 * Wrapper for a simple list of columns where each column has a scalar value.
 * Name-based lookup is served by a lazily built HashMap; index-based access
 * goes straight to the underlying list.
 *
 * @author elandau
 *
 * @param <C> column name type
 */
public class ThriftColumnListImpl<C> extends AbstractColumnList<C> {
    /** Underlying Thrift columns, in server-returned order. */
    private final List<org.apache.cassandra.thrift.Column> columns;
    /** Lazily built name -> thrift column index; see constructColumnMap(). */
    private HashMap<C, org.apache.cassandra.thrift.Column> lookup;
    /** Serializer used to decode raw column names into C. */
    private final Serializer<C> colSer;

    public ThriftColumnListImpl(List<org.apache.cassandra.thrift.Column> columns, Serializer<C> colSer) {
        Preconditions.checkArgument(columns != null, "Columns must not be null");
        Preconditions.checkArgument(colSer != null, "Serializer must not be null");
        this.colSer = colSer;
        this.columns = columns;
    }

    /** Wraps a raw thrift column in the Astyanax Column facade. */
    private Column<C> wrap(org.apache.cassandra.thrift.Column c) {
        return new ThriftColumnImpl<C>(colSer.fromBytes(c.getName()), c);
    }

    @Override
    public Iterator<Column<C>> iterator() {
        // Immutable view over the thrift column iterator.
        class IteratorImpl implements Iterator<Column<C>> {
            Iterator<org.apache.cassandra.thrift.Column> base;

            public IteratorImpl(Iterator<org.apache.cassandra.thrift.Column> base) {
                this.base = base;
            }

            @Override
            public boolean hasNext() {
                return base.hasNext();
            }

            @Override
            public Column<C> next() {
                return wrap(base.next());
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException("Iterator is immutable");
            }
        }
        return new IteratorImpl(columns.iterator());
    }

    /** @return the named column, or null when absent */
    @Override
    public Column<C> getColumnByName(C columnName) {
        constructColumnMap();
        org.apache.cassandra.thrift.Column c = lookup.get(columnName);
        if (c == null) {
            return null;
        }
        return wrap(c);
    }

    /** Builds the name lookup map once, on first name-based access. */
    private void constructColumnMap() {
        if (lookup == null) {
            lookup = Maps.newHashMap();
            for (org.apache.cassandra.thrift.Column column : columns) {
                lookup.put(colSer.fromBytes(column.getName()), column);
            }
        }
    }

    @Override
    public Column<C> getColumnByIndex(int idx) {
        return wrap(columns.get(idx));
    }

    /** Decodes and returns only the name of the column at idx. */
    public C getNameByIndex(int idx) {
        org.apache.cassandra.thrift.Column column = columns.get(idx);
        return colSer.fromBytes(column.getName());
    }

    @Override
    public <C2> Column<C2> getSuperColumn(C columnName, Serializer<C2> colSer) {
        throw new UnsupportedOperationException();
    }

    @Override
    public <C2> Column<C2> getSuperColumn(int idx, Serializer<C2> colSer) {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean isEmpty() {
        return columns.isEmpty();
    }

    @Override
    public int size() {
        return columns.size();
    }

    /** This list holds only standard (scalar) columns. */
    @Override
    public boolean isSuperColumn() {
        return false;
    }

    @Override
    public Collection<C> getColumnNames() {
        constructColumnMap();
        return lookup.keySet();
    }
}
| 8,009 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftRowsListImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
/**
 * Rows implementation that materializes the result of a Thrift multiget into
 * ThriftRowImpl instances, preserving the iteration order of the source map
 * (the key lookup map is insertion-ordered).
 */
public class ThriftRowsListImpl<K, C> implements Rows<K, C> {
    private final List<Row<K, C>> rowList;
    private final Map<K, Row<K, C>> rowsByKey;

    public ThriftRowsListImpl(Map<ByteBuffer, List<ColumnOrSuperColumn>> rows, Serializer<K> keySer, Serializer<C> colSer) {
        rowList = Lists.newArrayListWithCapacity(rows.size());
        rowsByKey = Maps.newLinkedHashMap();
        for (Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : rows.entrySet()) {
            ByteBuffer rawKey = entry.getKey();
            // Duplicate before decoding so the stored raw key's position is untouched.
            Row<K, C> row = new ThriftRowImpl<K, C>(
                    keySer.fromByteBuffer(rawKey.duplicate()),
                    rawKey,
                    new ThriftColumnOrSuperColumnListImpl<C>(entry.getValue(), colSer));
            rowList.add(row);
            rowsByKey.put(row.getKey(), row);
        }
    }

    @Override
    public Iterator<Row<K, C>> iterator() {
        return rowList.iterator();
    }

    /** @return the row for the given key, or null when absent */
    @Override
    public Row<K, C> getRow(K key) {
        return rowsByKey.get(key);
    }

    @Override
    public int size() {
        return rowList.size();
    }

    @Override
    public boolean isEmpty() {
        return rowList.isEmpty();
    }

    @Override
    public Row<K, C> getRowByIndex(int i) {
        return rowList.get(i);
    }

    @Override
    public Collection<K> getKeys() {
        return rowsByKey.keySet();
    }
}
| 8,010 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftColumnOrSuperColumnListImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.CounterColumn;
import org.apache.cassandra.thrift.CounterSuperColumn;
import org.apache.cassandra.thrift.SuperColumn;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.AbstractColumnList;
import com.netflix.astyanax.model.Column;
/**
* List of columns that can be either a list of super columns or standard
* columns.
*
* @author elandau
*
* @param <C>
*/
/**
 * List of columns that can be either a list of super columns or standard
 * columns. Each ColumnOrSuperColumn union entry is unwrapped on access into
 * the matching Astyanax facade (standard, counter, super, counter-super).
 *
 * @author elandau
 *
 * @param <C> column name type
 */
public class ThriftColumnOrSuperColumnListImpl<C> extends AbstractColumnList<C> {
    /** Underlying Thrift union entries, in server-returned order. */
    private final List<ColumnOrSuperColumn> columns;
    /** Lazily built name -> entry index; see constructMap(). */
    private Map<C, ColumnOrSuperColumn> lookup;
    /** Serializer used to decode raw column names into C. */
    private final Serializer<C> colSer;

    public ThriftColumnOrSuperColumnListImpl(List<ColumnOrSuperColumn> columns, Serializer<C> colSer) {
        Preconditions.checkArgument(columns != null, "Columns must not be null");
        Preconditions.checkArgument(colSer != null, "Serializer must not be null");
        this.columns = columns;
        this.colSer = colSer;
    }

    @Override
    public Iterator<Column<C>> iterator() {
        // Immutable view that unwraps each union entry into the proper facade.
        class IteratorImpl implements Iterator<Column<C>> {
            Iterator<ColumnOrSuperColumn> base;

            public IteratorImpl(Iterator<ColumnOrSuperColumn> base) {
                this.base = base;
            }

            @Override
            public boolean hasNext() {
                return base.hasNext();
            }

            @Override
            public Column<C> next() {
                ColumnOrSuperColumn column = base.next();
                if (column.isSetSuper_column()) {
                    SuperColumn sc = column.getSuper_column();
                    return new ThriftSuperColumnImpl<C>(colSer.fromBytes(sc.getName()), sc);
                }
                else if (column.isSetCounter_column()) {
                    CounterColumn cc = column.getCounter_column();
                    return new ThriftCounterColumnImpl<C>(colSer.fromBytes(cc.getName()), cc);
                }
                else if (column.isSetCounter_super_column()) {
                    CounterSuperColumn cc = column.getCounter_super_column();
                    return new ThriftCounterSuperColumnImpl<C>(colSer.fromBytes(cc.getName()), cc);
                }
                else if (column.isSetColumn()) {
                    org.apache.cassandra.thrift.Column c = column.getColumn();
                    return new ThriftColumnImpl<C>(colSer.fromBytes(c.getName()), c);
                }
                else {
                    // Fixed typo ("Unknwon") and matches the message in constructMap().
                    throw new RuntimeException("Unknown column type");
                }
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException("Iterator is immutable");
            }
        }
        return new IteratorImpl(columns.iterator());
    }

    /**
     * @return the named standard or counter column, or null when absent
     * @throws UnsupportedOperationException when the entry is a super column
     */
    @Override
    public Column<C> getColumnByName(C columnName) {
        ColumnOrSuperColumn column = getColumn(columnName);
        if (column == null) {
            return null;
        }
        else if (column.isSetColumn()) {
            return new ThriftColumnImpl<C>(columnName, column.getColumn());
        }
        else if (column.isSetCounter_column()) {
            return new ThriftCounterColumnImpl<C>(columnName, column.getCounter_column());
        }
        throw new UnsupportedOperationException("SuperColumn " + columnName + " has no value");
    }

    @Override
    public Column<C> getColumnByIndex(int idx) {
        ColumnOrSuperColumn column = columns.get(idx);
        if (column == null) {
            // TODO: throw an exception
            return null;
        }
        else if (column.isSetColumn()) {
            return new ThriftColumnImpl<C>(this.colSer.fromBytes(column.getColumn().getName()), column.getColumn());
        }
        else if (column.isSetCounter_column()) {
            return new ThriftCounterColumnImpl<C>(this.colSer.fromBytes(column.getCounter_column().getName()),
                    column.getCounter_column());
        }
        throw new UnsupportedOperationException("SuperColumn " + idx + " has no value");
    }

    /**
     * @return the named super or counter-super column, or null when absent
     * @throws UnsupportedOperationException when the entry is a standard column
     */
    @Override
    public <C2> Column<C2> getSuperColumn(C columnName, Serializer<C2> colSer) {
        ColumnOrSuperColumn column = getColumn(columnName);
        if (column == null) {
            // TODO: throw an exception
            return null;
        }
        else if (column.isSetSuper_column()) {
            SuperColumn sc = column.getSuper_column();
            return new ThriftSuperColumnImpl<C2>(colSer.fromBytes(sc.getName()), sc);
        }
        else if (column.isSetCounter_super_column()) {
            CounterSuperColumn sc = column.getCounter_super_column();
            return new ThriftCounterSuperColumnImpl<C2>(colSer.fromBytes(sc.getName()), sc);
        }
        throw new UnsupportedOperationException("\'" + columnName + "\' is not a composite column");
    }

    @Override
    public <C2> Column<C2> getSuperColumn(int idx, Serializer<C2> colSer) {
        ColumnOrSuperColumn column = this.columns.get(idx);
        if (column == null) {
            // TODO: throw an exception
            return null;
        }
        else if (column.isSetSuper_column()) {
            SuperColumn sc = column.getSuper_column();
            return new ThriftSuperColumnImpl<C2>(colSer.fromBytes(sc.getName()), sc);
        }
        else if (column.isSetCounter_super_column()) {
            CounterSuperColumn sc = column.getCounter_super_column();
            return new ThriftCounterSuperColumnImpl<C2>(colSer.fromBytes(sc.getName()), sc);
        }
        throw new UnsupportedOperationException("\'" + idx + "\' is not a super column");
    }

    @Override
    public boolean isEmpty() {
        return columns.isEmpty();
    }

    @Override
    public int size() {
        return columns.size();
    }

    /** Decides super-ness from the first entry; false for an empty list. */
    @Override
    public boolean isSuperColumn() {
        if (columns.isEmpty())
            return false;
        ColumnOrSuperColumn sosc = columns.get(0);
        return sosc.isSetSuper_column() || sosc.isSetCounter_super_column();
    }

    private ColumnOrSuperColumn getColumn(C columnName) {
        constructMap();
        return lookup.get(columnName);
    }

    /** Builds the name lookup map once, on first name-based access. */
    private void constructMap() {
        if (lookup == null) {
            lookup = Maps.newHashMap();
            for (ColumnOrSuperColumn column : columns) {
                if (column.isSetSuper_column()) {
                    lookup.put(colSer.fromBytes(column.getSuper_column().getName()), column);
                }
                else if (column.isSetColumn()) {
                    lookup.put(colSer.fromBytes(column.getColumn().getName()), column);
                }
                else if (column.isSetCounter_column()) {
                    lookup.put(colSer.fromBytes(column.getCounter_column().getName()), column);
                }
                else if (column.isSetCounter_super_column()) {
                    lookup.put(colSer.fromBytes(column.getCounter_super_column().getName()), column);
                }
                else {
                    throw new UnsupportedOperationException("Unknown column type");
                }
            }
        }
    }

    @Override
    public Collection<C> getColumnNames() {
        constructMap();
        return lookup.keySet();
    }
}
| 8,011 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftCounterColumnImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.nio.ByteBuffer;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.AbstractColumnImpl;
import com.netflix.astyanax.model.ColumnList;
/**
 * Column adapter around a thrift CounterColumn.  A counter column carries
 * only a 64-bit count: it exposes no generic value, no sub columns, no
 * timestamp and no TTL.
 */
public class ThriftCounterColumnImpl<C> extends AbstractColumnImpl<C> {
    private final org.apache.cassandra.thrift.CounterColumn column;

    public ThriftCounterColumnImpl(C name, org.apache.cassandra.thrift.CounterColumn column) {
        super(name);
        this.column = column;
    }

    /** Returns the current 64-bit counter value. */
    @Override
    public long getLongValue() {
        return column.getValue();
    }

    /** Counter values must be read via {@link #getLongValue()}. */
    @Override
    public <V> V getValue(Serializer<V> valSer) {
        throw new UnsupportedOperationException("CounterColumn \'" + getName()
                + "\' has no generic value. Call getLongValue().");
    }

    /** Counter columns are leaves and never contain sub columns. */
    @Override
    public <C2> ColumnList<C2> getSubColumns(Serializer<C2> ser) {
        throw new UnsupportedOperationException("CounterColumn \'" + getName()
                + "\' has no sub columns. Call getLongValue().");
    }

    @Override
    public boolean isParentColumn() {
        return false;
    }

    @Override
    public long getTimestamp() {
        throw new UnsupportedOperationException("CounterColumn \'" + getName() + "\' has no timestamp");
    }

    /** Raw, undecoded column name bytes. */
    @Override
    public ByteBuffer getRawName() {
        return ByteBuffer.wrap(column.getName());
    }

    /** Counter columns never expire. */
    @Override
    public int getTtl() {
        return 0;
    }

    /** A counter column always has a value: its count. */
    @Override
    public boolean hasValue() {
        return true;
    }
}
| 8,012 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftSuperColumnImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.SuperColumn;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.AbstractColumnImpl;
import com.netflix.astyanax.model.ColumnList;
/**
 * Column adapter around a thrift {@link SuperColumn}.  A super column is
 * purely a container: it holds sub columns but has no value, timestamp or
 * TTL of its own.
 */
public class ThriftSuperColumnImpl<C> extends AbstractColumnImpl<C> {
    private final SuperColumn column;

    public ThriftSuperColumnImpl(C name, SuperColumn column) {
        super(name);
        this.column = column;
    }

    /** Super columns are parent containers. */
    @Override
    public boolean isParentColumn() {
        return true;
    }

    /** Wraps the contained thrift columns in a typed column list. */
    @Override
    public <C2> ColumnList<C2> getSubColumns(Serializer<C2> ser) {
        return new ThriftColumnListImpl<C2>(column.getColumns(), ser);
    }

    @Override
    public <V> V getValue(Serializer<V> valSer) {
        throw new UnsupportedOperationException("SuperColumn \'" + getName() + "\' has no value");
    }

    @Override
    public long getTimestamp() {
        throw new UnsupportedOperationException("SuperColumn \'" + getName() + "\' has no timestamp");
    }

    @Override
    public int getTtl() {
        throw new UnsupportedOperationException("SuperColumn \'" + getName() + "\' has no ttl");
    }

    /** Raw, undecoded super column name bytes. */
    @Override
    public ByteBuffer getRawName() {
        return ByteBuffer.wrap(column.getName());
    }

    /** Super columns never carry a value of their own. */
    @Override
    public boolean hasValue() {
        return false;
    }
}
| 8,013 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftCounterColumnListImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Maps;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.AbstractColumnList;
import com.netflix.astyanax.model.Column;
import org.apache.cassandra.thrift.CounterColumn;
/**
 * ColumnList adapter around a list of thrift {@link CounterColumn}s.  A
 * name-to-column lookup map is built lazily on the first by-name access.
 */
public class ThriftCounterColumnListImpl<C> extends AbstractColumnList<C> {
    private final List<CounterColumn> columns;
    private final Serializer<C> colSer;
    private Map<C, CounterColumn> lookup;

    public ThriftCounterColumnListImpl(List<CounterColumn> columns, Serializer<C> colSer) {
        this.columns = columns;
        this.colSer = colSer;
    }

    /** Wraps a raw thrift counter column in the typed column interface. */
    private Column<C> wrap(CounterColumn raw) {
        return new ThriftCounterColumnImpl<C>(colSer.fromBytes(raw.getName()), raw);
    }

    /** Read-only iterator over the columns; remove() is unsupported. */
    @Override
    public Iterator<Column<C>> iterator() {
        final Iterator<CounterColumn> delegate = columns.iterator();
        return new Iterator<Column<C>>() {
            @Override
            public boolean hasNext() {
                return delegate.hasNext();
            }

            @Override
            public Column<C> next() {
                return wrap(delegate.next());
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException("Iterator is immutable");
            }
        };
    }

    /** Returns the column with the given name, or null if absent. */
    @Override
    public Column<C> getColumnByName(C columnName) {
        constructMap();
        CounterColumn raw = lookup.get(columnName);
        return raw == null ? null : wrap(raw);
    }

    @Override
    public Column<C> getColumnByIndex(int idx) {
        return wrap(columns.get(idx));
    }

    @Override
    public <C2> Column<C2> getSuperColumn(C columnName, Serializer<C2> colSer) {
        throw new UnsupportedOperationException("Call getCounter");
    }

    @Override
    public <C2> Column<C2> getSuperColumn(int idx, Serializer<C2> colSer) {
        throw new UnsupportedOperationException("Call getCounter");
    }

    @Override
    public boolean isEmpty() {
        return columns.isEmpty();
    }

    @Override
    public int size() {
        return columns.size();
    }

    /** A counter column list never represents super columns. */
    @Override
    public boolean isSuperColumn() {
        return false;
    }

    /** All column names, decoded with the configured serializer. */
    @Override
    public Collection<C> getColumnNames() {
        constructMap();
        return lookup.keySet();
    }

    /** Builds the name lookup map at most once, on demand. */
    private void constructMap() {
        if (lookup != null) {
            return;
        }
        lookup = Maps.newHashMap();
        for (CounterColumn raw : columns) {
            lookup.put(colSer.fromBytes(raw.getName()), raw);
        }
    }
}
| 8,014 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftCqlResultImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import com.netflix.astyanax.model.CqlResult;
import com.netflix.astyanax.model.Rows;
/**
 * CQL result holder that wraps either a set of rows or a scalar count,
 * never both.  Callers should consult {@link #hasRows()} /
 * {@link #hasNumber()} before reading the corresponding value.
 */
public class ThriftCqlResultImpl<K, C> implements CqlResult<K, C> {
    private final Rows<K, C> rows;
    private final Integer number;

    /** Creates a row-bearing result; the count is absent. */
    public ThriftCqlResultImpl(Rows<K, C> rows) {
        this.rows = rows;
        this.number = null;
    }

    /** Creates a count-bearing result; rows are absent. */
    public ThriftCqlResultImpl(Integer count) {
        this.rows = null;
        this.number = count;
    }

    /** Returns the rows, or null for a count-only result. */
    @Override
    public Rows<K, C> getRows() {
        return rows;
    }

    /**
     * Returns the scalar count.
     *
     * @throws IllegalStateException if this result holds rows rather than a
     *         count.  (Previously this auto-unboxed a null Integer, throwing
     *         an uninformative NullPointerException.)
     */
    @Override
    public int getNumber() {
        if (number == null) {
            throw new IllegalStateException(
                    "Result has no count; check hasNumber() before calling getNumber()");
        }
        return number;
    }

    /** True when this result holds at least one row. */
    @Override
    public boolean hasRows() {
        return rows != null && !rows.isEmpty();
    }

    /** True when this result holds a scalar count. */
    @Override
    public boolean hasNumber() {
        return number != null;
    }
}
| 8,015 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftCounterSuperColumnImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.CounterSuperColumn;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.AbstractColumnImpl;
import com.netflix.astyanax.model.ColumnList;
/**
 * Column adapter around a thrift {@link CounterSuperColumn}: a container
 * of counter sub columns with no value, timestamp or TTL of its own.
 */
public class ThriftCounterSuperColumnImpl<C> extends AbstractColumnImpl<C> {
    private final CounterSuperColumn column;

    public ThriftCounterSuperColumnImpl(C name, CounterSuperColumn column) {
        super(name);
        this.column = column;
    }

    /** Counter super columns are parent containers. */
    @Override
    public boolean isParentColumn() {
        return true;
    }

    /** Wraps the contained counter columns in a typed column list. */
    @Override
    public <C2> ColumnList<C2> getSubColumns(Serializer<C2> ser) {
        return new ThriftCounterColumnListImpl<C2>(column.getColumns(), ser);
    }

    @Override
    public <V> V getValue(Serializer<V> valSer) {
        throw new UnsupportedOperationException("CounterSuperColumn \'" + getName() + "\' has no value");
    }

    @Override
    public long getTimestamp() {
        throw new UnsupportedOperationException("CounterSuperColumn \'" + getName() + "\' has no timestamp");
    }

    @Override
    public int getTtl() {
        throw new UnsupportedOperationException("CounterSuperColumn \'" + getName() + "\' has no ttl");
    }

    /** Raw, undecoded super column name bytes. */
    @Override
    public ByteBuffer getRawName() {
        return ByteBuffer.wrap(column.getName());
    }

    /** Never carries a value of its own. */
    @Override
    public boolean hasValue() {
        return false;
    }
}
| 8,016 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftColumnImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.nio.ByteBuffer;
import java.util.Date;
import java.util.UUID;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.AbstractColumnImpl;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.serializers.BooleanSerializer;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.serializers.ByteSerializer;
import com.netflix.astyanax.serializers.BytesArraySerializer;
import com.netflix.astyanax.serializers.DateSerializer;
import com.netflix.astyanax.serializers.DoubleSerializer;
import com.netflix.astyanax.serializers.FloatSerializer;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.ShortSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.serializers.UUIDSerializer;
/**
 * Column adapter around a raw thrift Column.
 *
 * TODO: Several serializer imports above are currently unused; either add
 * the corresponding typed value accessors or remove the imports.
 *
 * @author elandau
 *
 * @param <C> column name type
 */
public class ThriftColumnImpl<C> extends AbstractColumnImpl<C> {
    private final org.apache.cassandra.thrift.Column column;

    public ThriftColumnImpl(C name, org.apache.cassandra.thrift.Column column) {
        super(name);
        this.column = column;
    }

    /** Decodes the raw column value with the supplied serializer. */
    @Override
    public <V> V getValue(Serializer<V> valSer) {
        return valSer.fromBytes(column.getValue());
    }

    /** Simple columns are leaves and never contain sub columns. */
    @Override
    public <C2> ColumnList<C2> getSubColumns(Serializer<C2> ser) {
        throw new UnsupportedOperationException("SimpleColumn \'" + getName() + "\' has no children");
    }

    @Override
    public boolean isParentColumn() {
        return false;
    }

    /** Write timestamp recorded on the thrift column. */
    @Override
    public long getTimestamp() {
        return column.getTimestamp();
    }

    /** Raw, undecoded column name bytes. */
    @Override
    public ByteBuffer getRawName() {
        return ByteBuffer.wrap(column.getName());
    }

    /** Returns the column TTL, or 0 when no TTL was set. */
    @Override
    public int getTtl() {
        if (column.isSetTtl()) {
            return column.getTtl();
        }
        return 0;
    }

    /** True when the column carries a non-empty value payload. */
    @Override
    public boolean hasValue() {
        ByteBuffer value = column.value;
        return value != null && value.remaining() != 0;
    }
}
| 8,017 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftRowImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.nio.ByteBuffer;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.Row;
/**
 * Immutable {@link Row} implementation pairing a decoded key, its raw
 * byte form, and the row's column list.
 */
public class ThriftRowImpl<K, C> implements Row<K, C> {
    private final K key;
    private final ByteBuffer rawkey;
    private final ColumnList<C> columns;

    public ThriftRowImpl(K key, ByteBuffer byteBuffer, ColumnList<C> columns) {
        this.key = key;
        this.rawkey = byteBuffer;
        this.columns = columns;
    }

    /** The deserialized row key. */
    @Override
    public K getKey() {
        return key;
    }

    /** The row key as raw bytes, undecoded. */
    @Override
    public ByteBuffer getRawKey() {
        return rawkey;
    }

    /** The columns belonging to this row. */
    @Override
    public ColumnList<C> getColumns() {
        return columns;
    }
}
| 8,018 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftRowsSliceImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.thrift.KeySlice;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
/**
* Wrapper for a key slice response.
*
* Will lazily create a lookup by key
*
* @author elandau
*
* @param <K>
* @param <C>
*/
public class ThriftRowsSliceImpl<K, C> implements Rows<K, C> {
    private final List<Row<K, C>> rows;
    // Built lazily by lazyBuildLookup() on the first getRow() call
    private Map<K, Row<K, C>> lookup;

    /** Converts each thrift KeySlice into a typed row, preserving order. */
    public ThriftRowsSliceImpl(List<KeySlice> rows, Serializer<K> keySer, Serializer<C> colSer) {
        this.rows = Lists.newArrayListWithCapacity(rows.size());
        for (KeySlice slice : rows) {
            byte[] rawKey = slice.getKey();
            this.rows.add(new ThriftRowImpl<K, C>(
                    keySer.fromBytes(rawKey),
                    ByteBuffer.wrap(rawKey),
                    new ThriftColumnOrSuperColumnListImpl<C>(slice.getColumns(), colSer)));
        }
    }

    @Override
    public Iterator<Row<K, C>> iterator() {
        return rows.iterator();
    }

    /** Returns the row for the given key, or null if not present. */
    @Override
    public Row<K, C> getRow(K key) {
        lazyBuildLookup();
        return lookup.get(key);
    }

    @Override
    public int size() {
        return rows.size();
    }

    @Override
    public boolean isEmpty() {
        return rows.isEmpty();
    }

    @Override
    public Row<K, C> getRowByIndex(int index) {
        return rows.get(index);
    }

    /** Lazily transformed view of the row keys, in response order. */
    @Override
    public Collection<K> getKeys() {
        return Lists.transform(rows, new Function<Row<K, C>, K>() {
            @Override
            public K apply(Row<K, C> row) {
                return row.getKey();
            }
        });
    }

    /** Builds the key-to-row lookup map at most once. */
    private void lazyBuildLookup() {
        if (lookup != null) {
            return;
        }
        lookup = Maps.newHashMap();
        for (Row<K, C> row : rows) {
            lookup.put(row.getKey(), row);
        }
    }
}
| 8,019 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftCounterSuperColumnMutationImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.util.List;
import com.netflix.astyanax.AbstractColumnListMutation;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.CounterColumn;
import org.apache.cassandra.thrift.CounterSuperColumn;
import org.apache.cassandra.thrift.Deletion;
import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.thrift.SlicePredicate;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.ColumnPath;
import com.netflix.astyanax.serializers.UUIDSerializer;
/**
 * ColumnListMutation for a counter super column.  Generated thrift
 * mutations are appended to the shared mutation list owned by the batch;
 * only counter increments and deletions are supported.
 */
public class ThriftCounterSuperColumnMutationImpl<C> extends AbstractColumnListMutation<C> {
    // Shared, batch-owned list that all generated thrift mutations are appended to
    private final List<Mutation> mutationList;
    // Path identifying the target; path.get(0) is the raw super column name
    private final ColumnPath<C> path;
    // Lazily created container reused for all increments in this mutation
    private CounterSuperColumn superColumn;
    // Lazily created predicate shared by all deleteColumn() calls
    private SlicePredicate deletionPredicate;
    public ThriftCounterSuperColumnMutationImpl(long timestamp, List<Mutation> mutationList, ColumnPath<C> path) {
        super(timestamp);
        this.path = path;
        this.mutationList = mutationList;
    }
    /** Not supported: counter columns can only be incremented. */
    @Override
    public <V> ColumnListMutation<C> putColumn(C columnName, V value, Serializer<V> valueSerializer, Integer ttl) {
        throw new UnsupportedOperationException();
    }
    // NOTE(review): delegates to putColumn, which always throws
    // UnsupportedOperationException, so empty columns cannot be written here.
    @Override
    public ColumnListMutation<C> putEmptyColumn(C columnName, Integer ttl) {
        return putColumn(columnName, null, UUIDSerializer.get(), ttl);
    }
    @Override
    public ColumnListMutation<C> putEmptyColumn(final C columnName) {
        return putEmptyColumn(columnName, null);
    }
    /** Deletes the entire super column at this path. */
    @Override
    public ColumnListMutation<C> delete() {
        // Delete the entire super column
        Deletion d = new Deletion().setSuper_column(path.get(0)).setTimestamp(timestamp);
        mutationList.add(new Mutation().setDeletion(d));
        timestamp++;
        return this;
    }
    /** Not supported: cannot nest below a counter super column. */
    @Override
    public <SC> ColumnListMutation<SC> withSuperColumn(ColumnPath<SC> superColumnPath) {
        throw new UnsupportedOperationException();
    }
    /**
     * Adds a counter increment (amount may be negative) for the given sub
     * column.  All increments share one CounterSuperColumn mutation that is
     * created on the first call.
     */
    @Override
    public ColumnListMutation<C> incrementCounterColumn(C columnName, long amount) {
        // 1. Set up the column with all the data
        CounterColumn column = new CounterColumn();
        column.setName(path.getSerializer().toByteBuffer(columnName));
        column.setValue(amount);
        // 2. Create the super column mutation if this is the first call
        if (superColumn == null) {
            superColumn = new CounterSuperColumn().setName(path.get(0));
            Mutation mutation = new Mutation();
            mutation.setColumn_or_supercolumn(new ColumnOrSuperColumn().setCounter_super_column(superColumn));
            mutationList.add(mutation);
        }
        superColumn.addToColumns(column);
        return this;
    }
    /**
     * Deletes a single sub column.  All deletions share one Deletion
     * mutation whose slice predicate accumulates the column names.
     */
    @Override
    public ColumnListMutation<C> deleteColumn(C columnName) {
        if (deletionPredicate == null) {
            deletionPredicate = new SlicePredicate();
            Deletion d = new Deletion().setTimestamp(timestamp).setSuper_column(path.get(0))
                    .setPredicate(deletionPredicate);
            mutationList.add(new Mutation().setDeletion(d));
        }
        deletionPredicate.addToColumn_names(path.getSerializer().toByteBuffer(columnName));
        return this;
    }
    /** Default TTL is silently ignored for counter mutations. */
    @Override
    public ColumnListMutation<C> setDefaultTtl(Integer ttl) {
        // TODO: Throw an exception
        return this;
    }
}
| 8,020 |
0 | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift | Create_ds/astyanax/astyanax-thrift/src/main/java/com/netflix/astyanax/thrift/model/ThriftCqlRowsImpl.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.thrift.model;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.thrift.CqlRow;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
/**
 * {@link Rows} implementation over a thrift CQL result set.  The per-key
 * lookup map is built lazily on the first call to {@link #getRow(Object)}.
 */
public class ThriftCqlRowsImpl<K, C> implements Rows<K, C> {
    private final List<Row<K, C>> rows;
    private Map<K, Row<K, C>> lookup;

    public ThriftCqlRowsImpl(final List<CqlRow> rows,
            final Serializer<K> keySer, final Serializer<C> colSer) {
        this.rows = Lists.newArrayListWithCapacity(rows.size());
        for (CqlRow row : rows) {
            byte[] keyBytes = row.getKey();
            // Key-less rows keep null for both key forms.
            boolean hasKey = keyBytes != null && keyBytes.length != 0;
            this.rows.add(new ThriftRowImpl<K, C>(
                    hasKey ? keySer.fromBytes(keyBytes) : null,
                    hasKey ? ByteBuffer.wrap(keyBytes) : null,
                    new ThriftColumnListImpl<C>(row.getColumns(), colSer)));
        }
    }

    @Override
    public Iterator<Row<K, C>> iterator() {
        return rows.iterator();
    }

    /** Returns the row for the given key, or null if not present. */
    @Override
    public Row<K, C> getRow(K key) {
        lazyBuildLookup();
        return lookup.get(key);
    }

    @Override
    public int size() {
        return rows.size();
    }

    @Override
    public boolean isEmpty() {
        return rows.isEmpty();
    }

    @Override
    public Row<K, C> getRowByIndex(int i) {
        return rows.get(i);
    }

    /** Lazily transformed view of the row keys, in response order. */
    @Override
    public Collection<K> getKeys() {
        return Lists.transform(rows, new Function<Row<K, C>, K>() {
            @Override
            public K apply(Row<K, C> input) {
                return input.getKey();
            }
        });
    }

    /** Builds the key-to-row lookup map at most once. */
    private void lazyBuildLookup() {
        if (lookup != null) {
            return;
        }
        lookup = Maps.newHashMap();
        for (Row<K, C> row : rows) {
            lookup.put(row.getKey(), row);
        }
    }
}
| 8,021 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/Shards.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.List;
import com.google.common.collect.Lists;
import com.netflix.astyanax.serializers.StringSerializer;
/**
 * Helpers for generating shard row keys of the form prefix + index.
 */
public class Shards {
    /** Generic factory for a collection of shard row keys. */
    public static interface Builder {
        Collection<ByteBuffer> build();
    }

    /** Builds string shard keys "prefix0" .. "prefix(N-1)". */
    public static class StringShardBuilder {
        private String prefix = "";
        private int shardCount = 0;

        public StringShardBuilder setPrefix(String prefix) {
            this.prefix = prefix;
            return this;
        }

        public StringShardBuilder setShardCount(int count) {
            this.shardCount = count;
            return this;
        }

        /** Serializes each shard name to a ByteBuffer row key. */
        public Collection<ByteBuffer> build() {
            List<ByteBuffer> shards = Lists.newArrayListWithExpectedSize(shardCount);
            int shard = 0;
            while (shard < shardCount) {
                shards.add(StringSerializer.get().toByteBuffer(prefix + shard));
                shard++;
            }
            return shards;
        }
    }

    public static StringShardBuilder newStringShardBuilder() {
        return new StringShardBuilder();
    }
}
| 8,022 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/UniquenessConstraint.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes;
import com.google.common.base.Supplier;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
/**
 * Best-effort uniqueness check: writes a unique marker column for a key,
 * then reads the row back to verify no competing column exists.
 */
@Deprecated
public class UniquenessConstraint<K, C> {
    private final ColumnFamily<K, C> columnFamily;
    private final Keyspace keyspace;
    // Supplies a fresh unique column name per isUnique() attempt
    private final Supplier<C> uniqueColumnSupplier;
    // Optional TTL for the marker column; null means no expiration
    private Integer ttl;
    private ConsistencyLevel consistencyLevel = ConsistencyLevel.CL_QUORUM;
    // Optional hook notified when a violation is detected
    private UniquenessConstraintViolationMonitor<K, C> monitor;
    public UniquenessConstraint(Keyspace keyspace, ColumnFamily<K, C> columnFamily, Supplier<C> uniqueColumnSupplier) {
        this.keyspace = keyspace;
        this.columnFamily = columnFamily;
        this.uniqueColumnSupplier = uniqueColumnSupplier;
    }
    public UniquenessConstraint<K, C> setTtl(Integer ttl) {
        this.ttl = ttl;
        return this;
    }
    public UniquenessConstraint<K, C> setMonitor(UniquenessConstraintViolationMonitor<K, C> monitor) {
        this.monitor = monitor;
        return this;
    }
    /**
     * Attempts to claim uniqueness for the given key.
     *
     * @return the unique column written when the key had no other columns,
     *         or null when a violation was found (this call's marker column
     *         is then deleted as a rollback)
     * @throws ConnectionException on any Cassandra communication failure
     */
    public C isUnique(K key) throws ConnectionException {
        C unique = uniqueColumnSupplier.get();
        // Phase 1: Write a unique column
        MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
        m.withRow(columnFamily, key).putEmptyColumn(unique, ttl);
        m.execute();
        // Phase 2: Read back all columns. There should be only 1
        ColumnList<C> result = keyspace.prepareQuery(columnFamily).setConsistencyLevel(consistencyLevel).getKey(key)
                .execute().getResult();
        if (result.size() == 1) {
            return unique;
        }
        if (this.monitor != null)
            this.monitor.onViolation(key, unique);
        // Rollback
        m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
        m.withRow(columnFamily, key).deleteColumn(unique);
        m.execute();
        return null;
    }
}
| 8,023 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/UUIDStringSupplier.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes;
import java.util.UUID;
import com.google.common.base.Supplier;
import com.netflix.astyanax.util.TimeUUIDUtils;
/**
 * Supplier of unique time-based UUID strings.  Stateless; share the
 * singleton via {@link #getInstance()}.
 */
public class UUIDStringSupplier implements Supplier<String> {
    private static final UUIDStringSupplier instance = new UUIDStringSupplier();

    public static UUIDStringSupplier getInstance() {
        return instance;
    }

    /** Returns a freshly generated unique time UUID as a string. */
    @Override
    public String get() {
        return TimeUUIDUtils.getUniqueTimeUUIDinMicros().toString();
    }
}
| 8,024 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/UniquenessConstraintViolationMonitor.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes;
/**
 * Callback notified when a uniqueness constraint violation is detected.
 *
 * @param <K> row key type
 * @param <C> column name type
 */
@Deprecated
public interface UniquenessConstraintViolationMonitor<K, C> {
    /**
     * @param key    row key for which the violation occurred
     * @param column the unique column written by the violating attempt
     */
    void onViolation(K key, C column);
}
| 8,025 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/Callback.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes;
/**
 * Generic single-method callback used to deliver objects to the caller.
 *
 * @param <T> type of object delivered to the callback
 */
public interface Callback<T> {
    /** Invoked once per delivered object. */
    void handle(T object);
}
| 8,026 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/ConstantSupplier.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes;
import com.google.common.base.Supplier;
/**
 * A {@link Supplier} that always returns the same fixed value provided at
 * construction time.
 *
 * @param <T> type of the supplied value
 */
public class ConstantSupplier<T> implements Supplier<T> {
    /** The value handed back by every call to {@link #get()}. */
    private final T constant;

    /**
     * @param value the constant value to supply
     */
    public ConstantSupplier(T value) {
        this.constant = value;
    }

    @Override
    public T get() {
        return constant;
    }
}
| 8,027 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/DistributedMergeSort.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes;
/**
 * Placeholder for a distributed merge-sort recipe.
 *
 * NOTE(review): this class is currently empty — it declares no fields or
 * methods and appears to be an unimplemented stub.
 */
public class DistributedMergeSort {
}
| 8,028 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/ReverseIndexQuery.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.model.CompositeParser;
import com.netflix.astyanax.model.Composites;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.retry.RunOnce;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.util.RangeBuilder;
/**
* Performs a search on a reverse index and fetches all the matching rows
*
* CFData:K C=V1 C=V2
*
* CFIndex: V1:K
*
* <h3>Data and Index column family</h3> The CFData column family has key of
* type K and fields or columns of type C. Each column may have a different
* value type. The CFIndex column family is a sorted index by one of the value
* types V. The column names in the reverse index are a composite of the value
* type V and the CFData rowkey type K (V:K).
*
* @author elandau
*
* @param <K>
* Key type for data table
* @param <C>
* Column name type for data table
* @param <V>
* Value type being indexed
*/
public class ReverseIndexQuery<K, C, V> {

    /**
     * Create a new query given the name of the index column family.
     */
    public static <K, C, V> ReverseIndexQuery<K, C, V> newQuery(Keyspace ks, ColumnFamily<K, C> cf, String indexCf,
            Serializer<V> valSerializer) {
        return new ReverseIndexQuery<K, C, V>(ks, cf, indexCf, valSerializer);
    }

    /**
     * Create a new query given a pre-built index column family definition.
     */
    public static <K, C, V> ReverseIndexQuery<K, C, V> newQuery(Keyspace ks, ColumnFamily<K, C> cf,
            ColumnFamily<ByteBuffer, ByteBuffer> indexCf, Serializer<V> valSerializer) {
        return new ReverseIndexQuery<K, C, V>(ks, cf, indexCf, valSerializer);
    }

    /**
     * Callback invoked once for every entry read from the index.
     */
    public static interface IndexEntryCallback<K, V> {
        /**
         * @param key   data row key parsed from the composite column name
         * @param value indexed value parsed from the composite column name
         * @param meta  raw column value stored with the index entry
         * @return false to skip fetching the data row for this entry
         */
        boolean handleEntry(K key, V value, ByteBuffer meta);
    }

    private final Keyspace ks;
    private final ColumnFamily<K, C> cfData;
    private final Serializer<V> valSerializer;
    private Collection<ByteBuffer> shardKeys;
    private final ColumnFamily<ByteBuffer, ByteBuffer> cfIndex;
    private ExecutorService executor;
    private V startValue;
    private V endValue;
    // Number of shard keys per first-pass multi-get / data keys per batch fetch
    private int keyLimit = 100;
    // Number of index columns fetched on the first read of each shard
    private int columnLimit = 1000;
    // Page size when paginating within one shard; 0 means "use columnLimit"
    private int shardColumnLimit = 0;
    // Count of outstanding Tasks; latch is released when this reaches zero
    private final AtomicLong pendingTasks = new AtomicLong();
    private Function<Row<K, C>, Void> callback;
    private IndexEntryCallback<K, V> indexCallback;
    private ConsistencyLevel consistencyLevel = ConsistencyLevel.CL_ONE;
    private RetryPolicy retry = RunOnce.get();
    private Collection<C> columnSlice;
    private CountDownLatch latch = new CountDownLatch(1);

    public ReverseIndexQuery(Keyspace ks, ColumnFamily<K, C> cfData, String indexCf, Serializer<V> valSerializer) {
        this.ks = ks;
        this.cfData = cfData;
        this.valSerializer = valSerializer;
        this.startValue = null;
        this.endValue = null;
        this.cfIndex = ColumnFamily.newColumnFamily(indexCf, ByteBufferSerializer.get(), ByteBufferSerializer.get());
    }

    public ReverseIndexQuery(Keyspace ks, ColumnFamily<K, C> cfData, ColumnFamily<ByteBuffer, ByteBuffer> indexCf,
            Serializer<V> valSerializer) {
        this.ks = ks;
        this.cfData = cfData;
        this.valSerializer = valSerializer;
        this.startValue = null;
        this.endValue = null;
        this.cfIndex = indexCf;
    }

    /**
     * Use an externally provided executor instead of the default internal
     * 5-thread daemon pool.
     */
    public ReverseIndexQuery<K, C, V> useExecutor(ExecutorService executor) {
        this.executor = executor;
        return this;
    }

    /**
     * Retry policy applied to each Cassandra operation. Defaults to RunOnce.
     */
    public ReverseIndexQuery<K, C, V> useRetryPolicy(RetryPolicy retry) {
        this.retry = retry;
        return this;
    }

    /**
     * Row keys of the index shards to scan. Required before calling execute().
     */
    public ReverseIndexQuery<K, C, V> withIndexShards(Collection<ByteBuffer> shardKeys) {
        this.shardKeys = shardKeys;
        return this;
    }

    /** Inclusive lower bound on the indexed value. */
    public ReverseIndexQuery<K, C, V> fromIndexValue(V startValue) {
        this.startValue = startValue;
        return this;
    }

    /** Inclusive upper bound on the indexed value. */
    public ReverseIndexQuery<K, C, V> toIndexValue(V endValue) {
        this.endValue = endValue;
        return this;
    }

    /**
     * Callback invoked for every matching data row. May be called from
     * multiple internal threads concurrently.
     */
    public ReverseIndexQuery<K, C, V> forEach(Function<Row<K, C>, Void> callback) {
        this.callback = callback;
        return this;
    }

    /**
     * Callback invoked for every matching index entry, before the data row
     * is fetched. Returning false from the callback skips the data fetch.
     */
    public ReverseIndexQuery<K, C, V> forEachIndexEntry(IndexEntryCallback<K, V> callback) {
        this.indexCallback = callback;
        return this;
    }

    public ReverseIndexQuery<K, C, V> withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    /** Restrict the columns fetched from the data rows. */
    public ReverseIndexQuery<K, C, V> withColumnSlice(Collection<C> columnSlice) {
        this.columnSlice = columnSlice;
        return this;
    }

    /**
     * Set the number shard keys to fetch for the first query
     *
     * @param size
     * @return
     */
    public ReverseIndexQuery<K, C, V> setShardBlockSize(int size) {
        this.keyLimit = size;
        return this;
    }

    /**
     * Set the number columns to read from each shard when paginating.
     *
     * @param size
     * @return
     */
    public ReverseIndexQuery<K, C, V> setShardPageSize(int size) {
        this.columnLimit = size;
        return this;
    }

    /** Page size for subsequent reads within a shard (0 = same as shard page size). */
    public ReverseIndexQuery<K, C, V> setShardNextPageSize(int size) {
        this.shardColumnLimit = size;
        return this;
    }

    /**
     * Base class for asynchronous work items. Constructing a task registers
     * it with the pending-task counter and immediately submits it to the
     * executor.
     *
     * NOTE(review): submitting {@code this} from the constructor lets the
     * task start before the anonymous subclass constructor finishes; this is
     * safe today only because the subclasses are stateless.
     */
    public abstract class Task implements Runnable {
        public Task() {
            pendingTasks.incrementAndGet();
            executor.submit(this);
        }

        @Override
        public final void run() {
            try {
                internalRun();
            }
            catch (Throwable t) {
                // Previously swallowed silently, which made task failures
                // invisible. Surface them in the same best-effort style used
                // elsewhere in this class.
                t.printStackTrace();
            }
            if (pendingTasks.decrementAndGet() == 0)
                latch.countDown();
        }

        protected abstract void internalRun();
    }

    /**
     * Run the query. Blocks until all index reads, shard pagination and data
     * fetches have completed (or the 1000-minute safety timeout elapses).
     */
    public void execute() {
        if (executor == null)
            executor = Executors.newFixedThreadPool(5, new ThreadFactoryBuilder().setDaemon(true).build());

        // Hold a "guard" count for the duration of task submission. Without
        // it, tasks from an early batch could all finish (driving
        // pendingTasks to 0 and releasing the latch) before later batches
        // were submitted, causing execute() to return early.
        pendingTasks.incrementAndGet();
        try {
            // Break up the shards into batches
            List<ByteBuffer> batch = Lists.newArrayListWithCapacity(keyLimit);
            for (ByteBuffer shard : shardKeys) {
                batch.add(shard);
                if (batch.size() == keyLimit) {
                    fetchFirstIndexBatch(batch);
                    batch = Lists.newArrayListWithCapacity(keyLimit);
                }
            }
            if (!batch.isEmpty()) {
                fetchFirstIndexBatch(batch);
            }
        }
        finally {
            // Release the guard; if no tasks remain (or none were created)
            // this opens the latch so the await below returns immediately.
            if (pendingTasks.decrementAndGet() == 0)
                latch.countDown();
        }

        try {
            latch.await(1000, TimeUnit.MINUTES);
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    /**
     * Read the first page of index entries for a batch of shard keys and
     * schedule data fetches / further pagination as needed.
     */
    private void fetchFirstIndexBatch(final Collection<ByteBuffer> keys) {
        new Task() {
            @Override
            protected void internalRun() {
                // Get the first range in the index
                RangeBuilder range = new RangeBuilder();
                if (startValue != null) {
                    range.setStart(Composites.newCompositeBuilder().greaterThanEquals().add(startValue, valSerializer)
                            .build());
                }
                if (endValue != null) {
                    range.setEnd(Composites.newCompositeBuilder().lessThanEquals().add(endValue, valSerializer).build());
                }

                // Read the index shards
                OperationResult<Rows<ByteBuffer, ByteBuffer>> result = null;
                try {
                    result = ks.prepareQuery(cfIndex).setConsistencyLevel(consistencyLevel).withRetryPolicy(retry)
                            .getKeySlice(keys).withColumnRange(range.setLimit(columnLimit).build()).execute();
                }
                catch (ConnectionException e) {
                    e.printStackTrace();
                    return;
                }

                // Read the actual data rows in batches
                List<K> batch = Lists.newArrayListWithCapacity(keyLimit);
                for (Row<ByteBuffer, ByteBuffer> row : result.getResult()) {
                    if (!row.getColumns().isEmpty()) {
                        V lastValue = null;
                        for (Column<ByteBuffer> column : row.getColumns()) {
                            // Index column name is a composite of (value, data row key)
                            CompositeParser parser = Composites.newCompositeParser(column.getName());
                            lastValue = parser.read(valSerializer);
                            K key = parser.read(cfData.getKeySerializer());

                            if (indexCallback != null) {
                                if (!indexCallback.handleEntry(key, lastValue, column.getByteBufferValue())) {
                                    continue;
                                }
                            }

                            if (callback != null) {
                                batch.add(key);
                                if (batch.size() == keyLimit) {
                                    fetchDataBatch(batch);
                                    batch = Lists.newArrayListWithCapacity(keyLimit);
                                }
                            }
                        }

                        // A full page implies there may be more entries in this shard
                        if (row.getColumns().size() == columnLimit) {
                            paginateIndexShard(row.getKey(), lastValue);
                        }
                    }
                }

                if (!batch.isEmpty()) {
                    fetchDataBatch(batch);
                }
            }
        };
    }

    /**
     * Continue reading index entries from a single shard, page by page,
     * starting just after the given value.
     */
    private void paginateIndexShard(final ByteBuffer shard, final V value) {
        new Task() {
            @Override
            protected void internalRun() {
                V nextValue = value;
                ColumnList<ByteBuffer> result = null;
                List<K> batch = Lists.newArrayListWithCapacity(keyLimit);

                int pageSize = shardColumnLimit;
                if (pageSize == 0)
                    pageSize = columnLimit;
                do {
                    // Get the first range in the index
                    RangeBuilder range = new RangeBuilder().setStart(Composites.newCompositeBuilder()
                            .greaterThanEquals().addBytes(valSerializer.getNext(valSerializer.toByteBuffer(nextValue)))
                            .build());
                    if (endValue != null) {
                        range.setEnd(Composites.newCompositeBuilder().lessThanEquals().add(endValue, valSerializer)
                                .build());
                    }

                    // Read the index shards
                    try {
                        result = ks.prepareQuery(cfIndex).setConsistencyLevel(consistencyLevel).withRetryPolicy(retry)
                                .getKey(shard).withColumnRange(range.setLimit(pageSize).build()).execute().getResult();
                    }
                    catch (ConnectionException e) {
                        e.printStackTrace();
                        return;
                    }

                    // Read the actual data rows in batches
                    for (Column<ByteBuffer> column : result) {
                        CompositeParser parser = Composites.newCompositeParser(column.getName());
                        nextValue = parser.read(valSerializer);
                        K key = parser.read(cfData.getKeySerializer());

                        if (indexCallback != null) {
                            if (!indexCallback.handleEntry(key, nextValue, column.getByteBufferValue())) {
                                continue;
                            }
                        }

                        if (callback != null) {
                            batch.add(key);
                            if (batch.size() == keyLimit) {
                                fetchDataBatch(batch);
                                batch = Lists.newArrayListWithCapacity(keyLimit);
                            }
                        }
                    }
                    // A full page implies there may be more entries to read
                } while (result != null && result.size() == pageSize);

                if (!batch.isEmpty()) {
                    fetchDataBatch(batch);
                }
            }
        };
    }

    /**
     * Fetch a batch of data rows and invoke the row callback for each one.
     */
    private void fetchDataBatch(final Collection<K> keys) {
        new Task() {
            @Override
            protected void internalRun() {
                try {
                    OperationResult<Rows<K, C>> result = ks.prepareQuery(cfData).withRetryPolicy(retry)
                            .setConsistencyLevel(consistencyLevel).getKeySlice(keys)
                            .withColumnSlice(new ColumnSlice<C>(columnSlice)).execute();

                    for (Row<K, C> row : result.getResult()) {
                        callback.apply(row);
                    }
                }
                catch (ConnectionException e) {
                    e.printStackTrace();
                }
            }
        };
    }
}
| 8,029 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/UniquenessConstraintWithPrefix.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes;
import com.google.common.base.Supplier;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.util.RangeBuilder;
/**
 * Deprecated recipe that implements a best-effort uniqueness constraint on a
 * row key using a two-phase write/read protocol: first claim the key by
 * writing a uniquely named column under a fixed prefix, then read back all
 * columns under that prefix — seeing exactly one means the claim succeeded.
 *
 * @param <K> row key type
 */
@Deprecated
public class UniquenessConstraintWithPrefix<K> {
    private final ColumnFamily<K, String> columnFamily;
    private final Keyspace keyspace;

    private String prefix;
    private Supplier<String> uniqueColumnSupplier = UUIDStringSupplier.getInstance();
    private Integer ttl;
    private ConsistencyLevel consistencyLevel = ConsistencyLevel.CL_QUORUM;
    private UniquenessConstraintViolationMonitor<K, String> monitor;

    public UniquenessConstraintWithPrefix(Keyspace keyspace, ColumnFamily<K, String> columnFamily) {
        this.keyspace = keyspace;
        this.columnFamily = columnFamily;
    }

    /** Supplier of the unique portion of the claim column name (default: time UUID strings). */
    public UniquenessConstraintWithPrefix<K> setColumnNameSupplier(Supplier<String> uniqueColumnSupplier) {
        this.uniqueColumnSupplier = uniqueColumnSupplier;
        return this;
    }

    /** Common prefix under which all claim columns for this constraint are written. */
    public UniquenessConstraintWithPrefix<K> setPrefix(String prefix) {
        this.prefix = prefix;
        return this;
    }

    /** Optional TTL (seconds) applied to the claim column; null means no expiry. */
    public UniquenessConstraintWithPrefix<K> setTtl(Integer ttl) {
        this.ttl = ttl;
        return this;
    }

    /** Optional monitor notified whenever a uniqueness violation is detected. */
    public UniquenessConstraintWithPrefix<K> setMonitor(UniquenessConstraintViolationMonitor<K, String> monitor) {
        this.monitor = monitor;
        return this;
    }

    /** Consistency level for all reads and writes (default: QUORUM). */
    public UniquenessConstraintWithPrefix<K> setConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    /**
     * Attempt to claim the given key.
     *
     * @param key row key to claim
     * @return the full claim column name on success, or null if another
     *         claimant was detected (in which case this claim is rolled back)
     * @throws ConnectionException on any Cassandra error
     */
    public String isUnique(K key) throws ConnectionException {
        final String uniqueColumn = prefix + uniqueColumnSupplier.get();

        // Phase 1: optimistically claim the key by writing a unique column
        MutationBatch claim = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
        claim.withRow(columnFamily, key).putEmptyColumn(uniqueColumn, ttl);
        claim.execute();

        // Phase 2: read back every column under the prefix; exactly one means we won
        ColumnList<String> columns = keyspace.prepareQuery(columnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getKey(key)
                .withColumnRange(new RangeBuilder().setStart(prefix + "\u0000").setEnd(prefix + "\uFFFF").build())
                .execute().getResult();
        if (columns.size() == 1) {
            return uniqueColumn;
        }

        // Lost the race: report the violation, then roll back our claim
        if (this.monitor != null) {
            this.monitor.onViolation(key, uniqueColumn);
        }
        MutationBatch rollback = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
        rollback.withRow(columnFamily, key).deleteColumn(uniqueColumn);
        rollback.execute();
        return null;
    }
}
| 8,030 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/reader/AllRowsReader.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.reader;
import java.io.Flushable;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.TokenRange;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.Row;
import com.netflix.astyanax.model.Rows;
import com.netflix.astyanax.partitioner.BigInteger127Partitioner;
import com.netflix.astyanax.partitioner.Partitioner;
import com.netflix.astyanax.query.CheckpointManager;
import com.netflix.astyanax.query.ColumnFamilyQuery;
import com.netflix.astyanax.query.RowSliceQuery;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.shallows.EmptyCheckpointManager;
/**
* Recipe that is used to read all rows from a column family.
*
* @author elandau
*
* @param <K>
* @param <C>
*/
public class AllRowsReader<K, C> implements Callable<Boolean> {
private static final Logger LOG = LoggerFactory.getLogger(AllRowsReader.class);

// RandomPartitioner-compatible default; overridable via the Builder.
private static final Partitioner DEFAULT_PARTITIONER = BigInteger127Partitioner.get();
private final static int DEFAULT_PAGE_SIZE = 100;

private final Keyspace keyspace;
private final ColumnFamily<K, C> columnFamily;
// Rows fetched per incremental query (also the pagination page size).
private final int pageSize;
private final Integer concurrencyLevel; // Default to null will force ring describe
private final ExecutorService executor;
// Tracks per-token-range progress so an interrupted scan can resume.
private final CheckpointManager checkpointManager;
// Exactly one of rowFunction/rowsFunction is used; rowsFunction takes precedence.
private final Function<Row<K,C>, Boolean> rowFunction;
private final Function<Rows<K, C>, Boolean> rowsFunction;
// When true, the last token of each page is re-queried to avoid missing
// rows whose keys share a token across a page boundary.
private final boolean repeatLastToken;
private final ColumnSlice<C> columnSlice;
private final String startToken;
private final String endToken;
private final Boolean includeEmptyRows;  // Default to null will discard tombstones

private final List<Future<Boolean>> futures = Lists.newArrayList();

// Set to true to ask all in-flight range tasks to stop.
private final AtomicBoolean cancelling = new AtomicBoolean(false);

private final Partitioner partitioner;

private final ConsistencyLevel consistencyLevel;
private final RetryPolicy retryPolicy;

// First exception raised by any range task; null while no error has occurred.
private AtomicReference<Exception> error = new AtomicReference<Exception>();

// Optional datacenter / rack filters applied when describing the ring.
private String dc;
private String rack;
/**
 * Fluent builder for {@link AllRowsReader}.
 *
 * @param <K> row key type
 * @param <C> column name type
 */
public static class Builder<K, C> {
    private final Keyspace keyspace;
    private final ColumnFamily<K, C> columnFamily;
    private Partitioner partitioner = DEFAULT_PARTITIONER;
    private int pageSize = DEFAULT_PAGE_SIZE;
    private Integer concurrencyLevel;   // Default to null will force ring describe
    private ExecutorService executor;
    private CheckpointManager checkpointManager = new EmptyCheckpointManager();
    private Function<Row<K,C>, Boolean> rowFunction;
    private Function<Rows<K, C>, Boolean> rowsFunction;
    private boolean repeatLastToken = true;
    private ColumnSlice<C> columnSlice;
    private String startToken;
    private String endToken;
    private Boolean includeEmptyRows;  // Default to null will discard tombstones
    private String dc;
    private String rack;
    private ConsistencyLevel consistencyLevel = null;
    private RetryPolicy retryPolicy;

    public Builder(Keyspace ks, ColumnFamily<K, C> columnFamily) {
        this.keyspace = ks;
        this.columnFamily = columnFamily;
    }

    /**
     * Maximum number of rows to return for each incremental query to Cassandra.
     * This limit also represents the page size when paginating.
     *
     * @param pageSize
     * @return
     */
    public Builder<K, C> withPageSize(int pageSize) {
        this.pageSize = pageSize;
        return this;
    }

    /**
     * Use this checkpoint manager to keep track of progress as all rows are being iterated
     * @param checkpointManager
     * @return
     */
    public Builder<K, C> withCheckpointManager(CheckpointManager checkpointManager) {
        this.checkpointManager = checkpointManager;
        return this;
    }

    /**
     * If true will repeat the last token in the previous block when calling cassandra. This
     * feature is on by default and is used to handle situations where different row keys map
     * to the same token value and they are split on a page boundary. This may not be efficient
     * since it requires the entire row data to be fetched (based on the column slice)
     *
     * @param repeatLastToken
     * @return
     */
    public Builder<K, C> withRepeatLastToken(boolean repeatLastToken) {
        this.repeatLastToken = repeatLastToken;
        return this;
    }

    /**
     * Specify a non-contiguous set of columns to retrieve.
     *
     * @param columns
     * @return
     */
    public Builder<K, C> withColumnSlice(C... columns) {
        this.columnSlice = new ColumnSlice<C>(ImmutableList.copyOf(columns));
        return this;
    }

    /**
     * Specify a non-contiguous set of columns to retrieve.
     *
     * @param columns
     * @return
     */
    public Builder<K, C> withColumnSlice(Collection<C> columns) {
        this.columnSlice = new ColumnSlice<C>(columns);
        return this;
    }

    /**
     * Use this when your application caches the column slice.
     *
     * @param columns
     * @return
     */
    public Builder<K, C> withColumnSlice(ColumnSlice<C> columns) {
        this.columnSlice = columns;
        return this;
    }

    /**
     * Specify a range of columns to return.
     *
     * @param startColumn
     *            First column in the range
     * @param endColumn
     *            Last column in the range
     * @param reversed
     *            True if the order should be reversed. Note that for reversed,
     *            startColumn should be greater than endColumn.
     * @param count
     *            Maximum number of columns to return (similar to SQL LIMIT)
     * @return
     */
    public Builder<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count) {
        this.columnSlice = new ColumnSlice<C>(startColumn, endColumn).setReversed(reversed).setLimit(count);
        return this;
    }

    /**
     * Split the query into N threads with each thread processing an equal size chunk from the token range.
     *
     * Note that the actual number of threads is still limited by the available threads in the thread
     * pool that was set with the AstyanaxConfiguration.
     *
     * @param concurrencyLevel
     * @return
     */
    public Builder<K, C> withConcurrencyLevel(int concurrencyLevel) {
        Preconditions.checkArgument(concurrencyLevel >= 1, "Concurrency level must be >= 1");
        this.concurrencyLevel = concurrencyLevel;
        return this;
    }

    /**
     * Use the specific executor for executing the tasks. Note that this should be used with care
     * when specifying the withConcurrencyLevel.
     * e.g if you have a concurrencyLevel of 10 with a fixed threadpool of size 1 then this effectively
     * negates the point of the concurrencyLevel
     *
     * @param executor
     * @return
     */
    public Builder<K, C> withExecutor(ExecutorService executor) {
        Preconditions.checkArgument(executor != null, "Supplied executor must not be null");
        this.executor = executor;
        return this;
    }

    /**
     * Execute the operation on a specific token range, instead of the entire range.
     * Use this only in combination with setConcurrencyLevel being called otherwise
     * it currently will not have any effect on the query. When using forTokenRange
     * the specified token range will still be split into the number of threads
     * specified by setConcurrencyLevel
     *
     * @param startToken
     * @param endToken
     * @return
     */
    public Builder<K, C> withTokenRange(BigInteger startToken, BigInteger endToken) {
        this.startToken = startToken.toString();
        this.endToken = endToken.toString();
        return this;
    }

    public Builder<K, C> withTokenRange(String startToken, String endToken) {
        this.startToken = startToken;
        this.endToken = endToken;
        return this;
    }

    /**
     * Partitioner used to determine token ranges and how to break token ranges
     * into sub parts. The default is BigInteger127Partitioner which is the
     * RandomPartitioner in cassandra.
     *
     * @param partitioner
     * @return
     */
    public Builder<K, C> withPartitioner(Partitioner partitioner) {
        this.partitioner = partitioner;
        return this;
    }

    /**
     * The default behavior is to exclude empty rows, other than when specifically asking
     * for no columns back. Setting this to true will result in the row callback function
     * being called for empty rows.
     * @param flag
     * @return
     */
    public Builder<K, C> withIncludeEmptyRows(Boolean flag) {
        this.includeEmptyRows = flag;
        return this;
    }

    /**
     * Specify the callback function for each row being read. This callback must
     * be implemented in a thread safe manner since it will be called by multiple
     * internal threads.
     * @param rowFunction
     * @return
     */
    public Builder<K, C> forEachRow(Function<Row<K,C>, Boolean> rowFunction) {
        this.rowFunction = rowFunction;
        return this;
    }

    /**
     * Specify a callback invoked once per fetched page of rows (instead of once
     * per row). Takes precedence over {@link #forEachRow(Function)}. Must also
     * be thread safe.
     * @param rowsFunction
     * @return
     */
    public Builder<K, C> forEachPage(Function<Rows<K, C>, Boolean> rowsFunction) {
        this.rowsFunction = rowsFunction;
        return this;
    }

    /**
     * Consistency level to use for every query issued by the reader.
     * @param consistencyLevel
     * @return
     */
    public Builder<K, C> withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    /**
     * Specify dc to use when auto determining the token ranges to ensure that only ranges
     * in the current dc are used.
     * @param dc
     * @return
     */
    public Builder<K, C> withDc(String dc) {
        this.dc = dc;
        return this;
    }

    /**
     * Specify rack to use when auto determining the token ranges to ensure that only ranges
     * in the current rack are used.
     * @param rack
     * @return
     */
    public Builder<K,C> withRack(String rack) {
        this.rack = rack;
        return this;
    }

    /**
     * Retry policy applied to each query issued by the reader.
     * @param policy
     * @return
     */
    public Builder<K,C> withRetryPolicy(RetryPolicy policy) {
        this.retryPolicy = policy;
        return this;
    }

    /**
     * Construct the reader. If no partitioner was set (only possible by
     * explicitly passing null to withPartitioner) it is resolved from the
     * keyspace, wrapping any connection failure in a RuntimeException.
     */
    public AllRowsReader<K,C> build() {
        if (partitioner == null) {
            try {
                partitioner = keyspace.getPartitioner();
            } catch (ConnectionException e) {
                throw new RuntimeException("Unable to determine partitioner", e);
            }
        }
        return new AllRowsReader<K,C>(keyspace,
                columnFamily,
                concurrencyLevel,
                executor,
                checkpointManager,
                rowFunction,
                rowsFunction,
                columnSlice,
                startToken,
                endToken,
                includeEmptyRows,
                pageSize,
                repeatLastToken,
                partitioner,
                dc,
                rack,
                consistencyLevel,
                retryPolicy);
    }
}
/**
 * Full constructor; normally invoked via {@link Builder#build()}.
 *
 * The includeEmptyRows flag is resolved as follows: an explicit non-null
 * value wins; otherwise it defaults to true only when the column slice
 * explicitly asks for zero columns (token-only scan), and false in every
 * other case (tombstone/empty rows are skipped).
 */
public AllRowsReader(Keyspace keyspace, ColumnFamily<K, C> columnFamily,
        Integer concurrencyLevel,
        ExecutorService executor,
        CheckpointManager checkpointManager,
        Function<Row<K, C>, Boolean> rowFunction,
        Function<Rows<K, C>, Boolean> rowsFunction,
        ColumnSlice<C> columnSlice,
        String startToken,
        String endToken,
        Boolean includeEmptyRows,
        int pageSize,
        boolean repeatLastToken,
        Partitioner partitioner,
        String dc,
        String rack,
        ConsistencyLevel consistencyLevel,
        RetryPolicy retryPolicy) {
    super();
    this.keyspace = keyspace;
    this.columnFamily = columnFamily;
    this.concurrencyLevel = concurrencyLevel;
    this.executor = executor;
    this.checkpointManager = checkpointManager;
    this.rowFunction = rowFunction;
    this.rowsFunction = rowsFunction;
    this.columnSlice = columnSlice;
    this.startToken = startToken;
    this.endToken = endToken;
    this.pageSize = pageSize;
    this.repeatLastToken = repeatLastToken;
    this.partitioner = partitioner;
    this.dc = dc;
    this.rack = rack;
    this.consistencyLevel = consistencyLevel;
    this.retryPolicy = retryPolicy;

    // Flag explicitly set
    if (includeEmptyRows != null)
        this.includeEmptyRows = includeEmptyRows;
    // Asking for a column range of size 0
    else if (columnSlice != null && columnSlice.getColumns() == null && columnSlice.getLimit() == 0)
        this.includeEmptyRows = true;
    // Default to false
    else
        this.includeEmptyRows = false;
}
/**
 * Build a column family query, applying the configured consistency level
 * and retry policy when they were provided.
 */
private ColumnFamilyQuery<K, C> prepareQuery() {
    final ColumnFamilyQuery<K, C> query = keyspace.prepareQuery(columnFamily);
    if (consistencyLevel != null) {
        query.setConsistencyLevel(consistencyLevel);
    }
    if (retryPolicy != null) {
        query.withRetryPolicy(retryPolicy);
    }
    return query;
}
/**
 * Create the task that scans one token range [startToken, endToken],
 * paginating by token and reporting rows to the configured callback.
 * Resumes from a previously stored checkpoint when one exists. Returns
 * true when the range completed, false when cancelled; any failure also
 * records the exception in {@code error} and cancels the whole reader.
 */
private Callable<Boolean> makeTokenRangeTask(final String startToken, final String endToken) {
    return new Callable<Boolean>() {
        @Override
        public Boolean call() {
            try {
                String currentToken;
                try {
                    // Resume from the checkpoint if one exists; a checkpoint
                    // equal to endToken means this range already finished.
                    currentToken = checkpointManager.getCheckpoint(startToken);
                    if (currentToken == null) {
                        currentToken = startToken;
                    }
                    else if (currentToken.equals(endToken)) {
                        return true;
                    }
                } catch (Exception e) {
                    error.compareAndSet(null, e);
                    LOG.error("Failed to get checkpoint for startToken " + startToken, e);
                    cancel();
                    throw new RuntimeException("Failed to get checkpoint for startToken " + startToken, e);
                }

                int localPageSize = pageSize;
                int rowsToSkip = 0;
                while (!cancelling.get()) {
                    RowSliceQuery<K, C> query = prepareQuery().getKeyRange(null, null, currentToken, endToken, localPageSize);
                    if (columnSlice != null)
                        query.withColumnSlice(columnSlice);
                    Rows<K, C> rows = query.execute().getResult();
                    if (!rows.isEmpty()) {
                        try {
                            // Page-level callback takes precedence over the
                            // per-row callback.
                            if (rowsFunction != null) {
                                if (!rowsFunction.apply(rows)) {
                                    cancel();
                                    return false;
                                }
                            }
                            else {
                                // Iterate through all the rows and notify the callback function
                                for (Row<K,C> row : rows) {
                                    if (cancelling.get())
                                        break;

                                    // When repeating the last row, rows to skip will be > 0
                                    // We skip the rows that were repeated from the previous query
                                    if (rowsToSkip > 0) {
                                        rowsToSkip--;
                                        continue;
                                    }

                                    if (!includeEmptyRows && (row.getColumns() == null || row.getColumns().isEmpty()))
                                        continue;

                                    if (!rowFunction.apply(row)) {
                                        cancel();
                                        return false;
                                    }
                                }
                            }
                        }
                        catch (Exception e) {
                            error.compareAndSet(null, e);
                            LOG.warn(e.getMessage(), e);
                            cancel();
                            throw new RuntimeException("Error processing row", e);
                        }

                        // A full page means there may be more data; advance the
                        // token cursor and fetch the next block.
                        if (rows.size() == localPageSize) {
                            Row<K, C> lastRow = rows.getRowByIndex(rows.size() - 1);
                            String lastToken = partitioner.getTokenForKey(lastRow.getRawKey());
                            checkpointManager.trackCheckpoint(startToken, currentToken);
                            if (repeatLastToken) {
                                // Start token is non-inclusive
                                currentToken = partitioner.getTokenMinusOne(lastToken);

                                // Determine the number of rows to skip in the response. Since we are repeating the
                                // last token it's possible (although unlikely) that there is more than one key mapping to the
                                // token. We therefore count backwards the number of keys that have the same token and skip
                                // that number in the next iteration of the loop. If, for example, 3 keys matched but only 2 were
                                // returned in this iteration then the first 2 keys will be skipped from the next response.
                                rowsToSkip = 1;
                                for (int i = rows.size() - 2; i >= 0; i--, rowsToSkip++) {
                                    if (!lastToken.equals(partitioner.getTokenForKey(rows.getRowByIndex(i).getRawKey()))) {
                                        break;
                                    }
                                }

                                // Every row in the page shared one token: grow the
                                // page so the next query can make progress.
                                if (rowsToSkip == localPageSize) {
                                    localPageSize++;
                                }
                            }
                            else {
                                currentToken = lastToken;
                            }

                            continue;
                        }
                    }

                    // We're done!
                    checkpointManager.trackCheckpoint(startToken, endToken);
                    return true;
                }
                cancel();
                return false;
            } catch (Exception e) {
                error.compareAndSet(null, e);
                LOG.error("Error process token/key range", e);
                cancel();
                throw new RuntimeException("Error process token/key range", e);
            }
        }
    };
}
/**
* Main execution block for the all rows query.
*/
/**
 * Main execution block for the all rows query.
 *
 * Splits the token space into per-range subtasks (either from an explicit
 * token range / concurrency level, or from a ring describe), runs them on
 * the configured or a temporary local executor, and returns true only when
 * every subtask completed successfully. The first failure is rethrown.
 */
@Override
public Boolean call() throws Exception {
    error.set(null);

    List<Callable<Boolean>> subtasks = Lists.newArrayList();

    // We are iterating the entire ring using an arbitrary number of threads
    if (this.concurrencyLevel != null || startToken != null|| endToken != null) {
        List<TokenRange> tokens = partitioner.splitTokenRange(
                startToken == null ? partitioner.getMinToken() : startToken,
                endToken == null ? partitioner.getMinToken() : endToken,
                this.concurrencyLevel == null ? 1 : this.concurrencyLevel);

        for (TokenRange range : tokens) {
            subtasks.add(makeTokenRangeTask(range.getStartToken(), range.getEndToken()));
        }
    }
    // We are iterating through each token range
    else {
        List<TokenRange> ranges = keyspace.describeRing(dc, rack);
        for (TokenRange range : ranges) {
            if (range.getStartToken().equals(range.getEndToken()))
                subtasks.add(makeTokenRangeTask(range.getStartToken(), range.getEndToken()));
            else
                // Start token is exclusive for getKeyRange, so back it up by one
                subtasks.add(makeTokenRangeTask(partitioner.getTokenMinusOne(range.getStartToken()), range.getEndToken()));
        }
    }

    try {
        // Use a local executor
        if (executor == null) {
            ExecutorService localExecutor = Executors
                    .newFixedThreadPool(subtasks.size(),
                        new ThreadFactoryBuilder().setDaemon(true)
                            .setNameFormat("AstyanaxAllRowsReader-%d")
                            .build());

            try {
                futures.addAll(startTasks(localExecutor, subtasks));
                return waitForTasksToFinish();
            }
            finally {
                localExecutor.shutdownNow();
            }
        }
        // Use an externally provided executor
        else {
            futures.addAll(startTasks(executor, subtasks));
            return waitForTasksToFinish();
        }
    }
    catch (Exception e) {
        error.compareAndSet(null, e);
        LOG.warn("AllRowsReader terminated. " + e.getMessage(), e);
        cancel();
        throw error.get();
    }
}
/**
 * Wait for all started range-iteration tasks to complete.
 *
 * Exceptions thrown by subtasks are collected rather than failing fast: every
 * future is drained, then the individual messages and stack traces are merged
 * into a single Exception so no failure is silently dropped.
 *
 * @return true if all tasks returned true, false otherwise.
 * @throws Exception if any subtask failed; the merged exception carries all
 *         subtask messages and a concatenation of their stack traces, each
 *         prefixed by a synthetic "StackTrace: exN" delimiter frame.
 */
private boolean waitForTasksToFinish() throws Exception {
    // Primitive boolean: no reason to box the result flag
    boolean succeeded = true;
    // Tracking state for multiple exceptions, if any
    List<StackTraceElement> stackTraces = new ArrayList<StackTraceElement>();
    StringBuilder sb = new StringBuilder();
    int exCount = 0;
    for (Future<Boolean> future : futures) {
        try {
            if (!future.get()) {
                cancel();
                succeeded = false;
            }
        }
        catch (Exception e) {
            error.compareAndSet(null, e);
            cancel();
            succeeded = false;
            exCount++;
            // Chain append() calls instead of concatenating inside append()
            sb.append("ex").append(exCount).append(": ").append(e.getMessage()).append("\n");
            StackTraceElement[] stackTrace = e.getStackTrace();
            if (stackTrace != null && stackTrace.length > 0) {
                // Synthetic frame acts as a delimiter between merged traces
                StackTraceElement delimiterSE = new StackTraceElement("StackTrace: ex" + exCount, "", "", 0);
                stackTraces.add(delimiterSE);
                for (StackTraceElement se : stackTrace) {
                    stackTraces.add(se);
                }
            }
        }
    }
    // Give the row function a chance to flush any buffered state
    if (this.rowFunction instanceof Flushable) {
        ((Flushable)rowFunction).flush();
    }
    if (exCount > 0) {
        String exMessage = sb.toString();
        StackTraceElement[] seArray = stackTraces.toArray(new StackTraceElement[stackTraces.size()]);
        Exception ex = new Exception(exMessage);
        ex.setStackTrace(seArray);
        throw ex;
    }
    return succeeded;
}
/**
 * Submit every callable to the executor and collect the resulting futures.
 * Tasks begin executing as soon as they are submitted; the returned list
 * preserves submission order.
 *
 * @param executor  executor that will run the range-iteration tasks
 * @param callables the per-token-range tasks to submit
 * @return futures for all submitted tasks, in submission order
 */
private List<Future<Boolean>> startTasks(ExecutorService executor, List<Callable<Boolean>> callables) {
    List<Future<Boolean>> submitted = new ArrayList<Future<Boolean>>(callables.size());
    for (Callable<Boolean> task : callables) {
        submitted.add(executor.submit(task));
    }
    return submitted;
}
/**
 * Cancel all pending range iteration tasks. This will cause all internal threads to exit and
 * call() to return false.
 */
public synchronized void cancel() {
    // One-way latch: flips false -> true at most once; the worker loops poll
    // this flag and bail out on their next iteration
    cancelling.compareAndSet(false, true);
}
}
| 8,031 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/locks/DistributedRowLock.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.locks;
/**
 * Base interface for acquiring and releasing a distributed row lock.
 *
 * Usage:
 * <pre>
 * DistributedRowLock lock = new SomeLockImplementation(...);
 * try {
 *     lock.acquire();
 *     // Do something ...
 * } catch (BusyLockException e) {
 *     // The lock was already taken by a different process
 * } catch (StaleLockException e) {
 *     // The row has a stale lock that needs to be addressed.  This is
 *     // usually caused when no column TTL is set and the client crashed
 *     // before releasing the lock.  The DistributedRowLock should have
 *     // the option to auto delete stale locks.
 * } finally {
 *     lock.release();
 * }
 * </pre>
 *
 * @author elandau
 *
 */
public interface DistributedRowLock {
    /**
     * Attempt to take the lock.
     *
     * @throws BusyLockException   if the lock is held by another process
     * @throws StaleLockException  if an expired lock column was found and the
     *                             implementation is configured to fail on it
     * @throws Exception           on any underlying storage error
     */
    void acquire() throws BusyLockException, StaleLockException, Exception;

    /**
     * Release the lock, removing this client's lock column (and possibly any
     * stale ones the implementation chose to clean up).
     *
     * @throws Exception on any underlying storage error
     */
    void release() throws Exception;
}
| 8,032 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/locks/StaleLockException.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.locks;
/**
 * Thrown when a row lock column is found to have expired, i.e. a previous
 * client wrote a lock and never cleaned it up (typically a crash before
 * release with no column TTL set).
 */
public class StaleLockException extends Exception {
    private static final long serialVersionUID = -1094508305645942319L;

    /** @param message description of the stale lock condition */
    public StaleLockException(String message) {
        super(message);
    }

    /**
     * @param message description of the stale lock condition
     * @param e       underlying cause
     */
    public StaleLockException(String message, Exception e) {
        super(message, e);
    }

    /** @param e underlying cause */
    public StaleLockException(Exception e) {
        super(e);
    }
}
| 8,033 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/locks/ColumnPrefixDistributedRowLock.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.locks;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ColumnMap;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.OrderedColumnMap;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.retry.RunOnce;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.util.RangeBuilder;
import com.netflix.astyanax.util.TimeUUIDUtils;
/**
 * Takes a distributed row lock for a single row. The row lock is accomplished using
 * a sequence of read/write events to Cassandra without the need for something like
 * zookeeper.
 *
 * Algorithm
 * 1. Write a column with name <prefix>_<uuid>. Value is an expiration time.
 * 2. Read back all columns with <prefix>
 *      case 1) count==1 Got the lock
 *      case 2) count> 1 No lock
 * 3. Do something in your code assuming the row is locked
 * 4. Release the lock by deleting the lock columns
 *
 * Usage considerations
 * 1. Set an expiration time (expireLockAfter) that is long enough for your processing to complete
 * 2. Use this when the probability for contention is very low
 * 3. Optimize by reading all columns (withIncludeAllColumn(true)) and merge the mutation
 *    into the release.  This will save 2 calls to cassandra.
 * 4. If the client fails after Step 1. A subsequent attempt to lock will automatically
 *    release these stale locks.  You can turn this auto cleanup off by calling
 *    failOnStaleLock(false), handling a StaleLockException and doing manual cleanup by
 *    calling releaseExpiredLocks()
 * 5. An optional TTL can be set on the lock columns which will ensure abandoned/stale locks
 *    will be cleaned up by compactions at some point.
 * 6. You can customize the 'prefix' used for the lock columns.  This will help with storing
 *    the lock columns with data in the same row.
 * 7. You can customize the unique part of the lock column to include meaningful data such
 *    as the UUID row key from another column family.  This can have the same effect as
 *    assigning a foreign key to the lock column and is useful for uniqueness constraint.
 * 8. This recipe is not a transaction.
 *
 * Take a lock,
 * <code>
 *     ColumnPrefixDistributedRowLock<String> lock = new ColumnPrefixDistributedRowLock<String>(keyspace, columnFamily, "KeyBeingLocked");
 *     try {
 *         lock.acquire();
 *     }
 *     finally {
 *         lock.release();
 *     }
 * </code>
 *
 * Read, Modify, Write.  The read, modify, write piggybacks on top of the lock calls.
 *
 * <code>
 *     ColumnPrefixDistributedRowLock<String> lock = new ColumnPrefixDistributedRowLock<String>(keyspace, columnFamily, "KeyBeingLocked");
 *     MutationBatch m = keyspace.prepareMutationBatch();
 *     try {
 *         ColumnMap<String> columns = lock.acquireLockAndReadRow();
 *
 *         m.withRow("KeyBeingLocked")
 *          .putColumn("SomeColumnBeingUpdated", );
 *
 *         lock.releaseWithMutation(m);
 *     }
 *     catch (Exception e) {
 *         lock.release();
 *     }
 * </code>
 *
 * @author elandau
 *
 * @param <K>
 */
public class ColumnPrefixDistributedRowLock<K> implements DistributedRowLock {
    public static final int LOCK_TIMEOUT = 60;
    public static final TimeUnit DEFAULT_OPERATION_TIMEOUT_UNITS = TimeUnit.MINUTES;
    public static final String DEFAULT_LOCK_PREFIX = "_LOCK_";

    private final ColumnFamily<K, String> columnFamily;   // The column family for data and lock
    private final Keyspace keyspace;                      // The keyspace
    private final K key;                                  // Key being locked

    private long timeout = LOCK_TIMEOUT;                  // Timeout after which the lock expires.  Units defined by timeoutUnits.
    private TimeUnit timeoutUnits = DEFAULT_OPERATION_TIMEOUT_UNITS;
    private String prefix = DEFAULT_LOCK_PREFIX;          // Prefix to identify the lock columns
    private ConsistencyLevel consistencyLevel = ConsistencyLevel.CL_LOCAL_QUORUM;
    private boolean failOnStaleLock = false;
    private String lockColumn = null;                     // Full column name (prefix + lockId) once a lock attempt is made
    private String lockId = null;                         // Unique portion of the lock column
    private Set<String> locksToDelete = Sets.newHashSet();
    private ColumnMap<String> columns = null;             // Data columns read when readDataColumns is enabled
    private Integer ttl = null;                           // Units in seconds
    private boolean readDataColumns = false;
    private RetryPolicy backoffPolicy = RunOnce.get();
    private long acquireTime = 0;                         // System.nanoTime() at successful acquire
    private int retryCount = 0;

    public ColumnPrefixDistributedRowLock(Keyspace keyspace, ColumnFamily<K, String> columnFamily, K key) {
        this.keyspace = keyspace;
        this.columnFamily = columnFamily;
        this.key = key;
        this.lockId = TimeUUIDUtils.getUniqueTimeUUIDinMicros().toString();
    }

    /**
     * Modify the consistency level being used. Consistency should always be a
     * variant of quorum. The default is CL_QUORUM, which is OK for single
     * region. For multi region the consistency level should be CL_LOCAL_QUORUM.
     * CL_EACH_QUORUM can be used but will incur substantial latency.
     *
     * @param consistencyLevel
     */
    public ColumnPrefixDistributedRowLock<K> withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    /**
     * Specify the prefix that uniquely distinguishes the lock columns from data
     * column
     *
     * @param prefix
     */
    public ColumnPrefixDistributedRowLock<K> withColumnPrefix(String prefix) {
        this.prefix = prefix;
        return this;
    }

    /**
     * If true the first read will also fetch all the columns in the row as
     * opposed to just the lock columns.
     * @param flag
     */
    public ColumnPrefixDistributedRowLock<K> withDataColumns(boolean flag) {
        this.readDataColumns = flag;
        return this;
    }

    /**
     * Override the autogenerated lock column.
     *
     * @param lockId
     */
    public ColumnPrefixDistributedRowLock<K> withLockId(String lockId) {
        this.lockId = lockId;
        return this;
    }

    /**
     * When set to true the operation will fail if a stale lock is detected
     *
     * @param failOnStaleLock
     */
    public ColumnPrefixDistributedRowLock<K> failOnStaleLock(boolean failOnStaleLock) {
        this.failOnStaleLock = failOnStaleLock;
        return this;
    }

    /**
     * Time for failed locks.  Under normal circumstances the lock column will be
     * deleted.  If not then this lock column will remain and the row will remain
     * locked.  The lock will expire after this timeout.
     *
     * @param timeout
     * @param unit
     */
    public ColumnPrefixDistributedRowLock<K> expireLockAfter(long timeout, TimeUnit unit) {
        this.timeout = timeout;
        this.timeoutUnits = unit;
        return this;
    }

    /**
     * This is the TTL on the lock column being written, as opposed to expireLockAfter which
     * is written as the lock column value.  Whereas the expireLockAfter can be used to
     * identify a stale or abandoned lock the TTL will result in the stale or abandoned lock
     * being eventually deleted by cassandra.  Set the TTL to a number that is much greater
     * than the expireLockAfter time.
     * @param ttl
     */
    public ColumnPrefixDistributedRowLock<K> withTtl(Integer ttl) {
        this.ttl = ttl;
        return this;
    }

    public ColumnPrefixDistributedRowLock<K> withTtl(Integer ttl, TimeUnit units) {
        this.ttl = (int) TimeUnit.SECONDS.convert(ttl, units);
        return this;
    }

    public ColumnPrefixDistributedRowLock<K> withBackoff(RetryPolicy policy) {
        this.backoffPolicy = policy;
        return this;
    }

    /**
     * Try to take the lock.  The caller must call .release() to properly clean up
     * the lock columns from cassandra
     *
     * @throws Exception
     */
    @Override
    public void acquire() throws Exception {
        Preconditions.checkArgument(ttl == null || TimeUnit.SECONDS.convert(timeout, timeoutUnits) < ttl, "Timeout " + timeout + " must be less than TTL " + ttl);

        RetryPolicy retry = backoffPolicy.duplicate();
        retryCount = 0;
        while (true) {
            try {
                long curTimeMicros = getCurrentTimeMicros();

                MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
                fillLockMutation(m, curTimeMicros, ttl);
                m.execute();

                verifyLock(curTimeMicros);
                acquireTime = System.nanoTime();
                return;
            }
            catch (BusyLockException e) {
                release();
                if(!retry.allowRetry())
                    throw e;
                retryCount++;
            }
        }
    }

    /**
     * Take the lock and return the row data columns.  Use this, instead of acquire, when you
     * want to implement a read-modify-write scenario and want to reduce the number of calls
     * to Cassandra.
     * @throws Exception
     */
    public ColumnMap<String> acquireLockAndReadRow() throws Exception {
        withDataColumns(true);
        acquire();
        return getDataColumns();
    }

    /**
     * Verify that the lock was acquired.  This shouldn't be called unless it's part of a recipe
     * built on top of ColumnPrefixDistributedRowLock.
     *
     * @param curTimeInMicros
     * @throws BusyLockException
     */
    public void verifyLock(long curTimeInMicros) throws Exception, BusyLockException, StaleLockException {
        if (lockColumn == null)
            throw new IllegalStateException("verifyLock() called without attempting to take the lock");

        // Read back all columns. There should be only 1 if we got the lock
        Map<String, Long> lockResult = readLockColumns(readDataColumns);

        // Cleanup and check that we really got the lock
        for (Entry<String, Long> entry : lockResult.entrySet()) {
            // This is a stale lock that was never cleaned up
            if (entry.getValue() != 0 && curTimeInMicros > entry.getValue()) {
                if (failOnStaleLock) {
                    throw new StaleLockException("Stale lock on row '" + key + "'. Manual cleanup required.");
                }
                locksToDelete.add(entry.getKey());
            }
            // Lock already taken, and not by us
            else if (!entry.getKey().equals(lockColumn)) {
                throw new BusyLockException("Lock already acquired for row '" + key + "' with lock column " + entry.getKey());
            }
        }
    }

    /**
     * Release the lock by releasing this and any other stale lock columns
     */
    @Override
    public void release() throws Exception {
        if (!locksToDelete.isEmpty() || lockColumn != null) {
            MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
            fillReleaseMutation(m, false);
            m.execute();
        }
    }

    /**
     * Release using the provided mutation.  Use this when you want to commit actual data
     * when releasing the lock
     * @param m
     * @throws Exception
     */
    public void releaseWithMutation(MutationBatch m) throws Exception {
        releaseWithMutation(m, false);
    }

    public boolean releaseWithMutation(MutationBatch m, boolean force) throws Exception {
        long elapsed = TimeUnit.MILLISECONDS.convert(System.nanoTime() - acquireTime, TimeUnit.NANOSECONDS);
        boolean isStale = false;
        if (timeout > 0 && elapsed > TimeUnit.MILLISECONDS.convert(timeout, this.timeoutUnits)) {
            isStale = true;
            if (!force) {
                throw new StaleLockException("Lock for '" + getKey() + "' became stale");
            }
        }

        m.setConsistencyLevel(consistencyLevel);
        fillReleaseMutation(m, false);
        m.execute();

        return isStale;
    }

    /**
     * Return a mapping of existing lock columns and their expiration times
     *
     * @throws Exception
     */
    public Map<String, Long> readLockColumns() throws Exception {
        return readLockColumns(false);
    }

    /**
     * Read all the lock columns.  Will also ready data columns if withDataColumns(true) was called
     *
     * @param readDataColumns
     * @throws Exception
     */
    private Map<String, Long> readLockColumns(boolean readDataColumns) throws Exception {
        Map<String, Long> result = Maps.newLinkedHashMap();
        // Read all the columns
        if (readDataColumns) {
            columns = new OrderedColumnMap<String>();
            ColumnList<String> lockResult = keyspace
                .prepareQuery(columnFamily)
                    .setConsistencyLevel(consistencyLevel)
                    .getKey(key)
                .execute()
                    .getResult();

            for (Column<String> c : lockResult) {
                if (c.getName().startsWith(prefix))
                    result.put(c.getName(), readTimeoutValue(c));
                else
                    columns.add(c);
            }
        }
        // Read only the lock columns
        else {
            ColumnList<String> lockResult = keyspace
                .prepareQuery(columnFamily)
                    .setConsistencyLevel(consistencyLevel)
                    .getKey(key)
                    .withColumnRange(new RangeBuilder().setStart(prefix + "\u0000").setEnd(prefix + "\uFFFF").build())
                .execute()
                    .getResult();

            for (Column<String> c : lockResult) {
                result.put(c.getName(), readTimeoutValue(c));
            }
        }
        return result;
    }

    /**
     * Release all locks.  Use this carefully as it could release a lock for a
     * running operation.
     *
     * @return Map of previous locks
     * @throws Exception
     */
    public Map<String, Long> releaseAllLocks() throws Exception {
        return releaseLocks(true);
    }

    /**
     * Release all expired locks for this key.
     *
     * @return map of expire locks
     * @throws Exception
     */
    public Map<String, Long> releaseExpiredLocks() throws Exception {
        return releaseLocks(false);
    }

    /**
     * Delete locks columns.  Set force=true to remove locks that haven't
     * expired yet.
     *
     * This operation first issues a read to cassandra and then deletes columns
     * in the response.
     *
     * @param force - Force delete of non expired locks as well
     * @return Map of locks released
     * @throws Exception
     */
    public Map<String, Long> releaseLocks(boolean force) throws Exception {
        Map<String, Long> locksToDelete = readLockColumns();

        MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
        ColumnListMutation<String> row = m.withRow(columnFamily, key);
        long now = getCurrentTimeMicros();
        for (Entry<String, Long> c : locksToDelete.entrySet()) {
            if (force || (c.getValue() > 0 && c.getValue() < now)) {
                row.deleteColumn(c.getKey());
            }
        }
        m.execute();

        return locksToDelete;
    }

    /**
     * Get the current system time
     */
    private static long getCurrentTimeMicros() {
        return TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS);
    }

    /**
     * Fill a mutation with the lock column.  This may be used when the mutation
     * is executed externally but should be used with extreme caution to ensure
     * the lock is properly released
     *
     * @param m
     * @param time
     * @param ttl
     */
    public String fillLockMutation(MutationBatch m, Long time, Integer ttl) {
        if (lockColumn != null) {
            if (!lockColumn.equals(prefix+lockId))
                throw new IllegalStateException("Can't change prefix or lockId after acquiring the lock");
        }
        else {
            lockColumn = prefix + lockId;
        }
        // Long.valueOf instead of the deprecated 'new Long(0)' constructor
        Long timeoutValue
            = (time == null)
            ? Long.valueOf(0L)
            : time + TimeUnit.MICROSECONDS.convert(timeout, timeoutUnits);
        m.withRow(columnFamily, key).putColumn(lockColumn, generateTimeoutValue(timeoutValue), ttl);
        return lockColumn;
    }

    /**
     * Generate the expire time value to put in the column value.
     * @param timeout
     */
    private ByteBuffer generateTimeoutValue(long timeout) {
        if (columnFamily.getDefaultValueSerializer() == ByteBufferSerializer.get() ||
            columnFamily.getDefaultValueSerializer() == LongSerializer.get()) {
            return LongSerializer.get().toByteBuffer(timeout);
        }
        else {
            return columnFamily.getDefaultValueSerializer().fromString(Long.toString(timeout));
        }
    }

    /**
     * Read the expiration time from the column value
     * @param column
     */
    public long readTimeoutValue(Column<?> column) {
        if (columnFamily.getDefaultValueSerializer() == ByteBufferSerializer.get() ||
            columnFamily.getDefaultValueSerializer() == LongSerializer.get()) {
            return column.getLongValue();
        }
        else {
            return Long.parseLong(column.getStringValue());
        }
    }

    /**
     * Fill a mutation that will release the locks.  This may be used from a
     * separate recipe to release multiple locks.
     *
     * @param m
     */
    public void fillReleaseMutation(MutationBatch m, boolean excludeCurrentLock) {
        // Add the deletes to the end of the mutation
        ColumnListMutation<String> row = m.withRow(columnFamily, key);
        for (String c : locksToDelete) {
            row.deleteColumn(c);
        }
        if (!excludeCurrentLock && lockColumn != null)
            row.deleteColumn(lockColumn);
        locksToDelete.clear();
        lockColumn = null;
    }

    public ColumnMap<String> getDataColumns() {
        return columns;
    }

    public K getKey() {
        return key;
    }

    public Keyspace getKeyspace() {
        return keyspace;
    }

    public ConsistencyLevel getConsistencyLevel() {
        return consistencyLevel;
    }

    public String getLockColumn() {
        return lockColumn;
    }

    public String getLockId() {
        return lockId;
    }

    public String getPrefix() {
        return prefix;
    }

    public int getRetryCount() {
        return retryCount;
    }
}
| 8,034 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/locks/OneStepDistributedRowLock.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.locks;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ColumnMap;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.model.OrderedColumnMap;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.retry.RunOnce;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
public class OneStepDistributedRowLock<K, C> implements DistributedRowLock {
public static final int LOCK_TIMEOUT = 60;
public static final TimeUnit DEFAULT_OPERATION_TIMEOUT_UNITS = TimeUnit.MINUTES;
private final ColumnFamily<K, C> columnFamily; // The column family for data and lock
private final Keyspace keyspace; // The keyspace
private final K key; // Key being locked
private long timeout = LOCK_TIMEOUT; // Timeout after which the lock expires. Units defined by timeoutUnits.
private TimeUnit timeoutUnits = DEFAULT_OPERATION_TIMEOUT_UNITS;
private ConsistencyLevel consistencyLevel = ConsistencyLevel.CL_LOCAL_QUORUM;
private boolean failOnStaleLock = false;
private Set<C> locksToDelete = Sets.newHashSet();
private C lockColumn = null;
private ColumnMap<C> columns = null;
private Integer ttl = null; // Units in seconds
private boolean readDataColumns = false;
private RetryPolicy backoffPolicy = RunOnce.get();
private long acquireTime = 0;
private int retryCount = 0;
private LockColumnStrategy<C> columnStrategy = null;
/**
 * @param keyspace      keyspace used for all lock reads and mutations
 * @param columnFamily  column family holding both the data and the lock columns
 * @param key           row key being locked
 */
public OneStepDistributedRowLock(Keyspace keyspace, ColumnFamily<K, C> columnFamily, K key) {
    this.keyspace = keyspace;
    this.columnFamily = columnFamily;
    this.key = key;
}
/**
 * Set the strategy that identifies which columns are lock columns and
 * provides the column range used to read them back.
 *
 * @param columnStrategy
 * @return this (builder style)
 */
public OneStepDistributedRowLock<K, C> withColumnStrategy(LockColumnStrategy<C> columnStrategy) {
    this.columnStrategy = columnStrategy;
    return this;
}
/**
 * Modify the consistency level being used. Consistency should always be a
 * variant of quorum. The default is CL_QUORUM, which is OK for single
 * region. For multi region the consistency level should be CL_LOCAL_QUORUM.
 * CL_EACH_QUORUM can be used but will incur substantial latency.
 *
 * @param consistencyLevel
 * @return this (builder style)
 */
public OneStepDistributedRowLock<K, C> withConsistencyLevel(ConsistencyLevel consistencyLevel) {
    this.consistencyLevel = consistencyLevel;
    return this;
}
/**
 * If true the first read will also fetch all the columns in the row as
 * opposed to just the lock columns.
 * @param flag
 * @return this (builder style)
 */
public OneStepDistributedRowLock<K, C> withDataColumns(boolean flag) {
    this.readDataColumns = flag;
    return this;
}
/**
 * When set to true the operation will fail with a StaleLockException if a
 * stale (expired but not cleaned up) lock is detected; when false, stale
 * locks are silently queued for deletion on release.
 *
 * @param failOnStaleLock
 * @return this (builder style)
 */
public OneStepDistributedRowLock<K, C> failOnStaleLock(boolean failOnStaleLock) {
    this.failOnStaleLock = failOnStaleLock;
    return this;
}
/**
 * Time for failed locks. Under normal circumstances the lock column will be
 * deleted. If not then this lock column will remain and the row will remain
 * locked. The lock will expire after this timeout.
 *
 * @param timeout
 * @param unit
 * @return this (builder style)
 */
public OneStepDistributedRowLock<K, C> expireLockAfter(long timeout, TimeUnit unit) {
    this.timeout = timeout;
    this.timeoutUnits = unit;
    return this;
}
/**
 * This is the TTL on the lock column being written, as opposed to expireLockAfter which
 * is written as the lock column value. Whereas the expireLockAfter can be used to
 * identify a stale or abandoned lock the TTL will result in the stale or abandoned lock
 * being eventually deleted by cassandra. Set the TTL to a number that is much greater
 * than the expireLockAfter time.
 * @param ttl TTL in seconds
 * @return this (builder style)
 */
public OneStepDistributedRowLock<K, C> withTtl(Integer ttl) {
    this.ttl = ttl;
    return this;
}
/**
 * Same as {@link #withTtl(Integer)} but accepts the TTL in an arbitrary
 * time unit.
 *
 * @param ttl   TTL expressed in {@code units}
 * @param units unit of the supplied ttl
 * @return this (builder style)
 */
public OneStepDistributedRowLock<K, C> withTtl(Integer ttl, TimeUnit units) {
    // Normalize to seconds and reuse the single-argument overload
    return withTtl((int) TimeUnit.SECONDS.convert(ttl, units));
}
/**
 * Set the retry/backoff policy applied when the lock is busy; the default
 * is RunOnce (no retries).
 *
 * @param policy
 * @return this (builder style)
 */
public OneStepDistributedRowLock<K, C> withBackoff(RetryPolicy policy) {
    this.backoffPolicy = policy;
    return this;
}
/**
 * Try to take the lock. The caller must call .release() to properly clean up
 * the lock columns from cassandra
 *
 * Protocol: write our lock column with an expiration timestamp, read back all
 * lock columns, and succeed only if ours is the sole live one.  On a
 * BusyLockException the column is released and the attempt repeated per the
 * configured backoff policy.
 *
 * @throws BusyLockException if the lock is held by someone else and retries ran out
 * @throws Exception on any underlying storage error
 */
@Override
public void acquire() throws Exception {
    // A TTL shorter than the lock timeout would let cassandra delete the lock
    // column while we still believe we hold it
    Preconditions.checkArgument(ttl == null || TimeUnit.SECONDS.convert(timeout, timeoutUnits) < ttl, "Timeout " + timeout + " must be less than TTL " + ttl);
    RetryPolicy retry = backoffPolicy.duplicate();
    retryCount = 0;
    while (true) {
        try {
            long curTimeMicros = getCurrentTimeMicros();
            // Step 1: write our lock column
            MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
            fillLockMutation(m, curTimeMicros, ttl);
            m.execute();
            // Step 2: read back and confirm we are the only live lock holder
            verifyLock(curTimeMicros);
            acquireTime = System.currentTimeMillis();
            return;
        }
        catch (BusyLockException e) {
            // Someone else holds the lock: clean up our column, then retry or give up
            release();
            if(!retry.allowRetry())
                throw e;
            retryCount++;
        }
    }
}
/**
 * Take the lock and return the row data columns. Use this, instead of acquire, when you
 * want to implement a read-modify-write scenario and want to reduce the number of calls
 * to Cassandra.
 *
 * @return the non-lock columns of the locked row
 * @throws Exception
 */
public ColumnMap<C> acquireLockAndReadRow() throws Exception {
    // Force the verify read to also fetch the data columns
    withDataColumns(true);
    acquire();
    return getDataColumns();
}
/**
 * Verify that the lock was acquired. This shouldn't be called unless it's part of a recipe
 * built on top of AbstractDistributedRowLock.
 *
 * @param curTimeInMicros current time in microseconds, used to detect expired locks
 * @throws BusyLockException  if another live lock column exists
 * @throws StaleLockException if an expired lock exists and failOnStaleLock is set
 */
public void verifyLock(long curTimeInMicros) throws Exception, BusyLockException, StaleLockException {
    if (getLockColumn() == null)
        throw new IllegalStateException("verifyLock() called without attempting to take the lock");
    // Read back all columns. There should be only 1 if we got the lock
    Map<C, Long> lockResult = readLockColumns(readDataColumns);
    // Cleanup and check that we really got the lock
    for (Entry<C, Long> entry : lockResult.entrySet()) {
        // This is a stale lock that was never cleaned up
        // (value 0 means "no expiration", so it can never be stale)
        if (entry.getValue() != 0 && curTimeInMicros > entry.getValue()) {
            if (failOnStaleLock) {
                throw new StaleLockException("Stale lock on row '" + key + "'. Manual cleanup requried.");
            }
            // Queue the expired column for deletion when we release
            locksToDelete.add(entry.getKey());
        }
        // Lock already taken, and not by us
        else if (!entry.getKey().equals(getLockColumn())) {
            throw new BusyLockException("Lock already acquired for row '" + key + "' with lock column " + entry.getKey());
        }
    }
}
/**
 * Release the lock by releasing this and any other stale lock columns
 */
@Override
public void release() throws Exception {
    // Skip the round trip entirely when there is nothing to delete
    if (!locksToDelete.isEmpty() || getLockColumn() != null) {
        MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
        fillReleaseMutation(m, false);
        m.execute();
    }
}
/**
 * Release using the provided mutation. Use this when you want to commit actual data
 * when releasing the lock
 * @param m mutation carrying the caller's data changes; the lock deletes are appended to it
 * @throws Exception
 */
public void releaseWithMutation(MutationBatch m) throws Exception {
    releaseWithMutation(m, false);
}
/**
 * Release using the provided mutation, optionally forcing the release even
 * when the lock has already become stale (held longer than the configured
 * timeout).
 *
 * @param m     mutation carrying the caller's data changes; lock deletes are appended
 * @param force if true, commit even when the lock is stale
 * @return true if the lock had become stale by the time of release
 * @throws StaleLockException if the lock is stale and force is false
 */
public boolean releaseWithMutation(MutationBatch m, boolean force) throws Exception {
    // acquireTime was captured with currentTimeMillis() in acquire()
    long elapsed = System.currentTimeMillis() - acquireTime;
    boolean isStale = false;
    if (timeout > 0 && elapsed > TimeUnit.MILLISECONDS.convert(timeout, this.timeoutUnits)) {
        isStale = true;
        if (!force) {
            throw new StaleLockException("Lock for '" + getKey() + "' became stale");
        }
    }
    m.setConsistencyLevel(consistencyLevel);
    fillReleaseMutation(m, false);
    m.execute();
    return isStale;
}
/**
 * Return a mapping of existing lock columns and their expiration times
 *
 * @return map of lock column name to expiration time in microseconds (0 = no expiration)
 * @throws Exception
 */
public Map<C, Long> readLockColumns() throws Exception {
    return readLockColumns(false);
}
    /**
     * Read all the lock columns.  Will also read data columns if withDataColumns(true)
     * was called, in which case the non-lock columns of the row are captured
     * into the 'columns' field as a side effect.
     *
     * @param readDataColumns true to read the whole row (locks + data), false to
     *                        restrict the query to the strategy's lock column range
     * @return map of lock column name to expiration time in microseconds
     * @throws Exception on any cassandra error
     */
    private Map<C, Long> readLockColumns(boolean readDataColumns) throws Exception {
        Map<C, Long> result = Maps.newLinkedHashMap();
        // Read all the columns
        if (readDataColumns) {
            columns = new OrderedColumnMap<C>();
            ColumnList<C> lockResult = keyspace
                    .prepareQuery(columnFamily)
                    .setConsistencyLevel(consistencyLevel)
                    .getKey(key)
                    .execute()
                    .getResult();
            // Split the row: lock columns go into the result, the rest into 'columns'
            for (Column<C> c : lockResult) {
                if (columnStrategy.isLockColumn(c.getName()))
                    result.put(c.getName(), readTimeoutValue(c));
                else
                    columns.add(c);
            }
        }
        // Read only the lock columns
        else {
            ColumnList<C> lockResult = keyspace
                    .prepareQuery(columnFamily)
                    .setConsistencyLevel(consistencyLevel)
                    .getKey(key)
                    .withColumnRange(columnStrategy.getLockColumnRange())
                    .execute()
                    .getResult();
            for (Column<C> c : lockResult) {
                result.put(c.getName(), readTimeoutValue(c));
            }
        }
        return result;
    }
    /**
     * Release all locks. Use this carefully as it could release a lock for a
     * running operation.
     *
     * @return map of all lock columns that were read (all of them are deleted)
     * @throws Exception on any cassandra error
     */
    public Map<C, Long> releaseAllLocks() throws Exception {
        return releaseLocks(true);
    }

    /**
     * Release all expired locks for this key.
     *
     * @return map of all lock columns that were read; only expired ones are deleted
     * @throws Exception on any cassandra error
     */
    public Map<C, Long> releaseExpiredLocks() throws Exception {
        return releaseLocks(false);
    }
/**
* Delete locks columns. Set force=true to remove locks that haven't
* expired yet.
*
* This operation first issues a read to cassandra and then deletes columns
* in the response.
*
* @param force - Force delete of non expired locks as well
* @return
* @throws Exception
*/
public Map<C, Long> releaseLocks(boolean force) throws Exception {
Map<C, Long> locksToDelete = readLockColumns();
MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
ColumnListMutation<C> row = m.withRow(columnFamily, key);
long now = getCurrentTimeMicros();
for (Entry<C, Long> c : locksToDelete.entrySet()) {
if (force || (c.getValue() > 0 && c.getValue() < now)) {
row.deleteColumn(c.getKey());
}
}
m.execute();
return locksToDelete;
}
/**
* Get the current system time
*
* @return
*/
private static long getCurrentTimeMicros() {
return TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS);
}
/**
* Fill a mutation with the lock column. This may be used when the mutation
* is executed externally but should be used with extreme caution to ensure
* the lock is properly released
*
* @param m
* @param time
* @param ttl
*/
public C fillLockMutation(MutationBatch m, Long time, Integer ttl) {
if (lockColumn != null) {
if (!lockColumn.equals(columnStrategy.generateLockColumn()))
throw new IllegalStateException("Can't change prefix or lockId after acquiring the lock");
}
else {
lockColumn = columnStrategy.generateLockColumn();
}
Long timeoutValue
= (time == null)
? new Long(0)
: time + TimeUnit.MICROSECONDS.convert(timeout, timeoutUnits);
m.withRow(columnFamily, key).putColumn(lockColumn, generateTimeoutValue(timeoutValue), ttl);
return lockColumn;
}
    /**
     * Generate the expiration time value to put in the lock column value.
     * Serializes the long directly when the column family's default value
     * serializer is long/byte-buffer based, otherwise stores the string form
     * via the default serializer.
     * @param timeout expiration time in microseconds (0 = never expires)
     * @return serialized column value
     */
    private ByteBuffer generateTimeoutValue(long timeout) {
        if (columnFamily.getDefaultValueSerializer() == ByteBufferSerializer.get() ||
            columnFamily.getDefaultValueSerializer() == LongSerializer.get()) {
            return LongSerializer.get().toByteBuffer(timeout);
        }
        else {
            // Fall back to the column family's own serializer via string conversion
            return columnFamily.getDefaultValueSerializer().fromString(Long.toString(timeout));
        }
    }
    /**
     * Read the expiration time from the column value.  Must mirror the
     * encoding chosen by generateTimeoutValue().
     * @param column column whose value holds the expiration time
     * @return expiration time in microseconds (0 = never expires)
     */
    public long readTimeoutValue(Column<?> column) {
        if (columnFamily.getDefaultValueSerializer() == ByteBufferSerializer.get() ||
            columnFamily.getDefaultValueSerializer() == LongSerializer.get()) {
            return column.getLongValue();
        }
        else {
            // Value was stored as the string form of the long
            return Long.parseLong(column.getStringValue());
        }
    }
    /**
     * Fill a mutation that will release the locks. This may be used from a
     * separate recipe to release multiple locks.
     *
     * Note: the internal state (locksToDelete and lockColumn) is cleared as a
     * side effect, even when excludeCurrentLock is true.
     *
     * @param m                  mutation to which the delete operations are appended
     * @param excludeCurrentLock when true, only stale lock columns are deleted
     *                           and this instance's own lock column is kept
     */
    public void fillReleaseMutation(MutationBatch m, boolean excludeCurrentLock) {
        // Add the deletes to the end of the mutation
        ColumnListMutation<C> row = m.withRow(columnFamily, key);
        for (C c : locksToDelete) {
            row.deleteColumn(c);
        }
        if (!excludeCurrentLock && lockColumn != null)
            row.deleteColumn(lockColumn);
        locksToDelete.clear();
        lockColumn = null;
    }
    /** @return data columns captured by readLockColumns(true); only populated when withDataColumns(true) was requested */
    public ColumnMap<C> getDataColumns() {
        return columns;
    }

    /** @return the row key being locked */
    public K getKey() {
        return key;
    }

    /** @return the keyspace this lock operates on */
    public Keyspace getKeyspace() {
        return keyspace;
    }

    /** @return consistency level used for all lock reads and writes */
    public ConsistencyLevel getConsistencyLevel() {
        return consistencyLevel;
    }

    /** @return this instance's lock column, or null if the lock was never attempted or was released */
    public C getLockColumn() {
        return lockColumn;
    }

    /** @return number of retries performed while acquiring the lock */
    public int getRetryCount() {
        return retryCount;
    }
} | 8,035 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/locks/StringRowLockColumnStrategy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.locks;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.util.RangeBuilder;
/**
 * LockColumnStrategy for String column names, where every lock column is the
 * concatenation of a shared prefix (default "_LOCK_") and a unique lock id.
 */
public class StringRowLockColumnStrategy implements LockColumnStrategy<String> {

    public static final String DEFAULT_LOCK_PREFIX = "_LOCK_";

    private String lockId = null;
    private String prefix = DEFAULT_LOCK_PREFIX;

    public StringRowLockColumnStrategy() {
    }

    /** @return the unique id appended to the prefix when generating a lock column */
    public String getLockId() {
        return lockId;
    }

    public void setLockId(String lockId) {
        this.lockId = lockId;
    }

    /** Fluent variant of {@link #setLockId(String)}. */
    public StringRowLockColumnStrategy withLockId(String lockId) {
        setLockId(lockId);
        return this;
    }

    /** @return the prefix shared by all lock columns */
    public String getPrefix() {
        return prefix;
    }

    public void setPrefix(String prefix) {
        this.prefix = prefix;
    }

    /** Fluent variant of {@link #setPrefix(String)}. */
    public StringRowLockColumnStrategy withPrefix(String prefix) {
        setPrefix(prefix);
        return this;
    }

    @Override
    public boolean isLockColumn(String c) {
        // Any column whose name starts with the configured prefix is a lock
        return c.startsWith(prefix);
    }

    @Override
    public ByteBufferRange getLockColumnRange() {
        // Cover every possible suffix that can follow the prefix
        return new RangeBuilder().setStart(prefix + "\u0000").setEnd(prefix + "\uFFFF").build();
    }

    @Override
    public String generateLockColumn() {
        return prefix + lockId;
    }
}
| 8,036 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/locks/LockColumnStrategy.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.locks;
import com.netflix.astyanax.model.ByteBufferRange;
/**
* Strategy used by locking and uniqueness recipes to generate
* and check lock columns
*
* @author elandau
*
* @param <C>
*/
public interface LockColumnStrategy<C> {
    /**
     * Determine whether a column is a lock column, as opposed to a data column
     * stored in the same row.
     * @param c column name to test
     * @return true if this is a lock column
     */
    boolean isLockColumn(C c);

    /**
     * Return the ByteBuffer range to use when querying all lock
     * columns in a row.
     * @return range covering every possible lock column name
     */
    ByteBufferRange getLockColumnRange();

    /**
     * Generate a unique lock column identifying this client's lock attempt.
     * @return the new lock column name
     */
    C generateLockColumn();
}
| 8,037 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/locks/BusyLockException.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.locks;
public class BusyLockException extends Exception {
private static final long serialVersionUID = -6818914810045830278L;
public BusyLockException(Exception e) {
super(e);
}
public BusyLockException(String message, Exception e) {
super(message, e);
}
public BusyLockException(String message) {
super(message);
}
}
| 8,038 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/CassandraChunkedStorageProvider.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.nio.ByteBuffer;
import java.util.Map;
import com.google.common.collect.Maps;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.BoundedExponentialBackoff;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.StringSerializer;
/**
* ChunkProvider responsible for reading and writing chunks to cassandra. Chunks
* are written to different row keys with the row key name having the format
* <chunknumber>$<objectname>
*
* @author elandau
*
*/
public class CassandraChunkedStorageProvider implements ChunkedStorageProvider {
private static final RetryPolicy DEFAULT_RETRY_POLICY = new BoundedExponentialBackoff(1000, 10000, 5);
private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.CL_QUORUM;
private static final int DEFAULT_CHUNKSIZE = 0x4000;
private static final String DEFAULT_ROW_KEY_FORMAT = "%s$%d";
public enum Columns {
DATA, OBJECTSIZE, CHUNKSIZE, CHUNKCOUNT, EXPIRES, ATTRIBUTES
}
private final ColumnFamily<String, String> cf;
private final Keyspace keyspace;
private final Map<Columns, String> names = Maps.newHashMap();
private RetryPolicy retryPolicy = DEFAULT_RETRY_POLICY;
private String rowKeyFormat = DEFAULT_ROW_KEY_FORMAT;
private ConsistencyLevel readConsistencyLevel = ConsistencyLevel.CL_ONE; // for backwards compatibility.
private ConsistencyLevel writeConsistencyLevel = DEFAULT_CONSISTENCY_LEVEL;
public CassandraChunkedStorageProvider(Keyspace keyspace, String cfName) {
this.keyspace = keyspace;
this.cf = ColumnFamily.newColumnFamily(cfName, StringSerializer.get(), StringSerializer.get());
}
public CassandraChunkedStorageProvider(Keyspace keyspace, ColumnFamily<String, String> cf) {
this.keyspace = keyspace;
this.cf = cf;
}
public CassandraChunkedStorageProvider withColumnName(Columns column, String name) {
names.put(column, name);
return this;
}
public CassandraChunkedStorageProvider withRowKeyFormat(String format) {
this.rowKeyFormat = format;
return this;
}
private String getColumnName(Columns column) {
if (names.containsKey(column))
return names.get(column);
return column.name();
}
@Override
public int writeChunk(String objectName, int chunkId, ByteBuffer data, Integer ttl) throws Exception {
MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(writeConsistencyLevel).withRetryPolicy(retryPolicy);
m.withRow(cf, getRowKey(objectName, chunkId)).putColumn(getColumnName(Columns.DATA), data, ttl)
.putColumn(getColumnName(Columns.CHUNKSIZE), data.limit(), ttl);
if (chunkId == 0) {
m.withRow(cf, objectName).putColumn(getColumnName(Columns.CHUNKSIZE), data.limit(), ttl);
}
m.execute();
return data.limit();
}
@Override
public ByteBuffer readChunk(String objectName, int chunkId) throws Exception {
return keyspace.prepareQuery(cf).setConsistencyLevel(readConsistencyLevel).withRetryPolicy(retryPolicy)
.getKey(getRowKey(objectName, chunkId)).getColumn(getColumnName(Columns.DATA)).execute().getResult()
.getByteBufferValue();
}
private String getRowKey(String objectName, int chunkId) {
return new String(rowKeyFormat).replace("%s", objectName).replace("%d", Integer.toString(chunkId));
}
public CassandraChunkedStorageProvider setReadConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.readConsistencyLevel = consistencyLevel;
return this;
}
public ConsistencyLevel getReadConsistencyLevel() {
return this.readConsistencyLevel;
}
public CassandraChunkedStorageProvider setWriteConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.writeConsistencyLevel = consistencyLevel;
return this;
}
public ConsistencyLevel getWriteConsistencyLevel() {
return this.writeConsistencyLevel;
}
/**
* @deprecated use {@link #setReadConsistencyLevel(ConsistencyLevel) or #setWriteConsistencyLevel(ConsistencyLevel)}
* @param consistencyLevel
* @return
*/
@Deprecated
public CassandraChunkedStorageProvider setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.writeConsistencyLevel = consistencyLevel;
this.readConsistencyLevel = consistencyLevel;
return this;
}
/**
* @deprecated ise {@link #getReadConsistencyLevel()} or {@link #getWriteConsistencyLevel()}
* @return
*/
@Deprecated
public ConsistencyLevel getConsistencyLevel() {
return this.writeConsistencyLevel;
}
@Override
public void writeMetadata(String objectName, ObjectMetadata objMetaData) throws Exception {
MutationBatch m = keyspace.prepareMutationBatch().withRetryPolicy(retryPolicy);
ColumnListMutation<String> row = m.withRow(cf, objectName);
if (objMetaData.getChunkSize() != null)
row.putColumn(getColumnName(Columns.CHUNKSIZE), objMetaData.getChunkSize(), objMetaData.getTtl());
if (objMetaData.getChunkCount() != null)
row.putColumn(getColumnName(Columns.CHUNKCOUNT), objMetaData.getChunkCount(), objMetaData.getTtl());
if (objMetaData.getObjectSize() != null)
row.putColumn(getColumnName(Columns.OBJECTSIZE), objMetaData.getObjectSize(), objMetaData.getTtl());
if (objMetaData.getAttributes() != null)
row.putColumn(getColumnName(Columns.ATTRIBUTES), objMetaData.getAttributes(), objMetaData.getTtl());
m.execute();
}
@Override
public ObjectMetadata readMetadata(String objectName) throws Exception, NotFoundException {
ColumnList<String> columns = keyspace.prepareQuery(cf).getKey(objectName).execute().getResult();
if (columns.isEmpty()) {
throw new NotFoundException(objectName);
}
return new ObjectMetadata().setObjectSize(columns.getLongValue(getColumnName(Columns.OBJECTSIZE), null))
.setChunkSize(columns.getIntegerValue(getColumnName(Columns.CHUNKSIZE), null))
.setChunkCount(columns.getIntegerValue(getColumnName(Columns.CHUNKCOUNT), null))
.setAttributes(columns.getStringValue(getColumnName(Columns.ATTRIBUTES), null));
}
@Override
public void deleteObject(String objectName, Integer chunkCount) throws Exception, NotFoundException {
if (chunkCount == null) {
ObjectMetadata attr = readMetadata(objectName);
if (attr.getChunkCount() == null)
throw new NotFoundException("Object not found :" + objectName);
chunkCount = attr.getChunkCount();
}
MutationBatch m = keyspace.prepareMutationBatch().withRetryPolicy(retryPolicy);
for (int i = 0; i < chunkCount; i++) {
m.withRow(cf, getRowKey(objectName, i)).delete();
}
m.withRow(cf, objectName).delete();
m.execute();
}
@Override
public int getDefaultChunkSize() {
return DEFAULT_CHUNKSIZE;
}
}
| 8,039 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ObjectWriteCallback.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
/**
 * Callback notified of progress while an object is written to chunked storage.
 */
public interface ObjectWriteCallback {
    /**
     * Called after a chunk was written successfully.
     * @param chunk zero-based chunk id
     * @param size  number of bytes written for this chunk
     */
    void onChunk(int chunk, int size);

    /**
     * Called when writing a single chunk failed (the write may still be retried).
     * @param chunk     zero-based chunk id
     * @param exception failure cause
     */
    void onChunkException(int chunk, Exception exception);

    /** Called once when the overall write operation failed. */
    void onFailure(Exception exception);

    /** Called once when the entire object was written successfully. */
    void onSuccess();
}
| 8,040 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ObjectMetadata.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
/**
 * Metadata describing a chunked object: total size, chunk geometry, optional
 * TTL, parent path and free-form attributes.  All setters are fluent and
 * return this instance for chaining.
 */
public class ObjectMetadata {
    // Optional TTL in seconds; null or non-positive means "no TTL"
    private Integer ttl;
    // Total object size in bytes
    private Long objectSize;
    // Number of chunks the object was split into
    private Integer chunkCount;
    // Size of each chunk in bytes
    private Integer chunkSize;
    // Optional logical parent path of the object
    private String parentPath;
    // Free-form serialized attributes
    private String attributes;

    public ObjectMetadata setTtl(Integer ttl) {
        this.ttl = ttl;
        return this;
    }

    public Integer getTtl() {
        return this.ttl;
    }

    /** @return true when a positive TTL has been set */
    public boolean hasTtl() {
        return this.ttl != null && this.ttl.intValue() > 0;
    }

    public ObjectMetadata setObjectSize(Long objectSize) {
        this.objectSize = objectSize;
        return this;
    }

    public Long getObjectSize() {
        return this.objectSize;
    }

    public ObjectMetadata setChunkCount(Integer chunkCount) {
        this.chunkCount = chunkCount;
        return this;
    }

    public Integer getChunkCount() {
        return this.chunkCount;
    }

    public ObjectMetadata setChunkSize(Integer chunkSize) {
        this.chunkSize = chunkSize;
        return this;
    }

    public Integer getChunkSize() {
        return this.chunkSize;
    }

    /** @return true when object size, chunk count and chunk size are all known */
    public boolean isValidForRead() {
        return this.objectSize != null && this.chunkCount != null && this.chunkSize != null;
    }

    public ObjectMetadata setParentPath(String parentPath) {
        this.parentPath = parentPath;
        return this;
    }

    public String getParentPath() {
        return this.parentPath;
    }

    public ObjectMetadata setAttributes(String attributes) {
        this.attributes = attributes;
        return this;
    }

    public String getAttributes() {
        return this.attributes;
    }
}
| 8,041 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/NoOpObjectReadCallback.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.nio.ByteBuffer;
/**
 * ObjectReadCallback implementation that ignores all events.  Useful as a
 * default when the caller does not need per-chunk progress notifications.
 */
public class NoOpObjectReadCallback implements ObjectReadCallback {
    /** No-op. */
    @Override
    public void onChunk(int chunk, ByteBuffer data) {
    }

    /** No-op. */
    @Override
    public void onChunkException(int chunk, Exception exception) {
    }

    /** No-op. */
    @Override
    public void onFailure(Exception exception) {
    }

    /** No-op. */
    @Override
    public void onSuccess() {
    }
}
| 8,042 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ObjectInfoReader.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.util.concurrent.Callable;
/**
 * Callable that fetches the stored metadata for a single object from a
 * chunked storage provider.
 */
public class ObjectInfoReader implements Callable<ObjectMetadata> {
    private final ChunkedStorageProvider provider;
    private final String objectName;

    /**
     * @param provider   storage provider to read from
     * @param objectName name of the object whose metadata is requested
     */
    public ObjectInfoReader(ChunkedStorageProvider provider, String objectName) {
        this.provider = provider;
        this.objectName = objectName;
    }

    /**
     * @return the object's stored metadata
     * @throws Exception if the object does not exist or the read fails
     */
    @Override
    public ObjectMetadata call() throws Exception {
        return provider.readMetadata(objectName);
    }
}
| 8,043 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ChunkedStorageProvider.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.nio.ByteBuffer;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
public interface ChunkedStorageProvider {
    /**
     * Write a single chunk to the storage.
     *
     * @param objectName name of the object the chunk belongs to
     * @param chunkId    zero-based index of the chunk
     * @param data       chunk payload
     * @param ttl        optional TTL in seconds, or null for none
     * @return number of bytes written
     * @throws Exception on storage failure
     */
    int writeChunk(String objectName, int chunkId, ByteBuffer data, Integer ttl) throws Exception;

    /**
     * Read the requested chunk id from the storage.
     *
     * @param objectName name of the object
     * @param chunkId    zero-based index of the chunk to read
     * @return the chunk payload
     * @throws NotFoundException if the chunk does not exist
     */
    ByteBuffer readChunk(String objectName, int chunkId) throws Exception, NotFoundException;

    /**
     * Delete an object and its chunks.
     *
     * @param objectName name of the object to delete
     * @param chunkCount number of chunks to delete, or null to determine the
     *                   count from the stored metadata
     * @throws Exception on storage failure
     */
    void deleteObject(String objectName, Integer chunkCount) throws Exception;

    /**
     * Persist all attributes for an object. Some attributes are written at the
     * start of the operation but are updated after the file has been written
     * with additional information such as the total number of chunks and the
     * file size.
     *
     * @param objectName name of the object
     * @param attr       metadata to persist
     * @throws Exception on storage failure
     */
    void writeMetadata(String objectName, ObjectMetadata attr) throws Exception;

    /**
     * Retrieve information for an object.
     *
     * @param objectName name of the object
     * @return the stored metadata
     * @throws NotFoundException if no metadata exists for the object
     */
    ObjectMetadata readMetadata(String objectName) throws Exception, NotFoundException;

    /**
     * @return the preferred chunk size, in bytes, for this provider
     */
    int getDefaultChunkSize();
}
| 8,044 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ObjectDeleter.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.util.concurrent.Callable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Callable that deletes an object and its chunk rows from chunked storage.
 */
public class ObjectDeleter implements Callable<Void> {
    private static final Logger LOG = LoggerFactory.getLogger(ObjectDeleter.class);

    private final ChunkedStorageProvider provider;
    private final String objectName;
    // Number of chunk rows to delete; null means the provider determines the
    // count from the stored metadata and deletes everything
    private Integer chunkCount = null; // This will default to all being deleted

    /**
     * @param provider   storage provider to delete from
     * @param objectName name of the object to delete
     */
    public ObjectDeleter(ChunkedStorageProvider provider, String objectName) {
        this.provider = provider;
        this.objectName = objectName;
    }

    /** Fluent setter limiting how many chunk rows are deleted. */
    public ObjectDeleter withChunkCountToDelete(int count) {
        this.chunkCount = count;
        return this;
    }

    /**
     * Perform the delete.
     * @return always null
     * @throws Exception if the delete fails
     */
    @Override
    public Void call() throws Exception {
        LOG.info("Deleting " + objectName);
        provider.deleteObject(objectName, chunkCount);
        return null;
    }
}
| 8,045 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/NoOpObjectWriteCallback.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
/**
 * ObjectWriteCallback implementation that ignores all events.  Useful as a
 * default when the caller does not need per-chunk progress notifications.
 */
public class NoOpObjectWriteCallback implements ObjectWriteCallback {
    /** No-op. */
    @Override
    public void onChunk(int chunk, int size) {
    }

    /** No-op. */
    @Override
    public void onChunkException(int chunk, Exception exception) {
    }

    /** No-op. */
    @Override
    public void onFailure(Exception exception) {
    }

    /** No-op. */
    @Override
    public void onSuccess() {
    }
}
| 8,046 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ObjectReader.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicReferenceArray;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.retry.RunOnce;
/**
 * Reads a chunked object from storage and streams the reassembled bytes to the
 * supplied {@link OutputStream}.  Chunk ids are gathered sequentially into
 * batches of {@code batchSize}; each batch is fetched in parallel (in random
 * order) and then written to the output stream in the correct sequence.
 *
 * Fixes over the previous revision: removed the unused {@code totalBytesRead2}
 * counter, removed dead commented-out code, and logging now includes the
 * exception so stack traces are not lost.
 */
public class ObjectReader implements Callable<ObjectMetadata> {
    private static final Logger LOG = LoggerFactory.getLogger(ObjectReader.class);

    private static final int DEFAULT_CONCURRENCY_LEVEL = 4;
    private static final int MAX_WAIT_TIME_TO_FINISH = 60;
    private static final int DEFAULT_BATCH_SIZE = 11;

    private final ChunkedStorageProvider provider;
    private final String objectName;
    private final OutputStream os;

    private int concurrencyLevel = DEFAULT_CONCURRENCY_LEVEL;
    private int maxWaitTimeInSeconds = MAX_WAIT_TIME_TO_FINISH;
    private int batchSize = DEFAULT_BATCH_SIZE;
    private RetryPolicy retryPolicy;
    private ObjectReadCallback callback = new NoOpObjectReadCallback();

    public ObjectReader(ChunkedStorageProvider provider, String objectName, OutputStream os) {
        this.provider = provider;
        this.objectName = objectName;
        this.os = os;
        this.retryPolicy = new RunOnce();
    }

    /** Number of chunks fetched (in parallel) per batch. */
    public ObjectReader withBatchSize(int size) {
        this.batchSize = size;
        return this;
    }

    /** Number of worker threads used to fetch each batch of chunks. */
    public ObjectReader withConcurrencyLevel(int level) {
        this.concurrencyLevel = level;
        return this;
    }

    /** Retry policy applied both to reading metadata and to each chunk fetch. */
    public ObjectReader withRetryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }

    /** Maximum time, in seconds, to wait for one batch of chunk fetches. */
    public ObjectReader withMaxWaitTime(int maxWaitTimeInSeconds) {
        this.maxWaitTimeInSeconds = maxWaitTimeInSeconds;
        return this;
    }

    /** Callback notified per chunk as well as on overall failure/success. */
    public ObjectReader withCallback(ObjectReadCallback callback) {
        this.callback = callback;
        return this;
    }

    /**
     * Performs the read.
     *
     * @return the object's stored metadata
     * @throws NotFoundException if the object does not exist or is not yet
     *         complete after exhausting the retry policy
     * @throws Exception if a chunk cannot be fetched, a batch exceeds
     *         {@code maxWaitTimeInSeconds}, or the total bytes read do not
     *         match the recorded object size
     */
    @Override
    public ObjectMetadata call() throws Exception {
        LOG.info("Reading: " + objectName);

        Preconditions.checkNotNull(objectName);
        Preconditions.checkNotNull(os);

        try {
            // Try to get the file metadata first. The entire file must be
            // available before it can be downloaded. If not available then we
            // back off and retry using the provided retry policy.
            ObjectMetadata attributes;
            RetryPolicy retry = retryPolicy.duplicate();
            do {
                try {
                    attributes = provider.readMetadata(objectName);
                    if (attributes.isValidForRead())
                        break;
                    if (!retry.allowRetry())
                        throw new NotFoundException("File doesn't exists or isn't ready to be read: " + objectName);
                }
                catch (Exception e) {
                    // Include the exception so transient failures are diagnosable
                    LOG.warn(e.getMessage(), e);
                    if (!retry.allowRetry())
                        throw e;
                }
            } while (true);

            // First exception raised by any chunk-fetch worker; aborts the read
            final AtomicReference<Exception> exception = new AtomicReference<Exception>();
            final AtomicLong totalBytesRead = new AtomicLong();

            // Iterate sequentially building up the batches. Once a complete
            // batch of ids is ready, randomize fetching the chunks and then
            // download them in parallel.
            List<Integer> idsToRead = Lists.newArrayList();
            for (int block = 0; block < attributes.getChunkCount(); block++) {
                idsToRead.add(block);

                // Got a batch, or reached the end
                if (idsToRead.size() == batchSize || block == attributes.getChunkCount() - 1) {
                    // Capture the first id before shuffling; used to map chunk
                    // ids back to slots in the result array
                    final int firstBlockId = idsToRead.get(0);
                    Collections.shuffle(idsToRead);

                    final AtomicReferenceArray<ByteBuffer> chunks = new AtomicReferenceArray<ByteBuffer>(
                            idsToRead.size());
                    ExecutorService executor = Executors.newFixedThreadPool(
                            concurrencyLevel,
                            new ThreadFactoryBuilder().setDaemon(true)
                                    .setNameFormat("ChunkReader-" + objectName + "-%d").build());
                    try {
                        for (final int chunkId : idsToRead) {
                            executor.submit(new Runnable() {
                                @Override
                                public void run() {
                                    // Fetch one chunk, retrying per the policy;
                                    // give up early if another chunk already failed
                                    RetryPolicy retry = retryPolicy.duplicate();
                                    while (exception.get() == null) {
                                        try {
                                            ByteBuffer chunk = provider.readChunk(objectName, chunkId);
                                            totalBytesRead.addAndGet(chunk.remaining());
                                            chunks.set(chunkId - firstBlockId, chunk);
                                            callback.onChunk(chunkId, chunk);
                                            break;
                                        }
                                        catch (Exception e) {
                                            callback.onChunkException(chunkId, e);
                                            if (retry.allowRetry())
                                                continue;
                                            exception.compareAndSet(null, e);
                                        }
                                    }
                                }
                            });
                        }
                    }
                    finally {
                        executor.shutdown();
                        if (!executor.awaitTermination(maxWaitTimeInSeconds, TimeUnit.SECONDS)) {
                            throw new Exception("Took too long to fetch object: " + objectName);
                        }
                    }

                    // Rethrow the first chunk failure, if any
                    if (exception.get() != null)
                        throw exception.get();

                    // Write the completed batch to the output stream, in order
                    for (int i = 0; i < chunks.length(); i++) {
                        ByteBuffer bb = chunks.get(i);
                        byte[] bytes = new byte[bb.remaining()];
                        bb.get(bytes, 0, bytes.length);
                        os.write(bytes);
                        os.flush();
                    }
                    idsToRead.clear();
                }
            }

            if (totalBytesRead.get() != attributes.getObjectSize()) {
                throw new Exception("Bytes read (" + totalBytesRead.get() + ") does not match object size ("
                        + attributes.getObjectSize() + ") for object " + objectName);
            }
            callback.onSuccess();
            return attributes;
        }
        catch (Exception e) {
            callback.onFailure(e);
            throw e;
        }
    }
}
| 8,047 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ChunkedStorage.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.io.InputStream;
import java.io.OutputStream;
/**
 * Static factory entry points for the chunked object storage recipe.
 * Each method simply constructs the corresponding worker object; nothing
 * is executed until the returned object's {@code call()} is invoked.
 */
public class ChunkedStorage {
    /** Creates a writer that will store the contents of {@code source} under {@code name}. */
    public static ObjectWriter newWriter(ChunkedStorageProvider provider, String name, InputStream source) {
        return new ObjectWriter(provider, name, source);
    }

    /** Creates a reader that will stream the object named {@code name} into {@code target}. */
    public static ObjectReader newReader(ChunkedStorageProvider provider, String name, OutputStream target) {
        return new ObjectReader(provider, name, target);
    }

    /** Creates a deleter for the object named {@code name}. */
    public static ObjectDeleter newDeleter(ChunkedStorageProvider provider, String name) {
        return new ObjectDeleter(provider, name);
    }

    /** Creates a reader that fetches only the metadata of the object named {@code name}. */
    public static ObjectInfoReader newInfoReader(ChunkedStorageProvider provider, String name) {
        return new ObjectInfoReader(provider, name);
    }

    /** Creates a lister for objects stored under {@code directoryPath}. */
    public static ObjectDirectoryLister newObjectDirectoryLister(ChunkedStorageProvider provider, String directoryPath) {
        return new ObjectDirectoryLister(provider, directoryPath);
    }
}
| 8,048 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ObjectReadCallback.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.nio.ByteBuffer;
/**
 * Callback for observing the progress of a chunked object read.
 * {@code onChunk}/{@code onChunkException} may be invoked concurrently from
 * multiple reader worker threads (see ObjectReader), so implementations must
 * be thread-safe.
 */
public interface ObjectReadCallback {
    /**
     * Invoked after a chunk has been fetched successfully.
     *
     * @param chunk zero-based index of the chunk within the object
     * @param data  the chunk's bytes
     */
    void onChunk(int chunk, ByteBuffer data);

    /**
     * Invoked when fetching a chunk fails; the read may still retry the chunk
     * depending on the configured retry policy.
     *
     * @param chunk     zero-based index of the chunk that failed
     * @param exception the failure cause
     */
    void onChunkException(int chunk, Exception exception);

    /**
     * Invoked once when the overall read fails.
     *
     * @param exception the failure cause
     */
    void onFailure(Exception exception);

    /** Invoked once after the entire object has been read successfully. */
    void onSuccess();
}
| 8,049 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/AutoAllocatingLinkedBlockingQueue.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.util.concurrent.LinkedBlockingQueue;
import com.google.common.base.Supplier;
/**
 * A bounded {@link LinkedBlockingQueue} used as a simple object pool: the
 * extra {@link #poll(Supplier)} overload hands back a pooled element when one
 * is available, or asks the supplier to allocate a fresh one otherwise.
 */
@SuppressWarnings("serial")
public class AutoAllocatingLinkedBlockingQueue<T> extends LinkedBlockingQueue<T> {
    public AutoAllocatingLinkedBlockingQueue(int concurrencyLevel) {
        super(concurrencyLevel);
    }

    /**
     * Non-blocking poll with fallback allocation.
     *
     * @param supplier invoked only when the queue is empty
     * @return a pooled element, or a newly allocated one
     */
    public T poll(Supplier<T> supplier) {
        final T pooled = super.poll();
        return (pooled != null) ? pooled : supplier.get();
    }
}
| 8,050 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ObjectWriter.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.astyanax.util.BlockingConcurrentWindowCounter;
/**
 * Splits an {@link InputStream} into fixed-size chunks and writes them to a
 * {@link ChunkedStorageProvider} using a pool of worker threads, finishing by
 * persisting the object's metadata.  On failure the partially written chunks
 * are deleted (best effort).
 *
 * Fixes over the previous revision: replaced {@code e.printStackTrace()} and
 * message-only logging with exception-aware logging so stack traces are kept.
 */
public class ObjectWriter implements Callable<ObjectMetadata> {
    private static final Logger LOG = LoggerFactory.getLogger(ObjectWriter.class);

    private static final int DEFAULT_CONCURRENCY_LEVEL = 4;
    private static final int MAX_WAIT_TIME_TO_FINISH = 60;
    private static final Integer NO_TTL = null;

    private final ChunkedStorageProvider provider;
    private final String objectName;
    private final InputStream is;

    private int chunkSize;
    private Integer ttl = NO_TTL;
    private String attributes;
    private int concurrencyLevel = DEFAULT_CONCURRENCY_LEVEL;
    private int maxWaitTimeInSeconds = MAX_WAIT_TIME_TO_FINISH;
    private ObjectWriteCallback callback = new NoOpObjectWriteCallback();

    public ObjectWriter(ChunkedStorageProvider provider, String objectName, InputStream is) {
        this.provider = provider;
        this.objectName = objectName;
        this.chunkSize = provider.getDefaultChunkSize();
        this.is = is;
    }

    /** Size, in bytes, of each chunk written to the provider. */
    public ObjectWriter withChunkSize(int size) {
        this.chunkSize = size;
        return this;
    }

    /** TTL applied to each chunk and to the metadata; null means no TTL. */
    public ObjectWriter withTtl(Integer ttl) {
        this.ttl = ttl;
        return this;
    }

    /**
     * additional attributes (e.g. MD5 hash of the value)
     * that user want to save along with the meta data
     * @param attributes serialized string (e.g. JSON string)
     */
    public ObjectWriter withAttributes(String attributes) {
        this.attributes = attributes;
        return this;
    }

    /** Number of worker threads writing chunks concurrently. */
    public ObjectWriter withConcurrencyLevel(int level) {
        this.concurrencyLevel = level;
        return this;
    }

    /** Maximum time, in seconds, to wait for all chunk writes to finish. */
    public ObjectWriter withMaxWaitTime(int maxWaitTimeInSeconds) {
        this.maxWaitTimeInSeconds = maxWaitTimeInSeconds;
        return this;
    }

    /** Callback notified per chunk as well as on overall failure/success. */
    public ObjectWriter withCallback(ObjectWriteCallback callback) {
        this.callback = callback;
        return this;
    }

    /**
     * Performs the write.
     *
     * @return metadata describing the stored object (chunk count, size, ttl, attributes)
     * @throws Exception if any chunk write fails, or if the writes do not
     *         complete within {@code maxWaitTimeInSeconds}; partially written
     *         chunks are deleted on a best-effort basis before rethrowing
     */
    @Override
    public ObjectMetadata call() throws Exception {
        LOG.debug("Writing: " + objectName);

        Preconditions.checkNotNull(objectName, "Must provide a valid object name");
        Preconditions.checkNotNull(is, "Must provide a valid input stream");
        Preconditions.checkNotNull(chunkSize, "Must provide a valid chunkSize");

        final AtomicLong nBytesWritten = new AtomicLong(0);
        final AtomicInteger nChunksWritten = new AtomicInteger(0);
        // First exception raised by any worker; aborts the write
        final AtomicReference<Exception> exception = new AtomicReference<Exception>();

        try {
            final ExecutorService executor = Executors.newFixedThreadPool(concurrencyLevel, new ThreadFactoryBuilder()
                    .setDaemon(true).setNameFormat("ChunkWriter-" + objectName + "-%d").build());

            // Bounds how far the reader can get ahead of the slowest writer
            final BlockingConcurrentWindowCounter chunkCounter = new BlockingConcurrentWindowCounter(concurrencyLevel);

            // Pool of reusable chunk buffers
            final AutoAllocatingLinkedBlockingQueue<ByteBuffer> blocks = new AutoAllocatingLinkedBlockingQueue<ByteBuffer>(
                    concurrencyLevel);

            try {
                // Write file data one block at a time
                boolean done = false;
                while (!done && exception.get() == null) {
                    // This throttles us so we don't get too far ahead of
                    // ourselves if one of the threads is stuck
                    final int chunkNumber = chunkCounter.incrementAndGet();

                    // Get a block or allocate a new one
                    final ByteBuffer bb = blocks.poll(new Supplier<ByteBuffer>() {
                        @Override
                        public ByteBuffer get() {
                            return ByteBuffer.allocate(chunkSize);
                        }
                    });

                    // Reset the array and copy some data
                    bb.position(0);
                    int nBytesRead = readFully(is, bb.array(), 0, chunkSize);
                    if (nBytesRead > 0) {
                        bb.limit(nBytesRead);

                        // Send data in a worker thread
                        executor.submit(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    if (exception.get() == null) {
                                        LOG.debug("WRITE " + chunkNumber + " size=" + bb.limit());
                                        provider.writeChunk(objectName, chunkNumber, bb, ttl);
                                        callback.onChunk(chunkNumber, bb.limit());
                                        nBytesWritten.addAndGet(bb.limit());
                                        nChunksWritten.incrementAndGet();
                                    }
                                }
                                catch (Exception e) {
                                    // Keep the stack trace; message alone is not diagnosable
                                    LOG.error("Failed to write chunk " + chunkNumber + " of " + objectName, e);
                                    exception.compareAndSet(null, e);
                                    callback.onChunkException(chunkNumber, e);
                                }
                                finally {
                                    // Return the buffer to the pool and open the window
                                    blocks.add(bb);
                                    chunkCounter.release(chunkNumber);
                                }
                            }
                        });
                    }
                    else {
                        done = true;
                    }
                }
            }
            finally {
                executor.shutdown();
                if (!executor.awaitTermination(maxWaitTimeInSeconds, TimeUnit.SECONDS)) {
                    throw new Exception("Took too long to write object: " + objectName);
                }

                // Rethrow any exception we got in a thread
                if (exception.get() != null) {
                    throw exception.get();
                }
            }

            ObjectMetadata objMetaData = new ObjectMetadata()
                    .setChunkCount(nChunksWritten.get())
                    .setObjectSize(nBytesWritten.get())
                    .setChunkSize(chunkSize)
                    .setTtl(ttl)
                    .setAttributes(attributes);
            provider.writeMetadata(objectName, objMetaData);
            callback.onSuccess();
            return objMetaData;
        }
        catch (Exception e) {
            callback.onFailure(e);
            LOG.warn("Failed to write object " + objectName, e);
            // Best-effort cleanup of any chunks that made it to storage
            try {
                provider.deleteObject(objectName, nChunksWritten.get() + concurrencyLevel);
            }
            catch (Exception e2) {
                LOG.warn("Failed to delete partial object " + objectName, e2);
            }
            throw e;
        }
    }

    /**
     * Should switch to IOUtils.read() when we update to the latest version of
     * commons-io
     *
     * Reads until {@code len} bytes are filled or EOF is reached.
     *
     * @param in  source stream
     * @param b   destination buffer
     * @param off offset into {@code b}
     * @param len maximum number of bytes to read
     * @return number of bytes read, or -1 on immediate EOF
     * @throws IOException on stream failure
     */
    private static int readFully(InputStream in, byte[] b, int off, int len) throws IOException {
        int total = 0;
        for (;;) {
            int got = in.read(b, off + total, len - total);
            if (got < 0) {
                return (total == 0) ? -1 : total;
            }
            else {
                total += got;
                if (total == len)
                    return total;
            }
        }
    }
}
| 8,051 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/storage/ObjectDirectoryLister.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.storage;
import java.util.Map;
import java.util.concurrent.Callable;
/**
 * Intended to list the objects stored under a path prefix.
 *
 * NOTE(review): {@link #call()} is an unimplemented stub and always returns
 * {@code null}; the {@code provider} and {@code path} fields are stored but
 * never used.  Callers should not rely on this class until a real listing is
 * implemented (prefer returning an empty map over null at that point).
 */
public class ObjectDirectoryLister implements Callable<Map<String, ObjectMetadata>> {
    // Storage backend to enumerate (unused by the current stub)
    private final ChunkedStorageProvider provider;
    // Path prefix to list (unused by the current stub)
    private final String path;

    public ObjectDirectoryLister(ChunkedStorageProvider provider, String path) {
        this.provider = provider;
        this.path = path;
    }

    /**
     * @return always {@code null} — listing is not implemented yet
     */
    @Override
    public Map<String, ObjectMetadata> call() throws Exception {
        return null;
    }
}
| 8,052 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/functions/RowCopierFunction.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.functions;
import java.io.Flushable;
import java.io.IOException;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.collect.Sets;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Row;
/**
* Function to copy rows into a target column family
*
* TODO: Failover, retry
*
* @author elandau
*
* @param <K>
* @param <C>
*/
/**
 * Function to copy rows into a target column family
 *
 * Each calling thread accumulates rows into its own MutationBatch (via a
 * ThreadLocal) and executes it every {@code batchSize} rows; {@link #flush()}
 * executes whatever remains in every thread's batch.
 *
 * Fix over the previous revision: {@link #flush()} now iterates the shared
 * {@code contexts} set under the same lock that {@link #apply(Row)} uses to
 * add to it, avoiding a data race / ConcurrentModificationException when
 * flush runs while worker threads are still registering contexts.
 *
 * TODO: Failover, retry
 *
 * @author elandau
 *
 * @param <K>
 * @param <C>
 */
public class RowCopierFunction<K,C> implements Function<Row<K,C>, Boolean>, Flushable {
    private static final Logger LOG = LoggerFactory.getLogger(RowCopierFunction.class);

    private static final int DEFAULT_BATCH_SIZE = 100;

    /** Fluent builder for {@link RowCopierFunction}. */
    public static class Builder<K,C> {
        private final ColumnFamily<K,C> columnFamily;
        private final Keyspace keyspace;
        private int batchSize = DEFAULT_BATCH_SIZE;

        public Builder(Keyspace keyspace, ColumnFamily<K,C> columnFamily) {
            this.columnFamily = columnFamily;
            this.keyspace = keyspace;
        }

        /** Number of rows buffered per thread before the batch is executed. */
        public Builder<K,C> withBatchSize(int batchSize) {
            this.batchSize = batchSize;
            return this;
        }

        public RowCopierFunction<K,C> build() {
            return new RowCopierFunction<K,C>(this);
        }
    }

    public static <K, C> Builder<K,C> builder(Keyspace keyspace, ColumnFamily<K,C> columnFamily) {
        return new Builder<K,C>(keyspace, columnFamily);
    }

    private final ColumnFamily<K,C> columnFamily;
    private final Keyspace keyspace;
    private final int batchSize;
    // Per-thread batch state; registered in 'contexts' so flush() can reach them all
    private final ThreadLocal<ThreadContext> context = new ThreadLocal<ThreadContext>();
    // Guarded by synchronized(this) for both add (apply) and iteration (flush)
    private final Set<ThreadContext> contexts = Sets.newIdentityHashSet();

    /** Mutable per-thread batch: the mutation being built and its row count. */
    private static class ThreadContext {
        MutationBatch mb;
        int counter = 0;
    }

    private RowCopierFunction(Builder<K,C> builder) {
        this.columnFamily = builder.columnFamily;
        this.batchSize = builder.batchSize;
        this.keyspace = builder.keyspace;
    }

    /**
     * Copies one row (preserving column timestamps and TTLs) into this
     * thread's mutation batch, executing the batch when it reaches
     * {@code batchSize} rows.
     *
     * @return false if executing a full batch failed, true otherwise
     */
    @Override
    public Boolean apply(Row<K, C> row) {
        ThreadContext context = this.context.get();
        if (context == null) {
            context = new ThreadContext();
            context.mb = keyspace.prepareMutationBatch();
            this.context.set(context);

            synchronized (this) {
                contexts.add(context);
            }
        }

        ColumnListMutation<C> mbRow = context.mb.withRow(columnFamily, row.getKey());
        context.mb.lockCurrentTimestamp();
        for (Column<C> column : row.getColumns()) {
            // Preserve the source column's timestamp and TTL on the copy
            mbRow.setTimestamp(column.getTimestamp());
            mbRow.putColumn(column.getName(), column.getByteBufferValue(), column.getTtl());
        }

        context.counter++;
        if (context.counter == batchSize) {
            try {
                context.mb.execute();
                context.counter = 0;
            }
            catch (Exception e) {
                LOG.error("Failed to write mutation", e);
                return false;
            }
        }
        return true;
    }

    /**
     * Executes any partially filled batches accumulated by all threads.
     * Failures are logged, not rethrown.
     */
    @Override
    public void flush() throws IOException {
        // Same lock that apply() uses when registering a new context; gives a
        // consistent view of the set while worker threads may still be adding
        synchronized (this) {
            for (ThreadContext context : contexts) {
                try {
                    context.mb.execute();
                } catch (ConnectionException e) {
                    LOG.error("Failed to write mutation", e);
                }
            }
        }
    }
}
| 8,053 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/functions/RowCounterFunction.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.functions;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.base.Function;
import com.netflix.astyanax.model.Row;
/**
* Simple function to counter the number of rows
*
* @author elandau
*
* @param <K>
* @param <C>
*/
/**
 * Simple function to count the number of rows passed through it.
 * Thread-safe: the count is kept in an {@link AtomicLong}.
 *
 * @param <K>
 * @param <C>
 */
public class RowCounterFunction<K,C> implements Function<Row<K,C>, Boolean> {
    private final AtomicLong rowCount = new AtomicLong(0);

    /** Increments the count; the row contents are ignored. Always returns true. */
    @Override
    public Boolean apply(Row<K,C> input) {
        rowCount.incrementAndGet();
        return Boolean.TRUE;
    }

    /** @return number of rows seen since construction or the last {@link #reset()} */
    public long getCount() {
        return rowCount.get();
    }

    /** Resets the count back to zero. */
    public void reset() {
        rowCount.set(0);
    }
}
| 8,054 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/functions/TraceFunction.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.functions;
import java.io.IOException;
import java.io.OutputStream;
import com.google.common.base.Function;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Row;
/**
* Simple function to trace the contents
* @author elandau
*
* @param <K>
* @param <C>
*/
/**
 * Simple function that prints a one-line summary per row (key, approximate
 * byte size, column count) to an output stream, optionally followed by one
 * line per column.  {@code apply} is synchronized so concurrent callers do
 * not interleave output.
 *
 * NOTE(review): the format strings use "%dl" which prints the number followed
 * by a literal 'l' (e.g. "42l") — presumably "%d" was intended; confirm
 * before changing since downstream tooling may parse the current output.
 */
public class TraceFunction<K,C> implements Function<Row<K,C>, Boolean> {
    /** Fluent builder; the column family argument is accepted but unused. */
    public static class Builder<K,C> {
        // Destination for trace output; defaults to stdout
        private OutputStream out = System.out;
        // When true, one line per column is emitted after the row summary
        private boolean showColumns = false;

        public Builder(ColumnFamily<K,C> columnFamily) {
        }

        public Builder<K,C> withOutputStream(OutputStream out) {
            this.out = out;
            return this;
        }

        public Builder<K,C> withShowColumns(boolean showColumns) {
            this.showColumns = showColumns;
            return this;
        }

        public TraceFunction<K,C> build() {
            return new TraceFunction<K,C>(this);
        }
    }

    public static <K, C> Builder<K,C> builder(ColumnFamily<K,C> columnFamily) {
        return new Builder<K,C>(columnFamily);
    }

    private final OutputStream out;
    private final boolean showColumns;

    private TraceFunction(Builder<K,C> builder) {
        this.out = builder.out;
        this.showColumns = builder.showColumns;
    }

    /**
     * Writes the row summary (and optionally each column) to the stream.
     * Always returns true, even if writing fails.
     */
    @Override
    public synchronized Boolean apply(Row<K, C> row) {
        // Approximate serialized size: sum of raw name + value buffer limits
        long size = 0;
        for (Column<C> column : row.getColumns()) {
            size += column.getRawName().limit() + column.getByteBufferValue().limit();
        }

        StringBuilder sb = new StringBuilder();
        sb.append(String.format("- row: '%s' size: '%dl' count: '%dl' \n", row.getKey(), size, row.getColumns().size()));
        if (showColumns) {
            for (Column<C> column : row.getColumns()) {
                sb.append(String.format(" '%s' (ts='%dl', ttl='%d')\n", column.getName(), column.getTimestamp(), column.getTtl()));
            }
        }

        try {
            out.write(sb.toString().getBytes());
        } catch (IOException e) {
            // NOTE(review): write failures are silently swallowed; tracing is
            // best-effort, but consider at least logging the exception
        }
        return true;
    }
}
| 8,055 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/functions/ColumnCounterFunction.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.recipes.functions;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.base.Function;
import com.netflix.astyanax.model.Row;
/**
* Very basic function to count the total number of columns
*
* @author elandau
*
* @param <K>
* @param <C>
*/
/**
 * Very basic function to count the total number of columns across all rows
 * passed through it.  Thread-safe via an {@link AtomicLong}.
 *
 * @param <K>
 * @param <C>
 */
public class ColumnCounterFunction<K,C> implements Function<Row<K,C>, Boolean> {
    private final AtomicLong totalColumns = new AtomicLong(0);

    /** Adds the row's column count to the running total. Always returns true. */
    @Override
    public Boolean apply(Row<K,C> input) {
        totalColumns.addAndGet(input.getColumns().size());
        return Boolean.TRUE;
    }

    /** @return total columns seen since construction or the last {@link #reset()} */
    public long getCount() {
        return totalColumns.get();
    }

    /** Resets the total back to zero. */
    public void reset() {
        totalColumns.set(0);
    }
}
| 8,056 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/uniqueness/UniquenessConstraint.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.uniqueness;
import com.google.common.base.Function;
import com.netflix.astyanax.MutationBatch;
/**
 * Contract for claiming a uniqueness constraint on one or more rows.
 */
public interface UniquenessConstraint {
    /**
     * Acquire the row(s) for uniqueness. Call release() when the uniqueness on
     * the row(s) is no longer needed, such as when deleting the rows.
     *
     * @throws NotUniqueException if another client already holds the row(s)
     * @throws Exception on any underlying storage failure
     */
    void acquire() throws NotUniqueException, Exception;

    /**
     * Release the uniqueness lock for this row. Only call this when you no longer
     * need the uniqueness lock
     *
     * @throws Exception on any underlying storage failure
     */
    void release() throws Exception;

    /**
     * Acquire the uniqueness constraint and apply the final mutation if the
     * row is found to be unique
     * @param mutation caller-prepared mutation to commit along with the claim
     * @throws NotUniqueException if another client already holds the row(s)
     * @throws Exception on any underlying storage failure
     *
     * @deprecated This method doesn't actually work because the MutationBatch timestamp being behind
     */
    @Deprecated
    void acquireAndMutate(MutationBatch mutation) throws NotUniqueException, Exception;

    /**
     * Acquire the uniqueness constraint and call the mutate callback to fill a mutation.
     * The callback receives a freshly prepared MutationBatch to populate.
     *
     * @param callback fills the batch that is committed with the claim; may be null
     * @throws NotUniqueException if another client already holds the row(s)
     * @throws Exception on any underlying storage failure
     */
    void acquireAndApplyMutation(Function<MutationBatch, Boolean> callback) throws NotUniqueException, Exception;
}
| 8,057 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/uniqueness/ColumnPrefixUniquenessConstraint.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.uniqueness;
import java.util.Map.Entry;
import com.google.common.base.Function;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.recipes.locks.ColumnPrefixDistributedRowLock;
/**
* Perform a uniqueness constraint using the locking recipe. The usage here is to
* take the lock and then re-write the column without a TTL to 'persist' it in cassandra.
*
* @author elandau
*
* @param <K>
*/
/**
 * Uniqueness constraint built on the distributed row-lock recipe: the lock is
 * taken first, and on success the lock column is rewritten without a TTL so
 * it persists as the record of the unique claim.
 *
 * @author elandau
 *
 * @param <K> row key type
 */
public class ColumnPrefixUniquenessConstraint<K> implements UniquenessConstraint {

    private final ColumnPrefixDistributedRowLock<K> lock;

    public ColumnPrefixUniquenessConstraint(Keyspace keyspace, ColumnFamily<K, String> columnFamily, K key) {
        lock = new ColumnPrefixDistributedRowLock<K>(keyspace, columnFamily, key);
    }

    /** TTL placed on the lock column while acquiring. */
    public ColumnPrefixUniquenessConstraint<K> withTtl(Integer ttl) {
        lock.withTtl(ttl);
        return this;
    }

    /** Consistency level used for all lock reads and writes. */
    public ColumnPrefixUniquenessConstraint<K> withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        lock.withConsistencyLevel(consistencyLevel);
        return this;
    }

    /** Column-name prefix that identifies lock columns in the row. */
    public ColumnPrefixUniquenessConstraint<K> withPrefix(String prefix) {
        lock.withColumnPrefix(prefix);
        return this;
    }

    /**
     * Specify the unique value to use for the column name when doing the uniqueness
     * constraint. In many cases this will be a TimeUUID that is used as the row
     * key to store the actual data for the unique key tracked in this column
     * family.
     *
     * @param unique
     * @return this, for chaining
     */
    public ColumnPrefixUniquenessConstraint<K> withUniqueId(String unique) {
        lock.withLockId(unique);
        return this;
    }

    /**
     * Reads back the single persisted unique column (the lock column whose
     * value is 0) with the prefix stripped off.
     *
     * @throws IllegalStateException if more than one such column exists
     * @throws NotFoundException if none exists
     */
    public String readUniqueColumn() throws Exception {
        String unique = null;
        for (Entry<String, Long> entry : lock.readLockColumns().entrySet()) {
            if (entry.getValue() != 0) {
                continue;   // a pending lock column, not the persisted unique column
            }
            if (unique != null) {
                throw new IllegalStateException("Key has multiple locks");
            }
            unique = entry.getKey().substring(lock.getPrefix().length());
        }

        if (unique == null) {
            throw new NotFoundException("Unique column not found for " + lock.getKey());
        }
        return unique;
    }

    /** Acquires the constraint with no additional mutation. */
    @Override
    public void acquire() throws NotUniqueException, Exception {
        acquireAndApplyMutation(null);
    }

    /**
     * @deprecated Use acquireAndExecuteMutation instead to avoid timestamp issues
     */
    @Override
    @Deprecated
    public void acquireAndMutate(MutationBatch m) throws NotUniqueException, Exception {
        lock.acquire();
        // Pin the timestamp, swap the TTL'd lock column for a permanent one,
        // then commit everything in the caller's batch
        m.lockCurrentTimestamp();
        lock.fillReleaseMutation(m, true);
        lock.fillLockMutation(m, null, null);
        m.setConsistencyLevel(lock.getConsistencyLevel())
         .execute();
    }

    /**
     * Acquires the constraint, lets the callback populate a fresh batch, and
     * commits the callback's mutations together with the permanent claim.
     */
    @Override
    public void acquireAndApplyMutation(Function<MutationBatch, Boolean> callback) throws NotUniqueException, Exception {
        lock.acquire();

        MutationBatch batch = lock.getKeyspace().prepareMutationBatch();
        if (callback != null) {
            callback.apply(batch);
        }
        lock.fillReleaseMutation(batch, true);
        lock.fillLockMutation(batch, null, null);
        batch.setConsistencyLevel(lock.getConsistencyLevel())
             .execute();
    }

    /** Releases the claim; the key becomes available again. */
    @Override
    public void release() throws Exception {
        lock.release();
    }
}
| 8,058 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/uniqueness/DedicatedMultiRowUniquenessConstraint.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.uniqueness;
import java.util.List;
import com.google.common.base.Function;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.recipes.locks.BusyLockException;
import com.netflix.astyanax.recipes.locks.StaleLockException;
/**
* Multi-row uniqueness constraint where all involved column families are dedicated
 * only to the uniqueness constraint.
* @author elandau
*
* @param <C>
*/
public class DedicatedMultiRowUniquenessConstraint<C> implements UniquenessConstraint {

    private final Keyspace keyspace;

    /** Optional TTL (seconds) so phase-1 columns expire if the client crashes mid-operation. */
    private Integer ttl = null;
    private ConsistencyLevel consistencyLevel = ConsistencyLevel.CL_LOCAL_QUORUM;

    /** Column name written to every participating row; shared by the whole constraint. */
    private final C uniqueColumnName;

    /**
     * One (column family, row key) participant of the multi-row constraint.
     */
    private class Row<K> {
        private final ColumnFamily<K, C> columnFamily;
        private final K row;

        Row(ColumnFamily<K, C> columnFamily, K row) {
            super();
            this.columnFamily = columnFamily;
            this.row = row;
        }

        /** Queue a write of the unique column for this row; ttl may be null for no expiry. */
        void fillMutation(MutationBatch m, Integer ttl) {
            m.withRow(columnFamily, row).putEmptyColumn(uniqueColumnName, ttl);
        }

        /** Queue a delete of the unique column for this row. */
        void fillReleaseMutation(MutationBatch m) {
            m.withRow(columnFamily, row).deleteColumn(uniqueColumnName);
        }

        /**
         * Phase 2: read back all columns of the row. Exactly one column must exist,
         * otherwise another client is competing for the same constraint.
         */
        void verifyLock() throws Exception {
            ColumnList<C> result = keyspace.prepareQuery(columnFamily).setConsistencyLevel(consistencyLevel)
                    .getKey(row).execute().getResult();
            if (result.size() != 1) {
                throw new NotUniqueException(row.toString());
            }
        }

        /**
         * Return the committed (non-expiring) unique column for this row.
         *
         * @throws NotFoundException if the row has no committed unique column
         */
        Column<C> getUniqueColumn() throws ConnectionException {
            ColumnList<C> columns = keyspace.prepareQuery(columnFamily).getKey(row).execute().getResult();
            Column<C> foundColumn = null;
            for (Column<C> column : columns) {
                // A TTL of 0 identifies the committed column (phase 3 rewrites it without a TTL).
                if (column.getTtl() == 0) {
                    if (foundColumn != null)
                        throw new RuntimeException("Row has duplicate unique columns");
                    foundColumn = column;
                }
            }
            if (foundColumn == null) {
                throw new NotFoundException("Unique column not found");
            }
            return foundColumn;
        }
    }

    private final List<Row<?>> locks = Lists.newArrayList();

    public DedicatedMultiRowUniquenessConstraint(Keyspace keyspace, Supplier<C> uniqueColumnSupplier) {
        this.keyspace = keyspace;
        this.uniqueColumnName = uniqueColumnSupplier.get();
    }

    public DedicatedMultiRowUniquenessConstraint(Keyspace keyspace, C uniqueColumnName) {
        this.keyspace = keyspace;
        this.uniqueColumnName = uniqueColumnName;
    }

    /**
     * TTL to use for the uniqueness operation. This is the TTL for the columns
     * to expire in the event of a client crash before the uniqueness can be
     * committed.
     *
     * @param ttl expiry in seconds, or null for none
     * @return this, for chaining
     */
    public DedicatedMultiRowUniquenessConstraint<C> withTtl(Integer ttl) {
        this.ttl = ttl;
        return this;
    }

    /**
     * Consistency level used for the lock mutations and verification reads.
     *
     * @param consistencyLevel
     * @return this, for chaining
     */
    public DedicatedMultiRowUniquenessConstraint<C> withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    /**
     * Add a row to the set of rows being tested for uniqueness.
     *
     * @param columnFamily
     * @param rowKey
     * @return this, for chaining
     */
    public <K> DedicatedMultiRowUniquenessConstraint<C> withRow(ColumnFamily<K, C> columnFamily, K rowKey) {
        locks.add(new Row<K>(columnFamily, rowKey));
        return this;
    }

    /** @return the unique column name written to every row */
    public C getLockColumn() {
        return uniqueColumnName;
    }

    @Override
    public void acquire() throws NotUniqueException, Exception {
        acquireAndApplyMutation(null);
    }

    /**
     * @deprecated Use {@link #acquireAndApplyMutation(Function)} instead to avoid timestamp issues
     */
    @Override
    @Deprecated
    public void acquireAndMutate(final MutationBatch other) throws NotUniqueException, Exception {
        acquireAndApplyMutation(new Function<MutationBatch, Boolean>() {
            @Override
            public Boolean apply(MutationBatch input) {
                if (other != null) {
                    input.mergeShallow(other);
                }
                return true;
            }
        });
    }

    @Override
    public void acquireAndApplyMutation(Function<MutationBatch, Boolean> callback) throws NotUniqueException, Exception {
        try {
            // Phase 1: insert the (TTL'd) unique column for all rows in a single batch.
            MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
            for (Row<?> lock : locks) {
                lock.fillMutation(m, ttl);
            }
            m.execute();

            // Phase 2: verify each row holds exactly one column (no concurrent claim).
            for (Row<?> lock : locks) {
                lock.verifyLock();
            }

            // Phase 3: commit by rewriting the unique columns without a TTL.
            // NOTE(review): the batch is reused after execute(); this relies on
            // execute() discarding already-executed mutations -- confirm against
            // the MutationBatch implementation in use.
            for (Row<?> lock : locks) {
                lock.fillMutation(m, null);
            }
            if (callback != null)
                callback.apply(m);
            m.execute();
        }
        catch (BusyLockException e) {
            release();
            throw new NotUniqueException(e);
        }
        catch (StaleLockException e) {
            release();
            throw new NotUniqueException(e);
        }
        catch (Exception e) {
            // Best-effort cleanup of any columns written in phase 1, then rethrow.
            release();
            throw e;
        }
    }

    @Override
    public void release() throws Exception {
        MutationBatch m = keyspace.prepareMutationBatch();
        for (Row<?> lock : locks) {
            lock.fillReleaseMutation(m);
        }
        m.execute();
    }

    /**
     * Return the unique column shared by the rows, verifying that all rows agree
     * on the same committed column name and value.
     *
     * @return the committed unique column
     * @throws NotUniqueException if the rows are not part of the same constraint
     * @throws IllegalStateException if no rows were added via withRow
     */
    public Column<C> getUniqueColumn() throws Exception {
        if (locks.isEmpty())
            throw new IllegalStateException("Missing call to withRow to add rows to the uniqueness constraint");

        // Fetch the committed unique column from every row.
        List<Column<C>> columns = Lists.newArrayList();
        for (Row<?> row : locks) {
            columns.add(row.getUniqueColumn());
        }

        // All rows must carry the same column name and value, otherwise they do
        // not belong to the same unique group.
        Column<C> foundColumn = columns.get(0);
        for (int i = 1; i < columns.size(); i++) {
            Column<C> nextColumn = columns.get(i);
            if (!nextColumn.getRawName().equals(foundColumn.getRawName())) {
                throw new NotUniqueException("The provided rows are not part of the same uniqueness constraint");
            }
            if (foundColumn.hasValue() != nextColumn.hasValue()) {
                throw new NotUniqueException("The provided rows are not part of the same uniqueness constraint");
            }
            if (foundColumn.hasValue() && !nextColumn.getByteBufferValue().equals(foundColumn.getByteBufferValue())) {
                throw new NotUniqueException("The provided rows are not part of the same uniqueness constraint");
            }
        }
        return foundColumn;
    }
}
| 8,059 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/uniqueness/MultiRowUniquenessConstraint.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.uniqueness;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.recipes.locks.BusyLockException;
import com.netflix.astyanax.recipes.locks.ColumnPrefixDistributedRowLock;
import com.netflix.astyanax.recipes.locks.StaleLockException;
import com.netflix.astyanax.util.TimeUUIDUtils;
/**
* Check uniqueness for multiple rows. This test is done by
* 1. First writing a unique column to all rows, in a single batch. Include a TTL for some failure conditions.
* 2. Reading back the unique columns from each row (must be done in a separate call)
* and making sure there is only one such column
* 3. Committing the columns without a TTL
*
*
* @author elandau
*
*/
public class MultiRowUniquenessConstraint implements UniquenessConstraint {

    private final Keyspace keyspace;
    private final List<ColumnPrefixDistributedRowLock<String>> locks = Lists.newArrayList();

    /** Optional TTL (seconds) so phase-1 lock columns expire if the client crashes. */
    private Integer ttl = null;
    private ConsistencyLevel consistencyLevel = ConsistencyLevel.CL_LOCAL_QUORUM;

    /** Lock column id written to every row; defaults to a unique TimeUUID. */
    private String lockColumn;
    private String prefix = ColumnPrefixDistributedRowLock.DEFAULT_LOCK_PREFIX;

    public MultiRowUniquenessConstraint(Keyspace keyspace) {
        this.keyspace = keyspace;
        this.lockColumn = TimeUUIDUtils.getUniqueTimeUUIDinMicros().toString();
    }

    /**
     * TTL to use for the uniqueness operation. This is the TTL for the columns
     * to expire in the event of a client crash before the uniqueness can be
     * committed.
     *
     * @param ttl expiry in seconds, or null for none
     * @return this, for chaining
     */
    public MultiRowUniquenessConstraint withTtl(Integer ttl) {
        this.ttl = ttl;
        return this;
    }

    /**
     * Specify the prefix that uniquely distinguishes the lock columns from data
     * columns.
     *
     * @param prefix
     * @return this, for chaining
     */
    public MultiRowUniquenessConstraint withColumnPrefix(String prefix) {
        this.prefix = prefix;
        return this;
    }

    /**
     * Override the autogenerated lock column.
     *
     * @param column
     * @return this, for chaining
     */
    public MultiRowUniquenessConstraint withLockId(String column) {
        this.lockColumn = column;
        return this;
    }

    /**
     * Consistency level used for the lock mutations and verification reads.
     *
     * @param consistencyLevel
     * @return this, for chaining
     */
    public MultiRowUniquenessConstraint withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    /**
     * Add a row to the set of rows being tested for uniqueness.
     *
     * @param columnFamily
     * @param rowKey
     * @return this, for chaining
     */
    public MultiRowUniquenessConstraint withRow(ColumnFamily<String, String> columnFamily, String rowKey) {
        locks.add(new ColumnPrefixDistributedRowLock<String>(keyspace, columnFamily, rowKey));
        return this;
    }

    /**
     * @return the lock column written to ALL rows
     */
    public String getLockColumn() {
        return this.lockColumn;
    }

    @Override
    public void acquire() throws NotUniqueException, Exception {
        acquireAndApplyMutation(null);
    }

    @Override
    public void acquireAndApplyMutation(Function<MutationBatch, Boolean> callback) throws NotUniqueException, Exception {
        // One microsecond-granularity timestamp shared by all lock columns in this attempt.
        long now = TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis());

        try {
            // Phase 1: insert the lock check column for all rows in a single batch mutation.
            MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
            for (ColumnPrefixDistributedRowLock<String> lock : locks) {
                lock.withConsistencyLevel(consistencyLevel)
                        .withColumnPrefix(prefix)
                        .withLockId(lockColumn)
                        .fillLockMutation(m, now, ttl);
            }
            m.execute();

            // Phase 2: check each lock in order.
            for (ColumnPrefixDistributedRowLock<String> lock : locks) {
                lock.verifyLock(now);
            }

            // Phase 3: commit the unique columns (no timestamp, no TTL).
            // NOTE(review): this batch is built without an explicit consistency level,
            // unlike the phase-1 batch above -- confirm this is intentional.
            m = keyspace.prepareMutationBatch();
            for (ColumnPrefixDistributedRowLock<String> lock : locks) {
                lock.fillLockMutation(m, null, null);
            }
            if (callback != null)
                callback.apply(m);
            m.execute();
        }
        catch (BusyLockException e) {
            release();
            throw new NotUniqueException(e);
        }
        catch (StaleLockException e) {
            release();
            throw new NotUniqueException(e);
        }
        catch (Exception e) {
            // Best-effort cleanup of the phase-1 columns, then rethrow.
            release();
            throw e;
        }
    }

    /**
     * @deprecated Use {@link #acquireAndApplyMutation(Function)} instead to avoid timestamp issues
     */
    @Override
    @Deprecated
    public void acquireAndMutate(final MutationBatch mutation) throws NotUniqueException, Exception {
        acquireAndApplyMutation(new Function<MutationBatch, Boolean>() {
            @Override
            public Boolean apply(MutationBatch input) {
                if (mutation != null)
                    input.mergeShallow(mutation);
                return true;
            }
        });
    }

    @Override
    public void release() throws Exception {
        MutationBatch m = keyspace.prepareMutationBatch();
        for (ColumnPrefixDistributedRowLock<String> lock : locks) {
            lock.fillReleaseMutation(m, false);
        }
        m.execute();
    }
}
| 8,060 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/uniqueness/RowUniquenessConstraint.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.uniqueness;
import java.nio.ByteBuffer;
import com.google.common.base.Function;
import com.google.common.base.Supplier;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.serializers.StringSerializer;
/**
* Test uniqueness for a single row. This implementation allows for any
* column type. If the column family uses UTF8Type for the comparator
* then it is preferable to use ColumnPrefixUniquenessConstraint.
*
* @author elandau
*
* @param <K>
* @param <C>
*/
public class RowUniquenessConstraint<K, C> implements UniquenessConstraint {

    private final ColumnFamily<K, C> columnFamily;
    private final Keyspace keyspace;
    private final C uniqueColumn;
    private final K key;

    private ConsistencyLevel consistencyLevel = ConsistencyLevel.CL_LOCAL_QUORUM;
    /** Optional payload stored in the unique column, normally a 'foreign' key. */
    private ByteBuffer data = null;
    /** Optional TTL (seconds) so the phase-1 column expires if the client crashes. */
    private Integer ttl = null;

    public RowUniquenessConstraint(Keyspace keyspace, ColumnFamily<K, C> columnFamily, K key,
            Supplier<C> uniqueColumnSupplier) {
        this.keyspace = keyspace;
        this.columnFamily = columnFamily;
        this.uniqueColumn = uniqueColumnSupplier.get();
        this.key = key;
    }

    public RowUniquenessConstraint<K, C> withTtl(Integer ttl) {
        this.ttl = ttl;
        return this;
    }

    public RowUniquenessConstraint<K, C> withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    /**
     * Specify the data value to add to the column.
     *
     * @param data
     * @return this, for chaining
     */
    public RowUniquenessConstraint<K, C> withData(ByteBuffer data) {
        this.data = data;
        return this;
    }

    public RowUniquenessConstraint<K, C> withData(String data) {
        this.data = StringSerializer.get().fromString(data);
        return this;
    }

    @Override
    public void acquire() throws NotUniqueException, Exception {
        acquireAndApplyMutation(null);
    }

    /**
     * @deprecated Use {@link #acquireAndApplyMutation(Function)} instead to avoid timestamp issues
     */
    @Override
    @Deprecated
    public void acquireAndMutate(final MutationBatch mutation) throws NotUniqueException, Exception {
        acquireAndApplyMutation(new Function<MutationBatch, Boolean>() {
            @Override
            public Boolean apply(MutationBatch input) {
                if (mutation != null)
                    input.mergeShallow(mutation);
                return true;
            }
        });
    }

    @Override
    public void acquireAndApplyMutation(Function<MutationBatch, Boolean> callback) throws NotUniqueException, Exception {
        try {
            // Phase 1: write a unique column with a TTL as crash protection.
            MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
            if (data == null) {
                m.withRow(columnFamily, key).putEmptyColumn(uniqueColumn, ttl);
            }
            else {
                m.withRow(columnFamily, key).putColumn(uniqueColumn, data, ttl);
            }
            m.execute();

            // Phase 2: read back all columns. There should be only 1.
            ColumnList<C> result = keyspace.prepareQuery(columnFamily).setConsistencyLevel(consistencyLevel)
                    .getKey(key).execute().getResult();
            if (result.size() != 1) {
                throw new NotUniqueException(key.toString());
            }

            // Phase 3: persist the column without a TTL, plus any caller mutations.
            m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
            if (callback != null)
                callback.apply(m);
            if (data == null) {
                m.withRow(columnFamily, key).putEmptyColumn(uniqueColumn, null);
            }
            else {
                m.withRow(columnFamily, key).putColumn(uniqueColumn, data, null);
            }
            m.execute();
        }
        catch (Exception e) {
            // Best-effort cleanup of the phase-1 column, then rethrow.
            release();
            throw e;
        }
    }

    @Override
    public void release() throws Exception {
        MutationBatch m = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
        m.withRow(columnFamily, key).deleteColumn(uniqueColumn);
        m.execute();
    }

    /**
     * Read the data stored with the unique row. This data is normally a 'foreign' key to
     * another column family.
     *
     * @return the value of the committed (non-TTL'd) unique column
     * @throws NotFoundException if the row holds no committed unique column
     * @throws IllegalStateException if multiple committed columns are found
     */
    public ByteBuffer readData() throws Exception {
        ColumnList<C> result = keyspace
                .prepareQuery(columnFamily)
                .setConsistencyLevel(consistencyLevel)
                .getKey(key)
                .execute()
                .getResult();

        boolean hasColumn = false;
        ByteBuffer data = null;
        for (Column<C> column : result) {
            // A TTL of 0 identifies the committed column.
            if (column.getTtl() == 0) {
                if (hasColumn) {
                    throw new IllegalStateException("Row has multiple uniqueness locks");
                }
                hasColumn = true;
                data = column.getByteBufferValue();
            }
        }
        if (!hasColumn) {
            throw new NotFoundException(this.key.toString() + " has no uniqueness lock");
        }
        return data;
    }

    public String readDataAsString() throws Exception {
        return StringSerializer.get().fromByteBuffer(readData());
    }
}
| 8,061 |
0 | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes | Create_ds/astyanax/astyanax-recipes/src/main/java/com/netflix/astyanax/recipes/uniqueness/NotUniqueException.java | /*******************************************************************************
* Copyright 2011 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.netflix.astyanax.recipes.uniqueness;
/**
 * Thrown when a uniqueness constraint cannot be satisfied, i.e. the row (or set
 * of rows) being claimed already carries a competing unique column.
 */
public class NotUniqueException extends Exception {

    private static final long serialVersionUID = -3735805268823536495L;

    /** Constraint violation described by a message only. */
    public NotUniqueException(String message) {
        super(message);
    }

    /** Constraint violation with a message and an underlying cause. */
    public NotUniqueException(String message, Exception e) {
        super(message, e);
    }

    /** Constraint violation wrapping an underlying cause (e.g. a lock exception). */
    public NotUniqueException(Exception e) {
        super(e);
    }
}
| 8,062 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/CqlOperationResultImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import java.net.InetAddress;
import java.util.concurrent.TimeUnit;
import com.datastax.driver.core.ExecutionInfo;
import com.datastax.driver.core.QueryTrace;
import com.datastax.driver.core.ResultSet;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.OperationResult;
/**
* Simple impl of {@link OperationResult} that tracks some basic info for every operation execution, such as
* 1. The host that was used for the operation
* 2. The operation attempt count
* 3. The encapsulated result
* 4. The overall latency for the operation.
*
* @author poberai
*
* @param <R>
*/
public class CqlOperationResultImpl<R> implements OperationResult<R> {

    private Host host;
    private R result;
    private int attemptCount = 0;
    private long durationMicros = 0L;

    public CqlOperationResultImpl(ResultSet rs, R result) {
        this.host = parseHostInfo(rs);
        this.result = result;
        this.durationMicros = parseDuration(rs);
    }

    /**
     * Map the java-driver host that served the query to an Astyanax {@link Host}.
     *
     * @return the queried host, or null when it cannot be determined
     */
    private Host parseHostInfo(ResultSet rs) {
        if (rs == null) {
            return null;
        }
        com.datastax.driver.core.Host fromHost = rs.getExecutionInfo().getQueriedHost();
        if (fromHost == null) {
            // Defensive: guard against missing queried-host info instead of
            // letting an NPE escape the constructor.
            return null;
        }
        InetAddress add = fromHost.getAddress();
        // Port is not available from the driver host here; -1 is an "unknown" sentinel.
        Host toHost = new Host(add.getHostAddress(), -1);
        toHost.setRack(fromHost.getRack());
        return toHost;
    }

    /**
     * Extract the server-side trace duration in microseconds when query tracing
     * was enabled for this result; 0 otherwise.
     */
    private long parseDuration(ResultSet rs) {
        if (rs != null) {
            ExecutionInfo info = rs.getExecutionInfo();
            if (info != null) {
                QueryTrace qt = info.getQueryTrace();
                if (qt != null) {
                    return qt.getDurationMicros();
                }
            }
        }
        return 0L;
    }

    @Override
    public Host getHost() {
        return host;
    }

    @Override
    public R getResult() {
        return result;
    }

    /** @return latency in microseconds; 0 when tracing was not enabled */
    @Override
    public long getLatency() {
        return durationMicros;
    }

    @Override
    public long getLatency(TimeUnit units) {
        return units.convert(durationMicros, TimeUnit.MICROSECONDS);
    }

    @Override
    public int getAttemptsCount() {
        return attemptCount;
    }

    @Override
    public void setAttemptsCount(int count) {
        attemptCount = count;
    }
}
| 8,063 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/JavaDriverConnectionPoolMonitorImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistryListener;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.Cluster;
import com.netflix.astyanax.connectionpool.ConnectionPoolMonitor;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.HostConnectionPool;
import com.netflix.astyanax.connectionpool.HostStats;
public class JavaDriverConnectionPoolMonitorImpl implements ConnectionPoolMonitor {
private final AtomicReference<Cluster> cluster = new AtomicReference<Cluster>();
private MetricRegistryListener metricsRegListener = new MetricRegistryListener(){
@Override
public void onGaugeAdded(String name, Gauge<?> gauge) {
// TODO Auto-generated method stub
}
@Override
public void onGaugeRemoved(String name) {
// TODO Auto-generated method stub
}
@Override
public void onCounterAdded(String name, Counter counter) {
// TODO Auto-generated method stub
}
@Override
public void onCounterRemoved(String name) {
// TODO Auto-generated method stub
}
@Override
public void onHistogramAdded(String name, Histogram histogram) {
// TODO Auto-generated method stub
}
@Override
public void onHistogramRemoved(String name) {
// TODO Auto-generated method stub
}
@Override
public void onMeterAdded(String name, Meter meter) {
// TODO Auto-generated method stub
}
@Override
public void onMeterRemoved(String name) {
// TODO Auto-generated method stub
}
@Override
public void onTimerAdded(String name, Timer timer) {
// TODO Auto-generated method stub
}
@Override
public void onTimerRemoved(String name) {
// TODO Auto-generated method stub
}};
public JavaDriverConnectionPoolMonitorImpl() {
}
public JavaDriverConnectionPoolMonitorImpl withJavaDriverMetricsRegistry(MetricRegistryListener metricsRegListener){
this.metricsRegListener = metricsRegListener;
return this;
}
public MetricRegistryListener getMetricsRegistryListener(){
return metricsRegListener;
}
/**
* Returns the number of Cassandra hosts currently known by the driver (that is
* whether they are currently considered up or down).
*
* @return the number of Cassandra hosts currently known by the driver.
*/
@Override
public long getHostCount() {
return cluster.get().getMetrics().getKnownHosts().getValue();
}
/**
* Returns the number of Cassandra hosts the driver is currently connected to
* (that is have at least one connection opened to).
*
* @return the number of Cassandra hosts the driver is currently connected to.
*/
@Override
public long getHostActiveCount() {
return cluster.get().getMetrics().getConnectedToHosts().getValue();
}
/**
* Returns the total number of currently opened connections to Cassandra hosts.
*
* @return The total number of currently opened connections to Cassandra hosts.
*/
public long getNumOpenConnections() {
return cluster.get().getMetrics().getOpenConnections().getValue();
}
/**
* Returns the number of connection to Cassandra nodes errors.
* <p>
* This represents the number of times that a request to a Cassandra node
* has failed due to a connection problem. This thus also corresponds to
* how often the driver had to pick a fallback host for a request.
* <p>
* You can expect a few connection errors when a Cassandra node fails
* (or is stopped) ,but if that number grows continuously you likely have
* a problem.
*
* @return the number of connection to Cassandra nodes errors.
*/
@Override
public long getConnectionCreateFailedCount() {
return cluster.get().getMetrics().getErrorMetrics().getConnectionErrors().getCount();
}
/**
* Returns the number of write requests that returned a timeout (independently
* of the final decision taken by the {@link com.datastax.driver.core.policies.RetryPolicy}).
*
* @return the number of write timeout.
*/
public long getWriteTimeouts() {
return cluster.get().getMetrics().getErrorMetrics().getWriteTimeouts().getCount();
}
/**
* Returns the number of read requests that returned a timeout (independently
* of the final decision taken by the {@link com.datastax.driver.core.policies.RetryPolicy}).
*
* @return the number of read timeout.
*/
public long getReadTimeouts() {
return cluster.get().getMetrics().getErrorMetrics().getReadTimeouts().getCount();
}
/**
* Returns the number of requests that returned errors not accounted for by
* another metric. This includes all types of invalid requests.
*
* @return the number of requests errors not accounted by another
* metric.
*/
@Override
public long getBadRequestCount() {
return cluster.get().getMetrics().getErrorMetrics().getOthers().getCount();
}
/**
* Returns the number of requests that returned an unavailable exception
* (independently of the final decision taken by the
* {@link com.datastax.driver.core.policies.RetryPolicy}).
*
* @return the number of unavailable exceptions.
*/
@Override
public long notFoundCount() {
return cluster.get().getMetrics().getErrorMetrics().getUnavailables().getCount();
}
/**
* Returns the number of times a request was ignored
* due to the {@link com.datastax.driver.core.policies.RetryPolicy}, for
* example due to timeouts or unavailability.
*
* @return the number of times a request was ignored due to the
* {@link com.datastax.driver.core.policies.RetryPolicy}.
*/
@Override
public long getSocketTimeoutCount() {
return cluster.get().getMetrics().getErrorMetrics().getIgnores().getCount();
}
/**
* Returns the number of times a request was retried due to the
* {@link com.datastax.driver.core.policies.RetryPolicy}.
*
* @return the number of times a requests was retried due to the
* {@link com.datastax.driver.core.policies.RetryPolicy}.
*/
@Override
public long getUnknownErrorCount() {
return cluster.get().getMetrics().getErrorMetrics().getRetries().getCount();
}
@Override
public void incOperationFailure(Host host, Exception reason) {
// TODO Auto-generated method stub
}
@Override
public long getOperationFailureCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public void incFailover(Host host, Exception reason) {
// TODO Auto-generated method stub
}
@Override
public long getFailoverCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public void incOperationSuccess(Host host, long latency) {
// TODO Auto-generated method stub
}
@Override
public long getOperationSuccessCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public void incConnectionCreated(Host host) {
// TODO Auto-generated method stub
}
@Override
public long getConnectionCreatedCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public void incConnectionClosed(Host host, Exception reason) {
// TODO Auto-generated method stub
}
@Override
public long getConnectionClosedCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public void incConnectionCreateFailed(Host host, Exception reason) {
// TODO Auto-generated method stub
}
@Override
public void incConnectionBorrowed(Host host, long delay) {
// TODO Auto-generated method stub
}
@Override
public long getConnectionBorrowedCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public long getConnectionReturnedCount() {
return 0;
}
@Override
public void incConnectionReturned(Host host) {
// TODO Auto-generated method stub
}
@Override
public long getPoolExhaustedTimeoutCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public long getOperationTimeoutCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public long getNoHostCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public long getInterruptedCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public long getTransportErrorCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public long getHostAddedCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public long getHostRemovedCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public long getHostDownCount() {
// TODO Auto-generated method stub
return 0;
}
@Override
public void onHostAdded(Host host, HostConnectionPool<?> pool) {
// TODO Auto-generated method stub
}
@Override
public void onHostRemoved(Host host) {
// TODO Auto-generated method stub
}
@Override
public void onHostDown(Host host, Exception reason) {
// TODO Auto-generated method stub
}
@Override
public void onHostReactivated(Host host, HostConnectionPool<?> pool) {
// TODO Auto-generated method stub
}
@Override
public Map<Host, HostStats> getHostStats() {
// TODO Auto-generated method stub
return null;
}
}
| 8,064 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/JavaDriverConfigBuilder.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import java.util.concurrent.TimeUnit;
import com.datastax.driver.core.Configuration;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.MetricsOptions;
import com.datastax.driver.core.PoolingOptions;
import com.datastax.driver.core.ProtocolOptions;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.SocketOptions;
import com.datastax.driver.core.policies.DefaultRetryPolicy;
import com.datastax.driver.core.policies.ExponentialReconnectionPolicy;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.Policies;
import com.datastax.driver.core.policies.ReconnectionPolicy;
import com.datastax.driver.core.policies.RetryPolicy;
import com.datastax.driver.core.policies.RoundRobinPolicy;
/**
* Helpful builder style class for configuring JavaDriver.
*
* @author poberai
*
*/
public class JavaDriverConfigBuilder {
// Config for Policies
private LoadBalancingPolicy loadBalancingPolicy = new RoundRobinPolicy();
private ReconnectionPolicy reconnectionPolicy = new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000);
private RetryPolicy retryPolicy = DefaultRetryPolicy.INSTANCE;
// Config for ProtocolOptions
private int nativeProtocolPort = -1;
// Config for PoolingOptions
private PoolingOptions poolingOptions = new PoolingOptions();
// Config for SocketOptions
private SocketOptions socketOptions = new SocketOptions();
// Config for MetricsOptions
private boolean jmxReportingEnabled = true;
// Config for QueryOptions
private QueryOptions queryOptions = new QueryOptions();
public JavaDriverConfigBuilder() {
super();
}
public JavaDriverConnectionPoolConfigurationImpl build() {
Policies policies = Policies.builder()
.withLoadBalancingPolicy(loadBalancingPolicy)
.withReconnectionPolicy(reconnectionPolicy)
.withRetryPolicy(retryPolicy).build();
ProtocolOptions protocolOptions = (nativeProtocolPort == -1) ? new ProtocolOptions() : new ProtocolOptions(nativeProtocolPort);
MetricsOptions metricsOptions = new MetricsOptions(true, jmxReportingEnabled);
return new JavaDriverConnectionPoolConfigurationImpl(Configuration.builder()
.withPolicies(policies)
.withProtocolOptions(protocolOptions)
.withPoolingOptions(poolingOptions)
.withSocketOptions(socketOptions)
.withMetricsOptions(metricsOptions)
.withQueryOptions(queryOptions).build());
}
public JavaDriverConfigBuilder withLoadBalancingPolicy(LoadBalancingPolicy lbPolicy) {
this.loadBalancingPolicy = lbPolicy;
return this;
}
public JavaDriverConfigBuilder withReconnectionPolicy(ReconnectionPolicy reconnectPolicy) {
this.reconnectionPolicy = reconnectPolicy;
return this;
}
public JavaDriverConfigBuilder withRetryPolicy(RetryPolicy rPolicy) {
this.retryPolicy = rPolicy;
return this;
}
public JavaDriverConfigBuilder withPort(int nativePort) {
this.nativeProtocolPort = nativePort;
return this;
}
public JavaDriverConfigBuilder withCoreConnsPerHost(HostDistance distance, int coreConnections) {
this.poolingOptions.setCoreConnectionsPerHost(distance, coreConnections);
return this;
}
public JavaDriverConfigBuilder withMaxConnsPerHost(HostDistance distance, int maxConnections) {
this.poolingOptions.setMaxConnectionsPerHost(distance, maxConnections);
return this;
}
public JavaDriverConfigBuilder withMinRequestsPerConnection(HostDistance distance, int minRequests) {
this.poolingOptions.setNewConnectionThreshold(distance, minRequests);
return this;
}
public JavaDriverConfigBuilder withMaxRequestsPerConnection(HostDistance distance, int maxRequests) {
this.poolingOptions.setMaxRequestsPerConnection(distance, maxRequests);
return this;
}
public JavaDriverConfigBuilder withConnectTimeout(int timeout, TimeUnit sourceUnit) {
Long connectTimeoutMillis = TimeUnit.MILLISECONDS.convert(timeout, sourceUnit);
this.socketOptions.setConnectTimeoutMillis(connectTimeoutMillis.intValue());
return this;
}
public JavaDriverConfigBuilder withReadTimeout(int timeout, TimeUnit sourceUnit) {
Long readTimeoutMillis = TimeUnit.MILLISECONDS.convert(timeout, sourceUnit);
this.socketOptions.setReadTimeoutMillis(readTimeoutMillis.intValue());
return this;
}
public JavaDriverConfigBuilder withKeepAlive(boolean keepAlive) {
this.socketOptions.setKeepAlive(keepAlive);
return this;
}
public JavaDriverConfigBuilder withReuseAddress(boolean reuseAddress) {
this.socketOptions.setReuseAddress(reuseAddress);
return this;
}
public JavaDriverConfigBuilder withSoLinger(int soLinger) {
this.socketOptions.setSoLinger(soLinger);
return this;
}
public JavaDriverConfigBuilder withTcpNoDelay(boolean tcpNoDelay) {
this.socketOptions.setTcpNoDelay(tcpNoDelay);
return this;
}
public JavaDriverConfigBuilder withReceiveBufferSize(int receiveBufferSize) {
this.socketOptions.setReceiveBufferSize(receiveBufferSize);
return this;
}
public JavaDriverConfigBuilder withSendBufferSize(int sendBufferSize) {
this.socketOptions.setSendBufferSize(sendBufferSize);
return this;
}
public JavaDriverConfigBuilder withJmxReportingEnabled(boolean enabled) {
this.jmxReportingEnabled = enabled;
return this;
}
public JavaDriverConfigBuilder withConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.queryOptions.setConsistencyLevel(consistencyLevel);
return this;
}
public JavaDriverConfigBuilder withSerialConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.queryOptions.setSerialConsistencyLevel(consistencyLevel);
return this;
}
public JavaDriverConfigBuilder withFetchSize(int fetchSize) {
this.queryOptions.setFetchSize(fetchSize);
return this;
}
}
| 8,065 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/JavaDriverConnectionPoolConfigurationImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import java.util.List;
import java.util.concurrent.ScheduledExecutorService;
import com.datastax.driver.core.Configuration;
import com.netflix.astyanax.AstyanaxContext;
import com.netflix.astyanax.AuthenticationCredentials;
import com.netflix.astyanax.connectionpool.BadHostDetector;
import com.netflix.astyanax.connectionpool.ConnectionPoolConfiguration;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.LatencyScoreStrategy;
import com.netflix.astyanax.connectionpool.OperationFilterFactory;
import com.netflix.astyanax.connectionpool.RetryBackoffStrategy;
import com.netflix.astyanax.connectionpool.SSLConnectionContext;
import com.netflix.astyanax.connectionpool.impl.HostSelectorStrategy;
import com.netflix.astyanax.partitioner.Partitioner;
import com.netflix.astyanax.shallows.EmptyOperationTracer;
import com.netflix.astyanax.tracing.OperationTracer;
/**
* This class simply acts as a holder for the {@link Configuration} object for the java driver. It can be injected into the
* {@link AstyanaxContext} via the regular interface and thus helps serve as a bridge when setting up the java driver using the
* regular Astyanax setup route.
*
* The class does not actually implement any of the actual methods of {@link ConnectionPoolConfiguration}. It's sole purpose is just to
* hold a reference to the java driver config object and then be injected via the regular interfaces available in AstyanaxContext.
*
* @author poberai
*
*/
public class JavaDriverConnectionPoolConfigurationImpl implements ConnectionPoolConfiguration {
// The wrapped java-driver configuration; the only real state this holder carries.
private final Configuration jdConfig;
// Pluggable operation tracer; defaults to a no-op implementation.
private OperationTracer opTracer = new EmptyOperationTracer();
/**
 * @param configuration the java-driver {@link Configuration} to carry through AstyanaxContext
 */
public JavaDriverConnectionPoolConfigurationImpl(Configuration configuration) {
this.jdConfig = configuration;
}
/** Returns the wrapped java-driver configuration. */
public Configuration getJavaDriverConfig() {
return jdConfig;
}
// ------------------------------------------------------------------
// The ConnectionPoolConfiguration methods below are placeholders: this
// class exists only to carry the java-driver Configuration through the
// Astyanax setup path (see class javadoc). Unless noted otherwise they
// return null/0 and are not consulted by the java-driver code path.
// ------------------------------------------------------------------
@Override
public LatencyScoreStrategy getLatencyScoreStrategy() {
return null;
}
@Override
public BadHostDetector getBadHostDetector() {
return null;
}
@Override
public int getPort() {
// Real delegation: the port comes from the java-driver protocol options.
return jdConfig.getProtocolOptions().getPort();
}
@Override
public String getName() {
return null;
}
@Override
public int getMaxConnsPerHost() {
return 0;
}
@Override
public int getInitConnsPerHost() {
return 0;
}
@Override
public int getMaxConns() {
return 0;
}
@Override
public int getMaxTimeoutWhenExhausted() {
return 0;
}
@Override
public int getMaxFailoverCount() {
return 0;
}
@Override
public RetryBackoffStrategy getRetryBackoffStrategy() {
return null;
}
@Override
public HostSelectorStrategy getHostSelectorStrategy() {
return null;
}
@Override
public String getSeeds() {
return null;
}
@Override
public List<Host> getSeedHosts() {
return null;
}
@Override
public String getLocalDatacenter() {
return null;
}
@Override
public int getSocketTimeout() {
return 0;
}
@Override
public int getConnectTimeout() {
return 0;
}
@Override
public int getConnectionLimiterWindowSize() {
return 0;
}
@Override
public int getConnectionLimiterMaxPendingCount() {
return 0;
}
@Override
public int getLatencyAwareWindowSize() {
return 0;
}
@Override
public float getLatencyAwareSentinelCompare() {
return 0;
}
@Override
public float getLatencyAwareBadnessThreshold() {
return 0;
}
@Override
public int getBlockedThreadThreshold() {
return 0;
}
@Override
public float getMinHostInPoolRatio() {
return 0;
}
@Override
public int getLatencyAwareUpdateInterval() {
return 0;
}
@Override
public int getLatencyAwareResetInterval() {
return 0;
}
@Override
public int getMaxPendingConnectionsPerHost() {
return 0;
}
@Override
public int getMaxBlockedThreadsPerHost() {
return 0;
}
@Override
public int getTimeoutWindow() {
return 0;
}
@Override
public int getMaxTimeoutCount() {
return 0;
}
@Override
public int getRetrySuspendWindow() {
return 0;
}
@Override
public int getRetryMaxDelaySlice() {
return 0;
}
@Override
public int getRetryDelaySlice() {
return 0;
}
@Override
public int getMaxOperationsPerConnection() {
return 0;
}
@Override
public AuthenticationCredentials getAuthenticationCredentials() {
return null;
}
@Override
public OperationFilterFactory getOperationFilterFactory() {
return null;
}
@Override
public Partitioner getPartitioner() {
return null;
}
@Override
public SSLConnectionContext getSSLConnectionContext() {
return null;
}
@Override
public ScheduledExecutorService getMaintainanceScheduler() {
return null;
}
@Override
public ScheduledExecutorService getHostReconnectExecutor() {
return null;
}
@Override
public void initialize() {
// No-op: lifecycle is managed by the java driver itself.
}
@Override
public void shutdown() {
// No-op: lifecycle is managed by the java driver itself.
}
@Override
public OperationTracer getOperationTracer() {
return opTracer;
}
/** Replaces the default no-op operation tracer. */
public void setOperationTracer(OperationTracer opTracer) {
this.opTracer = opTracer;
}
}
| 8,066 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/CqlClusterImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import com.codahale.metrics.MetricRegistryListener;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Configuration;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.KeyspaceTracerFactory;
import com.netflix.astyanax.connectionpool.ConnectionPoolConfiguration;
import com.netflix.astyanax.connectionpool.ConnectionPoolMonitor;
import com.netflix.astyanax.connectionpool.ConnectionPoolProxy.SeedHostListener;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.schema.CqlColumnDefinitionImpl;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.schema.CqlKeyspaceDefinitionImpl;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.ddl.ColumnFamilyDefinition;
import com.netflix.astyanax.ddl.KeyspaceDefinition;
import com.netflix.astyanax.ddl.SchemaChangeResult;
/**
* Java Driver based impl of {@link Cluster} that implements ddl operations.
* The class encapsulates a java driver cluster and session object to provide all the functionality.
*
* Note that due to the way the object is setup via AstyanaxContext and CqlFamilyFactory, it needs to implements
* a {@link SeedHostListener} so that it can construct the cluster and session object appropriately once the seed hosts
* have been provided by the {@link HostSupplier} object
*
* @author poberai
*/
public class CqlClusterImpl implements com.netflix.astyanax.Cluster, SeedHostListener {

    /** Java-driver cluster; (re)built when seed hosts arrive via {@link #setHosts}. */
    public volatile Cluster cluster;
    private volatile Session session;
    private final AstyanaxConfiguration astyanaxConfig;
    private final KeyspaceTracerFactory tracerFactory;
    private final Configuration javaDriverConfig;
    private final ConnectionPoolMonitor cpMonitor;
    private final MetricRegistryListener metricsRegListener;

    /**
     * Note: both casts below are unconditional, so callers must supply a
     * {@link JavaDriverConnectionPoolConfigurationImpl} and a
     * JavaDriverConnectionPoolMonitorImpl or construction fails with a
     * {@link ClassCastException}.
     */
    public CqlClusterImpl(AstyanaxConfiguration asConfig, KeyspaceTracerFactory tracerFactory, ConnectionPoolConfiguration cpConfig, ConnectionPoolMonitor cpMonitor) {
        this.astyanaxConfig = asConfig;
        this.tracerFactory = tracerFactory;
        this.javaDriverConfig = ((JavaDriverConnectionPoolConfigurationImpl) cpConfig).getJavaDriverConfig();
        this.cpMonitor = cpMonitor;
        this.metricsRegListener = ((JavaDriverConnectionPoolMonitorImpl) cpMonitor).getMetricsRegistryListener();
    }

    @Override
    public String describeClusterName() throws ConnectionException {
        return cluster.getMetadata().getClusterName();
    }

    /** Reads the Cassandra release version from the system.local table. */
    @Override
    public String getVersion() throws ConnectionException {
        Statement query = QueryBuilder.select("release_version")
                .from("system", "local")
                .where(eq("key", "local"));
        return session.execute(query).one().getString("release_version");
    }

    /** Closes the underlying java-driver cluster (and its sessions). */
    public void shutdown() {
        cluster.close();
    }

    @Override
    public String describeSnitch() throws ConnectionException {
        throw new UnsupportedOperationException("Operation not supported");
    }

    /** Reads the configured partitioner from the system.local table. */
    @Override
    public String describePartitioner() throws ConnectionException {
        Statement query = QueryBuilder.select("partitioner")
                .from("system", "local")
                .where(eq("key", "local"));
        return session.execute(query).one().getString("partitioner");
    }

    @Override
    public Map<String, List<String>> describeSchemaVersions() throws ConnectionException {
        return new CqlSchemaVersionReader(session).exec();
    }

    @Override
    public KeyspaceDefinition makeKeyspaceDefinition() {
        return new CqlKeyspaceDefinitionImpl(session);
    }

    /**
     * Aggregates the properties of every user keyspace, prefixing each
     * property key with "&lt;keyspace&gt;.".
     */
    @Override
    public Properties getAllKeyspaceProperties() throws ConnectionException {
        Properties properties = new Properties();
        try {
            List<KeyspaceDefinition> ksDefs = describeKeyspaces();
            for (KeyspaceDefinition ksDef : ksDefs) {
                Properties ksProps = ksDef.getProperties();
                for (Object key : ksProps.keySet()) {
                    properties.put(ksDef.getName() + "." + key, ksProps.get(key));
                }
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return properties;
    }

    @Override
    public Properties getKeyspaceProperties(String keyspace) throws ConnectionException {
        try {
            // Keyspace names are stored lower-cased.
            return describeKeyspace(keyspace.toLowerCase()).getProperties();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /** Lists all keyspaces, skipping the internal "system"/"system_*" keyspaces. */
    @Override
    public List<KeyspaceDefinition> describeKeyspaces() throws ConnectionException {
        Statement query = QueryBuilder.select().all().from("system", "schema_keyspaces");
        List<KeyspaceDefinition> ksDefs = new ArrayList<KeyspaceDefinition>();
        try {
            for (Row row : session.execute(query).all()) {
                String keyspaceName = row.getString("keyspace_name");
                if (keyspaceName.equals("system") || keyspaceName.startsWith("system_")) {
                    continue;
                }
                ksDefs.add(new CqlKeyspaceDefinitionImpl(session, row));
            }
            return ksDefs;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public KeyspaceDefinition describeKeyspace(String ksName) throws ConnectionException {
        return new CqlKeyspaceImpl(session, ksName, astyanaxConfig, tracerFactory, cpMonitor).describeKeyspace();
    }

    @Override
    public Keyspace getKeyspace(String keyspace) throws ConnectionException {
        return new CqlKeyspaceImpl(session, keyspace, astyanaxConfig, tracerFactory, cpMonitor);
    }

    @Override
    public OperationResult<SchemaChangeResult> dropKeyspace(String keyspaceName) throws ConnectionException {
        return new CqlKeyspaceImpl(session, keyspaceName.toLowerCase(), astyanaxConfig, tracerFactory, cpMonitor).dropKeyspace();
    }

    @Override
    public OperationResult<SchemaChangeResult> addKeyspace(KeyspaceDefinition def) throws ConnectionException {
        return ((CqlKeyspaceDefinitionImpl) def).execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> updateKeyspace(KeyspaceDefinition def) throws ConnectionException {
        return ((CqlKeyspaceDefinitionImpl) def).alterKeyspace().execute();
    }

    /**
     * Removes and returns the mandatory 'name' entry from the supplied options.
     * Shared by the create/update keyspace overloads, which previously repeated
     * this identical boilerplate.
     *
     * @throws RuntimeException if the 'name' property is absent
     */
    private static String removeKeyspaceName(Map<?, ?> options) {
        String keyspaceName = (String) options.remove("name");
        if (keyspaceName == null) {
            throw new RuntimeException("Options missing 'name' property for keyspace name");
        }
        return keyspaceName;
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspace(Map<String, Object> options) throws ConnectionException {
        String keyspaceName = removeKeyspaceName(options);
        return new CqlKeyspaceDefinitionImpl(session, options).setName(keyspaceName).execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspace(Properties props) throws ConnectionException {
        String keyspaceName = removeKeyspaceName(props);
        return new CqlKeyspaceDefinitionImpl(session, props).setName(keyspaceName).execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> updateKeyspace(Map<String, Object> options) throws ConnectionException {
        String keyspaceName = removeKeyspaceName(options);
        return new CqlKeyspaceDefinitionImpl(session, options).setName(keyspaceName).alterKeyspace().execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> updateKeyspace(Properties props) throws ConnectionException {
        String keyspaceName = removeKeyspaceName(props);
        return new CqlKeyspaceDefinitionImpl(session, props).setName(keyspaceName).alterKeyspace().execute();
    }

    @Override
    public AstyanaxConfiguration getConfig() {
        return astyanaxConfig;
    }

    @Override
    public ColumnFamilyDefinition makeColumnFamilyDefinition() {
        return new CqlColumnFamilyDefinitionImpl(session);
    }

    @Override
    public ColumnDefinition makeColumnDefinition() {
        return new CqlColumnDefinitionImpl();
    }

    @Override
    public Properties getColumnFamilyProperties(String keyspace, String columnfamilyName) throws ConnectionException {
        try {
            return new CqlKeyspaceDefinitionImpl(session).setName(keyspace).getColumnFamily(columnfamilyName).getProperties();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public OperationResult<SchemaChangeResult> createColumnFamily(Map<String, Object> options) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, null, options).execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> createColumnFamily(Properties props) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, null, props).execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(Map<String, Object> options) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, null, options).alterTable().execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(Properties props) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, null, props).alterTable().execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> dropColumnFamily(String keyspaceName, String columnFamilyName) throws ConnectionException {
        return new CqlKeyspaceImpl(session, keyspaceName, astyanaxConfig, tracerFactory, cpMonitor).dropColumnFamily(columnFamilyName);
    }

    @Override
    public OperationResult<SchemaChangeResult> addColumnFamily(ColumnFamilyDefinition def) throws ConnectionException {
        return ((CqlColumnFamilyDefinitionImpl) def).execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(ColumnFamilyDefinition def) throws ConnectionException {
        return ((CqlColumnFamilyDefinitionImpl) def).alterTable().execute();
    }

    /**
     * SeedHostListener callback: builds the java-driver Cluster/Session from
     * the supplied seed hosts and the previously-injected driver configuration.
     */
    @Override
    public void setHosts(Collection<Host> hosts, int port) {
        List<Host> hostList = Lists.newArrayList(hosts);
        List<String> contactPoints = Lists.transform(hostList, new Function<Host, String>() {
            @Override
            public String apply(Host input) {
                if (input != null) {
                    return input.getHostName();
                }
                return null;
            }
        });
        Configuration config = javaDriverConfig;
        // We really need a mechanism to easily override Configuration on the builder
        Cluster.Builder builder = Cluster.builder()
                .addContactPoints(contactPoints.toArray(new String[0]))
                .withPort(port)
                .withLoadBalancingPolicy(config.getPolicies().getLoadBalancingPolicy())
                .withReconnectionPolicy(config.getPolicies().getReconnectionPolicy())
                .withRetryPolicy(config.getPolicies().getRetryPolicy())
                .withCompression(config.getProtocolOptions().getCompression())
                .withPoolingOptions(config.getPoolingOptions())
                .withSocketOptions(config.getSocketOptions())
                .withQueryOptions(config.getQueryOptions());
        if (config.getMetricsOptions() == null) {
            builder.withoutMetrics();
        } else if (!config.getMetricsOptions().isJMXReportingEnabled()) {
            builder.withoutJMXReporting();
        }
        this.cluster = builder.build();
        // NOTE(review): the constructor unconditionally casts cpMonitor to
        // JavaDriverConnectionPoolMonitorImpl, so this negated instanceof check
        // can never be true by the time we get here — the metrics listener is
        // effectively never registered. The condition looks inverted; confirm
        // intent before changing it.
        if (!(this.cpMonitor instanceof JavaDriverConnectionPoolMonitorImpl))
            this.cluster.getMetrics().getRegistry().addListener((MetricRegistryListener) this.metricsRegListener);
        this.session = cluster.connect();
    }
}
| 8,067 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/ConsistencyLevelMapping.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import com.netflix.astyanax.model.ConsistencyLevel;
/**
* Helper class for translating Astyanax consistency level to java driver consistency level
*
* @author poberai
*/
public class ConsistencyLevelMapping {

    /**
     * Translates an Astyanax {@link ConsistencyLevel} into the equivalent
     * java-driver consistency level.
     *
     * @param cl the Astyanax consistency level to translate
     * @return the matching java-driver consistency level
     * @throws RuntimeException if no mapping exists for the given level
     */
    public static com.datastax.driver.core.ConsistencyLevel getCL(ConsistencyLevel cl) {
        final com.datastax.driver.core.ConsistencyLevel mapped;
        switch (cl) {
        case CL_ONE:
            mapped = com.datastax.driver.core.ConsistencyLevel.ONE;
            break;
        case CL_TWO:
            mapped = com.datastax.driver.core.ConsistencyLevel.TWO;
            break;
        case CL_THREE:
            mapped = com.datastax.driver.core.ConsistencyLevel.THREE;
            break;
        case CL_QUORUM:
            mapped = com.datastax.driver.core.ConsistencyLevel.QUORUM;
            break;
        case CL_LOCAL_QUORUM:
            mapped = com.datastax.driver.core.ConsistencyLevel.LOCAL_QUORUM;
            break;
        case CL_EACH_QUORUM:
            mapped = com.datastax.driver.core.ConsistencyLevel.EACH_QUORUM;
            break;
        case CL_ALL:
            mapped = com.datastax.driver.core.ConsistencyLevel.ALL;
            break;
        case CL_ANY:
            mapped = com.datastax.driver.core.ConsistencyLevel.ANY;
            break;
        default:
            throw new RuntimeException("CL Level not recognized: " + cl.name());
        }
        return mapped;
    }
}
| 8,068 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/CqlSchemaVersionReader.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
/**
* Simple class that reads the schema versions from the system local and peers table.
*
* @author poberai
*/
public class CqlSchemaVersionReader {

    // Renamed from 'Logger' (which shadowed the imported org.slf4j.Logger type
    // and broke constant-naming conventions) to the conventional UPPER_SNAKE_CASE.
    private static final Logger LOGGER = LoggerFactory.getLogger(CqlSchemaVersionReader.class);

    private static final String SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'";
    private static final String SELECT_SCHEMA_PEERS = "SELECT peer, schema_version FROM system.peers";

    private final Session session;

    public CqlSchemaVersionReader(Session session) {
        this.session = session;
    }

    /**
     * Collects the schema version reported by the queried node (system.local)
     * and by each of its peers (system.peers).
     *
     * @return map from schema-version string to the list of endpoints reporting it
     */
    public Map<String, List<String>> exec() {
        Map<String, List<String>> versions = new HashMap<String, List<String>>();
        ResultSet rs = session.execute(SELECT_SCHEMA_LOCAL);
        Row localRow = rs.one();
        if (localRow != null && !localRow.isNull("schema_version")) {
            UUID localSchemaVersion = localRow.getUUID("schema_version");
            // The local node's address comes from the execution info, not the row.
            InetAddress localServer = rs.getExecutionInfo().getQueriedHost().getAddress();
            addSchemaVersion(localSchemaVersion, localServer, versions);
        }
        rs = session.execute(SELECT_SCHEMA_PEERS);
        for (Row row : rs.all()) {
            // Skip peers that have not reported an address or a schema version yet.
            if (row.isNull("rpc_address") || row.isNull("schema_version"))
                continue;
            UUID schema = row.getUUID("schema_version");
            InetAddress remoteEndpoint = row.getInet("rpc_address");
            addSchemaVersion(schema, remoteEndpoint, versions);
        }
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Checking for schema agreement: versions are {}", versions);
        }
        return versions;
    }

    /** Appends {@code endpoint} to the list of nodes reporting {@code versionUUID}. */
    private void addSchemaVersion(UUID versionUUID, InetAddress endpoint, Map<String, List<String>> map) {
        String version = versionUUID.toString();
        List<String> endpoints = map.get(version);
        if (endpoints == null) {
            endpoints = new ArrayList<String>();
            map.put(version, endpoints);
        }
        endpoints.add(endpoint.toString());
    }
}
| 8,069 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/CqlAbstractExecutionImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationCategory;
import com.netflix.astyanax.CassandraOperationTracer;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.KeyspaceTracerFactory;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.IsRetryableException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.connectionpool.exceptions.OperationException;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.retrypolicies.JavaDriverBasedRetryPolicy;
import com.netflix.astyanax.cql.util.AsyncOperationResult;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
/**
* Abstract class that encapsulates the functionality for executing an operations using the native protocol based java driver
* Note that class provides only the operation agnostic functionality such as retries, tracking metrics etc.
* The actual logic for constructing the query for the operation and then parsing the result set of the operation is left
* to the implementation of the extending class.
*
* @author poberai
*
* @param <R>
*/
public abstract class CqlAbstractExecutionImpl<R> implements Execution<R> {
private static final Logger LOG = LoggerFactory.getLogger(CqlAbstractExecutionImpl.class);
// The session for executing the query
protected final Session session;
// The keyspace being operated on
protected final String keyspace;
// The CF being operated on
protected final ColumnFamily<?, ?> cf;
// Factory for vending operation metrics
protected final KeyspaceTracerFactory tracerFactory;
// Retry policy
protected final RetryPolicy retry;
// ConsistencyLevel
protected final com.datastax.driver.core.ConsistencyLevel clLevel;
/**
 * Builds an execution bound to a keyspace and an (optional) column-family context.
 *
 * @param ksContext keyspace-level context supplying session, config and tracers
 * @param cfContext column-family context; may be {@code null} for CF-agnostic operations
 */
public CqlAbstractExecutionImpl(KeyspaceContext ksContext, CFQueryContext<?,?> cfContext) {
    this.session = ksContext.getSession();
    this.keyspace = ksContext.getKeyspace();
    this.cf = (cfContext != null) ? cfContext.getColumnFamily() : null;
    this.tracerFactory = ksContext.getTracerFactory();
    // process the override retry policy first
    RetryPolicy retryPolicy = ksContext.getConfig().getRetryPolicy();
    // Bug fix: cfContext is nullable (see the 'cf' assignment above), but the
    // original dereferenced it unconditionally here, causing an NPE whenever
    // no keyspace-level retry policy was configured and cfContext was null.
    retry = (retryPolicy != null)
            ? retryPolicy
            : getRetryPolicy((cfContext != null) ? cfContext.getRetryPolicy() : null);
    clLevel = resolveConsistencyLevel(ksContext, cfContext);
}
/**
 * Builds a CF-agnostic execution bound only to a keyspace.
 *
 * @param ksContext keyspace-level context supplying session, config and tracers
 * @param retryPolicy explicit retry policy; falls back to the keyspace config's policy when null
 */
public CqlAbstractExecutionImpl(KeyspaceContext ksContext, RetryPolicy retryPolicy) {
this.session = ksContext.getSession();
this.keyspace = ksContext.getKeyspace();
this.cf = null;
this.tracerFactory = ksContext.getTracerFactory();
// process the override retry policy first
retry = (retryPolicy != null) ? retryPolicy : getRetryPolicy(ksContext.getConfig().getRetryPolicy());
clLevel = resolveConsistencyLevel(ksContext, null);
}
@Override
/**
 * Executes the operation under the configured retry policy. Retryable
 * connection failures and runtime errors are retried until the policy
 * gives up; any non-retryable ConnectionException propagates immediately.
 */
public OperationResult<R> execute() throws ConnectionException {
    retry.begin();
    ConnectionException lastException = null;
    while (true) {
        try {
            return executeOp();
        } catch (RuntimeException runtimeEx) {
            // Wrap unexpected runtime failures so the retry loop can account for them.
            lastException = new OperationException(runtimeEx);
        } catch (ConnectionException connEx) {
            if (!(connEx instanceof IsRetryableException)) {
                throw connEx;
            }
            lastException = connEx;
        }
        if (!retry.allowRetry()) {
            throw lastException;
        }
    }
}
/**
 * Performs a single attempt: builds the query, applies consistency level and
 * (when applicable) the driver-native retry policy, runs it synchronously,
 * and wraps the parsed result together with the attempt count.
 */
private OperationResult<R> executeOp() throws ConnectionException {
    // Use a CF-scoped tracer when a column family is in play.
    final CassandraOperationTracer tracer = (cf != null)
            ? tracerFactory.newTracer(getOperationType(), cf)
            : tracerFactory.newTracer(getOperationType());
    tracer.start();
    final Statement query = getQuery();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Query: " + query);
    }
    // Set the consistency level on the query
    query.setConsistencyLevel(clLevel);
    // Set the retry policy on the query, when it is a driver-native policy.
    if (retry instanceof JavaDriverBasedRetryPolicy) {
        query.setRetryPolicy(((JavaDriverBasedRetryPolicy) retry).getJDRetryPolicy());
    }
    final ResultSet resultSet = session.execute(query);
    final OperationResult<R> opResult =
            new CqlOperationResultImpl<R>(resultSet, parseResultSet(resultSet));
    opResult.setAttemptsCount(retry.getAttemptCount());
    tracer.success();
    return opResult;
}
@Override
public ListenableFuture<OperationResult<R>> executeAsync() throws ConnectionException {
final CassandraOperationTracer tracer = tracerFactory.newTracer(getOperationType());
tracer.start();
Statement query = getQuery();
if (LOG.isDebugEnabled()) {
LOG.debug("Query: " + query);
}
ResultSetFuture rsFuture = session.executeAsync(query);
return new AsyncOperationResult<R>(rsFuture) {
@Override
public OperationResult<R> getOperationResult(ResultSet resultSet) {
R result = null;
try {
result = parseResultSet(resultSet);
} catch (NotFoundException e) {
e.printStackTrace();
}
tracer.success();
OperationResult<R> opResult = new CqlOperationResultImpl<R>(resultSet, result);
opResult.setAttemptsCount(retry.getAttemptCount());
return opResult;
}
};
}
private RetryPolicy getRetryPolicy(RetryPolicy policy) {
if (policy != null) {
return policy.duplicate();
} else {
return null;
}
}
private ConsistencyLevel getDefaultCL(KeyspaceContext ksContext) {
ConsistencyLevel clLevel = ksContext.getConfig().getDefaultReadConsistencyLevel();
CassandraOperationCategory op = getOperationType().getCategory();
switch (op) {
case READ:
clLevel = ksContext.getConfig().getDefaultReadConsistencyLevel();
break;
case WRITE:
clLevel = ksContext.getConfig().getDefaultWriteConsistencyLevel();
default:
clLevel = ksContext.getConfig().getDefaultReadConsistencyLevel();
}
return clLevel;
}
private com.datastax.driver.core.ConsistencyLevel resolveConsistencyLevel(KeyspaceContext ksContext, CFQueryContext<?,?> cfContext) {
ConsistencyLevel clLevel = null;
if (cfContext != null) {
clLevel = cfContext.getConsistencyLevel();
}
if (clLevel == null) {
clLevel = getDefaultCL(ksContext);
}
return ConsistencyLevelMapping.getCL(clLevel);
}
    /**
     * Specify what operation type this is. Used for emitting the right tracers.
     * @return CassandraOperationType for this execution
     */
    public abstract CassandraOperationType getOperationType();
    /**
     * Get the java-driver statement for this operation; called once per attempt.
     * @return Statement to execute
     */
    public abstract Statement getQuery();
    /**
     * Parse the raw result set into the operation-specific response.
     * @param resultSet raw java-driver result
     * @return parsed response
     * @throws NotFoundException when the expected row/column is absent
     */
    public abstract R parseResultSet(ResultSet resultSet) throws NotFoundException;
}
| 8,070 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/JavaDriverConfigBridge.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import com.datastax.driver.core.*;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.Policies;
import com.datastax.driver.core.policies.RoundRobinPolicy;
import com.datastax.driver.core.policies.TokenAwarePolicy;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.AuthenticationCredentials;
import com.netflix.astyanax.connectionpool.ConnectionPoolConfiguration;
import com.netflix.astyanax.cql.util.ConsistencyLevelTransform;
import static com.datastax.driver.core.ProtocolOptions.DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS;
/**
 * Bridges Astyanax configuration objects onto the equivalent java-driver
 * {@link Configuration}: the Astyanax connection pool type drives load
 * balancing, credentials drive authentication, and the connection pool
 * limits/timeouts drive pooling and socket options.
 */
public class JavaDriverConfigBridge {

    private final AstyanaxConfiguration asConfig;
    private final ConnectionPoolConfiguration cpConfig;

    public JavaDriverConfigBridge(AstyanaxConfiguration asConfig, ConnectionPoolConfiguration cpConfig) {
        this.asConfig = asConfig;
        this.cpConfig = cpConfig;
    }

    /** Assembles the full java-driver configuration from the Astyanax settings. */
    public Configuration getJDConfig() {
        final Policies policies = getPolicies();
        final ProtocolOptions protocolOptions = getProtocolOptions();
        final PoolingOptions poolingOptions = getPoolingOptions();
        final SocketOptions socketOptions = getSocketOptions();
        final MetricsOptions metricsOptions = getMetricsOptions();
        final QueryOptions queryOptions = getQueryOptions();
        return Configuration.builder()
                .withPolicies(policies)
                .withProtocolOptions(protocolOptions)
                .withPoolingOptions(poolingOptions)
                .withSocketOptions(socketOptions)
                .withMetricsOptions(metricsOptions)
                .withQueryOptions(queryOptions)
                .build();
    }

    private Policies getPolicies() {
        return Policies.builder().withLoadBalancingPolicy(getLB()).build();
    }

    /** Maps the Astyanax connection pool type onto a driver load balancing policy. */
    private LoadBalancingPolicy getLB() {
        switch (asConfig.getConnectionPoolType()) {
        case TOKEN_AWARE:
            return new TokenAwarePolicy(new RoundRobinPolicy());
        case BAG:
            throw new RuntimeException("Unsupported connection pool type, use ROUND_ROBIN or TOKEN_AWARE");
        case ROUND_ROBIN:
        default:
            return new RoundRobinPolicy();
        }
    }

    private ProtocolOptions getProtocolOptions() {
        final AuthenticationCredentials creds = cpConfig.getAuthenticationCredentials();
        final AuthProvider authProvider = (creds == null)
                ? AuthProvider.NONE
                : new PlainTextAuthProvider(creds.getUsername(), creds.getPassword());
        return new ProtocolOptions(cpConfig.getPort(), ProtocolVersion.NEWEST_SUPPORTED,
                DEFAULT_MAX_SCHEMA_AGREEMENT_WAIT_SECONDS, null, authProvider);
    }

    private PoolingOptions getPoolingOptions() {
        return new CpConfigBasedPoolingOptions();
    }

    private SocketOptions getSocketOptions() {
        return new CpConfigBasedSocketOptions();
    }

    private MetricsOptions getMetricsOptions() {
        return new MetricsOptions();
    }

    private QueryOptions getQueryOptions() {
        return new ConfigBasedQueryOptions();
    }

    /** Pooling limits sourced live from the Astyanax connection pool configuration. */
    private class CpConfigBasedPoolingOptions extends PoolingOptions {

        private CpConfigBasedPoolingOptions() {
        }

        @Override
        public int getCoreConnectionsPerHost(HostDistance distance) {
            // Large pools (>4) start at half of the max; small pools start at max.
            final int maxConns = cpConfig.getMaxConnsPerHost();
            return (maxConns > 4) ? maxConns / 2 : maxConns;
        }

        @Override
        public int getMaxConnectionsPerHost(HostDistance distance) {
            return cpConfig.getMaxConnsPerHost();
        }
    }

    /** Socket timeouts sourced live from the Astyanax connection pool configuration. */
    private class CpConfigBasedSocketOptions extends SocketOptions {

        private CpConfigBasedSocketOptions() {
        }

        @Override
        public int getConnectTimeoutMillis() {
            return cpConfig.getConnectTimeout();
        }

        @Override
        public int getReadTimeoutMillis() {
            return cpConfig.getSocketTimeout();
        }
    }

    /** Consistency level sourced live from the Astyanax default read consistency. */
    private class ConfigBasedQueryOptions extends QueryOptions {
        @Override
        public ConsistencyLevel getConsistencyLevel() {
            return ConsistencyLevelTransform.getConsistencyLevel(asConfig.getDefaultReadConsistencyLevel());
        }
    }
}
| 8,071 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/CqlRingDescriber.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.netflix.astyanax.connectionpool.TokenRange;
import com.netflix.astyanax.connectionpool.impl.TokenRangeImpl;
import com.netflix.astyanax.partitioner.Murmur3Partitioner;
/**
* Helper class that parses the ring information from the system and peers table.
 * Note that it maintains a cached reference and allows the user to either reuse the cache or refresh it.
*
* @author poberai
*/
public class CqlRingDescriber {

    // Last fully-computed ring; populated by getTokenRanges(session, cached) and
    // reused when the caller asks for the cached view.
    private final AtomicReference<List<TokenRange>> cachedReference = new AtomicReference<List<TokenRange>>(null);

    // Eagerly-created singleton instance.
    private static CqlRingDescriber Instance = new CqlRingDescriber();

    private CqlRingDescriber() {
    }

    /** Returns the process-wide singleton. */
    public static CqlRingDescriber getInstance() {
        return Instance;
    }

    /**
     * Returns the token ranges for the whole ring (no dc/rack filter).
     *
     * @param session live java-driver session used to query the system tables
     * @param cached  when true and a prior result exists, return it without re-querying
     * @return list of token ranges with their replica endpoints
     */
    public List<TokenRange> getTokenRanges(Session session, boolean cached) {
        if (cached && cachedReference.get() != null) {
            return cachedReference.get();
        }
        // else get the actual token range list and then cache it
        List<TokenRange> ranges = getTokenRanges(session, null, null);
        cachedReference.set(ranges);
        return ranges;
    }

    /**
     * Computes the token ranges of the ring, optionally filtering replica endpoints
     * to a given datacenter and/or rack. The local node comes from system.local and
     * the rest from system.peers; hosts are sorted by token, each host owning the
     * range (previous token, its token]. For each range, the endpoint list contains
     * this node plus the next two nodes on the ring that pass the dc/rack filter
     * (assumes 3 replicas per range — TODO confirm against the keyspace's actual
     * replication settings).
     *
     * @param session live java-driver session
     * @param dc      datacenter filter; null means "any"
     * @param rack    rack filter; null means "any"
     * @return list of token ranges with their (filtered) replica endpoints
     */
    public List<TokenRange> getTokenRanges(Session session, String dc, String rack) {
        List<HostInfo> hosts = new ArrayList<HostInfo>();
        // Local node: full row so the queried host's address can be read from the execution info.
        Statement query = QueryBuilder.select().all().from("system", "local");
        ResultSet resultSet = session.execute(query);
        hosts.add(new HostInfo(resultSet.one(), resultSet));
        // Remote nodes from the peers table.
        query = QueryBuilder.select("peer", "data_center", "host_id", "rack", "tokens").from("system", "peers");
        resultSet = session.execute(query);
        for (Row row : resultSet.all()) {
            hosts.add(new HostInfo(row, null));
        }
        // Sort by token so consecutive entries delimit consecutive ranges on the ring.
        Collections.sort(hosts);
        List<TokenRange> ranges = new ArrayList<TokenRange>();
        for (int index = 0; index<hosts.size(); index++) {
            HostInfo thisNode = hosts.get(index);
            List<String> endpoints = new ArrayList<String>();
            if (matchNode(dc, rack, thisNode)) {
                endpoints.add(thisNode.endpoint); // the primary range owner
            }
            // secondary node
            int nextIndex = getNextIndex(index, hosts.size());
            if (nextIndex != index) {
                HostInfo nextNode = hosts.get(nextIndex);
                if (matchNode(dc, rack, nextNode)) {
                    endpoints.add(nextNode.endpoint);
                }
                // tertiary node
                nextIndex = getNextIndex(nextIndex, hosts.size());
                nextNode = hosts.get(nextIndex);
                if (matchNode(dc, rack, nextNode)) {
                    endpoints.add(nextNode.endpoint);
                }
            }
            // Range boundaries: from the previous host's token (exclusive side) to this host's token.
            int prevIndex = getPrevIndex(index, hosts.size());
            String startToken = hosts.get(prevIndex).token.toString();
            String endToken = thisNode.token.toString();
            if (startToken.equals(endToken)) {
                // TOKENS are the same. This happens during testing.
                startToken = Murmur3Partitioner.get().getMinToken();
                endToken = Murmur3Partitioner.get().getMinToken();
            }
            ranges.add(new TokenRangeImpl(startToken, endToken, endpoints));
        }
        return ranges;
    }

    /**
     * Returns true when the host passes the dc/rack filter; a null dc or rack
     * means that dimension is unconstrained.
     */
    private boolean matchNode(String dc, String rack, HostInfo host) {
        if (dc == null && rack == null) {
            return true; // node matches since there is no filter
        }
        if (dc != null && !dc.equals(host.datacenter)) {
            return false; // wrong dc
        }
        if (rack != null && !rack.equals(host.rack)) {
            return false; // wrong rack
        }
        return true; // match!
    }

    // Next index on the ring, wrapping from the last host back to the first.
    private int getNextIndex(int i, int n) {
        int next = ++i;
        if (i >= n) {
            return 0;
        } else {
            return next;
        }
    }

    // Previous index on the ring, wrapping from the first host back to the last.
    private int getPrevIndex(int i, int n) {
        int prev = --i;
        if (i < 0) {
            return n-1;
        } else {
            return prev;
        }
    }

    /**
     * One node of the ring, parsed from a system.local or system.peers row.
     * Natural ordering is by token so a sorted list reflects ring order.
     */
    private class HostInfo implements Comparable<HostInfo> {

        private final BigInteger token;
        private final String endpoint;
        private final UUID hostId;
        private final String datacenter;
        private final String rack;

        // rs is non-null only for the system.local row, where the endpoint must be
        // derived from the execution info (the local row has no "peer" column).
        private HostInfo(Row row, ResultSet rs) {
            if (row == null) {
                throw new RuntimeException("RS is empty for system.local query");
            }
            // Only the first token is used — assumes a single token per node
            // (i.e. vnodes disabled); TODO confirm for vnode-enabled clusters.
            Set<String> tokens = row.getSet("tokens", String.class);
            String theToken = tokens.iterator().next();
            token = new BigInteger(theToken);
            hostId = row.getUUID("host_id");
            datacenter = row.getString("data_center");
            rack = row.getString("rack");
            if (rs != null) {
                endpoint = rs.getExecutionInfo().getQueriedHost().getAddress().getHostAddress();
            } else {
                endpoint = row.getInet("peer").getHostAddress();
            }
        }

        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            result = prime * result + ((token == null) ? 0 : token.hashCode());
            result = prime * result + ((endpoint == null) ? 0 : endpoint.hashCode());
            result = prime * result + ((hostId == null) ? 0 : hostId.hashCode());
            result = prime * result + ((datacenter == null) ? 0 : datacenter.hashCode());
            result = prime * result + ((rack == null) ? 0 : rack.hashCode());
            return result;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) return true;
            if (obj == null) return false;
            if (getClass() != obj.getClass()) return false;
            HostInfo other = (HostInfo) obj;
            boolean equal = true;
            equal &= (token != null) ? token.equals(other.token) : (other.token == null);
            equal &= (endpoint != null) ? endpoint.equals(other.endpoint) : (other.endpoint == null);
            equal &= (hostId != null) ? hostId.equals(other.hostId) : (other.hostId == null);
            equal &= (datacenter != null) ? datacenter.equals(other.datacenter) : (other.datacenter == null);
            equal &= (rack != null) ? rack.equals(other.rack) : (other.rack == null);
            return equal;
        }

        @Override
        public String toString() {
            return "HostInfo [token=" + token + ", endpoint=" + endpoint
                    + ", hostId=" + hostId.toString() + ", datacenter=" + datacenter
                    + ", rack=" + rack + "]";
        }

        @Override
        public int compareTo(HostInfo o) {
            return this.token.compareTo(o.token);
        }
    }
}
| 8,072 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/CqlKeyspaceImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Callable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.MetricRegistryListener;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Configuration;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.Clock;
import com.netflix.astyanax.ColumnMutation;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.KeyspaceTracerFactory;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.SerializerPackage;
import com.netflix.astyanax.clock.MicrosecondsAsyncClock;
import com.netflix.astyanax.connectionpool.ConnectionPool;
import com.netflix.astyanax.connectionpool.ConnectionPoolConfiguration;
import com.netflix.astyanax.connectionpool.ConnectionPoolMonitor;
import com.netflix.astyanax.connectionpool.ConnectionPoolProxy.SeedHostListener;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.connectionpool.Operation;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.TokenRange;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.connectionpool.exceptions.OperationException;
import com.netflix.astyanax.connectionpool.impl.OperationResultImpl;
import com.netflix.astyanax.cql.direct.DirectCqlStatement;
import com.netflix.astyanax.cql.reads.CqlColumnFamilyQueryImpl;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.schema.CqlKeyspaceDefinitionImpl;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.cql.writes.CqlColumnMutationImpl;
import com.netflix.astyanax.cql.writes.CqlMutationBatchImpl;
import com.netflix.astyanax.ddl.ColumnFamilyDefinition;
import com.netflix.astyanax.ddl.KeyspaceDefinition;
import com.netflix.astyanax.ddl.SchemaChangeResult;
import com.netflix.astyanax.ddl.impl.SchemaChangeResponseImpl;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.partitioner.BigInteger127Partitioner;
import com.netflix.astyanax.partitioner.Murmur3Partitioner;
import com.netflix.astyanax.partitioner.Partitioner;
import com.netflix.astyanax.query.ColumnFamilyQuery;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.SerializerPackageImpl;
import com.netflix.astyanax.serializers.UnknownComparatorException;
/**
* Java Driver based impl of {@link Keyspace} that implements ddl operations as well as row queries and mutation batches.
* The class encapsulates a java driver cluster and session object to provide all the functionality.
*
 * Note that due to the way the object is set up via AstyanaxContext and CqlFamilyFactory, it needs to implement
 * a {@link SeedHostListener} so that it can construct the cluster and session object appropriately once the seed hosts
 * have been provided by the {@link HostSupplier} object.
*
* @author poberai
*/
public class CqlKeyspaceImpl implements Keyspace, SeedHostListener {

    private static final Logger LOG = LoggerFactory.getLogger(CqlKeyspaceImpl.class);

    /** Clock used to generate mutation timestamps. */
    private final Clock clock;

    public volatile Cluster cluster;
    public volatile Session session;

    private final KeyspaceContext ksContext;
    private final String keyspaceName;
    private final AstyanaxConfiguration astyanaxConfig;
    private final KeyspaceTracerFactory tracerFactory;
    private final Configuration javaDriverConfig;
    private final ConnectionPoolMonitor cpMonitor;
    private final MetricRegistryListener metricsRegListener;

    public CqlKeyspaceImpl(String ksName, AstyanaxConfiguration asConfig, KeyspaceTracerFactory tracerFactory, ConnectionPoolConfiguration cpConfig, ConnectionPoolMonitor cpMonitor) {
        this(null, ksName, asConfig, tracerFactory, cpConfig, cpMonitor);
    }

    public CqlKeyspaceImpl(KeyspaceContext ksContext) {
        this(ksContext.getSession(), ksContext.getKeyspace(), ksContext.getConfig(), ksContext.getTracerFactory(), null, ksContext.getConnectionPoolMonitor());
    }

    CqlKeyspaceImpl(Session session, String ksName, AstyanaxConfiguration asConfig, KeyspaceTracerFactory tracerFactory, ConnectionPoolMonitor cpMonitor) {
        this(session, ksName, asConfig, tracerFactory, null, cpMonitor);
    }

    private CqlKeyspaceImpl(Session session, String ksName, AstyanaxConfiguration asConfig, KeyspaceTracerFactory tracerFactory, ConnectionPoolConfiguration cpConfig, ConnectionPoolMonitor cpMonitor) {
        this.session = session;
        this.keyspaceName = ksName.toLowerCase();
        this.astyanaxConfig = asConfig;
        this.tracerFactory = tracerFactory;
        this.cpMonitor = cpMonitor;
        // NOTE(review): this cast assumes the monitor is always the java-driver impl,
        // even though setHosts() later checks the opposite case — confirm intent.
        this.metricsRegListener = ((JavaDriverConnectionPoolMonitorImpl) cpMonitor).getMetricsRegistryListener();
        this.ksContext = new KeyspaceContext(this);

        if (asConfig.getClock() != null) {
            clock = asConfig.getClock();
        } else {
            clock = new MicrosecondsAsyncClock();
        }

        if (cpConfig != null) {
            javaDriverConfig = ((JavaDriverConnectionPoolConfigurationImpl) cpConfig).getJavaDriverConfig();
        } else {
            javaDriverConfig = null;
        }
    }

    @Override
    public AstyanaxConfiguration getConfig() {
        return astyanaxConfig;
    }

    @Override
    public String getKeyspaceName() {
        return keyspaceName;
    }

    /** Maps the cluster's partitioner class name onto the Astyanax partitioner impl. */
    @Override
    public Partitioner getPartitioner() throws ConnectionException {
        String pName = describePartitioner();
        if (pName.contains("Murmur3Partitioner")) {
            return Murmur3Partitioner.get();
        } else if (pName.contains("RandomPartitioner")) {
            return BigInteger127Partitioner.get();
        } else {
            throw new RuntimeException("Unrecognized partitioner: " + pName);
        }
    }

    /** Reads the partitioner class name from the local system table. */
    @Override
    public String describePartitioner() throws ConnectionException {
        Statement q = QueryBuilder.select("partitioner").from("system", "local");
        ResultSet result = session.execute(q);
        com.datastax.driver.core.Row row = result.one();
        if (row == null) {
            throw new RuntimeException("Missing paritioner");
        }
        return row.getString(0);
    }

    @Override
    public List<TokenRange> describeRing() throws ConnectionException {
        return CqlRingDescriber.getInstance().getTokenRanges(session, false);
    }

    @Override
    public List<TokenRange> describeRing(String dc) throws ConnectionException {
        return CqlRingDescriber.getInstance().getTokenRanges(session, dc, null);
    }

    @Override
    public List<TokenRange> describeRing(String dc, String rack) throws ConnectionException {
        return CqlRingDescriber.getInstance().getTokenRanges(session, dc, rack);
    }

    @Override
    public List<TokenRange> describeRing(boolean cached) throws ConnectionException {
        return CqlRingDescriber.getInstance().getTokenRanges(session, cached);
    }

    /** Reads this keyspace's definition from system.schema_keyspaces. */
    @Override
    public KeyspaceDefinition describeKeyspace() throws ConnectionException {
        Statement query = QueryBuilder.select().from("system", "schema_keyspaces").where(eq("keyspace_name", keyspaceName));
        Row row = session.execute(query).one();
        if (row == null) {
            throw new RuntimeException("Keyspace not found: " + keyspaceName);
        }
        return (new CqlKeyspaceDefinitionImpl(session, row));
    }

    @Override
    public Properties getKeyspaceProperties() throws ConnectionException {
        try {
            return describeKeyspace().getProperties();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public Properties getColumnFamilyProperties(String columnFamily) throws ConnectionException {
        KeyspaceDefinition ksDef = this.describeKeyspace();
        ColumnFamilyDefinition cfDef = ksDef.getColumnFamily(columnFamily);
        if (cfDef == null)
            throw new NotFoundException(String.format("Column family '%s' in keyspace '%s' not found", columnFamily, getKeyspaceName()));
        try {
            return cfDef.getProperties();
        } catch (Exception e) {
            // Preserve the cause (it was previously dropped entirely).
            throw new RuntimeException(e);
        }
    }

    @Override
    public SerializerPackage getSerializerPackage(String cfName, boolean ignoreErrors) throws ConnectionException, UnknownComparatorException {
        ColumnFamilyDefinition cfDef = describeKeyspace().getColumnFamily(cfName);
        return new SerializerPackageImpl(cfDef, ignoreErrors);
    }

    @Override
    public MutationBatch prepareMutationBatch() {
        return new CqlMutationBatchImpl(ksContext, clock, astyanaxConfig.getDefaultWriteConsistencyLevel(), astyanaxConfig.getRetryPolicy());
    }

    @Override
    public <K, C> ColumnMutation prepareColumnMutation(ColumnFamily<K, C> columnFamily, K rowKey, C column) {
        return new CqlColumnMutationImpl<K,C>(ksContext, new CFQueryContext<K, C>(columnFamily, rowKey), column);
    }

    @Override
    public <K, C> ColumnFamilyQuery<K, C> prepareQuery(ColumnFamily<K, C> cf) {
        return new CqlColumnFamilyQueryImpl<K,C>(ksContext, cf);
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspace(Map<String, Object> options) throws ConnectionException {
        return new CqlKeyspaceDefinitionImpl(session, options).setName(keyspaceName).execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspace(Properties properties) throws ConnectionException {
        return new CqlKeyspaceDefinitionImpl(session, properties).setName(keyspaceName).execute();
    }

    /** Creates the keyspace and then registers the given column family definitions on it. */
    @SuppressWarnings("rawtypes")
    @Override
    public OperationResult<SchemaChangeResult> createKeyspace(Map<String, Object> options, Map<ColumnFamily, Map<String, Object>> cfs) throws ConnectionException {
        CqlKeyspaceDefinitionImpl ksDef = new CqlKeyspaceDefinitionImpl(session, options);
        if (ksDef.getName() == null) {
            ksDef.setName(keyspaceName);
        }
        OperationResult<SchemaChangeResult> result = ksDef.execute();
        for (ColumnFamily cf : cfs.keySet()) {
            CqlColumnFamilyDefinitionImpl cfDef = new CqlColumnFamilyDefinitionImpl(session, ksDef.getName(), cf, cfs.get(cf));
            ksDef.addColumnFamily(cfDef);
        }
        return result;
    }

    @Override
    public OperationResult<SchemaChangeResult> updateKeyspace(Map<String, Object> options) throws ConnectionException {
        return new CqlKeyspaceDefinitionImpl(session, options).setName(keyspaceName).alterKeyspace().execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> updateKeyspace(Properties props) throws ConnectionException {
        return new CqlKeyspaceDefinitionImpl(session, props).setName(keyspaceName).alterKeyspace().execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> dropKeyspace() throws ConnectionException {
        return new CqlOperationResultImpl<SchemaChangeResult>(session.execute("DROP KEYSPACE " + keyspaceName), null);
    }

    @Override
    public <K, C> OperationResult<Void> truncateColumnFamily(ColumnFamily<K, C> columnFamily) throws OperationException, ConnectionException {
        ResultSet result = session.execute("TRUNCATE " + keyspaceName + "." + columnFamily.getName());
        return new CqlOperationResultImpl<Void>(result, null);
    }

    @Override
    public OperationResult<Void> truncateColumnFamily(String columnFamily) throws ConnectionException {
        ResultSet result = session.execute("TRUNCATE " + keyspaceName + "." + columnFamily);
        return new CqlOperationResultImpl<Void>(result, null);
    }

    @Override
    public <K, C> OperationResult<SchemaChangeResult> createColumnFamily(ColumnFamily<K, C> columnFamily, Map<String, Object> options) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, keyspaceName, columnFamily, options).execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> createColumnFamily(Properties props) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, keyspaceName, props).execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> createColumnFamily(Map<String, Object> options) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, keyspaceName, options).execute();
    }

    @Override
    public <K, C> OperationResult<SchemaChangeResult> updateColumnFamily(ColumnFamily<K, C> columnFamily, Map<String, Object> options) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, keyspaceName, columnFamily, options).alterTable().execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(Properties props) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, keyspaceName, props).alterTable().execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> updateColumnFamily(Map<String, Object> options) throws ConnectionException {
        return new CqlColumnFamilyDefinitionImpl(session, keyspaceName, options).alterTable().execute();
    }

    @Override
    public OperationResult<SchemaChangeResult> dropColumnFamily(String columnFamilyName) throws ConnectionException {
        return new CqlOperationResultImpl<SchemaChangeResult>(session.execute("DROP TABLE " + keyspaceName + "." + columnFamilyName), null);
    }

    @Override
    public <K, C> OperationResult<SchemaChangeResult> dropColumnFamily(ColumnFamily<K, C> columnFamily) throws ConnectionException {
        return dropColumnFamily(columnFamily.getName());
    }

    @Override
    public Map<String, List<String>> describeSchemaVersions() throws ConnectionException {
        return new CqlSchemaVersionReader(session).exec();
    }

    @Override
    public CqlStatement prepareCqlStatement() {
        return new DirectCqlStatement(session);
    }

    @Override
    public ConnectionPool<?> getConnectionPool() throws ConnectionException {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public OperationResult<Void> testOperation(Operation<?, ?> operation) throws ConnectionException {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public OperationResult<Void> testOperation(Operation<?, ?> operation, RetryPolicy retry) throws ConnectionException {
        throw new UnsupportedOperationException("Operation not supported");
    }

    /**
     * Callback from the connection pool proxy with the seed hosts. Builds the
     * java-driver Cluster/Session on first invocation; ignored once a session exists.
     */
    @Override
    public void setHosts(Collection<Host> hosts, int port) {
        try {
            if (session != null) {
                LOG.info("Session has already been set, SKIPPING SET HOSTS");
                return;
            }

            List<Host> hostList = Lists.newArrayList(hosts);
            List<String> contactPoints = Lists.transform(hostList, new Function<Host, String>() {
                @Override
                public String apply(Host input) {
                    if (input != null) {
                        return input.getHostName();
                    }
                    return null;
                }
            });

            Configuration config = javaDriverConfig;
            // We really need a mechanism to easily override Configuration on the builder
            LOG.info("Using port: " + port);
            Cluster.Builder builder = Cluster.builder()
                    .addContactPoints(contactPoints.toArray(new String[0]))
                    .withPort(port)
                    .withLoadBalancingPolicy(config.getPolicies().getLoadBalancingPolicy())
                    .withReconnectionPolicy(config.getPolicies().getReconnectionPolicy())
                    .withRetryPolicy(config.getPolicies().getRetryPolicy())
                    .withCompression(config.getProtocolOptions().getCompression())
                    .withPoolingOptions(config.getPoolingOptions())
                    .withSocketOptions(config.getSocketOptions())
                    .withQueryOptions(config.getQueryOptions());

            if (config.getMetricsOptions() == null) {
                builder.withoutMetrics();
            } else if (!config.getMetricsOptions().isJMXReportingEnabled()) {
                builder.withoutJMXReporting();
            }

            cluster = builder.build();

            // Guard against disabled metrics (getMetrics() returns null) so an NPE
            // here can no longer abort session creation.
            if (!(this.cpMonitor instanceof JavaDriverConnectionPoolMonitorImpl)
                    && this.cluster.getMetrics() != null) {
                this.cluster.getMetrics().getRegistry().addListener((MetricRegistryListener) this.metricsRegListener);
            }

            LOG.info("Connecting to cluster");
            session = cluster.connect();
            LOG.info("Done connecting to cluster, session object created");
        } catch (Exception e) {
            // Single catch: the previous separate RuntimeException/Exception blocks were identical.
            LOG.error("Failed to set hosts for keyspace impl", e);
        }
    }

    @Override
    public void shutdown() {
        // cluster is only created in setHosts(); guard against shutdown before/without it.
        if (cluster != null) {
            cluster.close();
        }
    }

    /** Lightweight accessor bundle handed to query/mutation implementations. */
    public class KeyspaceContext {

        private final Keyspace ks;

        public KeyspaceContext(Keyspace keyspaceCtx) {
            this.ks = keyspaceCtx;
        }

        public Session getSession() {
            return session;
        }

        public String getKeyspace() {
            return keyspaceName;
        }

        public AstyanaxConfiguration getConfig() {
            return astyanaxConfig;
        }

        public KeyspaceTracerFactory getTracerFactory() {
            return tracerFactory;
        }

        public Keyspace getKeyspaceContext() {
            return ks;
        }

        public ConnectionPoolMonitor getConnectionPoolMonitor(){
            return cpMonitor;
        }
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(final Map<String, Object> options) throws ConnectionException {
        return createKeyspaceIfNotExists(new Callable<OperationResult<SchemaChangeResult>>() {
            @Override
            public OperationResult<SchemaChangeResult> call() throws Exception {
                return createKeyspace(options);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(final Properties properties) throws ConnectionException {
        return createKeyspaceIfNotExists(new Callable<OperationResult<SchemaChangeResult>>() {
            @Override
            public OperationResult<SchemaChangeResult> call() throws Exception {
                return createKeyspace(properties);
            }
        });
    }

    @Override
    public OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(final Map<String, Object> options, final Map<ColumnFamily, Map<String, Object>> cfs) throws ConnectionException {
        return createKeyspaceIfNotExists(new Callable<OperationResult<SchemaChangeResult>>() {
            @Override
            public OperationResult<SchemaChangeResult> call() throws Exception {
                return createKeyspace(options, cfs);
            }
        });
    }

    /**
     * Runs the supplied create callable only when the keyspace does not already exist.
     *
     * Bug fixes: the existence check previously (a) queried system.local, which has no
     * keyspace_name column (system.schema_keyspaces is the table this class uses
     * elsewhere, e.g. describeKeyspace()), and (b) was inverted — it returned the
     * no-op result when the keyspace was ABSENT and attempted creation when it was
     * present. Now: a matching row means the keyspace exists and a no-op result is
     * returned; otherwise the create callable runs.
     */
    private OperationResult<SchemaChangeResult> createKeyspaceIfNotExists(Callable<OperationResult<SchemaChangeResult>> createKeyspace) throws ConnectionException {
        // Check if keyspace exists
        ResultSet result = session.execute("select * from system.schema_keyspaces where keyspace_name = '" + keyspaceName + "'");
        List<Row> rows = result.all();
        if (rows != null && !rows.isEmpty()) {
            // Keyspace already present — nothing to do.
            return new OperationResultImpl<SchemaChangeResult>(Host.NO_HOST, new SchemaChangeResponseImpl().setSchemaId("no-op"), 0);
        }

        try {
            return createKeyspace.call();
        } catch (ConnectionException e) {
            throw e;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
| 8,073 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/CqlFamilyFactory.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.lang.NotImplementedException;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Configuration;
import com.netflix.astyanax.AstyanaxConfiguration;
import com.netflix.astyanax.AstyanaxContext;
import com.netflix.astyanax.AstyanaxTypeFactory;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.KeyspaceTracerFactory;
import com.netflix.astyanax.connectionpool.Connection;
import com.netflix.astyanax.connectionpool.ConnectionFactory;
import com.netflix.astyanax.connectionpool.ConnectionPool;
import com.netflix.astyanax.connectionpool.ConnectionPoolConfiguration;
import com.netflix.astyanax.connectionpool.ConnectionPoolMonitor;
import com.netflix.astyanax.connectionpool.ConnectionPoolProxy;
import com.netflix.astyanax.connectionpool.HostConnectionPool;
import com.netflix.astyanax.connectionpool.exceptions.ThrottledException;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolType;
import com.netflix.astyanax.connectionpool.impl.CountingConnectionPoolMonitor;
/**
* Simple impl of {@link AstyanaxTypeFactory} that acts as the bridge between the AstyanaxContext setup and all the java driver setup.
* The main link is the {@link ConnectionPoolProxy} class which gives us access to the {@link ConnectionPoolConfiguration} object.
* The class expects a {@link JavaDriverConnectionPoolConfigurationImpl} based impl which encapsulates all the config that is required
* by java driver.
*
* Thus this bridge is built with the intention to let the outside caller to directly use the {@link Configuration} object and inject it
* using {@link AstyanaxContext}.
*
* Restating, the simple flow that enables the bridge is
* 1. Construct the {@link Configuration} object with all the desired options for configuring the java driver.
* 2. Construct the {@link JavaDriverConnectionPoolConfigurationImpl} object and pass the java driver configuration object to it.
* 3. Set the {@link ConnectionPoolConfiguration} created in step 2. on the {@link AstyanaxContext} builder object when creating the Astyanax {@link Keyspace}
*
* See {@link AstyanaxContext} for more details on how to do this.
*
* @author poberai
*
*/
/**
 * Singleton {@link AstyanaxTypeFactory} bridging the Astyanax context setup to
 * the DataStax java driver. Requires the supplied connection pool to be a
 * {@link ConnectionPoolProxy} (i.e. ConnectionPoolType.JAVA_DRIVER).
 */
public class CqlFamilyFactory implements AstyanaxTypeFactory<Cluster> {

    private static final CqlFamilyFactory Instance = new CqlFamilyFactory();
    // Process-wide flag controlling column batch updates (static, not per-factory).
    private static final AtomicBoolean BatchColumnUpdates = new AtomicBoolean(false);

    public static CqlFamilyFactory getInstance() {
        return Instance;
    }

    /**
     * Creates a {@link Keyspace} backed by the java driver and registers it as a
     * listener on the connection pool proxy.
     *
     * @throws RuntimeException if {@code cp} is not a {@link ConnectionPoolProxy}
     */
    @Override
    public Keyspace createKeyspace(String ksName, ConnectionPool<Cluster> cp, AstyanaxConfiguration asConfig, KeyspaceTracerFactory tracerFactory) {

        ConnectionPoolProxy<?> cpProxy = requireProxy(cp);
        ConnectionPoolConfiguration jdConfig = getOrCreateJDConfiguration(asConfig, cpProxy.getConnectionPoolConfiguration());
        ConnectionPoolMonitor monitor = getOrCreateMonitor(cpProxy);

        CqlKeyspaceImpl keyspace = new CqlKeyspaceImpl(ksName, asConfig, tracerFactory, jdConfig, monitor);
        cpProxy.addListener(keyspace);

        return keyspace;
    }

    /**
     * Creates a {@link com.netflix.astyanax.Cluster} backed by the java driver and
     * registers it as a listener on the connection pool proxy.
     *
     * @throws RuntimeException if {@code cp} is not a {@link ConnectionPoolProxy}
     */
    @Override
    public com.netflix.astyanax.Cluster createCluster(ConnectionPool<Cluster> cp, AstyanaxConfiguration asConfig, KeyspaceTracerFactory tracerFactory) {

        ConnectionPoolProxy<?> cpProxy = requireProxy(cp);
        ConnectionPoolConfiguration jdConfig = getOrCreateJDConfiguration(asConfig, cpProxy.getConnectionPoolConfiguration());
        ConnectionPoolMonitor monitor = getOrCreateMonitor(cpProxy);

        CqlClusterImpl cluster = new CqlClusterImpl(asConfig, tracerFactory, jdConfig, monitor);
        ((ConnectionPoolProxy<Cluster>)cp).addListener(cluster);

        return cluster;
    }

    // Validates that the supplied pool is the java-driver proxy type; shared by
    // createKeyspace() and createCluster() (previously duplicated inline).
    private static ConnectionPoolProxy<?> requireProxy(ConnectionPool<Cluster> cp) {
        if (!(cp instanceof ConnectionPoolProxy)) {
            throw new RuntimeException("Cannot use CqlFamilyFactory with a connection pool type other than ConnectionPoolType.JAVA_DRIVER");
        }
        return (ConnectionPoolProxy<?>) cp;
    }

    // Returns the proxy's monitor when it is already a java-driver monitor impl,
    // otherwise substitutes a fresh JavaDriverConnectionPoolMonitorImpl.
    private static ConnectionPoolMonitor getOrCreateMonitor(ConnectionPoolProxy<?> cpProxy) {
        ConnectionPoolMonitor monitor = cpProxy.getConnectionPoolMonitor();
        if (monitor == null || !(monitor instanceof JavaDriverConnectionPoolMonitorImpl)) {
            monitor = new JavaDriverConnectionPoolMonitorImpl();
        }
        return monitor;
    }

    /**
     * Returns a carrier factory that only transports configuration; the java
     * driver manages connections itself, so this never creates connections.
     */
    @Override
    public ConnectionFactory<Cluster> createConnectionFactory(
            AstyanaxConfiguration asConfig,
            ConnectionPoolConfiguration cfConfig,
            KeyspaceTracerFactory tracerFactory,
            ConnectionPoolMonitor monitor) {

        CqlBasedConnectionFactory<Cluster> factory = new CqlBasedConnectionFactory<Cluster>();
        factory.asConfig = asConfig;
        factory.cfConfig = cfConfig;
        factory.tracerFactory = tracerFactory;
        factory.monitor = monitor;
        return factory;
    }

    @SuppressWarnings("unused")
    private static class CqlBasedConnectionFactory<T> implements ConnectionFactory<T> {

        protected AstyanaxConfiguration asConfig;
        protected ConnectionPoolConfiguration cfConfig;
        protected KeyspaceTracerFactory tracerFactory;
        protected ConnectionPoolMonitor monitor;

        @Override
        public Connection<T> createConnection(HostConnectionPool<T> pool) throws ThrottledException {
            // Never called: the java driver owns connection management.
            throw new NotImplementedException();
        }
    }

    /**
     * Enables/disables batching of column updates. Note this flips process-wide
     * (static) state, not per-factory state.
     */
    public CqlFamilyFactory enableColumnBatchUpdates(boolean condition) {
        BatchColumnUpdates.set(condition);
        return this;
    }

    public static boolean batchColumnUpdates() {
        return BatchColumnUpdates.get();
    }

    // Reuses the supplied config when it is already a fully set up java-driver
    // config; otherwise derives one from the Astyanax/connection-pool config.
    private ConnectionPoolConfiguration getOrCreateJDConfiguration(AstyanaxConfiguration asConfig, ConnectionPoolConfiguration cpConfig) {

        if (cpConfig instanceof JavaDriverConnectionPoolConfigurationImpl) {
            JavaDriverConnectionPoolConfigurationImpl jdConfig = (JavaDriverConnectionPoolConfigurationImpl) cpConfig;
            if (jdConfig.getJavaDriverConfig() != null) {
                return jdConfig; // Java Driver config has already been set up.
            }
        }
        // Else create Java Driver Config from AstyanaxConfiguration and ConnectionPoolConfiguration and return that.
        return new JavaDriverConnectionPoolConfigurationImpl(new JavaDriverConfigBridge(asConfig, cpConfig).getJDConfig());
    }
}
| 8,074 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/direct/DirectCqlPreparedStatement.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.direct;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlOperationResultImpl;
import com.netflix.astyanax.cql.CqlPreparedStatement;
import com.netflix.astyanax.cql.CqlStatementResult;
import com.netflix.astyanax.cql.util.AsyncOperationResult;
/**
* Impl of {@link CqlPreparedStatement} using java driver.
* it manages a {@link Session} object that is used when actually performing the real query with the
* driver underneath.
*
* @author poberai
*/
/**
 * {@link CqlPreparedStatement} implementation backed by the java driver.
 * Bind values are accumulated in call order by the with* methods and bound to
 * the underlying {@link PreparedStatement} at execution time.
 *
 * @author poberai
 */
public class DirectCqlPreparedStatement implements CqlPreparedStatement {

    private final Session session;
    private final PreparedStatement pStmt;
    private final List<Object> bindValues = new ArrayList<Object>();

    public DirectCqlPreparedStatement(Session session, PreparedStatement pStmt) {
        this.session = session;
        this.pStmt = pStmt;
    }

    /** Records one bind value and returns this statement for chaining. */
    private CqlPreparedStatement record(Object value) {
        bindValues.add(value);
        return this;
    }

    // Binds the accumulated values, in insertion order, to the prepared statement.
    private BoundStatement bindAll() {
        return pStmt.bind(bindValues.toArray());
    }

    // Adapts a driver ResultSet into the Astyanax operation-result shape.
    private static OperationResult<CqlStatementResult> wrap(ResultSet rs) {
        CqlStatementResult result = new DirectCqlStatementResultImpl(rs);
        return new CqlOperationResultImpl<CqlStatementResult>(rs, result);
    }

    /** Executes the statement synchronously with the currently recorded values. */
    @Override
    public OperationResult<CqlStatementResult> execute() throws ConnectionException {
        ResultSet resultSet = session.execute(bindAll());
        return wrap(resultSet);
    }

    /** Executes the statement asynchronously with the currently recorded values. */
    @Override
    public ListenableFuture<OperationResult<CqlStatementResult>> executeAsync() throws ConnectionException {
        ResultSetFuture rsFuture = session.executeAsync(bindAll());
        return new AsyncOperationResult<CqlStatementResult>(rsFuture) {
            @Override
            public OperationResult<CqlStatementResult> getOperationResult(ResultSet rs) {
                return wrap(rs);
            }
        };
    }

    @Override
    public <V> CqlPreparedStatement withByteBufferValue(V value, Serializer<V> serializer) {
        // Note: the serializer is intentionally unused; the raw value is bound as-is.
        return record(value);
    }

    @Override
    public CqlPreparedStatement withValue(ByteBuffer value) {
        return record(value);
    }

    @Override
    public CqlPreparedStatement withValues(List<ByteBuffer> values) {
        bindValues.addAll(values);
        return this;
    }

    @Override
    public CqlPreparedStatement withStringValue(String value) {
        return record(value);
    }

    @Override
    public CqlPreparedStatement withIntegerValue(Integer value) {
        return record(value);
    }

    @Override
    public CqlPreparedStatement withBooleanValue(Boolean value) {
        return record(value);
    }

    @Override
    public CqlPreparedStatement withDoubleValue(Double value) {
        return record(value);
    }

    @Override
    public CqlPreparedStatement withLongValue(Long value) {
        return record(value);
    }

    @Override
    public CqlPreparedStatement withFloatValue(Float value) {
        return record(value);
    }

    @Override
    public CqlPreparedStatement withShortValue(Short value) {
        return record(value);
    }

    @Override
    public CqlPreparedStatement withUUIDValue(UUID value) {
        return record(value);
    }

    /** Exposes the wrapped java-driver prepared statement. */
    public PreparedStatement getInnerPreparedStatement() {
        return pStmt;
    }
}
| 8,075 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/direct/DirectCqlStatementResultImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.direct;
import java.util.List;
import com.datastax.driver.core.ResultSet;
import com.netflix.astyanax.cql.CqlSchema;
import com.netflix.astyanax.cql.CqlStatementResult;
import com.netflix.astyanax.cql.reads.model.CqlRowListImpl;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.Rows;
/**
* Impl of {@link CqlStatementResult} that parses the result set from java driver query operations.
*
* @author poberai
*/
/**
 * {@link CqlStatementResult} over a java-driver {@link ResultSet} produced by
 * a direct CQL execution.
 *
 * @author poberai
 */
public class DirectCqlStatementResultImpl implements CqlStatementResult {

    private final ResultSet rs;

    public DirectCqlStatementResultImpl(ResultSet rs) {
        this.rs = rs;
    }

    /**
     * Interprets the first row's first column as a count.
     * NOTE(review): assumes the result has at least one row — confirm callers
     * only invoke this for COUNT-style queries.
     */
    @Override
    public long asCount() {
        com.datastax.driver.core.Row first = rs.one();
        return first.getLong(0);
    }

    /** Materializes all remaining rows into an Astyanax {@link Rows} view. */
    @Override
    public <K, C> Rows<K, C> getRows(ColumnFamily<K, C> columnFamily) {
        List<com.datastax.driver.core.Row> driverRows = rs.all();
        return new CqlRowListImpl<K, C>(driverRows, columnFamily);
    }

    @Override
    public CqlSchema getSchema() {
        return new DirectCqlSchema(rs);
    }

    /** Thin schema wrapper exposing the raw driver result set. */
    public static class DirectCqlSchema implements CqlSchema {

        private final ResultSet rs;

        public DirectCqlSchema(ResultSet result) {
            this.rs = result;
        }

        public ResultSet getResultSet() {
            return rs;
        }
    }
}
| 8,076 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/direct/DirectCqlStatement.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.direct;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlOperationResultImpl;
import com.netflix.astyanax.cql.CqlPreparedStatement;
import com.netflix.astyanax.cql.CqlStatement;
import com.netflix.astyanax.cql.CqlStatementResult;
import com.netflix.astyanax.cql.util.AsyncOperationResult;
import com.netflix.astyanax.cql.util.ConsistencyLevelTransform;
import com.netflix.astyanax.model.ConsistencyLevel;
/**
* Impl of {@link CqlStatement} using java driver.
* it manages a {@link Session} object that is used when actually performing the real query with the
* driver underneath.
*
* @author poberai
*/
/**
 * Impl of {@link CqlStatement} using the java driver; holds a {@link Session}
 * used to run the configured CQL with the configured consistency level.
 *
 * @author poberai
 */
public class DirectCqlStatement implements CqlStatement {

    // Page size applied to synchronous executions (hoisted from an inline magic number).
    private static final int SYNC_FETCH_SIZE = 100;

    private final Session session;
    private ConsistencyLevel cLevel = ConsistencyLevel.CL_ONE; // the default cl
    private String cqlQuery;

    public DirectCqlStatement(Session session) {
        this.session = session;
    }

    @Override
    public CqlStatement withConsistencyLevel(ConsistencyLevel cl) {
        this.cLevel = cl;
        return this;
    }

    @Override
    public CqlStatement withCql(String cql) {
        this.cqlQuery = cql;
        return this;
    }

    /** Executes the configured CQL synchronously. */
    @Override
    public OperationResult<CqlStatementResult> execute() throws ConnectionException {
        Statement q = new SimpleStatement(cqlQuery);
        q.setConsistencyLevel(ConsistencyLevelTransform.getConsistencyLevel(cLevel));
        // NOTE(review): the fetch size is applied only on this synchronous path;
        // executeAsync() leaves the driver default — confirm the asymmetry is intentional.
        q.setFetchSize(SYNC_FETCH_SIZE);
        ResultSet resultSet = session.execute(q);
        CqlStatementResult result = new DirectCqlStatementResultImpl(resultSet);
        return new CqlOperationResultImpl<CqlStatementResult>(resultSet, result);
    }

    /** Executes the configured CQL asynchronously. */
    @Override
    public ListenableFuture<OperationResult<CqlStatementResult>> executeAsync() throws ConnectionException {
        Statement q = new SimpleStatement(cqlQuery);
        q.setConsistencyLevel(ConsistencyLevelTransform.getConsistencyLevel(cLevel));
        ResultSetFuture rsFuture = session.executeAsync(q);
        return new AsyncOperationResult<CqlStatementResult>(rsFuture) {
            @Override
            public OperationResult<CqlStatementResult> getOperationResult(ResultSet rs) {
                CqlStatementResult result = new DirectCqlStatementResultImpl(rs);
                return new CqlOperationResultImpl<CqlStatementResult>(rs, result);
            }
        };
    }

    /** Prepares the configured CQL, carrying the consistency level onto the prepared form. */
    @Override
    public CqlPreparedStatement asPreparedStatement() {
        PreparedStatement pStmt = session.prepare(cqlQuery);
        pStmt.setConsistencyLevel(ConsistencyLevelTransform.getConsistencyLevel(cLevel));
        return new DirectCqlPreparedStatement(session, pStmt);
    }
}
| 8,077 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/util/AsyncOperationResult.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.util;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.connectionpool.OperationResult;
/**
 * Impl for ListenableFuture<OperationResult<V>> that wraps the
 * {@link ResultSetFuture} from the java driver for async operations.
 *
 * @author poberai
 *
 * @param <V>
 */
public abstract class AsyncOperationResult<V> implements ListenableFuture<OperationResult<V>> {

    // Underlying driver future; all Future operations delegate to it.
    // Made final: it is assigned once in the constructor and never reassigned.
    private final ResultSetFuture rsFuture;

    public AsyncOperationResult(ResultSetFuture rsFuture) {
        this.rsFuture = rsFuture;
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
        return rsFuture.cancel(mayInterruptIfRunning);
    }

    @Override
    public boolean isCancelled() {
        return rsFuture.isCancelled();
    }

    @Override
    public boolean isDone() {
        return rsFuture.isDone();
    }

    @Override
    public OperationResult<V> get() throws InterruptedException, ExecutionException {
        return getOperationResult(rsFuture.get());
    }

    // NOTE: the method name's misspelling ("Uninterruptably") pre-dates this
    // review and is kept for API compatibility with existing callers.
    public OperationResult<V> getUninterruptably() throws InterruptedException, ExecutionException {
        return getOperationResult(rsFuture.getUninterruptibly());
    }

    @Override
    public OperationResult<V> get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
        return getOperationResult(rsFuture.get(timeout, unit));
    }

    @Override
    public void addListener(Runnable listener, Executor executor) {
        rsFuture.addListener(listener, executor);
    }

    /** Converts the completed driver ResultSet into the operation result. */
    public abstract OperationResult<V> getOperationResult(ResultSet rs);
}
| 8,078 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/util/DataTypeMapping.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.util;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.Row;
/**
 * Reads dynamically typed column values out of java-driver rows, dispatching
 * on the column's {@link DataType} to pick the matching accessor.
 */
public class DataTypeMapping {

    /**
     * Returns the value of {@code columnName} from {@code row}, read with the
     * accessor matching {@code dataType}. Collection types other than MAP and
     * custom types are unsupported and raise UnsupportedOperationException.
     */
    public static <T> Object getDynamicColumn(Row row, String columnName, DataType dataType) {
        switch (dataType.getName()) {
        case ASCII:
        case TEXT:
        case VARCHAR:
            return row.getString(columnName);
        case BIGINT:
        case COUNTER:
        case VARINT:
            return row.getLong(columnName);
        case BLOB:
            return row.getBytes(columnName);
        case BOOLEAN:
            return row.getBool(columnName);
        case DECIMAL:
            return row.getDecimal(columnName);
        case DOUBLE:
            return row.getDouble(columnName);
        case FLOAT:
            return row.getFloat(columnName);
        case INET:
            return row.getInet(columnName);
        case INT:
            return row.getInt(columnName);
        case TIMESTAMP:
            return row.getDate(columnName);
        case UUID:
        case TIMEUUID:
            return row.getUUID(columnName);
        case MAP:
            return row.getMap(columnName, Object.class, Object.class);
        case LIST:
        case SET:
        case CUSTOM:
            throw new UnsupportedOperationException("Collection objects not supported for column: " + columnName);
        default:
            throw new UnsupportedOperationException("Unrecognized object for column: " + columnName);
        }
    }
}
| 8,079 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/util/ConsistencyLevelTransform.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.util;
import com.datastax.driver.core.ConsistencyLevel;
/**
* Utility class for transforming Astyanx consistency level to java driver consistency level.
*
* @author poberai
*
*/
/**
 * Translates Astyanax consistency levels into java-driver consistency levels.
 *
 * @author poberai
 */
public class ConsistencyLevelTransform {

    /**
     * Maps the Astyanax level onto the driver equivalent; throws
     * RuntimeException for levels that have no mapping here.
     */
    public static ConsistencyLevel getConsistencyLevel(com.netflix.astyanax.model.ConsistencyLevel level) {
        switch (level) {
        case CL_ONE:
            return ConsistencyLevel.ONE;
        case CL_ALL:
            return ConsistencyLevel.ALL;
        case CL_ANY:
            return ConsistencyLevel.ANY;
        case CL_QUORUM:
            return ConsistencyLevel.QUORUM;
        case CL_EACH_QUORUM:
            return ConsistencyLevel.EACH_QUORUM;
        case CL_LOCAL_QUORUM:
            return ConsistencyLevel.LOCAL_QUORUM;
        default:
            throw new RuntimeException("Consistency level not supported");
        }
    }
}
| 8,080 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/util/CqlTypeMapping.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.util;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.NotImplementedException;
import com.datastax.driver.core.Row;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer.ComponentSerializer;
import com.netflix.astyanax.serializers.ComparatorType;
/**
* Helpful utility that maps the different data types and helps translate to and from Astyanax and java driver objects.
*
* @author poberai
*
*/
public class CqlTypeMapping {

    // Comparator class/type name (e.g. "UTF8Type") -> CQL3 type name (e.g. "TEXT").
    private static Map<String, String> comparatorToCql3Type = new HashMap<String, String>();
    // CQL3 type name -> comparator enum (reverse lookup of the map above).
    private static Map<String, ComparatorType> cqlToComparatorType = new HashMap<String, ComparatorType>();

    static {
        initComparatorTypeMap();
    }

    /**
     * Populates both lookup maps from a single comparator -> CQL3 name table.
     * Each comparator is registered under both its class name and its type name.
     */
    private static void initComparatorTypeMap() {

        Map<ComparatorType, String> tmpMap = new HashMap<ComparatorType, String>();

        tmpMap.put(ComparatorType.ASCIITYPE, "ASCII");
        tmpMap.put(ComparatorType.BYTESTYPE, "BLOB");
        tmpMap.put(ComparatorType.BOOLEANTYPE, "BOOLEAN");
        tmpMap.put(ComparatorType.COUNTERTYPE, "COUNTER");
        tmpMap.put(ComparatorType.DECIMALTYPE, "DECIMAL");
        tmpMap.put(ComparatorType.DOUBLETYPE, "DOUBLE");
        tmpMap.put(ComparatorType.FLOATTYPE, "FLOAT");
        tmpMap.put(ComparatorType.LONGTYPE, "BIGINT");
        tmpMap.put(ComparatorType.INT32TYPE, "INT");
        tmpMap.put(ComparatorType.UTF8TYPE, "TEXT");
        tmpMap.put(ComparatorType.DATETYPE, "TIMESTAMP");
        tmpMap.put(ComparatorType.UUIDTYPE, "UUID");
        tmpMap.put(ComparatorType.INTEGERTYPE, "VARINT");
        tmpMap.put(ComparatorType.TIMEUUIDTYPE, "TIMEUUID");

        for (ComparatorType cType : tmpMap.keySet()) {
            String value = tmpMap.get(cType);
            comparatorToCql3Type.put(cType.getClassName(), value);
            comparatorToCql3Type.put(cType.getTypeName(), value);
            cqlToComparatorType.put(value, cType);
        }
    }

    /** Returns the comparator for a CQL3 type name; throws if unrecognized. */
    public static ComparatorType getComparatorFromCqlType(String cqlTypeString) {
        ComparatorType value = cqlToComparatorType.get(cqlTypeString);
        if (value == null) {
            throw new RuntimeException("Unrecognized cql type: " + cqlTypeString);
        }
        return value;
    }

    /** Returns the CQL3 type name for a comparator class/type name; throws if unknown. */
    public static String getCqlTypeFromComparator(String comparatorString) {
        String value = comparatorToCql3Type.get(comparatorString);
        if (value == null) {
            throw new RuntimeException("Could not find comparator type string: " + comparatorString);
        }
        return value;
    }

    /**
     * Reads a column value by NAME using the accessor implied by the serializer's
     * comparator type. Composite columns are reconstructed via the CF definition.
     */
    private static <T> Object getDynamicColumn(Row row, Serializer<T> serializer, String columnName, ColumnFamily<?,?> cf) {

        ComparatorType comparatorType = serializer.getComparatorType();

        switch(comparatorType) {

        case ASCIITYPE:
            return row.getString(columnName);
        case BYTESTYPE:
            return row.getBytes(columnName);
        case INTEGERTYPE:
            // NOTE(review): INTEGERTYPE maps to VARINT in CQL3 above but is read
            // here as a 32-bit int — confirm this is intentional.
            return row.getInt(columnName);
        case INT32TYPE:
            return row.getInt(columnName);
        case DECIMALTYPE:
            // NOTE(review): decimal values are read with getFloat, not getDecimal —
            // verify that the precision loss is acceptable.
            return row.getFloat(columnName);
        case LEXICALUUIDTYPE:
            return row.getUUID(columnName);
        case LOCALBYPARTITIONERTYPE:
            return row.getBytes(columnName);
        case LONGTYPE:
            return row.getLong(columnName);
        case TIMEUUIDTYPE:
            return row.getUUID(columnName);
        case UTF8TYPE:
            return row.getString(columnName);
        case COMPOSITETYPE:
            // Composite columns are rebuilt field-by-field from clustering keys.
            return getCompositeColumn(row, (AnnotatedCompositeSerializer<?>) serializer, cf);
        case DYNAMICCOMPOSITETYPE:
            throw new NotImplementedException();
        case UUIDTYPE:
            return row.getUUID(columnName);
        case COUNTERTYPE:
            return row.getLong(columnName);
        case DOUBLETYPE:
            return row.getDouble(columnName);
        case FLOATTYPE:
            return row.getFloat(columnName);
        case BOOLEANTYPE:
            return row.getBool(columnName);
        case DATETYPE:
            return row.getDate(columnName);
        default:
            throw new RuntimeException("Could not recognize comparator type: " + comparatorType.getTypeName());
        }
    }

    /**
     * Reads a column value by INDEX using the accessor implied by the serializer's
     * comparator type. Mirrors the by-name overload above; the duplication exists
     * because the driver's Row accessors are overloaded on name vs. index.
     */
    public static <T> Object getDynamicColumn(Row row, Serializer<T> serializer, int columnIndex, ColumnFamily<?,?> cf) {

        ComparatorType comparatorType = serializer.getComparatorType();

        switch(comparatorType) {

        case ASCIITYPE:
            return row.getString(columnIndex);
        case BYTESTYPE:
            return row.getBytes(columnIndex);
        case INTEGERTYPE:
            return row.getInt(columnIndex);
        case INT32TYPE:
            return row.getInt(columnIndex);
        case DECIMALTYPE:
            return row.getFloat(columnIndex);
        case LEXICALUUIDTYPE:
            return row.getUUID(columnIndex);
        case LOCALBYPARTITIONERTYPE:
            return row.getBytes(columnIndex);
        case LONGTYPE:
            return row.getLong(columnIndex);
        case TIMEUUIDTYPE:
            return row.getUUID(columnIndex);
        case UTF8TYPE:
            return row.getString(columnIndex);
        case COMPOSITETYPE:
            return getCompositeColumn(row, (AnnotatedCompositeSerializer<?>) serializer, cf);
        case DYNAMICCOMPOSITETYPE:
            throw new NotImplementedException();
        case UUIDTYPE:
            return row.getUUID(columnIndex);
        case COUNTERTYPE:
            return row.getLong(columnIndex);
        case DOUBLETYPE:
            return row.getDouble(columnIndex);
        case FLOATTYPE:
            return row.getFloat(columnIndex);
        case BOOLEANTYPE:
            return row.getBool(columnIndex);
        case DATETYPE:
            return row.getDate(columnIndex);
        default:
            throw new RuntimeException("Could not recognize comparator type: " + comparatorType.getTypeName());
        }
    }

    /**
     * Rebuilds an annotated composite object: instantiates the target class, then
     * for each component serializer reads the matching clustering-key column from
     * the row and writes it into the corresponding field.
     */
    private static Object getCompositeColumn(Row row, AnnotatedCompositeSerializer<?> compositeSerializer, ColumnFamily<?,?> cf) {

        Class<?> clazz = compositeSerializer.getClazz();
        Object obj = null;
        try {
            // Requires a public no-arg constructor on the composite class.
            obj = clazz.newInstance();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        CqlColumnFamilyDefinitionImpl cfDef = (CqlColumnFamilyDefinitionImpl) cf.getColumnFamilyDefinition();
        // Clustering keys are assumed to be in the same order as the composite's components.
        List<ColumnDefinition> cluseringKeyList = cfDef.getClusteringKeyColumnDefinitionList();

        int componentIndex = 0;
        for (ComponentSerializer<?> component : compositeSerializer.getComponents()) {
            Object value = getDynamicColumn(row, component.getSerializer(), cluseringKeyList.get(componentIndex).getName(), cf);
            try {
                component.setFieldValueDirectly(obj, value);
                componentIndex++;
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        return obj;
    }
}
| 8,081 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/util/CFQueryContext.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.util;
import java.nio.ByteBuffer;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
/**
 * Per-query context: holds the target column family, the (possibly serialized)
 * row key, and the retry policy / consistency level to apply to one query.
 */
public class CFQueryContext<K,C> {

    private final ColumnFamily<K,C> columnFamily;
    private final Object rowKey;
    private RetryPolicy retryPolicy;
    private ConsistencyLevel clLevel;

    public CFQueryContext(ColumnFamily<K,C> cf) {
        this(cf, null, null, null);
    }

    public CFQueryContext(ColumnFamily<K,C> cf, K rKey) {
        this(cf, rKey, null, null);
    }

    public CFQueryContext(ColumnFamily<K,C> cf, K rKey, RetryPolicy retry, ConsistencyLevel cl) {
        this.columnFamily = cf;
        // NOTE(review): checkRowKey is a public (overridable) method invoked from
        // the constructor; confirm no subclass overrides it before tightening.
        this.rowKey = checkRowKey(rKey);
        this.retryPolicy = retry;
        this.clLevel = cl;
    }

    public ColumnFamily<K, C> getColumnFamily() {
        return columnFamily;
    }

    /** Normalized row key: a ByteBuffer for BytesType validators, else the raw key. */
    public Object getRowKey() {
        return rowKey;
    }

    public void setRetryPolicy(RetryPolicy retry) {
        this.retryPolicy = retry;
    }

    public RetryPolicy getRetryPolicy() {
        return retryPolicy;
    }

    public void setConsistencyLevel(ConsistencyLevel cl) {
        this.clLevel = cl;
    }

    public ConsistencyLevel getConsistencyLevel() {
        return clLevel;
    }

    /**
     * Normalizes the row key: when the CF's key validator is BytesType, the key
     * is serialized to a ByteBuffer (unless it already is one); otherwise the
     * key is returned unchanged. Null keys pass through as null.
     */
    public Object checkRowKey(K rKey) {

        if (rKey == null) {
            return null;
        }

        CqlColumnFamilyDefinitionImpl cfDef = (CqlColumnFamilyDefinitionImpl) columnFamily.getColumnFamilyDefinition();
        if (cfDef.getKeyValidationClass().contains("BytesType")) {
            // Row key is of type bytes. Convert row key to bytebuffer if needed
            if (rKey instanceof ByteBuffer) {
                return rKey;
            }
            return columnFamily.getKeySerializer().toByteBuffer(rKey);
        }
        // else just return the row key as is
        return rKey;
    }

    // Fixed: toString now carries @Override like the other overridden methods.
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("CF=").append(columnFamily.getName());
        sb.append(" RowKey: ").append(rowKey);
        sb.append(" RetryPolicy: ").append(retryPolicy);
        sb.append(" ConsistencyLevel: ").append(clLevel);
        return sb.toString();
    }
}
| 8,082 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/CqlColumnListMutationImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.base.Preconditions;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnPath;
import com.netflix.astyanax.model.ConsistencyLevel;
@SuppressWarnings("deprecation")
public class CqlColumnListMutationImpl<K, C> extends AbstractColumnListMutationImpl<C> {
public enum ColListMutationType {
RowDelete, ColumnsUpdate, CounterColumnsUpdate;
}
private ColListMutationType type = ColListMutationType.ColumnsUpdate;
private final KeyspaceContext ksContext;
private final CFQueryContext<K,C> cfContext;
private final CqlColumnFamilyDefinitionImpl cfDef;
private final List<CqlColumnMutationImpl<K, C>> mutationList = new ArrayList<CqlColumnMutationImpl<K,C>>();
private AtomicReference<Boolean> deleteRow = new AtomicReference<Boolean>(false);
public CqlColumnListMutationImpl(KeyspaceContext ksCtx, ColumnFamily<K,C> cf, K rowKey, ConsistencyLevel level, Long timestamp) {
super(timestamp);
this.ksContext = ksCtx;
this.cfContext = new CFQueryContext<K,C>(cf, rowKey, null, level);
this.cfDef = (CqlColumnFamilyDefinitionImpl) cf.getColumnFamilyDefinition();
}
@Override
public <V> ColumnListMutation<C> putColumn(C columnName, V value, Serializer<V> valueSerializer, Integer ttl) {
checkColumnName(columnName);
CqlColumnMutationImpl<K,C> mutation = new CqlColumnMutationImpl<K,C>(ksContext, cfContext, columnName);
mutation.putValue(value, valueSerializer, getActualTTL(ttl));
if (this.getTimestamp() != null) {
mutation.withTimestamp(this.getTimestamp());
}
mutationList.add(mutation);
return this;
}
@Override
public <SC> ColumnListMutation<SC> withSuperColumn(ColumnPath<SC> superColumnPath) {
throw new UnsupportedOperationException("Operation not supported");
}
@Override
public ColumnListMutation<C> putEmptyColumn(C columnName, Integer ttl) {
checkColumnName(columnName);
Integer theTTL = super.defaultTTL.get();
if (ttl != null) {
theTTL = ttl;
}
CqlColumnMutationImpl<K,C> mutation = new CqlColumnMutationImpl<K,C>(ksContext, cfContext, columnName);
mutation.putEmptyColumn(theTTL);
if (this.getTimestamp() != null) {
mutation.withTimestamp(this.getTimestamp());
}
mutationList.add(mutation);
return this;
}
@Override
public ColumnListMutation<C> incrementCounterColumn(C columnName, long amount) {
checkColumnName(columnName);
type = ColListMutationType.CounterColumnsUpdate;
CqlColumnMutationImpl<K,C> mutation = new CqlColumnMutationImpl<K,C>(ksContext, cfContext, columnName);
mutation.incrementCounterColumn(amount);
mutationList.add(mutation);
return this;
}
@Override
public ColumnListMutation<C> deleteColumn(C columnName) {
checkColumnName(columnName);
CqlColumnMutationImpl<K,C> mutation = new CqlColumnMutationImpl<K,C>(ksContext, cfContext, columnName);
mutation.deleteColumn();
if (this.getTimestamp() != null) {
mutation.withTimestamp(this.getTimestamp());
}
mutationList.add(mutation);
return this;
}
@Override
public ColumnListMutation<C> delete() {
deleteRow.set(true);
type = ColListMutationType.RowDelete;
return this;
}
@Override
public ColumnListMutation<C> setDefaultTtl(Integer newTTL) {
if (super.defaultTTL.get() == null) {
defaultTTL.set(newTTL);
return this;
}
if (!(defaultTTL.equals(newTTL))) {
throw new RuntimeException("Default TTL has already been set, cannot reset");
}
return this;
}
public void mergeColumnListMutation(CqlColumnListMutationImpl<?, ?> colListMutation) {
for (CqlColumnMutationImpl<?,?> colMutation : colListMutation.getMutationList()) {
this.mutationList.add((CqlColumnMutationImpl<K, C>) colMutation);
}
}
public List<CqlColumnMutationImpl<K,C>> getMutationList() {
return mutationList;
}
public ColumnListMutation<C> putColumnWithGenericValue(C columnName, Object value, Integer ttl) {
Preconditions.checkArgument(columnName != null, "Column Name must not be null");
CqlColumnMutationImpl<K,C> mutation = new CqlColumnMutationImpl<K,C>(ksContext, cfContext, columnName);
mutation.putGenericValue(value, getActualTTL(ttl));
mutationList.add(mutation);
return this;
}
private Integer getActualTTL(Integer overrideTTL) {
Integer theTTL = super.defaultTTL.get();
if (overrideTTL != null) {
theTTL = overrideTTL;
}
return theTTL;
}
private void checkColumnName(C columnName) {
Preconditions.checkArgument(columnName != null, "Column Name must not be null");
if (columnName instanceof String) {
Preconditions.checkArgument(!((String)columnName).isEmpty(), "Column Name must not be null");
}
}
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(cfContext.toString());
sb.append(" MutationList: ").append(mutationList.toString());
return sb.toString();
}
public CFMutationQueryGen getMutationQueryGen() {
return cfDef.getMutationQueryGenerator();
}
public ColListMutationType getType() {
return type;
}
public Object getRowKey() {
return cfContext.getRowKey();
}
} | 8,083 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/CqlStyleMutationQuery.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
/**
 * Base holder for the pieces needed to render a row mutation as CQL text:
 * keyspace/CF context, the accumulated column mutations, the row-delete flag,
 * and the default TTL / timestamp / consistency level.
 */
public class CqlStyleMutationQuery {

    protected final KeyspaceContext ksContext;
    protected final CFQueryContext<?,?> cfContext;
    protected final List<CqlColumnMutationImpl<?,?>> mutationList;
    protected AtomicReference<Boolean> deleteRow;
    protected final AtomicReference<Long> defaultTimestamp;
    protected final AtomicReference<Integer> defaultTTL;
    protected final ConsistencyLevel consistencyLevel;

    private static final String USING = " USING ";
    private static final String TTL = " TTL ";
    private static final String AND = " AND";
    private static final String TIMESTAMP = " TIMESTAMP ";

    public CqlStyleMutationQuery(KeyspaceContext ksCtx, CFQueryContext<?,?> cfCtx,
            List<CqlColumnMutationImpl<?,?>> mutationList, AtomicReference<Boolean> deleteRow,
            AtomicReference<Integer> ttl, AtomicReference<Long> timestamp, ConsistencyLevel consistencyLevel) {

        this.ksContext = ksCtx;
        this.cfContext = cfCtx;
        this.mutationList = mutationList;
        this.deleteRow = deleteRow;
        this.defaultTTL = ttl;
        this.defaultTimestamp = timestamp;
        this.consistencyLevel = consistencyLevel;

        // Propagate an explicit consistency level onto the CF context.
        if (this.consistencyLevel != null) {
            cfContext.setConsistencyLevel(consistencyLevel);
        }
    }

    /** Builds the parameterized CQL statement that deletes the entire row. */
    public String getDeleteEntireRowQuery() {
        ColumnFamily<?,?> cf = cfContext.getColumnFamily();
        CqlColumnFamilyDefinitionImpl cfDef = (CqlColumnFamilyDefinitionImpl) cf.getColumnFamilyDefinition();

        StringBuilder query = new StringBuilder("DELETE FROM ");
        query.append(ksContext.getKeyspace()).append(".").append(cf.getName());
        query.append(" WHERE ").append(cfDef.getPartitionKeyColumnDefinition().getName()).append(" = ?;");
        return query.toString();
    }

    /**
     * Appends a "USING TTL x AND TIMESTAMP y" clause to {@code sb}, using the
     * override values when given and the mutation-wide defaults otherwise.
     * Appends nothing when neither a TTL nor a timestamp is in effect.
     */
    public void appendWriteOptions(StringBuilder sb, Integer overrideTTL, Long overrideTimestamp) {

        Integer effectiveTtl = (overrideTTL == null) ? defaultTTL.get() : overrideTTL;
        Long effectiveTimestamp = (overrideTimestamp == null) ? defaultTimestamp.get() : overrideTimestamp;

        boolean hasTtl = (effectiveTtl != null);
        boolean hasTimestamp = (effectiveTimestamp != null);

        if (!hasTtl && !hasTimestamp) {
            return;
        }

        sb.append(USING);
        if (hasTtl) {
            sb.append(TTL).append(effectiveTtl);
        }
        if (hasTimestamp) {
            if (hasTtl) {
                sb.append(AND);
            }
            sb.append(TIMESTAMP).append(effectiveTimestamp);
        }
    }
}
| 8,084 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/AbstractColumnListMutationImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Date;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import java.util.zip.GZIPOutputStream;
import org.apache.commons.codec.binary.StringUtils;
import com.google.common.base.Preconditions;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.serializers.BooleanSerializer;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.serializers.ByteSerializer;
import com.netflix.astyanax.serializers.BytesArraySerializer;
import com.netflix.astyanax.serializers.DateSerializer;
import com.netflix.astyanax.serializers.DoubleSerializer;
import com.netflix.astyanax.serializers.FloatSerializer;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.ShortSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.serializers.UUIDSerializer;
/**
 * Skeletal {@link ColumnListMutation} implementation. The many typed
 * putColumn / putColumnIfNotNull convenience overloads are implemented here by
 * delegating to the primitive operations a concrete subclass must provide
 * (notably {@code putColumn(name, value, serializer, ttl)} and
 * {@code putEmptyColumn(name, ttl)}). Also tracks the mutation-wide default
 * timestamp and TTL.
 */
public abstract class AbstractColumnListMutationImpl<C> implements ColumnListMutation<C> {

    // Timestamp stamped on columns of this mutation; null means "let the server pick".
    protected final AtomicReference<Long> defaultTimestamp = new AtomicReference<Long>(null);
    // TTL used when a put does not supply its own; null means "no expiry".
    protected final AtomicReference<Integer> defaultTTL = new AtomicReference<Integer>(null);

    public AbstractColumnListMutationImpl(Long newTimestamp) {
        this.defaultTimestamp.set(newTimestamp);
    }

    // ----- String columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, String value, Integer ttl) {
        return putColumn(columnName, value, StringSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final String value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, String value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, String value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- byte[] columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, byte[] value, Integer ttl) {
        return putColumn(columnName, value, BytesArraySerializer.get(), ttl);
    }

    @Override
    public <V> ColumnListMutation<C> putColumnIfNotNull(C columnName, V value, Serializer<V> valueSerializer, Integer ttl) {
        if (value == null)
            return this;
        return putColumn(columnName, value, valueSerializer, ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final byte[] value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, byte[] value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, byte[] value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- byte columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, byte value, Integer ttl) {
        return putColumn(columnName, value, ByteSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final byte value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Byte value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Byte value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    // ----- short columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, short value, Integer ttl) {
        return putColumn(columnName, value, ShortSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final short value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Short value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Short value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    // ----- int columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, int value, Integer ttl) {
        return putColumn(columnName, value, IntegerSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final int value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Integer value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Integer value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- long columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, long value, Integer ttl) {
        return putColumn(columnName, value, LongSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final long value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Long value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Long value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- boolean columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, boolean value, Integer ttl) {
        return putColumn(columnName, value, BooleanSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final boolean value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Boolean value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Boolean value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- ByteBuffer columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, ByteBuffer value, Integer ttl) {
        return putColumn(columnName, value, ByteBufferSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final ByteBuffer value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, ByteBuffer value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, ByteBuffer value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- Date columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, Date value, Integer ttl) {
        return putColumn(columnName, value, DateSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final Date value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Date value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Date value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- float columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, float value, Integer ttl) {
        return putColumn(columnName, value, FloatSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final float value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Float value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Float value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- double columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, double value, Integer ttl) {
        return putColumn(columnName, value, DoubleSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final double value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Double value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, Double value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- UUID columns -----

    @Override
    public ColumnListMutation<C> putColumn(C columnName, UUID value, Integer ttl) {
        return putColumn(columnName, value, UUIDSerializer.get(), ttl);
    }

    @Override
    public ColumnListMutation<C> putColumn(final C columnName, final UUID value) {
        return putColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, UUID value) {
        if (value != null) {
            return putColumn(columnName, value);
        }
        return this;
    }

    @Override
    public ColumnListMutation<C> putColumnIfNotNull(C columnName, UUID value, Integer ttl) {
        if (value != null) {
            return putColumn(columnName, value, ttl);
        }
        return this;
    }

    // ----- misc -----

    @Override
    public ColumnListMutation<C> putEmptyColumn(final C columnName) {
        return putEmptyColumn(columnName, null);
    }

    @Override
    public ColumnListMutation<C> setTimestamp(long timestamp) {
        this.defaultTimestamp.set(timestamp);
        return this;
    }

    /**
     * GZIP-compresses the UTF-8 bytes of {@code value} and stores them as a
     * ByteBuffer column. Rejects null values; use
     * putCompressedColumnIfNotNull for a null-tolerant variant.
     *
     * @throws NullPointerException if {@code value} is null
     * @throws RuntimeException wrapping any IOException from compression
     */
    @Override
    public ColumnListMutation<C> putCompressedColumn(C columnName, String value, Integer ttl) {
        // Fix: removed an unreachable "if (value == null)" fallback branch that
        // followed this checkNotNull — the precondition already throws on null.
        Preconditions.checkNotNull(value, "Can't insert null value");

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        GZIPOutputStream gzip;
        try {
            gzip = new GZIPOutputStream(out);
            gzip.write(StringUtils.getBytesUtf8(value));
            // close() flushes the trailing GZIP block; must happen before toByteArray().
            gzip.close();
            return this.putColumn(columnName, ByteBuffer.wrap(out.toByteArray()), ttl);
        } catch (IOException e) {
            throw new RuntimeException("Error compressing column " + columnName, e);
        }
    }

    @Override
    public ColumnListMutation<C> putCompressedColumn(C columnName, String value) {
        return putCompressedColumn(columnName, value, null);
    }

    @Override
    public ColumnListMutation<C> putCompressedColumnIfNotNull(C columnName, String value, Integer ttl) {
        if (value == null)
            return this;
        return putCompressedColumn(columnName, value, ttl);
    }

    @Override
    public ColumnListMutation<C> putCompressedColumnIfNotNull(C columnName, String value) {
        if (value == null)
            return this;
        return putCompressedColumn(columnName, value);
    }

    /** Returns the mutation-wide default TTL, or null when none is set. */
    public Integer getDefaultTtl() {
        return defaultTTL.get();
    }

    /** Returns the mutation-wide timestamp, or null when none is set. */
    public Long getTimestamp() {
        return defaultTimestamp.get();
    }
}
| 8,085 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/BatchedStatements.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
/**
 * Accumulates CQL statement strings and their bind values, and renders them
 * either as a single statement or wrapped in a (logged or unlogged) BATCH.
 */
public class BatchedStatements {

    private static final Logger LOG = LoggerFactory.getLogger(BatchedStatements.class);

    private List<String> batchQueries = new ArrayList<String>();
    private List<Object> batchValues = new ArrayList<Object>();

    public BatchedStatements() {
    }

    public List<String> getBatchQueries() {
        return this.batchQueries;
    }

    public List<Object> getBatchValues() {
        return this.batchValues;
    }

    public void addBatchQuery(String query) {
        batchQueries.add(query);
    }

    public void addBatchValues(List<Object> values) {
        batchValues.addAll(values);
    }

    public void addBatchValues(Object ... values) {
        for (Object bindValue : values) {
            batchValues.add(bindValue);
        }
    }

    public void addBatch(String query, Object ... values) {
        // Delegate to the single-purpose adders.
        addBatchQuery(query);
        addBatchValues(values);
    }

    public void addBatch(String query, List<Object> values) {
        addBatchQuery(query);
        addBatchValues(values);
    }

    /** Merges another accumulator's queries and bind values into this one. */
    public void addBatch(BatchedStatements otherBatch) {
        batchQueries.addAll(otherBatch.getBatchQueries());
        batchValues.addAll(otherBatch.getBatchValues());
    }

    /** Prepares the combined query on {@code session} and binds all collected values. */
    public BoundStatement getBoundStatement(Session session, boolean atomicBatch) {

        String combinedQuery = getBatchQuery(atomicBatch);
        PreparedStatement prepared = session.prepare(combinedQuery);

        BoundStatement bound = new BoundStatement(prepared);
        bound.bind(batchValues.toArray());
        return bound;
    }

    /**
     * Concatenates the collected statements; when there is more than one they
     * are wrapped in BEGIN [UNLOGGED] BATCH ... APPLY BATCH.
     */
    public String getBatchQuery(boolean atomicBatch) {

        final boolean multiStatement = batchQueries.size() > 1;

        StringBuilder queryBuilder = new StringBuilder();
        if (multiStatement) {
            queryBuilder.append(atomicBatch ? "BEGIN BATCH " : "BEGIN UNLOGGED BATCH ");
        }
        for (String statement : batchQueries) {
            queryBuilder.append(statement);
        }
        if (multiStatement) {
            queryBuilder.append(" APPLY BATCH; ");
        }

        String finalQuery = queryBuilder.toString();
        if (LOG.isDebugEnabled()) {
            LOG.debug("Query : " + finalQuery);
            LOG.debug("Bind values: " + batchValues);
        }
        return finalQuery;
    }
}
| 8,086 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/MutationQueries.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
// Intentionally empty placeholder: no shared mutation-query helpers are
// currently defined here. Kept for API/package stability.
public class MutationQueries {
}
| 8,087 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/CqlColumnMutationImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
import java.nio.ByteBuffer;
import java.util.Date;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.ColumnMutation;
import com.netflix.astyanax.Execution;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.cql.ConsistencyLevelMapping;
import com.netflix.astyanax.cql.CqlAbstractExecutionImpl;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
import com.netflix.astyanax.serializers.BooleanSerializer;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.serializers.DateSerializer;
import com.netflix.astyanax.serializers.DoubleSerializer;
import com.netflix.astyanax.serializers.FloatSerializer;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.ShortSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.serializers.UUIDSerializer;
public class CqlColumnMutationImpl<K,C> implements ColumnMutation {
protected final KeyspaceContext ksContext;
protected final CFQueryContext<K,C> cfContext;
protected final CqlColumnFamilyDefinitionImpl cfDef;
protected final Object columnName;
protected Object columnValue;
// Tracking state
public enum ColMutationType {
UpdateColumn, CounterColumn, DeleteColumn;
}
private ColMutationType type = ColMutationType.UpdateColumn;
private ConsistencyLevel consistencyLevel;
private final AtomicReference<Long> timestamp = new AtomicReference<Long>(null);
private final AtomicReference<Integer> ttl = new AtomicReference<Integer>(null);
private final CFMutationQueryGen queryGen;
public CqlColumnMutationImpl(KeyspaceContext ksCtx, CFQueryContext<K,C> cfCtx, Object cName) {
this.ksContext = ksCtx;
this.columnName = cName;
this.cfContext = cfCtx;
this.cfDef = (CqlColumnFamilyDefinitionImpl) cfContext.getColumnFamily().getColumnFamilyDefinition();
this.queryGen = cfDef.getMutationQueryGenerator();
}
@Override
public ColumnMutation setConsistencyLevel(ConsistencyLevel consistencyLevel) {
this.consistencyLevel = consistencyLevel;
return this;
}
@Override
public ColumnMutation withRetryPolicy(RetryPolicy retry) {
this.cfContext.setRetryPolicy(retry);
return this;
}
@Override
public ColumnMutation withTimestamp(long newValue) {
this.timestamp.set(newValue);
return this;
}
@Override
public Execution<Void> putValue(String value, Integer ttl) {
return putValue(value, StringSerializer.get(), ttl);
}
@Override
public Execution<Void> putValue(byte[] value, Integer ttl) {
return exec(ByteBuffer.wrap(value), ttl, CassandraOperationType.COLUMN_MUTATE);
}
@Override
public Execution<Void> putValue(byte value, Integer ttl) {
byte[] bytes = new byte[1];
bytes[0] = value;
return exec(ByteBuffer.wrap(bytes), ttl, CassandraOperationType.COLUMN_MUTATE);
}
@Override
public Execution<Void> putValue(short value, Integer ttl) {
return putValue(value, ShortSerializer.get(), ttl);
}
@Override
public Execution<Void> putValue(int value, Integer ttl) {
return putValue(value, IntegerSerializer.get(), ttl);
}
@Override
public Execution<Void> putValue(long value, Integer ttl) {
return putValue(value, LongSerializer.get(), ttl);
}
@Override
public Execution<Void> putValue(boolean value, Integer ttl) {
return putValue(value, BooleanSerializer.get(), ttl);
}
@Override
public Execution<Void> putValue(ByteBuffer value, Integer ttl) {
return exec(value, ttl, CassandraOperationType.COLUMN_MUTATE);
}
@Override
public Execution<Void> putValue(Date value, Integer ttl) {
return putValue(value, DateSerializer.get(), ttl);
}
@Override
public Execution<Void> putValue(float value, Integer ttl) {
return putValue(value, FloatSerializer.get(), ttl);
}
@Override
public Execution<Void> putValue(double value, Integer ttl) {
return putValue(value, DoubleSerializer.get(), ttl);
}
@Override
public Execution<Void> putValue(UUID value, Integer ttl) {
return putValue(value, UUIDSerializer.get(), ttl);
}
@Override
public <T> Execution<Void> putValue(T value, Serializer<T> serializer, Integer ttl) {
if (cfDef.getClusteringKeyColumnDefinitionList().size() == 0) {
return exec(value, ttl, CassandraOperationType.COLUMN_MUTATE);
}
if (cfContext.getColumnFamily().getDefaultValueSerializer().getComparatorType() == ByteBufferSerializer.get().getComparatorType()) {
ByteBuffer valueBytes = ((value instanceof ByteBuffer) ? (ByteBuffer) value : serializer.toByteBuffer(value));
return exec(valueBytes, ttl, CassandraOperationType.COLUMN_MUTATE);
} else {
return exec(value, ttl, CassandraOperationType.COLUMN_MUTATE);
}
}
public Execution<Void> putGenericValue(Object value, Integer ttl) {
return exec(value, ttl, CassandraOperationType.COLUMN_MUTATE);
}
@Override
public Execution<Void> putEmptyColumn(Integer ttl) {
return exec(null, ttl, CassandraOperationType.COLUMN_MUTATE);
}
@Override
public Execution<Void> incrementCounterColumn(long amount) {
type = ColMutationType.CounterColumn;
return exec(amount, null, CassandraOperationType.COUNTER_MUTATE);
}
@Override
public Execution<Void> deleteColumn() {
// Mark as delete so the query generator emits a DELETE statement; no value/ttl apply.
type = ColMutationType.DeleteColumn;
return exec(null, null, CassandraOperationType.COLUMN_DELETE);
}
@Override
public Execution<Void> deleteCounterColumn() {
    // Counter columns are removed through the exact same DELETE path as regular
    // columns, so simply reuse it.
    return deleteColumn();
}
// Records the value/ttl on this mutation object and wraps it in a lazily-built
// execution; the actual CQL statement is generated by queryGen at execute time.
private Execution<Void> exec(final Object value, final Integer overrideTTL, final CassandraOperationType opType) {
// Capture 'this' for use inside the anonymous execution below.
final CqlColumnMutationImpl<K, C> thisMutation = this;
// The query generator reads columnValue/ttl back off the mutation when binding.
this.columnValue = value;
if (overrideTTL != null) {
this.ttl.set(overrideTTL);
}
return new CqlAbstractExecutionImpl<Void>(ksContext, cfContext) {
@Override
public CassandraOperationType getOperationType() {
return opType;
}
@Override
public Statement getQuery() {
BoundStatement bStmt = queryGen.getColumnMutationStatement(thisMutation, false);
// Propagate a per-mutation consistency level onto the driver statement, if set.
if (thisMutation.getConsistencyLevel() != null) {
bStmt.setConsistencyLevel(ConsistencyLevelMapping.getCL(getConsistencyLevel()));
}
return bStmt;
}
@Override
public Void parseResultSet(ResultSet resultSet) {
// Mutations return no rows.
return null;
}
};
}
// TTL in effect for this mutation; null when none was specified.
public Integer getTTL() {
return ttl.get();
}
// Explicit write timestamp for this mutation; null lets the server assign one.
public Long getTimestamp() {
return timestamp.get();
}
/** Human-readable identity of this mutation: the target column name. */
@Override
public String toString() {
    // String.valueOf guards against a null column name (e.g. row-level operations),
    // where the original columnName.toString() would have thrown an NPE.
    return String.valueOf(columnName);
}
// Kind of mutation (update / delete / counter) recorded by the put/delete calls above.
public ColMutationType getType() {
return type;
}
// Row key of the target row, as tracked by the column family context.
public Object getRowKey() {
return cfContext.getRowKey();
}
// Per-mutation consistency level; null means "use the keyspace/batch default".
public ConsistencyLevel getConsistencyLevel() {
return this.consistencyLevel;
}
}
| 8,088 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/CqlMutationBatchImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.BatchStatement.Type;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.Clock;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.ConsistencyLevelMapping;
import com.netflix.astyanax.cql.CqlAbstractExecutionImpl;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.writes.CqlColumnListMutationImpl.ColListMutationType;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
/**
 * {@link MutationBatch} implementation that translates the accumulated row/column
 * mutations into a single java-driver {@link BatchStatement}.
 */
public class CqlMutationBatchImpl extends AbstractMutationBatchImpl {

    private final KeyspaceContext ksContext;

    /** Controls whether prepared statements are cached when the batch is generated. */
    private boolean useCaching = false;

    public CqlMutationBatchImpl(KeyspaceContext ksCtx, Clock clock, ConsistencyLevel consistencyLevel, RetryPolicy retry) {
        super(clock, consistencyLevel, retry);
        this.ksContext = ksCtx;
    }

    @Override
    public <K, C> ColumnListMutation<C> createColumnListMutation(String keyspace, ColumnFamily<K, C> cf, K rowKey) {
        return new CqlColumnListMutationImpl<K, C>(ksContext, cf, rowKey, getConsistencyLevel(), timestamp);
    }

    @Override
    public void mergeColumnListMutation(ColumnListMutation<?> from, ColumnListMutation<?> to) {
        // Both sides are CQL mutation lists; delegate the column-level merge.
        CqlColumnListMutationImpl<?, ?> fromCqlListMutation = (CqlColumnListMutationImpl<?, ?>) from;
        CqlColumnListMutationImpl<?, ?> toCqlListMutation = (CqlColumnListMutationImpl<?, ?>) to;
        toCqlListMutation.mergeColumnListMutation(fromCqlListMutation);
    }

    /**
     * Builds the execution wrapper shared by {@link #execute()} and
     * {@link #executeAsync()}; previously duplicated in both methods.
     */
    private CqlAbstractExecutionImpl<Void> newBatchExecution() {
        return new CqlAbstractExecutionImpl<Void>(ksContext, getRetryPolicy()) {
            @Override
            public CassandraOperationType getOperationType() {
                return CassandraOperationType.BATCH_MUTATE;
            }

            @Override
            public Statement getQuery() {
                return getCachedPreparedStatement();
            }

            @Override
            public Void parseResultSet(ResultSet resultSet) {
                return null; // mutations produce no result rows
            }
        };
    }

    @Override
    public OperationResult<Void> execute() throws ConnectionException {
        return newBatchExecution().execute();
    }

    @Override
    public ListenableFuture<OperationResult<Void>> executeAsync() throws ConnectionException {
        return newBatchExecution().executeAsync();
    }

    /** Flattens the nested row-key / column-family map into a list of column-list mutations. */
    private List<CqlColumnListMutationImpl<?, ?>> getColumnMutations() {
        List<CqlColumnListMutationImpl<?, ?>> colListMutation = new ArrayList<CqlColumnListMutationImpl<?, ?>>();
        for (Entry<ByteBuffer, Map<String, ColumnListMutation<?>>> entry : super.getMutationMap().entrySet()) {
            for (ColumnListMutation<?> colMutation : entry.getValue().values()) {
                colListMutation.add((CqlColumnListMutationImpl<?, ?>) colMutation);
            }
        }
        return colListMutation;
    }

    /**
     * Assembles the driver {@link BatchStatement} for all pending mutations and
     * applies the batch-level consistency level. An empty batch is returned when
     * there is nothing to do.
     */
    private BatchStatement getCachedPreparedStatement() {
        final List<CqlColumnListMutationImpl<?, ?>> colListMutations = getColumnMutations();
        // getColumnMutations() never returns null; only the empty case needs handling.
        if (colListMutations.isEmpty()) {
            return new BatchStatement(Type.UNLOGGED);
        }
        BatchStatement batch = new BatchStatement(selectBatchType(colListMutations.get(0).getType()));
        for (CqlColumnListMutationImpl<?, ?> colListMutation : colListMutations) {
            CFMutationQueryGen queryGen = colListMutation.getMutationQueryGen();
            queryGen.addColumnListMutationToBatch(batch, colListMutation, useCaching);
        }
        batch.setConsistencyLevel(ConsistencyLevelMapping.getCL(this.getConsistencyLevel()));
        return batch;
    }

    /**
     * Counter updates require a COUNTER batch; otherwise the atomic-batch flag
     * decides between LOGGED and UNLOGGED. (Type is inferred from the first
     * mutation, matching the original behavior.)
     */
    private Type selectBatchType(ColListMutationType mutationType) {
        if (mutationType == ColListMutationType.CounterColumnsUpdate) {
            return Type.COUNTER;
        }
        return useAtomicBatch() ? Type.LOGGED : Type.UNLOGGED;
    }

    @Override
    public MutationBatch withCaching(boolean condition) {
        useCaching = condition;
        return this;
    }
}
| 8,089 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/StatementCache.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import com.datastax.driver.core.PreparedStatement;
/**
 * Process-wide singleton cache of driver {@link PreparedStatement}s keyed by an
 * integer query id.
 */
public class StatementCache {

    private final ConcurrentHashMap<Integer, PreparedStatement> statementCache = new ConcurrentHashMap<Integer, PreparedStatement>();

    /** Singleton: instantiate only via {@link #getInstance()}. */
    private StatementCache() {
    }

    /** Returns the cached statement for {@code id}, or null on a miss. */
    public PreparedStatement getStatement(Integer id) {
        return statementCache.get(id);
    }

    /**
     * Returns the cached statement for {@code id}, preparing it via {@code func}
     * on a miss. Under a race {@code func} may still run more than once, but all
     * callers now receive the single canonical instance that won the
     * {@code putIfAbsent} — previously its return value was discarded, so racing
     * callers could end up holding different PreparedStatement instances than
     * the one left in the cache.
     *
     * @throws RuntimeException wrapping any exception thrown by {@code func}
     */
    public PreparedStatement getStatement(Integer id, Callable<PreparedStatement> func) {
        PreparedStatement stmt = statementCache.get(id);
        if (stmt == null) {
            try {
                stmt = func.call();
                PreparedStatement existing = statementCache.putIfAbsent(id, stmt);
                if (existing != null) {
                    // Another thread won the race; use its statement.
                    stmt = existing;
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        return stmt;
    }

    private static final StatementCache Instance = new StatementCache();

    public static StatementCache getInstance() {
        return Instance;
    }
}
| 8,090 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/AbstractMutationBatchImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.commons.codec.binary.Hex;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Maps.EntryTransformer;
import com.netflix.astyanax.Clock;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.WriteAheadLog;
import com.netflix.astyanax.connectionpool.Host;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.retry.RetryPolicy;
/**
 * Base {@link MutationBatch} implementation that accumulates per-row,
 * per-column-family mutation lists; subclasses supply the concrete
 * {@link ColumnListMutation} type and the merge/execute behavior.
 */
public abstract class AbstractMutationBatchImpl implements MutationBatch {

    /** Batch-wide write timestamp; {@code null} means unset (server assigns one). */
    protected Long timestamp = null;

    protected ConsistencyLevel consistencyLevel;
    protected Clock clock;
    protected Host pinnedHost;
    protected RetryPolicy retry;
    protected WriteAheadLog wal;
    protected boolean useAtomicBatch = false;
    protected String keyspace;

    /** Serialized row key -> (column family name -> pending mutations), in insertion order. */
    protected Map<ByteBuffer, Map<String, ColumnListMutation<?>>> mutationMap = Maps.newLinkedHashMap();

    /** Lookup so repeated withRow(cf, key) calls reuse the same mutation list. */
    protected Map<KeyAndColumnFamily, ColumnListMutation<?>> rowLookup = Maps.newHashMap();

    /** Hash key combining column family name and serialized row key. */
    private static class KeyAndColumnFamily {
        private final String columnFamily;
        private final ByteBuffer key;

        public KeyAndColumnFamily(String columnFamily, ByteBuffer key) {
            this.columnFamily = columnFamily;
            this.key = key;
        }

        // NOTE(review): this class does not implement Comparable, so compareTo is
        // effectively dead code; retained unchanged for compatibility.
        public int compareTo(Object obj) {
            if (obj instanceof KeyAndColumnFamily) {
                KeyAndColumnFamily other = (KeyAndColumnFamily) obj;
                int result = columnFamily.compareTo(other.columnFamily);
                if (result == 0) {
                    result = key.compareTo(other.key);
                }
                return result;
            }
            return -1;
        }

        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            result = prime * result + ((columnFamily == null) ? 0 : columnFamily.hashCode());
            result = prime * result + ((key == null) ? 0 : key.hashCode());
            return result;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj)
                return true;
            if (obj == null || getClass() != obj.getClass())
                return false;
            KeyAndColumnFamily other = (KeyAndColumnFamily) obj;
            if (columnFamily == null) {
                if (other.columnFamily != null)
                    return false;
            } else if (!columnFamily.equals(other.columnFamily))
                return false;
            if (key == null) {
                if (other.key != null)
                    return false;
            } else if (!key.equals(other.key))
                return false;
            return true;
        }
    }

    public AbstractMutationBatchImpl(Clock clock, ConsistencyLevel consistencyLevel, RetryPolicy retry) {
        this.clock = clock;
        this.timestamp = null; // unset; server assigns timestamps unless withTimestamp() is called
        this.consistencyLevel = consistencyLevel;
        this.retry = retry;
    }

    /**
     * Returns the (possibly already started) mutation list for the given row of
     * the given column family, creating and registering it on first use.
     */
    @Override
    public <K, C> ColumnListMutation<C> withRow(ColumnFamily<K, C> columnFamily, K rowKey) {
        Preconditions.checkNotNull(columnFamily, "columnFamily cannot be null");
        Preconditions.checkNotNull(rowKey, "Row key cannot be null");

        ByteBuffer bbKey = columnFamily.getKeySerializer().toByteBuffer(rowKey);
        if (!bbKey.hasRemaining()) {
            throw new RuntimeException("Row key cannot be empty");
        }

        KeyAndColumnFamily kacf = new KeyAndColumnFamily(columnFamily.getName(), bbKey);
        ColumnListMutation<C> clm = (ColumnListMutation<C>) rowLookup.get(kacf);
        if (clm == null) {
            Map<String, ColumnListMutation<?>> innerMutationMap = mutationMap.get(bbKey);
            if (innerMutationMap == null) {
                innerMutationMap = Maps.newHashMap();
                mutationMap.put(bbKey, innerMutationMap);
            }
            ColumnListMutation<?> innerMutationList = innerMutationMap.get(columnFamily.getName());
            if (innerMutationList == null) {
                innerMutationList = createColumnListMutation(keyspace, columnFamily, rowKey);
                innerMutationMap.put(columnFamily.getName(), innerMutationList);
            }
            rowLookup.put(kacf, innerMutationList);
            clm = (ColumnListMutation<C>) innerMutationList;
        }
        return clm;
    }

    /** Factory for the subclass-specific mutation-list implementation. */
    public abstract <K, C> ColumnListMutation<C> createColumnListMutation(String keyspace, ColumnFamily<K, C> cf, K rowKey);

    @Override
    public void discardMutations() {
        this.timestamp = null;
        this.mutationMap.clear();
        this.rowLookup.clear();
        this.withCaching(false); // reset statement caching along with the pending state
    }

    @Override
    public <K> void deleteRow(Iterable<? extends ColumnFamily<K, ?>> columnFamilies, K rowKey) {
        for (ColumnFamily<K, ?> cf : columnFamilies) {
            withRow(cf, rowKey).delete();
        }
    }

    /**
     * Checks whether the mutation object contains rows. While the map may
     * contain row keys the row keys may not contain any mutations.
     *
     * @return true when no rows are pending
     */
    @Override
    public boolean isEmpty() {
        return mutationMap.isEmpty();
    }

    /**
     * Generate a string representation of the mutation with the following
     * syntax Key1: [cf1, cf2], Key2: [cf1, cf3]
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("MutationBatch[");
        boolean first = true;
        for (Entry<ByteBuffer, Map<String, ColumnListMutation<?>>> row : mutationMap.entrySet()) {
            if (!first)
                sb.append(",");
            first = false; // bug fix: flag was never cleared, so the separator was never emitted
            // NOTE(review): array() assumes an array-backed buffer with zero offset;
            // holds for typical serializer output, verify if key serialization changes.
            sb.append(Hex.encodeHex(row.getKey().array()));
            sb.append(row.getValue().entrySet().toString());
        }
        sb.append("]");
        return sb.toString();
    }

    @Override
    public ByteBuffer serialize() throws Exception {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public void deserialize(ByteBuffer data) throws Exception {
        throw new UnsupportedOperationException("Operation not supported");
    }

    /** Live view of row key -> names of the column families mutated for that row. */
    @Override
    public Map<ByteBuffer, Set<String>> getRowKeys() {
        return Maps.transformEntries(mutationMap,
                new EntryTransformer<ByteBuffer, Map<String, ColumnListMutation<?>>, Set<String>>() {
                    @Override
                    public Set<String> transformEntry(ByteBuffer key, Map<String, ColumnListMutation<?>> value) {
                        return value.keySet();
                    }
                });
    }

    public Map<ByteBuffer, Map<String, ColumnListMutation<?>>> getMutationMap() {
        return mutationMap;
    }

    /**
     * Merges another batch's pending mutations into this one. Rows or column
     * families not yet present are adopted by reference; overlapping ones are
     * merged via {@link #mergeColumnListMutation}.
     */
    public void mergeShallow(MutationBatch other) {
        if (!(other instanceof AbstractMutationBatchImpl)) {
            throw new UnsupportedOperationException();
        }

        for (Map.Entry<ByteBuffer, Map<String, ColumnListMutation<?>>> otherRow : ((AbstractMutationBatchImpl) other).mutationMap
                .entrySet()) {
            Map<String, ColumnListMutation<?>> thisRow = mutationMap.get(otherRow.getKey());
            // Key not in the map
            if (thisRow == null) {
                mutationMap.put(otherRow.getKey(), otherRow.getValue());
            }
            else {
                for (Map.Entry<String, ColumnListMutation<?>> otherCf : otherRow.getValue().entrySet()) {
                    ColumnListMutation<?> thisCf = thisRow.get(otherCf.getKey());
                    // Column family not in the map
                    if (thisCf == null) {
                        thisRow.put(otherCf.getKey(), otherCf.getValue());
                    }
                    else {
                        mergeColumnListMutation(otherCf.getValue(), thisCf);
                    }
                }
            }
        }
    }

    /** Subclass-specific merge of two mutation lists for the same row/CF. */
    public abstract void mergeColumnListMutation(ColumnListMutation<?> from, ColumnListMutation<?> to);

    @Override
    public int getRowCount() {
        return mutationMap.size();
    }

    /** No-op: per-operation timeouts are not supported by this implementation. */
    @Override
    public MutationBatch setTimeout(long timeout) {
        return this;
    }

    @Override
    public MutationBatch setTimestamp(long timestamp) {
        return withTimestamp(timestamp);
    }

    @Override
    public MutationBatch withTimestamp(long timestamp) {
        this.timestamp = timestamp;
        return this;
    }

    @Override
    public MutationBatch lockCurrentTimestamp() {
        this.timestamp = clock.getCurrentTime();
        return this;
    }

    @Override
    public MutationBatch setConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    @Override
    public MutationBatch withConsistencyLevel(ConsistencyLevel consistencyLevel) {
        this.consistencyLevel = consistencyLevel;
        return this;
    }

    public ConsistencyLevel getConsistencyLevel() {
        return this.consistencyLevel;
    }

    @Override
    public MutationBatch pinToHost(Host host) {
        this.pinnedHost = host;
        return this;
    }

    @Override
    public MutationBatch withRetryPolicy(RetryPolicy retry) {
        // Duplicate so this batch's retry state is independent of the caller's policy.
        this.retry = retry.duplicate();
        return this;
    }

    @Override
    public MutationBatch usingWriteAheadLog(WriteAheadLog manager) {
        throw new UnsupportedOperationException("Operation not supported. ");
    }

    @Override
    public MutationBatch withAtomicBatch(boolean condition) {
        useAtomicBatch = condition;
        return this;
    }

    public boolean useAtomicBatch() {
        return useAtomicBatch;
    }

    public Host getPinnedHost() {
        return this.pinnedHost;
    }

    public RetryPolicy getRetryPolicy() {
        return this.retry;
    }

    public WriteAheadLog getWriteAheadLog() {
        return this.wal;
    }
}
| 8,091 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/writes/CFMutationQueryGen.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.writes;
import java.util.Iterator;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer.ComponentSerializer;
import com.netflix.astyanax.serializers.ComparatorType;
public class CFMutationQueryGen {
private static final Logger Logger = LoggerFactory.getLogger(CFMutationQueryGen.class);
// Discriminates the CQL statement shapes this generator can produce.
public static enum MutationType {
ColumnUpdate, ColumnDelete, RowDelete, CounterColumnUpdate;
}
// Constants that are used frequently for constructing the query
private static final String INSERT_INTO = "INSERT INTO ";
private static final String OPEN_PARA = " (";
private static final String CLOSE_PARA = ") ";
private static final String VALUES = ") VALUES (";
private static final String BIND_MARKER = "?,";
private static final String LAST_BIND_MARKER = "?";
private static final String COMMA = ",";
private static final String USING = " USING ";
private static final String TTL = " TTL ";
private static final String AND = " AND ";
private static final String TIMESTAMP = " TIMESTAMP ";
private static final String DELETE_FROM = "DELETE FROM ";
private static final String WHERE = " WHERE ";
private static final String EQUALS = " = ";
private static final String UPDATE = " UPDATE ";
private static final String SET = " SET ";
private final String keyspace;
private final CqlColumnFamilyDefinitionImpl cfDef;
private final Session session;
// One generator instance per (session, keyspace, column family definition).
public CFMutationQueryGen(Session session, String keyspaceName, CqlColumnFamilyDefinitionImpl cfDefinition) {
this.keyspace = keyspaceName;
this.cfDef = cfDefinition;
this.session = session;
}
/**
 * Appends the optional " USING TTL ... AND TIMESTAMP ..." clause to the query
 * being built. Appends nothing when both options are absent.
 */
private static void appendWriteOptions(StringBuilder sb, Integer ttl, Long timestamp) {
    final boolean hasTtl = (ttl != null);
    final boolean hasTimestamp = (timestamp != null);
    if (!hasTtl && !hasTimestamp) {
        return;
    }
    sb.append(USING);
    if (hasTtl) {
        sb.append(TTL + ttl);
    }
    if (hasTimestamp) {
        if (hasTtl) {
            sb.append(AND);
        }
        sb.append(TIMESTAMP + timestamp);
    }
}
// Base for all mutation query generators: builds the CQL string, prepares it
// (optionally caching the PreparedStatement), and binds the mutation's values.
abstract class MutationQueryCache<M> {
// Single-slot cache: this generator serves exactly one column family, so one
// prepared statement per mutation shape suffices.
private final AtomicReference<PreparedStatement> cachedStatement = new AtomicReference<PreparedStatement>(null);
public abstract Callable<String> getQueryGen(M mutation);
public void addToBatch(BatchStatement batch, M mutation, boolean useCaching) {
batch.add(getBoundStatement(mutation, useCaching));
}
public BoundStatement getBoundStatement(M mutation, boolean useCaching) {
PreparedStatement pStatement = getPreparedStatement(mutation, useCaching);
return bindValues(pStatement, mutation);
}
public abstract BoundStatement bindValues(PreparedStatement pStatement, M mutation);
public PreparedStatement getPreparedStatement(M mutation, boolean useCaching) {
PreparedStatement pStatement = null;
if (useCaching) {
pStatement = cachedStatement.get();
}
if (pStatement == null) {
try {
String query = getQueryGen(mutation).call();
pStatement = session.prepare(query);
if (Logger.isDebugEnabled()) {
Logger.debug("Query: " + pStatement.getQueryString());
}
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Benign race: concurrent callers may each prepare once; one writer fills the slot.
if (useCaching && cachedStatement.get() == null) {
cachedStatement.set(pStatement);
}
return pStatement;
}
}
// DELETE of an entire row: the query is keyed only by the partition key.
private MutationQueryCache<CqlColumnListMutationImpl<?,?>> DeleteRowQuery = new MutationQueryCache<CqlColumnListMutationImpl<?,?>>() {
// Query text does not depend on the mutation, so one shared generator is enough.
private final Callable<String> queryGen = new Callable<String>() {
@Override
public String call() throws Exception {
return DELETE_FROM + keyspace + "." + cfDef.getName() +
WHERE + cfDef.getPartitionKeyColumnDefinition().getName() + EQUALS + LAST_BIND_MARKER;
}
};
@Override
public Callable<String> getQueryGen(CqlColumnListMutationImpl<?, ?> mutation) {
return queryGen;
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnListMutationImpl<?, ?> mutation) {
return pStatement.bind(mutation.getRowKey());
}
};
// Shared bind logic for INSERT/DELETE against tables with clustering keys:
// binds partition key, then clustering-key component(s), then (inserts only) the value.
abstract class BaseClusteringKeyMutation extends MutationQueryCache<CqlColumnMutationImpl<?,?>> {
public abstract boolean isDeleteQuery();
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnMutationImpl<?,?> colMutation) {
int size = cfDef.getPartitionKeyColumnDefinitionList().size() + cfDef.getClusteringKeyColumnDefinitionList().size();
if (!isDeleteQuery()) {
size += cfDef.getRegularColumnDefinitionList().size();
} else {
// we don't need to add the value component here. Just the partition key and the clustering key
}
Object[] arr = new Object[size];
int index = 0;
arr[index++] = colMutation.getRowKey();
ColumnFamily<?,?> cf = colMutation.cfContext.getColumnFamily();
boolean isCompositeColumn = cf.getColumnSerializer().getComparatorType() == ComparatorType.COMPOSITETYPE;
if (isCompositeColumn) {
// Composite column name: bind each annotated component as its own clustering column.
AnnotatedCompositeSerializer<?> compSerializer = (AnnotatedCompositeSerializer<?>) cf.getColumnSerializer();
for (ComponentSerializer<?> component : compSerializer.getComponents()) {
try {
arr[index++] = component.getFieldValueDirectly(colMutation.columnName);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} else {
arr[index++] = colMutation.columnName;
}
if (!isDeleteQuery()) {
arr[index++] = colMutation.columnValue;
}
return pStatement.bind(arr);
}
}
// INSERT for tables with clustering keys:
// INSERT INTO ks.cf (pk..., ck..., value...) VALUES (?, ...) [USING TTL/TIMESTAMP]
private BaseClusteringKeyMutation InsertColumnWithClusteringKey = new BaseClusteringKeyMutation() {
@Override
public Callable<String> getQueryGen(final CqlColumnMutationImpl<?, ?> mutation) {
return new Callable<String>() {
@Override
public String call() throws Exception {
return genQuery().toString();
}
private StringBuilder genQuery() {
/**
 * e.g
 * insert into t (key, column1, value) values ('a', '2' , 'a2') using ttl 86400 and timestamp = 1234444;
 */
int columnCount = 0;
StringBuilder sb = new StringBuilder(INSERT_INTO);
sb.append(keyspace + "." + cfDef.getName());
sb.append(OPEN_PARA);
// Partition key column(s) first ...
Iterator<ColumnDefinition> iter = cfDef.getPartitionKeyColumnDefinitionList().iterator();
while (iter.hasNext()) {
sb.append(iter.next().getName());
columnCount++;
if (iter.hasNext()) {
sb.append(COMMA);
}
}
// ... then the clustering key column(s) ...
iter = cfDef.getClusteringKeyColumnDefinitionList().iterator();
if (iter.hasNext()) {
sb.append(COMMA);
while (iter.hasNext()) {
sb.append(iter.next().getName());
columnCount++;
if (iter.hasNext()) {
sb.append(COMMA);
}
}
}
// ... then the regular (value) column(s).
iter = cfDef.getRegularColumnDefinitionList().iterator();
if (iter.hasNext()) {
sb.append(COMMA);
while (iter.hasNext()) {
sb.append(iter.next().getName());
columnCount++;
if (iter.hasNext()) {
sb.append(COMMA);
}
}
}
// One bind marker per column listed above.
sb.append(VALUES);
for (int i=0; i<columnCount; i++) {
if (i < (columnCount-1)) {
sb.append(BIND_MARKER);
} else {
sb.append(LAST_BIND_MARKER);
}
}
sb.append(CLOSE_PARA);
appendWriteOptions(sb, mutation.getTTL(), mutation.getTimestamp());
return sb;
}
};
}
@Override
public boolean isDeleteQuery() {
return false;
}
};
// DELETE of a single logical column (one clustering-key row) from a clustered table.
// NOTE(review): appendWriteOptions can emit "USING TTL" here, which is not valid
// CQL for DELETE (only TIMESTAMP is) — confirm a TTL is never set on delete mutations.
private BaseClusteringKeyMutation DeleteColumnWithClusteringKey = new BaseClusteringKeyMutation() {
@Override
public Callable<String> getQueryGen(final CqlColumnMutationImpl<?, ?> mutation) {
return new Callable<String>() {
@Override
public String call() throws Exception {
return genQuery().toString();
}
private StringBuilder genQuery() {
StringBuilder sb = new StringBuilder(DELETE_FROM);
sb.append(keyspace + "." + cfDef.getName());
appendWriteOptions(sb, mutation.getTTL(), mutation.getTimestamp());
// WHERE pk = ? [AND ck1 = ? ...]
Iterator<ColumnDefinition> iter = cfDef.getPartitionKeyColumnDefinitionList().iterator();
sb.append(WHERE);
while (iter.hasNext()) {
sb.append(iter.next().getName()).append(EQUALS).append(LAST_BIND_MARKER);
if (iter.hasNext()) {
sb.append(AND);
}
}
iter = cfDef.getClusteringKeyColumnDefinitionList().iterator();
if (iter.hasNext()) {
sb.append(AND);
while (iter.hasNext()) {
sb.append(iter.next().getName()).append(EQUALS).append(LAST_BIND_MARKER);
if (iter.hasNext()) {
sb.append(AND);
}
}
}
return sb;
}
};
}
@Override
public boolean isDeleteQuery() {
return true;
}
};
// Counter update: UPDATE ks.cf [USING ...] SET v = v + ? WHERE pk = ? [AND ck = ? ...]
private MutationQueryCache<CqlColumnMutationImpl<?,?>> CounterColumnUpdate = new MutationQueryCache<CqlColumnMutationImpl<?,?>>() {
@Override
public Callable<String> getQueryGen(final CqlColumnMutationImpl<?, ?> mutation) {
return new Callable<String>() {
@Override
public String call() throws Exception {
// The single regular column of a counter table is the counter itself.
String valueAlias = cfDef.getRegularColumnDefinitionList().get(0).getName();
StringBuilder sb = new StringBuilder();
sb.append(UPDATE + keyspace + "." + cfDef.getName());
appendWriteOptions(sb, mutation.getTTL(), mutation.getTimestamp());
sb.append(SET + valueAlias + " = " + valueAlias + " + ? ");
Iterator<ColumnDefinition> iter = cfDef.getPartitionKeyColumnDefinitionList().iterator();
sb.append(WHERE);
while (iter.hasNext()) {
sb.append(iter.next().getName()).append(EQUALS).append(LAST_BIND_MARKER);
if (iter.hasNext()) {
sb.append(AND);
}
}
iter = cfDef.getClusteringKeyColumnDefinitionList().iterator();
if (iter.hasNext()) {
sb.append(AND);
while (iter.hasNext()) {
sb.append(iter.next().getName()).append(EQUALS).append(LAST_BIND_MARKER);
if (iter.hasNext()) {
sb.append(AND);
}
}
}
return sb.toString();
}
};
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnMutationImpl<?, ?> mutation) {
// Bind order matches the query: delta first (SET clause), then key columns.
int size = 1 + cfDef.getPartitionKeyColumnDefinitionList().size() + cfDef.getClusteringKeyColumnDefinitionList().size();
Object[] arr = new Object[size];
int index = 0;
arr[index++] = mutation.columnValue;
arr[index++] = mutation.getRowKey();
ColumnFamily<?,?> cf = mutation.cfContext.getColumnFamily();
boolean isCompositeColumn = cf.getColumnSerializer().getComparatorType() == ComparatorType.COMPOSITETYPE;
if (isCompositeColumn) {
// Composite column name: bind each annotated component individually.
AnnotatedCompositeSerializer<?> compSerializer = (AnnotatedCompositeSerializer<?>) cf.getColumnSerializer();
for (ComponentSerializer<?> component : compSerializer.getComponents()) {
try {
arr[index++] = component.getFieldValueDirectly(mutation.columnName);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} else {
arr[index++] = mutation.columnName;
}
return pStatement.bind(arr);
}
};
// Dispatches each column mutation of a list to the matching INSERT/DELETE
// generator; counter updates cannot be mixed into such a batch.
private MutationQueryCache<CqlColumnListMutationImpl<?,?>> InsertOrDeleteWithClusteringKey = new MutationQueryCache<CqlColumnListMutationImpl<?,?>>() {
@Override
public void addToBatch(BatchStatement batch, CqlColumnListMutationImpl<?,?> colListMutation, boolean useCaching) {
for (CqlColumnMutationImpl<?,?> colMutation : colListMutation.getMutationList()) {
switch (colMutation.getType()) {
case UpdateColumn :
InsertColumnWithClusteringKey.addToBatch(batch, colMutation, useCaching);
break;
case DeleteColumn :
DeleteColumnWithClusteringKey.addToBatch(batch, colMutation, useCaching);
break;
case CounterColumn :
throw new RuntimeException("Counter column update not allowed with other updates");
default:
throw new RuntimeException("Unsupported type: " + colMutation.getType());
};
}
}
// This dispatcher only overrides addToBatch; query generation/binding are per-column.
@Override
public Callable<String> getQueryGen(CqlColumnListMutationImpl<?, ?> colListMutation) {
throw new RuntimeException("Not Supported");
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnListMutationImpl<?, ?> colListMutation) {
throw new RuntimeException("Not Supported");
}
};
// Fans a column-list mutation out to the per-column insert/delete dispatcher.
private MutationQueryCache<CqlColumnListMutationImpl<?,?>> InsertOrDeleteColumnListWithClusteringKey = new MutationQueryCache<CqlColumnListMutationImpl<?,?>>() {
@Override
public void addToBatch(BatchStatement batch, CqlColumnListMutationImpl<?,?> colListMutation, boolean useCaching) {
for (CqlColumnMutationImpl<?,?> colMutation : colListMutation.getMutationList()) {
InsertOrDeleteColumnWithClusteringKey.addToBatch(batch, colMutation, useCaching);
}
}
// Only addToBatch is meaningful for this dispatcher.
@Override
public Callable<String> getQueryGen(CqlColumnListMutationImpl<?, ?> colListMutation) {
throw new RuntimeException("Not Supported");
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnListMutationImpl<?, ?> colListMutation) {
throw new RuntimeException("Not Supported");
}
};
// Routes a single column mutation to the insert, delete or counter generator
// based on its recorded mutation type.
private MutationQueryCache<CqlColumnMutationImpl<?,?>> InsertOrDeleteColumnWithClusteringKey = new MutationQueryCache<CqlColumnMutationImpl<?,?>>() {
@Override
public BoundStatement getBoundStatement(CqlColumnMutationImpl<?, ?> mutation, boolean useCaching) {
switch (mutation.getType()) {
case UpdateColumn :
return InsertColumnWithClusteringKey.getBoundStatement(mutation, useCaching);
case DeleteColumn :
return DeleteColumnWithClusteringKey.getBoundStatement(mutation, useCaching);
case CounterColumn :
return CounterColumnUpdate.getBoundStatement(mutation, useCaching);
default:
throw new RuntimeException("Unsupported type: " + mutation.getType());
}
}
// Only the dispatching getBoundStatement is meaningful here.
@Override
public Callable<String> getQueryGen(CqlColumnMutationImpl<?, ?> colMutation) {
throw new RuntimeException("Not Supported");
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnMutationImpl<?, ?> colMutation) {
throw new RuntimeException("Not Supported");
}
};
// Fans a column-list of counter mutations out to the per-column counter-update generator.
private MutationQueryCache<CqlColumnListMutationImpl<?,?>> CounterColumnList = new MutationQueryCache<CqlColumnListMutationImpl<?,?>>() {
@Override
public void addToBatch(BatchStatement batch, CqlColumnListMutationImpl<?,?> colListMutation, boolean useCaching) {
for (CqlColumnMutationImpl<?,?> colMutation : colListMutation.getMutationList()) {
CounterColumnUpdate.addToBatch(batch, colMutation, useCaching);
}
}
// Only addToBatch is meaningful for this dispatcher.
@Override
public Callable<String> getQueryGen(CqlColumnListMutationImpl<?, ?> colListMutation) {
throw new RuntimeException("Not Supported");
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnListMutationImpl<?, ?> colListMutation) {
throw new RuntimeException("Not Supported");
}
};
// Single INSERT covering all mutated columns of a flat (no clustering key) table.
// NOTE(review): the column list varies per mutation, so the statement is prepared
// on every call and never uses the single-slot cache.
private MutationQueryCache<CqlColumnListMutationImpl<?,?>> FlatTableInsertQuery = new MutationQueryCache<CqlColumnListMutationImpl<?,?>> () {
@Override
public void addToBatch(BatchStatement batch, CqlColumnListMutationImpl<?, ?> colListMutation, boolean useCaching) {
StringBuilder sb = new StringBuilder();
sb.append(INSERT_INTO).append(keyspace + "." + cfDef.getName());
sb.append(OPEN_PARA);
// Init the object array for the bind values
int size = colListMutation.getMutationList().size() + 1;
Object[] values = new Object[size];
int index = 0;
// Add in the primary key
sb.append(cfDef.getPartitionKeyColumnDefinition().getName()).append(COMMA);
values[index++] = colListMutation.getRowKey();
// Then one column-name/value pair per individual column mutation.
for (CqlColumnMutationImpl<?,?> colMutation : colListMutation.getMutationList()) {
sb.append(colMutation.columnName);
values[index++] = colMutation.columnValue;
if (index < size) {
sb.append(COMMA);
}
}
// One bind marker per bound value.
sb.append(VALUES);
for (int i=0; i<size; i++) {
if (i < (size-1)) {
sb.append(BIND_MARKER);
} else {
sb.append(LAST_BIND_MARKER);
}
}
sb.append(CLOSE_PARA);
// TTL/timestamp come from the list-level defaults, not per-column settings.
appendWriteOptions(sb, colListMutation.getDefaultTtl(), colListMutation.getTimestamp());
String query = sb.toString();
if (Logger.isDebugEnabled()) {
Logger.debug("Query: " + query);
}
try {
PreparedStatement pStatement = session.prepare(query);
batch.add(pStatement.bind(values));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Only addToBatch is meaningful: the query depends on the full mutation list.
@Override
public Callable<String> getQueryGen(CqlColumnListMutationImpl<?, ?> mutation) {
throw new RuntimeException("Not Supported");
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnListMutationImpl<?, ?> mutation) {
throw new RuntimeException("Not Supported");
}
};
private MutationQueryCache<CqlColumnMutationImpl<?,?>> FlatTableInsertQueryForColumn = new MutationQueryCache<CqlColumnMutationImpl<?,?>> () {

    /** Not supported; use getBoundStatement. */
    @Override
    public Callable<String> getQueryGen(CqlColumnMutationImpl<?, ?> mutation) {
        throw new RuntimeException("Not Supported");
    }

    /** Not supported; use getBoundStatement. */
    @Override
    public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnMutationImpl<?, ?> mutation) {
        throw new RuntimeException("Not Supported");
    }

    /** Not supported; use getBoundStatement. */
    @Override
    public void addToBatch(BatchStatement batch, CqlColumnMutationImpl<?, ?> mutation, boolean useCaching) {
        throw new RuntimeException("Not Supported");
    }

    /**
     * Builds a two-value INSERT for a flat table (no clustering keys):
     * INSERT INTO ks.cf (pk, col) VALUES (?, ?), with TTL/timestamp write
     * options appended when configured, and binds the row key and column
     * value to it.
     */
    @Override
    public BoundStatement getBoundStatement(CqlColumnMutationImpl<?, ?> mutation, boolean useCaching) {
        StringBuilder cql = new StringBuilder();
        cql.append(INSERT_INTO).append(keyspace + "." + cfDef.getName())
           .append(OPEN_PARA)
           .append(cfDef.getPartitionKeyColumnDefinition().getName())
           .append(COMMA)
           .append(mutation.columnName)
           .append(VALUES)
           .append(BIND_MARKER)
           .append(LAST_BIND_MARKER)
           .append(CLOSE_PARA);
        appendWriteOptions(cql, mutation.getTTL(), mutation.getTimestamp());

        String query = cql.toString();
        if (Logger.isDebugEnabled()) {
            Logger.debug("Query: " + query);
        }

        // Bind order matches the column order above: partition key, value.
        Object[] bindValues = new Object[] { mutation.getRowKey(), mutation.columnValue };
        try {
            return session.prepare(query).bind(bindValues);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
};
/**
 * Adds the given column-list mutation to the batch, dispatching on the
 * mutation type: row deletes, (flat or clustered) column updates, and
 * counter updates each go through their dedicated query cache.
 */
public void addColumnListMutationToBatch(BatchStatement batch, CqlColumnListMutationImpl<?,?> colListMutation, boolean useCaching) {
    switch (colListMutation.getType()) {
        case RowDelete: {
            DeleteRowQuery.addToBatch(batch, colListMutation, useCaching);
            break;
        }
        case ColumnsUpdate: {
            // Tables without clustering keys take the flat-table INSERT path.
            boolean isFlatTable = cfDef.getClusteringKeyColumnDefinitionList().isEmpty();
            if (isFlatTable) {
                FlatTableInsertQuery.addToBatch(batch, colListMutation, useCaching);
            } else {
                InsertOrDeleteWithClusteringKey.addToBatch(batch, colListMutation, useCaching);
            }
            break;
        }
        case CounterColumnsUpdate: {
            CounterColumnList.addToBatch(batch, colListMutation, useCaching);
            break;
        }
        default:
            throw new RuntimeException("Unrecognized ColumnListMutation Type");
    }
}
/**
 * Returns the bound statement for a single-column mutation, choosing the
 * flat-table INSERT path when the table has no clustering keys and the
 * clustering-key dispatcher otherwise.
 */
public BoundStatement getColumnMutationStatement(CqlColumnMutationImpl<?,?> mutation, boolean useCaching) {
    boolean isFlatTable = cfDef.getClusteringKeyColumnDefinitionList().isEmpty();
    return isFlatTable
            ? FlatTableInsertQueryForColumn.getBoundStatement(mutation, useCaching)
            : InsertOrDeleteColumnWithClusteringKey.getBoundStatement(mutation, useCaching);
}
}
| 8,092 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/schema/CqlColumnDefinitionImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.schema;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.netflix.astyanax.shaded.org.apache.cassandra.db.marshal.UTF8Type;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.netflix.astyanax.cql.util.CqlTypeMapping;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.ddl.FieldMetadata;
/**
* Impl for {@link ColumnDefinition} interface that constructs the state from a {@link Row} object
* retrieved from the java driver {@link ResultSet}.
*
* @author poberai
*
*/
public class CqlColumnDefinitionImpl implements ColumnDefinition, Comparable<CqlColumnDefinitionImpl> {

    // Backing store for all column attributes; the well-known keys are
    // "column_name" and "validator", plus anything added via setFields/setOption.
    Map<String, Object> options = new HashMap<String, Object>();

    // Role of this column within the table's primary-key layout.
    private CqlColumnType colType;
    // Position within the clustering key; only populated when
    // colType == clustering_key, null otherwise.
    private Integer componentIndex;

    /** Column roles as reported by the system.schema_columns "type" column. */
    public enum CqlColumnType {
        partition_key, clustering_key, regular, compact_value;
    }

    /** Creates an empty definition to be populated via setters. */
    public CqlColumnDefinitionImpl() {
    }

    /**
     * Creates a definition from a row of the system schema columns table:
     * reads the column name, unwraps a parameterized validator (e.g.
     * ReversedType(LongType) -> LongType), and records the column role.
     */
    public CqlColumnDefinitionImpl(Row row) {
        this.setName(row.getString("column_name"));

        String validationClass = row.getString("validator");
        if (validationClass.contains("(")) {
            // Strip the outer wrapper and keep the inner type name.
            int start = validationClass.indexOf("(");
            int end = validationClass.indexOf(")");
            validationClass = validationClass.substring(start+1, end);
        }
        this.setValidationClass(validationClass);

        colType = CqlColumnType.valueOf(row.getString("type"));
        if (colType == CqlColumnType.clustering_key) {
            componentIndex = row.getInt("component_index");
        }
    }

    @Override
    public ColumnDefinition setName(String name) {
        options.put("column_name", name);
        return this;
    }

    @Override
    public ColumnDefinition setName(byte[] name) {
        return setName(ByteBuffer.wrap(name));
    }

    @Override
    public ColumnDefinition setName(ByteBuffer name) {
        // Names are assumed to be UTF-8 encoded.
        return setName(UTF8Type.instance.compose(name));
    }

    @Override
    public ColumnDefinition setValidationClass(String value) {
        options.put("validator", value);
        return this;
    }

    @Override
    public ColumnDefinition setIndex(String name, String type) {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public ColumnDefinition setKeysIndex(String name) {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public ColumnDefinition setKeysIndex() {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public ColumnDefinition setIndexWithType(String type) {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public String getName() {
        return (String) options.get("column_name");
    }

    @Override
    public ByteBuffer getRawName() {
        return UTF8Type.instance.decompose(getName());
    }

    @Override
    public String getValidationClass() {
        return (String) options.get("validator");
    }

    @Override
    public String getIndexName() {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public String getIndexType() {
        throw new UnsupportedOperationException("Operation not supported");
    }

    @Override
    public boolean hasIndex() {
        // Secondary indexes are not supported by this implementation (see
        // getIndexName/getIndexType above), so a column never has one.
        // Fix: the previous body delegated to getIndexName(), which always
        // throws UnsupportedOperationException, making its null-check
        // unreachable and hasIndex() unusable.
        return false;
    }

    @Override
    public Map<String, String> getOptions() {
        // Returns a stringified snapshot; mutations to it do not affect this object.
        Map<String, String> result = new HashMap<String, String>();
        for (String key : options.keySet()) {
            result.put(key, options.get(key).toString());
        }
        return result;
    }

    @Override
    public String getOption(String name, String defaultValue) {
        // NOTE(review): assumes the stored value is a String; a non-String
        // value set via setFields would cause a ClassCastException here.
        String value = (String) options.get(name);
        if (value == null) {
            return defaultValue;
        } else {
            return value;
        }
    }

    @Override
    public ColumnDefinition setOptions(Map<String, String> setOptions) {
        this.options.putAll(setOptions);
        return this;
    }

    @Override
    public String setOption(String name, String value) {
        this.options.put(name, value);
        return options.get(name).toString();
    }

    @Override
    public Collection<String> getFieldNames() {
        return options.keySet();
    }

    @Override
    public Collection<FieldMetadata> getFieldsMetadata() {
        // Derives ad-hoc metadata from the runtime type of each stored value.
        List<FieldMetadata> list = new ArrayList<FieldMetadata>();

        for (String key : options.keySet()) {
            Object value = options.get(key);
            Class<?> clazz = value.getClass();

            String name = key.toUpperCase();
            String type = clazz.getSimpleName().toUpperCase();
            boolean isContainer = Collection.class.isAssignableFrom(clazz) || Map.class.isAssignableFrom(clazz);
            list.add(new FieldMetadata(name, type, isContainer));
        }
        return list;
    }

    @Override
    public Object getFieldValue(String name) {
        return options.get(name);
    }

    @Override
    public ColumnDefinition setFieldValue(String name, Object value) {
        // NOTE(review): stores the stringified value, unlike setFields which
        // stores raw objects — confirm whether this asymmetry is intended.
        options.put(name, String.valueOf(value));
        return this;
    }

    @Override
    public ColumnDefinition setFields(Map<String, Object> fields) {
        options.putAll(fields);
        return this;
    }

    /** Returns the CQL type name corresponding to this column's validator. */
    public String getCqlType() {
        return CqlTypeMapping.getCqlTypeFromComparator(getValidationClass());
    }

    /** Returns this column's role within the table's primary-key layout. */
    public CqlColumnType getColumnType() {
        return this.colType;
    }

    /**
     * Returns the clustering-key component position. Only meaningful for
     * clustering-key columns; unboxing throws NPE for other column types.
     */
    public int getComponentIndex() {
        return this.componentIndex;
    }

    /**
     * Orders clustering-key columns by component index. Assumes both sides
     * are clustering keys (componentIndex non-null); NPEs otherwise.
     */
    @Override
    public int compareTo(CqlColumnDefinitionImpl o) {
        return this.componentIndex.compareTo(o.componentIndex);
    }
}
| 8,093 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/schema/CqlColumnFamilyDefinitionImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.schema;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import com.datastax.driver.core.ColumnDefinitions.Definition;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.cql.CqlOperationResultImpl;
import com.netflix.astyanax.cql.reads.CFRowQueryGen;
import com.netflix.astyanax.cql.util.DataTypeMapping;
import com.netflix.astyanax.cql.writes.CFMutationQueryGen;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.ddl.ColumnFamilyDefinition;
import com.netflix.astyanax.ddl.FieldMetadata;
import com.netflix.astyanax.ddl.SchemaChangeResult;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer.ComponentSerializer;
import com.netflix.astyanax.serializers.ComparatorType;
/**
* Impl for {@link ColumnFamilyDefinition} interface that constructs it's state from the java driver {@link ResultSet}
*
* @author poberai
*
*/
public class CqlColumnFamilyDefinitionImpl implements ColumnFamilyDefinition {
// Live java-driver session used to read schema metadata and run DDL.
private Session session;
// Column family (table) name.
private String cfName;
// Owning keyspace name.
private String keyspaceName;

// Raw table options keyed by option name (comparator, key_validator, ...).
private Map<String, Object> optionsMap = new HashMap<String, Object>();

// Column definitions bucketed by their role in the table layout.
private List<ColumnDefinition> partitionKeyList = new ArrayList<ColumnDefinition>();
private List<ColumnDefinition> clusteringKeyList = new ArrayList<ColumnDefinition>();
private List<ColumnDefinition> regularColumnList = new ArrayList<ColumnDefinition>();
private List<ColumnDefinition> allColumnsDefinitionList = new ArrayList<ColumnDefinition>();

// Names of all primary-key columns: partition keys first, then clustering keys.
private String[] allPkColNames;

// Set when the source ColumnFamily uses an annotated composite column serializer.
private AnnotatedCompositeSerializer<?> compositeSerializer = null;

// When true, execute() issues ALTER TABLE instead of CREATE TABLE.
private boolean alterTable = false;

// Query generators derived from this definition; populated only by the
// Row-based constructor.
private CFMutationQueryGen mutationQueryGen = null;
private CFRowQueryGen rowQueryGen = null;

/** Creates an empty definition bound to the given session. */
public CqlColumnFamilyDefinitionImpl(Session session) {
    this.session = session;
}

/** Creates a definition from dotted java Properties, converted to a nested map. */
public CqlColumnFamilyDefinitionImpl(Session session, String keyspace, Properties props) {
    this(session, keyspace, propertiesToMap(props));
}

/** Creates a definition from an explicit options map; null is treated as empty. */
public CqlColumnFamilyDefinitionImpl(Session session, String keyspace, Map<String, Object> options) {
    this.session = session;
    this.keyspaceName = keyspace;

    if (options == null) {
        options = new HashMap<String, Object>();
    }
    initFromMap(options);
}

/**
 * Creates a definition by introspecting a system schema row and wires up
 * the mutation/row query generators for this table.
 */
public CqlColumnFamilyDefinitionImpl(Session session, Row row) {
    initFromResultSet(session, row);
    mutationQueryGen = new CFMutationQueryGen(session, keyspaceName, this);
    rowQueryGen = new CFRowQueryGen(session, keyspaceName, this);
}

/**
 * Creates a definition from an Astyanax ColumnFamily: the key, column and
 * default-value serializers drive the key_validator, comparator and
 * default_validator options respectively.
 */
public CqlColumnFamilyDefinitionImpl(Session session, String keyspace, ColumnFamily<?, ?> columnFamily, Map<String, Object> options) {
    this.session = session;

    Preconditions.checkArgument(columnFamily != null, "ColumnFamily cannot be null");

    if (options == null) {
        options = new HashMap<String, Object>();
    }

    keyspaceName = keyspace;
    cfName = columnFamily.getName();

    optionsMap.put("key_validator", columnFamily.getKeySerializer().getComparatorType().getClassName());
    optionsMap.put("comparator", columnFamily.getColumnSerializer().getComparatorType().getClassName());
    optionsMap.put("default_validator", columnFamily.getDefaultValueSerializer().getComparatorType().getClassName());

    if (columnFamily.getColumnSerializer() instanceof AnnotatedCompositeSerializer) {
        compositeSerializer = (AnnotatedCompositeSerializer<?>) columnFamily.getColumnSerializer();
    }
    initFromMap(options);
}
/**
 * Seeds the internal options map from a caller-supplied map, pulling out
 * the special "name"/"keyspace" entries and renaming legacy thrift-style
 * option keys to their CQL equivalents.
 */
private void initFromMap(Map<String, Object> options) {
    // "name" overrides the table name and is consumed from the caller's map;
    // "keyspace" overrides the keyspace but (unlike "name") is left in place.
    String cName = (String) options.get("name");
    if (cName != null) {
        cfName = cName;
        options.remove("name");
    }

    String kName = (String) options.get("keyspace");
    if (kName != null) {
        keyspaceName = kName;
    }

    this.optionsMap.putAll(options);

    // Normalize legacy option keys {old-name, new-name}.
    String[][] legacyAliases = {
            { "key_validation_class", "key_validator" },
            { "comparator_type", "comparator" },
            { "default_validation_class", "default_validator" },
    };
    for (String[] alias : legacyAliases) {
        if (optionsMap.containsKey(alias[0])) {
            optionsMap.put(alias[1], optionsMap.remove(alias[0]));
        }
    }
}
/**
 * Populates keyspace/table names and the raw options map from a row of the
 * system schema columnfamilies table, then loads per-column definitions.
 *
 * @throws RuntimeException if the row is null (table not found)
 */
private void initFromResultSet(Session session, Row row) {

    if (row == null) {
        throw new RuntimeException("Result Set is empty");
    }

    this.session = session;

    this.keyspaceName = row.getString("keyspace_name");
    this.cfName = row.getString("columnfamily_name");

    // Copy every column of the schema row verbatim into the options map,
    // decoding each value according to its driver-reported DataType.
    List<Definition> colDefs = row.getColumnDefinitions().asList();
    for (Definition colDef : colDefs) {
        String colName = colDef.getName();
        DataType dataType = colDef.getType();
        Object value = DataTypeMapping.getDynamicColumn(row, colName, dataType);
        optionsMap.put(colName, value);
    }

    readColDefinitions();

    /**
    if (partitionKeyList.size() == 0) {
        readColDefinitionsForCass12();
    }
    */
}
/**
 * Derives clustering-key column definitions from the annotated composite
 * serializer: one synthetic "columnN" (1-based) per composite component,
 * typed by that component's comparator.
 */
private void processCompositeComparator() {
    int ordinal = 1;
    for (ComponentSerializer<?> component : compositeSerializer.getComponents()) {
        String typeName = component.getSerializer().getComparatorType().getTypeName();
        clusteringKeyList.add(new CqlColumnDefinitionImpl()
                .setName("column" + ordinal++)
                .setValidationClass(typeName));
    }
}
/**
 * Derives clustering-key column definitions from a textual composite
 * comparator spec, e.g. CompositeType(UTF8Type, LongType, UTF8Type):
 * one synthetic "columnN" (1-based) per inner type token.
 */
private void processCompositeComparatorSpec(String comparatorSpec) {
    // Split on parens and commas; token 0 is the "CompositeType" prefix.
    String[] tokens = Pattern.compile("[\\(,\\)]").split(comparatorSpec);

    int ordinal = 1;
    for (int i = 1; i < tokens.length; i++) {
        clusteringKeyList.add(new CqlColumnDefinitionImpl()
                .setName("column" + ordinal++)
                .setValidationClass(tokens[i].trim()));
    }
}
/**
 * Translates the thrift-style comparator/validator options (consumed from
 * optionsMap so they are not re-emitted in the CREATE TABLE WITH clause)
 * into synthetic CQL column definitions for a storage-engine style table:
 * a "key" partition column, one or more "columnN" clustering columns, and
 * a "value" column. Missing options default to BytesType.
 *
 * Fix: replaces the confusing nested-assignment ternaries
 * (x = (x == null) ? x = default : x) with plain null-default checks;
 * behavior is unchanged.
 */
private void createColumnDefinitions() {
    String defaultBytesType = ComparatorType.BYTESTYPE.getTypeName();

    String keyClass = (String) optionsMap.remove("key_validator");
    if (keyClass == null) {
        keyClass = defaultBytesType;
    }

    String comparatorClass = (String) optionsMap.remove("comparator");
    if (comparatorClass == null) {
        comparatorClass = defaultBytesType;
    }

    String dataValidationClass = (String) optionsMap.remove("default_validator");
    if (dataValidationClass == null) {
        dataValidationClass = defaultBytesType;
    }

    ColumnDefinition key = new CqlColumnDefinitionImpl().setName("key").setValidationClass(keyClass);
    partitionKeyList.add(key);

    if (compositeSerializer != null) {
        // Clustering columns come from the annotated composite serializer.
        processCompositeComparator();
    } else if (comparatorClass.contains("CompositeType")) {
        // Clustering columns are parsed out of a CompositeType(...) spec.
        processCompositeComparatorSpec(comparatorClass);
    } else {
        // Single clustering column of the comparator type.
        ColumnDefinition column1 = new CqlColumnDefinitionImpl().setName("column1").setValidationClass(comparatorClass);
        clusteringKeyList.add(column1);
    }

    ColumnDefinition valueColumn = new CqlColumnDefinitionImpl().setName("value").setValidationClass(dataValidationClass);
    this.regularColumnList.add(valueColumn);
}
/**
private void readColDefinitionsForCass12() {
String defaultBytesType = ComparatorType.BYTESTYPE.getTypeName();
String keyClass = (String) optionsMap.get("key_validator");
keyClass = (keyClass == null) ? keyClass = defaultBytesType : keyClass;
String comparatorClass = (String) optionsMap.get("comparator");
comparatorClass = (comparatorClass == null) ? comparatorClass = defaultBytesType : comparatorClass;
String dataValidationClass = (String) optionsMap.get("default_validator");
dataValidationClass = (dataValidationClass == null) ? dataValidationClass = defaultBytesType : dataValidationClass;
ColumnDefinition key = new CqlColumnDefinitionImpl().setName("key").setValidationClass(keyClass);
partitionKeyList.add(key);
if (compositeSerializer != null) {
processCompositeComparator();
} else if (comparatorClass.contains("CompositeType")) {
processCompositeComparatorSpec(comparatorClass);
} else {
ColumnDefinition column1 = new CqlColumnDefinitionImpl().setName("column1").setValidationClass(comparatorClass);
clusteringKeyList.add(column1);
}
ColumnDefinition valueColumn = new CqlColumnDefinitionImpl().setName("value").setValidationClass(dataValidationClass);
this.regularColumnList.add(valueColumn);
this.allColumnsDefinitionList.addAll(partitionKeyList);
this.allColumnsDefinitionList.addAll(clusteringKeyList);
this.allColumnsDefinitionList.addAll(regularColumnList);
List<String> allPrimaryKeyColNames = new ArrayList<String>();
for (ColumnDefinition colDef : partitionKeyList) {
allPrimaryKeyColNames.add(colDef.getName());
}
for (ColumnDefinition colDef : clusteringKeyList) {
allPrimaryKeyColNames.add(colDef.getName());
}
allPkColNames = allPrimaryKeyColNames.toArray(new String[allPrimaryKeyColNames.size()]);
}
*/
/**
 * Loads this table's per-column metadata from system.schema_columns and
 * buckets each column by its role: partition key, clustering key, or
 * regular/compact value. Clustering keys are sorted by component index,
 * and allPkColNames is rebuilt as partition keys followed by clustering
 * keys. If the schema query returns no rows, all lists stay empty.
 */
private void readColDefinitions() {

    // VALUE COLUMNS AND COLUMNS THAT ARE NOT PART OF THE PRIMARY KEY
    Statement query = QueryBuilder.select().from("system", "schema_columns")
            .where(eq("keyspace_name", keyspaceName))
            .and(eq("columnfamily_name", cfName));

    ResultSet rs = session.execute(query);
    List<Row> rows = rs.all();
    if (rows != null && rows.size() > 0) {

        // Clustering keys are collected separately so they can be sorted
        // by component_index before being appended.
        List<CqlColumnDefinitionImpl> tmpList = new ArrayList<CqlColumnDefinitionImpl>();

        for (Row row : rows) {

            CqlColumnDefinitionImpl colDef = new CqlColumnDefinitionImpl(row);
            switch (colDef.getColumnType()) {
            case partition_key:
                partitionKeyList.add(colDef);
                allColumnsDefinitionList.add(colDef);
                break;
            case clustering_key:
                tmpList.add(colDef);
                allColumnsDefinitionList.add(colDef);
                break;
            case regular:
                regularColumnList.add(colDef);
                allColumnsDefinitionList.add(colDef);
                break;
            case compact_value:
                // Compact-storage values are treated as regular columns.
                regularColumnList.add(colDef);
                allColumnsDefinitionList.add(colDef);
                break;
            }
        }

        // Sort by clustering component index (CqlColumnDefinitionImpl.compareTo).
        Collections.sort(tmpList);
        clusteringKeyList.addAll(tmpList);
        tmpList = null;

        List<String> allPrimaryKeyColNames = new ArrayList<String>();
        for (ColumnDefinition colDef : partitionKeyList) {
            allPrimaryKeyColNames.add(colDef.getName());
        }
        for (ColumnDefinition colDef : clusteringKeyList) {
            allPrimaryKeyColNames.add(colDef.getName());
        }

        allPkColNames = allPrimaryKeyColNames.toArray(new String[allPrimaryKeyColNames.size()]);
    }
}
/** Switches execute() into ALTER TABLE mode; returns this for chaining. */
public CqlColumnFamilyDefinitionImpl alterTable() {
    alterTable = true;
    return this;
}

// ---------------------------------------------------------------------
// Simple table-option accessors. Each setter stores into optionsMap under
// the corresponding CQL option name; each getter reads it back. Options
// that have no CQL equivalent throw UnsupportedOperationException.
// ---------------------------------------------------------------------

@Override
public ColumnFamilyDefinition setComment(String comment) {
    // Pre-quoted here; note that getComment does not strip the quotes back off.
    optionsMap.put("comment", "'" + comment + "'");
    return this;
}

@Override
public String getComment() {
    return (String) optionsMap.get("comment");
}

@Override
public ColumnFamilyDefinition setKeyspace(String keyspace) {
    keyspaceName = keyspace;
    return this;
}

@Override
public String getKeyspace() {
    return keyspaceName;
}

// Legacy thrift-era memtable tuning knobs: not supported under CQL.

@Override
@Deprecated
public ColumnFamilyDefinition setMemtableFlushAfterMins(Integer value) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
@Deprecated
public Integer getMemtableFlushAfterMins() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
@Deprecated
public ColumnFamilyDefinition setMemtableOperationsInMillions(Double value) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
@Deprecated
public Double getMemtableOperationsInMillions() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
@Deprecated
public ColumnFamilyDefinition setMemtableThroughputInMb(Integer value) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
@Deprecated
public Integer getMemtableThroughputInMb() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public ColumnFamilyDefinition setMergeShardsChance(Double value) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public Double getMergeShardsChance() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public ColumnFamilyDefinition setMinCompactionThreshold(Integer value) {
    optionsMap.put("min_compaction_threshold", value);
    return this;
}

@Override
public Integer getMinCompactionThreshold() {
    return (Integer) optionsMap.get("min_compaction_threshold");
}

@Override
public ColumnFamilyDefinition setMaxCompactionThreshold(Integer value) {
    optionsMap.put("max_compaction_threshold", value);
    return this;
}

@Override
public Integer getMaxCompactionThreshold() {
    return (Integer) optionsMap.get("max_compaction_threshold");
}

@Override
public ColumnFamilyDefinition setCompactionStrategy(String strategy) {
    optionsMap.put("compaction_strategy_class", strategy);
    return this;
}

@Override
public String getCompactionStrategy() {
    return (String) optionsMap.get("compaction_strategy_class");
}

// Map-valued options are stored as JSON strings and parsed back on read.

@Override
public ColumnFamilyDefinition setCompactionStrategyOptions(Map<String, String> options) {
    optionsMap.put("compaction_strategy_options", toJsonString(options));
    return this;
}

@Override
public Map<String, String> getCompactionStrategyOptions() {
    return fromJsonString((String) optionsMap.get("compaction_strategy_options"));
}

@Override
public ColumnFamilyDefinition setCompressionOptions(Map<String, String> options) {
    optionsMap.put("compression_parameters", toJsonString(options));
    return this;
}

@Override
public Map<String, String> getCompressionOptions() {
    return fromJsonString((String) optionsMap.get("compression_parameters"));
}

@Override
public ColumnFamilyDefinition setBloomFilterFpChance(Double chance) {
    optionsMap.put("bloom_filter_fp_chance", chance);
    return this;
}

@Override
public Double getBloomFilterFpChance() {
    return (Double) optionsMap.get("bloom_filter_fp_chance");
}

@Override
public ColumnFamilyDefinition setCaching(String caching) {
    optionsMap.put("caching", caching);
    return this;
}

@Override
public String getCaching() {
    return (String) optionsMap.get("caching");
}
@Override
public ColumnFamilyDefinition setName(String name) {
    cfName = name;
    return this;
}

@Override
public String getName() {
    return cfName;
}

@Override
public ColumnFamilyDefinition setReadRepairChance(Double value) {
    optionsMap.put("read_repair_chance", value);
    return this;
}

@Override
public Double getReadRepairChance() {
    return (Double) optionsMap.get("read_repair_chance");
}

@Override
public ColumnFamilyDefinition setLocalReadRepairChance(Double value) {
    optionsMap.put("local_read_repair_chance", value);
    return this;
}

@Override
public Double getLocalReadRepairChance() {
    return (Double) optionsMap.get("local_read_repair_chance");
}

@Override
public ColumnFamilyDefinition setReplicateOnWrite(Boolean value) {
    optionsMap.put("replicate_on_write", value);
    return this;
}

@Override
public Boolean getReplicateOnWrite() {
    return (Boolean) optionsMap.get("replicate_on_write");
}

// Row-cache tuning has no CQL equivalent in this implementation.

@Override
public ColumnFamilyDefinition setRowCacheProvider(String value) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public String getRowCacheProvider() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public ColumnFamilyDefinition setRowCacheSavePeriodInSeconds(Integer value) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public Integer getRowCacheSavePeriodInSeconds() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public ColumnFamilyDefinition setRowCacheSize(Double size) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public Double getRowCacheSize() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public ColumnFamilyDefinition setComparatorType(String value) {
    optionsMap.put("comparator", value);
    return this;
}

@Override
public String getComparatorType() {
    return (String) optionsMap.get("comparator");
}

@Override
public ColumnFamilyDefinition setDefaultValidationClass(String value) {
    optionsMap.put("default_validator", value);
    return this;
}

@Override
public String getDefaultValidationClass() {
    return (String) optionsMap.get("default_validator");
}

@Override
public ColumnFamilyDefinition setId(Integer id) {
    optionsMap.put("id", id);
    return this;
}

@Override
public Integer getId() {
    return (Integer) optionsMap.get("id");
}

@Override
public ColumnFamilyDefinition setKeyAlias(ByteBuffer alias) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public ByteBuffer getKeyAlias() {
    // Key aliases are not tracked; callers get null rather than an exception.
    return null;
}

@Override
public ColumnFamilyDefinition setKeyCacheSavePeriodInSeconds(Integer value) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public Integer getKeyCacheSavePeriodInSeconds() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public ColumnFamilyDefinition setKeyCacheSize(Double keyCacheSize) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public Double getKeyCacheSize() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public ColumnFamilyDefinition setKeyValidationClass(String keyValidationClass) {
    optionsMap.put("key_validator", keyValidationClass);
    return this;
}

@Override
public String getKeyValidationClass() {
    return (String) optionsMap.get("key_validator");
}

// ---------------------------------------------------------------------
// Primary-key layout accessors, populated by readColDefinitions /
// createColumnDefinitions.
// ---------------------------------------------------------------------

/** Returns the first partition-key column (assumes at least one exists). */
public ColumnDefinition getPartitionKeyColumnDefinition() {
    return partitionKeyList.get(0);
}

public List<ColumnDefinition> getRegularColumnDefinitionList() {
    return regularColumnList;
}

public List<ColumnDefinition> getPartitionKeyColumnDefinitionList() {
    return partitionKeyList;
}

public List<ColumnDefinition> getClusteringKeyColumnDefinitionList() {
    return clusteringKeyList;
}

/** All primary-key column names: partition keys first, then clustering keys. */
public String[] getAllPkColNames() {
    return allPkColNames;
}
// Column definitions are derived from schema metadata, not built manually.

@Override
public ColumnFamilyDefinition addColumnDefinition(ColumnDefinition def) {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public ColumnDefinition makeColumnDefinition() {
    throw new UnsupportedOperationException("Operation not supported");
}

@Override
public void clearColumnDefinitionList() {
    throw new UnsupportedOperationException("Operation not supported");
}

/** Names of all raw options currently held by this definition. */
@Override
public Collection<String> getFieldNames() {
    return optionsMap.keySet();
}

@Override
public Object getFieldValue(String name) {
    return optionsMap.get(name);
}

@Override
public ColumnFamilyDefinition setFieldValue(String name, Object value) {
    optionsMap.put(name, value);
    return this;
}

@Override
public ColumnFamilyDefinition setGcGraceSeconds(Integer seconds) {
    optionsMap.put("gc_grace_seconds", seconds);
    return this;
}

@Override
public Integer getGcGraceSeconds() {
    return (Integer) optionsMap.get("gc_grace_seconds");
}

/**
 * Derives ad-hoc field metadata from the runtime type of each stored
 * option value (name uppercased, type = simple class name).
 */
@Override
public Collection<FieldMetadata> getFieldsMetadata() {
    List<FieldMetadata> list = new ArrayList<FieldMetadata>();

    for (String key : optionsMap.keySet()) {
        Object value = optionsMap.get(key);
        Class<?> clazz = value.getClass();

        String name = key.toUpperCase();
        String type = clazz.getSimpleName().toUpperCase();
        boolean isContainer = Collection.class.isAssignableFrom(clazz) || Map.class.isAssignableFrom(clazz);
        list.add(new FieldMetadata(name, type, isContainer));
    }
    return list;
}

@Override
public void setFields(Map<String, Object> options) {
    optionsMap.putAll(options);
}

/** Snapshot of all non-null options as a Properties object. */
@Override
public Properties getProperties() {
    Properties props = new Properties();
    for (String key : optionsMap.keySet()) {
        if (optionsMap.get(key) != null) {
            props.put(key, optionsMap.get(key));
        }
    }
    return props;
}

/** Merges dotted Properties (converted to a nested map) into the options. */
@Override
public void setProperties(Properties additionalProperties) throws Exception {
    Map<String, Object> props = propertiesToMap(additionalProperties);
    optionsMap.putAll(props);
}
/**
 * Materializes the synthetic column definitions and runs either the
 * CREATE TABLE or (when alterTable() was called) the ALTER TABLE
 * statement, wrapping the driver result.
 */
public OperationResult<SchemaChangeResult> execute() {
    createColumnDefinitions();
    final String cql = alterTable ? getUpdateQuery() : getCreateQuery();
    final ResultSet rs = session.execute(cql);
    return new CqlOperationResultImpl<SchemaChangeResult>(rs, null);
}
/**
 * Renders the CREATE TABLE statement for this definition. Tables without
 * clustering keys get an inline "PRIMARY KEY" on the partition column;
 * otherwise a compound PRIMARY KEY clause is emitted. Remaining entries in
 * optionsMap become the WITH clause (String values single-quoted).
 *
 * Fix: non-null options are collected before rendering the WITH clause, so
 * a null-valued option can no longer leave a dangling " AND " (previously
 * the separator was appended before discovering the next value was null),
 * and an all-null/empty map no longer emits a bare " WITH ".
 */
private String getCreateQuery() {
    StringBuilder sb = new StringBuilder("CREATE TABLE ");
    sb.append(keyspaceName).append(".").append(cfName);
    sb.append(" ( ");

    boolean compositePrimaryKey = clusteringKeyList.size() > 0;

    if (!compositePrimaryKey) {
        appendColDefinition(sb, partitionKeyList.iterator());
        sb.append(" PRIMARY KEY, ");
        appendColDefinition(sb, regularColumnList.iterator());
    } else {
        appendColDefinition(sb, partitionKeyList.iterator());
        sb.append(" ,");
        appendColDefinition(sb, clusteringKeyList.iterator());
        sb.append(" ,");
        appendColDefinition(sb, regularColumnList.iterator());
        sb.append(", PRIMARY KEY (");
        appendPrimaryKeyDefinition(sb, partitionKeyList.iterator(), clusteringKeyList.iterator());
        sb.append(") ");
    }
    sb.append(")");

    // Collect non-null "key = value" clauses first so AND separators can be
    // placed correctly regardless of which entries were skipped.
    List<String> withClauses = new ArrayList<String>();
    for (String pKey : optionsMap.keySet()) {
        Object pValue = optionsMap.get(pKey);
        if (pValue == null) {
            continue;
        }
        if (pValue instanceof String) {
            withClauses.add(pKey + " = '" + pValue + "'");
        } else {
            withClauses.add(pKey + " = " + pValue);
        }
    }
    if (!withClauses.isEmpty()) {
        sb.append(" WITH ");
        for (int i = 0; i < withClauses.size(); i++) {
            if (i > 0) {
                sb.append(" AND ");
            }
            sb.append(withClauses.get(i));
        }
    }

    String query = sb.toString();
    return query;
}
private String getUpdateQuery() {
StringBuilder sb = new StringBuilder("ALTER TABLE ");
sb.append(keyspaceName).append(".").append(cfName);
sb.append(" WITH ");
Iterator<String> propIter = optionsMap.keySet().iterator();
while(propIter.hasNext()) {
String pKey = propIter.next();
Object pValue = optionsMap.get(pKey);
sb.append(pKey).append(" = ").append(pValue);
if (propIter.hasNext()) {
sb.append(" AND ");
}
}
return sb.toString();
}
private void appendColDefinition(StringBuilder sb, Iterator<ColumnDefinition> iter) {
while (iter.hasNext()) {
CqlColumnDefinitionImpl colDef = (CqlColumnDefinitionImpl) iter.next();
sb.append(colDef.getName()).append(" ").append(colDef.getCqlType());
if (iter.hasNext()) {
sb.append(", ");
}
}
}
private void appendPrimaryKeyDefinition(StringBuilder sb, Iterator<ColumnDefinition> iter1, Iterator<ColumnDefinition> iter2) {
while (iter1.hasNext()) {
CqlColumnDefinitionImpl colDef = (CqlColumnDefinitionImpl) iter1.next();
sb.append(colDef.getName());
if (iter1.hasNext()) {
sb.append(", ");
}
}
if (iter2.hasNext()) {
sb.append(", ");
while (iter2.hasNext()) {
CqlColumnDefinitionImpl colDef = (CqlColumnDefinitionImpl) iter2.next();
sb.append(colDef.getName());
if (iter2.hasNext()) {
sb.append(", ");
}
}
}
}
private static Map<String, Object> propertiesToMap(Properties props) {
Map<String, Object> root = Maps.newTreeMap();
if (props == null) {
return root;
}
for (Entry<Object, Object> prop : props.entrySet()) {
String[] parts = StringUtils.split((String)prop.getKey(), ".");
Map<String, Object> node = root;
for (int i = 0; i < parts.length - 1; i++) {
if (!node.containsKey(parts[i])) {
node.put(parts[i], new LinkedHashMap<String, Object>());
}
node = (Map<String, Object>)node.get(parts[i]);
}
node.put(parts[parts.length-1], (String)prop.getValue());
}
return root;
}
private static String toJsonString(Map<String, String> options) {
if (options == null) {
return null;
}
JSONObject json = new JSONObject();
for(String key : options.keySet()) {
try {
json.put(key, options.get(key));
} catch (JSONException e) {
throw new RuntimeException(e);
}
}
return json.toString();
}
private static Map<String, String> fromJsonString(String jsonString) {
if (jsonString == null) {
return new HashMap<String, String>();
}
try {
JSONObject json = new JSONObject(jsonString);
Map<String, String> map = new HashMap<String, String>();
Iterator<String> iter = json.keys();
while(iter.hasNext()) {
String key = iter.next();
String value = json.getString(key).toString();
map.put(key, value);
}
return map;
} catch (JSONException e) {
throw new RuntimeException(e);
}
}
    @Override
    public List<ColumnDefinition> getColumnDefinitionList() {
        // NOTE(review): returns the internal list directly; callers can mutate
        // this definition's state. Confirm callers before wrapping it in an
        // unmodifiable view.
        return allColumnsDefinitionList;
    }

    /** Query generator used to build CQL mutation (write) statements for this CF. */
    public CFMutationQueryGen getMutationQueryGenerator() {
        return mutationQueryGen;
    }

    /** Query generator used to build CQL row read statements for this CF. */
    public CFRowQueryGen getRowQueryGenerator() {
        return rowQueryGen;
    }

    /** Debug helper: dump every option key/value pair to stdout. */
    public void printOptionsMap() {
        for (String key : optionsMap.keySet()) {
            System.out.println(key + " " + optionsMap.get(key));
        }
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.schema;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import org.apache.commons.lang.StringUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.cql.CqlOperationResultImpl;
import com.netflix.astyanax.ddl.ColumnFamilyDefinition;
import com.netflix.astyanax.ddl.FieldMetadata;
import com.netflix.astyanax.ddl.KeyspaceDefinition;
import com.netflix.astyanax.ddl.SchemaChangeResult;
/**
* Impl for {@link KeyspaceDefinition} using the java driver.
*
* @author poberai
*
*/
public class CqlKeyspaceDefinitionImpl implements KeyspaceDefinition {
    private static final Logger Log = LoggerFactory.getLogger(CqlKeyspaceDefinitionImpl.class);

    // Session used to execute the generated schema statements.
    private final Session session;
    // When true, execute() issues ALTER KEYSPACE instead of CREATE KEYSPACE.
    private boolean alterKeyspace;
    // Keyspace options: "name", "replication" (a nested map), "durable_writes", ...
    private final Map<String, Object> options = new HashMap<String, Object>();
    // Column family definitions executed after the keyspace DDL in execute().
    private final List<CqlColumnFamilyDefinitionImpl> cfDefList = new ArrayList<CqlColumnFamilyDefinitionImpl>();

    /** Create an empty keyspace definition bound to the given session. */
    public CqlKeyspaceDefinitionImpl(Session session) {
        this.session = session;
    }

    /** Create a definition from a raw options map (new or legacy thrift style). */
    public CqlKeyspaceDefinitionImpl(Session session, Map<String, Object> input) {
        this.session = session;
        checkOptionsMap(input);
    }

    /** Create a definition from dotted java.util.Properties. */
    public CqlKeyspaceDefinitionImpl(Session session, Properties props) {
        this.session = session;
        checkOptionsMap(propertiesToMap(props));
    }

    /**
     * Create a definition from a system.schema_keyspaces row.
     * NOTE(review): parseStrategyOptions() may return null for a null/empty
     * strategy_options column, which would NPE inside setStrategyOptionsMap's
     * putAll — verify whether that column can be empty in practice.
     */
    public CqlKeyspaceDefinitionImpl(Session session, Row row) {
        this.session = session;
        this.setName(row.getString("keyspace_name"));
        this.setStrategyClass(row.getString("strategy_class"));
        this.setStrategyOptionsMap(parseStrategyOptions(row.getString("strategy_options")));
        this.options.put("durable_writes", row.getBool("durable_writes"));
    }

    /** Switch execute() to generate an ALTER KEYSPACE statement. */
    public CqlKeyspaceDefinitionImpl alterKeyspace() {
        alterKeyspace = true;
        return this;
    }
    /** Set the keyspace name; stored lower-cased. */
    @Override
    public CqlKeyspaceDefinitionImpl setName(String name) {
        this.options.put("name", name.toLowerCase());
        return this;
    }

    @Override
    public String getName() {
        return (String) options.get("name");
    }

    /** Record the replication strategy class under the "class" replication key. */
    @Override
    public CqlKeyspaceDefinitionImpl setStrategyClass(String strategyClass) {
        getOrCreateReplicationMap().put("class", strategyClass);
        return this;
    }

    @Override
    public String getStrategyClass() {
        return (String) getOrCreateReplicationMap().get("class");
    }

    /** Merge string-valued strategy options into the replication map. */
    @Override
    public CqlKeyspaceDefinitionImpl setStrategyOptions(Map<String, String> strategyOptions) {
        getOrCreateReplicationMap().putAll(strategyOptions);
        return this;
    }

    /** Merge object-valued strategy options into the replication map. */
    public CqlKeyspaceDefinitionImpl setStrategyOptionsMap(Map<String, Object> strategyOptions) {
        getOrCreateReplicationMap().putAll(strategyOptions);
        return this;
    }

    /** Add a single replication option. */
    @Override
    public CqlKeyspaceDefinitionImpl addStrategyOption(String name, String value) {
        this.getOrCreateReplicationMap().put(name, value);
        return this;
    }

    /**
     * Snapshot the replication map as String-to-String.
     * NOTE(review): values are cast blindly to String; a non-String value
     * (possible via setStrategyOptionsMap) would throw ClassCastException.
     */
    @Override
    public Map<String, String> getStrategyOptions() {
        Map<String, String> map = new HashMap<String, String>();
        Map<String, Object> repMap = getOrCreateReplicationMap();
        for (String key : repMap.keySet()) {
            map.put(key, (String) repMap.get(key));
        }
        return map;
    }
@Override
public List<ColumnFamilyDefinition> getColumnFamilyList() {
Statement query = QueryBuilder.select().all()
.from("system", "schema_columnfamilies")
.where(eq("keyspace_name", getName()));
ResultSet rs = session.execute(query);
List<ColumnFamilyDefinition> cfDefs = new ArrayList<ColumnFamilyDefinition>();
List<Row> rows = rs.all();
if (rows != null) {
for (Row row : rows) {
cfDefs.add(new CqlColumnFamilyDefinitionImpl(session, row));
}
}
return cfDefs;
}
@Override
public ColumnFamilyDefinition getColumnFamily(String columnFamilyName) {
Statement query = QueryBuilder.select().all()
.from("system", "schema_columnfamilies")
.where(eq("keyspace_name", getName()))
.and(eq("columnfamily_name", columnFamilyName.toLowerCase()));
Row row = session.execute(query).one();
if (row == null) {
throw new RuntimeException("CF not found: " + columnFamilyName);
}
return new CqlColumnFamilyDefinitionImpl(session, row);
}
    /**
     * Execute the given CF definition immediately.
     * NOTE(review): the CF DDL runs right here, not deferred to execute().
     */
    @Override
    public KeyspaceDefinition addColumnFamily(ColumnFamilyDefinition cfDef) {
        CqlColumnFamilyDefinitionImpl cqlCfDef = (CqlColumnFamilyDefinitionImpl) cfDef;
        cqlCfDef.execute();
        return this;
    }

    /** Names of all recorded keyspace options. */
    @Override
    public Collection<String> getFieldNames() {
        return options.keySet();
    }

    @Override
    public Object getFieldValue(String name) {
        return options.get(name);
    }

    @Override
    public KeyspaceDefinition setFieldValue(String name, Object value) {
        this.options.put(name, value);
        return this;
    }
@Override
public Collection<FieldMetadata> getFieldsMetadata() {
List<FieldMetadata> list = new ArrayList<FieldMetadata>();
for (String key : options.keySet()) {
Object value = options.get(key);
Class<?> clazz = value.getClass();
String name = key.toUpperCase();
String type = clazz.getSimpleName().toUpperCase();
boolean isContainer = Collection.class.isAssignableFrom(clazz) || Map.class.isAssignableFrom(clazz);
list.add(new FieldMetadata(name, type, isContainer));
}
return list;
}
    /** Replace all options with the given map (validated/normalized by checkOptionsMap). */
    @Override
    public void setFields(Map<String, Object> optionsMap) {
        checkOptionsMap(optionsMap);
    }

    /** Flatten the nested option map into dotted java.util.Properties. */
    @Override
    public Properties getProperties() throws Exception {
        return mapToProperties(options);
    }

    /** Replace all options from dotted java.util.Properties. */
    @Override
    public void setProperties(Properties props) throws Exception {
        options.clear();
        options.putAll(propertiesToMap(props));
    }
    /**
     * Run the CREATE/ALTER KEYSPACE statement, then execute any queued column
     * family definitions.
     */
    public OperationResult<SchemaChangeResult> execute() {
        String query = getQuery();
        if (Log.isDebugEnabled()) {
            Log.debug("Query : " + query);
        }
        CqlOperationResultImpl<SchemaChangeResult> result = new CqlOperationResultImpl<SchemaChangeResult>(session.execute(query), null);
        for (CqlColumnFamilyDefinitionImpl cfDef : cfDefList) {
            cfDef.execute();
        }
        return result;
    }

    /**
     * Build the CQL statement: CREATE or ALTER KEYSPACE with the replication
     * map and, when present, durable_writes.
     */
    private String getQuery() {
        String cmd = (alterKeyspace) ? "ALTER" : "CREATE";
        StringBuilder sb = new StringBuilder(cmd);
        sb.append(" KEYSPACE ");
        sb.append(getName());
        // Unchecked cast: "replication" is always a Map here — it is populated
        // only via checkOptionsMap / getOrCreateReplicationMap.
        Map<String, Object> replicationOptions = (Map<String, Object>) options.get("replication");
        appendReplicationOptions(sb, replicationOptions);

        Object durableWrites = options.get("durable_writes");
        if (durableWrites != null) {
            sb.append(" AND durable_writes = ").append(durableWrites);
        }
        return sb.toString();
    }
private void appendReplicationOptions(StringBuilder sb, Map<String, Object> replicationOptions) {
if (replicationOptions == null || replicationOptions.size() == 0) {
throw new RuntimeException("Missing properties for 'replication'");
}
sb.append(" WITH replication = {" );
Iterator<Entry<String, Object>> iter = replicationOptions.entrySet().iterator();
while (iter.hasNext()) {
Entry<String, Object> entry = iter.next();
sb.append("'").append(entry.getKey()).append("' : '").append(entry.getValue()).append("'");
if (iter.hasNext()) {
sb.append(", ");
}
}
sb.append("}");
}
    /**
     * Normalize user-supplied options into the CQL3 CREATE KEYSPACE shape.
     * New-style input must already contain a "replication" map; legacy
     * thrift-style input ("strategy_class" + "strategy_options") is converted
     * into an equivalent "replication" sub-map. Existing options are replaced.
     */
    private void checkOptionsMap(Map<String, Object> input) {
        Object strategyOptions = input.get("strategy_options");
        if (strategyOptions == null) {
            Preconditions.checkArgument(input.get("replication") != null, "Invalid CREATE KEYSPACE properties");
            options.clear();
            options.putAll(input);
        } else {
            // this is an old style map. Convert to the new spec of CREATE KEYSPACE
            options.clear();
            Map<String, Object> replicationOptions = new HashMap<String, Object>();
            options.put("replication", replicationOptions);
            Map<String, Object> oldStrategyOptions = (Map<String, Object>) input.get("strategy_options");
            replicationOptions.putAll(oldStrategyOptions);
            String strategyClass = (String) input.get("strategy_class");
            replicationOptions.put("class", strategyClass);
        }
    }

    /** Return the "replication" sub-map, creating and registering it on first use. */
    private Map<String, Object> getOrCreateReplicationMap() {
        Map<String, Object> replicationMap = (Map<String, Object>) options.get("replication");
        if (replicationMap == null) {
            replicationMap = new HashMap<String, Object>();
            options.put("replication", replicationMap);
        }
        return replicationMap;
    }
private static Map<String, Object> propertiesToMap(Properties props) {
Map<String, Object> root = Maps.newTreeMap();
for (Entry<Object, Object> prop : props.entrySet()) {
String[] parts = StringUtils.split((String)prop.getKey(), ".");
Map<String, Object> node = root;
for (int i = 0; i < parts.length - 1; i++) {
if (!node.containsKey(parts[i])) {
node.put(parts[i], new LinkedHashMap<String, Object>());
}
node = (Map<String, Object>)node.get(parts[i]);
}
node.put(parts[parts.length-1], (String)prop.getValue());
}
return root;
}
    /** Flatten a nested option map into Properties with dotted keys. */
    private static Properties mapToProperties(Map<String, Object> map) {
        Properties props = new Properties();
        addProperties(props, null, map);
        return props;
    }

    /**
     * Recursive helper: nested map keys are joined with '.' onto the prefix;
     * leaf values are stringified via toString().
     */
    private static void addProperties(Properties props, String prefix, Map<String, Object> subMap) {
        for (Entry<String, Object> entry : subMap.entrySet()) {
            String key = (prefix != null) ? prefix + "." + entry.getKey() : entry.getKey();
            if (entry.getValue() instanceof Map) {
                addProperties(props, key, (Map<String, Object>) entry.getValue());
            } else {
                props.put(key, entry.getValue().toString());
            }
        }
    }
private Map<String, Object> parseStrategyOptions(String jsonString) {
if (jsonString == null || jsonString.isEmpty()) {
return null;
}
Map<String, Object> map = new HashMap<String, Object>();
try {
JSONObject json = new JSONObject(jsonString);
Iterator<String> iter = json.keys();
while (iter.hasNext()) {
String key = iter.next();
Object obj = json.get(key);
map.put(key, obj);
}
return map;
} catch (JSONException e) {
throw new RuntimeException(e);
}
}
public String toString() {
return "CqlKeyspaceDefinition=[ " + options.toString() + " ]";
}
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.RowCopier;
import com.netflix.astyanax.Serializer;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.cql.CqlAbstractExecutionImpl;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.CqlOperationResultImpl;
import com.netflix.astyanax.cql.reads.model.CqlColumnListImpl;
import com.netflix.astyanax.cql.reads.model.CqlColumnSlice;
import com.netflix.astyanax.cql.reads.model.CqlRangeBuilder;
import com.netflix.astyanax.cql.reads.model.CqlRangeImpl;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.model.ByteBufferRange;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.model.ColumnSlice;
import com.netflix.astyanax.query.ColumnCountQuery;
import com.netflix.astyanax.query.ColumnQuery;
import com.netflix.astyanax.query.RowQuery;
import com.netflix.astyanax.serializers.CompositeRangeBuilder;
import com.netflix.astyanax.serializers.CompositeRangeBuilder.CompositeByteBufferRange;
/**
* Impl for {@link RowQuery} that uses java driver. It manages all single row queries and also has support for pagination.
* All {@link ColumnQuery} and {@link ColumnCountQuery}(s) originate from this class.
*
* Note that the class acts more like a placeholder for the structure query context. The actual query construction
* is done by other classes like {@link CFRowQueryGen} and {@link CFColumnQueryGen}
*
* @author poberai
*
* @param <K>
* @param <C>
*/
public class CqlRowQueryImpl<K, C> implements RowQuery<K, C> {
    // Keyspace-level context (session and keyspace configuration).
    private final KeyspaceContext ksContext;
    // Column-family query context; also validates the row key.
    private final CFQueryContext<K,C> cfContext;
    // The target row key, as returned by cfContext.checkRowKey().
    private final Object rowKey;
    // Discrete column set or contiguous column range restriction.
    private final CqlColumnSlice<C> columnSlice = new CqlColumnSlice<C>();
    // Range over composite columns, set via withColumnRange(ByteBufferRange).
    private CompositeByteBufferRange compositeRange;
    // Client-side pagination state for this query.
    private final PaginationContext paginationContext = new PaginationContext();

    /** The flavor of single-row query being built. */
    public enum RowQueryType {
        AllColumns, ColumnSlice, ColumnRange, SingleColumn;
    }

    private RowQueryType queryType = RowQueryType.AllColumns; // The default
    private boolean useCaching = false;

    /** Build a row query for the given keyspace/CF context and row key. */
    public CqlRowQueryImpl(KeyspaceContext ksCtx, CFQueryContext<K,C> cfCtx, K rKey, boolean useCaching) {
        this.ksContext = ksCtx;
        this.cfContext = cfCtx;
        this.rowKey = cfCtx.checkRowKey(rKey);
        this.useCaching = useCaching;
    }
    /**
     * Execute the row query. When auto-pagination is on and a page has already
     * been served, the next page is drawn from the previously fetched result
     * set instead of re-executing the query.
     */
    @Override
    public OperationResult<ColumnList<C>> execute() throws ConnectionException {
        if (paginationContext.isPaginating()) {
            if (!paginationContext.isFirstPage()) {
                return new CqlOperationResultImpl<ColumnList<C>>(paginationContext.getResultSet(), paginationContext.getNextColumns());
            }
            // Note that if we are paginating, and if this is the first time / page,
            // then we will just execute the query normally, and then init the pagination context
        }
        return new InternalRowQueryExecutionImpl(this).execute();
    }

    /**
     * Execute the row query asynchronously.
     * NOTE(review): unlike execute(), this path never consults
     * paginationContext — confirm whether async + autoPaginate is supported.
     */
    @Override
    public ListenableFuture<OperationResult<ColumnList<C>>> executeAsync() throws ConnectionException {
        return new InternalRowQueryExecutionImpl(this).executeAsync();
    }
    /** Narrow this row query down to a single column. */
    @Override
    public ColumnQuery<C> getColumn(C column) {
        queryType = RowQueryType.SingleColumn;
        return new CqlColumnQueryImpl<C>(ksContext, cfContext, rowKey, column, useCaching);
    }

    /** Restrict the query to a discrete set of columns. */
    @Override
    public RowQuery<K, C> withColumnSlice(Collection<C> columns) {
        queryType = RowQueryType.ColumnSlice;
        this.columnSlice.setColumns(columns);
        return this;
    }

    @Override
    public RowQuery<K, C> withColumnSlice(C... columns) {
        queryType = RowQueryType.ColumnSlice;
        return withColumnSlice(Arrays.asList(columns));
    }

    /** Accept either a discrete column set or a start/end range from a ColumnSlice. */
    @Override
    public RowQuery<K, C> withColumnSlice(ColumnSlice<C> colSlice) {
        if (colSlice.getColumns() != null) {
            return withColumnSlice(colSlice.getColumns());
        } else {
            return withColumnRange(colSlice.getStartColumn(), colSlice.getEndColumn(), colSlice.getReversed(), colSlice.getLimit());
        }
    }
    /** Restrict the query to a contiguous column range on the clustering key. */
    @Override
    public RowQuery<K, C> withColumnRange(C startColumn, C endColumn, boolean reversed, int count) {
        queryType = RowQueryType.ColumnRange;
        // "column1" is the clustering column name used for this range;
        // NOTE(review): assumes the CQL3 default column naming — confirm for
        // schemas with explicitly named clustering columns.
        this.columnSlice.setCqlRange(new CqlRangeBuilder<C>()
                .setColumn("column1")
                .setStart(startColumn)
                .setEnd(endColumn)
                .setReversed(reversed)
                .setLimit(count)
                .build());
        return this;
    }

    /** ByteBuffer variant: deserialize the endpoints then delegate. */
    @Override
    public RowQuery<K, C> withColumnRange(ByteBuffer startColumn, ByteBuffer endColumn, boolean reversed, int limit) {
        queryType = RowQueryType.ColumnRange;
        Serializer<C> colSerializer = cfContext.getColumnFamily().getColumnSerializer();
        // Null or zero-capacity buffers mean an open-ended range on that side.
        C start = (startColumn != null && startColumn.capacity() > 0) ? colSerializer.fromByteBuffer(startColumn) : null;
        C end = (endColumn != null && endColumn.capacity() > 0) ? colSerializer.fromByteBuffer(endColumn) : null;
        return this.withColumnRange(start, end, reversed, limit);
    }

    /**
     * Range variant: recognizes composite-range builders and native CQL
     * ranges, falling back to the ByteBuffer endpoint form otherwise.
     */
    @SuppressWarnings("unchecked")
    @Override
    public RowQuery<K, C> withColumnRange(ByteBufferRange range) {
        queryType = RowQueryType.ColumnRange;
        if (range instanceof CompositeByteBufferRange) {
            this.compositeRange = (CompositeByteBufferRange) range;
        } else if (range instanceof CompositeRangeBuilder) {
            this.compositeRange = ((CompositeRangeBuilder)range).build();
        } else if (range instanceof CqlRangeImpl) {
            this.columnSlice.setCqlRange((CqlRangeImpl<C>) range);
        } else {
            return this.withColumnRange(range.getStart(), range.getEnd(), range.isReversed(), range.getLimit());
        }
        return this;
    }
    /** @deprecated use {@link #autoPaginate(boolean)} instead. */
    @Override
    @Deprecated
    public RowQuery<K, C> setIsPaginating() {
        return autoPaginate(true);
    }

    /** Enable/disable client-side pagination over this row's columns. */
    @Override
    public RowQuery<K, C> autoPaginate(boolean enabled) {
        paginationContext.setPaginating(enabled);
        return this;
    }

    /** Set up a copy of this row into another column family. */
    @Override
    public RowCopier<K, C> copyTo(ColumnFamily<K, C> columnFamily, K rowKey) {
        return new CqlRowCopier<K,C>(columnFamily, rowKey, this, ksContext);
    }

    /** Build a column-count query over the columns selected by this row query. */
    @Override
    public ColumnCountQuery getCount() {
        return new CqlColumnCountQueryImpl(ksContext, cfContext, new InternalRowQueryExecutionImpl(this).getQuery());
    }
    /**
     * Internal execution adapter: generates the CQL statement for this row
     * query and parses the driver ResultSet into an Astyanax ColumnList.
     */
    private class InternalRowQueryExecutionImpl extends CqlAbstractExecutionImpl<ColumnList<C>> {

        private final CqlColumnFamilyDefinitionImpl cfDef = (CqlColumnFamilyDefinitionImpl) cf.getColumnFamilyDefinition();
        // All primary-key column names (partition + clustering).
        private final String[] allPkColumnNames = cfDef.getAllPkColNames();
        private final List<ColumnDefinition> regularCols = cfDef.getRegularColumnDefinitionList();

        private final CqlRowQueryImpl<?,?> rowQuery;

        public InternalRowQueryExecutionImpl(CqlRowQueryImpl<?,?> rQuery) {
            super(ksContext, cfContext);
            this.rowQuery = rQuery;
        }

        @Override
        public CassandraOperationType getOperationType() {
            return CassandraOperationType.GET_ROW;
        }

        @Override
        public Statement getQuery() {
            Statement stmt = cfDef.getRowQueryGenerator().getQueryStatement(rowQuery, useCaching);
            // Translate the column limit to the fetch size. This is useful for pagination
            if (paginationContext.isPaginating() && columnSlice.isRangeQuery()) {
                // NOTE(review): driver fetch-size propagation is currently
                // disabled; pagination is instead emulated in PaginationContext
                // over the fully fetched result set.
            }
            return stmt;
        }

        @Override
        public ColumnList<C> parseResultSet(ResultSet resultSet) throws NotFoundException {
            // Use case when the schema is just a flat table. Note that there is no pagination support here.
            if (allPkColumnNames.length == 1 || regularCols.size() > 1) {
                List<Row> rows = resultSet.all();
                if (rows == null || rows.isEmpty()) {
                    return new CqlColumnListImpl<C>();
                } else {
                    // Flat table: the single driver row carries all the columns.
                    return new CqlColumnListImpl<C>(rows.get(0), cf);
                }
            }

            // There is a clustering key for this schema. Check whether we are paginating for this row query
            if (paginationContext.isPaginating()) {
                paginationContext.init(resultSet, columnSlice.getFetchSize());
                return paginationContext.getNextColumns();
            } else {
                List<Row> rows = resultSet.all();
                if (rows == null || rows.isEmpty()) {
                    return new CqlColumnListImpl<C>();
                } else {
                    // Clustered table: each driver row maps to one astyanax column.
                    return new CqlColumnListImpl<C>(rows, cf);
                }
            }
        }
    }
    /**
     * Client-side pagination state: the driver result set is fetched once and
     * then handed out in fetchSize-sized pages via getNextColumns().
     */
    private class PaginationContext {

        // How many rows to fetch at a time
        private int fetchSize = Integer.MAX_VALUE;
        // Turn pagination ON/OFF
        private boolean paginate = false;
        // Indicate whether the first page has been consumed.
        private boolean isFirstPage = true;
        // Track the result set
        private ResultSet resultSet = null;
        // State for all rows
        private Iterator<Row> rowIter = null;

        private PaginationContext() {
        }

        private void setPaginating(boolean condition) {
            paginate = condition;
        }

        private boolean isPaginating() {
            return paginate;
        }

        private boolean isFirstPage() {
            return isFirstPage;
        }

        private void firstPageConsumed() {
            isFirstPage = false;
        }

        /** Drain up to fetchSize driver rows into the next column page. */
        private CqlColumnListImpl<C> getNextColumns() {
            try {
                int count = 0;
                List<Row> rows = new ArrayList<Row>();
                while ((count < fetchSize) && rowIter.hasNext()) {
                    rows.add(rowIter.next());
                    count++;
                }
                return new CqlColumnListImpl<C>(rows, cfContext.getColumnFamily());
            } finally {
                // Mark the first page consumed even if building the page failed.
                firstPageConsumed();
            }
        }

        /** Bind the pagination state to a freshly executed result set. */
        private void init(ResultSet rs, int size) {
            this.resultSet = rs;
            this.rowIter = resultSet.iterator();
            if (size > 0) {
                fetchSize = size;
            }
        }

        private ResultSet getResultSet() {
            return this.resultSet;
        }
    }
    /** The target row key (after checkRowKey validation). */
    public Object getRowKey() {
        return rowKey;
    }

    /** Column slice / range restriction recorded for this query. */
    public CqlColumnSlice<C> getColumnSlice() {
        return columnSlice;
    }

    /** Composite-column range restriction, if one was supplied. */
    public CompositeByteBufferRange getCompositeRange() {
        return compositeRange;
    }

    /** Which flavor of row query this is (all columns, slice, range, single column). */
    public RowQueryType getQueryType() {
        return queryType;
    }

    /** True when auto-pagination has been enabled via autoPaginate(true). */
    public boolean isPaginating() {
        return paginationContext.isPaginating();
    }
}
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import static com.datastax.driver.core.querybuilder.QueryBuilder.in;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.Select;
import com.datastax.driver.core.querybuilder.Select.Where;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
/**
* This class encapsulates all the query generators for row slice queries that use a collection of row keys.
* There are different row query generators depending on the specific query signature.
*
* e.g
* 1. Select all columns for all the rows in the row range
* 2. Select rows with column slice
* 3. Select rows with column range
* 4. Select rows using a composite range builder for composite column based schema
*
* Note that for simplicity and brevity, there is another class that handles similar operations for queries that
* specify a row range as opposed to a collection of row keys (as is done here).
* See {@link CFRowRangeQueryGen} for that implementation. The current class is meant for row slice queries using only row key collections.
*
* Each of the query generators uses the {@link QueryGenCache} so that it can cache the {@link PreparedStatement} as well
* for future use by queries with the same signatures.
*
* But one must use this with care, since the subsequent query must have the exact signature, else binding values with
* the previously constructed prepared statement will break.
*
* Here is a simple example of a bad query that is not cacheable.
*
* Say that we want a simple query with a column range in it.
*
* ks.prepareQuery(myCF)
* .getRow("1")
* .withColumnSlice("colStart")
* .execute();
*
* In most cases this query lends itself to a CQL3 representation as follows
*
* SELECT * FROM ks.mfCF WHERE KEY = ? AND COLUMN1 > ?;
*
* Now say that we want to perform a successive query (with caching turned ON), but add to the column range query
*
* ks.prepareQuery(myCF)
* .getRow("1")
* .withColumnSlice("colStart", "colEnd")
* .execute();
*
* NOTE THE USE OF BOTH colStart AND colEnd <----- THIS IS A DIFFERENT QUERY SIGNATURE
* AND THE CQL QUERY WILL PROBABLY LOOK LIKE
*
* SELECT * FROM ks.mfCF WHERE KEY = ? AND COLUMN1 > ? AND COLUMN1 < ?; <----- NOTE THE EXTRA BIND MARKER AT THE END FOR THE colEnd
*
* If we re-use the previously cached prepared statement, then it will not work for the new query signature. The way out of this is to NOT
* use caching with different query signatures.
*
* @author poberai
*
*/
public class CFRowKeysQueryGen extends CFRowSliceQueryGen {
    /** Build the row-keys query generator for the given keyspace / CF schema. */
    public CFRowKeysQueryGen(Session session, String keyspaceName, CqlColumnFamilyDefinitionImpl cfDefinition) {
        super(session, keyspaceName, cfDefinition);
    }
/**
* Query generator for selecting all columns for the specified row keys.
*
* Note that this object is an implementation of {@link QueryGenCache}
* and hence it maintains a cached reference to the previously constructed {@link PreparedStatement} for row range queries with the same
* signature (i.e all columns for a similar set of row keys)
*/
    private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectAllColumnsForRowKeys = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

        @Override
        public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            return new Callable<RegularStatement>() {
                @Override
                public RegularStatement call() throws Exception {
                    // SELECT * ... WHERE <pk> IN (?, ?, ...) — one marker per row key.
                    Select select = selectAllColumnsFromKeyspaceAndCF();
                    return select.where(in(partitionKeyCol, bindMarkerArray(rowSliceQuery.getRowSlice().getKeys().size())));
                }
            };
        }

        @Override
        public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            // Bind the row keys positionally to the IN-clause markers.
            return pStatement.bind(rowSliceQuery.getRowSlice().getKeys().toArray());
        }
    };
/**
* Query generator for selecting a column set for the specified row keys.
*
* Note that this object is an implementation of {@link QueryGenCache}
* and hence it maintains a cached reference to the previously constructed {@link PreparedStatement} for row range queries with the same
* signature (i.e a similar set of columns for a similar set of rows )
*/
    private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectColumnSetForRowKeys = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

        @Override
        public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            return new Callable<RegularStatement>() {
                @Override
                public RegularStatement call() throws Exception {
                    // Only supported for schemas with exactly one clustering column.
                    if (clusteringKeyCols.size() != 1) {
                        throw new RuntimeException("Cannot perform row slice with col slice query for this schema, clusteringKeyCols.size(): "
                                + clusteringKeyCols.size());
                    }

                    Collection<?> rowKeys = rowSliceQuery.getRowSlice().getKeys();
                    Collection<?> cols = rowSliceQuery.getColumnSlice().getColumns();

                    // THIS IS A QUERY WHERE THE COLUMN NAME IS DYNAMIC  E.G TIME SERIES
                    Object[] columns = cols.toArray(new Object[cols.size()]);

                    String clusteringCol = clusteringKeyCols.get(0).getName();

                    // SELECT * ... WHERE <pk> IN (...) AND <clustering col> IN (...)
                    Select select = selectAllColumnsFromKeyspaceAndCF();
                    return select.where(in(partitionKeyCol, bindMarkerArray(rowKeys.size())))
                            .and(in(clusteringCol, bindMarkerArray(columns.length)));
                }
            };
        }

        @Override
        public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            if (clusteringKeyCols.size() != 1) {
                throw new RuntimeException("Cannot perform row slice with col slice query for this schema, clusteringKeyCols.size(): "
                        + clusteringKeyCols.size());
            }
            // Bind order must match the query: row keys first, then column names.
            List<Object> values = new ArrayList<Object>();
            values.addAll(rowSliceQuery.getRowSlice().getKeys());
            values.addAll(rowSliceQuery.getColumnSlice().getColumns());
            return pStatement.bind(values.toArray());
        }
    };
/**
* Query generator for selecting a column range for the specified row keys.
*
* Note that this object is an implementation of {@link QueryGenCache}
* and hence it maintains a cached reference to the previously constructed {@link PreparedStatement} for row range queries with the same
* signature (i.e a similar column range for a similar set of rows)
*/
    private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectColumnRangeForRowKeys = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {

        @Override
        public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            return new Callable<RegularStatement>() {
                @Override
                public RegularStatement call() throws Exception {
                    // Only supported for schemas with exactly one clustering column.
                    if (clusteringKeyCols.size() != 1) {
                        throw new RuntimeException("Cannot perform row slice with col slice query for this schema, clusteringKeyCols.size(): "
                                + clusteringKeyCols.size());
                    }

                    // SELECT * ... WHERE <pk> IN (...) plus the start/end clauses
                    // contributed by the column range.
                    Select select = selectAllColumnsFromKeyspaceAndCF();
                    Where where = select.where(in(partitionKeyCol, bindMarkerArray(rowSliceQuery.getRowSlice().getKeys().size())));
                    where = addWhereClauseForColumnRange(where, rowSliceQuery.getColumnSlice());
                    return where;
                }
            };
        }

        @Override
        public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
            if (clusteringKeyCols.size() != 1) {
                throw new RuntimeException("Cannot perform row slice with col slice query for this schema, clusteringKeyCols.size(): "
                        + clusteringKeyCols.size());
            }
            // Bind order: row keys first, then the column-range bounds.
            List<Object> values = new ArrayList<Object>();
            values.addAll(rowSliceQuery.getRowSlice().getKeys());
            bindWhereClauseForColumnRange(values, rowSliceQuery.getColumnSlice());
            return pStatement.bind(values.toArray());
        }
    };
/**
* Query generator for selecting a composite column range for the specified row keys.
*
* Note that this object is an implementation of {@link QueryGenCache}
* and hence it maintains a cached reference to the previously constructed {@link PreparedStatement} for row range queries with the same
* signature (i.e a similar composite column range for a similar set of rows)
*/
private QueryGenCache<CqlRowSliceQueryImpl<?,?>> SelectCompositeColumnRangeForRowKeys = new QueryGenCache<CqlRowSliceQueryImpl<?,?>>(sessionRef) {
@Override
public Callable<RegularStatement> getQueryGen(final CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
return new Callable<RegularStatement>() {
@Override
public RegularStatement call() throws Exception {
Select select = selectAllColumnsFromKeyspaceAndCF();
Where stmt = select.where(in(partitionKeyCol, bindMarkerArray(rowSliceQuery.getRowSlice().getKeys().size())));
stmt = addWhereClauseForCompositeColumnRange(stmt, rowSliceQuery.getCompositeRange());
return stmt;
}
};
}
@Override
public BoundStatement bindValues(PreparedStatement pStatement, CqlRowSliceQueryImpl<?, ?> rowSliceQuery) {
List<Object> values = new ArrayList<Object>();
values.addAll(rowSliceQuery.getRowSlice().getKeys());
bindWhereClauseForCompositeColumnRange(values, rowSliceQuery.getCompositeRange());
return pStatement.bind(values.toArray());
}
};
/**
* Main method that is used to generate the java driver statement from the given Astyanax row slice query.
* Note that the method allows the caller to specify whether to use caching or not.
*
* If caching is disabled, then the PreparedStatement is generated every time
* If caching is enabled, then the cached PreparedStatement is used for the given Astyanax RowSliceQuery.
* In this case if the PreparedStatement is missing, then it is constructed from the Astyanax query and
* used to init the cached reference and hence can be used by other subsequent Astayanx RowSliceQuery
* operations with the same signature (that opt in for caching)
*
* @param rowSliceQuery
* @param useCaching
* @return
*/
public BoundStatement getQueryStatement(CqlRowSliceQueryImpl<?,?> rowSliceQuery, boolean useCaching) {
switch (rowSliceQuery.getColQueryType()) {
case AllColumns:
return SelectAllColumnsForRowKeys.getBoundStatement(rowSliceQuery, useCaching);
case ColumnSet:
return SelectColumnSetForRowKeys.getBoundStatement(rowSliceQuery, useCaching);
case ColumnRange:
if (isCompositeColumn) {
return SelectCompositeColumnRangeForRowKeys.getBoundStatement(rowSliceQuery, useCaching);
} else {
return SelectColumnRangeForRowKeys.getBoundStatement(rowSliceQuery, useCaching);
}
default :
throw new RuntimeException("RowSliceQuery with row keys use case not supported.");
}
}
}
| 8,097 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CFColumnQueryGen.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicReference;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.RegularStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select.Builder;
import com.datastax.driver.core.querybuilder.Select.Where;
import com.netflix.astyanax.cql.schema.CqlColumnFamilyDefinitionImpl;
import com.netflix.astyanax.ddl.ColumnDefinition;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer;
import com.netflix.astyanax.serializers.AnnotatedCompositeSerializer.ComponentSerializer;
/**
 * Generates (and optionally caches) java driver prepared statements for Astyanax
 * single-column reads ({@link CqlColumnQueryImpl}) against one column family.
 *
 * The query shape is derived from the table schema:
 * flat table (no clustering key), single clustering key (dynamic column name),
 * or composite column (more than one clustering key).
 */
public class CFColumnQueryGen {

	// Session is held behind an AtomicReference so the QueryGenCache instances
	// can re-prepare their statements if the underlying session is replaced.
	private AtomicReference<Session> sessionRef = new AtomicReference<Session>(null);
	private final String keyspace;
	private final CqlColumnFamilyDefinitionImpl cfDef;

	private final String partitionKeyCol;
	private final List<ColumnDefinition> clusteringKeyCols;
	private final List<ColumnDefinition> regularCols;

	private boolean isCompositeColumn = false;
	private boolean isFlatTable = false;

	private static final String BIND_MARKER = "?";

	/**
	 * @param session       active java driver session used to prepare statements
	 * @param keyspaceName  keyspace that the column family lives in
	 * @param cfDefinition  schema definition used to derive the query shape
	 */
	public CFColumnQueryGen(Session session, String keyspaceName, CqlColumnFamilyDefinitionImpl cfDefinition) {

		this.keyspace = keyspaceName;
		this.cfDef = cfDefinition;
		this.sessionRef.set(session);

		partitionKeyCol = cfDef.getPartitionKeyColumnDefinition().getName();
		clusteringKeyCols = cfDef.getClusteringKeyColumnDefinitionList();
		regularCols = cfDef.getRegularColumnDefinitionList();

		isCompositeColumn = (clusteringKeyCols.size() > 1);
		isFlatTable = (clusteringKeyCols.size() == 0);
	}

	/**
	 * Query generator for schemas with exactly one clustering key: the column name
	 * is dynamic and binds as the clustering key value.
	 */
	private QueryGenCache<CqlColumnQueryImpl<?>> ColumnQueryWithClusteringKey = new QueryGenCache<CqlColumnQueryImpl<?>>(sessionRef) {

		@Override
		public Callable<RegularStatement> getQueryGen(final CqlColumnQueryImpl<?> columnQuery) {
			return new Callable<RegularStatement>() {
				@Override
				public RegularStatement call() throws Exception {
					if (clusteringKeyCols.size() != 1) {
						// NOTE: fixed misspelled diagnostic ("clustetingKeyCols")
						throw new RuntimeException("Cannot use this query for this schema, clusteringKeyCols.size: " + clusteringKeyCols.size());
					}

					// Select the value column along with its ttl and writetime metadata.
					String valueColName = regularCols.get(0).getName();

					return QueryBuilder.select()
							.column(valueColName).ttl(valueColName).writeTime(valueColName)
							.from(keyspace, cfDef.getName())
							.where(eq(partitionKeyCol, BIND_MARKER))
							.and(eq(clusteringKeyCols.get(0).getName(), BIND_MARKER));
				}
			};
		}

		@Override
		public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnQueryImpl<?> columnQuery) {
			// Binds: partition key, then the dynamic column name as the clustering key.
			return pStatement.bind(columnQuery.getRowKey(), columnQuery.getColumnName());
		}
	};

	/**
	 * Query generator for composite-column schemas (more than one clustering key):
	 * the Astyanax column name is decomposed into one bind value per component.
	 */
	private QueryGenCache<CqlColumnQueryImpl<?>> ColumnQueryWithCompositeColumn = new QueryGenCache<CqlColumnQueryImpl<?>>(sessionRef) {

		@Override
		public Callable<RegularStatement> getQueryGen(final CqlColumnQueryImpl<?> columnQuery) {
			return new Callable<RegularStatement>() {
				@Override
				public RegularStatement call() throws Exception {
					if (clusteringKeyCols.size() <= 1) {
						// NOTE: fixed misspelled diagnostic ("clustetingKeyCols")
						throw new RuntimeException("Cannot use this query for this schema, clusteringKeyCols.size: " + clusteringKeyCols.size());
					}

					String valueColName = regularCols.get(0).getName();

					ColumnFamily<?,?> cf = columnQuery.getCF();
					AnnotatedCompositeSerializer<?> compSerializer = (AnnotatedCompositeSerializer<?>) cf.getColumnSerializer();
					List<ComponentSerializer<?>> components = compSerializer.getComponents();

					// select the individual columns as dictated by the no of component serializers
					Builder select = QueryBuilder.select()
							.column(valueColName).ttl(valueColName).writeTime(valueColName);

					Where where = select.from(keyspace, cfDef.getName()).where(eq(partitionKeyCol, BIND_MARKER));
					// One equality predicate per composite component, in declaration order.
					for (int index = 0; index<components.size(); index++) {
						where.and(eq(clusteringKeyCols.get(index).getName(), BIND_MARKER));
					}

					return where;
				}
			};
		}

		@Override
		public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnQueryImpl<?> columnQuery) {

			List<Object> values = new ArrayList<Object>();
			values.add(columnQuery.getRowKey());

			ColumnFamily<?,?> cf = columnQuery.getCF();
			AnnotatedCompositeSerializer<?> compSerializer = (AnnotatedCompositeSerializer<?>) cf.getColumnSerializer();
			List<ComponentSerializer<?>> components = compSerializer.getComponents();

			// Decompose the composite column name into its component field values,
			// matching the bind-marker order generated above.
			Object columnName = columnQuery.getColumnName();
			for (ComponentSerializer<?> component : components) {
				values.add(component.getFieldValueDirectly(columnName));
			}

			return pStatement.bind(values.toArray());
		}
	};

	/**
	 * Query generator for flat tables (no clustering keys): the Astyanax column
	 * name maps directly to a statically defined CQL3 column.
	 */
	private QueryGenCache<CqlColumnQueryImpl<?>> FlatTableColumnQuery = new QueryGenCache<CqlColumnQueryImpl<?>>(sessionRef) {

		@Override
		public Callable<RegularStatement> getQueryGen(final CqlColumnQueryImpl<?> columnQuery) {
			return new Callable<RegularStatement>() {
				@Override
				public RegularStatement call() throws Exception {
					if (clusteringKeyCols.size() != 0) {
						// NOTE: fixed misspelled diagnostic ("clustetingKeyCols")
						throw new RuntimeException("Cannot use this query for this schema, clusteringKeyCols.size: " + clusteringKeyCols.size());
					}

					String columnNameString = (String)columnQuery.getColumnName();

					return QueryBuilder.select()
							.column(columnNameString).ttl(columnNameString).writeTime(columnNameString)
							.from(keyspace, cfDef.getName())
							.where(eq(partitionKeyCol, BIND_MARKER));
				}
			};
		}

		@Override
		public BoundStatement bindValues(PreparedStatement pStatement, CqlColumnQueryImpl<?> columnQuery) {
			// Only the partition key is bound; the column name is baked into the statement.
			return pStatement.bind(columnQuery.getRowKey());
		}
	};

	/**
	 * Translates the given Astyanax column query into a BoundStatement, dispatching
	 * on the schema shape derived in the constructor.
	 *
	 * @param columnQuery  the Astyanax single-column query
	 * @param useCaching   true to reuse the cached PreparedStatement for this query signature
	 * @return BoundStatement ready for execution
	 */
	public BoundStatement getQueryStatement(CqlColumnQueryImpl<?> columnQuery, boolean useCaching) {

		if (isFlatTable) {
			return FlatTableColumnQuery.getBoundStatement(columnQuery, useCaching);
		}

		if (isCompositeColumn) {
			return ColumnQueryWithCompositeColumn.getBoundStatement(columnQuery, useCaching);
		} else {
			return ColumnQueryWithClusteringKey.getBoundStatement(columnQuery, useCaching);
		}
	}
}
| 8,098 |
0 | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql | Create_ds/astyanax/astyanax-cql/src/main/java/com/netflix/astyanax/cql/reads/CqlRowSliceColumnCountQueryImpl.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.astyanax.cql.reads;
import java.util.HashMap;
import java.util.Map;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Statement;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.astyanax.CassandraOperationType;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.cql.CqlAbstractExecutionImpl;
import com.netflix.astyanax.cql.CqlKeyspaceImpl.KeyspaceContext;
import com.netflix.astyanax.cql.util.CFQueryContext;
import com.netflix.astyanax.cql.util.CqlTypeMapping;
import com.netflix.astyanax.query.ColumnCountQuery;
import com.netflix.astyanax.query.RowSliceColumnCountQuery;
/**
* Impl for {@link RowSliceColumnCountQuery} interface.
* Just like {@link ColumnCountQuery}, this class only manages the context for the query.
* The actual query statement is supplied from the {@link CqlRowSliceQueryImpl} class.
*
* Note that CQL3 treats columns as rows for certain schemas that contain clustering keys.
* Hence this class collapses all {@link ResultSet} rows with the same partition key into a single row
* when counting all unique rows.
*
* @author poberai
*
* @param <K>
*/
public class CqlRowSliceColumnCountQueryImpl<K> implements RowSliceColumnCountQuery<K> {

	private final KeyspaceContext ksContext;
	private final CFQueryContext<?,?> cfContext;
	// Pre-built statement supplied by CqlRowSliceQueryImpl; this class only executes it.
	private final Statement query;

	/**
	 * @param ksCtx  keyspace context for execution
	 * @param cfCtx  column family context for execution
	 * @param query  the already-constructed row slice statement to run
	 */
	public CqlRowSliceColumnCountQueryImpl(KeyspaceContext ksCtx, CFQueryContext<?,?> cfCtx, Statement query) {
		this.ksContext = ksCtx;
		this.cfContext = cfCtx;
		this.query = query;
	}

	@Override
	public OperationResult<Map<K, Integer>> execute() throws ConnectionException {
		return new InternalQueryExecutionImpl().execute();
	}

	@Override
	public ListenableFuture<OperationResult<Map<K, Integer>>> executeAsync() throws ConnectionException {
		return new InternalQueryExecutionImpl().executeAsync();
	}

	/**
	 * Runs the statement and folds the CQL3 result rows into a per-row-key column count.
	 */
	private class InternalQueryExecutionImpl extends CqlAbstractExecutionImpl<Map<K, Integer>> {

		public InternalQueryExecutionImpl() {
			super(ksContext, cfContext);
		}

		@Override
		public CassandraOperationType getOperationType() {
			return CassandraOperationType.GET_ROWS_SLICE;
		}

		@Override
		public Statement getQuery() {
			return query;
		}

		@SuppressWarnings("unchecked")
		@Override
		public Map<K, Integer> parseResultSet(ResultSet resultSet) {

			Map<K, Integer> columnCountPerRow = new HashMap<K, Integer>();

			for (Row row : resultSet.all()) {
				// CQL3 returns one result row per (partition key, clustering key) pair,
				// so rows sharing a partition key collapse into a single incremented count.
				K key = (K) CqlTypeMapping.getDynamicColumn(row, cf.getKeySerializer(), 0, cf);
				Integer colCount = columnCountPerRow.get(key);
				// Autoboxing instead of the deprecated new Integer(...) constructor.
				columnCountPerRow.put(key, (colCount == null) ? 1 : colCount + 1);
			}
			return columnCountPerRow;
		}
	}
}
| 8,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.