# cassandra.yaml — Apache Cassandra storage configuration (excerpt)
# rpc_recv_buff_size_in_bytes: |
# Uncomment to set socket buffer size for internode communication |
# Note that when setting this, the buffer size is limited by net.core.wmem_max |
# and when not setting it, it is defined by net.ipv4.tcp_wmem
# See: |
# /proc/sys/net/core/wmem_max |
# /proc/sys/net/core/rmem_max |
# /proc/sys/net/ipv4/tcp_wmem |
# /proc/sys/net/ipv4/tcp_rmem
# and: man tcp |
# internode_send_buff_size_in_bytes: |
# internode_recv_buff_size_in_bytes: |
# Frame size for thrift (maximum message length). |
thrift_framed_transport_size_in_mb: 15 |
# Set to true to have Cassandra create a hard link to each sstable |
# flushed or streamed locally in a backups/ subdirectory of the |
# keyspace data. Removing these links is the operator's |
# responsibility. |
incremental_backups: false |
# Whether or not to take a snapshot before each compaction. Be |
# careful using this option, since Cassandra won't clean up the |
# snapshots for you. Mostly useful if you're paranoid when there |
# is a data format change. |
snapshot_before_compaction: false |
# Whether or not a snapshot is taken of the data before keyspace truncation |
# or dropping of column families. The STRONGLY advised default of true |
# should be used to provide data safety. If you set this flag to false, you will |
# lose data on truncation or drop. |
auto_snapshot: true |
# When executing a scan, within or across a partition, we need to keep the |
# tombstones seen in memory so we can return them to the coordinator, which |
# will use them to make sure other replicas also know about the deleted rows. |
# With workloads that generate a lot of tombstones, this can cause performance |
# problems and even exhaust the server heap.
# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) |
# Adjust the thresholds here if you understand the dangers and want to |
# scan more tombstones anyway. These thresholds may also be adjusted at runtime |
# using the StorageService mbean. |
tombstone_warn_threshold: 1000 |
tombstone_failure_threshold: 100000 |
# Granularity of the collation index of rows within a partition. |
# Increase if your rows are large, or if you have a very large |
# number of rows per partition. The competing goals are these: |
# 1) a smaller granularity means more index entries are generated |
# and looking up rows within the partition by collation column |
# is faster |
# 2) but, Cassandra will keep the collation index in memory for hot |
# rows (as part of the key cache), so a larger granularity means |
# you can cache more hot rows |
column_index_size_in_kb: 64 |
# Log WARN on any batch size exceeding this value. 5kb per batch by default. |
# Caution should be taken on increasing the size of this threshold as it can lead to node instability. |
batch_size_warn_threshold_in_kb: 5 |
# Fail any batch exceeding this value. 50kb (10x warn threshold) by default. |
batch_size_fail_threshold_in_kb: 50 |
# Number of simultaneous compactions to allow, NOT including |
# validation "compactions" for anti-entropy repair. Simultaneous |
# compactions can help preserve read performance in a mixed read/write |
# workload, by mitigating the tendency of small sstables to accumulate |
# during a single long-running compaction. The default is usually
# fine and if you experience problems with compaction running too |
# slowly or too fast, you should look at |
# compaction_throughput_mb_per_sec first. |
# |
# concurrent_compactors defaults to the smaller of (number of disks, |
# number of cores), with a minimum of 2 and a maximum of 8. |
# |
# If your data directories are backed by SSD, you should increase this |
# to the number of cores. |
#concurrent_compactors: 1 |
# Throttles compaction to the given total throughput across the entire |
# system. The faster you insert data, the faster you need to compact in |
# order to keep the sstable count down, but in general, setting this to |
# 16 to 32 times the rate you are inserting data is more than sufficient. |
# Setting this to 0 disables throttling. Note that this accounts for all types
# of compaction, including validation compaction.
compaction_throughput_mb_per_sec: 16 |
# Log a warning when compacting partitions larger than this value |
compaction_large_partition_warning_threshold_mb: 100 |
# When compacting, the replacement sstable(s) can be opened before they |
# are completely written, and used in place of the prior sstables for |
# any range that has been written. This helps to smoothly transfer reads |
# between the sstables, reducing page cache churn and keeping hot rows hot |
sstable_preemptive_open_interval_in_mb: 50 |
# Throttles all outbound streaming file transfers on this node to the |