# Apache Cassandra configuration (cassandra.yaml excerpt)
# commitlog_sync_batch_window_in_ms: 2
#
# the other option is "periodic" where writes may be acked immediately
# and the CommitLog is simply synced every commitlog_sync_period_in_ms
# milliseconds.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000

# The size of the individual commitlog file segments. A commitlog
# segment may be archived, deleted, or recycled once all the data
# in it (potentially from each columnfamily in the system) has been
# flushed to sstables.
#
# The default size is 32, which is almost always fine, but if you are
# archiving commitlog segments (see commitlog_archiving.properties),
# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
# Max mutation size is also configurable via the max_mutation_size_in_kb setting in
# cassandra.yaml. The default is half the size of commitlog_segment_size_in_mb * 1024.
#
# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must
# be set to at least twice the size of max_mutation_size_in_kb / 1024
#
commitlog_segment_size_in_mb: 32

# Compression to apply to the commit log. If omitted, the commit log
# will be written uncompressed. LZ4, Snappy, and Deflate compressors
# are supported.
# commitlog_compression:
#   - class_name: LZ4Compressor
#     parameters:
#       -
# Any class that implements the SeedProvider interface and has a
# constructor that takes a Map<String, String> of parameters will do.
seed_provider:
  # Addresses of hosts that are deemed contact points.
  # Cassandra nodes use this list of hosts to find each other and learn
  # the topology of the ring. You must change this if you are running
  # multiple nodes!
  # - class_name: io.k8s.cassandra.KubernetesSeedProvider
  - class_name: SEED_PROVIDER
    parameters:
      # seeds is actually a comma-delimited list of addresses.
      # Ex: "<ip1>,<ip2>,<ip3>"
      - seeds: "127.0.0.1"
# For workloads with more data than can fit in memory, Cassandra's
# bottleneck will be reads that need to fetch data from
# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
# order to allow the operations to enqueue low enough in the stack
# that the OS and drives can reorder them. Same applies to
# "concurrent_counter_writes", since counter writes read the current
# values before incrementing and writing them back.
#
# On the other hand, since writes are almost never IO bound, the ideal
# number of "concurrent_writes" is dependent on the number of cores in
# your system; (8 * number_of_cores) is a good rule of thumb.
concurrent_reads: 32
concurrent_writes: 32
concurrent_counter_writes: 32

# For materialized view writes, as there is a read involved, this should
# be limited by the lesser of concurrent reads or concurrent writes.
concurrent_materialized_view_writes: 32

# Maximum memory to use for pooling sstable buffers. Defaults to the smaller
# of 1/4 of heap or 512MB. This pool is allocated off-heap, so is in addition
# to the memory allocated for heap. Memory is only allocated as needed.
# file_cache_size_in_mb: 512

# Flag indicating whether to allocate on or off heap when the sstable buffer
# pool is exhausted, that is when it has exceeded the maximum memory
# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request.
# buffer_pool_use_heap_if_exhausted: true

# The strategy for optimizing disk read
# Possible values are:
# ssd (for solid state disks, the default)
# spinning (for spinning disks)
# disk_optimization_strategy: ssd

# Total permitted memory to use for memtables. Cassandra will stop
# accepting writes when the limit is exceeded until a flush completes,
# and will trigger a flush based on memtable_cleanup_threshold.
# If omitted, Cassandra will set both to 1/4 the size of the heap.
# memtable_heap_space_in_mb: 2048
# memtable_offheap_space_in_mb: 2048

# Ratio of occupied non-flushing memtable size to total permitted size
# that will trigger a flush of the largest memtable. A larger
# memtable_cleanup_threshold will mean larger flushes and hence less
# compaction, but also less concurrent flush activity, which can make
# it difficult to keep your disks fed under heavy write load.
#
# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
# memtable_cleanup_threshold: 0.11

# Specify the way Cassandra allocates and manages memtable memory.