# given total throughput in Mbps. This is necessary because Cassandra does
# mostly sequential IO when streaming data during bootstrap or repair, which
# can lead to saturating the network connection and degrading rpc performance.
# When unset, the default is 200 Mbps or 25 MB/s.
# stream_throughput_outbound_megabits_per_sec: 200

# Throttles all streaming file transfer between the datacenters,
# this setting allows users to throttle inter dc stream throughput in addition
# to throttling all network stream traffic as configured with
# stream_throughput_outbound_megabits_per_sec
# When unset, the default is 200 Mbps or 25 MB/s
# inter_dc_stream_throughput_outbound_megabits_per_sec: 200
# How long the coordinator should wait for read operations to complete
read_request_timeout_in_ms: 5000
# How long the coordinator should wait for seq or index scans to complete
range_request_timeout_in_ms: 10000
# How long the coordinator should wait for writes to complete
write_request_timeout_in_ms: 2000
# How long the coordinator should wait for counter writes to complete
counter_write_request_timeout_in_ms: 5000
# How long a coordinator should continue to retry a CAS operation
# that contends with other proposals for the same row
cas_contention_timeout_in_ms: 1000
# How long the coordinator should wait for truncates to complete
# (This can be much longer, because unless auto_snapshot is disabled
# we need to flush first so we can snapshot before removing the data.)
truncate_request_timeout_in_ms: 60000
# The default timeout for other, miscellaneous operations
request_timeout_in_ms: 10000
# Enable operation timeout information exchange between nodes to accurately
# measure request timeouts. If disabled, replicas will assume that requests
# were forwarded to them instantly by the coordinator, which means that
# under overload conditions we will waste that much extra time processing
# already-timed-out requests.
#
# Warning: before enabling this property make sure that NTP is installed
# and the times are synchronized between the nodes.
cross_node_timeout: false
# Set socket timeout for streaming operation.
# The stream session is failed if no data is received by any of the
# participants within that period.
# Default value is 3600000, which means streams timeout after an hour.
# streaming_socket_timeout_in_ms: 3600000

# phi value that must be reached for a host to be marked down.
# most users should never need to adjust this.
# phi_convict_threshold: 8
# endpoint_snitch -- Set this to a class that implements
# IEndpointSnitch. The snitch has two functions:
# - it teaches Cassandra enough about your network topology to route
#   requests efficiently
# - it allows Cassandra to spread replicas around your cluster to avoid
#   correlated failures. It does this by grouping machines into
#   "datacenters" and "racks." Cassandra will do its best not to have
#   more than one replica on the same "rack" (which may not actually
#   be a physical location)
#
# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
# ARE PLACED.
#
# IF THE RACK A REPLICA IS PLACED IN CHANGES AFTER THE REPLICA HAS BEEN
# ADDED TO A RING, THE NODE MUST BE DECOMMISSIONED AND REBOOTSTRAPPED.
#
# Out of the box, Cassandra provides
# - SimpleSnitch:
#   Treats Strategy order as proximity. This can improve cache
#   locality when disabling read repair. Only appropriate for
#   single-datacenter deployments.
# - GossipingPropertyFileSnitch
#   This should be your go-to snitch for production use. The rack
#   and datacenter for the local node are defined in
#   cassandra-rackdc.properties and propagated to other nodes via
#   gossip. If cassandra-topology.properties exists, it is used as a
#   fallback, allowing migration from the PropertyFileSnitch.
# - PropertyFileSnitch:
#   Proximity is determined by rack and data center, which are
#   explicitly configured in cassandra-topology.properties.
# - Ec2Snitch:
#   Appropriate for EC2 deployments in a single Region. Loads Region
#   and Availability Zone information from the EC2 API. The Region is
#   treated as the datacenter, and the Availability Zone as the rack.
#   Only private IPs are used, so this will not work across multiple
#   Regions.
# - Ec2MultiRegionSnitch:
#   Uses public IPs as broadcast_address to allow cross-region
#   connectivity. (Thus, you should set seed addresses to the public
#   IP as well.) You will need to open the storage_port or
#   ssl_storage_port on the public IP firewall. (For intra-Region
#   traffic, Cassandra will switch to the private IP after
#   establishing a connection.)
# - RackInferringSnitch:
#   Proximity is determined by rack and data center, which are
#   assumed to correspond to the 3rd and 2nd octet of each node's IP
#   address, respectively. Unless this happens to match your
#   deployment conventions, this is best used as an example of
#   writing a custom Snitch class and is provided in that spirit.