---
log_level: INFO

storage:
  # Where to store all the data
  storage_path: ./storage

  # Where to store snapshots
  snapshots_path: ./snapshots

  # Where to store temporary files
  # If null, temporary snapshots are stored in: storage/snapshots_temp/
  temp_path: null

  # If true - point's payload will not be stored in memory.
  # It will be read from the disk every time it is requested.
  # This setting saves RAM by (slightly) increasing the response time.
  # Note: those payload values that are involved in filtering and are indexed - remain in RAM.
  on_disk_payload: true

  # Maximum number of concurrent updates to shard replicas
  # If `null` - maximum concurrency is used.
  update_concurrency: null

  # Write-ahead-log related configuration
  wal:
    # Size of a single WAL segment
    wal_capacity_mb: 32

    # Number of WAL segments to create ahead of actual data requirement
    wal_segments_ahead: 0

  # Normal node - receives all updates and answers all queries
  node_type: "Normal"

  # Listener node - receives all updates, but does not answer search/read queries
  # Useful for setting up a dedicated backup node
  # node_type: "Listener"

  performance:
    # Number of parallel threads used for search operations. If 0 - auto selection.
    max_search_threads: 0

    # Max total number of threads, which can be used for running optimization processes across all collections.
    # Note: Each optimization thread will also use `max_indexing_threads` for index building.
    # So total number of threads used for optimization will be `max_optimization_threads * max_indexing_threads`
    max_optimization_threads: 1

    # Prevent DDoS of too many concurrent updates in distributed mode.
    # One external update usually triggers multiple internal updates, which breaks internal
    # timings. For example, the health check timing and consensus timing.
    # If null - auto selection.
    update_rate_limit: null

  optimizers:
    # The minimal fraction of deleted vectors in a segment, required to perform segment optimization
    deleted_threshold: 0.2

    # The minimal number of vectors in a segment, required to perform segment optimization
    vacuum_min_vector_number: 1000

    # Target amount of segments optimizer will try to keep.
    # Real amount of segments may vary depending on multiple parameters:
    #  - Amount of stored points
    #  - Current write RPS
    #
    # It is recommended to select the default number of segments as a factor of the number of search threads,
    # so that each segment would be handled evenly by one of the threads.
    # If `default_segment_number = 0`, it will be automatically selected by the number of available CPUs
    default_segment_number: 0

    # Do not create segments larger than this size (in kilobytes).
    # Large segments might require disproportionately long indexation times,
    # therefore it makes sense to limit the size of segments.
    #
    # If indexation speed is more important for you - make this parameter lower.
    # If search speed is more important - make this parameter higher.
    # Note: 1Kb = 1 vector of size 256
    # If not set, will be automatically selected considering the number of available CPUs.
    max_segment_size_kb: null

    # Maximum size (in kilobytes) of vectors to store in-memory per segment.
    # Segments larger than this threshold will be stored as a read-only memmapped file.
    # To enable memmap storage, lower the threshold
    # Note: 1Kb = 1 vector of size 256
    # To explicitly disable mmap optimization, set to `0`.
    # If not set, will be disabled by default.
    memmap_threshold_kb: null

    # Maximum size (in kilobytes) of vectors allowed for plain index.
    # Default value based on https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md
    # Note: 1Kb = 1 vector of size 256
    # To explicitly disable vector indexing, set to `0`.
    # If not set, the default value will be used.
    indexing_threshold_kb: 20000

    # Interval between forced flushes.
    flush_interval_sec: 5

    # Max number of threads, which can be used for optimization per collection.
    # Note: Each optimization thread will also use `max_indexing_threads` for index building.
    # So total number of threads used for optimization will be `max_optimization_threads * max_indexing_threads`
    # If `max_optimization_threads = 0`, optimization will be disabled.
    max_optimization_threads: 1

  # Default parameters of HNSW Index. Could be overridden for each collection or named vector individually
  hnsw_index:
    # Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.
    m: 16

    # Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index.
    ef_construct: 100

    # Minimal size (in kilobytes) of vectors for additional payload-based indexing.
    # If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used -
    # in this case full-scan search should be preferred by query planner and additional indexing is not required.
    # Note: 1Kb = 1 vector of size 256
    full_scan_threshold_kb: 10000

    # Number of parallel threads used for background index building. If 0 - auto selection.
    max_indexing_threads: 0

    # Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false
    on_disk: false

    # Custom M param for the HNSW graph built for payload index. If not set, default M will be used.
    payload_m: null

service:
  # Maximum size of POST data in a single request in megabytes
  max_request_size_mb: 32

  # Number of parallel workers used for serving the api. If 0 - equal to the number of available cores.
  # If missing - Same as storage.max_search_threads
  max_workers: 0

  # Host to bind the service on
  host: 0.0.0.0

  # HTTP(S) port to bind the service on
  http_port: 6333

  # gRPC port to bind the service on.
  # If `null` - gRPC is disabled. Default: null
  # Comment to disable gRPC:
  grpc_port: 6334

  # Enable CORS headers in REST API.
  # If enabled, browsers would be allowed to query REST endpoints regardless of query origin.
  # More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
  # Default: true
  enable_cors: true

  # Enable HTTPS for the REST and gRPC API
  enable_tls: false

  # Check user HTTPS client certificate against CA file specified in tls config
  verify_https_client_certificate: false

  # Set an api-key.
  # If set, all requests must include a header with the api-key.
  # example header: `api-key: <API-KEY>`
  #
  # If you enable this you should also enable TLS.
  # (Either above or via an external service like nginx.)
  # Sending an api-key over an unencrypted channel is insecure.
  #
  # Uncomment to enable.
  # api_key: your_secret_api_key_here

  # Set an api-key for read-only operations.
  # If set, all requests must include a header with the api-key.
  # example header: `api-key: <API-KEY>`
  #
  # If you enable this you should also enable TLS.
  # (Either above or via an external service like nginx.)
  # Sending an api-key over an unencrypted channel is insecure.
  #
  # Uncomment to enable.
  # read_only_api_key: your_secret_read_only_api_key_here

cluster:
  # Use `enabled: true` to run Qdrant in distributed deployment mode
  enabled: false

  # Configuration of the inter-cluster communication
  p2p:
    # Port for internal communication between peers
    port: 6335

    # Use TLS for communication between peers
    enable_tls: false

  # Configuration related to distributed consensus algorithm
  consensus:
    # How frequently peers should ping each other.
    # Setting this parameter to lower value will allow consensus
    # to detect disconnected nodes earlier, but too frequent
    # tick period may create significant network and CPU overhead.
    # We encourage you NOT to change this parameter unless you know what you are doing.
    tick_period_ms: 100

# Set to true to prevent service from sending usage statistics to the developers.
# Read more: https://qdrant.tech/documentation/guides/telemetry
telemetry_disabled: false

# TLS configuration.
# Required if either service.enable_tls or cluster.p2p.enable_tls is true.
tls:
  # Server certificate chain file
  cert: ./tls/cert.pem

  # Server private key file
  key: ./tls/key.pem

  # Certificate authority certificate file.
  # This certificate will be used to validate the certificates
  # presented by other nodes during inter-cluster communication.
  #
  # If verify_https_client_certificate is true, it will verify
  # HTTPS client certificate
  #
  # Required if cluster.p2p.enable_tls is true.
  ca_cert: ./tls/cacert.pem

  # TTL in seconds to reload certificate from disk, useful for certificate rotations.
  # Only works for HTTPS endpoints. Does not support gRPC (and intra-cluster communication).
  # If `null` - TTL is disabled.
  cert_ttl: 3600