Dataset schema (column, dtype, observed range or cardinality):

content              string    lengths 1 to 103k
path                 string    lengths 8 to 216
filename             string    lengths 2 to 179
language             string    15 classes
size_bytes           int64     2 to 189k
quality_score        float64   0.5 to 0.95
complexity           float64   0 to 1
documentation_ratio  float64   0 to 1
repository           string    5 classes
stars                int64     0 to 1k
created_date         date      2023-07-10 19:21:08 to 2025-07-09 19:11:45
license              string    4 classes
is_test              bool      2 classes
file_hash            string    lengths 32 to 32
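
Records with these columns can be filtered directly once loaded. A minimal sketch, assuming the samples are exported as JSON Lines with one record per line and these exact column names (the dump does not state the storage format, and the file name "samples.jsonl" is an assumption):

    import json

    def load_records(path="samples.jsonl", min_quality=0.9):
        """Keep non-test records at or above a quality threshold."""
        keep = []
        with open(path, encoding="utf-8") as f:
            for line in f:
                rec = json.loads(line)
                # quality_score is a float in [0.5, 0.95] per the schema;
                # is_test flags test fixtures such as the Cassandra configs below.
                if rec["quality_score"] >= min_quality and not rec["is_test"]:
                    keep.append(rec)
        return keep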

content: |
  # Number of days of inactivity before an issue becomes stale
  daysUntilStale: 23
  # Number of days of inactivity before a stale issue is closed
  daysUntilClose: 7
  # Issues with these labels will never be considered stale
  exemptLabels:
    - bug
    - enhancement
    - "help wanted"
    - blessed
    - workaround
  # Label to use when marking an issue as stale
  staleLabel: stale
  # Comment to post when marking an issue as stale. Set to `false` to disable
  markComment: >
    This issue has been automatically marked as stale because it has not had
    recent activity. It will be closed if no further activity occurs. Thank you
    for your contributions.
  # Comment to post when closing a stale issue. Set to `false` to disable
  closeComment: false
path: dataset_sample\yaml\aonez_Keka\.github\stale.yml
filename: stale.yml
language: YAML
size_bytes: 727
quality_score: 0.8
complexity: 0.1
documentation_ratio: 0.3
repository: react-lib
stars: 862
created_date: 2024-05-21T20:01:44.395558
license: BSD-3-Clause
is_test: false
file_hash: b482158df625af7fedab96df3de51a55
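
Per the schema, every file_hash is exactly 32 characters, the length of an MD5 hex digest. A minimal verification sketch under that assumption (the dump does not name the hash algorithm, so this is a guess supported only by the observed length):

    import hashlib

    def hash_matches(rec):
        # Assumption: file_hash is the MD5 hex digest of the UTF-8 file bytes.
        digest = hashlib.md5(rec["content"].encode("utf-8")).hexdigest()
        return digest == rec["file_hash"]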

content: |
  name: Sync Upstream

  env:
    # Required, URL to upstream (fork base)
    UPSTREAM_URL: "https://github.com/aonez/Keka.git"
    # Required, token to authenticate the bot; could use ${{ secrets.GITHUB_TOKEN }}.
    # Here we use a PAT instead to authenticate workflow file changes.
    WORKFLOW_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    # Optional, defaults to main
    UPSTREAM_BRANCH: "master"
    # Optional, defaults to UPSTREAM_BRANCH
    DOWNSTREAM_BRANCH: "master"
    # Optional fetch arguments
    FETCH_ARGS: ""
    # Optional merge arguments
    MERGE_ARGS: ""
    # Optional push arguments
    PUSH_ARGS: ""
    # Optional toggle to spawn time logs (keeps action active)
    SPAWN_LOGS: "false" # "true" or "false"

  # This runs every day at 16:00 UTC
  on:
    schedule:
      - cron: "0 16 * * *"
    # Allows manual workflow runs (must be in the default branch to work)
    workflow_dispatch:

  jobs:
    build:
      runs-on: ubuntu-latest
      steps:
        - name: GitHub Sync to Upstream Repository
          uses: dabreadman/sync-upstream-repo@v1.3.0
          with:
            upstream_repo: ${{ env.UPSTREAM_URL }}
            upstream_branch: ${{ env.UPSTREAM_BRANCH }}
            downstream_branch: ${{ env.DOWNSTREAM_BRANCH }}
            token: ${{ env.WORKFLOW_TOKEN }}
            fetch_args: ${{ env.FETCH_ARGS }}
            merge_args: ${{ env.MERGE_ARGS }}
            push_args: ${{ env.PUSH_ARGS }}
            spawn_logs: ${{ env.SPAWN_LOGS }}
path: dataset_sample\yaml\aonez_Keka\.github\workflows\upstreamSync.yml
filename: upstreamSync.yml
language: YAML
size_bytes: 1399
quality_score: 0.8
complexity: 0
documentation_ratio: 0.275
repository: python-kit
stars: 522
created_date: 2024-10-20T21:33:50.068379
license: BSD-3-Clause
is_test: false
file_hash: 281cb73048b5572d29788149aec33e1a

content: |
  #
  # Licensed to the Apache Software Foundation (ASF) under one
  # or more contributor license agreements. See the NOTICE file
  # distributed with this work for additional information
  # regarding copyright ownership. The ASF licenses this file
  # to you under the Apache License, Version 2.0 (the
  # "License"); you may not use this file except in compliance
  # with the License. You may obtain a copy of the License at
  #
  # http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
  #
path: dataset_sample\yaml\apache_cassandra\.circleci\license.yml
filename: license.yml
language: YAML
size_bytes: 789
quality_score: 0.95
complexity: 0.111111
documentation_ratio: 1
repository: awesome-app
stars: 703
created_date: 2024-07-27T15:34:53.108300
license: MIT
is_test: false
file_hash: ed3e3e8d27b99c8093852dfbbcc51f1c

content: |
  name: Cassandra
  version: 'trunk'
  display_version: 'trunk'
  prerelease: true
  asciidoc:
    attributes:
      cass_url: 'http://cassandra.apache.org/'
      cass-50: 'Cassandra 5.0'
      cassandra: 'Cassandra'
      product: 'Apache Cassandra'

  nav:
  - modules/ROOT/nav.adoc
  - modules/cassandra/nav.adoc
path: dataset_sample\yaml\apache_cassandra\doc\antora.yml
filename: antora.yml
language: YAML
size_bytes: 291
quality_score: 0.8
complexity: 0
documentation_ratio: 0
repository: node-utils
stars: 961
created_date: 2024-02-22T06:19:13.467972
license: MIT
is_test: false
file_hash: dbb403f93f945c180033024b00337878
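
The dump does not define documentation_ratio, but the two records above bracket it: license.yml, which is entirely comments, scores 1, and antora.yml, which has none, scores 0. A sketch of one plausible definition consistent with those extremes (comment-line fraction; a hypothetical reading, not the dataset's stated formula):

    def documentation_ratio(content: str) -> float:
        # Assumed definition: fraction of non-blank lines that are YAML comments.
        lines = [ln.strip() for ln in content.splitlines() if ln.strip()]
        if not lines:
            return 0.0
        return sum(1 for ln in lines if ln.startswith("#")) / len(lines)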

content: |
  # Licensed to the Apache Software Foundation (ASF) under one
  # or more contributor license agreements. See the NOTICE file
  # distributed with this work for additional information
  # regarding copyright ownership. The ASF licenses this file
  # to you under the Apache License, Version 2.0 (the
  # "License"); you may not use this file except in compliance
  # with the License. You may obtain a copy of the License at
  #
  # http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
  #
  #
  ---
  trickle_fsync: "java.lang.Boolean"
  rpc_listen_backlog: "java.lang.Integer"
  max_streaming_retries: "java.lang.Integer"
  native_transport_flush_in_batches_legacy: "java.lang.Boolean"
  row_cache_save_period: "java.lang.Integer"
  rpc_address: "java.lang.String"
  disk_optimization_estimate_percentile: "java.lang.Double"
  hinted_handoff_disabled_datacenters: "java.util.Set"
  num_tokens: "java.lang.Integer"
  read_request_timeout_in_ms: "java.lang.Long"
  rpc_max_threads: "java.lang.Integer"
  enable_drop_compact_storage: "java.lang.Boolean"
  commitlog_directory: "java.lang.String"
  unlogged_batch_across_partitions_warn_threshold: "java.lang.Integer"
  auto_bootstrap: "java.lang.Boolean"
  authorizer: "java.lang.String"
  memtable_heap_space_in_mb: "java.lang.Integer"
  index_interval: "java.lang.Integer"
  sstable_preemptive_open_interval_in_mb: "java.lang.Integer"
  broadcast_rpc_address: "java.lang.String"
  commitlog_sync: "org.apache.cassandra.config.Config.CommitLogSync"
  listen_interface_prefer_ipv6: "java.lang.Boolean"
  repair_session_max_tree_depth: "java.lang.Integer"
  request_scheduler_options:
    throttle_limit: "java.lang.Integer"
    default_weight: "java.lang.Integer"
    weights: "java.util.Map"
  user_defined_function_warn_timeout: "java.lang.Long"
  request_scheduler_id: "org.apache.cassandra.config.Config.RequestSchedulerId"
  tracetype_repair_ttl: "java.lang.Integer"
  rpc_send_buff_size_in_bytes: "java.lang.Integer"
  concurrent_compactors: "java.lang.Integer"
  buffer_pool_use_heap_if_exhausted: "java.lang.Boolean"
  concurrent_materialized_view_writes: "java.lang.Integer"
  commitlog_total_space_in_mb: "java.lang.Integer"
  hints_directory: "java.lang.String"
  listen_address: "java.lang.String"
  native_transport_max_concurrent_connections_per_ip: "java.lang.Long"
  rpc_keepalive: "java.lang.Boolean"
  request_scheduler: "java.lang.String"
  allow_extra_insecure_udfs: "java.lang.Boolean"
  rpc_interface_prefer_ipv6: "java.lang.Boolean"
  check_for_duplicate_rows_during_compaction: "java.lang.Boolean"
  request_timeout_in_ms: "java.lang.Long"
  user_function_timeout_policy: "org.apache.cassandra.config.Config.UserFunctionTimeoutPolicy"
  disk_access_mode: "org.apache.cassandra.config.Config.DiskAccessMode"
  rpc_server_type: "java.lang.String"
  concurrent_counter_writes: "java.lang.Integer"
  counter_write_request_timeout_in_ms: "java.lang.Long"
  roles_update_interval_in_ms: "java.lang.Integer"
  row_cache_size_in_mb: "java.lang.Long"
  memtable_allocation_type: "org.apache.cassandra.config.Config.MemtableAllocationType"
  trickle_fsync_interval_in_kb: "java.lang.Integer"
  cas_contention_timeout_in_ms: "java.lang.Long"
  key_cache_size_in_mb: "java.lang.Long"
  tombstone_warn_threshold: "java.lang.Integer"
  min_free_space_per_drive_in_mb: "java.lang.Integer"
  write_request_timeout_in_ms: "java.lang.Long"
  cross_node_timeout: "java.lang.Boolean"
  dynamic_snitch: "java.lang.Boolean"
  permissions_validity_in_ms: "java.lang.Integer"
  phi_convict_threshold: "java.lang.Double"
  commitlog_sync_batch_window_in_ms: "java.lang.Double"
  native_transport_max_threads: "java.lang.Integer"
  thrift_max_message_length_in_mb: "java.lang.Integer"
  disk_failure_policy: "org.apache.cassandra.config.Config.DiskFailurePolicy"
  permissions_update_interval_in_ms: "java.lang.Integer"
  tombstone_failure_threshold: "java.lang.Integer"
  authenticator: "java.lang.String"
  max_mutation_size_in_kb: "java.lang.Integer"
  allow_insecure_udfs: "java.lang.Boolean"
  cache_load_timeout_seconds: "java.lang.Integer"
  initial_token: "java.lang.String"
  batch_size_warn_threshold_in_kb: "java.lang.Integer"
  concurrent_replicates: "java.lang.Integer"
  dynamic_snitch_badness_threshold: "java.lang.Double"
  index_summary_capacity_in_mb: "java.lang.Long"
  commitlog_sync_period_in_ms: "java.lang.Integer"
  counter_cache_keys_to_save: "java.lang.Integer"
  disk_optimization_page_cross_chance: "java.lang.Double"
  listen_on_broadcast_address: "java.lang.Boolean"
  native_transport_max_concurrent_requests_in_bytes: "java.lang.Long"
  rpc_min_threads: "java.lang.Integer"
  row_cache_class_name: "java.lang.String"
  gc_warn_threshold_in_ms: "java.lang.Integer"
  disk_optimization_strategy: "org.apache.cassandra.config.Config.DiskOptimizationStrategy"
  compaction_large_partition_warning_threshold_mb: "java.lang.Integer"
  enable_user_defined_functions_threads: "java.lang.Boolean"
  hinted_handoff_throttle_in_kb: "java.lang.Integer"
  otc_backlog_expiration_interval_ms: "java.lang.Integer"
  counter_cache_save_period: "java.lang.Integer"
  otc_coalescing_enough_coalesced_messages: "java.lang.Integer"
  hints_flush_period_in_ms: "java.lang.Integer"
  role_manager: "java.lang.String"
  thrift_framed_transport_size_in_mb: "java.lang.Integer"
  server_encryption_options:
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    truststore: "java.lang.String"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    algorithm: "java.lang.String"
  max_hints_delivery_threads: "java.lang.Integer"
  column_index_size_in_kb: "java.lang.Integer"
  memtable_offheap_space_in_mb: "java.lang.Integer"
  data_file_directories: "java.util.List"
  saved_caches_directory: "java.lang.String"
  native_transport_max_frame_size_in_mb: "java.lang.Integer"
  index_summary_resize_interval_in_minutes: "java.lang.Integer"
  streaming_socket_timeout_in_ms: "java.lang.Integer"
  encryption_options:
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    truststore: "java.lang.String"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    algorithm: "java.lang.String"
  start_rpc: "java.lang.Boolean"
  enable_user_defined_functions: "java.lang.Boolean"
  max_hint_window_in_ms: "java.lang.Integer"
  gc_log_threshold_in_ms: "java.lang.Integer"
  snapshot_on_duplicate_row_detection: "java.lang.Boolean"
  seed_provider:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  check_for_duplicate_rows_during_reads: "java.lang.Boolean"
  internode_compression: "org.apache.cassandra.config.Config.InternodeCompression"
  internode_send_buff_size_in_bytes: "java.lang.Integer"
  otc_coalescing_window_us: "java.lang.Integer"
  batchlog_replay_throttle_in_kb: "java.lang.Integer"
  enable_scripted_user_defined_functions: "java.lang.Boolean"
  commitlog_compression:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  broadcast_address: "java.lang.String"
  rpc_recv_buff_size_in_bytes: "java.lang.Integer"
  enable_materialized_views: "java.lang.Boolean"
  roles_validity_in_ms: "java.lang.Integer"
  snapshot_before_compaction: "java.lang.Boolean"
  native_transport_port_ssl: "java.lang.Integer"
  allocate_tokens_for_keyspace: "java.lang.String"
  storage_port: "java.lang.Integer"
  counter_cache_size_in_mb: "java.lang.Long"
  native_transport_port: "java.lang.Integer"
  dynamic_snitch_reset_interval_in_ms: "java.lang.Integer"
  permissions_cache_max_entries: "java.lang.Integer"
  tracetype_query_ttl: "java.lang.Integer"
  stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
  rpc_port: "java.lang.Integer"
  commit_failure_policy: "org.apache.cassandra.config.Config.CommitFailurePolicy"
  concurrent_writes: "java.lang.Integer"
  range_request_timeout_in_ms: "java.lang.Long"
  dynamic_snitch_update_interval_in_ms: "java.lang.Integer"
  hints_compression:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  commitlog_periodic_queue_size: "java.lang.Integer"
  force_new_prepared_statement_behaviour: "java.lang.Boolean"
  hinted_handoff_enabled: "java.lang.Boolean"
  max_value_size_in_mb: "java.lang.Integer"
  memtable_flush_writers: "java.lang.Integer"
  otc_coalescing_strategy: "java.lang.String"
  commitlog_max_compression_buffers_in_pool: "java.lang.Integer"
  roles_cache_max_entries: "java.lang.Integer"
  native_transport_max_negotiable_protocol_version: "java.lang.Integer"
  partitioner: "java.lang.String"
  internode_recv_buff_size_in_bytes: "java.lang.Integer"
  listen_interface: "java.lang.String"
  start_native_transport: "java.lang.Boolean"
  ssl_storage_port: "java.lang.Integer"
  user_defined_function_fail_timeout: "java.lang.Long"
  cluster_name: "java.lang.String"
  incremental_backups: "java.lang.Boolean"
  file_cache_size_in_mb: "java.lang.Integer"
  inter_dc_tcp_nodelay: "java.lang.Boolean"
  internode_authenticator: "java.lang.String"
  key_cache_keys_to_save: "java.lang.Integer"
  key_cache_save_period: "java.lang.Integer"
  windows_timer_interval: "java.lang.Integer"
  rpc_interface: "java.lang.String"
  commitlog_segment_size_in_mb: "java.lang.Integer"
  row_cache_keys_to_save: "java.lang.Integer"
  replica_filtering_protection:
    cached_rows_fail_threshold: "java.lang.Integer"
    cached_rows_warn_threshold: "java.lang.Integer"
  native_transport_max_concurrent_requests_in_bytes_per_ip: "java.lang.Long"
  native_transport_max_concurrent_connections: "java.lang.Long"
  memtable_cleanup_threshold: "java.lang.Float"
  concurrent_reads: "java.lang.Integer"
  inter_dc_stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
  truncate_request_timeout_in_ms: "java.lang.Long"
  client_encryption_options:
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    optional: "java.lang.Boolean"
    truststore: "java.lang.String"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    enabled: "java.lang.Boolean"
    algorithm: "java.lang.String"
  auto_snapshot: "java.lang.Boolean"
  batch_size_fail_threshold_in_kb: "java.lang.Integer"
  compaction_throughput_mb_per_sec: "java.lang.Integer"
  max_hints_file_size_in_mb: "java.lang.Integer"
  endpoint_snitch: "java.lang.String"
path: dataset_sample\yaml\apache_cassandra\test\data\config\version=3.0.0-alpha1.yml
filename: version=3.0.0-alpha1.yml
language: YAML
size_bytes: 10774
quality_score: 0.95
complexity: 0.008547
documentation_ratio: 0.07265
repository: node-utils
stars: 57
created_date: 2025-06-26T18:56:33.982374
license: MIT
is_test: true
file_hash: 686049ba927314abbbb2fb95175da5e0

content: |
  # Licensed to the Apache Software Foundation (ASF) under one
  # or more contributor license agreements. See the NOTICE file
  # distributed with this work for additional information
  # regarding copyright ownership. The ASF licenses this file
  # to you under the Apache License, Version 2.0 (the
  # "License"); you may not use this file except in compliance
  # with the License. You may obtain a copy of the License at
  #
  # http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
  #
  #
  ---
  trickle_fsync: "java.lang.Boolean"
  rpc_listen_backlog: "java.lang.Integer"
  max_streaming_retries: "java.lang.Integer"
  cdc_total_space_in_mb: "java.lang.Integer"
  native_transport_flush_in_batches_legacy: "java.lang.Boolean"
  row_cache_save_period: "java.lang.Integer"
  rpc_address: "java.lang.String"
  disk_optimization_estimate_percentile: "java.lang.Double"
  hinted_handoff_disabled_datacenters: "java.util.Set"
  cdc_enabled: "java.lang.Boolean"
  cdc_raw_directory: "java.lang.String"
  num_tokens: "java.lang.Integer"
  read_request_timeout_in_ms: "java.lang.Long"
  rpc_max_threads: "java.lang.Integer"
  enable_drop_compact_storage: "java.lang.Boolean"
  commitlog_directory: "java.lang.String"
  unlogged_batch_across_partitions_warn_threshold: "java.lang.Integer"
  credentials_validity_in_ms: "java.lang.Integer"
  auto_bootstrap: "java.lang.Boolean"
  authorizer: "java.lang.String"
  memtable_heap_space_in_mb: "java.lang.Integer"
  index_interval: "java.lang.Integer"
  sstable_preemptive_open_interval_in_mb: "java.lang.Integer"
  broadcast_rpc_address: "java.lang.String"
  commitlog_sync: "org.apache.cassandra.config.Config.CommitLogSync"
  listen_interface_prefer_ipv6: "java.lang.Boolean"
  repair_session_max_tree_depth: "java.lang.Integer"
  request_scheduler_options:
    throttle_limit: "java.lang.Integer"
    default_weight: "java.lang.Integer"
    weights: "java.util.Map"
  user_defined_function_warn_timeout: "java.lang.Long"
  request_scheduler_id: "org.apache.cassandra.config.Config.RequestSchedulerId"
  tracetype_repair_ttl: "java.lang.Integer"
  rpc_send_buff_size_in_bytes: "java.lang.Integer"
  concurrent_compactors: "java.lang.Integer"
  buffer_pool_use_heap_if_exhausted: "java.lang.Boolean"
  concurrent_materialized_view_writes: "java.lang.Integer"
  commitlog_total_space_in_mb: "java.lang.Integer"
  hints_directory: "java.lang.String"
  listen_address: "java.lang.String"
  native_transport_max_concurrent_connections_per_ip: "java.lang.Long"
  rpc_keepalive: "java.lang.Boolean"
  request_scheduler: "java.lang.String"
  allow_extra_insecure_udfs: "java.lang.Boolean"
  rpc_interface_prefer_ipv6: "java.lang.Boolean"
  check_for_duplicate_rows_during_compaction: "java.lang.Boolean"
  request_timeout_in_ms: "java.lang.Long"
  user_function_timeout_policy: "org.apache.cassandra.config.Config.UserFunctionTimeoutPolicy"
  disk_access_mode: "org.apache.cassandra.config.Config.DiskAccessMode"
  rpc_server_type: "java.lang.String"
  concurrent_counter_writes: "java.lang.Integer"
  counter_write_request_timeout_in_ms: "java.lang.Long"
  roles_update_interval_in_ms: "java.lang.Integer"
  row_cache_size_in_mb: "java.lang.Long"
  memtable_allocation_type: "org.apache.cassandra.config.Config.MemtableAllocationType"
  trickle_fsync_interval_in_kb: "java.lang.Integer"
  cas_contention_timeout_in_ms: "java.lang.Long"
  key_cache_size_in_mb: "java.lang.Long"
  tombstone_warn_threshold: "java.lang.Integer"
  column_index_cache_size_in_kb: "java.lang.Integer"
  min_free_space_per_drive_in_mb: "java.lang.Integer"
  write_request_timeout_in_ms: "java.lang.Long"
  cross_node_timeout: "java.lang.Boolean"
  dynamic_snitch: "java.lang.Boolean"
  permissions_validity_in_ms: "java.lang.Integer"
  phi_convict_threshold: "java.lang.Double"
  commitlog_sync_batch_window_in_ms: "java.lang.Double"
  native_transport_max_threads: "java.lang.Integer"
  thrift_max_message_length_in_mb: "java.lang.Integer"
  disk_failure_policy: "org.apache.cassandra.config.Config.DiskFailurePolicy"
  permissions_update_interval_in_ms: "java.lang.Integer"
  tombstone_failure_threshold: "java.lang.Integer"
  authenticator: "java.lang.String"
  max_mutation_size_in_kb: "java.lang.Integer"
  allow_insecure_udfs: "java.lang.Boolean"
  cache_load_timeout_seconds: "java.lang.Integer"
  initial_token: "java.lang.String"
  batch_size_warn_threshold_in_kb: "java.lang.Integer"
  concurrent_replicates: "java.lang.Integer"
  dynamic_snitch_badness_threshold: "java.lang.Double"
  index_summary_capacity_in_mb: "java.lang.Long"
  commitlog_sync_period_in_ms: "java.lang.Integer"
  counter_cache_keys_to_save: "java.lang.Integer"
  disk_optimization_page_cross_chance: "java.lang.Double"
  listen_on_broadcast_address: "java.lang.Boolean"
  native_transport_max_concurrent_requests_in_bytes: "java.lang.Long"
  rpc_min_threads: "java.lang.Integer"
  row_cache_class_name: "java.lang.String"
  gc_warn_threshold_in_ms: "java.lang.Integer"
  disk_optimization_strategy: "org.apache.cassandra.config.Config.DiskOptimizationStrategy"
  compaction_large_partition_warning_threshold_mb: "java.lang.Integer"
  enable_user_defined_functions_threads: "java.lang.Boolean"
  hinted_handoff_throttle_in_kb: "java.lang.Integer"
  otc_backlog_expiration_interval_ms: "java.lang.Integer"
  counter_cache_save_period: "java.lang.Integer"
  otc_coalescing_enough_coalesced_messages: "java.lang.Integer"
  slow_query_log_timeout_in_ms: "java.lang.Long"
  hints_flush_period_in_ms: "java.lang.Integer"
  role_manager: "java.lang.String"
  thrift_framed_transport_size_in_mb: "java.lang.Integer"
  server_encryption_options:
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    truststore: "java.lang.String"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    require_endpoint_verification: "java.lang.Boolean"
    algorithm: "java.lang.String"
  max_hints_delivery_threads: "java.lang.Integer"
  column_index_size_in_kb: "java.lang.Integer"
  memtable_offheap_space_in_mb: "java.lang.Integer"
  data_file_directories: "java.util.List"
  saved_caches_directory: "java.lang.String"
  native_transport_max_frame_size_in_mb: "java.lang.Integer"
  index_summary_resize_interval_in_minutes: "java.lang.Integer"
  streaming_socket_timeout_in_ms: "java.lang.Integer"
  encryption_options:
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    truststore: "java.lang.String"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    require_endpoint_verification: "java.lang.Boolean"
    algorithm: "java.lang.String"
  file_cache_round_up: "java.lang.Boolean"
  streaming_keep_alive_period_in_secs: "java.lang.Integer"
  start_rpc: "java.lang.Boolean"
  enable_user_defined_functions: "java.lang.Boolean"
  max_hint_window_in_ms: "java.lang.Integer"
  enable_sasi_indexes: "java.lang.Boolean"
  gc_log_threshold_in_ms: "java.lang.Integer"
  snapshot_on_duplicate_row_detection: "java.lang.Boolean"
  seed_provider:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  check_for_duplicate_rows_during_reads: "java.lang.Boolean"
  internode_compression: "org.apache.cassandra.config.Config.InternodeCompression"
  internode_send_buff_size_in_bytes: "java.lang.Integer"
  otc_coalescing_window_us: "java.lang.Integer"
  credentials_cache_max_entries: "java.lang.Integer"
  batchlog_replay_throttle_in_kb: "java.lang.Integer"
  enable_scripted_user_defined_functions: "java.lang.Boolean"
  commitlog_compression:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  broadcast_address: "java.lang.String"
  rpc_recv_buff_size_in_bytes: "java.lang.Integer"
  credentials_update_interval_in_ms: "java.lang.Integer"
  enable_materialized_views: "java.lang.Boolean"
  roles_validity_in_ms: "java.lang.Integer"
  snapshot_before_compaction: "java.lang.Boolean"
  back_pressure_strategy:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  prepared_statements_cache_size_mb: "java.lang.Long"
  native_transport_port_ssl: "java.lang.Integer"
  allocate_tokens_for_keyspace: "java.lang.String"
  storage_port: "java.lang.Integer"
  counter_cache_size_in_mb: "java.lang.Long"
  native_transport_port: "java.lang.Integer"
  dynamic_snitch_reset_interval_in_ms: "java.lang.Integer"
  permissions_cache_max_entries: "java.lang.Integer"
  tracetype_query_ttl: "java.lang.Integer"
  stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
  rpc_port: "java.lang.Integer"
  commit_failure_policy: "org.apache.cassandra.config.Config.CommitFailurePolicy"
  concurrent_writes: "java.lang.Integer"
  range_request_timeout_in_ms: "java.lang.Long"
  dynamic_snitch_update_interval_in_ms: "java.lang.Integer"
  hints_compression:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  commitlog_periodic_queue_size: "java.lang.Integer"
  force_new_prepared_statement_behaviour: "java.lang.Boolean"
  hinted_handoff_enabled: "java.lang.Boolean"
  back_pressure_enabled: "java.lang.Boolean"
  max_value_size_in_mb: "java.lang.Integer"
  memtable_flush_writers: "java.lang.Integer"
  otc_coalescing_strategy: "java.lang.String"
  commitlog_max_compression_buffers_in_pool: "java.lang.Integer"
  roles_cache_max_entries: "java.lang.Integer"
  cdc_free_space_check_interval_ms: "java.lang.Integer"
  native_transport_max_negotiable_protocol_version: "java.lang.Integer"
  transparent_data_encryption_options:
    cipher: "java.lang.String"
    chunk_length_kb: "java.lang.Integer"
    iv_length: "java.lang.Integer"
    key_alias: "java.lang.String"
    key_provider:
      class_name: "java.lang.String"
      parameters: "java.util.Map"
    enabled: "java.lang.Boolean"
  partitioner: "java.lang.String"
  internode_recv_buff_size_in_bytes: "java.lang.Integer"
  listen_interface: "java.lang.String"
  start_native_transport: "java.lang.Boolean"
  ssl_storage_port: "java.lang.Integer"
  user_defined_function_fail_timeout: "java.lang.Long"
  cluster_name: "java.lang.String"
  incremental_backups: "java.lang.Boolean"
  file_cache_size_in_mb: "java.lang.Integer"
  inter_dc_tcp_nodelay: "java.lang.Boolean"
  internode_authenticator: "java.lang.String"
  key_cache_keys_to_save: "java.lang.Integer"
  key_cache_save_period: "java.lang.Integer"
  windows_timer_interval: "java.lang.Integer"
  rpc_interface: "java.lang.String"
  commitlog_segment_size_in_mb: "java.lang.Integer"
  row_cache_keys_to_save: "java.lang.Integer"
  replica_filtering_protection:
    cached_rows_fail_threshold: "java.lang.Integer"
    cached_rows_warn_threshold: "java.lang.Integer"
  native_transport_max_concurrent_requests_in_bytes_per_ip: "java.lang.Long"
  native_transport_max_concurrent_connections: "java.lang.Long"
  memtable_cleanup_threshold: "java.lang.Float"
  concurrent_reads: "java.lang.Integer"
  inter_dc_stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
  thrift_prepared_statements_cache_size_mb: "java.lang.Long"
  truncate_request_timeout_in_ms: "java.lang.Long"
  client_encryption_options:
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    optional: "java.lang.Boolean"
    truststore: "java.lang.String"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    enabled: "java.lang.Boolean"
    require_endpoint_verification: "java.lang.Boolean"
    algorithm: "java.lang.String"
  auto_snapshot: "java.lang.Boolean"
  batch_size_fail_threshold_in_kb: "java.lang.Integer"
  compaction_throughput_mb_per_sec: "java.lang.Integer"
  max_hints_file_size_in_mb: "java.lang.Integer"
  endpoint_snitch: "java.lang.String"
path: dataset_sample\yaml\apache_cassandra\test\data\config\version=3.11.0.yml
filename: version=3.11.0.yml
language: YAML
size_bytes: 12017
quality_score: 0.95
complexity: 0.007576
documentation_ratio: 0.064394
repository: python-kit
stars: 42
created_date: 2023-08-03T13:47:43.809704
license: Apache-2.0
is_test: true
file_hash: db4ae7066a2c302c4da216721ae0dcb0

content: |
  # Licensed to the Apache Software Foundation (ASF) under one
  # or more contributor license agreements. See the NOTICE file
  # distributed with this work for additional information
  # regarding copyright ownership. The ASF licenses this file
  # to you under the Apache License, Version 2.0 (the
  # "License"); you may not use this file except in compliance
  # with the License. You may obtain a copy of the License at
  #
  # http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
  #
  #
  ---
  repaired_data_tracking_for_range_reads_enabled: "java.lang.Boolean"
  block_for_peers_timeout_in_secs: "java.lang.Integer"
  flush_compression: "org.apache.cassandra.config.Config.FlushCompression"
  audit_logging_options:
    audit_logs_dir: "java.lang.String"
    included_users: "java.lang.String"
    logger:
      class_name: "java.lang.String"
      parameters: "java.util.Map"
    excluded_categories: "java.lang.String"
    roll_cycle: "java.lang.String"
    enabled: "java.lang.Boolean"
    included_categories: "java.lang.String"
    max_archive_retries: "java.lang.Integer"
    excluded_keyspaces: "java.lang.String"
    archive_command: "java.lang.String"
    included_keyspaces: "java.lang.String"
    max_log_size: "java.lang.Long"
    allow_nodetool_archive_command: "java.lang.Boolean"
    block: "java.lang.Boolean"
    excluded_users: "java.lang.String"
    max_queue_weight: "java.lang.Integer"
  cdc_total_space_in_mb: "java.lang.Integer"
  internode_application_send_queue_reserve_global_capacity_in_bytes: "java.lang.Integer"
  row_cache_save_period: "java.lang.Integer"
  snapshot_links_per_second: "java.lang.Long"
  disk_optimization_estimate_percentile: "java.lang.Double"
  hinted_handoff_disabled_datacenters: "java.util.Set"
  cdc_enabled: "java.lang.Boolean"
  read_request_timeout_in_ms: "java.lang.Long"
  internode_application_receive_queue_reserve_global_capacity_in_bytes: "java.lang.Integer"
  credentials_validity_in_ms: "java.lang.Integer"
  memtable_heap_space_in_mb: "java.lang.Integer"
  commitlog_sync: "org.apache.cassandra.config.Config.CommitLogSync"
  user_defined_function_warn_timeout: "java.lang.Long"
  tracetype_repair_ttl: "java.lang.Integer"
  concurrent_materialized_view_writes: "java.lang.Integer"
  commitlog_total_space_in_mb: "java.lang.Integer"
  hints_directory: "java.lang.String"
  native_transport_max_concurrent_connections_per_ip: "java.lang.Long"
  internode_socket_send_buffer_size_in_bytes: "java.lang.Integer"
  rpc_interface_prefer_ipv6: "java.lang.Boolean"
  check_for_duplicate_rows_during_compaction: "java.lang.Boolean"
  internode_socket_receive_buffer_size_in_bytes: "java.lang.Integer"
  user_function_timeout_policy: "org.apache.cassandra.config.Config.UserFunctionTimeoutPolicy"
  counter_write_request_timeout_in_ms: "java.lang.Long"
  roles_update_interval_in_ms: "java.lang.Integer"
  memtable_allocation_type: "org.apache.cassandra.config.Config.MemtableAllocationType"
  trickle_fsync_interval_in_kb: "java.lang.Integer"
  enable_transient_replication: "java.lang.Boolean"
  key_cache_size_in_mb: "java.lang.Long"
  tombstone_warn_threshold: "java.lang.Integer"
  column_index_cache_size_in_kb: "java.lang.Integer"
  full_query_logging_options:
    log_dir: "java.lang.String"
    archive_command: "java.lang.String"
    max_log_size: "java.lang.Long"
    allow_nodetool_archive_command: "java.lang.Boolean"
    block: "java.lang.Boolean"
    roll_cycle: "java.lang.String"
    max_queue_weight: "java.lang.Integer"
    max_archive_retries: "java.lang.Integer"
  table_count_warn_threshold: "java.lang.Integer"
  write_request_timeout_in_ms: "java.lang.Long"
  internode_tcp_user_timeout_in_ms: "java.lang.Integer"
  auto_optimise_inc_repair_streams: "java.lang.Boolean"
  commitlog_sync_batch_window_in_ms: "java.lang.Double"
  disk_failure_policy: "org.apache.cassandra.config.Config.DiskFailurePolicy"
  tombstone_failure_threshold: "java.lang.Integer"
  validation_preview_purge_head_start_in_sec: "java.lang.Integer"
  max_mutation_size_in_kb: "java.lang.Integer"
  initial_token: "java.lang.String"
  batch_size_warn_threshold_in_kb: "java.lang.Integer"
  dynamic_snitch_badness_threshold: "java.lang.Double"
  index_summary_capacity_in_mb: "java.lang.Long"
  allocate_tokens_for_local_replication_factor: "java.lang.Integer"
  counter_cache_keys_to_save: "java.lang.Integer"
  disk_optimization_page_cross_chance: "java.lang.Double"
  listen_on_broadcast_address: "java.lang.Boolean"
  internode_application_receive_queue_reserve_endpoint_capacity_in_bytes: "java.lang.Integer"
  row_cache_class_name: "java.lang.String"
  gc_warn_threshold_in_ms: "java.lang.Integer"
  disk_optimization_strategy: "org.apache.cassandra.config.Config.DiskOptimizationStrategy"
  hinted_handoff_throttle_in_kb: "java.lang.Integer"
  otc_backlog_expiration_interval_ms: "java.lang.Integer"
  counter_cache_save_period: "java.lang.Integer"
  keyspace_count_warn_threshold: "java.lang.Integer"
  hints_flush_period_in_ms: "java.lang.Integer"
  role_manager: "java.lang.String"
  block_for_peers_in_remote_dcs: "java.lang.Boolean"
  repair_command_pool_size: "java.lang.Integer"
  column_index_size_in_kb: "java.lang.Integer"
  memtable_offheap_space_in_mb: "java.lang.Integer"
  data_file_directories: "java.util.List"
  native_transport_max_frame_size_in_mb: "java.lang.Integer"
  index_summary_resize_interval_in_minutes: "java.lang.Integer"
  enable_user_defined_functions: "java.lang.Boolean"
  max_hint_window_in_ms: "java.lang.Integer"
  seed_provider:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  check_for_duplicate_rows_during_reads: "java.lang.Boolean"
  key_cache_migrate_during_compaction: "java.lang.Boolean"
  network_authorizer: "java.lang.String"
  batchlog_replay_throttle_in_kb: "java.lang.Integer"
  enable_scripted_user_defined_functions: "java.lang.Boolean"
  internode_application_send_queue_reserve_endpoint_capacity_in_bytes: "java.lang.Integer"
  commitlog_compression:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  broadcast_address: "java.lang.String"
  credentials_update_interval_in_ms: "java.lang.Integer"
  snapshot_before_compaction: "java.lang.Boolean"
  back_pressure_strategy:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  prepared_statements_cache_size_mb: "java.lang.Long"
  native_transport_port_ssl: "java.lang.Integer"
  allocate_tokens_for_keyspace: "java.lang.String"
  diagnostic_events_enabled: "java.lang.Boolean"
  storage_port: "java.lang.Integer"
  counter_cache_size_in_mb: "java.lang.Long"
  repair_request_timeout_in_ms: "java.lang.Long"
  dynamic_snitch_reset_interval_in_ms: "java.lang.Integer"
  tracetype_query_ttl: "java.lang.Integer"
  autocompaction_on_startup_enabled: "java.lang.Boolean"
  commit_failure_policy: "org.apache.cassandra.config.Config.CommitFailurePolicy"
  concurrent_writes: "java.lang.Integer"
  range_request_timeout_in_ms: "java.lang.Long"
  dynamic_snitch_update_interval_in_ms: "java.lang.Integer"
  hinted_handoff_enabled: "java.lang.Boolean"
  internode_application_receive_queue_capacity_in_bytes: "java.lang.Integer"
  automatic_sstable_upgrade: "java.lang.Boolean"
  max_value_size_in_mb: "java.lang.Integer"
  memtable_flush_writers: "java.lang.Integer"
  otc_coalescing_strategy: "java.lang.String"
  snapshot_on_repaired_data_mismatch: "java.lang.Boolean"
  commitlog_max_compression_buffers_in_pool: "java.lang.Integer"
  internode_application_send_queue_capacity_in_bytes: "java.lang.Integer"
  roles_cache_max_entries: "java.lang.Integer"
  native_transport_max_negotiable_protocol_version: "java.lang.Integer"
  start_native_transport: "java.lang.Boolean"
  ssl_storage_port: "java.lang.Integer"
  cluster_name: "java.lang.String"
  incremental_backups: "java.lang.Boolean"
  key_cache_save_period: "java.lang.Integer"
  windows_timer_interval: "java.lang.Integer"
  rpc_interface: "java.lang.String"
  repair_session_space_in_mb: "java.lang.Integer"
  row_cache_keys_to_save: "java.lang.Integer"
  repair_command_pool_full_strategy: "org.apache.cassandra.config.Config.RepairCommandPoolFullStrategy"
  inter_dc_stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
  client_encryption_options:
    optional: "java.lang.Boolean"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    enabled: "java.lang.Boolean"
    require_endpoint_verification: "java.lang.Boolean"
    accepted_protocols: "java.util.List"
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    truststore: "java.lang.String"
    algorithm: "java.lang.String"
  concurrent_validations: "java.lang.Integer"
  ideal_consistency_level: "org.apache.cassandra.db.ConsistencyLevel"
  consecutive_message_errors_threshold: "java.lang.Integer"
  trickle_fsync: "java.lang.Boolean"
  reject_repair_compaction_threshold: "java.lang.Integer"
  max_streaming_retries: "java.lang.Integer"
  native_transport_flush_in_batches_legacy: "java.lang.Boolean"
  rpc_address: "java.lang.String"
  file_cache_enabled: "java.lang.Boolean"
  cdc_raw_directory: "java.lang.String"
  num_tokens: "java.lang.Integer"
  repaired_data_tracking_for_partition_reads_enabled: "java.lang.Boolean"
  enable_drop_compact_storage: "java.lang.Boolean"
  commitlog_directory: "java.lang.String"
  unlogged_batch_across_partitions_warn_threshold: "java.lang.Integer"
  auto_bootstrap: "java.lang.Boolean"
  authorizer: "java.lang.String"
  sstable_preemptive_open_interval_in_mb: "java.lang.Integer"
  broadcast_rpc_address: "java.lang.String"
  listen_interface_prefer_ipv6: "java.lang.Boolean"
  repair_session_max_tree_depth: "java.lang.Integer"
  auto_optimise_preview_repair_streams: "java.lang.Boolean"
  concurrent_compactors: "java.lang.Integer"
  buffer_pool_use_heap_if_exhausted: "java.lang.Boolean"
  local_system_data_file_directory: "java.lang.String"
  stream_entire_sstables: "java.lang.Boolean"
  corrupted_tombstone_strategy: "org.apache.cassandra.config.Config.CorruptedTombstoneStrategy"
  listen_address: "java.lang.String"
  rpc_keepalive: "java.lang.Boolean"
  allow_extra_insecure_udfs: "java.lang.Boolean"
  request_timeout_in_ms: "java.lang.Long"
  disk_access_mode: "org.apache.cassandra.config.Config.DiskAccessMode"
  concurrent_counter_writes: "java.lang.Integer"
  row_cache_size_in_mb: "java.lang.Long"
  cas_contention_timeout_in_ms: "java.lang.Long"
  min_free_space_per_drive_in_mb: "java.lang.Integer"
  cross_node_timeout: "java.lang.Boolean"
  dynamic_snitch: "java.lang.Boolean"
  permissions_validity_in_ms: "java.lang.Integer"
  phi_convict_threshold: "java.lang.Double"
  native_transport_max_threads: "java.lang.Integer"
  permissions_update_interval_in_ms: "java.lang.Integer"
  authenticator: "java.lang.String"
  allow_insecure_udfs: "java.lang.Boolean"
  cache_load_timeout_seconds: "java.lang.Integer"
  concurrent_replicates: "java.lang.Integer"
  commitlog_sync_period_in_ms: "java.lang.Integer"
  auto_optimise_full_repair_streams: "java.lang.Boolean"
  internode_max_message_size_in_bytes: "java.lang.Integer"
  native_transport_max_concurrent_requests_in_bytes: "java.lang.Long"
  compaction_large_partition_warning_threshold_mb: "java.lang.Integer"
  enable_user_defined_functions_threads: "java.lang.Boolean"
  native_transport_allow_older_protocols: "java.lang.Boolean"
  otc_coalescing_enough_coalesced_messages: "java.lang.Integer"
  slow_query_log_timeout_in_ms: "java.lang.Long"
  report_unconfirmed_repaired_data_mismatches: "java.lang.Boolean"
  use_offheap_merkle_trees: "java.lang.Boolean"
  concurrent_materialized_view_builders: "java.lang.Integer"
  server_encryption_options:
    enable_legacy_ssl_storage_port: "java.lang.Boolean"
    optional: "java.lang.Boolean"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    enabled: "java.lang.Boolean"
    require_endpoint_verification: "java.lang.Boolean"
    accepted_protocols: "java.util.List"
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    truststore: "java.lang.String"
    algorithm: "java.lang.String"
  max_hints_delivery_threads: "java.lang.Integer"
  native_transport_idle_timeout_in_ms: "java.lang.Long"
  saved_caches_directory: "java.lang.String"
  max_concurrent_automatic_sstable_upgrades: "java.lang.Integer"
  file_cache_round_up: "java.lang.Boolean"
  streaming_keep_alive_period_in_secs: "java.lang.Integer"
  enable_sasi_indexes: "java.lang.Boolean"
  gc_log_threshold_in_ms: "java.lang.Integer"
  snapshot_on_duplicate_row_detection: "java.lang.Boolean"
  commitlog_sync_group_window_in_ms: "java.lang.Double"
  internode_compression: "org.apache.cassandra.config.Config.InternodeCompression"
  otc_coalescing_window_us: "java.lang.Integer"
  credentials_cache_max_entries: "java.lang.Integer"
  periodic_commitlog_sync_lag_block_in_ms: "java.lang.Integer"
  enable_materialized_views: "java.lang.Boolean"
  roles_validity_in_ms: "java.lang.Integer"
  networking_cache_size_in_mb: "java.lang.Integer"
  native_transport_port: "java.lang.Integer"
  permissions_cache_max_entries: "java.lang.Integer"
  stream_throughput_outbound_megabits_per_sec: "java.lang.Integer"
  hints_compression:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  commitlog_periodic_queue_size: "java.lang.Integer"
  force_new_prepared_statement_behaviour: "java.lang.Boolean"
  back_pressure_enabled: "java.lang.Boolean"
  cdc_free_space_check_interval_ms: "java.lang.Integer"
  transparent_data_encryption_options:
    cipher: "java.lang.String"
    chunk_length_kb: "java.lang.Integer"
    iv_length: "java.lang.Integer"
    key_alias: "java.lang.String"
    key_provider:
      class_name: "java.lang.String"
      parameters: "java.util.Map"
    enabled: "java.lang.Boolean"
  initial_range_tombstone_list_allocation_size: "java.lang.Integer"
  partitioner: "java.lang.String"
  listen_interface: "java.lang.String"
  user_defined_function_fail_timeout: "java.lang.Long"
  file_cache_size_in_mb: "java.lang.Integer"
  inter_dc_tcp_nodelay: "java.lang.Boolean"
  internode_authenticator: "java.lang.String"
  key_cache_keys_to_save: "java.lang.Integer"
  commitlog_segment_size_in_mb: "java.lang.Integer"
  replica_filtering_protection:
    cached_rows_fail_threshold: "java.lang.Integer"
    cached_rows_warn_threshold: "java.lang.Integer"
  internode_tcp_connect_timeout_in_ms: "java.lang.Integer"
  native_transport_max_concurrent_requests_in_bytes_per_ip: "java.lang.Long"
  range_tombstone_list_growth_factor: "java.lang.Double"
  native_transport_max_concurrent_connections: "java.lang.Long"
  memtable_cleanup_threshold: "java.lang.Float"
  concurrent_reads: "java.lang.Integer"
  streaming_connections_per_host: "java.lang.Integer"
  truncate_request_timeout_in_ms: "java.lang.Long"
  auto_snapshot: "java.lang.Boolean"
  native_transport_receive_queue_capacity_in_bytes: "java.lang.Integer"
  internode_streaming_tcp_user_timeout_in_ms: "java.lang.Integer"
  batch_size_fail_threshold_in_kb: "java.lang.Integer"
  compaction_throughput_mb_per_sec: "java.lang.Integer"
  max_hints_file_size_in_mb: "java.lang.Integer"
  endpoint_snitch: "java.lang.String"
path: dataset_sample\yaml\apache_cassandra\test\data\config\version=4.0-alpha1.yml
filename: version=4.0-alpha1.yml
language: YAML
size_bytes: 15219
quality_score: 0.95
complexity: 0.006231
documentation_ratio: 0.05296
repository: node-utils
stars: 259
created_date: 2024-09-08T06:33:25.626652
license: GPL-3.0
is_test: true
file_hash: 16baf1e20cc6ccad2d33a99f110f4771

content: |
  # Licensed to the Apache Software Foundation (ASF) under one
  # or more contributor license agreements. See the NOTICE file
  # distributed with this work for additional information
  # regarding copyright ownership. The ASF licenses this file
  # to you under the Apache License, Version 2.0 (the
  # "License"); you may not use this file except in compliance
  # with the License. You may obtain a copy of the License at
  #
  # http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
  #
  #
  ---
  columns_per_table_warn_threshold: "java.lang.Integer"
  repaired_data_tracking_for_range_reads_enabled: "java.lang.Boolean"
  cdc_block_writes: "java.lang.Boolean"
  block_for_peers_timeout_in_secs: "java.lang.Integer"
  flush_compression: "org.apache.cassandra.config.Config.FlushCompression"
  commitlog_total_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  audit_logging_options:
    audit_logs_dir: "java.lang.String"
    included_users: "java.lang.String"
    logger:
      class_name: "java.lang.String"
      parameters: "java.util.Map"
    excluded_categories: "java.lang.String"
    roll_cycle: "java.lang.String"
    enabled: "java.lang.Boolean"
    included_categories: "java.lang.String"
    max_archive_retries: "java.lang.Integer"
    excluded_keyspaces: "java.lang.String"
    archive_command: "java.lang.String"
    included_keyspaces: "java.lang.String"
    max_log_size: "java.lang.Long"
    allow_nodetool_archive_command: "java.lang.Boolean"
    block: "java.lang.Boolean"
    excluded_users: "java.lang.String"
    max_queue_weight: "java.lang.Integer"
  row_cache_save_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"
  snapshot_links_per_second: "java.lang.Long"
  disk_optimization_estimate_percentile: "java.lang.Double"
  roles_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  hinted_handoff_disabled_datacenters: "java.util.Set"
  cdc_enabled: "java.lang.Boolean"
  compaction_tombstone_warning_threshold: "java.lang.Integer"
  internode_application_receive_queue_reserve_endpoint_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"
  roles_validity: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  coordinator_read_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"
  scripted_user_defined_functions_enabled: "java.lang.Boolean"
  auth_cache_warming_enabled: "java.lang.Boolean"
  entire_sstable_stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"
  roles_cache_active_update: "java.lang.Boolean"
  commitlog_sync: "org.apache.cassandra.config.Config.CommitLogSync"
  paxos_repair_parallelism: "java.lang.Integer"
  dynamic_snitch_reset_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  dynamic_snitch_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  native_transport_idle_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
  internode_application_send_queue_reserve_global_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"
  concurrent_materialized_view_writes: "java.lang.Integer"
  read_before_write_list_operations_enabled: "java.lang.Boolean"
  table_properties_ignored: "java.util.Set"
  hints_directory: "java.lang.String"
  native_transport_max_concurrent_connections_per_ip: "java.lang.Long"
  secondary_indexes_per_table_fail_threshold: "java.lang.Integer"
  auto_snapshot_ttl: "java.lang.String"
  rpc_interface_prefer_ipv6: "java.lang.Boolean"
  check_for_duplicate_rows_during_compaction: "java.lang.Boolean"
  gc_log_threshold: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  secondary_indexes_enabled: "java.lang.Boolean"
  user_function_timeout_policy: "org.apache.cassandra.config.Config.UserFunctionTimeoutPolicy"
  memtable_allocation_type: "org.apache.cassandra.config.Config.MemtableAllocationType"
  hints_flush_period: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  default_keyspace_rf: "java.lang.Integer"
  tombstone_warn_threshold: "java.lang.Integer"
  write_consistency_levels_warned: "java.util.Set"
  full_query_logging_options:
    log_dir: "java.lang.String"
    archive_command: "java.lang.String"
    max_log_size: "java.lang.Long"
    allow_nodetool_archive_command: "java.lang.Boolean"
    block: "java.lang.Boolean"
    roll_cycle: "java.lang.String"
    max_queue_weight: "java.lang.Integer"
    max_archive_retries: "java.lang.Integer"
  streaming_keep_alive_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"
  table_count_warn_threshold: "java.lang.Integer"
  auto_optimise_inc_repair_streams: "java.lang.Boolean"
  user_defined_functions_fail_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
  write_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
  commitlog_sync_batch_window_in_ms: "java.lang.Double"
  min_tracked_partition_tombstone_count: "java.lang.Long"
  sasi_indexes_enabled: "java.lang.Boolean"
  disk_failure_policy: "org.apache.cassandra.config.Config.DiskFailurePolicy"
  tombstone_failure_threshold: "java.lang.Integer"
  native_transport_max_request_data_in_flight: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"
  initial_token: "java.lang.String"
  keyspaces_warn_threshold: "java.lang.Integer"
  dynamic_snitch_badness_threshold: "java.lang.Double"
  transient_replication_enabled: "java.lang.Boolean"
  allocate_tokens_for_local_replication_factor: "java.lang.Integer"
  native_transport_max_requests_per_second: "java.lang.Integer"
  counter_cache_keys_to_save: "java.lang.Integer"
  disk_optimization_page_cross_chance: "java.lang.Double"
  in_select_cartesian_product_fail_threshold: "java.lang.Integer"
  listen_on_broadcast_address: "java.lang.Boolean"
  row_cache_class_name: "java.lang.String"
  permissions_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  denylist_initial_load_retry: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"
  trace_type_query_ttl: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"
  disk_optimization_strategy: "org.apache.cassandra.config.Config.DiskOptimizationStrategy"
  otc_backlog_expiration_interval_ms: "java.lang.Integer"
  native_transport_max_request_data_in_flight_per_ip: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"
  counter_cache_save_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"
  tables_fail_threshold: "java.lang.Integer"
  trace_type_repair_ttl: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"
  keyspace_count_warn_threshold: "java.lang.Integer"
  minimum_replication_factor_warn_threshold: "java.lang.Integer"
  page_size_warn_threshold: "java.lang.Integer"
  role_manager: "java.lang.String"
  counter_write_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
  denylist_writes_enabled: "java.lang.Boolean"
  block_for_peers_in_remote_dcs: "java.lang.Boolean"
  inter_dc_stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"
  repair_command_pool_size: "java.lang.Integer"
  denylist_consistency_level: "org.apache.cassandra.db.ConsistencyLevel"
  table_properties_warned: "java.util.Set"
  entire_sstable_inter_dc_stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"
  data_file_directories: "java.util.List"
  networking_cache_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  fields_per_udt_fail_threshold: "java.lang.Integer"
  key_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"
  max_hint_window: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  max_hints_size_per_host: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"
  client_error_reporting_exclusions:
    subnets: "java.util.Set"
  seed_provider:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  check_for_duplicate_rows_during_reads: "java.lang.Boolean"
  internode_max_message_size: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"
  items_per_collection_warn_threshold: "java.lang.Integer"
  key_cache_migrate_during_compaction: "java.lang.Boolean"
  network_authorizer: "java.lang.String"
  data_disk_usage_max_disk_size: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"
  memtable_offheap_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  prepared_statements_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"
  paxos_contention_wait_randomizer: "java.lang.String"
  commitlog_compression:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  broadcast_address: "java.lang.String"
  repair_session_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  partition_denylist_enabled: "java.lang.Boolean"
  paxos_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"
  snapshot_before_compaction: "java.lang.Boolean"
  back_pressure_strategy:
    class_name: "java.lang.String"
    parameters: "java.util.Map"
  native_transport_port_ssl: "java.lang.Integer"
  allocate_tokens_for_keyspace: "java.lang.String"
  uuid_sstable_identifiers_enabled: "java.lang.Boolean"
  credentials_cache_active_update: "java.lang.Boolean"
  diagnostic_events_enabled: "java.lang.Boolean"
  failure_detector: "java.lang.String"
  storage_port: "java.lang.Integer"
  drop_compact_storage_enabled: "java.lang.Boolean"
  uncompressed_tables_enabled: "java.lang.Boolean"
  commitlog_sync_group_window: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  auth_read_consistency_level: "java.lang.String"
  autocompaction_on_startup_enabled: "java.lang.Boolean"
  items_per_collection_fail_threshold: "java.lang.Integer"
  paxos_state_purging: "org.apache.cassandra.config.Config.PaxosStatePurging"
  column_index_cache_size: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"
  commit_failure_policy: "org.apache.cassandra.config.Config.CommitFailurePolicy"
  concurrent_writes: "java.lang.Integer"
  max_value_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  compaction_large_partition_warning_threshold: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  read_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
  hinted_handoff_enabled: "java.lang.Boolean"
  automatic_sstable_upgrade: "java.lang.Boolean"
  memtable_flush_writers: "java.lang.Integer"
  otc_coalescing_strategy: "java.lang.String"
  snapshot_on_repaired_data_mismatch: "java.lang.Boolean"
  commitlog_max_compression_buffers_in_pool: "java.lang.Integer"
  cdc_free_space_check_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  roles_cache_max_entries: "java.lang.Integer"
  allow_filtering_enabled: "java.lang.Boolean"
  native_transport_max_negotiable_protocol_version: "java.lang.Integer"
  columns_per_table_fail_threshold: "java.lang.Integer"
  start_native_transport: "java.lang.Boolean"
  ssl_storage_port: "java.lang.Integer"
  cluster_name: "java.lang.String"
  credentials_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  incremental_backups: "java.lang.Boolean"
  internode_socket_send_buffer_size: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"
  min_tracked_partition_size: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"
  cas_contention_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
  key_cache_save_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"
  windows_timer_interval: "java.lang.Integer"
  rpc_interface: "java.lang.String"
  row_cache_keys_to_save: "java.lang.Integer"
  compact_tables_enabled: "java.lang.Boolean"
  repair_command_pool_full_strategy: "org.apache.cassandra.config.Config.RepairCommandPoolFullStrategy"
  drop_truncate_table_enabled: "java.lang.Boolean"
  max_top_size_partition_count: "java.lang.Integer"
  client_encryption_options:
    optional: "java.lang.Boolean"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    enabled: "java.lang.Boolean"
    require_endpoint_verification: "java.lang.Boolean"
    accepted_protocols: "java.util.List"
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    ssl_context_factory:
      class_name: "java.lang.String"
      parameters: "java.util.Map"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    truststore: "java.lang.String"
    algorithm: "java.lang.String"
  concurrent_validations: "java.lang.Integer"
  ideal_consistency_level: "org.apache.cassandra.db.ConsistencyLevel"
  partition_keys_in_select_warn_threshold: "java.lang.Integer"
  consecutive_message_errors_threshold: "java.lang.Integer"
  read_thresholds_enabled: "java.lang.Boolean"
  internode_application_send_queue_reserve_endpoint_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"
  partition_keys_in_select_fail_threshold: "java.lang.Integer"
  trickle_fsync: "java.lang.Boolean"
  group_by_enabled: "java.lang.Boolean"
  reject_repair_compaction_threshold: "java.lang.Integer"
  request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
  cdc_total_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  paxos_variant: "org.apache.cassandra.config.Config.PaxosVariant"
  index_summary_resize_interval: "org.apache.cassandra.config.DurationSpec.IntMinutesBound"
  max_top_tombstone_partition_count: "java.lang.Integer"
  data_disk_usage_percentage_fail_threshold: "java.lang.Integer"
  max_streaming_retries: "java.lang.Integer"
  internode_timeout: "java.lang.Boolean"
  native_transport_flush_in_batches_legacy: "java.lang.Boolean"
  rpc_address: "java.lang.String"
  trickle_fsync_interval: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"
  file_cache_enabled: "java.lang.Boolean"
  cdc_raw_directory: "java.lang.String"
  num_tokens: "java.lang.Integer"
  native_transport_max_frame_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  user_timestamps_enabled: "java.lang.Boolean"
  paxos_topology_repair_no_dc_checks: "java.lang.Boolean"
  repaired_data_tracking_for_partition_reads_enabled: "java.lang.Boolean"
  internode_streaming_tcp_user_timeout: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  file_cache_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  denylist_max_keys_per_table: "java.lang.Integer"
  commitlog_directory: "java.lang.String"
  unlogged_batch_across_partitions_warn_threshold: "java.lang.Integer"
  write_consistency_levels_disallowed: "java.util.Set"
  auto_bootstrap: "java.lang.Boolean"
  authorizer: "java.lang.String"
  auth_write_consistency_level: "java.lang.String"
  counter_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"
  denylist_range_reads_enabled: "java.lang.Boolean"
  batchlog_replay_throttle: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"
  compaction_throughput: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"
  stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"
  streaming_state_expires: "org.apache.cassandra.config.DurationSpec.LongNanosecondsBound"
  broadcast_rpc_address: "java.lang.String"
  listen_interface_prefer_ipv6: "java.lang.Boolean"
  repair_session_max_tree_depth: "java.lang.Integer"
  auto_optimise_preview_repair_streams: "java.lang.Boolean"
  internode_tcp_connect_timeout: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  paxos_purge_grace_period: "org.apache.cassandra.config.DurationSpec.LongSecondsBound"
  commitlog_sync_period: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  concurrent_compactors: "java.lang.Integer"
  hint_window_persistent_enabled: "java.lang.Boolean"
  buffer_pool_use_heap_if_exhausted: "java.lang.Boolean"
  paxos_contention_max_wait: "java.lang.String"
  native_transport_receive_queue_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"
  in_select_cartesian_product_warn_threshold: "java.lang.Integer"
  local_system_data_file_directory: "java.lang.String"
  stream_entire_sstables: "java.lang.Boolean"
  internode_tcp_user_timeout: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  corrupted_tombstone_strategy: "org.apache.cassandra.config.Config.CorruptedTombstoneStrategy"
  paxos_topology_repair_strict_each_quorum: "java.lang.Boolean"
  listen_address: "java.lang.String"
  top_partitions_enabled: "java.lang.Boolean"
  sstable_preemptive_open_interval: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  rpc_keepalive: "java.lang.Boolean"
  min_free_space_per_drive: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  read_consistency_levels_warned: "java.util.Set"
  allow_extra_insecure_udfs: "java.lang.Boolean"
  batch_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"
  disk_access_mode: "org.apache.cassandra.config.Config.DiskAccessMode"
  concurrent_counter_writes: "java.lang.Integer"
  internode_error_reporting_exclusions:
    subnets: "java.util.Set"
  paxos_contention_min_delta: "java.lang.String"
  dynamic_snitch: "java.lang.Boolean"
  use_deterministic_table_id: "java.lang.Boolean"
  phi_convict_threshold: "java.lang.Double"
  max_hints_file_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  native_transport_max_threads: "java.lang.Integer"
  minimum_replication_factor_fail_threshold: "java.lang.Integer"
  table_properties_disallowed: "java.util.Set"
  collection_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"
  memtable:
    configurations: "java.util.LinkedHashMap"
  authenticator: "java.lang.String"
  internode_socket_receive_buffer_size: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"
  allow_insecure_udfs: "java.lang.Boolean"
  internode_application_receive_queue_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"
  concurrent_replicates: "java.lang.Integer"
  user_defined_functions_enabled: "java.lang.Boolean"
  user_defined_functions_threads_enabled: "java.lang.Boolean"
  streaming_slow_events_log_timeout: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"
  truncate_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
  periodic_commitlog_sync_lag_block: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  repair_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
  auto_optimise_full_repair_streams: "java.lang.Boolean"
  commitlog_segment_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  batch_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"
  streaming_state_size: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"
  denylist_max_keys_total: "java.lang.Integer"
  repair_state_expires: "org.apache.cassandra.config.DurationSpec.LongNanosecondsBound"
  native_transport_allow_older_protocols: "java.lang.Boolean"
  otc_coalescing_enough_coalesced_messages: "java.lang.Integer"
  report_unconfirmed_repaired_data_mismatches: "java.lang.Boolean"
  fields_per_udt_warn_threshold: "java.lang.Integer"
  paxos_on_linearizability_violations: "org.apache.cassandra.config.Config.PaxosOnLinearizabilityViolation"
  read_consistency_levels_disallowed: "java.util.Set"
  use_offheap_merkle_trees: "java.lang.Boolean"
  concurrent_materialized_view_builders: "java.lang.Integer"
  server_encryption_options:
    optional: "java.lang.Boolean"
    store_type: "java.lang.String"
    cipher_suites: "java.util.List"
    enabled: "java.lang.Boolean"
    require_endpoint_verification: "java.lang.Boolean"
    accepted_protocols: "java.util.List"
    keystore_password: "java.lang.String"
    protocol: "java.lang.String"
    require_client_auth: "java.lang.Boolean"
    internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"
    ssl_context_factory:
      class_name: "java.lang.String"
      parameters: "java.util.Map"
    legacy_ssl_storage_port_enabled: "java.lang.Boolean"
    truststore_password: "java.lang.String"
    keystore: "java.lang.String"
    truststore: "java.lang.String"
    algorithm: "java.lang.String"
  traverse_auth_from_root: "java.lang.Boolean"
  denylist_refresh: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"
  max_hints_delivery_threads: "java.lang.Integer"
  permissions_validity: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"
  materialized_views_enabled: "java.lang.Boolean"
  saved_caches_directory: "java.lang.String"
  internode_application_send_queue_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"
  memtable_heap_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"
  max_concurrent_automatic_sstable_upgrades: "java.lang.Integer"
  denylist_reads_enabled: "java.lang.Boolean"
  permissions_cache_active_update: "java.lang.Boolean"
  available_processors: 
"java.lang.Integer"\nfile_cache_round_up: "java.lang.Boolean"\nsecondary_indexes_per_table_warn_threshold: "java.lang.Integer"\ntables_warn_threshold: "java.lang.Integer"\nsnapshot_on_duplicate_row_detection: "java.lang.Boolean"\ninternode_application_receive_queue_reserve_global_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\ninternode_compression: "org.apache.cassandra.config.Config.InternodeCompression"\nrange_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nstreaming_stats_enabled: "java.lang.Boolean"\nlocal_read_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\notc_coalescing_window_us: "java.lang.Integer"\npage_size_fail_threshold: "java.lang.Integer"\ncredentials_cache_max_entries: "java.lang.Integer"\ncoordinator_read_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nrow_index_read_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nindex_summary_capacity: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\ncollection_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ncolumn_index_size: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nvalidation_preview_purge_head_start: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nskip_paxos_repair_on_topology_change_keyspaces: "java.util.Set"\nmaterialized_views_per_table_fail_threshold: "java.lang.Integer"\nmax_mutation_size: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nkeyspaces_fail_threshold: "java.lang.Integer"\ndata_disk_usage_percentage_warn_threshold: "java.lang.Integer"\nslow_query_log_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\npaxos_contention_min_wait: "java.lang.String"\nrepair_state_size: "java.lang.Integer"\nuser_defined_functions_warn_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nnative_transport_port: "java.lang.Integer"\npermissions_cache_max_entries: "java.lang.Integer"\nhinted_handoff_throttle: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nrow_index_read_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nhints_compression:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ncommitlog_periodic_queue_size: "java.lang.Integer"\nforce_new_prepared_statement_behaviour: "java.lang.Boolean"\nback_pressure_enabled: "java.lang.Boolean"\nmaterialized_views_per_table_warn_threshold: "java.lang.Integer"\ntransparent_data_encryption_options:\n cipher: "java.lang.String"\n chunk_length_kb: "java.lang.Integer"\n iv_length: "java.lang.Integer"\n key_alias: "java.lang.String"\n key_provider:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\n enabled: "java.lang.Boolean"\ninitial_range_tombstone_list_allocation_size: "java.lang.Integer"\npartitioner: "java.lang.String"\ngc_warn_threshold: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nlisten_interface: "java.lang.String"\nrow_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\npaxos_repair_enabled: "java.lang.Boolean"\ninter_dc_tcp_nodelay: "java.lang.Boolean"\ninternode_authenticator: "java.lang.String"\nkey_cache_keys_to_save: "java.lang.Integer"\nlocal_read_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nreplica_filtering_protection:\n cached_rows_fail_threshold: "java.lang.Integer"\n cached_rows_warn_threshold: 
"java.lang.Integer"\nrange_tombstone_list_growth_factor: "java.lang.Double"\nstartup_checks: "java.util.Map"\nnative_transport_max_concurrent_connections: "java.lang.Long"\nmemtable_cleanup_threshold: "java.lang.Float"\nconcurrent_reads: "java.lang.Integer"\ncredentials_validity: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nstreaming_connections_per_host: "java.lang.Integer"\ncache_load_timeout: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nnative_transport_rate_limiting_enabled: "java.lang.Boolean"\nuse_statements_enabled: "java.lang.Boolean"\nauto_hints_cleanup_enabled: "java.lang.Boolean"\nauto_snapshot: "java.lang.Boolean"\nskip_paxos_repair_on_topology_change: "java.lang.Boolean"\nendpoint_snitch: "java.lang.String"\n
dataset_sample\yaml\apache_cassandra\test\data\config\version=4.1-alpha1.yml
version=4.1-alpha1.yml
YAML
25,146
0.95
0.004535
0.038549
react-lib
836
2025-02-06T11:12:13.113200
Apache-2.0
true
55d65d6c41c7c7d9bed58c3db62611bf
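
The record above maps every Cassandra 4.1-alpha1 config key (including nested blocks such as seed_provider and client_encryption_options) to the fully qualified Java type backing it. A minimal sketch of how such a fixture could be sanity-checked follows; the helper names and the regex are illustrative assumptions, not part of this dataset or of Cassandra's own test harness, and PyYAML is assumed to be installed.

import re
import yaml  # PyYAML; assumed available

# Loose pattern for a fully qualified Java class name (dotted identifiers).
JAVA_TYPE = re.compile(r"^([A-Za-z_$][\w$]*\.)+[A-Za-z_$][\w$]*$")

def iter_leaves(node, path=()):
    """Yield (dotted_key, value) pairs for every scalar in a nested mapping."""
    if isinstance(node, dict):
        for key, value in node.items():
            yield from iter_leaves(value, path + (key,))
    else:
        yield ".".join(path), node

def check_type_map(text):
    """Return the keys whose declared type is not a plausible Java FQCN."""
    mapping = yaml.safe_load(text)
    return [key for key, value in iter_leaves(mapping)
            if not (isinstance(value, str) and JAVA_TYPE.match(value))]

# A tiny excerpt in the same shape as the record above.
sample = """
read_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"
seed_provider:
  class_name: "java.lang.String"
  parameters: "java.util.Map"
"""
assert check_type_map(sample) == []  # every leaf is a well-formed type name

The dotted-key flattening mirrors how the nested blocks in the record (e.g. seed_provider.class_name) are naturally addressed once the escaped YAML is parsed.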
# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n---\ncolumns_per_table_warn_threshold: "java.lang.Integer"\nrepaired_data_tracking_for_range_reads_enabled: "java.lang.Boolean"\ncdc_block_writes: "java.lang.Boolean"\nblock_for_peers_timeout_in_secs: "java.lang.Integer"\nflush_compression: "org.apache.cassandra.config.Config.FlushCompression"\ncommitlog_total_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\naudit_logging_options:\n audit_logs_dir: "java.lang.String"\n included_users: "java.lang.String"\n logger:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\n excluded_categories: "java.lang.String"\n roll_cycle: "java.lang.String"\n enabled: "java.lang.Boolean"\n included_categories: "java.lang.String"\n max_archive_retries: "java.lang.Integer"\n excluded_keyspaces: "java.lang.String"\n archive_command: "java.lang.String"\n included_keyspaces: "java.lang.String"\n max_log_size: "java.lang.Long"\n allow_nodetool_archive_command: "java.lang.Boolean"\n block: "java.lang.Boolean"\n excluded_users: "java.lang.String"\n max_queue_weight: "java.lang.Integer"\nrow_cache_save_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nsnapshot_links_per_second: "java.lang.Long"\ndisk_optimization_estimate_percentile: "java.lang.Double"\nroles_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nhinted_handoff_disabled_datacenters: "java.util.Set"\ncdc_enabled: "java.lang.Boolean"\ninternode_application_receive_queue_reserve_endpoint_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nroles_validity: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ncoordinator_read_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nscripted_user_defined_functions_enabled: "java.lang.Boolean"\nauth_cache_warming_enabled: "java.lang.Boolean"\nalter_table_enabled: "java.lang.Boolean"\nclient_request_size_metrics_enabled: "java.lang.Boolean"\ncdc_on_repair_enabled: "java.lang.Boolean"\nentire_sstable_stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\npartition_tombstones_warn_threshold: "java.lang.Long"\nroles_cache_active_update: "java.lang.Boolean"\ncommitlog_sync: "org.apache.cassandra.config.Config.CommitLogSync"\npaxos_repair_parallelism: "java.lang.Integer"\ndynamic_snitch_reset_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ndynamic_snitch_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nnative_transport_idle_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\ninternode_application_send_queue_reserve_global_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nconcurrent_materialized_view_writes: 
"java.lang.Integer"\nread_before_write_list_operations_enabled: "java.lang.Boolean"\ntable_properties_ignored: "java.util.Set"\nmaximum_timestamp_warn_threshold: "org.apache.cassandra.config.DurationSpec.LongMicrosecondsBound"\nhints_directory: "java.lang.String"\ndump_heap_on_uncaught_exception: "java.lang.Boolean"\nnative_transport_max_concurrent_connections_per_ip: "java.lang.Long"\nsecondary_indexes_per_table_fail_threshold: "java.lang.Integer"\nvector_dimensions_warn_threshold: "java.lang.Integer"\nauto_snapshot_ttl: "java.lang.String"\nrpc_interface_prefer_ipv6: "java.lang.Boolean"\ncheck_for_duplicate_rows_during_compaction: "java.lang.Boolean"\ngc_log_threshold: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nsecondary_indexes_enabled: "java.lang.Boolean"\nuser_function_timeout_policy: "org.apache.cassandra.config.Config.UserFunctionTimeoutPolicy"\nmemtable_allocation_type: "org.apache.cassandra.config.Config.MemtableAllocationType"\nhints_flush_period: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ndefault_keyspace_rf: "java.lang.Integer"\ntombstone_warn_threshold: "java.lang.Integer"\nwrite_consistency_levels_warned: "java.util.Set"\nfull_query_logging_options:\n log_dir: "java.lang.String"\n archive_command: "java.lang.String"\n max_log_size: "java.lang.Long"\n allow_nodetool_archive_command: "java.lang.Boolean"\n block: "java.lang.Boolean"\n roll_cycle: "java.lang.String"\n max_queue_weight: "java.lang.Integer"\n max_archive_retries: "java.lang.Integer"\nstreaming_keep_alive_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nauto_optimise_inc_repair_streams: "java.lang.Boolean"\nuser_defined_functions_fail_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nwrite_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nmin_tracked_partition_tombstone_count: "java.lang.Long"\nsasi_indexes_enabled: "java.lang.Boolean"\ndisk_failure_policy: "org.apache.cassandra.config.Config.DiskFailurePolicy"\nheap_dump_path: "java.lang.String"\ntombstone_failure_threshold: "java.lang.Integer"\nnative_transport_max_request_data_in_flight: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ninitial_token: "java.lang.String"\nkeyspaces_warn_threshold: "java.lang.Integer"\ndynamic_snitch_badness_threshold: "java.lang.Double"\ntransient_replication_enabled: "java.lang.Boolean"\nallocate_tokens_for_local_replication_factor: "java.lang.Integer"\nnative_transport_max_requests_per_second: "java.lang.Integer"\ncounter_cache_keys_to_save: "java.lang.Integer"\ndisk_optimization_page_cross_chance: "java.lang.Double"\nin_select_cartesian_product_fail_threshold: "java.lang.Integer"\nlisten_on_broadcast_address: "java.lang.Boolean"\nrow_cache_class_name: "java.lang.String"\npermissions_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ndenylist_initial_load_retry: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\ntrace_type_query_ttl: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\ndisk_optimization_strategy: "org.apache.cassandra.config.Config.DiskOptimizationStrategy"\ndrop_keyspace_enabled: "java.lang.Boolean"\notc_backlog_expiration_interval_ms: "java.lang.Integer"\nnative_transport_max_request_data_in_flight_per_ip: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ncounter_cache_save_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\ntables_fail_threshold: "java.lang.Integer"\ntrace_type_repair_ttl: 
"org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nminimum_replication_factor_warn_threshold: "java.lang.Integer"\npage_size_warn_threshold: "java.lang.Integer"\nrole_manager:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ncounter_write_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\ndenylist_writes_enabled: "java.lang.Boolean"\nblock_for_peers_in_remote_dcs: "java.lang.Boolean"\ninter_dc_stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\nrepair_command_pool_size: "java.lang.Integer"\ndenylist_consistency_level: "org.apache.cassandra.db.ConsistencyLevel"\ntable_properties_warned: "java.util.Set"\nentire_sstable_inter_dc_stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\ndata_file_directories: "java.util.List"\ndefault_secondary_index: "java.lang.String"\nnetworking_cache_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nfields_per_udt_fail_threshold: "java.lang.Integer"\nkey_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\nmax_hint_window: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nsai_options:\n segment_write_buffer_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nvector_dimensions_fail_threshold: "java.lang.Integer"\nmax_hints_size_per_host: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\npartition_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nmaximum_timestamp_fail_threshold: "org.apache.cassandra.config.DurationSpec.LongMicrosecondsBound"\nminimum_timestamp_warn_threshold: "org.apache.cassandra.config.DurationSpec.LongMicrosecondsBound"\nclient_error_reporting_exclusions:\n subnets: "java.util.Set"\nseed_provider:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ncheck_for_duplicate_rows_during_reads: "java.lang.Boolean"\ninternode_max_message_size: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nitems_per_collection_warn_threshold: "java.lang.Integer"\nkey_cache_migrate_during_compaction: "java.lang.Boolean"\nsstable_read_rate_persistence_enabled: "java.lang.Boolean"\nnetwork_authorizer:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ndata_disk_usage_max_disk_size: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nmemtable_offheap_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nprepared_statements_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\npaxos_contention_wait_randomizer: "java.lang.String"\ncommitlog_compression:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nbroadcast_address: "java.lang.String"\nrepair_session_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\npartition_denylist_enabled: "java.lang.Boolean"\npaxos_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\nsnapshot_before_compaction: "java.lang.Boolean"\nback_pressure_strategy:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nnative_transport_port_ssl: "java.lang.Integer"\nallocate_tokens_for_keyspace: "java.lang.String"\nuuid_sstable_identifiers_enabled: "java.lang.Boolean"\ncredentials_cache_active_update: "java.lang.Boolean"\ndiagnostic_events_enabled: "java.lang.Boolean"\nfailure_detector: "java.lang.String"\nstorage_port: "java.lang.Integer"\ndrop_compact_storage_enabled: "java.lang.Boolean"\nuncompressed_tables_enabled: 
"java.lang.Boolean"\ncommitlog_sync_group_window: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nauth_read_consistency_level: "java.lang.String"\nautocompaction_on_startup_enabled: "java.lang.Boolean"\nitems_per_collection_fail_threshold: "java.lang.Integer"\npaxos_state_purging: "org.apache.cassandra.config.Config.PaxosStatePurging"\ncolumn_index_cache_size: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\ncommit_failure_policy: "org.apache.cassandra.config.Config.CommitFailurePolicy"\nconcurrent_writes: "java.lang.Integer"\nmax_value_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nread_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nhinted_handoff_enabled: "java.lang.Boolean"\nautomatic_sstable_upgrade: "java.lang.Boolean"\nmemtable_flush_writers: "java.lang.Integer"\notc_coalescing_strategy: "java.lang.String"\nsnapshot_on_repaired_data_mismatch: "java.lang.Boolean"\ncidr_authorizer:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ncommitlog_max_compression_buffers_in_pool: "java.lang.Integer"\nzero_ttl_on_twcs_enabled: "java.lang.Boolean"\ncdc_free_space_check_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nroles_cache_max_entries: "java.lang.Integer"\nallow_filtering_enabled: "java.lang.Boolean"\ncolumns_per_table_fail_threshold: "java.lang.Integer"\nstart_native_transport: "java.lang.Boolean"\nssl_storage_port: "java.lang.Integer"\ncluster_name: "java.lang.String"\ncredentials_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nincremental_backups: "java.lang.Boolean"\ninternode_socket_send_buffer_size: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\ndefault_compaction:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nmin_tracked_partition_size: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ncas_contention_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nkey_cache_save_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nwindows_timer_interval: "java.lang.Integer"\nrpc_interface: "java.lang.String"\nrow_cache_keys_to_save: "java.lang.Integer"\ncompact_tables_enabled: "java.lang.Boolean"\npartition_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nminimum_timestamp_fail_threshold: "org.apache.cassandra.config.DurationSpec.LongMicrosecondsBound"\nrepair_command_pool_full_strategy: "org.apache.cassandra.config.Config.RepairCommandPoolFullStrategy"\ndrop_truncate_table_enabled: "java.lang.Boolean"\nmax_top_size_partition_count: "java.lang.Integer"\nclient_encryption_options:\n optional: "java.lang.Boolean"\n store_type: "java.lang.String"\n cipher_suites: "java.util.List"\n enabled: "java.lang.Boolean"\n require_endpoint_verification: "java.lang.Boolean"\n accepted_protocols: "java.util.List"\n keystore_password: "java.lang.String"\n protocol: "java.lang.String"\n require_client_auth: "java.lang.Boolean"\n ssl_context_factory:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\n truststore_password: "java.lang.String"\n keystore: "java.lang.String"\n truststore: "java.lang.String"\n algorithm: "java.lang.String"\nconcurrent_validations: "java.lang.Integer"\nideal_consistency_level: "org.apache.cassandra.db.ConsistencyLevel"\npartition_keys_in_select_warn_threshold: "java.lang.Integer"\nconsecutive_message_errors_threshold: "java.lang.Integer"\nread_thresholds_enabled: 
"java.lang.Boolean"\ninternode_application_send_queue_reserve_endpoint_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\npartition_keys_in_select_fail_threshold: "java.lang.Integer"\ntrickle_fsync: "java.lang.Boolean"\ngroup_by_enabled: "java.lang.Boolean"\nreject_repair_compaction_threshold: "java.lang.Integer"\nrequest_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\ncdc_total_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\ndefault_secondary_index_enabled: "java.lang.Boolean"\npaxos_variant: "org.apache.cassandra.config.Config.PaxosVariant"\nindex_summary_resize_interval: "org.apache.cassandra.config.DurationSpec.IntMinutesBound"\nmax_top_tombstone_partition_count: "java.lang.Integer"\ndata_disk_usage_percentage_fail_threshold: "java.lang.Integer"\nmax_streaming_retries: "java.lang.Integer"\ninternode_timeout: "java.lang.Boolean"\nnative_transport_flush_in_batches_legacy: "java.lang.Boolean"\nrpc_address: "java.lang.String"\ntrickle_fsync_interval: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nfile_cache_enabled: "java.lang.Boolean"\ncdc_raw_directory: "java.lang.String"\nnum_tokens: "java.lang.Integer"\nnative_transport_max_frame_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nuser_timestamps_enabled: "java.lang.Boolean"\npaxos_topology_repair_no_dc_checks: "java.lang.Boolean"\nrepaired_data_tracking_for_partition_reads_enabled: "java.lang.Boolean"\ninternode_streaming_tcp_user_timeout: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nfile_cache_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\ndenylist_max_keys_per_table: "java.lang.Integer"\ncommitlog_directory: "java.lang.String"\nunlogged_batch_across_partitions_warn_threshold: "java.lang.Integer"\nwrite_consistency_levels_disallowed: "java.util.Set"\nauto_bootstrap: "java.lang.Boolean"\nauthorizer:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nauth_write_consistency_level: "java.lang.String"\ncounter_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\ndenylist_range_reads_enabled: "java.lang.Boolean"\nbatchlog_replay_throttle: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\ncompaction_throughput: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\nstream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\nstreaming_state_expires: "org.apache.cassandra.config.DurationSpec.LongNanosecondsBound"\nbroadcast_rpc_address: "java.lang.String"\nlisten_interface_prefer_ipv6: "java.lang.Boolean"\nrepair_session_max_tree_depth: "java.lang.Integer"\nauto_optimise_preview_repair_streams: "java.lang.Boolean"\ncolumn_value_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ninternode_tcp_connect_timeout: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\npaxos_purge_grace_period: "org.apache.cassandra.config.DurationSpec.LongSecondsBound"\ncommitlog_sync_period: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nconcurrent_compactors: "java.lang.Integer"\nhint_window_persistent_enabled: "java.lang.Boolean"\nbuffer_pool_use_heap_if_exhausted: "java.lang.Boolean"\npaxos_contention_max_wait: "java.lang.String"\nnative_transport_receive_queue_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nconcurrent_index_builders: "java.lang.Integer"\nin_select_cartesian_product_warn_threshold: 
"java.lang.Integer"\nlocal_system_data_file_directory: "java.lang.String"\nstream_entire_sstables: "java.lang.Boolean"\nstream_transfer_task_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\ninternode_tcp_user_timeout: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ncorrupted_tombstone_strategy: "org.apache.cassandra.config.Config.CorruptedTombstoneStrategy"\npaxos_topology_repair_strict_each_quorum: "java.lang.Boolean"\nlisten_address: "java.lang.String"\ntop_partitions_enabled: "java.lang.Boolean"\nsstable_preemptive_open_interval: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nrpc_keepalive: "java.lang.Boolean"\nmin_free_space_per_drive: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nread_consistency_levels_warned: "java.util.Set"\nallow_extra_insecure_udfs: "java.lang.Boolean"\nbatch_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\ndisk_access_mode: "org.apache.cassandra.config.Config.DiskAccessMode"\nconcurrent_counter_writes: "java.lang.Integer"\ninternode_error_reporting_exclusions:\n subnets: "java.util.Set"\npaxos_contention_min_delta: "java.lang.String"\nsimplestrategy_enabled: "java.lang.Boolean"\ndynamic_snitch: "java.lang.Boolean"\nuse_deterministic_table_id: "java.lang.Boolean"\nphi_convict_threshold: "java.lang.Double"\nmax_hints_file_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nnative_transport_max_threads: "java.lang.Integer"\nsstable:\n selected_format: "java.lang.String"\n format: "java.util.Map"\nminimum_replication_factor_fail_threshold: "java.lang.Integer"\ntable_properties_disallowed: "java.util.Set"\ncollection_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nmemtable:\n configurations: "java.util.LinkedHashMap"\nauthenticator:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ninternode_socket_receive_buffer_size: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nallow_insecure_udfs: "java.lang.Boolean"\ninternode_application_receive_queue_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nuser_defined_functions_enabled: "java.lang.Boolean"\nuser_defined_functions_threads_enabled: "java.lang.Boolean"\nstreaming_slow_events_log_timeout: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\ntruncate_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nseverity_during_decommission: "java.lang.Double"\nperiodic_commitlog_sync_lag_block: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nrepair_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nauto_optimise_full_repair_streams: "java.lang.Boolean"\ncommitlog_segment_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nbatch_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nstreaming_state_size: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ndenylist_max_keys_total: "java.lang.Integer"\ndynamic_data_masking_enabled: "java.lang.Boolean"\nstorage_compatibility_mode: "org.apache.cassandra.utils.StorageCompatibilityMode"\nrepair_state_expires: "org.apache.cassandra.config.DurationSpec.LongNanosecondsBound"\nnative_transport_allow_older_protocols: "java.lang.Boolean"\notc_coalescing_enough_coalesced_messages: "java.lang.Integer"\ntransfer_hints_on_decommission: "java.lang.Boolean"\nreport_unconfirmed_repaired_data_mismatches: 
"java.lang.Boolean"\nfields_per_udt_warn_threshold: "java.lang.Integer"\npaxos_on_linearizability_violations: "org.apache.cassandra.config.Config.PaxosOnLinearizabilityViolation"\nread_consistency_levels_disallowed: "java.util.Set"\nuse_offheap_merkle_trees: "java.lang.Boolean"\nconcurrent_materialized_view_builders: "java.lang.Integer"\nserver_encryption_options:\n outbound_keystore: "java.lang.String"\n optional: "java.lang.Boolean"\n store_type: "java.lang.String"\n cipher_suites: "java.util.List"\n enabled: "java.lang.Boolean"\n outbound_keystore_password: "java.lang.String"\n require_endpoint_verification: "java.lang.Boolean"\n accepted_protocols: "java.util.List"\n keystore_password: "java.lang.String"\n protocol: "java.lang.String"\n require_client_auth: "java.lang.Boolean"\n internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"\n ssl_context_factory:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\n legacy_ssl_storage_port_enabled: "java.lang.Boolean"\n truststore_password: "java.lang.String"\n keystore: "java.lang.String"\n truststore: "java.lang.String"\n algorithm: "java.lang.String"\npartition_tombstones_fail_threshold: "java.lang.Long"\ntraverse_auth_from_root: "java.lang.Boolean"\ndenylist_refresh: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nmax_hints_delivery_threads: "java.lang.Integer"\npermissions_validity: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nmaterialized_views_enabled: "java.lang.Boolean"\nsaved_caches_directory: "java.lang.String"\ninternode_application_send_queue_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nmemtable_heap_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nmax_concurrent_automatic_sstable_upgrades: "java.lang.Integer"\nmaximum_replication_factor_warn_threshold: "java.lang.Integer"\ndenylist_reads_enabled: "java.lang.Boolean"\npermissions_cache_active_update: "java.lang.Boolean"\navailable_processors: "org.apache.cassandra.config.OptionaldPositiveInt"\nfile_cache_round_up: "java.lang.Boolean"\nsecondary_indexes_per_table_warn_threshold: "java.lang.Integer"\ntables_warn_threshold: "java.lang.Integer"\ncolumn_value_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nsnapshot_on_duplicate_row_detection: "java.lang.Boolean"\ninternode_application_receive_queue_reserve_global_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\ninternode_compression: "org.apache.cassandra.config.Config.InternodeCompression"\nrange_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nstreaming_stats_enabled: "java.lang.Boolean"\nlocal_read_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\notc_coalescing_window_us: "java.lang.Integer"\npage_size_fail_threshold: "java.lang.Integer"\ncredentials_cache_max_entries: "java.lang.Integer"\ncoordinator_read_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nrow_index_read_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nindex_summary_capacity: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\ncollection_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ncolumn_index_size: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nvalidation_preview_purge_head_start: 
"org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nskip_paxos_repair_on_topology_change_keyspaces: "java.util.Set"\nmaterialized_views_per_table_fail_threshold: "java.lang.Integer"\nmax_mutation_size: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nkeyspaces_fail_threshold: "java.lang.Integer"\ndata_disk_usage_percentage_warn_threshold: "java.lang.Integer"\nslow_query_log_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\npaxos_contention_min_wait: "java.lang.String"\nrepair_state_size: "java.lang.Integer"\nuser_defined_functions_warn_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nzero_ttl_on_twcs_warned: "java.lang.Boolean"\nnative_transport_port: "java.lang.Integer"\npermissions_cache_max_entries: "java.lang.Integer"\nhinted_handoff_throttle: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nrow_index_read_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nhints_compression:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nnative_transport_max_auth_threads: "java.lang.Integer"\nforce_new_prepared_statement_behaviour: "java.lang.Boolean"\nback_pressure_enabled: "java.lang.Boolean"\nmaterialized_views_per_table_warn_threshold: "java.lang.Integer"\ntransparent_data_encryption_options:\n cipher: "java.lang.String"\n chunk_length_kb: "java.lang.Integer"\n iv_length: "java.lang.Integer"\n key_alias: "java.lang.String"\n key_provider:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\n enabled: "java.lang.Boolean"\ninitial_range_tombstone_list_allocation_size: "java.lang.Integer"\npartitioner: "java.lang.String"\ngc_warn_threshold: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nlisten_interface: "java.lang.String"\nrow_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\nskip_stream_disk_space_check: "java.lang.Boolean"\npaxos_repair_enabled: "java.lang.Boolean"\ninter_dc_tcp_nodelay: "java.lang.Boolean"\ninternode_authenticator:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nkey_cache_keys_to_save: "java.lang.Integer"\ncrypto_provider:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nlocal_read_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nreplica_filtering_protection:\n cached_rows_fail_threshold: "java.lang.Integer"\n cached_rows_warn_threshold: "java.lang.Integer"\nrange_tombstone_list_growth_factor: "java.lang.Double"\nstartup_checks: "java.util.Map"\nmax_space_usable_for_compactions_in_percentage: "java.lang.Double"\nnative_transport_max_concurrent_connections: "java.lang.Long"\nmaximum_replication_factor_fail_threshold: "java.lang.Integer"\nmemtable_cleanup_threshold: "java.lang.Float"\nconcurrent_reads: "java.lang.Integer"\ncredentials_validity: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nstreaming_connections_per_host: "java.lang.Integer"\ncache_load_timeout: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nnative_transport_rate_limiting_enabled: "java.lang.Boolean"\nuse_statements_enabled: "java.lang.Boolean"\nauto_hints_cleanup_enabled: "java.lang.Boolean"\nauto_snapshot: "java.lang.Boolean"\nskip_paxos_repair_on_topology_change: "java.lang.Boolean"\nendpoint_snitch: "java.lang.String"\n
dataset_sample\yaml\apache_cassandra\test\data\config\version=5.0-alpha1.yml
version=5.0-alpha1.yml
YAML
27,643
0.95
0.004049
0.034413
node-utils
662
2024-08-18T17:07:18.361958
MIT
true
ad579642652f137ea24512cd1c69bda4
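
Comparing this 5.0-alpha1 record with the 4.1-alpha1 record above surfaces the schema drift between releases: new keys appear (sai_options, cidr_authorizer, default_compaction), and some keys change shape, e.g. authorizer moves from a plain "java.lang.String" to a nested class_name/parameters block. A hypothetical sketch of such a diff, again assuming PyYAML and using illustrative helper names rather than any real Cassandra upgrade tooling:

import yaml  # PyYAML; assumed available

def flatten(node, path=()):
    """Flatten a nested mapping into {dotted_key: leaf_value}."""
    out = {}
    if isinstance(node, dict):
        for key, value in node.items():
            out.update(flatten(value, path + (key,)))
    else:
        out[".".join(path)] = node
    return out

def diff_versions(old_text, new_text):
    """Report keys added, removed, or retyped between two version maps."""
    old = flatten(yaml.safe_load(old_text))
    new = flatten(yaml.safe_load(new_text))
    added = sorted(new.keys() - old.keys())
    removed = sorted(old.keys() - new.keys())
    retyped = sorted(k for k in old.keys() & new.keys() if old[k] != new[k])
    return added, removed, retyped

# Excerpts modeled on the two records: authorizer is restructured in 5.0.
old = 'authorizer: "java.lang.String"\nnum_tokens: "java.lang.Integer"\n'
new = ('authorizer:\n  class_name: "java.lang.String"\n'
       '  parameters: "java.util.Map"\nnum_tokens: "java.lang.Integer"\n')
added, removed, retyped = diff_versions(old, new)
print(added)    # ['authorizer.class_name', 'authorizer.parameters']
print(removed)  # ['authorizer']
print(retyped)  # []

Because the scalar key is replaced by a nested block, the diff reports it as a removal plus two additions rather than a retyping, which matches how the flattened dotted keys actually change between the two records.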
# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n---\ncolumns_per_table_warn_threshold: "java.lang.Integer"\nrepaired_data_tracking_for_range_reads_enabled: "java.lang.Boolean"\ncdc_block_writes: "java.lang.Boolean"\nblock_for_peers_timeout_in_secs: "java.lang.Integer"\nflush_compression: "org.apache.cassandra.config.Config.FlushCompression"\ncommitlog_total_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\naudit_logging_options:\n audit_logs_dir: "java.lang.String"\n included_users: "java.lang.String"\n logger:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\n excluded_categories: "java.lang.String"\n roll_cycle: "java.lang.String"\n enabled: "java.lang.Boolean"\n included_categories: "java.lang.String"\n max_archive_retries: "java.lang.Integer"\n excluded_keyspaces: "java.lang.String"\n archive_command: "java.lang.String"\n included_keyspaces: "java.lang.String"\n max_log_size: "java.lang.Long"\n allow_nodetool_archive_command: "java.lang.Boolean"\n block: "java.lang.Boolean"\n excluded_users: "java.lang.String"\n max_queue_weight: "java.lang.Integer"\nrow_cache_save_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nsnapshot_links_per_second: "java.lang.Long"\ndisk_optimization_estimate_percentile: "java.lang.Double"\nroles_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nhinted_handoff_disabled_datacenters: "java.util.Set"\ncdc_enabled: "java.lang.Boolean"\ninternode_application_receive_queue_reserve_endpoint_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nroles_validity: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ncoordinator_read_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nscripted_user_defined_functions_enabled: "java.lang.Boolean"\nauth_cache_warming_enabled: "java.lang.Boolean"\nalter_table_enabled: "java.lang.Boolean"\nclient_request_size_metrics_enabled: "java.lang.Boolean"\ncdc_on_repair_enabled: "java.lang.Boolean"\nentire_sstable_stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\npartition_tombstones_warn_threshold: "java.lang.Long"\nroles_cache_active_update: "java.lang.Boolean"\ncommitlog_sync: "org.apache.cassandra.config.Config.CommitLogSync"\npaxos_repair_parallelism: "java.lang.Integer"\ndynamic_snitch_reset_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ndynamic_snitch_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nnative_transport_idle_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\ninternode_application_send_queue_reserve_global_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nconcurrent_materialized_view_writes: 
"java.lang.Integer"\nread_before_write_list_operations_enabled: "java.lang.Boolean"\ntable_properties_ignored: "java.util.Set"\nmaximum_timestamp_warn_threshold: "org.apache.cassandra.config.DurationSpec.LongMicrosecondsBound"\nhints_directory: "java.lang.String"\ndump_heap_on_uncaught_exception: "java.lang.Boolean"\nnative_transport_max_concurrent_connections_per_ip: "java.lang.Long"\nsecondary_indexes_per_table_fail_threshold: "java.lang.Integer"\nvector_dimensions_warn_threshold: "java.lang.Integer"\nauto_snapshot_ttl: "java.lang.String"\nrpc_interface_prefer_ipv6: "java.lang.Boolean"\ncheck_for_duplicate_rows_during_compaction: "java.lang.Boolean"\ngc_log_threshold: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nsecondary_indexes_enabled: "java.lang.Boolean"\nuser_function_timeout_policy: "org.apache.cassandra.config.Config.UserFunctionTimeoutPolicy"\nmemtable_allocation_type: "org.apache.cassandra.config.Config.MemtableAllocationType"\nhints_flush_period: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ndefault_keyspace_rf: "java.lang.Integer"\ntombstone_warn_threshold: "java.lang.Integer"\nwrite_consistency_levels_warned: "java.util.Set"\nfull_query_logging_options:\n log_dir: "java.lang.String"\n archive_command: "java.lang.String"\n max_log_size: "java.lang.Long"\n allow_nodetool_archive_command: "java.lang.Boolean"\n block: "java.lang.Boolean"\n roll_cycle: "java.lang.String"\n max_queue_weight: "java.lang.Integer"\n max_archive_retries: "java.lang.Integer"\nstreaming_keep_alive_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nauto_optimise_inc_repair_streams: "java.lang.Boolean"\nuser_defined_functions_fail_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nwrite_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nmin_tracked_partition_tombstone_count: "java.lang.Long"\nsasi_indexes_enabled: "java.lang.Boolean"\ndisk_failure_policy: "org.apache.cassandra.config.Config.DiskFailurePolicy"\nheap_dump_path: "java.lang.String"\ntombstone_failure_threshold: "java.lang.Integer"\nnative_transport_max_request_data_in_flight: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ninitial_token: "java.lang.String"\nkeyspaces_warn_threshold: "java.lang.Integer"\ndynamic_snitch_badness_threshold: "java.lang.Double"\ntransient_replication_enabled: "java.lang.Boolean"\nallocate_tokens_for_local_replication_factor: "java.lang.Integer"\nnative_transport_max_requests_per_second: "java.lang.Integer"\ncounter_cache_keys_to_save: "java.lang.Integer"\ndisk_optimization_page_cross_chance: "java.lang.Double"\nin_select_cartesian_product_fail_threshold: "java.lang.Integer"\nlisten_on_broadcast_address: "java.lang.Boolean"\nrow_cache_class_name: "java.lang.String"\npermissions_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ndenylist_initial_load_retry: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\ntrace_type_query_ttl: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\ndisk_optimization_strategy: "org.apache.cassandra.config.Config.DiskOptimizationStrategy"\ndrop_keyspace_enabled: "java.lang.Boolean"\notc_backlog_expiration_interval_ms: "java.lang.Integer"\nnative_transport_max_request_data_in_flight_per_ip: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ncounter_cache_save_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\ntables_fail_threshold: "java.lang.Integer"\ntrace_type_repair_ttl: 
"org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nminimum_replication_factor_warn_threshold: "java.lang.Integer"\npage_size_warn_threshold: "java.lang.Integer"\nrole_manager:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ncounter_write_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\ndenylist_writes_enabled: "java.lang.Boolean"\nblock_for_peers_in_remote_dcs: "java.lang.Boolean"\ninter_dc_stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\nrepair_command_pool_size: "java.lang.Integer"\ndenylist_consistency_level: "org.apache.cassandra.db.ConsistencyLevel"\ntable_properties_warned: "java.util.Set"\nentire_sstable_inter_dc_stream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\ndata_file_directories: "java.util.List"\ndefault_secondary_index: "java.lang.String"\nnetworking_cache_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nfields_per_udt_fail_threshold: "java.lang.Integer"\nkey_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\nmax_hint_window: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nsai_options:\n segment_write_buffer_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nvector_dimensions_fail_threshold: "java.lang.Integer"\nmax_hints_size_per_host: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\npartition_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nmaximum_timestamp_fail_threshold: "org.apache.cassandra.config.DurationSpec.LongMicrosecondsBound"\nminimum_timestamp_warn_threshold: "org.apache.cassandra.config.DurationSpec.LongMicrosecondsBound"\nclient_error_reporting_exclusions:\n subnets: "java.util.Set"\nseed_provider:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ncheck_for_duplicate_rows_during_reads: "java.lang.Boolean"\ninternode_max_message_size: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nitems_per_collection_warn_threshold: "java.lang.Integer"\nkey_cache_migrate_during_compaction: "java.lang.Boolean"\nsstable_read_rate_persistence_enabled: "java.lang.Boolean"\nnetwork_authorizer:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ndata_disk_usage_max_disk_size: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nmemtable_offheap_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nprepared_statements_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\npaxos_contention_wait_randomizer: "java.lang.String"\ncommitlog_compression:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nbroadcast_address: "java.lang.String"\nrepair_session_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\npartition_denylist_enabled: "java.lang.Boolean"\npaxos_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\nsnapshot_before_compaction: "java.lang.Boolean"\nback_pressure_strategy:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nallocate_tokens_for_keyspace: "java.lang.String"\nuuid_sstable_identifiers_enabled: "java.lang.Boolean"\ncredentials_cache_active_update: "java.lang.Boolean"\ndiagnostic_events_enabled: "java.lang.Boolean"\nfailure_detector: "java.lang.String"\nstorage_port: "java.lang.Integer"\ndrop_compact_storage_enabled: "java.lang.Boolean"\nuncompressed_tables_enabled: "java.lang.Boolean"\ncommitlog_sync_group_window: 
"org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nauth_read_consistency_level: "java.lang.String"\nautocompaction_on_startup_enabled: "java.lang.Boolean"\nitems_per_collection_fail_threshold: "java.lang.Integer"\npaxos_state_purging: "org.apache.cassandra.config.Config.PaxosStatePurging"\ncolumn_index_cache_size: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\ncommit_failure_policy: "org.apache.cassandra.config.Config.CommitFailurePolicy"\nconcurrent_writes: "java.lang.Integer"\nmax_value_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nread_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nhinted_handoff_enabled: "java.lang.Boolean"\nautomatic_sstable_upgrade: "java.lang.Boolean"\nmemtable_flush_writers: "java.lang.Integer"\notc_coalescing_strategy: "java.lang.String"\nsnapshot_on_repaired_data_mismatch: "java.lang.Boolean"\ncidr_authorizer:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ncommitlog_max_compression_buffers_in_pool: "java.lang.Integer"\nzero_ttl_on_twcs_enabled: "java.lang.Boolean"\ncdc_free_space_check_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nroles_cache_max_entries: "java.lang.Integer"\nallow_filtering_enabled: "java.lang.Boolean"\ncolumns_per_table_fail_threshold: "java.lang.Integer"\nstart_native_transport: "java.lang.Boolean"\nssl_storage_port: "java.lang.Integer"\ncluster_name: "java.lang.String"\ncredentials_update_interval: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nincremental_backups: "java.lang.Boolean"\ninternode_socket_send_buffer_size: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\ndefault_compaction:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nmin_tracked_partition_size: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ncas_contention_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nkey_cache_save_period: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nwindows_timer_interval: "java.lang.Integer"\nrpc_interface: "java.lang.String"\nrow_cache_keys_to_save: "java.lang.Integer"\ncompact_tables_enabled: "java.lang.Boolean"\npartition_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nminimum_timestamp_fail_threshold: "org.apache.cassandra.config.DurationSpec.LongMicrosecondsBound"\nrepair_command_pool_full_strategy: "org.apache.cassandra.config.Config.RepairCommandPoolFullStrategy"\ndrop_truncate_table_enabled: "java.lang.Boolean"\nmax_top_size_partition_count: "java.lang.Integer"\nclient_encryption_options:\n optional: "java.lang.Boolean"\n store_type: "java.lang.String"\n cipher_suites: "java.util.List"\n enabled: "java.lang.Boolean"\n require_endpoint_verification: "java.lang.Boolean"\n accepted_protocols: "java.util.List"\n keystore_password: "java.lang.String"\n protocol: "java.lang.String"\n require_client_auth: "java.lang.String"\n ssl_context_factory:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\n truststore_password: "java.lang.String"\n keystore: "java.lang.String"\n truststore: "java.lang.String"\n algorithm: "java.lang.String"\nconcurrent_validations: "java.lang.Integer"\nideal_consistency_level: "org.apache.cassandra.db.ConsistencyLevel"\npartition_keys_in_select_warn_threshold: "java.lang.Integer"\nconsecutive_message_errors_threshold: "java.lang.Integer"\nread_thresholds_enabled: "java.lang.Boolean"\ninternode_application_send_queue_reserve_endpoint_capacity: 
"org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\npartition_keys_in_select_fail_threshold: "java.lang.Integer"\ntrickle_fsync: "java.lang.Boolean"\ngroup_by_enabled: "java.lang.Boolean"\nreject_repair_compaction_threshold: "java.lang.Integer"\nrequest_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\ncdc_total_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\ndefault_secondary_index_enabled: "java.lang.Boolean"\npaxos_variant: "org.apache.cassandra.config.Config.PaxosVariant"\nindex_summary_resize_interval: "org.apache.cassandra.config.DurationSpec.IntMinutesBound"\nmax_top_tombstone_partition_count: "java.lang.Integer"\ndata_disk_usage_percentage_fail_threshold: "java.lang.Integer"\nmax_streaming_retries: "java.lang.Integer"\ninternode_timeout: "java.lang.Boolean"\nnative_transport_flush_in_batches_legacy: "java.lang.Boolean"\nrpc_address: "java.lang.String"\ntrickle_fsync_interval: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nfile_cache_enabled: "java.lang.Boolean"\ncdc_raw_directory: "java.lang.String"\nnum_tokens: "java.lang.Integer"\nnative_transport_max_frame_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nuser_timestamps_enabled: "java.lang.Boolean"\npaxos_topology_repair_no_dc_checks: "java.lang.Boolean"\nrepaired_data_tracking_for_partition_reads_enabled: "java.lang.Boolean"\ninternode_streaming_tcp_user_timeout: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nfile_cache_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\ndenylist_max_keys_per_table: "java.lang.Integer"\ncommitlog_directory: "java.lang.String"\nunlogged_batch_across_partitions_warn_threshold: "java.lang.Integer"\nwrite_consistency_levels_disallowed: "java.util.Set"\nauto_bootstrap: "java.lang.Boolean"\nauthorizer:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nauth_write_consistency_level: "java.lang.String"\ncounter_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\ndenylist_range_reads_enabled: "java.lang.Boolean"\nbatchlog_replay_throttle: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\ncompaction_throughput: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\nstream_throughput_outbound: "org.apache.cassandra.config.DataRateSpec.LongBytesPerSecondBound"\nstreaming_state_expires: "org.apache.cassandra.config.DurationSpec.LongNanosecondsBound"\nbroadcast_rpc_address: "java.lang.String"\nlisten_interface_prefer_ipv6: "java.lang.Boolean"\nrepair_session_max_tree_depth: "java.lang.Integer"\nauto_optimise_preview_repair_streams: "java.lang.Boolean"\ncolumn_value_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ninternode_tcp_connect_timeout: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\npaxos_purge_grace_period: "org.apache.cassandra.config.DurationSpec.LongSecondsBound"\ncommitlog_sync_period: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nconcurrent_compactors: "java.lang.Integer"\nhint_window_persistent_enabled: "java.lang.Boolean"\nbuffer_pool_use_heap_if_exhausted: "java.lang.Boolean"\npaxos_contention_max_wait: "java.lang.String"\nnative_transport_receive_queue_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nconcurrent_index_builders: "java.lang.Integer"\nin_select_cartesian_product_warn_threshold: "java.lang.Integer"\nlocal_system_data_file_directory: "java.lang.String"\nstream_entire_sstables: 
"java.lang.Boolean"\nstream_transfer_task_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\ninternode_tcp_user_timeout: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\ncorrupted_tombstone_strategy: "org.apache.cassandra.config.Config.CorruptedTombstoneStrategy"\npaxos_topology_repair_strict_each_quorum: "java.lang.Boolean"\nlisten_address: "java.lang.String"\ntop_partitions_enabled: "java.lang.Boolean"\nsstable_preemptive_open_interval: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nrpc_keepalive: "java.lang.Boolean"\nmin_free_space_per_drive: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nread_consistency_levels_warned: "java.util.Set"\nallow_extra_insecure_udfs: "java.lang.Boolean"\nbatch_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\ndisk_access_mode: "org.apache.cassandra.config.Config.DiskAccessMode"\nconcurrent_counter_writes: "java.lang.Integer"\ninternode_error_reporting_exclusions:\n subnets: "java.util.Set"\npaxos_contention_min_delta: "java.lang.String"\nsimplestrategy_enabled: "java.lang.Boolean"\ndynamic_snitch: "java.lang.Boolean"\nuse_deterministic_table_id: "java.lang.Boolean"\nphi_convict_threshold: "java.lang.Double"\nmax_hints_file_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nnative_transport_max_threads: "java.lang.Integer"\nsstable:\n selected_format: "java.lang.String"\n format: "java.util.Map"\nminimum_replication_factor_fail_threshold: "java.lang.Integer"\ntable_properties_disallowed: "java.util.Set"\ncollection_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nmemtable:\n configurations: "java.util.LinkedHashMap"\nauthenticator:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\ninternode_socket_receive_buffer_size: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nallow_insecure_udfs: "java.lang.Boolean"\ninternode_application_receive_queue_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nuser_defined_functions_enabled: "java.lang.Boolean"\nuser_defined_functions_threads_enabled: "java.lang.Boolean"\nstreaming_slow_events_log_timeout: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\ntruncate_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nseverity_during_decommission: "java.lang.Double"\nperiodic_commitlog_sync_lag_block: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nrepair_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nauto_optimise_full_repair_streams: "java.lang.Boolean"\ncommitlog_segment_size: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nbatch_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nstreaming_state_size: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ndenylist_max_keys_total: "java.lang.Integer"\ndynamic_data_masking_enabled: "java.lang.Boolean"\nstorage_compatibility_mode: "org.apache.cassandra.utils.StorageCompatibilityMode"\nrepair_state_expires: "org.apache.cassandra.config.DurationSpec.LongNanosecondsBound"\nnative_transport_allow_older_protocols: "java.lang.Boolean"\notc_coalescing_enough_coalesced_messages: "java.lang.Integer"\ntransfer_hints_on_decommission: "java.lang.Boolean"\nreport_unconfirmed_repaired_data_mismatches: "java.lang.Boolean"\nfields_per_udt_warn_threshold: "java.lang.Integer"\npaxos_on_linearizability_violations: 
"org.apache.cassandra.config.Config.PaxosOnLinearizabilityViolation"\nread_consistency_levels_disallowed: "java.util.Set"\nuse_offheap_merkle_trees: "java.lang.Boolean"\nconcurrent_materialized_view_builders: "java.lang.Integer"\nserver_encryption_options:\n outbound_keystore: "java.lang.String"\n optional: "java.lang.Boolean"\n store_type: "java.lang.String"\n cipher_suites: "java.util.List"\n enabled: "java.lang.Boolean"\n outbound_keystore_password: "java.lang.String"\n require_endpoint_verification: "java.lang.Boolean"\n accepted_protocols: "java.util.List"\n keystore_password: "java.lang.String"\n protocol: "java.lang.String"\n require_client_auth: "java.lang.String"\n internode_encryption: "org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions.InternodeEncryption"\n ssl_context_factory:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\n legacy_ssl_storage_port_enabled: "java.lang.Boolean"\n truststore_password: "java.lang.String"\n keystore: "java.lang.String"\n truststore: "java.lang.String"\n algorithm: "java.lang.String"\n max_certificate_validity_period: "org.apache.cassandra.config.DurationSpec.IntMinutesBound"\n certificate_validity_warn_threshold: "org.apache.cassandra.config.DurationSpec.IntMinutesBound"\npartition_tombstones_fail_threshold: "java.lang.Long"\ntraverse_auth_from_root: "java.lang.Boolean"\ndenylist_refresh: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nmax_hints_delivery_threads: "java.lang.Integer"\npermissions_validity: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nmaterialized_views_enabled: "java.lang.Boolean"\nsaved_caches_directory: "java.lang.String"\ninternode_application_send_queue_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\nmemtable_heap_space: "org.apache.cassandra.config.DataStorageSpec.IntMebibytesBound"\nmax_concurrent_automatic_sstable_upgrades: "java.lang.Integer"\nmaximum_replication_factor_warn_threshold: "java.lang.Integer"\ndenylist_reads_enabled: "java.lang.Boolean"\npermissions_cache_active_update: "java.lang.Boolean"\navailable_processors: "org.apache.cassandra.config.OptionaldPositiveInt"\nfile_cache_round_up: "java.lang.Boolean"\nsecondary_indexes_per_table_warn_threshold: "java.lang.Integer"\ntables_warn_threshold: "java.lang.Integer"\ncolumn_value_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nsnapshot_on_duplicate_row_detection: "java.lang.Boolean"\ninternode_application_receive_queue_reserve_global_capacity: "org.apache.cassandra.config.DataStorageSpec.IntBytesBound"\ninternode_compression: "org.apache.cassandra.config.Config.InternodeCompression"\nrange_request_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nstreaming_stats_enabled: "java.lang.Boolean"\nlocal_read_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\notc_coalescing_window_us: "java.lang.Integer"\npage_size_fail_threshold: "java.lang.Integer"\ncredentials_cache_max_entries: "java.lang.Integer"\ncoordinator_read_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nrow_index_read_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nindex_summary_capacity: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\ncollection_size_fail_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\ncolumn_index_size: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nvalidation_preview_purge_head_start: 
"org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nskip_paxos_repair_on_topology_change_keyspaces: "java.util.Set"\nmaterialized_views_per_table_fail_threshold: "java.lang.Integer"\nmax_mutation_size: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nkeyspaces_fail_threshold: "java.lang.Integer"\ndata_disk_usage_percentage_warn_threshold: "java.lang.Integer"\nslow_query_log_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\npaxos_contention_min_wait: "java.lang.String"\nrepair_state_size: "java.lang.Integer"\nuser_defined_functions_warn_timeout: "org.apache.cassandra.config.DurationSpec.LongMillisecondsBound"\nzero_ttl_on_twcs_warned: "java.lang.Boolean"\nnative_transport_port: "java.lang.Integer"\npermissions_cache_max_entries: "java.lang.Integer"\nhinted_handoff_throttle: "org.apache.cassandra.config.DataStorageSpec.IntKibibytesBound"\nrow_index_read_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nhints_compression:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nnative_transport_max_auth_threads: "java.lang.Integer"\nforce_new_prepared_statement_behaviour: "java.lang.Boolean"\nback_pressure_enabled: "java.lang.Boolean"\nmaterialized_views_per_table_warn_threshold: "java.lang.Integer"\ntransparent_data_encryption_options:\n cipher: "java.lang.String"\n chunk_length_kb: "java.lang.Integer"\n iv_length: "java.lang.Integer"\n key_alias: "java.lang.String"\n key_provider:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\n enabled: "java.lang.Boolean"\ninitial_range_tombstone_list_allocation_size: "java.lang.Integer"\npartitioner: "java.lang.String"\ngc_warn_threshold: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nlisten_interface: "java.lang.String"\nrow_cache_size: "org.apache.cassandra.config.DataStorageSpec.LongMebibytesBound"\nskip_stream_disk_space_check: "java.lang.Boolean"\npaxos_repair_enabled: "java.lang.Boolean"\ninter_dc_tcp_nodelay: "java.lang.Boolean"\ninternode_authenticator:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nkey_cache_keys_to_save: "java.lang.Integer"\ncrypto_provider:\n class_name: "java.lang.String"\n parameters: "java.util.Map"\nlocal_read_size_warn_threshold: "org.apache.cassandra.config.DataStorageSpec.LongBytesBound"\nreplica_filtering_protection:\n cached_rows_fail_threshold: "java.lang.Integer"\n cached_rows_warn_threshold: "java.lang.Integer"\nrange_tombstone_list_growth_factor: "java.lang.Double"\nstartup_checks: "java.util.Map"\nmax_space_usable_for_compactions_in_percentage: "java.lang.Double"\nnative_transport_max_concurrent_connections: "java.lang.Long"\nmaximum_replication_factor_fail_threshold: "java.lang.Integer"\nmemtable_cleanup_threshold: "java.lang.Float"\nconcurrent_reads: "java.lang.Integer"\ncredentials_validity: "org.apache.cassandra.config.DurationSpec.IntMillisecondsBound"\nstreaming_connections_per_host: "java.lang.Integer"\ncache_load_timeout: "org.apache.cassandra.config.DurationSpec.IntSecondsBound"\nnative_transport_rate_limiting_enabled: "java.lang.Boolean"\nuse_statements_enabled: "java.lang.Boolean"\nauto_hints_cleanup_enabled: "java.lang.Boolean"\nauto_snapshot: "java.lang.Boolean"\nskip_paxos_repair_on_topology_change: "java.lang.Boolean"\nendpoint_snitch: "java.lang.String"\n
dataset_sample\yaml\apache_cassandra\test\data\config\version=5.1-alpha1.yml
version=5.1-alpha1.yml
YAML
27,786
0.95
0.00404
0.034343
vue-tools
209
2024-12-20T06:12:15.965356
Apache-2.0
true
bbb57374197db3fd2291d1c9cfcc5517
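The Cassandra record above maps every cassandra.yaml setting to the fully qualified Java type that parses it, with nested settings (client_encryption_options, sstable, and so on) kept as nested mappings, so the file can double as a lightweight schema for config tooling. A minimal sketch of that use in Python, assuming PyYAML is installed and the record's escaped \n sequences have been restored to real newlines before the file is written out; the leaf_types helper is illustrative, not part of Cassandra's own tooling.

import yaml  # PyYAML, assumed available

def leaf_types(node, prefix=""):
    # Walk the nested mapping and yield (dotted_setting, java_type) pairs.
    for key, value in node.items():
        path = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            yield from leaf_types(value, path)
        else:
            yield path, value

with open("version=5.1-alpha1.yml") as fh:
    schema = yaml.safe_load(fh)

for setting, java_type in leaf_types(schema):
    # Every leaf should name a Java class, e.g. java.lang.Boolean or
    # org.apache.cassandra.config.DurationSpec.IntMillisecondsBound.
    assert isinstance(java_type, str) and "." in java_type, setting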
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Number of days of inactivity before an issue becomes stale\ndaysUntilStale: 120\n# Number of days of inactivity before a stale issue is closed\ndaysUntilClose: 14\n# Issues with these labels will never be considered stale\nexemptLabels:\n - pinned\n - security\n# Label to use when marking an issue as stale\nstaleLabel: wontfix\n# Comment to post when marking an issue as stale. Set to `false` to disable\nmarkComment: >\n This issue has been automatically marked as stale because it has not had\n recent activity. It will be closed if no further activity occurs. Thank you\n for your contributions.\n# Comment to post when closing a stale issue. Set to `false` to disable\ncloseComment: false\n
dataset_sample\yaml\apache_drill\.github\stale.yml
stale.yml
YAML
1,471
0.95
0.114286
0.676471
vue-tools
636
2024-08-03T04:44:25.960325
GPL-3.0
false
1a49e977563d206974f6895405f37426
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# This workflow is triggered for each push event to the master branch or pull request.\n# It contains two jobs:\n# - "Main Build" job runs all unit tests for two LTS java versions and the latest Java version.\n# - "Run checkstyle and generate protobufs" job builds Drill and checks its license,\n# builds Drill native client and regenerates protobufs to ensure that committed files are up-to-date.\nname: Github CI\n\non: [push, pull_request]\n\njobs:\n build:\n name: Main Build\n runs-on: ubuntu-latest\n if: github.repository == 'apache/drill'\n timeout-minutes: 150\n strategy:\n matrix:\n # Java versions to run unit tests\n java: [ '8', '11', '17' ]\n profile: ['default-hadoop']\n include:\n - java: '8'\n profile: 'hadoop-2'\n fail-fast: false\n steps:\n - name: Checkout\n uses: actions/checkout@v4\n - name: Setup java\n uses: actions/setup-java@v4\n with:\n distribution: 'temurin'\n java-version: ${{ matrix.java }}\n cache: 'maven'\n - name: Set up swap space\n # Linux Action Runners come with 7GB of RAM which isn't quite enough\n # to run the test suite. Two workarounds are used below: a swap file is\n # added to the Runner and memory hungry tests are run separately.\n run: |\n sudo sh -c "\n fallocate -l 2G /tmp/swapfile\n chmod 0600 /tmp/swapfile\n mkswap /tmp/swapfile\n swapon /tmp/swapfile\n "\n - name: Build and test\n run: |\n mvn -P${{ matrix.profile }} install --batch-mode --no-transfer-progress \\n -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.httpconnectionManager.ttlSeconds=120\n - name: Remove swap space\n run : |\n sudo sh -c "\n free -h\n swapoff /tmp/swapfile\n rm /tmp/swapfile\n "\n checkstyle_protobuf:\n name: Run checkstyle and generate protobufs\n runs-on: ubuntu-22.04\n if: github.repository == 'apache/drill'\n steps:\n - name: Checkout\n uses: actions/checkout@v4\n - name: Setup java\n uses: actions/setup-java@v4\n with:\n distribution: 'temurin'\n java-version: '8'\n cache: 'maven'\n # Caches built protobuf library\n - name: Cache protobufs\n uses: actions/cache@v4\n with:\n path: ~/protobuf\n key: ${{ runner.os }}-protobuf\n # Install libraries required for protobuf generation\n - name: Install dependencies\n run: |\n sudo apt update -y && sudo apt install -y libboost-all-dev libzookeeper-mt-dev libsasl2-dev cmake libcppunit-dev checkinstall && \\n pushd .. && \\n if [ -f $HOME/protobuf/protobuf_3.16.3* ]; then \\n sudo dpkg -i $HOME/protobuf/protobuf_3.16.3*; \\n else \\n wget https://github.com/protocolbuffers/protobuf/releases/download/v3.16.3/protobuf-java-3.16.3.zip && \\n unzip protobuf-java-3.16.3.zip && pushd protobuf-3.16.3 && \\n ./configure && sudo make && sudo checkinstall -y && \\n if [ !
-d $HOME/protobuf ]; then \\n mkdir -p $HOME/protobuf; \\n fi && \\n mv protobuf_3.16.3* $HOME/protobuf/ && popd; \\n fi && \\n sudo ldconfig && popd; \\n # Builds Drill project, performs license checkstyle goal and regenerates java and C++ protobuf files\n - name: Build\n run: |\n MAVEN_OPTS="-Xms1G -Xmx1G" mvn install -Drat.skip=false -Dlicense.skip=false --batch-mode --no-transfer-progress -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.httpconnectionManager.ttlSeconds=120 -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -DskipTests=true -Dmaven.javadoc.skip=true -Dmaven.source.skip=true && \\n pushd protocol && mvn process-sources -P proto-compile && popd && \\n mkdir contrib/native/client/build && pushd contrib/native/client/build && cmake -G "Unix Makefiles" .. && make cpProtobufs && popd; \\n # Checks whether project files weren't changed after regenerating protobufs\n - name: Check protobufs\n run: |\n if [ "$(git status -s | grep -c "")" -gt 0 ]; then \\n echo "The following changes are found in files after regenerating protobufs (output may be used as a patch to apply):" >&2 &&\n echo "$(git diff --color)" && \\n exit 1; \\n else\n echo "All checks are passed!";\n fi\n
dataset_sample\yaml\apache_drill\.github\workflows\ci.yml
ci.yml
YAML
5,358
0.95
0.072
0.245902
vue-tools
179
2025-04-26T21:36:33.568082
MIT
false
558c392d4eb9121d7825e8ab94e8c7c5
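The Check protobufs step in the Drill workflow above reduces to a single invariant: after regenerating the protobuf sources, the working tree must be unchanged, otherwise the committed files were stale. A small Python equivalent of that check, using git status --porcelain in place of the grep -c line counting; only the standard library is needed, and the function name is illustrative.

import subprocess

def tree_is_clean() -> bool:
    # 'git status --porcelain' prints one line per modified or untracked
    # file, so empty output means the regenerated sources match the commit.
    out = subprocess.run(
        ["git", "status", "--porcelain"],
        capture_output=True, text=True, check=True,
    ).stdout
    return out.strip() == ""

if not tree_is_clean():
    diff = subprocess.run(["git", "diff"], capture_output=True, text=True).stdout
    raise SystemExit("Regenerated protobufs differ from committed files:\n" + diff)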
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# For most projects, this workflow file will not need changing; you simply need\n# to commit it to your repository.\n#\n# You may wish to alter this file to override the set of languages analyzed,\n# or to provide custom queries or build logic.\n#\n# ******** NOTE ********\n# We have attempted to detect the languages in your repository. Please check\n# the `language` matrix defined below to confirm you have the correct set of\n# supported CodeQL languages.\n#\nname: "CodeQL"\n\non:\n push:\n branches: [ master ]\n pull_request:\n # The branches below must be a subset of the branches above\n branches: [ master ]\n schedule:\n - cron: '33 21 * * 5'\n\njobs:\n analyze:\n name: Analyze\n runs-on: ubuntu-latest\n if: github.repository == 'apache/drill'\n permissions:\n actions: read\n contents: read\n security-events: write\n\n strategy:\n fail-fast: false\n matrix:\n language: [ 'java', 'javascript' ]\n # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]\n # Learn more about CodeQL language support at https://git.io/codeql-language-support\n\n steps:\n - name: Checkout repository\n uses: actions/checkout@v4\n\n # Initializes the CodeQL tools for scanning.\n - name: Initialize CodeQL\n uses: github/codeql-action/init@v2\n with:\n languages: ${{ matrix.language }}\n # If you wish to specify custom queries, you can do so here or in a config file.\n # By default, queries listed here will override any specified in a config file.\n # Prefix the list here with "+" to use these queries and those in the config file.\n # queries: ./path/to/local/query, your-org/your-repo/queries@main\n\n # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).\n # If this step fails, then you should remove it and run the build manually (see below)\n - name: Autobuild\n uses: github/codeql-action/autobuild@v2\n\n # ℹ️ Command-line programs to run using the OS shell.\n # 📚 https://git.io/JvXDl\n\n # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines\n # and modify them (or add more) to build your code if your project\n # uses a compiled language\n\n #- run: |\n # make bootstrap\n # make release\n\n - name: Perform CodeQL Analysis\n uses: github/codeql-action/analyze@v2\n
dataset_sample\yaml\apache_drill\.github\workflows\codeql-analysis.yml
codeql-analysis.yml
YAML
3,179
0.95
0.05618
0.589744
react-lib
822
2024-05-27T03:15:09.861983
GPL-3.0
false
27b7294e16ddd2b7833b000e1de68293
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nname: Publish snapshot artifacts\n\non:\n push:\n branches:\n - master\n\njobs:\n publish:\n name: Publish snapshot artifacts\n runs-on: ubuntu-latest\n if: github.repository == 'apache/drill'\n steps:\n - name: Checkout\n uses: actions/checkout@v4\n - name: Cache Maven Repository\n uses: actions/cache@v4\n with:\n path: ~/.m2/repository\n key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}\n restore-keys: |\n ${{ runner.os }}-maven-\n - name: Deploy Maven snapshots\n env:\n ASF_USERNAME: ${{ secrets.NEXUS_USER }}\n ASF_PASSWORD: ${{ secrets.NEXUS_PW }}\n run: |\n echo "<settings><servers><server><id>apache.snapshots.https</id><username>$ASF_USERNAME</username><password>$ASF_PASSWORD</password></server></servers></settings>" > settings.xml\n mvn --settings settings.xml -U -B -e -fae -ntp -DskipTests deploy\n
dataset_sample\yaml\apache_drill\.github\workflows\publish-snapshot.yml
publish-snapshot.yml
YAML
1,728
0.95
0.06383
0.386364
vue-tools
405
2024-11-27T11:17:12.186539
BSD-3-Clause
false
06a1a4388600049d8b0eff02b8bb52be
#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n# Pull Request Labeler Github Action Configuration: https://github.com/marketplace/actions/labeler\n\ntrunk:\n - '**'\nINFRA:\n - .asf.yaml\n - .gitattributes\n - .gitignore\n - .github/**\n - dev-support/**\n - start-build-env.sh\nBUILD:\n - '**/pom.xml'\nCOMMON:\n - hadoop-common-project/**\nHDFS:\n - hadoop-hdfs-project/**\nRBF:\n - hadoop-hdfs-project/hadoop-hdfs-rbf/**\nNATIVE:\n - hadoop-hdfs-project/hadoop-hdfs-native-client/**\n - hadoop-common-project/hadoop-common/src/main/native/**\nYARN:\n - hadoop-yarn-project/**\nMAPREDUCE:\n - hadoop-mapreduce-project/**\nDISTCP:\n - hadoop-tools/hadoop-distcp/**\nTOOLS:\n - hadoop-tools/**\nAWS:\n - hadoop-tools/hadoop-aws/**\nABFS:\n - hadoop-tools/hadoop-azure/**\nDYNAMOMETER:\n - hadoop-tools/hadoop-dynamometer/**\nMAVEN-PLUGINS:\n - hadoop-maven-plugins/**\n
dataset_sample\yaml\apache_hadoop\.github\labeler.yml
labeler.yml
YAML
1,596
0.95
0.035088
0.357143
awesome-app
821
2023-09-23T14:02:35.922370
MIT
false
76319a5cab4cd466a9846609d37b35cb
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Pull Request Labeler"\non: pull_request_target\n\npermissions:\n contents: read\n pull-requests: write\n\njobs:\n triage:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v3\n with:\n sparse-checkout: |\n .github\n - uses: actions/labeler@v4.3.0\n with:\n repo-token: ${{ secrets.GITHUB_TOKEN }}\n sync-labels: true\n configuration-path: .github/labeler.yml\n dot: true
dataset_sample\yaml\apache_hadoop\.github\workflows\labeler.yml
labeler.yml
YAML
1,247
0.95
0.051282
0.486486
awesome-app
637
2025-05-03T12:15:23.008439
GPL-3.0
false
a697b0199ae45a5b0e9b30abd43f59f9
# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the "License"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nname: website\n\n# Controls when the action will run.\non:\n push:\n branches: [ trunk ]\n\njobs:\n build:\n runs-on: ubuntu-latest\n steps:\n - name: Checkout Hadoop trunk\n uses: actions/checkout@v3\n with:\n repository: apache/hadoop\n - name: Set up JDK 8\n uses: actions/setup-java@v3\n with:\n java-version: '8'\n distribution: 'temurin'\n - name: Cache local Maven repository\n uses: actions/cache@v3\n with:\n path: ~/.m2/repository\n key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}\n restore-keys: |\n ${{ runner.os }}-maven-\n - name: Build Hadoop maven plugins\n run: cd hadoop-maven-plugins && mvn --batch-mode install\n - name: Build Hadoop\n run: mvn clean install -DskipTests -DskipShade\n - name: Build document\n run: mvn clean site\n - name: Stage document\n run: mvn site:stage -DstagingDirectory=${GITHUB_WORKSPACE}/staging/\n - name: Deploy to GitHub Pages\n uses: peaceiris/actions-gh-pages@v3\n with:\n github_token: ${{ secrets.GITHUB_TOKEN }}\n publish_dir: ./staging/hadoop-project\n user_name: 'github-actions[bot]'\n user_email: 'github-actions[bot]@users.noreply.github.com'\n force_orphan: true\n\n
dataset_sample\yaml\apache_hadoop\.github\workflows\website.yml
website.yml
YAML
2,123
0.95
0.033333
0.272727
node-utils
832
2024-04-12T22:54:54.992465
BSD-3-Clause
false
6e7d7445d0b49b6c0546fd60bb92dea0
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nINFRA:\n - changed-files:\n - any-glob-to-any-file: [\n '.github/**/*',\n 'tools/**/*',\n 'dev/create-release/**/*',\n '.asf.yaml',\n '.gitattributes',\n '.gitignore',\n 'dev/merge_spark_pr.py'\n ]\n\nBUILD:\n - changed-files:\n - all-globs-to-any-file: [\n 'dev/**/*',\n '!dev/merge_spark_pr.py'\n ]\n - any-glob-to-any-file: [\n 'build/**/*',\n 'project/**/*',\n 'assembly/**/*',\n '**/*pom.xml',\n 'bin/docker-image-tool.sh',\n 'bin/find-spark-home*',\n 'scalastyle-config.xml'\n ]\n\nDOCS:\n - changed-files:\n - any-glob-to-any-file: [\n 'docs/**/*',\n '**/README.md',\n '**/CONTRIBUTING.md',\n 'python/docs/**/*'\n ]\n\nEXAMPLES:\n - changed-files:\n - any-glob-to-any-file: [\n 'examples/**/*',\n 'bin/run-example*'\n ]\n\nCORE:\n - changed-files:\n - all-globs-to-any-file: [\n 'core/**/*',\n '!**/*UI.scala',\n '!**/ui/**/*'\n ]\n - any-glob-to-any-file: [\n 'common/kvstore/**/*',\n 'common/network-common/**/*',\n 'common/network-shuffle/**/*',\n 'python/pyspark/*.py',\n 'python/pyspark/tests/**/*.py'\n ]\n\nSPARK SUBMIT:\n - changed-files:\n - any-glob-to-any-file: [\n 'bin/spark-submit*'\n ]\n\nSPARK SHELL:\n - changed-files:\n - any-glob-to-any-file: [\n 'repl/**/*',\n 'bin/spark-shell*'\n ]\n\nSQL:\n - changed-files:\n - all-globs-to-any-file: [\n '**/sql/**/*',\n '!python/**/avro/**/*',\n '!python/**/protobuf/**/*',\n '!python/**/streaming/**/*'\n ]\n - any-glob-to-any-file: [\n 'common/unsafe/**/*',\n 'common/sketch/**/*',\n 'common/variant/**/*',\n 'bin/spark-sql*',\n 'bin/beeline*',\n 'sbin/*thriftserver*.sh',\n '**/*SQL*.R',\n '**/DataFrame.R',\n '**/*WindowSpec.R',\n '**/*catalog.R',\n '**/*column.R',\n '**/*functions.R',\n '**/*group.R',\n '**/*schema.R',\n '**/*types.R'\n ]\n\nAVRO:\n - changed-files:\n - any-glob-to-any-file: [\n 'connector/avro/**/*',\n 'python/**/avro/**/*'\n ]\n\nDSTREAM:\n - changed-files:\n - any-glob-to-any-file: [\n 'streaming/**/*',\n 'data/streaming/**/*',\n 'connector/kinesis-asl/**/*',\n 'connector/kinesis-asl-assembly/**/*',\n 'connector/kafka-0-10/**/*',\n 'connector/kafka-0-10-assembly/**/*',\n 'connector/kafka-0-10-token-provider/**/*',\n 'python/pyspark/streaming/**/*'\n ]\n\nGRAPHX:\n - changed-files:\n - any-glob-to-any-file: [\n 'graphx/**/*',\n 'data/graphx/**/*'\n ]\n\nML:\n - changed-files:\n - any-glob-to-any-file: [\n '**/ml/**/*',\n '**/*mllib_*.R'\n ]\n\nMLLIB:\n - changed-files:\n - any-glob-to-any-file: [\n '**/mllib/**/*',\n 'mllib-local/**/*'\n ]\n\nSTRUCTURED STREAMING:\n - changed-files:\n - any-glob-to-any-file: [\n '**/sql/**/streaming/**/*',\n 'connector/kafka-0-10-sql/**/*',\n 'python/pyspark/sql/**/streaming/**/*',\n '**/*streaming.R'\n ]\n\nPYTHON:\n - changed-files:\n - any-glob-to-any-file: [\n 'bin/pyspark*',\n '**/python/**/*'\n 
]\n\nPANDAS API ON SPARK:\n - changed-files:\n - any-glob-to-any-file: [\n 'python/pyspark/pandas/**/*'\n ]\n\nR:\n - changed-files:\n - any-glob-to-any-file: [\n '**/r/**/*',\n '**/R/**/*',\n 'bin/sparkR*'\n ]\n\nYARN:\n - changed-files:\n - any-glob-to-any-file: [\n 'resource-managers/yarn/**/*'\n ]\n\nKUBERNETES:\n - changed-files:\n - any-glob-to-any-file: [\n 'bin/docker-image-tool.sh',\n 'resource-managers/kubernetes/**/*'\n ]\n\nWINDOWS:\n - changed-files:\n - any-glob-to-any-file: [\n '**/*.cmd',\n 'R/pkg/tests/fulltests/test_Windows.R'\n ]\n\nWEB UI:\n - changed-files:\n - any-glob-to-any-file: [\n '**/ui/**/*',\n '**/*UI.scala'\n ]\n\nDEPLOY:\n - changed-files:\n - any-glob-to-any-file: [\n 'sbin/**/*'\n ]\n\nCONNECT:\n - changed-files:\n - any-glob-to-any-file: [\n 'sql/connect/**/*',\n 'python/**/connect/**/*'\n ]\n\nPROTOBUF:\n - changed-files:\n - any-glob-to-any-file: [\n 'connector/protobuf/**/*',\n 'python/**/protobuf/**/*'\n ]\n
dataset_sample\yaml\apache_spark\.github\labeler.yml
labeler.yml
YAML
4,827
0.95
0.008547
0.085714
vue-tools
403
2025-04-19T16:51:10.159819
GPL-3.0
false
4bbf02958bb34e49e5583878805ce12f
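The Spark labeler config above leans on two matchers with different quantifiers: any-glob-to-any-file fires when any changed file matches any glob, while all-globs-to-any-file requires a single changed file to satisfy every glob, with ! entries acting as exclusions (that is how CORE avoids firing on UI-only changes). A rough Python approximation of those semantics; fnmatch here is only a stand-in for the minimatch-style globbing actions/labeler actually uses, so ** is not handled identically.

from fnmatch import fnmatch

def any_glob_to_any_file(globs, changed_files):
    return any(fnmatch(f, g) for f in changed_files for g in globs)

def all_globs_to_any_file(globs, changed_files):
    def satisfies_all(f):
        for g in globs:
            if g.startswith("!"):
                if fnmatch(f, g[1:]):
                    return False  # excluded by a negated glob
            elif not fnmatch(f, g):
                return False
        return True
    return any(satisfies_all(f) for f in changed_files)

# A UI-only change should not pick up the CORE label.
core = ["core/**/*", "!**/*UI.scala", "!**/ui/**/*"]
print(all_globs_to_any_file(core, ["core/src/main/scala/SparkUI.scala"]))   # False
print(all_globs_to_any_file(core, ["core/src/main/scala/Executor.scala"]))  # True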
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: Run benchmarks\n\non:\n workflow_dispatch:\n inputs:\n class:\n description: 'Benchmark class'\n required: true\n default: '*'\n jdk:\n type: choice\n description: 'JDK version: 17 or 21'\n required: true\n default: '17'\n options:\n - '17'\n - '21'\n scala:\n type: choice\n description: 'Scala version: 2.13'\n required: true\n default: '2.13'\n options:\n - '2.13'\n failfast:\n type: boolean\n description: 'Failfast'\n required: true\n default: true\n num-splits:\n description: 'Number of job splits'\n required: true\n default: '1'\n\njobs:\n matrix-gen:\n name: Generate matrix for job splits\n runs-on: ubuntu-latest\n outputs:\n matrix: ${{ steps.set-matrix.outputs.matrix }}\n env:\n SPARK_BENCHMARK_NUM_SPLITS: ${{ inputs.num-splits }}\n steps:\n - name: Generate matrix\n id: set-matrix\n run: echo "matrix=["`seq -s, 1 $SPARK_BENCHMARK_NUM_SPLITS`"]" >> $GITHUB_OUTPUT\n\n # Any TPC-DS related updates on this job need to be applied to tpcds-1g job of build_and_test.yml as well\n tpcds-1g-gen:\n name: "Generate an input dataset for TPCDSQueryBenchmark with SF=1"\n if: contains(inputs.class, 'TPCDSQueryBenchmark') || contains(inputs.class, '*')\n runs-on: ubuntu-latest\n env:\n SPARK_LOCAL_IP: localhost\n steps:\n - name: Checkout Spark repository\n uses: actions/checkout@v4\n # In order to get diff files\n with:\n fetch-depth: 0\n - name: Cache SBT and Maven\n uses: actions/cache@v4\n with:\n path: |\n build/apache-maven-*\n build/*.jar\n ~/.sbt\n key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }}\n restore-keys: |\n build-\n - name: Cache Coursier local repository\n uses: actions/cache@v4\n with:\n path: ~/.cache/coursier\n key: benchmark-coursier-${{ inputs.jdk }}-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }}\n restore-keys: |\n benchmark-coursier-${{ inputs.jdk }}\n - name: Cache TPC-DS generated data\n id: cache-tpcds-sf-1\n uses: actions/cache@v4\n with:\n path: ./tpcds-sf-1\n key: tpcds-${{ hashFiles('.github/workflows/benchmark.yml', 'sql/core/src/test/scala/org/apache/spark/sql/TPCDSSchema.scala') }}\n - name: Checkout tpcds-kit repository\n if: steps.cache-tpcds-sf-1.outputs.cache-hit != 'true'\n uses: actions/checkout@v4\n with:\n repository: databricks/tpcds-kit\n ref: 1b7fb7529edae091684201fab142d956d6afd881\n path: ./tpcds-kit\n - name: Build tpcds-kit\n if: steps.cache-tpcds-sf-1.outputs.cache-hit != 'true'\n run: cd tpcds-kit/tools && make OS=LINUX\n - name: Install Java ${{ inputs.jdk }}\n if: steps.cache-tpcds-sf-1.outputs.cache-hit != 'true'\n uses: actions/setup-java@v4\n with:\n distribution: zulu\n java-version: ${{ inputs.jdk }}\n - name: Generate TPC-DS (SF=1) table data\n 
if: steps.cache-tpcds-sf-1.outputs.cache-hit != 'true'\n run: build/sbt "sql/Test/runMain org.apache.spark.sql.GenTPCDSData --dsdgenDir `pwd`/tpcds-kit/tools --location `pwd`/tpcds-sf-1 --scaleFactor 1 --numPartitions 1 --overwrite"\n\n benchmark:\n name: "Run benchmarks: ${{ inputs.class }} (JDK ${{ inputs.jdk }}, Scala ${{ inputs.scala }}, ${{ matrix.split }} out of ${{ inputs.num-splits }} splits)"\n if: always()\n needs: [matrix-gen, tpcds-1g-gen]\n runs-on: ubuntu-latest\n strategy:\n fail-fast: false\n matrix:\n split: ${{fromJSON(needs.matrix-gen.outputs.matrix)}}\n env:\n SPARK_BENCHMARK_FAILFAST: ${{ inputs.failfast }}\n SPARK_BENCHMARK_NUM_SPLITS: ${{ inputs.num-splits }}\n SPARK_BENCHMARK_CUR_SPLIT: ${{ matrix.split }}\n SPARK_GENERATE_BENCHMARK_FILES: 1\n SPARK_LOCAL_IP: localhost\n # To prevent spark.test.home not being set. See more detail in SPARK-36007.\n SPARK_HOME: ${{ github.workspace }}\n SPARK_TPCDS_DATA: ${{ github.workspace }}/tpcds-sf-1\n steps:\n - name: Checkout Spark repository\n uses: actions/checkout@v4\n # In order to get diff files\n with:\n fetch-depth: 0\n - name: Cache SBT and Maven\n uses: actions/cache@v4\n with:\n path: |\n build/apache-maven-*\n build/*.jar\n ~/.sbt\n key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }}\n restore-keys: |\n build-\n - name: Cache Coursier local repository\n uses: actions/cache@v4\n with:\n path: ~/.cache/coursier\n key: benchmark-coursier-${{ inputs.jdk }}-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }}\n restore-keys: |\n benchmark-coursier-${{ inputs.jdk }}\n - name: Install Java ${{ inputs.jdk }}\n uses: actions/setup-java@v4\n with:\n distribution: zulu\n java-version: ${{ inputs.jdk }}\n - name: Cache TPC-DS generated data\n if: contains(inputs.class, 'TPCDSQueryBenchmark') || contains(inputs.class, '*')\n id: cache-tpcds-sf-1\n uses: actions/cache@v4\n with:\n path: ./tpcds-sf-1\n key: tpcds-${{ hashFiles('.github/workflows/benchmark.yml', 'sql/core/src/test/scala/org/apache/spark/sql/TPCDSSchema.scala') }}\n - name: Run benchmarks\n run: |\n ./build/sbt -Pscala-${{ inputs.scala }} -Pyarn -Pkubernetes -Phive -Phive-thriftserver -Phadoop-cloud -Pkinesis-asl -Pspark-ganglia-lgpl Test/package\n # Make less noisy\n cp conf/log4j2.properties.template conf/log4j2.properties\n sed -i 's/rootLogger.level = info/rootLogger.level = warn/g' conf/log4j2.properties\n # In benchmark, we use local as master so set driver memory only. Note that GitHub Actions has 7 GB memory limit.\n bin/spark-submit \\n --driver-memory 6g --class org.apache.spark.benchmark.Benchmarks \\n --jars "`find . -name '*-SNAPSHOT-tests.jar' -o -name '*avro*-SNAPSHOT.jar' | paste -sd ',' -`,`find ~/.cache/coursier -name 'curator-test-*.jar'`" \\n "`find . -name 'spark-core*-SNAPSHOT-tests.jar'`" \\n "${{ inputs.class }}"\n # To keep the directory structure and file permissions, tar them\n # See also https://github.com/actions/upload-artifact#maintaining-file-permissions-and-case-sensitive-files\n echo "Preparing the benchmark results:"\n tar -cvf benchmark-results-${{ inputs.jdk }}-${{ inputs.scala }}.tar `git diff --name-only` `git ls-files --others --exclude=tpcds-sf-1 --exclude-standard`\n - name: Upload benchmark results\n uses: actions/upload-artifact@v4\n with:\n name: benchmark-results-${{ inputs.jdk }}-${{ inputs.scala }}-${{ matrix.split }}\n path: benchmark-results-${{ inputs.jdk }}-${{ inputs.scala }}.tar\n\n
dataset_sample\yaml\apache_spark\.github\workflows\benchmark.yml
benchmark.yml
YAML
7,915
0.95
0.101523
0.136126
python-kit
365
2025-05-24T12:34:51.157491
Apache-2.0
false
8e619f5e5eb240aca668b50f74a4d93b
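The matrix-gen job in the benchmark workflow above emits its job matrix as a JSON array built with seq -s, 1 $SPARK_BENCHMARK_NUM_SPLITS, and each benchmark job then identifies its slice through the SPARK_BENCHMARK_NUM_SPLITS and SPARK_BENCHMARK_CUR_SPLIT environment variables. A short Python sketch of the same idea; the round-robin partition at the end is a hypothetical illustration of how a runner could assign classes to splits, not Spark's actual selection logic.

import json

def split_matrix(num_splits: int) -> str:
    # Equivalent of: echo "matrix=["`seq -s, 1 $SPARK_BENCHMARK_NUM_SPLITS`"]"
    return json.dumps(list(range(1, num_splits + 1)))

def classes_for_split(classes, num_splits, cur_split):
    # Hypothetical round-robin partition; cur_split is 1-based like the matrix.
    return [c for i, c in enumerate(sorted(classes)) if i % num_splits == cur_split - 1]

print(split_matrix(3))  # [1, 2, 3]
print(classes_for_split(
    ["AvroReadBenchmark", "TPCDSQueryBenchmark", "UDFBenchmark"], 2, 1))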
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build (branch-3.5, Scala 2.13, Hadoop 3, JDK 8)"\n\non:\n schedule:\n - cron: '0 11 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 8\n branch: branch-3.5\n hadoop: hadoop3\n envs: >-\n {\n "SCALA_PROFILE": "scala2.13",\n "PYSPARK_IMAGE_TO_TEST": "",\n "PYTHON_TO_TEST": "",\n "ORACLE_DOCKER_IMAGE_NAME": "gvenzl/oracle-xe:21.3.0"\n }\n jobs: >-\n {\n "build": "true",\n "sparkr": "true",\n "tpcds-1g": "true",\n "docker-integration-tests": "true",\n "k8s-integration-tests": "true",\n "lint" : "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_branch35.yml
build_branch35.yml
YAML
1,591
0.95
0.056604
0.36
awesome-app
560
2025-02-18T08:03:45.154725
GPL-3.0
false
7556ffeeaa1b449d047dd5a0f44d9eb1
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (branch-3.5)"\n\non:\n schedule:\n - cron: '0 11 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 8\n branch: branch-3.5\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "",\n "PYTHON_TO_TEST": ""\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_branch35_python.yml
build_branch35_python.yml
YAML
1,335
0.95
0.06383
0.409091
python-kit
972
2023-12-10T16:13:00.841382
BSD-3-Clause
false
1918fe44cff1a08f45dc61d86299c198
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build (branch-4.0, Scala 2.13, Hadoop 3, JDK 17)"\n\non:\n schedule:\n - cron: '0 12 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: branch-4.0\n hadoop: hadoop3\n envs: >-\n {\n "SCALA_PROFILE": "scala2.13",\n "PYSPARK_IMAGE_TO_TEST": "",\n "PYTHON_TO_TEST": "",\n "ORACLE_DOCKER_IMAGE_NAME": "gvenzl/oracle-free:23.6-slim"\n }\n jobs: >-\n {\n "build": "true",\n "sparkr": "true",\n "tpcds-1g": "true",\n "docker-integration-tests": "true",\n "k8s-integration-tests": "true",\n "lint" : "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_branch40.yml
build_branch40.yml
YAML
1,598
0.95
0.056604
0.36
vue-tools
797
2024-04-13T13:32:03.278095
BSD-3-Clause
false
439d613dc09b727d02872647206b3379
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build (branch-4.0, Scala 2.13, Hadoop 3, JDK 21)"\n\non:\n schedule:\n - cron: '0 5 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 21\n branch: branch-4.0\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-311",\n "PYTHON_TO_TEST": "python3.11",\n "SKIP_MIMA": "true",\n "SKIP_UNIDOC": "true",\n "DEDICATED_JVM_SBT_TESTS": "org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormatV1Suite,org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormatV2Suite,org.apache.spark.sql.execution.datasources.orc.OrcSourceV1Suite,org.apache.spark.sql.execution.datasources.orc.OrcSourceV2Suite"\n }\n jobs: >-\n {\n "build": "true",\n "pyspark": "true",\n "sparkr": "true",\n "tpcds-1g": "true",\n "docker-integration-tests": "true",\n "yarn": "true",\n "k8s-integration-tests": "true",\n "buf": "true",\n "ui": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_branch40_java21.yml
build_branch40_java21.yml
YAML
1,968
0.95
0.052632
0.333333
react-lib
531
2025-03-03T06:37:32.733981
Apache-2.0
false
2dd00eb38be4a5a05690e3863b561a0e
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Maven (branch-4.0, Scala 2.13, Hadoop 3, JDK 17)"\n\non:\n schedule:\n - cron: '0 14 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/maven_test.yml\n if: github.repository == 'apache/spark'\n with:\n branch: branch-4.0\n
dataset_sample\yaml\apache_spark\.github\workflows\build_branch40_maven.yml
build_branch40_maven.yml
YAML
1,115
0.95
0.085714
0.5625
react-lib
57
2024-12-15T05:26:48.314586
MIT
false
c211a182827aa6f948d82e39e65f61a6
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Maven (branch-4.0, Scala 2.13, Hadoop 3, JDK 21)"\n\non:\n schedule:\n - cron: '0 14 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/maven_test.yml\n if: github.repository == 'apache/spark'\n with:\n branch: branch-4.0\n java: 21\n
dataset_sample\yaml\apache_spark\.github\workflows\build_branch40_maven_java21.yml
build_branch40_maven_java21.yml
YAML
1,130
0.95
0.083333
0.545455
awesome-app
49
2025-01-27T11:47:54.290808
GPL-3.0
false
4ef7d70e5199080622d0db8082d73bd7
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Non-ANSI (branch-4.0, Hadoop 3, JDK 17, Scala 2.13)"\n\non:\n schedule:\n - cron: '0 2 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: branch-4.0\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-311",\n "PYTHON_TO_TEST": "python3.11",\n "SPARK_ANSI_SQL_MODE": "false"\n }\n jobs: >-\n {\n "build": "true",\n "docs": "true",\n "pyspark": "true",\n "sparkr": "true",\n "tpcds-1g": "true",\n "docker-integration-tests": "true",\n "yarn": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_branch40_non_ansi.yml
build_branch40_non_ansi.yml
YAML
1,572
0.95
0.056604
0.36
react-lib
28
2024-06-13T06:19:43.612082
Apache-2.0
false
05a860c259d368b8f86f5f1aef8553fe
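The envs and jobs inputs in these scheduled Spark builds are multi-line strings handed to the reusable build_and_test.yml workflow, which presumably decodes them with the fromJSON expression function; strict JSON parsers of that kind reject relaxed syntax such as trailing commas, so the blocks are easy to sanity-check locally before committing. A short illustration with Python's standard json module:

import json

envs = """
{
  "PYSPARK_IMAGE_TO_TEST": "python-311",
  "PYTHON_TO_TEST": "python3.11",
  "SPARK_ANSI_SQL_MODE": "false"
}
"""
print(json.loads(envs)["SPARK_ANSI_SQL_MODE"])  # -> false

try:
    json.loads('{"yarn": "true",}')  # trailing comma: rejected as invalid JSON
except json.JSONDecodeError as err:
    print("rejected:", err)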
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (branch-4.0)"\n\non:\n schedule:\n - cron: '0 12 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: branch-4.0\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-311",\n "PYTHON_TO_TEST": "python3.11"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_branch40_python.yml
build_branch40_python.yml
YAML
1,356
0.95
0.06383
0.409091
vue-tools
236
2025-04-13T14:23:50.819974
BSD-3-Clause
false
71fa61d6d262385ccff6c1cbe491492b
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (branch-4.0, PyPy 3.10)"\n\non:\n schedule:\n - cron: '0 16 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: branch-4.0\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "pypy-310",\n "PYTHON_TO_TEST": "pypy3"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_branch40_python_pypy3.10.yml
build_branch40_python_pypy3.10.yml
YAML
1,360
0.95
0.06383
0.409091
node-utils
878
2024-02-01T01:33:26.609960
MIT
false
149ff23c8553a9317cd160a4dbe711e4
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Coverage (master, Scala 2.13, Hadoop 3, JDK 17)"\n\non:\n schedule:\n - cron: '0 10 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-311",\n "PYTHON_TO_TEST": "python3.11",\n "PYSPARK_CODECOV": "true"\n }\n jobs: >-\n {\n "pyspark": "true"\n }\n secrets:\n codecov_token: ${{ secrets.CODECOV_TOKEN }}\n
dataset_sample\yaml\apache_spark\.github\workflows\build_coverage.yml
build_coverage.yml
YAML
1,439
0.95
0.061224
0.391304
react-lib
346
2023-09-26T11:52:51.901165
Apache-2.0
false
8dad6b15643bf5e21cce92a4f6108c33
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: Build / Cache base image\n\non:\n # Run jobs when a commit is merged\n push:\n branches:\n - 'master'\n - 'branch-*'\n paths:\n - 'dev/infra/Dockerfile'\n - 'dev/spark-test-image/docs/Dockerfile'\n - 'dev/spark-test-image/lint/Dockerfile'\n - 'dev/spark-test-image/sparkr/Dockerfile'\n - 'dev/spark-test-image/python-minimum/Dockerfile'\n - 'dev/spark-test-image/python-ps-minimum/Dockerfile'\n - 'dev/spark-test-image/pypy-310/Dockerfile'\n - 'dev/spark-test-image/python-309/Dockerfile'\n - 'dev/spark-test-image/python-310/Dockerfile'\n - 'dev/spark-test-image/python-311/Dockerfile'\n - 'dev/spark-test-image/python-312/Dockerfile'\n - 'dev/spark-test-image/python-313/Dockerfile'\n - 'dev/spark-test-image/python-313-nogil/Dockerfile'\n - 'dev/spark-test-image/numpy-213/Dockerfile'\n - '.github/workflows/build_infra_images_cache.yml'\n # Create infra image when cutting down branches/tags\n create:\n workflow_dispatch:\njobs:\n main:\n if: github.repository == 'apache/spark'\n runs-on: ubuntu-latest\n permissions:\n packages: write\n steps:\n - name: Checkout Spark repository\n uses: actions/checkout@v4\n - name: Set up QEMU\n uses: docker/setup-qemu-action@v3\n - name: Set up Docker Buildx\n uses: docker/setup-buildx-action@v3\n - name: Login to DockerHub\n uses: docker/login-action@v3\n with:\n registry: ghcr.io\n username: ${{ github.actor }}\n password: ${{ secrets.GITHUB_TOKEN }}\n - name: Build and push\n id: docker_build\n uses: docker/build-push-action@v6\n with:\n context: ./dev/infra/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-cache:${{ github.ref_name }},mode=max\n - name: Image digest\n run: echo ${{ steps.docker_build.outputs.digest }}\n - name: Build and push (Documentation)\n if: hashFiles('dev/spark-test-image/docs/Dockerfile') != ''\n id: docker_build_docs\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/docs/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-docs-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-docs-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-docs-cache:${{ github.ref_name }},mode=max\n - name: Image digest (Documentation)\n if: hashFiles('dev/spark-test-image/docs/Dockerfile') != ''\n run: echo ${{ steps.docker_build_docs.outputs.digest }}\n - name: Build and push (Linter)\n if: 
hashFiles('dev/spark-test-image/lint/Dockerfile') != ''\n id: docker_build_lint\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/lint/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-lint-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-lint-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-lint-cache:${{ github.ref_name }},mode=max\n - name: Image digest (Linter)\n if: hashFiles('dev/spark-test-image/lint/Dockerfile') != ''\n run: echo ${{ steps.docker_build_lint.outputs.digest }}\n - name: Build and push (SparkR)\n if: hashFiles('dev/spark-test-image/sparkr/Dockerfile') != ''\n id: docker_build_sparkr\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/sparkr/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-sparkr-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-sparkr-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-sparkr-cache:${{ github.ref_name }},mode=max\n - name: Image digest (SparkR)\n if: hashFiles('dev/spark-test-image/sparkr/Dockerfile') != ''\n run: echo ${{ steps.docker_build_sparkr.outputs.digest }}\n - name: Build and push (PySpark with old dependencies)\n if: hashFiles('dev/spark-test-image/python-minimum/Dockerfile') != ''\n id: docker_build_pyspark_python_minimum\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/python-minimum/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-minimum-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-minimum-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-minimum-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark with old dependencies)\n if: hashFiles('dev/spark-test-image/python-minimum/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_python_minimum.outputs.digest }}\n - name: Build and push (PySpark PS with old dependencies)\n if: hashFiles('dev/spark-test-image/python-ps-minimum/Dockerfile') != ''\n id: docker_build_pyspark_python_ps_minimum\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/python-ps-minimum/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-ps-minimum-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-ps-minimum-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-ps-minimum-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark PS with old dependencies)\n if: hashFiles('dev/spark-test-image/python-ps-minimum/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_python_ps_minimum.outputs.digest }}\n - name: Build and push (PySpark with PyPy 3.10)\n if: hashFiles('dev/spark-test-image/pypy-310/Dockerfile') != ''\n id: docker_build_pyspark_pypy_310\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/pypy-310/\n push: true\n tags: 
ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-pypy-310-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-pypy-310-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-pypy-310-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark with PyPy 3.10)\n if: hashFiles('dev/spark-test-image/pypy-310/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_pypy_310.outputs.digest }}\n - name: Build and push (PySpark with Python 3.9)\n if: hashFiles('dev/spark-test-image/python-309/Dockerfile') != ''\n id: docker_build_pyspark_python_309\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/python-309/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-309-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-309-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-309-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark with Python 3.9)\n if: hashFiles('dev/spark-test-image/python-309/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_python_309.outputs.digest }}\n - name: Build and push (PySpark with Python 3.10)\n if: hashFiles('dev/spark-test-image/python-310/Dockerfile') != ''\n id: docker_build_pyspark_python_310\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/python-310/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-310-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-310-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-310-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark with Python 3.10)\n if: hashFiles('dev/spark-test-image/python-310/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_python_310.outputs.digest }}\n - name: Build and push (PySpark with Python 3.11)\n if: hashFiles('dev/spark-test-image/python-311/Dockerfile') != ''\n id: docker_build_pyspark_python_311\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/python-311/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-311-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-311-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-311-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark with Python 3.11)\n if: hashFiles('dev/spark-test-image/python-311/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_python_311.outputs.digest }}\n - name: Build and push (PySpark with Python 3.12)\n if: hashFiles('dev/spark-test-image/python-312/Dockerfile') != ''\n id: docker_build_pyspark_python_312\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/python-312/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-312-cache:${{ github.ref_name }}-static\n cache-from: 
type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-312-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-312-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark with Python 3.12)\n if: hashFiles('dev/spark-test-image/python-312/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_python_312.outputs.digest }}\n - name: Build and push (PySpark with Python 3.13)\n if: hashFiles('dev/spark-test-image/python-313/Dockerfile') != ''\n id: docker_build_pyspark_python_313\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/python-313/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-313-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-313-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-313-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark with Python 3.13)\n if: hashFiles('dev/spark-test-image/python-313/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_python_313.outputs.digest }}\n - name: Build and push (PySpark with Python 3.13 no GIL)\n if: hashFiles('dev/spark-test-image/python-313-nogil/Dockerfile') != ''\n id: docker_build_pyspark_python_313_nogil\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/python-313-nogil/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-313-nogil-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-313-nogil-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-python-313-nogil-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark with Python 3.13 no GIL)\n if: hashFiles('dev/spark-test-image/python-313-nogil/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_python_313_nogil.outputs.digest }}\n - name: Build and push (PySpark with Numpy 2.1.3)\n if: hashFiles('dev/spark-test-image/numpy-213/Dockerfile') != ''\n id: docker_build_pyspark_numpy_213\n uses: docker/build-push-action@v6\n with:\n context: ./dev/spark-test-image/numpy-213/\n push: true\n tags: ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-numpy-213-cache:${{ github.ref_name }}-static\n cache-from: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-numpy-213-cache:${{ github.ref_name }}\n cache-to: type=registry,ref=ghcr.io/apache/spark/apache-spark-github-action-image-pyspark-numpy-213-cache:${{ github.ref_name }},mode=max\n - name: Image digest (PySpark with Numpy 2.1.3)\n if: hashFiles('dev/spark-test-image/numpy-213/Dockerfile') != ''\n run: echo ${{ steps.docker_build_pyspark_numpy_213.outputs.digest }}\n
dataset_sample\yaml\apache_spark\.github\workflows\build_infra_images_cache.yml
build_infra_images_cache.yml
YAML
14,778
0.95
0.118367
0.082305
python-kit
764
2023-11-11T04:46:59.968387
Apache-2.0
false
e2bdbdb9e164883b0ee804a92bed1511
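The record above applies the same docker/build-push-action caching pattern once per test image: pull previously pushed layers from a registry cache ref via cache-from, and write all layers back with cache-to and mode=max (which also caches intermediate build stages). A minimal standalone sketch of that pattern follows; the image name and build context are illustrative only, not taken from the record.

name: cache-sketch
on: workflow_dispatch
jobs:
  build:
    runs-on: ubuntu-latest
    permissions:
      packages: write
    steps:
      - uses: actions/checkout@v4
      - uses: docker/setup-buildx-action@v3
      - uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - uses: docker/build-push-action@v6
        with:
          context: ./some-image/    # hypothetical build context
          push: true
          tags: ghcr.io/example/image:${{ github.ref_name }}-static
          # Reuse layers pushed by earlier runs on the same branch ...
          cache-from: type=registry,ref=ghcr.io/example/image-cache:${{ github.ref_name }}
          # ... and push every layer back; mode=max includes intermediate stages.
          cache-to: type=registry,ref=ghcr.io/example/image-cache:${{ github.ref_name }},mode=max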
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build (master, Scala 2.13, Hadoop 3, JDK 21)"\n\non:\n schedule:\n - cron: '0 4 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 21\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-311",\n "PYTHON_TO_TEST": "python3.11",\n "SKIP_MIMA": "true",\n "SKIP_UNIDOC": "true",\n "DEDICATED_JVM_SBT_TESTS": "org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormatV1Suite,org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormatV2Suite,org.apache.spark.sql.execution.datasources.orc.OrcSourceV1Suite,org.apache.spark.sql.execution.datasources.orc.OrcSourceV2Suite"\n }\n jobs: >-\n {\n "build": "true",\n "pyspark": "true",\n "sparkr": "true",\n "tpcds-1g": "true",\n "docker-integration-tests": "true",\n "yarn": "true",\n "k8s-integration-tests": "true",\n "buf": "true",\n "ui": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_java21.yml
build_java21.yml
YAML
1,960
0.95
0.052632
0.333333
node-utils
159
2025-06-12T02:23:45.152187
GPL-3.0
false
aa675860f34ba08efc32abf9b377196a
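This record and most of the caller workflows in the rest of this sample share one shape: a thin scheduled job that delegates to the reusable build_and_test.yml via uses:, passing envs and jobs as strings that contain JSON for the callee to parse. A condensed sketch of that caller shape, assuming the input names (java, branch, hadoop, envs, jobs) are exactly the workflow_call inputs declared by build_and_test.yml, as the records here suggest:

name: "Build / Example caller (sketch)"
on:
  schedule:
    - cron: '0 0 * * *'    # illustrative schedule
  workflow_dispatch:
jobs:
  run-build:
    permissions:
      packages: write
    name: Run
    # Reusable workflow in the same repository.
    uses: ./.github/workflows/build_and_test.yml
    if: github.repository == 'apache/spark'
    with:
      java: 17
      branch: master
      hadoop: hadoop3
      # Plain strings holding JSON; the callee decides how to parse them.
      envs: >-
        { "PYTHON_TO_TEST": "python3.11" }
      jobs: >-
        { "pyspark": "true" }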
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build"\n\non:\n push:\n branches:\n - '**'\n\njobs:\n call-build-and-test:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n secrets:\n codecov_token: ${{ secrets.CODECOV_TOKEN }}\n
dataset_sample\yaml\apache_spark\.github\workflows\build_main.yml
build_main.yml
YAML
1,037
0.95
0.058824
0.580645
vue-tools
18
2025-03-04T08:54:54.225686
MIT
false
089fcce2a27fbfc05d7de279de642b2d
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Maven (master, Scala 2.13, Hadoop 3, JDK 17)"\n\non:\n schedule:\n - cron: '0 13 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/maven_test.yml\n if: github.repository == 'apache/spark'\n
dataset_sample\yaml\apache_spark\.github\workflows\build_maven.yml
build_maven.yml
YAML
1,076
0.95
0.090909
0.6
react-lib
139
2023-08-27T20:15:38.781077
BSD-3-Clause
false
143c32d69afe6eca1220d3d2c97f49b2
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Maven (master, Scala 2.13, Hadoop 3, JDK 21)"\n\non:\n schedule:\n - cron: '0 14 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/maven_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 21\n
dataset_sample\yaml\apache_spark\.github\workflows\build_maven_java21.yml
build_maven_java21.yml
YAML
1,101
0.95
0.085714
0.5625
awesome-app
148
2024-05-17T05:32:08.275195
Apache-2.0
false
2752495ad5a9f8456830a3fc62bcce30
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Maven (master, Scala 2.13, Hadoop 3, JDK 21, ARM)"\n\non:\n schedule:\n - cron: '0 15 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/maven_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 21\n os: ubuntu-24.04-arm\n arch: arm64\n
dataset_sample\yaml\apache_spark\.github\workflows\build_maven_java21_arm.yml
build_maven_java21_arm.yml
YAML
1,151
0.95
0.081081
0.529412
python-kit
4
2024-08-15T06:01:54.623779
Apache-2.0
false
a43410b307d2a5b1666c73b158f066e0
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Maven (master, Scala 2.13, Hadoop 3, JDK 21, MacOS-15)"\n\non:\n schedule:\n - cron: '0 20 */2 * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/maven_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 21\n os: macos-15\n arch: arm64\n envs: >-\n {\n "SPARK_TEST_SQL_SHUFFLE_EXCHANGE_MAX_THREAD_THRESHOLD": "256",\n "SPARK_TEST_SQL_RESULT_QUERY_STAGE_MAX_THREAD_THRESHOLD": "256",\n "SPARK_TEST_HIVE_SHUFFLE_EXCHANGE_MAX_THREAD_THRESHOLD": "48",\n "SPARK_TEST_HIVE_RESULT_QUERY_STAGE_MAX_THREAD_THRESHOLD": "48"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_maven_java21_macos15.yml
build_maven_java21_macos15.yml
YAML
1,480
0.95
0.068182
0.439024
vue-tools
131
2024-08-16T08:39:51.224722
MIT
false
df228e3a13eea9fd6e76ec80a164cc78
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Non-ANSI (master, Hadoop 3, JDK 17, Scala 2.13)"\n\non:\n schedule:\n - cron: '0 1 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-311",\n "PYTHON_TO_TEST": "python3.11",\n "SPARK_ANSI_SQL_MODE": "false"\n }\n jobs: >-\n {\n "build": "true",\n "docs": "true",\n "pyspark": "true",\n "sparkr": "true",\n "tpcds-1g": "true",\n "docker-integration-tests": "true",\n "yarn": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_non_ansi.yml
build_non_ansi.yml
YAML
1,564
0.95
0.056604
0.36
vue-tools
486
2023-10-27T17:23:58.754531
BSD-3-Clause
false
5ff823696c368bf1c8ef505d773c7995
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python 3.10)"\n\non:\n schedule:\n - cron: '0 17 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-310",\n "PYTHON_TO_TEST": "python3.10"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_3.10.yml
build_python_3.10.yml
YAML
1,361
0.95
0.06383
0.409091
awesome-app
224
2025-04-28T18:22:41.311926
Apache-2.0
false
da42dfbb7d34ac4f3e8a3eaacb32d736
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python 3.11, ARM)"\n\non:\n schedule:\n - cron: '0 22 */3 * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/python_hosted_runner_test.yml\n if: github.repository == 'apache/spark'\n with:\n os: ubuntu-24.04-arm\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_3.11_arm.yml
build_python_3.11_arm.yml
YAML
1,124
0.95
0.085714
0.5625
react-lib
319
2024-08-03T20:17:10.894126
MIT
false
f0e18e5f6409c7a2285d92d3594c5998
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python 3.11, MacOS)"\n\non:\n schedule:\n - cron: '0 21 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/python_hosted_runner_test.yml\n if: github.repository == 'apache/spark'\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_3.11_macos.yml
build_python_3.11_macos.yml
YAML
1,087
0.95
0.090909
0.6
awesome-app
980
2023-11-23T09:00:43.443201
MIT
false
99536f44cecd8de7b90f5bf98fe3ae85
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python 3.12)"\n\non:\n schedule:\n - cron: '0 19 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-312",\n "PYTHON_TO_TEST": "python3.12"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_3.12.yml
build_python_3.12.yml
YAML
1,361
0.95
0.06383
0.409091
react-lib
53
2024-09-14T07:05:09.597646
MIT
false
b59e3052dfb6ab62f7bc1166570d10a6
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python 3.13)"\n\non:\n schedule:\n - cron: '0 20 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-313",\n "PYTHON_TO_TEST": "python3.13"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_3.13.yml
build_python_3.13.yml
YAML
1,361
0.95
0.06383
0.409091
awesome-app
66
2024-12-03T16:36:07.663471
Apache-2.0
false
48a37fac1f78a92ba930c2095a6974a0
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python 3.13 no GIL)"\n\non:\n schedule:\n - cron: '0 19 */3 * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-313-nogil",\n "PYTHON_TO_TEST": "python3.13t",\n "PYTHON_GIL": "0"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_3.13_nogil.yml
build_python_3.13_nogil.yml
YAML
1,406
0.95
0.0625
0.4
vue-tools
928
2024-01-21T22:44:04.739701
Apache-2.0
false
f6f1aa7ce3c048730189be47dc2a6adc
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python 3.9)"\n\non:\n schedule:\n - cron: '0 21 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-309",\n "PYTHON_TO_TEST": "python3.9"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_3.9.yml
build_python_3.9.yml
YAML
1,359
0.95
0.06383
0.409091
python-kit
441
2025-03-31T22:44:45.594764
BSD-3-Clause
false
21295999b2a709935aa0eb3617f2c25a
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: Build / Spark Connect Python-only (master, Python 3.11)\n\non:\n schedule:\n - cron: '0 19 * * *'\n workflow_dispatch:\n\njobs:\n # Build: build Spark and run the tests for specified modules using SBT\n build:\n name: "Build modules: pyspark-client"\n runs-on: ubuntu-latest\n timeout-minutes: 120\n if: github.repository == 'apache/spark'\n steps:\n - name: Checkout Spark repository\n uses: actions/checkout@v4\n - name: Cache SBT and Maven\n uses: actions/cache@v4\n with:\n path: |\n build/apache-maven-*\n build/*.jar\n ~/.sbt\n key: build-spark-connect-python-only-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }}\n restore-keys: |\n build-spark-connect-python-only-\n - name: Cache Coursier local repository\n uses: actions/cache@v4\n with:\n path: ~/.cache/coursier\n key: coursier-build-spark-connect-python-only-${{ hashFiles('**/pom.xml') }}\n restore-keys: |\n coursier-build-spark-connect-python-only-\n - name: Install Java 17\n uses: actions/setup-java@v4\n with:\n distribution: zulu\n java-version: 17\n - name: Install Python 3.11\n uses: actions/setup-python@v5\n with:\n python-version: '3.11'\n architecture: x64\n - name: Build Spark\n run: |\n ./build/sbt -Phive Test/package\n - name: Install pure Python package (pyspark-client)\n env:\n SPARK_TESTING: 1\n run: |\n cd python\n python packaging/client/setup.py sdist\n cd dist\n pip install pyspark*client-*.tar.gz\n pip install 'grpcio==1.67.0' 'grpcio-status==1.67.0' 'protobuf==5.29.1' 'googleapis-common-protos==1.65.0' 'graphviz==0.20.3' 'six==1.16.0' 'pandas==2.2.3' scipy 'plotly<6.0.0' 'mlflow>=2.8.1' coverage matplotlib openpyxl 'memory-profiler>=0.61.0' 'scikit-learn>=1.3.2' 'graphviz==0.20.3' 'torch<2.6.0' torchvision torcheval deepspeed unittest-xml-reporting\n - name: List Python packages\n run: python -m pip list\n - name: Run tests (local)\n env:\n SPARK_TESTING: 1\n SPARK_CONNECT_TESTING_REMOTE: sc://localhost\n run: |\n # Make less noisy\n cp conf/log4j2.properties.template conf/log4j2.properties\n sed -i 's/rootLogger.level = info/rootLogger.level = warn/g' conf/log4j2.properties\n\n # Start a Spark Connect server for local\n PYTHONPATH="python/lib/pyspark.zip:python/lib/py4j-0.10.9.9-src.zip:$PYTHONPATH" ./sbin/start-connect-server.sh \\n --driver-java-options "-Dlog4j.configurationFile=file:$GITHUB_WORKSPACE/conf/log4j2.properties" \\n --jars "`find connector/protobuf/target -name spark-protobuf-*SNAPSHOT.jar`,`find connector/avro/target -name spark-avro*SNAPSHOT.jar`"\n\n # Remove Py4J and PySpark zipped library to make sure there is no JVM connection\n mv python/lib lib.back\n mv python/pyspark pyspark.back\n\n # Several tests related 
to catalog require running them sequentially, e.g., writing a table in a listener.\n ./python/run-tests --parallelism=1 --python-executables=python3 --modules pyspark-connect,pyspark-ml-connect\n # None of the tests are dependent on each other in the Pandas API on Spark, so run them in parallel\n ./python/run-tests --parallelism=1 --python-executables=python3 --modules pyspark-pandas-connect-part0,pyspark-pandas-connect-part1,pyspark-pandas-connect-part2,pyspark-pandas-connect-part3\n\n # Stop Spark Connect server.\n ./sbin/stop-connect-server.sh\n mv lib.back python/lib\n mv pyspark.back python/pyspark\n\n - name: Run tests (local-cluster)\n env:\n SPARK_TESTING: 1\n SPARK_CONNECT_TESTING_REMOTE: sc://localhost\n run: |\n # Start a Spark Connect server for local-cluster\n PYTHONPATH="python/lib/pyspark.zip:python/lib/py4j-0.10.9.9-src.zip:$PYTHONPATH" ./sbin/start-connect-server.sh \\n --master "local-cluster[2, 4, 1024]" \\n --driver-java-options "-Dlog4j.configurationFile=file:$GITHUB_WORKSPACE/conf/log4j2.properties" \\n --jars "`find connector/protobuf/target -name spark-protobuf-*SNAPSHOT.jar`,`find connector/avro/target -name spark-avro*SNAPSHOT.jar`"\n\n # Remove Py4J and PySpark zipped library to make sure there is no JVM connection\n mv python/lib lib.back\n mv python/pyspark pyspark.back\n\n ./python/run-tests --parallelism=1 --python-executables=python3 --testnames "pyspark.resource.tests.test_connect_resources,pyspark.sql.tests.connect.client.test_artifact,pyspark.sql.tests.connect.client.test_artifact_localcluster,pyspark.sql.tests.connect.test_resources"\n\n # Stop Spark Connect server.\n ./sbin/stop-connect-server.sh\n mv lib.back python/lib\n mv pyspark.back python/pyspark\n - name: Upload test results to report\n if: always()\n uses: actions/upload-artifact@v4\n with:\n name: test-results-spark-connect-python-only\n path: "**/target/test-reports/*.xml"\n - name: Upload Spark Connect server log file\n if: ${{ !success() }}\n uses: actions/upload-artifact@v4\n with:\n name: unit-tests-log-spark-connect-python-only\n path: logs/*.out\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_connect.yml
build_python_connect.yml
YAML
6,310
0.95
0.057971
0.220472
node-utils
120
2023-08-17T09:13:22.300932
MIT
false
f407756430627941d7a27aee8b41e5cf
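The notable move in the record above is how it guarantees the tests exercise only the pure-Python pyspark-client: it starts a Spark Connect server, renames python/lib and python/pyspark out of the way so no Py4J or JVM-backed PySpark can be imported, runs the tests against sc://localhost, then restores both directories. A condensed sketch of that step, trimmed from the record itself (server flags and error handling omitted):

- name: Run client-only tests (sketch)
  env:
    SPARK_TESTING: 1
    SPARK_CONNECT_TESTING_REMOTE: sc://localhost
  run: |
    # Serve Spark Connect locally for the pip-installed client to talk to.
    ./sbin/start-connect-server.sh
    # Hide the JVM-backed sources so only the pure-Python client is importable.
    mv python/lib lib.back
    mv python/pyspark pyspark.back
    ./python/run-tests --parallelism=1 --python-executables=python3 --modules pyspark-connect
    # Tear down and restore for any later steps.
    ./sbin/stop-connect-server.sh
    mv lib.back python/lib
    mv pyspark.back python/pyspark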
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: Build / Spark Connect Python-only (master-server, 35-client, Python 3.11)\n\non:\n schedule:\n - cron: '0 21 * * *'\n workflow_dispatch:\n\njobs:\n # Build: build Spark and run the tests for specified modules using SBT\n build:\n name: "Build modules: pyspark-connect"\n runs-on: ubuntu-latest\n timeout-minutes: 100\n if: github.repository == 'apache/spark'\n steps:\n - name: Checkout Spark repository\n uses: actions/checkout@v4\n with:\n fetch-depth: 0\n - name: Cache SBT and Maven\n uses: actions/cache@v4\n with:\n path: |\n build/apache-maven-*\n build/*.jar\n ~/.sbt\n key: build-spark-connect-python-only-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }}\n restore-keys: |\n build-spark-connect-python-only-\n - name: Cache Coursier local repository\n uses: actions/cache@v4\n with:\n path: ~/.cache/coursier\n key: coursier-build-spark-connect-python-only-${{ hashFiles('**/pom.xml') }}\n restore-keys: |\n coursier-build-spark-connect-python-only-\n - name: Install Java 17\n uses: actions/setup-java@v4\n with:\n distribution: zulu\n java-version: 17\n - name: Install Python 3.11\n uses: actions/setup-python@v5\n with:\n python-version: '3.11'\n architecture: x64\n - name: Build Spark\n run: |\n ./build/sbt -Phive Test/package\n - name: Install Python dependencies\n run: |\n pip install 'numpy==1.25.1' 'pyarrow==12.0.1' 'pandas<=2.0.3' scipy unittest-xml-reporting 'plotly<6.0.0' 'mlflow>=2.3.1' coverage 'matplotlib==3.7.2' openpyxl 'memory-profiler==0.60.0' 'scikit-learn==1.1.*'\n\n # Add Python deps for Spark Connect.\n pip install 'grpcio==1.67.0' 'grpcio-status==1.67.0' 'protobuf==5.29.1' 'googleapis-common-protos==1.65.0' 'graphviz==0.20.3'\n\n # Add torch as a testing dependency for TorchDistributor\n pip install 'torch==2.0.1' 'torchvision==0.15.2' torcheval\n - name: List Python packages\n run: python -m pip list\n - name: Run tests\n env:\n SPARK_TESTING: 1\n SPARK_SKIP_CONNECT_COMPAT_TESTS: 1\n SPARK_CONNECT_TESTING_REMOTE: sc://localhost\n run: |\n # Make less noisy\n cp conf/log4j2.properties.template conf/log4j2.properties\n sed -i 's/rootLogger.level = info/rootLogger.level = warn/g' conf/log4j2.properties\n\n # Start a Spark Connect server for local\n PYTHONPATH="python/lib/pyspark.zip:python/lib/py4j-0.10.9.9-src.zip:$PYTHONPATH" ./sbin/start-connect-server.sh \\n --driver-java-options "-Dlog4j.configurationFile=file:$GITHUB_WORKSPACE/conf/log4j2.properties" \\n --jars "`find connector/protobuf/target -name spark-protobuf-*SNAPSHOT.jar`,`find connector/avro/target -name spark-avro*SNAPSHOT.jar`" \\n --conf spark.sql.execution.arrow.pyspark.validateSchema.enabled=false\n\n # Checkout to branch-3.5 
to run that branch's tests.\n cd ..\n git clone --single-branch --branch branch-3.5 $GITHUB_SERVER_URL/$GITHUB_REPOSITORY spark-3.5\n cd spark-3.5\n\n # Several tests related to catalog require running them sequentially, e.g., writing a table in a listener.\n # Run branch-3.5 tests\n ./python/run-tests --parallelism=1 --python-executables=python3 --modules pyspark-connect\n # None of the tests are dependent on each other in the Pandas API on Spark, so run them in parallel\n ./python/run-tests --parallelism=1 --python-executables=python3 --modules pyspark-pandas-connect,pyspark-pandas-slow-connect\n - name: Upload test results to report\n if: always()\n uses: actions/upload-artifact@v4\n with:\n name: test-results-spark-connect-python-only\n path: "**/target/test-reports/*.xml"\n - name: Upload Spark Connect server log file\n if: ${{ !success() }}\n uses: actions/upload-artifact@v4\n with:\n name: unit-tests-log-spark-connect-python-only\n path: logs/*.out\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_connect35.yml
build_python_connect35.yml
YAML
5,064
0.95
0.076923
0.247706
vue-tools
353
2024-03-20T09:55:55.314857
GPL-3.0
false
e6c0d3b29f803eb910fec07f74781833
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python with old dependencies)"\n\non:\n schedule:\n - cron: '0 9 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-minimum",\n "PYTHON_TO_TEST": "python3.9"\n }\n jobs: >-\n {\n "pyspark": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_minimum.yml
build_python_minimum.yml
YAML
1,344
0.95
0.065217
0.418605
vue-tools
974
2023-10-17T17:27:22.468396
BSD-3-Clause
false
905abdc6cf8b52684c96d9ea872af434
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python 3.11, Numpy 2.1.3)"\n\non:\n schedule:\n - cron: '0 3 */3 * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "numpy-213",\n "PYTHON_TO_TEST": "python3.11"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_numpy_2.1.3.yml
build_python_numpy_2.1.3.yml
YAML
1,374
0.95
0.06383
0.409091
awesome-app
601
2025-05-22T01:54:28.429177
Apache-2.0
false
b25163be0eb9ced4594d1dd7c1356848
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, Python PS with old dependencies)"\n\non:\n schedule:\n - cron: '0 10 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-ps-minimum",\n "PYTHON_TO_TEST": "python3.9"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_ps_minimum.yml
build_python_ps_minimum.yml
YAML
1,387
0.95
0.06383
0.409091
node-utils
273
2025-05-01T17:51:58.745745
MIT
false
65003696ed195c2c17a147588d4fd0fe
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Python-only (master, PyPy 3.10)"\n\non:\n schedule:\n - cron: '0 15 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "pypy-310",\n "PYTHON_TO_TEST": "pypy3"\n }\n jobs: >-\n {\n "pyspark": "true",\n "pyspark-pandas": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_python_pypy3.10.yml
build_python_pypy3.10.yml
YAML
1,352
0.95
0.06383
0.409091
node-utils
556
2025-03-20T16:49:27.198318
GPL-3.0
false
930b626a4af92ac00bb4679520533e50
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / RocksDB as UI Backend (master, Hadoop 3, JDK 17, Scala 2.13)"\n\non:\n schedule:\n - cron: '0 6 * * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-311",\n "PYTHON_TO_TEST": "python3.11",\n "LIVE_UI_LOCAL_STORE_DIR": "/tmp/kvStore"\n }\n jobs: >-\n {\n "build": "true",\n "pyspark": "true",\n "sparkr": "true",\n "yarn": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_rockdb_as_ui_backend.yml
build_rockdb_as_ui_backend.yml
YAML
1,486
0.95
0.06
0.382979
awesome-app
563
2024-01-05T05:31:18.188466
MIT
false
bc3e526f1385143175095de60581c1ae
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\nname: "Build / SparkR-only (master, 4.4.3, windows-2022)"\n\non:\n schedule:\n - cron: '0 17 * * *'\n workflow_dispatch:\n\njobs:\n build:\n name: "Build module: sparkr"\n runs-on: windows-2022\n timeout-minutes: 120\n if: github.repository == 'apache/spark'\n steps:\n - name: Download winutils Hadoop binary\n uses: actions/checkout@v4\n with:\n repository: cdarlint/winutils\n - name: Move Hadoop winutil into home directory\n run: |\n Move-Item -Path hadoop-3.3.6 -Destination ~\\n - name: Checkout Spark repository\n uses: actions/checkout@v4\n - name: Cache Maven local repository\n uses: actions/cache@v4\n with:\n path: ~/.m2/repository\n key: build-sparkr-windows-maven-${{ hashFiles('**/pom.xml') }}\n restore-keys: |\n build-sparkr-windows-maven-\n - name: Install Java 17\n uses: actions/setup-java@v4\n with:\n distribution: zulu\n java-version: 17\n - name: Install R 4.4.3\n uses: r-lib/actions/setup-r@v2\n with:\n r-version: 4.4.3\n - name: Install R dependencies\n run: |\n Rscript -e "install.packages(c('knitr', 'rmarkdown', 'testthat', 'e1071', 'survival', 'arrow', 'xml2'), repos='https://cloud.r-project.org/')"\n Rscript -e "pkg_list <- as.data.frame(installed.packages()[,c(1, 3:4)]); pkg_list[is.na(pkg_list$Priority), 1:2, drop = FALSE]"\n shell: cmd\n # The SparkR build does not need Python. However, it shows warnings when the Python version is too low during\n # the attempt to look up Python Data Sources for session initialization. The runner image may ship a Python\n # version that Spark does not support (the older windows-2019 image included Python 3.7), so we explicitly\n # install a supported Python, see SPARK-47116.\n - name: Install Python 3.11\n uses: actions/setup-python@v5\n with:\n python-version: '3.11'\n architecture: x64\n - name: Build Spark\n run: |\n rem 1. '-Djna.nosys=true' is required to avoid kernel32.dll load failure.\n rem See SPARK-28759.\n rem 2. Ideally we should check the tests related to Hive in SparkR as well (SPARK-31745).\n rem 3. setup-java installs Maven 3.8.7 but does not allow changing its version, so overwrite\n rem Maven version as a workaround.\n mvn -DskipTests -Psparkr -Djna.nosys=true package -Dmaven.version=3.8.7\n shell: cmd\n - name: Run SparkR tests\n run: |\n set HADOOP_HOME=%USERPROFILE%\hadoop-3.3.6\n set PATH=%HADOOP_HOME%\bin;%PATH%\n .\bin\spark-submit2.cmd --driver-java-options "-Dlog4j.configurationFile=file:///%CD:\=/%/R/log4j2.properties" --conf spark.hadoop.fs.defaultFS="file:///" R\pkg\tests\run-all.R\n shell: cmd\n env:\n NOT_CRAN: true\n SPARKR_SUPPRESS_DEPRECATION_WARNING: 1\n # See SPARK-27848. Currently installing some dependent packages causes\n # "(converted from warning) unable to identify current timezone 'C':" for an unknown reason.\n # This environment variable works around the issue so SparkR can be tested against a higher version.\n R_REMOTES_NO_ERRORS_FROM_WARNINGS: true\n
dataset_sample\yaml\apache_spark\.github\workflows\build_sparkr_window.yml
build_sparkr_window.yml
YAML
3,923
0.95
0.064516
0.274725
vue-tools
461
2024-08-24T15:44:08.258303
BSD-3-Clause
false
1c822f2267f6895ca4170b5d5f0a532c
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: "Build / Unix Domain Socket (master, Hadoop 3, JDK 17, Scala 2.13)"\n\non:\n schedule:\n - cron: '0 1 */3 * *'\n workflow_dispatch:\n\njobs:\n run-build:\n permissions:\n packages: write\n name: Run\n uses: ./.github/workflows/build_and_test.yml\n if: github.repository == 'apache/spark'\n with:\n java: 17\n branch: master\n hadoop: hadoop3\n envs: >-\n {\n "PYSPARK_IMAGE_TO_TEST": "python-311",\n "PYTHON_TO_TEST": "python3.11",\n "PYSPARK_UDS_MODE": "true"\n }\n jobs: >-\n {\n "build": "true",\n "docs": "true",\n "pyspark": "true",\n "sparkr": "true",\n "tpcds-1g": "true",\n "docker-integration-tests": "true",\n "yarn": "true"\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\build_uds.yml
build_uds.yml
YAML
1,572
0.95
0.056604
0.36
react-lib
523
2024-05-17T23:20:50.416818
GPL-3.0
false
e9b77261dd784b92400507d4cc91a7ba
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n# Intentionally has a general name, because the test status check\n# created in GitHub Actions currently picks any associated workflow at random.\n# So, the name was changed to make sense in that context too.\n# See also https://github.community/t/specify-check-suite-when-creating-a-checkrun/118380/10\n\nname: "On pull requests"\non: pull_request_target\n\njobs:\n label:\n name: Label pull requests\n runs-on: ubuntu-latest\n permissions:\n contents: read\n pull-requests: write\n steps:\n - uses: actions/labeler@v5\n with:\n repo-token: "${{ secrets.GITHUB_TOKEN }}"\n sync-labels: true\n
dataset_sample\yaml\apache_spark\.github\workflows\labeler.yml
labeler.yml
YAML
1,409
0.95
0.05
0.621622
vue-tools
581
2025-05-31T05:52:23.916396
MIT
false
57c296c66918b758df31da6a7f255093
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: GitHub Pages deployment\n\non:\n push:\n branches:\n - master\n\nconcurrency:\n group: 'docs preview'\n cancel-in-progress: false\n\njobs:\n docs:\n name: Build and deploy documentation\n runs-on: ubuntu-latest\n permissions:\n id-token: write\n pages: write\n environment:\n name: github-pages # https://github.com/actions/deploy-pages/issues/271\n env:\n SPARK_TESTING: 1 # Reduce some noise in the logs\n RELEASE_VERSION: 'In-Progress'\n if: github.repository == 'apache/spark'\n steps:\n - name: Checkout Spark repository\n uses: actions/checkout@v4\n with:\n repository: apache/spark\n ref: 'master'\n - name: Install Java 17\n uses: actions/setup-java@v4\n with:\n distribution: zulu\n java-version: 17\n - name: Install Python 3.9\n uses: actions/setup-python@v5\n with:\n python-version: '3.9'\n architecture: x64\n cache: 'pip'\n - name: Install Python dependencies\n run: |\n pip install 'sphinx==4.5.0' mkdocs 'pydata_sphinx_theme>=0.13' sphinx-copybutton nbsphinx numpydoc jinja2 markupsafe 'pyzmq<24.0.0' \\n ipython ipython_genutils sphinx_plotly_directive 'numpy>=1.20.0' pyarrow 'pandas==2.2.3' 'plotly>=4.8' 'docutils<0.18.0' \\n 'flake8==3.9.0' 'mypy==1.8.0' 'pytest==7.1.3' 'pytest-mypy-plugins==1.9.3' 'black==23.12.1' \\n 'pandas-stubs==1.2.0.53' 'grpcio==1.67.0' 'grpcio-status==1.67.0' 'protobuf==5.29.1' 'grpc-stubs==1.24.11' 'googleapis-common-protos-stubs==2.2.0' \\n 'sphinxcontrib-applehelp==1.0.4' 'sphinxcontrib-devhelp==1.0.2' 'sphinxcontrib-htmlhelp==2.0.1' 'sphinxcontrib-qthelp==1.0.3' 'sphinxcontrib-serializinghtml==1.1.5'\n - name: Install Ruby for documentation generation\n uses: ruby/setup-ruby@v1\n with:\n ruby-version: '3.3'\n bundler-cache: true\n - name: Install Pandoc\n run: |\n sudo apt-get update -y\n sudo apt-get install pandoc\n - name: Install dependencies for documentation generation\n run: |\n cd docs\n gem install bundler -v 2.4.22 -n /usr/local/bin\n bundle install --retry=100\n - name: Run documentation build\n run: |\n sed -i".tmp1" 's/SPARK_VERSION:.*$/SPARK_VERSION: '"$RELEASE_VERSION"'/g' docs/_config.yml\n sed -i".tmp2" 's/SPARK_VERSION_SHORT:.*$/SPARK_VERSION_SHORT: '"$RELEASE_VERSION"'/g' docs/_config.yml\n sed -i".tmp3" "s/'facetFilters':.*$/'facetFilters': [\"version:$RELEASE_VERSION\"]/g" docs/_config.yml\n sed -i".tmp4" 's/__version__: str = .*$/__version__: str = "'"$RELEASE_VERSION"'"/' python/pyspark/version.py\n cd docs\n SKIP_RDOC=1 bundle exec jekyll build\n - name: Setup Pages\n uses: actions/configure-pages@v5\n - name: Upload artifact\n uses: actions/upload-pages-artifact@v3\n with:\n path: 'docs/_site'\n - name: Deploy to GitHub Pages\n id: deployment\n uses: actions/deploy-pages@v4\n
dataset_sample\yaml\apache_spark\.github\workflows\pages.yml
pages.yml
YAML
3,870
0.95
0.05102
0.191489
python-kit
332
2023-10-16T11:20:45.205389
GPL-3.0
false
f8de18aed6ebdc676b0bc58eb723ef2d
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: Publish snapshot\n\non:\n schedule:\n - cron: '0 0 * * *'\n workflow_dispatch:\n inputs:\n branch:\n description: 'list of branches to publish (JSON)'\n required: true\n # keep in sync with default value of strategy matrix 'branch'\n default: '["master", "branch-4.0", "branch-3.5"]'\n\njobs:\n publish-snapshot:\n if: github.repository == 'apache/spark'\n runs-on: ubuntu-latest\n strategy:\n fail-fast: false\n matrix:\n # keep in sync with default value of workflow_dispatch input 'branch'\n branch: ${{ fromJSON( inputs.branch || '["master", "branch-4.0", "branch-3.5"]' ) }}\n steps:\n - name: Checkout Spark repository\n uses: actions/checkout@v4\n with:\n ref: ${{ matrix.branch }}\n - name: Cache Maven local repository\n uses: actions/cache@v4\n with:\n path: ~/.m2/repository\n key: snapshot-maven-${{ hashFiles('**/pom.xml') }}\n restore-keys: |\n snapshot-maven-\n - name: Install Java 8 for branch-3.x\n if: matrix.branch == 'branch-3.5'\n uses: actions/setup-java@v4\n with:\n distribution: temurin\n java-version: 8\n - name: Install Java 17\n if: matrix.branch != 'branch-3.5'\n uses: actions/setup-java@v4\n with:\n distribution: temurin\n java-version: 17\n - name: Publish snapshot\n env:\n ASF_USERNAME: ${{ secrets.NEXUS_USER }}\n ASF_PASSWORD: ${{ secrets.NEXUS_PW }}\n GPG_KEY: "not_used"\n GPG_PASSPHRASE: "not_used"\n GIT_REF: ${{ matrix.branch }}\n MAVEN_MXM_OPT: 2g\n run: ./dev/create-release/release-build.sh publish-snapshot\n
dataset_sample\yaml\apache_spark\.github\workflows\publish_snapshot.yml
publish_snapshot.yml
YAML
2,455
0.95
0.081081
0.28169
node-utils
365
2024-07-22T23:33:42.894605
BSD-3-Clause
false
3c1b87fbe366f03192de4099709cb56c
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: Close stale PRs\n\non:\n schedule:\n - cron: "0 0 * * *"\n\njobs:\n stale:\n if: github.repository == 'apache/spark'\n runs-on: ubuntu-latest\n steps:\n - uses: actions/stale@c201d45ef4b0ccbd3bb0616f93bae13e73d0a080 # pin@v1.1.0\n with:\n repo-token: ${{ secrets.GITHUB_TOKEN }}\n stale-pr-message: >\n We're closing this PR because it hasn't been updated in a while.\n This isn't a judgement on the merit of the PR in any way. It's just\n a way of keeping the PR queue manageable.\n\n If you'd like to revive this PR, please reopen it and ask a\n committer to remove the Stale tag!\n days-before-stale: 100\n # Setting this to 0 is the same as setting it to 1.\n # See: https://github.com/actions/stale/issues/28\n days-before-close: 0\n
dataset_sample\yaml\apache_spark\.github\workflows\stale.yml
stale.yml
YAML
1,616
0.95
0.090909
0.5
react-lib
332
2024-03-25T12:22:22.866348
GPL-3.0
false
4d617dafe22ba4796b8f368ffe463fa4
#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nname: Update build status workflow\n\non:\n schedule:\n - cron: "*/15 * * * *"\n\njobs:\n update:\n name: Update build status\n runs-on: ubuntu-latest\n permissions:\n actions: read\n checks: write\n steps:\n - name: "Update build status"\n uses: actions/github-script@v7\n with:\n github-token: ${{ secrets.GITHUB_TOKEN }}\n script: |\n const endpoint = 'GET /repos/:owner/:repo/pulls?state=:state'\n const params = {\n owner: context.repo.owner,\n repo: context.repo.repo,\n state: 'open'\n }\n\n // See https://docs.github.com/en/graphql/reference/enums#mergestatestatus\n const maybeReady = ['behind', 'clean', 'draft', 'has_hooks', 'unknown', 'unstable'];\n\n // Iterate open PRs\n for await (const prs of github.paginate.iterator(endpoint,params)) {\n // Each page\n for await (const pr of prs.data) {\n console.log('SHA: ' + pr.head.sha)\n console.log(' Mergeable status: ' + pr.mergeable_state)\n if (pr.mergeable_state == null || maybeReady.includes(pr.mergeable_state)) {\n const checkRuns = await github.request('GET /repos/{owner}/{repo}/commits/{ref}/check-runs', {\n owner: context.repo.owner,\n repo: context.repo.repo,\n ref: pr.head.sha\n })\n\n // Iterate over the GitHub Checks in the PR\n for await (const cr of checkRuns.data.check_runs) {\n if (cr.name == 'Build' && cr.conclusion != "action_required") {\n // The check output text contains the request parameters, encoded as JSON.\n const params = JSON.parse(cr.output.text)\n\n // Get the workflow run in the forked repository\n let run\n try {\n run = await github.request('GET /repos/{owner}/{repo}/actions/runs/{run_id}', params)\n } catch (error) {\n console.error(error)\n // Run not found. This can happen when the PR author removes GitHub Actions runs or\n // disables GitHub Actions.\n continue\n }\n\n // Keep syncing the status of the checks\n if (run.data.status == 'completed') {\n console.log(' Run ' + cr.id + ': set status (' + run.data.status + ') and conclusion (' + run.data.conclusion + ')')\n const response = await github.request('PATCH /repos/{owner}/{repo}/check-runs/{check_run_id}', {\n owner: context.repo.owner,\n repo: context.repo.repo,\n check_run_id: cr.id,\n output: cr.output,\n status: run.data.status,\n conclusion: run.data.conclusion,\n details_url: run.data.details_url\n })\n } else {\n console.log(' Run ' + cr.id + ': set status (' + run.data.status + ')')\n const response = await github.request('PATCH /repos/{owner}/{repo}/check-runs/{check_run_id}', {\n owner: context.repo.owner,\n repo: context.repo.repo,\n check_run_id: cr.id,\n output: cr.output,\n status: run.data.status,\n details_url: run.data.details_url\n })\n }\n\n break\n }\n }\n }\n }\n }\n
dataset_sample\yaml\apache_spark\.github\workflows\update_build_status.yml
update_build_status.yml
YAML
4,650
0.95
0.092593
0.272727
python-kit
308
2023-08-16T20:12:31.529064
BSD-3-Clause
false
42a95f1b314af307845b8e80510a80fe
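The github-script block in the update_build_status.yml record above parses cr.output.text with JSON.parse and forwards the result directly to GET /repos/{owner}/{repo}/actions/runs/{run_id}, so the 'Build' check must have written at least those three placeholders into its output. A hypothetical payload sketch (the values are placeholders; the real content is whatever the forked-repo workflow recorded), shown as YAML since JSON is a subset of YAML:

# hypothetical cr.output.text consumed by the script
owner: some-fork-owner   # the PR author's fork
repo: spark
run_id: 123456789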
highlighter: rouge\nmarkdown: kramdown\ngems:\n - jekyll-redirect-from\n\n# For some reason kramdown seems to behave differently on different\n# OS/packages wrt encoding. So we hard code this config.\nkramdown:\n entity_output: numeric\n\nplugins:\n - jekyll-redirect-from\n\ninclude:\n - _static\n - _modules\n - _images\n - _sources\n\n# These allow the documentation to be updated with newer releases\n# of Spark, Scala.\nSPARK_VERSION: 4.1.0-SNAPSHOT\nSPARK_VERSION_SHORT: 4.1.0\nSCALA_BINARY_VERSION: "2.13"\nSCALA_VERSION: "2.13.16"\nSPARK_ISSUE_TRACKER_URL: https://issues.apache.org/jira/browse/SPARK\nSPARK_GITHUB_URL: https://github.com/apache/spark\n# Before a new release, we should:\n# 1. update the `version` array for the new Spark documentation\n# on https://github.com/algolia/docsearch-configs/blob/master/configs/apache_spark.json.\n# 2. update the value of `facetFilters.version` in `algoliaOptions` on the new release branch.\n# Otherwise, after release, the search results are always based on the latest documentation\n# (https://spark.apache.org/docs/latest/) even when visiting the documentation of previous releases.\nDOCSEARCH_SCRIPT: |\n docsearch({\n apiKey: 'd62f962a82bc9abb53471cb7b89da35e',\n appId: 'RAI69RXRSK',\n indexName: 'apache_spark',\n inputSelector: '#docsearch-input',\n enhancedSearchInput: true,\n algoliaOptions: {\n 'facetFilters': ["version:latest"]\n },\n debug: false // Set debug to true if you want to inspect the dropdown\n });\n\npermalink: 404.html\n\nexclude: ['README.md']\n
dataset_sample\yaml\apache_spark\docs\_config.yml
_config.yml
YAML
1,547
0.8
0.040816
0.232558
vue-tools
978
2023-10-25T10:16:15.685016
BSD-3-Clause
false
49f8a540e732173b86b860674e3c9992
#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the "License"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\napiVersion: scheduling.k8s.io/v1\nkind: PriorityClass\nmetadata:\n name: high\nvalue: 100\n---\napiVersion: scheduling.k8s.io/v1\nkind: PriorityClass\nmetadata:\n name: medium\nvalue: 50\n---\napiVersion: scheduling.k8s.io/v1\nkind: PriorityClass\nmetadata:\n name: low\nvalue: 0\n
dataset_sample\yaml\apache_spark\resource-managers\kubernetes\integration-tests\src\test\resources\volcano\priorityClasses.yml
priorityClasses.yml
YAML
1,051
0.95
0.060606
0.484848
python-kit
244
2024-11-26T05:38:29.148431
Apache-2.0
true
cf962b7ba34385cae8f91d8594034349
# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the "License"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsite_name: Spark SQL, Built-in Functions\ntheme: readthedocs\nnav:\n - 'Functions': 'index.md'\nmarkdown_extensions:\n - toc:\n anchorlink: True\n
dataset_sample\yaml\apache_spark\sql\mkdocs.yml
mkdocs.yml
YAML
922
0.95
0.090909
0.666667
vue-tools
125
2024-02-28T02:08:34.338537
GPL-3.0
false
44226ccac0556dade687ea9791417349
---\nignore:\n - "testsuite"\n\ncoverage:\n # range for color spectrum display, red=50%, green=80%\n range: "50...80"\n precision: 1\n\n status:\n project:\n default:\n informational: true\n patch:\n default:\n informational: true\n changes: false\n
dataset_sample\yaml\aptos-labs_aptos-core\codecov.yml
codecov.yml
YAML
270
0.8
0.058824
0.066667
vue-tools
822
2024-08-20T19:56:49.624366
BSD-3-Clause
false
8e671ac02063bbeab70b0259728780bb
files:\n - source: /developer-docs-site/docs/**/*.m*\n translation: /developer-docs-site/translations/%locale%/%original_path%/%original_file_name%\n
dataset_sample\yaml\aptos-labs_aptos-core\crowdin.yml
crowdin.yml
YAML
150
0.8
0
0
python-kit
562
2024-02-03T08:30:34.114157
MIT
false
6cf5219a1db9c4872fd2fb1964b29daf
version: 2\nupdates:\n - package-ecosystem: "cargo"\n directory: "/"\n schedule:\n interval: "daily"\n reviewers:\n - "aptos-labs/security"\n - "aptos-labs/prod-eng"\n labels:\n - "CICD:run-e2e-tests"\n - "dependencies"\n - "rust"\n open-pull-requests-limit: 0\n
dataset_sample\yaml\aptos-labs_aptos-core\.github\dependabot.yml
dependabot.yml
YAML
294
0.7
0
0
awesome-app
593
2025-04-29T17:11:33.240862
Apache-2.0
false
a017b2cc7049d3633fccc79762e51c0a
# Inherit from the internal-ops repo\n# https://runs-on.com/configuration/repo-config/\n_extends: internal-ops\n
dataset_sample\yaml\aptos-labs_aptos-core\.github\runs-on.yml
runs-on.yml
YAML
109
0.8
0
0.666667
python-kit
573
2024-07-10T00:42:57.476207
Apache-2.0
false
0d80da488b3cbfdfabb4c7f7cf3ca7ef
name: "Docker buildx setup"\ndescription: Sets up buildx for docker builds\n\nruns:\n using: composite\n steps:\n - name: setup docker context for buildx\n id: buildx-context\n shell: bash\n run: docker context create builders\n\n - name: setup docker buildx\n uses: aptos-labs/setup-buildx-action@7952e9cf0debaf1f3f3e5dc7d9c5ea6ececb127e # pin v2.4.0\n with:\n endpoint: builders\n version: v0.11.0\n custom-name: "core-builder"\n keep-state: true\n config-inline: |\n [worker.oci]\n gc = true\n gckeepstorage = 900000000000 # Use 900GB out of 1TB for builder storage\n [[worker.oci.gcpolicy]]\n keepBytes = 700000000000 # Use 700GB out of 900GB for cache storage\n keepDuration = 604800 # Keep cache for 7 days\n filters = [ "type==source.local", "type==exec.cachemount", "type==source.git.checkout"]\n [[worker.oci.gcpolicy]]\n all = true\n keepBytes = 900000000000\n
dataset_sample\yaml\aptos-labs_aptos-core\.github\actions\buildx-setup\action.yml
action.yml
YAML
1,027
0.8
0.172414
0
node-utils
842
2024-07-09T14:37:08.280309
MIT
false
92318b263041ab5a4fbedca14e16e684
name: "Get the latest docker image"\ndescription: |\n Get the latest built docker image from the given branch\n\ninputs:\n branch:\n description: "The branch to check"\n required: true\n variants:\n description: "The variants to check, as a space-separated string, e.g. 'performance failpoints'"\n required: false\n\noutputs:\n IMAGE_TAG:\n description: "The latest docker image tag for the given branch and variants"\n value: ${{ steps.determine-test-image-tag.outputs.IMAGE_TAG }}\n\nruns:\n using: composite\n steps:\n - uses: actions/checkout@v4\n with:\n ref: ${{ inputs.branch }}\n path: checkout_branch\n fetch-depth: 0\n - uses: ./checkout_branch/.github/actions/python-setup # use python-setup from that branch\n with:\n pyproject_directory: checkout_branch/testsuite\n - name: Determine image tag\n id: determine-test-image-tag\n # Forge relies on the default and failpoints variants\n run: |\n variants=(${{ inputs.variants }}) # split the variants string into an array\n variants_args=()\n for variant in "${variants[@]}"; do\n variants_args+=("--variant" "$variant")\n done\n ./testrun find_latest_image.py "${variants_args[@]}"\n shell: bash\n working-directory: checkout_branch/testsuite # the checkout_branch is a subdirectory\n
dataset_sample\yaml\aptos-labs_aptos-core\.github\actions\get-latest-docker-image-tag\action.yml
action.yml
YAML
1,344
0.95
0.05
0.027027
vue-tools
443
2025-02-21T08:03:10.730957
MIT
true
9e1aaf74f770d13bcfa5e382384e778b
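A hedged usage sketch for the composite action in the record above; the @main ref and job layout are assumptions, while the branch/variants inputs and the IMAGE_TAG output come from the action.yml itself:

jobs:
  pick-image:
    runs-on: ubuntu-latest
    steps:
      - id: latest-image
        # hypothetical ref; the action lives at .github/actions/get-latest-docker-image-tag in this repo
        uses: aptos-labs/aptos-core/.github/actions/get-latest-docker-image-tag@main
        with:
          branch: main
          variants: "failpoints performance"
      - run: echo "Latest tag: ${{ steps.latest-image.outputs.IMAGE_TAG }}"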
name: "Install grpcurl"\ndescription: |\n Installs grpcurl https://github.com/fullstorydev/grpcurl\ninputs:\n install_directory:\n description: "Where to install grpcurl binary. Defaults to github.workspace."\n required: false\n\nruns:\n using: composite\n steps:\n - name: Install grpcurl\n shell: bash\n run: ${{ github.action_path }}/install_grpcurl.sh\n - name: Add grpcurl to install directory and path\n shell: bash\n run: |\n if [ -z "${INSTALL_DIRECTORY}" ]; then\n INSTALL_DIRECTORY=${{ github.workspace }}\n else\n mv grpcurl $INSTALL_DIRECTORY\n fi\n echo "${INSTALL_DIRECTORY}" | tee -a $GITHUB_PATH\n env:\n INSTALL_DIRECTORY: ${{ inputs.install_directory }}\n
dataset_sample\yaml\aptos-labs_aptos-core\.github\actions\install-grpcurl\action.yml
action.yml
YAML
742
0.95
0.04
0
vue-tools
819
2025-02-08T03:18:51.036736
GPL-3.0
false
35b74daccb716384605b96a8be4ab678
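A minimal caller sketch for the install-grpcurl action above, assuming the local path implied by the record's file location; install_directory is optional and defaults to github.workspace per the action's own input description:

steps:
  - uses: actions/checkout@v4
  - uses: ./.github/actions/install-grpcurl
    with:
      install_directory: /usr/local/bin   # optional override
  - run: grpcurl -version   # smoke test; grpcurl takes single-dash flags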
name: "Bump aptos-node cargo version"\ndescription: |\n Bumps the aptos-node cargo version against the aptos-core branch name.\ninputs:\n release_tag:\n description: "The release tag which includes the version to bump"\n required: true\n aptos_node_cargo_toml:\n description: "The path to the aptos-node Cargo.toml file"\n required: true\n\nruns:\n using: composite\n steps:\n - name: Bump aptos-node-version\n shell: bash\n run: |\n python3 ${{ github.action_path }}/bump_aptos_node_version.py\n env:\n RELEASE_TAG: ${{ inputs.release_tag }}\n APTOS_NODE_CARGO_TOML: ${{ inputs.aptos_node_cargo_toml }}\n
dataset_sample\yaml\aptos-labs_aptos-core\.github\actions\release-aptos-node\action.yml
action.yml
YAML
640
0.85
0
0
vue-tools
433
2024-01-06T15:19:37.536463
Apache-2.0
false
f867255888ed0c98507b34e0f7f93fab
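A hedged caller sketch for the release-aptos-node action above; both input values are placeholders (the real tag format and Cargo.toml path are whatever the release workflow supplies):

steps:
  - uses: actions/checkout@v4
  - uses: ./.github/actions/release-aptos-node   # local path per the record above
    with:
      release_tag: aptos-node-v1.2.3                 # placeholder tag
      aptos_node_cargo_toml: aptos-node/Cargo.toml   # placeholder path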
blank_issues_enabled: true\ncontact_links:\n - name: Questions and Help (on Aptos Developer Discussions)\n url: https://github.com/aptos-labs/aptos-developer-discussions/discussions\n about: Support and other questions are handled by the team and the community on Aptos Developer Discussions.\n - name: Questions, Help and Chat (on Discord)\n url: https://discord.gg/aptosnetwork\n about: Contact the development team, contributors and community on Discord\n
dataset_sample\yaml\aptos-labs_aptos-core\.github\ISSUE_TEMPLATE\config.yml
config.yml
YAML
465
0.8
0
0
vue-tools
9
2023-10-24T21:06:23.708562
GPL-3.0
false
f14f46b3653a37244a1913a05dd7ea08
name: Fuzzer Data Update\n\non:\n push:\n branches:\n - 'main'\n paths:\n - 'testsuite/fuzzer/data/**'\n workflow_dispatch:\n\npermissions:\n contents: read\n id-token: write # Required for GCP Workload Identity federation\n\njobs:\n update-fuzzer-data:\n runs-on: runs-on,cpu=16,family=m6id,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }}\n steps:\n - name: Checkout repository\n uses: actions/checkout@v4\n\n - name: Set up Rust\n uses: aptos-labs/aptos-core/.github/actions/rust-setup@main\n\n - name: Authenticate to Google Cloud\n uses: google-github-actions/auth@v2\n with:\n workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}\n service_account: ${{ secrets.GCP_SERVICE_ACCOUNT_EMAIL }}\n\n - name: 'Set up Cloud SDK'\n uses: 'google-github-actions/setup-gcloud@v2'\n with:\n version: '>= 418.0.0'\n\n # The fuzzers for which we have existing seed corpus are:\n # - move_aptosvm_publish_and_run\n # - move_bytecode_verifier_compiled_modules\n # - move_bytecode_verifier_compiled_modules_seed_corpus\n # All of them reuse the same seed corpus, so we only need to download/upload one of them.\n - name: Download existing corpus\n run: |\n gcloud storage cp gs://aptos-core-corpora/move_aptosvm_publish_and_run_seed_corpus.zip move_aptosvm_publish_and_run_seed_corpus.zip\n unzip -q move_aptosvm_publish_and_run_seed_corpus.zip -d move_aptosvm_publish_and_run_seed_corpus\n\n - name: Generate runnable states\n run: |\n chmod +x ./fuzz.sh\n ./fuzz.sh block-builder generate_runnable_states_recursive testsuite/fuzzer/data/0x1/ move_aptosvm_publish_and_run_seed_corpus\n\n - name: Create and upload new corpus\n run: |\n zip -r move_aptosvm_publish_and_run_seed_corpus.zip move_aptosvm_publish_and_run_seed_corpus\n gcloud storage cp move_aptosvm_publish_and_run_seed_corpus.zip gs://aptos-core-corpora/move_aptosvm_publish_and_run_seed_corpus.zip\n gcloud storage objects update gs://aptos-core-corpora/move_aptosvm_publish_and_run_seed_corpus.zip --add-acl=publicRead\n
dataset_sample\yaml\aptos-labs_aptos-core\.github\workflows\fuzzer-data-update.yml
fuzzer-data-update.yml
YAML
2,192
0.8
0.036364
0.108696
awesome-app
91
2024-01-02T13:26:02.185470
GPL-3.0
false
ecd9d9cc8f9955909676ab8baf4cf1de
## Implementation of: https://github.com/marketplace/actions/lychee-broken-link-checker\n\nname: Aptos GitHub Links Checker\n\non:\n repository_dispatch:\n workflow_dispatch:\n schedule:\n - cron: "00 18 * * *"\n\npermissions: \n # contents: write # only for delete-branch option \n issues: write \n pull-requests: write \n\njobs:\n linkChecker:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n\n - name: Link Checker\n id: lychee\n uses: lycheeverse/lychee-action@v1.5.4\n env:\n GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}\n\n - name: Create Issue From File\n if: env.lychee_exit_code != 0\n uses: peter-evans/create-issue-from-file@v4\n with:\n title: Link Checker Report\n content-filepath: ./lychee/out.md\n labels: report, automated issue\n assignees: clay-aptos
dataset_sample\yaml\aptos-labs_aptos-core\.github\workflows\links.yml
links.yml
YAML
868
0.8
0.058824
0.068966
react-lib
476
2023-12-20T10:06:12.150248
GPL-3.0
false
befcd097ce2c4fbcbf1a248f57eb12f2
# Severities: info, warning, [error, critical]\n# The last 2 levels are high urgency\n\nglobal:\n\nroute:\n group_by: ["instance", "kubernetes_pod_name", "role"]\n\n # When a new group of alerts is created by an incoming alert, wait at\n # least 'group_wait' to send the initial notification.\n # This ensures that multiple alerts for the same group that start\n # firing shortly after one another are batched together in the first\n # notification.\n group_wait: 30s\n\n # Once the first notification has been sent, wait 'group_interval' to send a batch\n # of new alerts that started firing for that group.\n group_interval: 5m\n\n # If an alert has successfully been sent, wait 'repeat_interval' to\n # resend it.\n repeat_interval: 10m\n\n # A default receiver\n receiver: "default"\n\n # The child route trees.\n # https://prometheus.io/docs/alerting/latest/configuration/#route\n routes: {{ .Values.monitoring.alertmanager.alertRouteTrees | toJson }}\n\n# A list of notification receivers\n# https://prometheus.io/docs/alerting/latest/configuration/#receiver\nreceivers: {{ .Values.monitoring.alertmanager.alertReceivers | toJson }}\n
dataset_sample\yaml\aptos-labs_aptos-core\terraform\helm\monitoring\files\alertmanager.yml
alertmanager.yml
YAML
1,125
0.8
0.060606
0.64
vue-tools
776
2024-06-26T10:37:43.074413
Apache-2.0
false
5b39abb65ef6759a67d8c0869c5cafef
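The alertmanager.yml record above leaves its routing trees and receivers to Helm values rendered with toJson. A minimal sketch of what those values might look like, following the upstream Alertmanager route/receiver schema (the receiver names, channel, and webhook URL are placeholders):

monitoring:
  alertmanager:
    alertRouteTrees:
      - match:
          severity: critical
        receiver: pagers
    alertReceivers:
      - name: default
      - name: pagers
        slack_configs:
          - api_url: https://hooks.slack.com/services/PLACEHOLDER
            channel: '#oncall'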
global:\n scrape_interval: 15s\n evaluation_interval: 15s\n external_labels:\n chain_name: {{ .Values.chain.name }}\n {{- if .Values.validator.name }}\n owner: {{ .Values.validator.name }}\n {{- else if .Values.fullnode.name }}\n owner: {{ .Values.fullnode.name }}\n {{- else }}\n owner: release:{{ .Release.Name }}\n {{- end }}\n\n# Alertmanager configuration\nalerting:\n alertmanagers:\n - static_configs:\n - targets:\n - localhost:9093\n\n# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.\nrule_files:\n{{- range $path, $_ := .Files.Glob "files/rules/*.yml" }}\n - {{ base $path }}\n{{- end }}\n\nscrape_configs:\n{{ if .Values.monitoring.prometheus.fullKubernetesScrape }}\n- job_name: 'kubernetes-apiservers'\n scheme: https\n tls_config:\n ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n\n kubernetes_sd_configs:\n - role: endpoints\n\n # Keep only the default/kubernetes service endpoints for the https port. This\n # will add targets for each API server which Kubernetes adds an endpoint to\n # the default/kubernetes service.\n metric_relabel_configs:\n - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]\n action: keep\n regex: default;kubernetes;https\n - source_labels: [__name__]\n action: drop\n regex: '(.+)_request_duration_seconds_bucket'\n - target_label: owner\n {{- if .Values.validator.name }}\n replacement: {{ .Values.validator.name }}\n {{- else if .Values.fullnode.name }}\n replacement: {{ .Values.fullnode.name }}\n {{- else }}\n replacement: {{ .Release.Name }}\n {{- end }}\n{{ end }}\n\n- job_name: 'kubernetes-nodes'\n scheme: https\n tls_config:\n ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n\n kubernetes_sd_configs:\n - role: node\n\n {{ if not .Values.monitoring.prometheus.fullKubernetesScrape }}\n metric_relabel_configs:\n - source_labels: [namespace]\n action: keep\n regex: "{{ .Release.Namespace }}"\n # Explicitly drop spammy metrics\n - source_labels: [__name__]\n regex: 'storage_operation_duration_seconds_bucket'\n action: drop\n {{ end }}\n\n relabel_configs:\n - action: labelmap\n regex: __meta_kubernetes_node_label_(.+)\n - target_label: __address__\n replacement: kubernetes.default.svc:443\n - source_labels: [__meta_kubernetes_node_name]\n regex: (.+)\n target_label: __metrics_path__\n replacement: /api/v1/nodes/${1}/proxy/metrics\n\n- job_name: 'kubernetes-cadvisor'\n scheme: https\n tls_config:\n ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n\n kubernetes_sd_configs:\n - role: node\n\n relabel_configs:\n - target_label: __address__\n replacement: kubernetes.default.svc:443\n - source_labels: [__meta_kubernetes_node_name]\n regex: (.+)\n target_label: __metrics_path__\n replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor\n\n {{ if not .Values.monitoring.prometheus.fullKubernetesScrape }}\n # Only keep container task state for key containers\n metric_relabel_configs:\n - source_labels: [__name__, container]\n action: drop\n regex: container_tasks_state;!validator|!fullnode\n - source_labels: [container]\n action: drop\n regex: calico.*|csi.*|ebs.*|chaos.*|aws-node|node-driver-registrar\n {{ end }}\n\n# Scrape config for service endpoints.\n#\n# The relabeling allows the actual service scrape 
endpoint to be configured\n# via the following annotations:\n#\n# * `prometheus.io/scrape`: Only scrape services that have a value of `true`\n# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need\n# to set this to `https` & most likely set the `tls_config` of the scrape config.\n# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.\n# * `prometheus.io/port`: If the metrics are exposed on a different port to the\n# service then set this appropriately.\n- job_name: 'kubernetes-service-endpoints'\n\n kubernetes_sd_configs:\n - role: endpoints\n\n relabel_configs:\n - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]\n action: keep\n regex: true\n - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]\n action: replace\n target_label: __scheme__\n regex: (https?)\n - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]\n action: replace\n target_label: __metrics_path__\n regex: (.+)\n - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]\n action: replace\n target_label: __address__\n regex: ([^:]+)(?::\d+)?;(\d+)\n replacement: $1:$2\n - action: labelmap\n regex: __meta_kubernetes_service_label_(.+)\n - source_labels: [__meta_kubernetes_namespace]\n action: replace\n target_label: kubernetes_namespace\n - source_labels: [__meta_kubernetes_service_name]\n action: replace\n target_label: kubernetes_name\n - source_labels: [__meta_kubernetes_pod_node_name]\n action: replace\n target_label: kubernetes_node\n\n # Drop some redundant labels from kube-state-metrics\n metric_relabel_configs:\n - action: labeldrop\n regex: uid|container_id\n # Drop tmpfs metrics from node-exporter\n - source_labels: [fstype]\n regex: tmpfs\n action: drop\n\n # Scrape config for pods\n #\n # The relabeling allows the actual pod scrape endpoint to be configured via the\n # following annotations:\n #\n # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`\n # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.\n # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.\n- job_name: "kubernetes-pods"\n\n kubernetes_sd_configs:\n - role: pod\n\n relabel_configs:\n - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]\n action: keep\n regex: true\n - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]\n action: replace\n target_label: __metrics_path__\n regex: (.+)\n - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]\n action: replace\n regex: ([^:]+)(?::\d+)?;(\d+)\n replacement: ${1}:${2}\n target_label: __address__\n - source_labels: [__meta_kubernetes_namespace]\n action: replace\n target_label: namespace\n - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]\n action: replace\n target_label: role\n - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]\n action: replace\n target_label: instance\n - source_labels: [__meta_kubernetes_pod_name]\n action: replace\n target_label: kubernetes_pod_name\n # Explicitly drop all vector metrics\n - source_labels: [namespace]\n regex: 'vector'\n action: drop\n\n{{ if .Values.monitoring.prometheus.remote_write.enabled }}\n{{ with .Values.monitoring.prometheus.remote_write }}\nremote_write:\n - url: {{ .url }}\n sigv4:\n region: {{ .region }}\n queue_config:\n max_samples_per_send: 1000\n max_shards: 200\n capacity: 2500\n{{ end }}\n{{ end }}\n\n
dataset_sample\yaml\aptos-labs_aptos-core\terraform\helm\monitoring\files\prometheus.yml
prometheus.yml
YAML
7,296
0.8
0.057778
0.142857
python-kit
744
2025-02-17T02:09:17.505033
BSD-3-Clause
false
bed8e5729f8321c10742c4d008585339
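Per the comments embedded in the prometheus.yml record above, the kubernetes-pods scrape job only keeps pods that opt in via annotations, and the path/port annotations override the defaults. A minimal pod-metadata sketch (the path and port values are placeholders):

metadata:
  annotations:
    prometheus.io/scrape: "true"    # required by the keep relabel rule
    prometheus.io/path: "/metrics"  # optional override of the metrics path
    prometheus.io/port: "9101"      # optional override of the scrape port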
groups:\n- name: "Aptos alerts"\n rules:\n{{- if .Values.validator.name }}\n # consensus\n - alert: Zero Block Commit Rate\n expr: rate(aptos_consensus_last_committed_round{role="validator"}[1m]) == 0 OR absent(aptos_consensus_last_committed_round{role="validator"})\n for: 20m\n labels:\n severity: error\n summary: "The block commit rate is low"\n annotations:\n - alert: High local timeout rate\n expr: rate(aptos_consensus_timeout_count{role="validator"}[1m]) > 0.5\n for: 20m\n labels:\n severity: warning\n summary: "Consensus timeout rate is high"\n annotations:\n - alert: High consensus error rate\n expr: rate(aptos_consensus_error_count{role="validator"}[1m]) / on (role) rate(consensus_duration_count{op='main_loop', role="validator"}[1m]) > 0.25\n for: 20m\n labels:\n severity: warning\n summary: "Consensus error rate is high"\n annotations:\n{{- end }}\n # State sync alerts\n - alert: State sync is not making progress\n expr: rate(aptos_state_sync_version{type="synced"}[5m]) == 0 OR absent(aptos_state_sync_version{type="synced"})\n for: 5m\n labels:\n severity: error\n summary: "State sync is not making progress (i.e., the synced version is not increasing!)"\n annotations:\n - alert: State sync is lagging significantly\n expr: (aptos_data_client_highest_advertised_data{data_type="transactions"} - on(kubernetes_pod_name, role) aptos_state_sync_version{type="synced"}) > 1000000\n for: 5m\n labels:\n severity: error\n summary: "State sync is lagging significantly (i.e., the lag is greater than 1 million versions)"\n annotations:\n\n # Mempool alerts\n - alert: Mempool has no active upstream peers\n expr: (sum by (kubernetes_pod_name) (aptos_mempool_active_upstream_peers_count)) == 0\n for: 3m\n labels:\n severity: error\n summary: "Mempool has no active upstream peers (unable to forward transactions to anyone!)"\n annotations:\n - alert: Mempool is at >80% capacity (count)\n expr: aptos_core_mempool_index_size{index="system_ttl"} > 1600000 # assumes default mempool size 2_000_000\n for: 5m\n labels:\n severity: warning\n summary: "Mempool count is at >80% capacity (it may soon become full!)"\n annotations:\n - alert: Mempool is at >80% capacity (bytes)\n expr: aptos_core_mempool_index_size{index="size_bytes"} > 1717986918 # assumes default mempool size 2 * 1024 * 1024 * 1024\n for: 5m\n labels:\n severity: warning\n summary: "Mempool bytes is at >80% capacity (it may soon become full!)"\n annotations:\n - alert: Mempool is growing at a significant rate (count)\n expr: rate(aptos_core_mempool_index_size{index="system_ttl"}[1m]) > 60000 # 3% growth per minute - assumes default mempool size 2_000_000\n for: 10m\n labels:\n severity: warning\n summary: "Mempool count is growing at a significant rate (it may soon become full!)"\n annotations:\n - alert: Mempool is growing at a significant rate (bytes)\n expr: rate(aptos_core_mempool_index_size{index="size_bytes"}[1m]) > 64424509 # 3% growth per minute - assumes default mempool size 2 * 1024 * 1024 * 1024\n for: 10m\n labels:\n severity: warning\n summary: "Mempool bytes is growing at a significant rate (it may soon become full!)"\n annotations:\n\n # Networking alerts\n - alert: Validator Connected Peers\n expr: 0 == min(aptos_network_peers{state="connected", role_type="validator", role="validator"})\n for: 15m\n labels:\n severity: error\n summary: "Validator node has zero connected peers"\n annotations:\n\n # Storage core metrics\n - alert: Validator Low Disk Space (warning)\n expr: 
(kubelet_volume_stats_capacity_bytes{persistentvolumeclaim=~".*(validator|fullnode)-e.*"} - kubelet_volume_stats_used_bytes) / 1024 / 1024 / 1024 < 200\n for: 1h\n labels:\n severity: warning\n summary: "Less than 200 GB of free space on Aptos Node."\n annotations:\n description: "(This is a warning, deal with it in working hours.) A validator or fullnode pod has less than 200 GB of disk space. Take these steps:\n 1. If only a few nodes have this issue, it might be that they are not typically spec'd, or are customized differently; \\n most likely an expansion of the volume is needed soon. Talk to the PE team. Otherwise, it's a bigger issue.\n 2. Pass this issue on to the storage team. If you are the storage team, read on.\n 3. Go to the dashboard and look for the stacked-up column family sizes. \\n If the total size on that chart can't justify the low free disk space, we need to log in to a node to see if something other than the AptosDB is eating up disk. \\n Start from things under /opt/aptos/data.\n 4. Otherwise, if the total size on that chart is the majority of the disk consumption, zoom out and look for anomalies -- sudden increases overall or on a few \\n specific Column Families, etc. Also check the average size of each type of data. Reason about the anomaly with changes in recent releases in mind.\n 5. If everything made sense, it's a bigger issue: somehow our gas schedule didn't stop state explosion before an alert was triggered. Our recommended disk \\n spec and/or default pruning configuration, as well as the storage gas schedule, need updates. Discuss with the ecosystem team and send out a PR on the docs site, \\n form a plan to inform the node operator community and prepare for an on-chain proposal to update the gas schedule."\n - alert: Validator Very Low Disk Space (critical)\n expr: (kubelet_volume_stats_capacity_bytes{persistentvolumeclaim=~".*(validator|fullnode)-e.*"} - kubelet_volume_stats_used_bytes) / 1024 / 1024 / 1024 < 50\n for: 5m\n labels:\n severity: critical\n summary: "Less than 50 GB of free space on Aptos Node."\n annotations:\n description: "A validator or fullnode pod has less than 50 GB of disk space -- that's dangerously low. \\n 1. A warning-level alert for disk space under 200 GB should have fired at least a few days ago; search on Slack and understand why it wasn't dealt with.\n 2. Search in the code for the runbook of the warning alert, and quickly go through that to determine if it's a bug. Involve the storage team and other teams accordingly.\n If no useful information is found, evaluate the trend of disk usage growth: how much longer can we run? If it won't last the night, you have these options to mitigate this:\n 1. Expand the disk if it's a cloud volume.\n 2. Shorten the pruner windows. Before that, find the latest version of these https://github.com/aptos-labs/aptos-core/blob/48cc64df8a64f2d13012c10d8bd5bf25d94f19dc/config/src/config/storage_config.rs#L166-L218 \\n and read carefully the comments on the prune window config entries -- set safe values.\n 3. If you believe this is happening on nodes that are not run by us, involve the PE / Community / Ecosystem teams to coordinate efforts needed on those nodes.\n "\n - alert: AptosDB API Success Rate\n expr: sum by(kubernetes_pod_name) (rate(aptos_storage_api_latency_seconds_count{result="Ok"}[1m])) / sum by(kubernetes_pod_name) (rate(aptos_storage_api_latency_seconds_count[1m])) < 0.99 # 99%\n for: 5m\n labels:\n severity: error\n summary: "AptosDB API success rate dropped."\n annotations:\n description: "AptosDB APIs started to return errors.\n This must be looked at together with the alerts / dashboards of upper-level components -- it can unfortunately be either the cause or the victim of issues over there. Things you can do:\n 1. Go to the storage dashboard and see if the errors are on specific APIs.\n 2. Look at logs for storage-related errors, and understand whether they are hardware / dependency errors or logical errors in our code.\n 3. The previous steps should narrow down the possibilities; if it's still not clear at this point, read the code to understand if the error is caused by a bug or a change of input pattern.\n 4. See if changes in recent releases could have caused this issue.\n "\n - alert: RocksDB Read Latency\n expr: sum by (kubernetes_pod_name) (rate(aptos_schemadb_get_latency_seconds_sum[1m])) / sum by (kubernetes_pod_name) (rate(aptos_schemadb_get_latency_seconds_count[1m])) > 0.001 # 1 millisecond\n for: 5m\n labels:\n severity: warning\n summary: "RocksDB read latency increased."\n annotations:\n description: "RocksDB read latency increased, which indicates bad performance.\n If alerts on other components are not firing, this is probably not urgent. But things you can do:\n 1. On the system dashboard, see if we get a flat line on the IOPs panel -- it can be the disk being throttled. Either the node is not spec'd as expected, or we are using more IOPs than expected.\n 2. Check the traffic pattern on various dashboards: is there a sudden increase in traffic? Verify that on the storage dashboard by looking at the number of API calls, per API if needed.\n 3. Check the system dashboard to see if we are bottlenecked by memory (we rely heavily on the filesystem cache) or the CPU. It might be helpful to restart one of the nodes that's having this issue.\n\n 4. After all of those, remember that the threshold was set strictly initially, so if everything looks fine, we can change the alarm threshold.\n "\n # Logging alerts\n - alert: Logs Being Dropped\n expr: 1 < (rate(aptos_struct_log_queue_error[1m]) + rate(aptos_struct_log_send_error[1m]))\n for: 5m\n labels:\n severity: warning\n summary: "Logs being dropped"\n annotations:\n description: "Logging transmit error rate is high. \\n Check the logging dashboard; \\n there may be network issues, downstream throughput issues, or something wrong with Vector. \\n TODO: Runbook"\n
dataset_sample\yaml\aptos-labs_aptos-core\terraform\helm\monitoring\files\rules\alerts.yml
alerts.yml
YAML
9,874
0.8
0.204819
0.037037
node-utils
478
2024-12-31T22:58:26.084942
MIT
false
66d318a25720ac4603634323bb49d2f7
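The consensus alerts in the alerts.yml record above render only inside the {{- if .Values.validator.name }} guard. A minimal values sketch that would enable them (the name is a placeholder; any non-empty value satisfies the template guard):

validator:
  name: my-validator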
version: 2\n\nproject_name: trivy_canary_build\nbuilds:\n -\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - darwin\n - linux\n - windows\n goarch:\n - amd64\n - arm64\n ignore:\n - goos: windows\n goarch: arm64\n\narchives:\n -\n format: tar.gz\n name_template: >-\n {{ .ProjectName }}_{{ .Version }}_\n {{- if eq .Os "darwin" }}macOS\n {{- else}}{{- title .Os }}{{ end }}-\n {{- if eq .Arch "amd64" }}64bit\n {{- else if eq .Arch "arm64" }}ARM64\n {{- else }}{{ .Arch }}{{ end }}\n files:\n - README.md\n - LICENSE\n - contrib/*.tpl\n format_overrides:\n - goos: windows\n format: zip\n
dataset_sample\yaml\aquasecurity_trivy\goreleaser-canary.yml
goreleaser-canary.yml
YAML
840
0.8
0.073171
0
vue-tools
607
2024-01-24T07:09:52.833483
MIT
false
9043654b802f88b66e47b762d971c932
version: 2\n\nproject_name: trivy\nbuilds:\n - id: build-linux\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - linux\n goarch:\n - 386\n - arm\n - amd64\n - arm64\n - s390x\n - ppc64le\n goarm:\n - 7\n - id: build-bsd\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - freebsd\n goarch:\n # modernc.org/sqlite doesn't support freebsd/arm64, etc.\n - 386\n - amd64\n - id: build-macos\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - darwin\n goarch:\n - amd64\n - arm64\n goarm:\n - 7\n - id: build-windows\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - windows\n goarch:\n # modernc.org/sqlite doesn't support windows/386 and windows/arm, etc.\n - amd64\n goarm:\n - 7\n\nrelease:\n extra_files:\n - glob: ./bom.json\n discussion_category_name: Announcements\n\nnfpms:\n -\n formats:\n - deb\n - rpm\n vendor: "aquasecurity"\n homepage: "https://github.com/aquasecurity"\n maintainer: "Teppei Fukuda <knqyf263@gmail.com>"\n description: "A Fast Vulnerability Scanner for Containers"\n license: "Apache-2.0"\n file_name_template: >-\n {{ .ProjectName }}_{{ .Version }}_\n {{- if eq .Os "darwin" }}macOS\n {{- else if eq .Os "openbsd" }}OpenBSD\n {{- else if eq .Os "netbsd" }}NetBSD\n {{- else if eq .Os "freebsd" }}FreeBSD\n {{- else if eq .Os "dragonfly" }}DragonFlyBSD\n {{- else}}{{- title .Os }}{{ end }}-\n {{- if eq .Arch "amd64" }}64bit\n {{- else if eq .Arch "386" }}32bit\n {{- else if eq .Arch "arm" }}ARM\n {{- else if eq .Arch "arm64" }}ARM64\n {{- else if eq .Arch "ppc64le" }}PPC64LE\n {{- else }}{{ .Arch }}{{ end }}\n contents:\n - src: contrib/*.tpl\n dst: /usr/local/share/trivy/templates\n rpm:\n signature:\n key_file: '{{ .Env.GPG_FILE }}'\n\narchives:\n - id: archive\n format: tar.gz\n name_template: >-\n {{ .ProjectName }}_{{ .Version }}_\n {{- if eq .Os "darwin" }}macOS\n {{- else if eq .Os "linux" }}Linux\n {{- else if eq .Os "openbsd" }}OpenBSD\n {{- else if eq .Os "netbsd" }}NetBSD\n {{- else if eq .Os "freebsd" }}FreeBSD\n {{- else if eq .Os "dragonfly" }}DragonFlyBSD\n {{- else}}{{- .Os }}{{ end }}-\n {{- if eq .Arch "amd64" }}64bit\n {{- else if eq .Arch "386" }}32bit\n {{- else if eq .Arch "arm" }}ARM\n {{- else if eq .Arch "arm64" }}ARM64\n {{- else if eq .Arch "ppc64le" }}PPC64LE\n {{- else }}{{ .Arch }}{{ end }}\n files:\n - README.md\n - LICENSE\n - contrib/*.tpl\n format_overrides:\n - goos: windows\n format: zip\n\n\nbrews:\n -\n repository:\n owner: aquasecurity\n name: homebrew-trivy\n homepage: "https://github.com/aquasecurity/trivy"\n description: "Scanner for vulnerabilities in container images, file systems, and Git repositories, as well as for configuration issues"\n test: |\n system "#{bin}/trivy", "--version"\n\ndockers:\n - image_templates:\n - "docker.io/aquasec/trivy:{{ .Version }}-amd64"\n - "docker.io/aquasec/trivy:latest-amd64"\n - "ghcr.io/aquasecurity/trivy:{{ .Version }}-amd64"\n - "ghcr.io/aquasecurity/trivy:latest-amd64"\n - "public.ecr.aws/aquasecurity/trivy:latest-amd64"\n - 
"public.ecr.aws/aquasecurity/trivy:{{ .Version }}-amd64"\n use: buildx\n goos: linux\n goarch: amd64\n ids:\n - build-linux\n build_flag_templates:\n - "--label=org.opencontainers.image.title={{ .ProjectName }}"\n - "--label=org.opencontainers.image.description=A Fast Vulnerability Scanner for Containers"\n - "--label=org.opencontainers.image.vendor=Aqua Security"\n - "--label=org.opencontainers.image.version={{ .Version }}"\n - "--label=org.opencontainers.image.created={{ .Date }}"\n - "--label=org.opencontainers.image.source=https://github.com/aquasecurity/trivy"\n - "--label=org.opencontainers.image.revision={{ .FullCommit }}"\n - "--label=org.opencontainers.image.url=https://www.aquasec.com/products/trivy/"\n - "--label=org.opencontainers.image.documentation=https://trivy.dev/v{{ .Version }}/"\n - "--platform=linux/amd64"\n extra_files:\n - contrib/\n - image_templates:\n - "docker.io/aquasec/trivy:{{ .Version }}-arm64"\n - "docker.io/aquasec/trivy:latest-arm64"\n - "ghcr.io/aquasecurity/trivy:{{ .Version }}-arm64"\n - "ghcr.io/aquasecurity/trivy:latest-arm64"\n - "public.ecr.aws/aquasecurity/trivy:latest-arm64"\n - "public.ecr.aws/aquasecurity/trivy:{{ .Version }}-arm64"\n use: buildx\n goos: linux\n goarch: arm64\n ids:\n - build-linux\n build_flag_templates:\n - "--label=org.opencontainers.image.title={{ .ProjectName }}"\n - "--label=org.opencontainers.image.description=A Fast Vulnerability Scanner for Containers"\n - "--label=org.opencontainers.image.vendor=Aqua Security"\n - "--label=org.opencontainers.image.version={{ .Version }}"\n - "--label=org.opencontainers.image.created={{ .Date }}"\n - "--label=org.opencontainers.image.source=https://github.com/aquasecurity/trivy"\n - "--label=org.opencontainers.image.revision={{ .FullCommit }}"\n - "--label=org.opencontainers.image.url=https://www.aquasec.com/products/trivy/"\n - "--label=org.opencontainers.image.documentation=https://trivy.dev/v{{ .Version }}/"\n - "--platform=linux/arm64"\n extra_files:\n - contrib/\n - image_templates:\n - "docker.io/aquasec/trivy:{{ .Version }}-s390x"\n - "docker.io/aquasec/trivy:latest-s390x"\n - "ghcr.io/aquasecurity/trivy:{{ .Version }}-s390x"\n - "ghcr.io/aquasecurity/trivy:latest-s390x"\n - "public.ecr.aws/aquasecurity/trivy:latest-s390x"\n - "public.ecr.aws/aquasecurity/trivy:{{ .Version }}-s390x"\n use: buildx\n goos: linux\n goarch: s390x\n ids:\n - build-linux\n build_flag_templates:\n - "--label=org.opencontainers.image.title={{ .ProjectName }}"\n - "--label=org.opencontainers.image.description=A Fast Vulnerability Scanner for Containers"\n - "--label=org.opencontainers.image.vendor=Aqua Security"\n - "--label=org.opencontainers.image.version={{ .Version }}"\n - "--label=org.opencontainers.image.created={{ .Date }}"\n - "--label=org.opencontainers.image.source=https://github.com/aquasecurity/trivy"\n - "--label=org.opencontainers.image.revision={{ .FullCommit }}"\n - "--label=org.opencontainers.image.url=https://www.aquasec.com/products/trivy/"\n - "--label=org.opencontainers.image.documentation=https://trivy.dev/v{{ .Version }}/"\n - "--platform=linux/s390x"\n extra_files:\n - contrib/\n - image_templates:\n - "docker.io/aquasec/trivy:{{ .Version }}-ppc64le"\n - "docker.io/aquasec/trivy:latest-ppc64le"\n - "ghcr.io/aquasecurity/trivy:{{ .Version }}-ppc64le"\n - "ghcr.io/aquasecurity/trivy:latest-ppc64le"\n - "public.ecr.aws/aquasecurity/trivy:latest-ppc64le"\n - "public.ecr.aws/aquasecurity/trivy:{{ .Version }}-ppc64le"\n use: buildx\n goos: linux\n goarch: ppc64le\n ids:\n - build-linux\n 
build_flag_templates:\n - "--label=org.opencontainers.image.title={{ .ProjectName }}"\n - "--label=org.opencontainers.image.description=A Fast Vulnerability Scanner for Containers"\n - "--label=org.opencontainers.image.vendor=Aqua Security"\n - "--label=org.opencontainers.image.version={{ .Version }}"\n - "--label=org.opencontainers.image.created={{ .Date }}"\n - "--label=org.opencontainers.image.source=https://github.com/aquasecurity/trivy"\n - "--label=org.opencontainers.image.revision={{ .FullCommit }}"\n - "--label=org.opencontainers.image.url=https://www.aquasec.com/products/trivy/"\n - "--label=org.opencontainers.image.documentation=https://trivy.dev/v{{ .Version }}/"\n - "--platform=linux/ppc64le"\n extra_files:\n - contrib/\n\ndocker_manifests:\n - name_template: 'aquasec/trivy:{{ .Version }}'\n image_templates:\n - 'aquasec/trivy:{{ .Version }}-amd64'\n - 'aquasec/trivy:{{ .Version }}-arm64'\n - 'aquasec/trivy:{{ .Version }}-s390x'\n - 'aquasec/trivy:{{ .Version }}-ppc64le'\n - name_template: 'ghcr.io/aquasecurity/trivy:{{ .Version }}'\n image_templates:\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-amd64'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-arm64'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-s390x'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-ppc64le'\n - name_template: 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}'\n image_templates:\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-amd64'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-arm64'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-s390x'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-ppc64le'\n - name_template: 'aquasec/trivy:latest'\n image_templates:\n - 'aquasec/trivy:{{ .Version }}-amd64'\n - 'aquasec/trivy:{{ .Version }}-arm64'\n - 'aquasec/trivy:{{ .Version }}-s390x'\n - 'aquasec/trivy:{{ .Version }}-ppc64le'\n - name_template: 'ghcr.io/aquasecurity/trivy:latest'\n image_templates:\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-amd64'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-arm64'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-s390x'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-ppc64le'\n - name_template: 'public.ecr.aws/aquasecurity/trivy:latest'\n image_templates:\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-amd64'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-arm64'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-s390x'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-ppc64le'\n\nsigns:\n- cmd: cosign\n env:\n - COSIGN_EXPERIMENTAL=1\n signature: "${artifact}.sig"\n certificate: "${artifact}.pem"\n args:\n - "sign-blob"\n - "--oidc-issuer=https://token.actions.githubusercontent.com"\n - "--output-certificate=${certificate}"\n - "--output-signature=${signature}"\n - "${artifact}"\n - "--yes"\n artifacts: all\n output: true\n\ndocker_signs:\n- cmd: cosign\n env:\n - COSIGN_EXPERIMENTAL=1\n artifacts: manifests\n output: true\n args:\n - 'sign'\n - '${artifact}'\n - '--yes'\n
dataset_sample\yaml\aquasecurity_trivy\goreleaser.yml
goreleaser.yml
YAML
10,861
0.8
0.090032
0.006645
python-kit
85
2024-01-10T13:25:55.577894
Apache-2.0
false
b861bf47dafbbd42fc82a29c64ae2d90
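The signs section of the goreleaser.yml record above emits a keyless cosign signature (.sig) and certificate (.pem) per artifact. A hedged verification sketch as a workflow step, assuming cosign v2 and a hypothetical artifact name; the OIDC issuer matches the one passed to sign-blob above, while the identity regexp is an assumption about the signing workflow's identity:

- name: Verify a release artifact (sketch)
  run: |
    cosign verify-blob \
      --certificate trivy_X.Y.Z_Linux-64bit.tar.gz.pem \
      --signature trivy_X.Y.Z_Linux-64bit.tar.gz.sig \
      --certificate-oidc-issuer https://token.actions.githubusercontent.com \
      --certificate-identity-regexp 'https://github.com/aquasecurity/trivy/.*' \
      trivy_X.Y.Z_Linux-64bit.tar.gz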
site_name: Trivy\nsite_url: https://trivy.dev/\nsite_description: Trivy - All-in-one open source security scanner\ndocs_dir: docs/\nrepo_name: GitHub\nrepo_url: https://github.com/aquasecurity/trivy\nedit_uri: "blob/main/docs/"\n\nnav:\n - Home: index.md\n - Getting Started:\n - First steps: getting-started/index.md \n - Installation: getting-started/installation.md\n - Signature Verification: getting-started/signature-verification.md\n - FAQ: getting-started/faq.md\n - Tutorials:\n - Overview: tutorials/overview.md \n - CI/CD:\n - Overview: tutorials/integrations/index.md\n - GitHub Actions: tutorials/integrations/github-actions.md\n - CircleCI: tutorials/integrations/circleci.md\n - Travis CI: tutorials/integrations/travis-ci.md\n - GitLab CI: tutorials/integrations/gitlab-ci.md\n - Bitbucket Pipelines: tutorials/integrations/bitbucket.md\n - AWS CodePipeline: tutorials/integrations/aws-codepipeline.md\n - AWS Security Hub: tutorials/integrations/aws-security-hub.md\n - Azure: tutorials/integrations/azure-devops.md\n - Kubernetes:\n - Cluster Scanning: tutorials/kubernetes/cluster-scanning.md\n - Kyverno: tutorials/kubernetes/kyverno.md\n - GitOps: tutorials/kubernetes/gitops.md\n - Misconfiguration:\n - Terraform scanning: tutorials/misconfiguration/terraform.md\n - Custom Checks with Rego: tutorials/misconfiguration/custom-checks.md\n - Signing:\n - Vulnerability Scan Record Attestation: tutorials/signing/vuln-attestation.md\n - Shell:\n - Completion: tutorials/shell/shell-completion.md\n - Additional Resources:\n - Additional Resources: tutorials/additional-resources/references.md\n - Community References: tutorials/additional-resources/community.md\n - CKS Reference: tutorials/additional-resources/cks.md\n - Docs:\n - Overview: docs/index.md\n - Target:\n - Container Image: docs/target/container_image.md\n - Filesystem: docs/target/filesystem.md\n - Rootfs: docs/target/rootfs.md\n - Code Repository: docs/target/repository.md\n - Virtual Machine Image: docs/target/vm.md\n - Kubernetes: docs/target/kubernetes.md\n - SBOM: docs/target/sbom.md\n - Scanner:\n - Vulnerability: docs/scanner/vulnerability.md\n - Misconfiguration:\n - Overview: docs/scanner/misconfiguration/index.md\n - Policy:\n - Built-in Checks: docs/scanner/misconfiguration/check/builtin.md\n - Custom Checks:\n - Overview: docs/scanner/misconfiguration/custom/index.md\n - Data: docs/scanner/misconfiguration/custom/data.md\n - Combine: docs/scanner/misconfiguration/custom/combine.md\n - Selectors: docs/scanner/misconfiguration/custom/selectors.md\n - Schemas: docs/scanner/misconfiguration/custom/schema.md\n - Testing: docs/scanner/misconfiguration/custom/testing.md\n - Debugging Policies: docs/scanner/misconfiguration/custom/debug.md\n - Contribute Checks: docs/scanner/misconfiguration/custom/contribute-checks.md\n - Secret: docs/scanner/secret.md\n - License: docs/scanner/license.md\n - Coverage:\n - Overview: docs/coverage/index.md\n - OS:\n - Overview: docs/coverage/os/index.md\n - AlmaLinux: docs/coverage/os/alma.md\n - Alpine Linux: docs/coverage/os/alpine.md\n - Amazon Linux: docs/coverage/os/amazon.md\n - Azure Linux (CBL-Mariner): docs/coverage/os/azure.md\n - CentOS: docs/coverage/os/centos.md\n - Chainguard: docs/coverage/os/chainguard.md\n - Debian: docs/coverage/os/debian.md\n - Oracle Linux: docs/coverage/os/oracle.md\n - Photon OS: docs/coverage/os/photon.md\n - Red Hat: docs/coverage/os/rhel.md\n - Rocky Linux: docs/coverage/os/rocky.md\n - SUSE: docs/coverage/os/suse.md\n - Ubuntu: 
docs/coverage/os/ubuntu.md\n - Wolfi: docs/coverage/os/wolfi.md\n - Google Distroless (Images): docs/coverage/os/google-distroless.md\n - Language:\n - Overview: docs/coverage/language/index.md\n - C/C++: docs/coverage/language/c.md\n - Dart: docs/coverage/language/dart.md\n - .NET: docs/coverage/language/dotnet.md\n - Elixir: docs/coverage/language/elixir.md\n - Go: docs/coverage/language/golang.md\n - Java: docs/coverage/language/java.md\n - Node.js: docs/coverage/language/nodejs.md\n - PHP: docs/coverage/language/php.md\n - Python: docs/coverage/language/python.md\n - Ruby: docs/coverage/language/ruby.md\n - Rust: docs/coverage/language/rust.md\n - Swift: docs/coverage/language/swift.md\n - Julia: docs/coverage/language/julia.md\n - IaC:\n - Overview: docs/coverage/iac/index.md\n - Azure ARM Template: docs/coverage/iac/azure-arm.md\n - CloudFormation: docs/coverage/iac/cloudformation.md\n - Docker: docs/coverage/iac/docker.md\n - Helm: docs/coverage/iac/helm.md\n - Kubernetes: docs/coverage/iac/kubernetes.md\n - Terraform: docs/coverage/iac/terraform.md\n - Others:\n - Overview: docs/coverage/others/index.md\n - Bitnami Images: docs/coverage/others/bitnami.md\n - Conda: docs/coverage/others/conda.md\n - RPM Archives: docs/coverage/others/rpm.md\n - Kubernetes: docs/coverage/kubernetes.md\n - Configuration:\n - Overview: docs/configuration/index.md\n - Filtering: docs/configuration/filtering.md\n - Skipping Files: docs/configuration/skipping.md\n - Reporting: docs/configuration/reporting.md\n - Cache: docs/configuration/cache.md\n - Databases: docs/configuration/db.md\n - Others: docs/configuration/others.md\n - Supply Chain:\n - SBOM: docs/supply-chain/sbom.md\n - Attestation:\n - SBOM: docs/supply-chain/attestation/sbom.md\n - Cosign Vulnerability Scan Record: docs/supply-chain/attestation/vuln.md\n - SBOM Attestation in Rekor: docs/supply-chain/attestation/rekor.md\n - VEX:\n - Overview: docs/supply-chain/vex/index.md\n - VEX Repository: docs/supply-chain/vex/repo.md\n - Local VEX Files: docs/supply-chain/vex/file.md\n - VEX SBOM Reference: docs/supply-chain/vex/sbom-ref.md\n - VEX Attestation: docs/supply-chain/vex/oci.md\n - Compliance:\n - Built-in Compliance: docs/compliance/compliance.md\n - Custom Compliance: docs/compliance/contrib-compliance.md\n - Plugins:\n - Overview: docs/plugin/index.md\n - User guide: docs/plugin/user-guide.md\n - Developer guide: docs/plugin/developer-guide.md\n - Advanced:\n - Modules: docs/advanced/modules.md\n - Connectivity and Network considerations: docs/advanced/air-gap.md\n - Self-Hosting Trivy's Databases: docs/advanced/self-hosting.md\n - Container Image:\n - Embed in Dockerfile: docs/advanced/container/embed-in-dockerfile.md\n - Unpacked container image filesystem: docs/advanced/container/unpacked-filesystem.md\n - Private Docker Registries:\n - Overview: docs/advanced/private-registries/index.md\n - Docker Hub: docs/advanced/private-registries/docker-hub.md\n - AWS ECR (Elastic Container Registry): docs/advanced/private-registries/ecr.md\n - GCR (Google Container Registry): docs/advanced/private-registries/gcr.md\n - ACR (Azure Container Registry): docs/advanced/private-registries/acr.md\n - Self-Hosted: docs/advanced/private-registries/self.md\n - References:\n - Configuration:\n - CLI:\n - Overview: docs/references/configuration/cli/trivy.md\n - Clean: docs/references/configuration/cli/trivy_clean.md\n - Config: docs/references/configuration/cli/trivy_config.md\n - Convert: docs/references/configuration/cli/trivy_convert.md\n - Filesystem: 
docs/references/configuration/cli/trivy_filesystem.md\n - Image: docs/references/configuration/cli/trivy_image.md\n - Kubernetes: docs/references/configuration/cli/trivy_kubernetes.md\n - Module:\n - Module: docs/references/configuration/cli/trivy_module.md\n - Module Install: docs/references/configuration/cli/trivy_module_install.md\n - Module Uninstall: docs/references/configuration/cli/trivy_module_uninstall.md\n - Plugin:\n - Plugin: docs/references/configuration/cli/trivy_plugin.md\n - Plugin Info: docs/references/configuration/cli/trivy_plugin_info.md\n - Plugin Install: docs/references/configuration/cli/trivy_plugin_install.md\n - Plugin List: docs/references/configuration/cli/trivy_plugin_list.md\n - Plugin Run: docs/references/configuration/cli/trivy_plugin_run.md\n - Plugin Uninstall: docs/references/configuration/cli/trivy_plugin_uninstall.md\n - Plugin Update: docs/references/configuration/cli/trivy_plugin_update.md\n - Plugin Upgrade: docs/references/configuration/cli/trivy_plugin_upgrade.md\n - Plugin Search: docs/references/configuration/cli/trivy_plugin_search.md\n - Registry:\n - Registry: docs/references/configuration/cli/trivy_registry.md\n - Registry Login: docs/references/configuration/cli/trivy_registry_login.md\n - Registry Logout: docs/references/configuration/cli/trivy_registry_logout.md\n - Repository: docs/references/configuration/cli/trivy_repository.md\n - Rootfs: docs/references/configuration/cli/trivy_rootfs.md\n - SBOM: docs/references/configuration/cli/trivy_sbom.md\n - Server: docs/references/configuration/cli/trivy_server.md\n - Version: docs/references/configuration/cli/trivy_version.md\n - VEX:\n - VEX: docs/references/configuration/cli/trivy_vex.md\n - VEX Download: docs/references/configuration/cli/trivy_vex_repo_download.md\n - VEX Init: docs/references/configuration/cli/trivy_vex_repo_init.md\n - VEX List: docs/references/configuration/cli/trivy_vex_repo_list.md\n - VEX Repo: docs/references/configuration/cli/trivy_vex_repo.md\n - VM: docs/references/configuration/cli/trivy_vm.md\n - Config file: docs/references/configuration/config-file.md\n - Modes:\n - Standalone: docs/references/modes/standalone.md\n - Client/Server: docs/references/modes/client-server.md\n - Troubleshooting: docs/references/troubleshooting.md\n - Terminology: docs/references/terminology.md\n - Abbreviations: docs/references/abbreviations.md\n - Ecosystem: \n - Overview: ecosystem/index.md\n - CI/CD: ecosystem/cicd.md\n - IDE and Dev tools: ecosystem/ide.md\n - Production and Clouds: ecosystem/prod.md\n - Reporting: ecosystem/reporting.md\n - Contributing:\n - Principles: community/principles.md\n - How to contribute:\n - Issues: community/contribute/issue.md\n - Discussions: community/contribute/discussion.md\n - Pull Requests: community/contribute/pr.md\n - Contribute Rego Checks:\n - Overview: community/contribute/checks/overview.md\n - Add Service Support: community/contribute/checks/service-support.md\n - Maintainer:\n - Release Flow: community/maintainer/release-flow.md\n - Backporting: community/maintainer/backporting.md\n - Help Wanted: community/maintainer/help-wanted.md\n - Triage: community/maintainer/triage.md\n - Enterprise:\n - Comparison: commercial/compare.md\n - Contact Us: commercial/contact.md\ntheme:\n name: material\n custom_dir: docs/overrides\n language: "en"\n logo: imgs/logo-white.svg\n features:\n - navigation.tabs\n - navigation.tabs.sticky\n - navigation.sections\n - navigation.footer\n - content.action.edit\n - content.tabs.link\n - 
content.code.annotate\n - content.code.copy\n font:\n text: Inter\n\nmarkdown_extensions:\n - abbr\n - admonition\n - attr_list\n - def_list\n - footnotes\n - md_in_html\n - toc:\n permalink: true\n - pymdownx.highlight\n - pymdownx.details\n - pymdownx.magiclink\n - pymdownx.superfences:\n custom_fences:\n - name: mermaid\n class: mermaid\n format: !!python/name:pymdownx.superfences.fence_code_format\n - pymdownx.tabbed:\n alternate_style: true\n\nextra:\n generator: false\n version:\n method: mike\n provider: mike\n default: latest\n social:\n - icon: fontawesome/brands/x-twitter\n link: https://twitter.com/AquaTrivy\n - icon: fontawesome/brands/github\n link: https://github.com/aquasecurity/trivy\n analytics:\n provider: google\n property: G-V9LJGFH7GX\n\nplugins:\n - search\n - macros\n\n
dataset_sample\yaml\aquasecurity_trivy\mkdocs.yml
mkdocs.yml
YAML
13,780
0.8
0.003521
0
react-lib
303
2024-06-05T14:24:41.219592
GPL-3.0
false
3b7be8aa3147183150805a64b781090d
version: 2\nupdates:\n - package-ecosystem: github-actions\n directory: /\n schedule:\n interval: monthly\n groups:\n github-actions:\n patterns:\n - "*"\n - package-ecosystem: docker\n directory: /\n schedule:\n interval: monthly\n groups:\n docker:\n patterns:\n - "*"\n - package-ecosystem: gomod\n open-pull-requests-limit: 10\n directory: /\n schedule:\n interval: weekly\n ignore:\n - dependency-name: "github.com/aquasecurity/trivy-*" ## `trivy-*` dependencies are updated manually\n groups:\n aws:\n patterns:\n - "github.com/aws/*"\n docker:\n patterns:\n - "github.com/docker/*"\n - "github.com/moby/*"\n testcontainers:\n patterns:\n - "github.com/testcontainers/*"\n common:\n exclude-patterns:\n - "github.com/aquasecurity/trivy-*"\n patterns:\n - "*"
dataset_sample\yaml\aquasecurity_trivy\.github\dependabot.yml
dependabot.yml
YAML
931
0.8
0
0
python-kit
59
2025-05-08T00:47:54.774173
MIT
false
f673f4a39d726e31fbb498c31dd242c6
title: "<company name>"\nlabels: ["adopters"]\nbody:\n - type: textarea\n id: info\n attributes:\n label: "[Optional] How do you use Trivy?"\n validations:\n required: false\n - type: textarea\n id: info\n attributes:\n label: "[Optional] Can you provide us with a quote on your favourite part of Trivy? This may be used on the trivy.dev website, posted on Twitter (@AquaTrivy) or similar marketing material."\n validations:\n required: false\n - type: checkboxes\n attributes:\n label: "[Optional] Which targets are you scanning with Trivy?"\n options:\n - label: "Container Image"\n - label: "Filesystem"\n - label: "Git Repository"\n - label: "Virtual Machine Image"\n - label: "Kubernetes"\n - label: "AWS"\n - label: "SBOM"\n validations:\n required: false\n - type: checkboxes\n attributes:\n label: "[Optional] What kind of issues are scanning with Trivy?"\n options:\n - label: "Software Bill of Materials (SBOM)"\n - label: "Known vulnerabilities (CVEs)"\n - label: "IaC issues and misconfigurations"\n - label: "Sensitive information and secrets"\n - label: "Software licenses"\n - type: markdown\n attributes:\n value: |\n ## Get in touch\n We are always looking for \n * User feedback\n * Collaboration with other companies and organisations\n * Or just to have a chat with you about trivy. \n If any of this interests you or your marketing team, please reach out at: oss@aquasec.com\n We would love to hear from you!\n
dataset_sample\yaml\aquasecurity_trivy\.github\DISCUSSION_TEMPLATE\adopters.yml
adopters.yml
YAML
1,602
0.95
0.021277
0.085106
python-kit
706
2025-05-30T14:46:27.360115
Apache-2.0
false
b1a5b392878aed110a0594d6f8d07014
labels: ["kind/bug"]\nbody:\n - type: markdown\n attributes:\n value: |\n #### Note\n Feel free to raise a bug report if something doesn't work as expected.\n Please ensure that you're not creating a duplicate report by searching the [issues](https://github.com/aquasecurity/trivy/issues)/[discussions](https://github.com/aquasecurity/trivy/discussions) beforehand.\n If you see any false positives or false negatives, please file a ticket [here](https://github.com/aquasecurity/trivy/discussions/new?category=false-detection).\n \n **Do not open a GitHub issue, please.** Maintainers triage discussions and then create issues.\n \n Please also check [our contribution guidelines](https://trivy.dev/latest/community/contribute/discussion/).\n - type: textarea\n attributes:\n label: Description\n description: Briefly describe the problem you are having in a few paragraphs.\n validations:\n required: true\n - type: textarea\n attributes:\n label: Desired Behavior\n description: What did you expect to happen?\n validations:\n required: true\n - type: textarea\n attributes:\n label: Actual Behavior\n description: What happened instead?\n validations:\n required: true\n - type: textarea\n attributes:\n label: Reproduction Steps\n description: How do you trigger this bug? Please walk us through it step by step.\n value: |\n 1.\n 2.\n 3.\n ...\n render: bash\n validations:\n required: true\n - type: dropdown\n attributes:\n label: Target\n description: Which target are you scanning? It is equal to which subcommand you are using.\n options:\n - Container Image\n - Filesystem\n - Git Repository\n - Virtual Machine Image\n - Kubernetes\n - AWS\n - SBOM\n validations:\n required: false\n - type: dropdown\n attributes:\n label: Scanner\n description: Which scanner are you using?\n options:\n - Vulnerability\n - Misconfiguration\n - Secret\n - License\n validations:\n required: false\n - type: dropdown\n attributes:\n label: Output Format\n description: Which output format are you using?\n options:\n - Table\n - JSON\n - Template\n - SARIF\n - CycloneDX\n - SPDX\n validations:\n required: false\n - type: dropdown\n attributes:\n label: Mode\n description: Which mode are you using? Specify "Standalone" if you are not using `trivy server`.\n options:\n - Standalone\n - Client/Server\n validations:\n required: false\n - type: textarea\n attributes:\n label: Debug Output\n description: Output of run with `--debug`\n placeholder: "$ trivy <target> <subject> --debug"\n render: bash\n validations:\n required: true\n - type: input\n attributes:\n label: Operating System\n description: On what operating system are you running Trivy?\n placeholder: "e.g. macOS Big Sur"\n validations:\n required: true\n - type: textarea\n attributes:\n label: Version\n description: Output of `trivy --version`\n placeholder: "$ trivy --version"\n render: bash\n validations:\n required: true\n - type: checkboxes\n attributes:\n label: Checklist\n description: Have you tried the following?\n options:\n - label: Run `trivy clean --all`\n - label: Read [the troubleshooting](https://trivy.dev/latest/docs/references/troubleshooting/)\n - type: markdown\n attributes:\n value: |\n We would be happy if you could share how you are using Trivy [here](https://github.com/aquasecurity/trivy/discussions/new?category=adopters).
dataset_sample\yaml\aquasecurity_trivy\.github\DISCUSSION_TEMPLATE\bugs.yml
bugs.yml
YAML
3,770
0.95
0.02439
0.016393
react-lib
94
2023-10-07T07:05:23.894358
Apache-2.0
false
9cef24e975cae65fe199378d622b8b5b
labels: ["kind/documentation"]\nbody:\n - type: markdown\n attributes:\n value: |\n #### Note\n Feel free to create a docs report if something doesn't work as expected or is unclear in the documentation.\n Please ensure that you're not creating a duplicate report by searching the [issues](https://github.com/aquasecurity/trivy/issues)/[discussions](https://github.com/aquasecurity/trivy/discussions) beforehand.\n \n Please also check [our contribution guidelines](https://trivy.dev/latest/community/contribute/discussion/).\n - type: textarea\n attributes:\n label: Description\n description: Briefly describe the what has been unclear in the existing documentation\n validations:\n required: true\n - type: textarea\n attributes:\n label: Link\n description: Please provide a link to the current documentation or where you thought to find the information you were looking for\n validations:\n required: false\n - type: textarea\n attributes:\n label: Suggestions\n description: What would you like to have added or changed in the documentation?\n validations:\n required: true
dataset_sample\yaml\aquasecurity_trivy\.github\DISCUSSION_TEMPLATE\documentation.yml
documentation.yml
YAML
1,162
0.95
0.074074
0.037037
python-kit
429
2023-11-23T03:45:00.751842
Apache-2.0
false
998df2b236f95584b2c96e39200b631e
body:\n - type: markdown\n attributes:\n value: |\n #### Note\n Feel free to raise a bug report if something doesn't work as expected.\n Please ensure that you're not creating a duplicate report by searching the [issues](https://github.com/aquasecurity/trivy/issues)/[discussions](https://github.com/aquasecurity/trivy/discussions) beforehand.\n \n **Do not open a GitHub issue, please.** Maintainers triage discussions and then create issues.\n \n Please also check [our contribution guidelines](https://trivy.dev/latest/community/contribute/discussion/).\n - type: input\n attributes:\n label: IDs\n description: List the IDs of vulnerabilities, misconfigurations, secrets, or licenses that are either not detected or mistakenly detected.\n placeholder: "e.g. CVE-2021-44228, CVE-2022-22965"\n validations:\n required: true\n - type: textarea\n attributes:\n label: Description\n description: Describe the false detection.\n validations:\n required: true\n - type: textarea\n attributes:\n label: Reproduction Steps\n description: How do you trigger this bug? Please walk us through it step by step.\n value: |\n 1.\n 2.\n 3.\n ...\n render: bash\n validations:\n required: true\n - type: dropdown\n attributes:\n label: Target\n description: Which target are you scanning? It is equal to which subcommand you are using.\n options:\n - Container Image\n - Filesystem\n - Git Repository\n - Virtual Machine Image\n - Kubernetes\n - AWS\n - SBOM\n validations:\n required: true\n - type: dropdown\n attributes:\n label: Scanner\n description: Which scanner are you using?\n options:\n - Vulnerability\n - Misconfiguration\n - Secret\n - License\n validations:\n required: true\n - type: input\n attributes:\n label: Target OS\n description: What operating system are you scanning? Fill in this field if the scanning target is an operating system.\n placeholder: "Example: Ubuntu 22.04"\n validations:\n required: false\n - type: textarea\n attributes:\n label: Debug Output\n description: Output of run with `--debug`\n placeholder: "$ trivy <target> <subject> --debug"\n render: bash\n validations:\n required: true\n - type: textarea\n attributes:\n label: Version\n description: Output of `trivy --version`\n placeholder: "$ trivy --version"\n render: bash\n validations:\n required: true\n - type: checkboxes\n attributes:\n label: Checklist\n options:\n - label: Read [the documentation regarding wrong detection](https://trivy.dev/dev/community/contribute/discussion/#false-detection)\n - label: Ran Trivy with `-f json` that shows data sources and confirmed that the security advisory in data sources was correct\n validations:\n required: true\n - type: markdown\n attributes:\n value: |\n We would be happy if you could share how you are using Trivy [here](https://github.com/aquasecurity/trivy/discussions/new?category=adopters).
dataset_sample\yaml\aquasecurity_trivy\.github\DISCUSSION_TEMPLATE\false-detection.yml
false-detection.yml
YAML
3,185
0.95
0.031579
0.021277
python-kit
225
2024-07-02T18:21:48.103665
GPL-3.0
false
f45cd06ae21282bf07138fa3ee206265
labels: ["kind/feature"]\nbody:\n - type: markdown\n attributes:\n value: |\n #### Note\n Feel free to share your idea.\n Please ensure that you're not creating a duplicate ticket by searching the [issues](https://github.com/aquasecurity/trivy/issues)/[discussions](https://github.com/aquasecurity/trivy/discussions) beforehand.\n \n **Do not open a GitHub issue, please.** Maintainers triage discussions and then create issues.\n \n Please also check [our contribution guidelines](https://trivy.dev/latest/community/contribute/discussion/).\n - type: textarea\n attributes:\n label: Description\n description: Describe your idea.\n validations:\n required: true\n - type: dropdown\n attributes:\n label: Target\n description: Which target is your idea related to?\n options:\n - Container Image\n - Filesystem\n - Git Repository\n - Virtual Machine Image\n - Kubernetes\n - AWS\n - SBOM\n validations:\n required: false\n - type: dropdown\n attributes:\n label: Scanner\n description: Which scanner is your idea related to?\n options:\n - Vulnerability\n - Misconfiguration\n - Secret\n - License\n validations:\n required: false\n - type: markdown\n attributes:\n value: |\n We would be happy if you could share how you are using Trivy [here](https://github.com/aquasecurity/trivy/discussions/new?category=adopters).\n
dataset_sample\yaml\aquasecurity_trivy\.github\DISCUSSION_TEMPLATE\ideas.yml
ideas.yml
YAML
1,500
0.95
0.021277
0.044444
node-utils
739
2024-09-07T11:00:23.230911
BSD-3-Clause
false
2632301382507c5ac560f6a85fcc99b6
labels: ["triage/support"]\nbody:\n - type: markdown\n attributes:\n value: |\n #### Note\n If you have any troubles/questions, feel free to ask.\n Please ensure that you're not asking a duplicate question by searching the [issues](https://github.com/aquasecurity/trivy/issues)/[discussions](https://github.com/aquasecurity/trivy/discussions) beforehand.\n \n **Do not open a GitHub issue, please.** Maintainers triage discussions and then create issues.\n \n Please also check [our contribution guidelines](https://trivy.dev/latest/community/contribute/discussion/).\n - type: textarea\n attributes:\n label: Question\n description: What kind of problem are you facing? Or, what questions do you have?\n validations:\n required: true\n - type: dropdown\n attributes:\n label: Target\n description: Which target are you scanning? It is equal to which subcommand you are using.\n options:\n - Container Image\n - Filesystem\n - Git Repository\n - Virtual Machine Image\n - Kubernetes\n - AWS\n - SBOM\n validations:\n required: false\n - type: dropdown\n attributes:\n label: Scanner\n description: Which scanner are you using?\n options:\n - Vulnerability\n - Misconfiguration\n - Secret\n - License\n validations:\n required: false\n - type: dropdown\n attributes:\n label: Output Format\n description: Which output format are you using?\n options:\n - Table\n - JSON\n - Template\n - SARIF\n - CycloneDX\n - SPDX\n validations:\n required: false\n - type: dropdown\n attributes:\n label: Mode\n description: Which mode are you using? Specify "Standalone" if you are not using `trivy server`.\n options:\n - Standalone\n - Client/Server\n validations:\n required: false\n - type: input\n attributes:\n label: Operating System\n description: What operating system are you using?\n placeholder: "Example: macOS Big Sur"\n validations:\n required: false\n - type: textarea\n attributes:\n label: Version\n description: Output of `trivy --version`\n placeholder: "$ trivy --version"\n render: bash\n validations:\n required: false\n - type: markdown\n attributes:\n value: |\n We would be happy if you could share how you are using Trivy [here](https://github.com/aquasecurity/trivy/discussions/new?category=adopters.\n
dataset_sample\yaml\aquasecurity_trivy\.github\DISCUSSION_TEMPLATE\q-a.yml
q-a.yml
YAML
2,528
0.95
0.02381
0.02439
react-lib
580
2025-03-10T23:39:43.476974
GPL-3.0
false
266f98246c2841759166d3b387f69ceb
blank_issues_enabled: false\ncontact_links:\n - name: Report a false detection\n url: https://github.com/aquasecurity/trivy/discussions/new?category=false-detection\n about: Report false positives/negatives\n - name: Report a bug\n url: https://github.com/aquasecurity/trivy/discussions/new?category=bugs\n about: Report bugs\n - name: Enhance documentation\n url: https://github.com/aquasecurity/trivy/discussions/new?category=documentation\n about: Make suggestions to the documentation\n - name: Request a feature enhancement\n url: https://github.com/aquasecurity/trivy/discussions/new?category=ideas\n about: Share ideas for new features\n - name: Ask the community for help\n url: https://github.com/aquasecurity/trivy/discussions/new?category=q-a\n about: Ask questions and discuss with other community members
dataset_sample\yaml\aquasecurity_trivy\.github\ISSUE_TEMPLATE\config.yml
config.yml
YAML
836
0.8
0.125
0
python-kit
97
2025-06-06T05:44:17.999690
GPL-3.0
false
b6b2cb449481ea9f44ef8f622c9e4b57
Trivy_container_scanning:\n stage: test\n image:\n name: alpine:3.11\n variables:\n # Override the GIT_STRATEGY variable in your `.gitlab-ci.yml` file and set it to `fetch` if you want to provide a `clair-whitelist.yml`\n # file. See https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#overriding-the-container-scanning-template\n # for details\n GIT_STRATEGY: none\n IMAGE: "$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA"\n allow_failure: true\n before_script:\n - export TRIVY_VERSION=${TRIVY_VERSION:-v0.19.2}\n - apk add --no-cache curl docker-cli\n - curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin ${TRIVY_VERSION}\n - curl -sSL -o /tmp/trivy-gitlab.tpl https://github.com/aquasecurity/trivy/raw/${TRIVY_VERSION}/contrib/gitlab.tpl\n - trivy registry login --username "$CI_REGISTRY_USER" --password "$CI_REGISTRY_PASSWORD" $CI_REGISTRY\n script:\n - trivy --exit-code 0 --cache-dir .trivycache/ --no-progress --format template --template "@/tmp/trivy-gitlab.tpl" -o gl-container-scanning-report.json $IMAGE\n cache:\n paths:\n - .trivycache/\n artifacts:\n reports:\n container_scanning: gl-container-scanning-report.json\n dependencies: []\n only:\n refs:\n - branches\n
dataset_sample\yaml\aquasecurity_trivy\contrib\Trivy.gitlab-ci.yml
Trivy.gitlab-ci.yml
YAML
1,305
0.8
0.068966
0.103448
react-lib
397
2023-10-11T07:14:11.197468
MIT
false
e12b518edb172df7b6685d1b9ee81fda
server:\n addr: ":5001"\n certificate: "/certs/cert.pem"\n key: "/certs/key.pem"\n\ntoken:\n issuer: "Trivy auth server" # Must match issuer in the Registry config.\n expiration: 900\n\nusers:\n # Password is specified as a BCrypt hash. Use `htpasswd -nB USERNAME` to generate.\n "admin":\n password: "$2y$05$LO.vzwpWC5LZGqThvEfznu8qhb5SGqvBSWY1J3yZ4AxtMRZ3kN5jC" # badmin\n "test":\n password: "$2y$05$WuwBasGDAgr.QCbGIjKJaep4dhxeai9gNZdmBnQXqpKly57oNutya" # 123\n\nacl:\n - match: {account: "admin"}\n actions: ["*"]\n comment: "Admin has full access to everything."\n - match: {account: "test"}\n actions: ["pull"]\n comment: "User \"test\" can pull stuff."\n
dataset_sample\yaml\aquasecurity_trivy\integration\data\auth_config\config.yml
config.yml
YAML
670
0.8
0
0.05
vue-tools
565
2025-01-05T13:58:06.861457
GPL-3.0
false
fc65832260d9d60024b8629ceb478871
Resources:\n LambdaAPIRole:\n Type: "AWS::IAM::Role"\n Properties:\n RoleName: "${self:service}-${self:provider.stage}-LambdaAPI"\n Policies:\n - PolicyName: "${self:service}-${self:provider.stage}-lambda"\n PolicyDocument:\n Version: "2012-10-17"\n Statement:\n - Effect: Allow\n Action:\n - "logs:CreateLogStream"\n - "logs:CreateLogGroup"\n - "logs:PutLogEvents"\n Resource: !Sub "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${self:service}-${self:provider.stage}*:*"\n - !If\n - EnableCrossAccountSnsPublish\n - PolicyName: "${self:service}-${self:provider.stage}-asngen-sns-publish"\n PolicyDocument:\n Version: "2012-10-17"\n Statement:\n - Effect: Allow\n Action:\n - "SNS:Publish"\n Resource:\n - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-PurchaseOrder.fifo"\n - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-Vendor.fifo"\n - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-Customer.fifo"\n - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-Manufacturer.fifo"\n - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-ManufacturerItem.fifo"\n - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-Item.fifo"\n - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-VendorItem.fifo"\n - !Ref "AWS::NoValue"\n AssumeRolePolicyDocument:\n Version: "2012-10-17"\n Statement:\n - Effect: Allow\n Principal:\n Service:\n - "lambda.amazonaws.com"\n Action:\n - "sts:AssumeRole"\n\n\n\n\nConditions:\n EnableCrossAccountSnsPublish: !Equals\n - ${env:ALLOW_SNS_PUBLISH, true}\n - true\n
dataset_sample\yaml\aquasecurity_trivy\pkg\iac\scanners\cloudformation\test\examples\roles\roles.yml
roles.yml
YAML
2,348
0.7
0
0
node-utils
293
2024-01-16T08:43:36.960654
MIT
true
fb050c9663f9756b5b86ed51f819e474
ignore:\n- "**/*.pb.go"\n- "**/*.pb.gw.go"\n- "**/*generated.go"\n- "**/*generated.deepcopy.go"\n- "**/*_test.go"\n- "pkg/apis/client/.*"\n- "pkg/client/.*"\n- "vendor/.*"\n- "test/.*"\n- "**/mocks/*"\ncoverage:\n status:\n # we've found this not to be useful\n patch: off\n project:\n default:\n # allow test coverage to drop by 2%, assume that it's typically due to CI problems\n threshold: 2
dataset_sample\yaml\argoproj_argo-cd\.codecov.yml
.codecov.yml
YAML
405
0.8
0
0.105263
vue-tools
480
2023-09-14T10:31:02.244327
MIT
false
41822aea547918ac6f42d47434c9144e
image:\n file: .gitpod.Dockerfile\n\ntasks:\n - init: make mod-download-local dep-ui-local && GO111MODULE=off go install github.com/mattn/goreman@latest\n command: make start-test-k8s
dataset_sample\yaml\argoproj_argo-cd\.gitpod.yml
.gitpod.yml
YAML
183
0.7
0
0
python-kit
315
2024-06-03T09:05:21.985695
Apache-2.0
false
5523a96652859d938663c2b27e9c180f
extra:\n analytics:\n property: G-5Z1VTPDL73\n provider: google\nextra_css:\n- assets/versions.css\nextra_javascript:\n- assets/versions.js\nmarkdown_extensions:\n- markdown_include.include\n- codehilite:\n css_class: highlight\n- admonition\n- toc:\n permalink: true\n- pymdownx.superfences\nnav:\n- Overview: index.md\n- understand_the_basics.md\n- core_concepts.md\n- getting_started.md\n- Operator Manual:\n - operator-manual/index.md\n - operator-manual/architecture.md\n - operator-manual/installation.md\n - operator-manual/feature-maturity.md\n - operator-manual/core.md\n - operator-manual/declarative-setup.md\n - operator-manual/app-any-namespace.md\n - operator-manual/ingress.md\n - High Availability:\n - Overview: operator-manual/high_availability.md\n - Dynamic Cluster Distribution: operator-manual/dynamic-cluster-distribution.md\n - User Management:\n - operator-manual/user-management/index.md\n - operator-manual/user-management/auth0.md\n - operator-manual/user-management/microsoft.md\n - operator-manual/user-management/okta.md\n - operator-manual/user-management/onelogin.md\n - operator-manual/user-management/keycloak.md\n - operator-manual/user-management/openunison.md\n - operator-manual/user-management/google.md\n - operator-manual/user-management/zitadel.md\n - operator-manual/user-management/identity-center.md\n - operator-manual/rbac.md\n - Security:\n - Overview: operator-manual/security.md\n - snyk/index.md\n - operator-manual/signed-release-assets.md\n - operator-manual/tls.md\n - operator-manual/cluster-management.md\n - operator-manual/cluster-bootstrapping.md\n - operator-manual/secret-management.md\n - operator-manual/disaster_recovery.md\n - operator-manual/reconcile.md\n - operator-manual/webhook.md\n - operator-manual/health.md\n - operator-manual/resource_actions.md\n - operator-manual/custom_tools.md\n - operator-manual/custom-styles.md\n - operator-manual/ui-customization.md\n - operator-manual/metrics.md\n - operator-manual/web_based_terminal.md\n - operator-manual/config-management-plugins.md\n - operator-manual/deep_links.md\n - Notifications:\n - Overview: operator-manual/notifications/index.md\n - operator-manual/notifications/triggers.md\n - operator-manual/notifications/templates.md\n - operator-manual/notifications/functions.md\n - operator-manual/notifications/catalog.md\n - operator-manual/notifications/monitoring.md\n - operator-manual/notifications/subscriptions.md\n - operator-manual/notifications/troubleshooting.md\n - operator-manual/notifications/troubleshooting-commands.md\n - operator-manual/notifications/troubleshooting-errors.md\n - operator-manual/notifications/examples.md\n - Notification Services:\n - operator-manual/notifications/services/alertmanager.md\n - operator-manual/notifications/services/awssqs.md\n - operator-manual/notifications/services/email.md\n - operator-manual/notifications/services/github.md\n - operator-manual/notifications/services/googlechat.md\n - operator-manual/notifications/services/grafana.md\n - operator-manual/notifications/services/mattermost.md\n - operator-manual/notifications/services/newrelic.md\n - operator-manual/notifications/services/opsgenie.md\n - operator-manual/notifications/services/overview.md\n - operator-manual/notifications/services/pagerduty.md\n - operator-manual/notifications/services/pagerduty_v2.md\n - operator-manual/notifications/services/pushover.md\n - operator-manual/notifications/services/rocketchat.md\n - operator-manual/notifications/services/slack.md\n - 
operator-manual/notifications/services/teams.md\n - operator-manual/notifications/services/telegram.md\n - operator-manual/notifications/services/webex.md\n - operator-manual/notifications/services/webhook.md\n - operator-manual/troubleshooting.md\n - ApplicationSet:\n - Introduction: operator-manual/applicationset/index.md\n - Installations: operator-manual/applicationset/Getting-Started.md\n - Use Cases: operator-manual/applicationset/Use-Cases.md\n - Security: operator-manual/applicationset/Security.md\n - How ApplicationSet controller interacts with Argo CD: operator-manual/applicationset/Argo-CD-Integration.md\n - Generators:\n - operator-manual/applicationset/Generators.md\n - operator-manual/applicationset/Generators-List.md\n - operator-manual/applicationset/Generators-Cluster.md\n - operator-manual/applicationset/Generators-Git.md\n - operator-manual/applicationset/Generators-Matrix.md\n - operator-manual/applicationset/Generators-Merge.md\n - operator-manual/applicationset/Generators-SCM-Provider.md\n - operator-manual/applicationset/Generators-Cluster-Decision-Resource.md\n - operator-manual/applicationset/Generators-Pull-Request.md\n - operator-manual/applicationset/Generators-Post-Selector.md\n - operator-manual/applicationset/Generators-Plugin.md\n - Template fields:\n - operator-manual/applicationset/Template.md\n - operator-manual/applicationset/GoTemplate.md\n - Controlling Resource Modification: operator-manual/applicationset/Controlling-Resource-Modification.md\n - Application Pruning & Resource Deletion: operator-manual/applicationset/Application-Deletion.md\n - Progressive Syncs: operator-manual/applicationset/Progressive-Syncs.md\n - Git File Generator Globbing: operator-manual/applicationset/Generators-Git-File-Globbing.md\n - ApplicationSet Specification Reference: operator-manual/applicationset/applicationset-specification.md\n - ApplicationSet in any namespace: operator-manual/applicationset/Appset-Any-Namespace.md\n - Server Configuration Parameters:\n - operator-manual/server-commands/argocd-server.md\n - operator-manual/server-commands/argocd-application-controller.md\n - operator-manual/server-commands/argocd-repo-server.md\n - operator-manual/server-commands/argocd-dex.md\n - operator-manual/server-commands/additional-configuration-method.md\n - Upgrading:\n - operator-manual/upgrading/overview.md\n - operator-manual/upgrading/2.13-2.14.md\n - operator-manual/upgrading/2.12-2.13.md\n - operator-manual/upgrading/2.11-2.12.md\n - operator-manual/upgrading/2.10-2.11.md\n - operator-manual/upgrading/2.9-2.10.md\n - operator-manual/upgrading/2.8-2.9.md\n - operator-manual/upgrading/2.7-2.8.md\n - operator-manual/upgrading/2.6-2.7.md\n - operator-manual/upgrading/2.5-2.6.md\n - operator-manual/upgrading/2.4-2.5.md\n - operator-manual/upgrading/2.3-2.4.md\n - operator-manual/upgrading/2.2-2.3.md\n - operator-manual/upgrading/2.1-2.2.md\n - operator-manual/upgrading/2.0-2.1.md\n - operator-manual/upgrading/1.8-2.0.md\n - operator-manual/upgrading/1.7-1.8.md\n - operator-manual/upgrading/1.6-1.7.md\n - operator-manual/upgrading/1.5-1.6.md\n - operator-manual/upgrading/1.4-1.5.md\n - operator-manual/upgrading/1.3-1.4.md\n - operator-manual/upgrading/1.2-1.3.md\n - operator-manual/upgrading/1.1-1.2.md\n - operator-manual/upgrading/1.0-1.1.md\n - Project Specification Reference: operator-manual/project-specification.md\n- User Guide:\n - user-guide/index.md\n - user-guide/application_sources.md\n - user-guide/kustomize.md\n - user-guide/helm.md\n - user-guide/import.md\n 
- user-guide/jsonnet.md\n - user-guide/directory.md\n - user-guide/tool_detection.md\n - user-guide/projects.md\n - user-guide/private-repositories.md\n - user-guide/multiple_sources.md\n - GnuPG verification: user-guide/gpg-verification.md\n - user-guide/auto_sync.md\n - Diffing:\n - Diff Strategies: user-guide/diff-strategies.md\n - Diff Customization: user-guide/diffing.md\n - user-guide/orphaned-resources.md\n - user-guide/compare-options.md\n - user-guide/sync-options.md\n - user-guide/parameters.md\n - user-guide/environment-variables.md\n - user-guide/build-environment.md\n - user-guide/tracking_strategies.md\n - user-guide/resource_tracking.md\n - user-guide/resource_hooks.md\n - user-guide/selective_sync.md\n - user-guide/sync-waves.md\n - user-guide/sync_windows.md\n - user-guide/sync-kubectl.md\n - user-guide/skip_reconcile.md\n - Generating Applications with ApplicationSet: user-guide/application-set.md\n - user-guide/ci_automation.md\n - user-guide/app_deletion.md\n - user-guide/best_practices.md\n - user-guide/status-badge.md\n - user-guide/external-url.md\n - user-guide/extra_info.md\n - Notification subscriptions: user-guide/subscriptions.md\n - user-guide/annotations-and-labels.md\n - Command Reference: user-guide/commands/argocd.md\n - Application Specification Reference: user-guide/application-specification.md\n- Developer Guide:\n - developer-guide/index.md\n - Architecture:\n - developer-guide/architecture/authz-authn.md\n - developer-guide/architecture/components.md\n - Code Contribution Guide: developer-guide/code-contributions.md\n - Toolchain Guide: developer-guide/toolchain-guide.md\n - developer-guide/contributors-quickstart.md\n - developer-guide/release-process-and-cadence.md\n - developer-guide/running-locally.md\n - developer-guide/debugging-remote-environment.md\n - developer-guide/use-gitpod.md\n - developer-guide/api-docs.md\n - developer-guide/test-e2e.md\n - developer-guide/dependencies.md\n - developer-guide/ci.md\n - developer-guide/releasing.md\n - developer-guide/docs-site.md\n - developer-guide/static-code-analysis.md\n - Extensions:\n - developer-guide/extensions/ui-extensions.md\n - developer-guide/extensions/proxy-extensions.md\n - developer-guide/faq.md\n- faq.md\n- security_considerations.md\n- Support: SUPPORT.md\n- roadmap.md\n- Releases ⧉: https://github.com/argoproj/argo-cd/releases\n- Blog ⧉: https://blog.argoproj.io/\nrepo_url: https://github.com/argoproj/argo-cd\nsite_name: Argo CD - Declarative GitOps CD for Kubernetes\nsite_url: !ENV READTHEDOCS_CANONICAL_URL\nstrict: true\ntheme:\n custom_dir: overrides\n favicon: assets/favicon.png\n font:\n text: Work Sans\n logo: assets/logo.png\n name: material\n palette:\n - media: '(prefers-color-scheme: light)'\n primary: teal\n scheme: default\n toggle:\n icon: material/brightness-7\n name: Switch to dark mode\n - media: '(prefers-color-scheme: dark)'\n primary: teal\n scheme: slate\n toggle:\n icon: material/brightness-4\n name: Switch to light mode\n
dataset_sample\yaml\argoproj_argo-cd\mkdocs.yml
mkdocs.yml
YAML
10,378
0.95
0.003968
0
react-lib
36
2025-04-18T18:46:58.996274
GPL-3.0
false
34bac22ea3470f05ff5a1da77f03ff6c
header:\n schema-version: 1.0.0\n expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.\n last-updated: '2023-10-27'\n last-reviewed: '2023-10-27'\n commit-hash: 226a670fe6b3c6769ff6d18e6839298a58e4577d\n project-url: https://github.com/argoproj/argo-cd\n project-release: v3.1.0\n changelog: https://github.com/argoproj/argo-cd/releases\n license: https://github.com/argoproj/argo-cd/blob/master/LICENSE\nproject-lifecycle:\n status: active\n roadmap: https://github.com/orgs/argoproj/projects/25\n bug-fixes-only: false\n core-maintainers:\n - https://github.com/argoproj/argoproj/blob/master/MAINTAINERS.md\n release-cycle: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/\n release-process: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/#release-process\ncontribution-policy:\n accepts-pull-requests: true\n accepts-automated-pull-requests: true\n automated-tools-list:\n - automated-tool: dependabot\n action: allowed\n path:\n - /\n - automated-tool: snyk-report\n action: allowed\n path:\n - docs/snyk\n comment: |\n This tool runs Snyk and generates a report of vulnerabilities in the project's dependencies. The report is \n placed in the project's documentation. The workflow is defined here:\n https://github.com/argoproj/argo-cd/blob/master/.github/workflows/update-snyk.yaml\n contributing-policy: https://argo-cd.readthedocs.io/en/stable/developer-guide/code-contributions/\n code-of-conduct: https://github.com/cncf/foundation/blob/master/code-of-conduct.md\ndocumentation:\n - https://argo-cd.readthedocs.io/\ndistribution-points:\n - https://github.com/argoproj/argo-cd/releases\n - https://quay.io/repository/argoproj/argocd\nsecurity-artifacts:\n threat-model:\n threat-model-created: true\n evidence-url:\n - https://github.com/argoproj/argoproj/blob/master/docs/argo_threat_model.pdf\n - https://github.com/argoproj/argoproj/blob/master/docs/end_user_threat_model.pdf\n self-assessment:\n self-assessment-created: false\n comment: |\n An extensive self-assessment was performed for CNCF graduation. 
Because the self-assessment process was evolving\n at the time, no standardized document has been published.\nsecurity-testing:\n - tool-type: sca\n tool-name: Dependabot\n tool-version: "2"\n tool-url: https://github.com/dependabot\n integration:\n ad-hoc: false\n ci: false\n before-release: false\n tool-rulesets:\n - https://github.com/argoproj/argo-cd/blob/master/.github/dependabot.yml\n - tool-type: sca\n tool-name: Snyk\n tool-version: latest\n tool-url: https://snyk.io/\n integration:\n ad-hoc: true\n ci: true\n before-release: false\n - tool-type: sast\n tool-name: CodeQL\n tool-version: latest\n tool-url: https://codeql.github.com/\n integration:\n ad-hoc: false\n ci: true\n before-release: false\n comment: |\n We use the default configuration with the latest version.\nsecurity-assessments:\n - auditor-name: Trail of Bits\n auditor-url: https://trailofbits.com\n auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/argo_security_final_report.pdf\n report-year: 2021\n - auditor-name: Ada Logics\n auditor-url: https://adalogics.com\n auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/argo_security_audit_2022.pdf\n report-year: 2022\n - auditor-name: Ada Logics\n auditor-url: https://adalogics.com\n auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/audit_fuzzer_adalogics_2022.pdf\n report-year: 2022\n comment: |\n Part of the audit was performed by Ada Logics, focussed on fuzzing.\n - auditor-name: Chainguard\n auditor-url: https://chainguard.dev\n auditor-report: https://github.com/argoproj/argoproj/blob/master/docs/software_supply_chain_slsa_assessment_chainguard_2023.pdf\n report-year: 2023\n comment: |\n Confirmed the project's release process as achieving SLSA (v0.1) level 3.\nsecurity-contacts:\n - type: email\n value: cncf-argo-security@lists.cncf.io\n primary: true\nvulnerability-reporting:\n accepts-vulnerability-reports: true\n email-contact: cncf-argo-security@lists.cncf.io\n security-policy: https://github.com/argoproj/argo-cd/security/policy\n bug-bounty-available: true\n bug-bounty-url: https://hackerone.com/ibb/policy_scopes\n out-scope:\n - vulnerable and outdated components # See https://github.com/argoproj/argo-cd/blob/master/SECURITY.md#a-word-about-security-scanners\n - security logging and monitoring failures\ndependencies:\n third-party-packages: true\n dependencies-lists:\n - https://github.com/argoproj/argo-cd/blob/master/go.mod\n - https://github.com/argoproj/argo-cd/blob/master/Dockerfile\n - https://github.com/argoproj/argo-cd/blob/master/ui/package.json\n sbom:\n - sbom-file: https://github.com/argoproj/argo-cd/releases # Every release's assets include SBOMs.\n sbom-format: SPDX\n dependencies-lifecycle:\n policy-url: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/#dependencies-lifecycle-policy\n env-dependencies-policy:\n policy-url: https://argo-cd.readthedocs.io/en/stable/developer-guide/release-process-and-cadence/#dependencies-lifecycle-policy\n
dataset_sample\yaml\argoproj_argo-cd\SECURITY-INSIGHTS.yml
SECURITY-INSIGHTS.yml
YAML
5,348
0.8
0.007813
0
vue-tools
606
2024-07-11T06:44:23.823292
GPL-3.0
false
d4c9d7b3e7dd35e3cc354f7b7153ea9b
version: 2\nupdates:\n  - package-ecosystem: "gomod"\n    directory: "/"\n    schedule:\n      interval: "daily"\n    open-pull-requests-limit: 20\n    ignore:\n      - dependency-name: k8s.io/*\n    groups:\n      otel:\n        patterns:\n          - "^go.opentelemetry.io/.*"\n\n  - package-ecosystem: "github-actions"\n    directory: "/"\n    schedule:\n      interval: "daily"\n\n  - package-ecosystem: "npm"\n    directory: "/ui/"\n    schedule:\n      interval: "daily"\n\n  - package-ecosystem: "npm"\n    directory: "/ui-test/"\n    schedule:\n      interval: "daily"\n\n  - package-ecosystem: "docker"\n    directory: "/"\n    schedule:\n      interval: "daily"\n    ignore:\n      # We use consistent go and node versions across a lot of different files, and updating via dependabot would cause\n      # drift among those files; instead we let renovate bot handle them.\n      - dependency-name: "library/golang"\n      - dependency-name: "library/node"\n\n  - package-ecosystem: "docker"\n    directory: "/test/container/"\n    schedule:\n      interval: "daily"\n\n  - package-ecosystem: "docker"\n    directory: "/test/e2e/multiarch-container/"\n    schedule:\n      interval: "daily"\n\n  - package-ecosystem: "docker"\n    directory: "/test/remote/"\n    schedule:\n      interval: "daily"\n\n  - package-ecosystem: "docker"\n    directory: "/ui-test/"\n    schedule:\n      interval: "daily"\n
dataset_sample\yaml\argoproj_argo-cd\.github\dependabot.yml
dependabot.yml
YAML
1,350
0.8
0
0.04
react-lib
958
2025-06-06T23:37:13.684758
BSD-3-Clause
false
f84ef7eb0bf3113f92dbe476278d416d
# See https://github.com/probot/stale\nexemptLabels:\n  - backlog\n
dataset_sample\yaml\argoproj_argo-cd\.github\stale.yml
stale.yml
YAML
102
0.8
0
0.5
node-utils
834
2024-01-03T01:22:30.027273
MIT
false
4e83e6d3fd7dc0476deaf9232948dd71
blank_issues_enabled: false\n\ncontact_links:\n - name: Have you read the docs?\n url: https://argo-cd.readthedocs.io/\n about: Much help can be found in the docs\n - name: Ask a question\n url: https://github.com/argoproj/argo-cd/discussions/new\n about: Ask a question or start a discussion about Argo CD\n - name: Chat on Slack\n url: https://argoproj.github.io/community/join-slack\n about: Maybe chatting with the community can help\n
dataset_sample\yaml\argoproj_argo-cd\.github\ISSUE_TEMPLATE\config.yml
config.yml
YAML
448
0.8
0
0
python-kit
867
2024-08-05T23:02:00.682517
MIT
false
c10412bec4daaf56f5101a3fe042094d
name: "Code scanning - action"\n\non:\n push:\n # Secrets aren't available for dependabot on push. https://docs.github.com/en/enterprise-cloud@latest/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/troubleshooting-the-codeql-workflow#error-403-resource-not-accessible-by-integration-when-using-dependabot\n branches-ignore:\n - 'dependabot/**'\n - 'cherry-pick-*'\n pull_request:\n schedule:\n - cron: '0 19 * * 0'\n\nconcurrency:\n group: ${{ github.workflow }}-${{ github.ref }}\n cancel-in-progress: true\n\npermissions:\n contents: read\n\njobs:\n CodeQL-Build:\n permissions:\n actions: read # for github/codeql-action/init to get workflow details\n contents: read # for actions/checkout to fetch code\n security-events: write # for github/codeql-action/autobuild to send a status report\n if: github.repository == 'argoproj/argo-cd' || vars.enable_codeql\n\n # CodeQL runs on ubuntu-latest and windows-latest\n runs-on: ubuntu-22.04\n steps:\n - name: Checkout repository\n uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0\n\n # Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087\n - name: Setup Golang\n uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0\n with:\n go-version-file: go.mod\n \n # Initializes the CodeQL tools for scanning.\n - name: Initialize CodeQL\n uses: github/codeql-action/init@8fcfedf57053e09257688fce7a0beeb18b1b9ae3 # v2.17.2\n # Override language selection by uncommenting this and choosing your languages\n # with:\n # languages: go, javascript, csharp, python, cpp, java\n\n # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).\n # If this step fails, then you should remove it and run the build manually (see below)\n - name: Autobuild\n uses: github/codeql-action/autobuild@8fcfedf57053e09257688fce7a0beeb18b1b9ae3 # v2.17.2\n\n # ℹ️ Command-line programs to run using the OS shell.\n # 📚 https://git.io/JvXDl\n\n # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines\n # and modify them (or add more) to build your code if your project\n # uses a compiled language\n\n #- run: |\n # make bootstrap\n # make release\n\n - name: Perform CodeQL Analysis\n uses: github/codeql-action/analyze@8fcfedf57053e09257688fce7a0beeb18b1b9ae3 # v2.17.2\n
dataset_sample\yaml\argoproj_argo-cd\.github\workflows\codeql.yml
codeql.yml
YAML
2,498
0.8
0.125
0.326923
awesome-app
94
2024-08-15T01:56:47.534148
BSD-3-Clause
false
0230088455fdbbb5a78435f74d51c57e
name: "Lint PR"\n\non:\n pull_request_target:\n types: [opened, edited, reopened, synchronize]\n\n# IMPORTANT: No checkout actions, scripts, or builds should be added to this workflow. Permissions should always be used\n# with extreme caution. https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target\npermissions: {}\n\n# PR updates can happen in quick succession leading to this\n# workflow being trigger a number of times. This limits it\n# to one run per PR.\nconcurrency:\n group: ${{ github.workflow }}-${{ github.head_ref }}\n cancel-in-progress: true\n\njobs:\n validate:\n permissions:\n contents: read\n pull-requests: read\n name: Validate PR Title\n runs-on: ubuntu-latest\n steps:\n - uses: thehanimo/pr-title-checker@7fbfe05602bdd86f926d3fb3bccb6f3aed43bc70 # v1.4.3\n with:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n configuration_path: ".github/pr-title-checker-config.json"\n
dataset_sample\yaml\argoproj_argo-cd\.github\workflows\pr-title-check.yml
pr-title-check.yml
YAML
969
0.8
0
0.2
react-lib
655
2024-03-18T16:18:36.109321
BSD-3-Clause
false
8b51881000d4264418bec8bc4db3c546
ignore:\n- "**/*.pb.go"\n- "**/*.pb.gw.go"\n- "**/*generated.go"\n- "**/*generated.deepcopy.go"\n- "**/*_test.go"\n- "pkg/apis/client/.*"\n- "pkg/client/.*"\n- "vendor/.*"\ncoverage:\n status:\n # we've found this not to be useful\n patch: off\n project:\n default:\n # allow test coverage to drop by 2%, assume that it's typically due to CI problems\n threshold: 2
dataset_sample\yaml\argoproj_argo-workflows\.codecov.yml
.codecov.yml
YAML
378
0.8
0
0.117647
python-kit
912
2024-03-19T15:31:26.983628
Apache-2.0
false
fbb4973165c6cc6fd6efd62f91d05f0c
version: "2"\nrun:\n build-tags:\n - api\n - cli\n - cron\n - executor\n - examples\n - corefunctional\n - functional\n - plugins\nlinters:\n enable:\n - asasalint\n - bidichk\n - bodyclose\n - copyloopvar\n - errcheck\n - gosec\n - govet\n - ineffassign\n - misspell\n - nakedret\n - nosprintfhostport\n - reassign\n - rowserrcheck\n - sqlclosecheck\n - staticcheck\n - testifylint\n - unparam\n - unused\n settings:\n gosec:\n includes:\n - G304\n - G307\n excludes:\n # G106: Use of ssh InsecureIgnoreHostKey should be audited\n - G106\n # G402: TLS InsecureSkipVerify set true\n - G402\n staticcheck:\n checks:\n - all\n # Capitalised variable names\n - "-ST1003"\n # Capitalised error strings\n - "-ST1005"\n # Receiver names\n - "-ST1016"\n exclusions:\n generated: lax\n presets:\n - comments\n - common-false-positives\n - legacy\n - std-error-handling\n rules:\n - path: server/artifacts/artifact_server_test.go\n text: response body must be closed\n paths:\n - dist\n - docs\n - examples\n - hack\n - manifests\n - pkg/client\n - sdks\n - ui\n - vendor\n - third_party$\n - builtin$\n - examples$\nformatters:\n enable:\n - gofmt\n - goimports\n settings:\n goimports:\n local-prefixes:\n - github.com/argoproj/argo-workflows/\n exclusions:\n generated: lax\n paths:\n - dist\n - docs\n - examples\n - hack\n - manifests\n - pkg/client\n - sdks\n - ui\n - vendor\n - third_party$\n - builtin$\n - examples$\n
dataset_sample\yaml\argoproj_argo-workflows\.golangci.yml
.golangci.yml
YAML
1,710
0.95
0
0.052083
python-kit
225
2023-09-22T12:05:54.046106
BSD-3-Clause
false
a8f8ed6c6a535164c56d5e8bc5de100c