content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nbug:\n - 'panic:'\ncrash:\n - 'panic:'\n\n
dataset_sample\yaml\go\labeler-issue-triage.yml
labeler-issue-triage.yml
YAML
108
0.8
0
0.333333
python-kit
992
2024-04-10T05:48:34.621445
BSD-3-Clause
false
9bb11e1c5033f661faa89bac1ae8fe0d
area/graphql:\n - changed-files:\n - any-glob-to-any-file: graphql/**\n\narea/documentation:\n - changed-files:\n - any-glob-to-any-file:\n - "**/*.md"\n - "**/*.pdf"\n - "**/*.tex"\n\narea/bulk-loader:\n - changed-files:\n - any-glob-to-any-file: dgraph/cmd/bulk/**\n\narea/live-loader:\n - changed-files:\n - any-glob-to-any-file: dgraph/cmd/live/**\n\narea/querylang:\n - changed-files:\n - any-glob-to-any-file: dql/**\n\narea/integrations:\n - changed-files:\n - any-glob-to-any-file:\n - contrib/**\n - .github/**\n - .travis/**\n\narea/testing/jepsen:\n - changed-files:\n - any-glob-to-any-file: contrib/jepsen/**\n\narea/backup:\n - changed-files:\n - any-glob-to-any-file: backup/**\n\narea/acl:\n - changed-files:\n - any-glob-to-any-file: acl/**\n\narea/schema:\n - changed-files:\n - any-glob-to-any-file: schema/**\n\narea/testing:\n - changed-files:\n - any-glob-to-any-file:\n - systest/**\n - "**/*test.go"\n - graphql/e2e/**\n - "**/*test.yaml"\n - t/**\n - testutil/**\n - tlstest/**\n\narea/core:\n - changed-files:\n - any-glob-to-any-file:\n - protos/**\n - posting/**\n - raftwal/**\n - query/**\n - schema/**\n - protos/**\n - x/**\n - xidmap/**\n - worker/**\n - graphql/**\n\ngo:\n - changed-files:\n - any-glob-to-any-file: "**/*.go"\n\npython:\n - changed-files:\n - any-glob-to-any-file: "**/*.py"\n
dataset_sample\yaml\go\labeler.yml
labeler.yml
YAML
1,541
0.8
0
0
node-utils
850
2024-06-26T18:59:56.113028
GPL-3.0
false
b5d7d05bb3c56b49a12b06722ff2c7aa
header:\n license:\n spdx-id: Apache-2.0\n copyright-owner: PingCAP, Inc.\n paths-ignore:\n - "docs/"\n - "br/"\n - ".gitignore"\n - ".gitmodules"\n - ".dockerignore"\n - ".gitattributes"\n - ".cilinter.yaml"\n - ".golangci.yml"\n - ".golangci_br.yml"\n - "LICENSES/"\n - "**/BUILD.bazel"\n - "WORKSPACE"\n - "WORKSPACE.patchgo"\n - "MODULE.bazel"\n - "MODULE.bazel.lock"\n - ".bazelrc"\n - "**/*.key"\n - "**/*.md"\n - "**/*.json"\n - "**/*.toml"\n - "**/*.pem"\n - "**/*.crt"\n - "**/*.test"\n - "**/*.result"\n - "**/*.example"\n - "**/*.patch"\n - "**/*.bzl"\n - "**/.git/**"\n - ".codecov.yml"\n - "Jenkinsfile"\n - ".editorconfig"\n - "hooks/pre-commit"\n - "**/go.mod"\n - "**/go.sum"\n - "LICENSE"\n - ".github/"\n - "pkg/parser/"\n - "dumpling/"\n - "pkg/tidb-binlog/driver/example"\n - "pkg/tidb-binlog/proto/go-binlog/secondary_binlog.pb.go"\n - "**/*.sql"\n - "**/*.csv"\n - "**/*.CSV"\n - "**/*.parquet"\n - "**/*.zst"\n - ".bazelversion"\n - "build/image/.ci_bazel"\n - "**/OWNERS"\n - "OWNERS_ALIASES"\n - "pkg/**/mock/**/*_mock.go"\n - "pkg/extension/enterprise/"\n - "lightning/pkg/web/res_vfsdata.go"\n - "lightning/web/docs/api.yaml"\n - "lightning/web/public/index.html"\n - "lightning/web/webpack.config.js"\n - "pkg/lightning/checkpoints/checkpointspb/file_checkpoints.pb.go"\n - "pkg/lightning/manual/manual.go"\n - "pkg/lightning/manual/manual_nocgo.go"\n - "pkg/lightning/mydump/bytes.go"\n - "pkg/lightning/mydump/examples/metadata"\n - "tests/_utils/config/"\n - "**/tidb-slow.log"\n - "**/tidb-slow-*.log"\n - "**/metadata"\n - "pkg/extension/_import/generated-enterprise.go"\n\n comment: on-failure\n
dataset_sample\yaml\go\licenserc.yml
licenserc.yml
YAML
1,761
0.95
0
0
awesome-app
601
2024-12-09T10:13:20.593335
GPL-3.0
false
1330d87c7614820ccfabeee296ac244d
misc:\n - &source_code_files files~=^(?=.*((\.(go|h|cpp)|go.sum|go.mod|CMakeLists.txt|conanfile\.*))).*$\n - &no_source_code_files -files~=^(?=.*((\.(go|h|cpp)|go.sum|go.mod|CMakeLists.txt|conanfile\.*))).*$\n - &only_go_unittest_files -files~=^(?!(client|internal|pkg|tests)\/.*_test\.go).*$\n - &morethan_go_unittest_files files~=^(?!(client|internal|pkg|tests)\/.*_test\.go).*$\n - when_build_and_test_status_successs: &Build_AND_TEST_STATUS_SUCESS_ON_UBUNTU_20_OR_UBUNTU_22\n - 'status-success=Build and test AMD64 Ubuntu 20.04'\n - 'status-success=Build and test AMD64 Ubuntu 22.04'\n - when_build_and_test_status_failed: &Build_AND_TEST_STATUS_FAILED_ON_UBUNTU_20_OR_UBUNTU_22\n - &failed_on_ubuntu_20 'check-failure=Build and test AMD64 Ubuntu 20.04'\n - &failed_on_ubuntu_22 'check-failure=Build and test AMD64 Ubuntu 22.04'\n - when_go_sdk_status_success: &WHEN_GO_SDK_STATUS_SUCCESS\n - 'status-success=go-sdk'\n - 'status-success=milvus-sdk-go'\n - when_cpp_unit_test_success: &WHEN_CPP_UNIT_TEST_SUCCESS\n - 'status-success=cpp-unit-test'\n - 'status-success=UT for Cpp'\n - when_go_unit_test_success: &WHEN_GO_UNIT_TEST_SUCCESS\n - 'status-success=go-unit-test'\n - 'status-success=UT for Go'\n - when_integration_unit_test_success: &WHEN_INTEGRATION_UNIT_TEST_SUCCESS\n - 'status-success=integration-test'\n - 'status-success=Integration Test'\n - branch: &BRANCHES\n # In this pull request, the changes are based on the master branch\n - &MASTER_BRANCH base=master\n - &23_BRANCH base=2.3\n - &24_BRANCH base=2.4\n - &25_BRANCH base=2.5\n # In this pull request, the changes are based on the 2.x(or 2.x.x) branch\n - &2X_BRANCH base~=^2(\.\d+){1,2}$\n\npull_request_rules:\n - name: Add needs-dco label when DCO check failed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - -status-success=DCO\n actions:\n label:\n remove:\n - dco-passed\n add:\n - needs-dco\n comment:\n message: |\n 
@{{author}} Thanks for your contribution. Please submit with DCO, see the contributing guide https://github.com/milvus-io/milvus/blob/master/CONTRIBUTING.md#developer-certificate-of-origin-dco.\n\n - name: Add dco-passed label when DCO check passed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - status-success=DCO\n actions:\n label:\n remove:\n - needs-dco\n add:\n - dco-passed\n\n - name: Test passed for code changed on master \n conditions:\n - *MASTER_BRANCH\n - or: *Build_AND_TEST_STATUS_SUCESS_ON_UBUNTU_20_OR_UBUNTU_22\n - or: *WHEN_GO_SDK_STATUS_SUCCESS\n - or: *WHEN_CPP_UNIT_TEST_SUCCESS\n - or: *WHEN_GO_UNIT_TEST_SUCCESS\n - or: *WHEN_INTEGRATION_UNIT_TEST_SUCCESS\n - 'status-success=Code Checker AMD64 Ubuntu 22.04'\n - 'status-success=Code Checker MacOS 13'\n # - 'status-success=Code Checker Amazonlinux 2023'\n - 'status-success=cpu-e2e'\n # - 'status-success=codecov/patch'\n # - 'status-success=codecov/project'\n actions:\n label:\n add:\n - ci-passed\n - name: Test passed for code changed on 2.* branch\n conditions:\n - *2X_BRANCH\n - 'status-success=Code Checker AMD64 Ubuntu 22.04'\n - or: *Build_AND_TEST_STATUS_SUCESS_ON_UBUNTU_20_OR_UBUNTU_22\n - or: *WHEN_CPP_UNIT_TEST_SUCCESS\n - or: *WHEN_GO_UNIT_TEST_SUCCESS\n - or: *WHEN_INTEGRATION_UNIT_TEST_SUCCESS\n - 'status-success=Code Checker MacOS 13'\n # - 'status-success=Code Checker CentOS 7'\n - 'status-success=cpu-e2e'\n # - 'status-success=codecov/patch'\n # - 'status-success=codecov/project'\n actions:\n label:\n add:\n - ci-passed\n\n - name: Test passed for tests changed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - -files~=^(?!tests\/python_client).+\n - 'status-success=cpu-e2e'\n actions:\n label:\n add:\n - ci-passed\n\n - name: Test passed for docs changed only\n conditions:\n # branch condition: in this pull 
request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - -files~=^(?!.*\.(md)).*$\n actions:\n label:\n add:\n - ci-passed\n\n - name: Test passed for non go or c++ code changed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'status-success=cpu-e2e'\n - *no_source_code_files\n actions:\n label:\n add:\n - ci-passed\n\n - name: Test passed for go unittest code changed-master\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - or: *Build_AND_TEST_STATUS_SUCESS_ON_UBUNTU_20_OR_UBUNTU_22\n - 'status-success=Code Checker AMD64 Ubuntu 22.04'\n - 'status-success=Code Checker MacOS 13'\n # - 'status-success=Code Checker Amazonlinux 2023'\n - or: *WHEN_GO_UNIT_TEST_SUCCESS\n - *only_go_unittest_files\n actions:\n label:\n add:\n - ci-passed\n\n - name: Test passed for go unittest code changed -2.2.*\n conditions:\n - *2X_BRANCH\n - or: *Build_AND_TEST_STATUS_SUCESS_ON_UBUNTU_20_OR_UBUNTU_22\n - 'status-success=Code Checker AMD64 Ubuntu 22.04'\n - 'status-success=Code Checker MacOS 13'\n - -files~=^(?!internal\/.*_test\.go).*$\n actions:\n label:\n add:\n - ci-passed\n\n - name: Test passed for mergify changed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - -files~=^(?!\.github\/mergify\.yml).*$\n actions:\n label:\n add:\n - ci-passed\n\n - name: Test passed for title skip e2e\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - title~=\[skip e2e\]\n - label=kind/enhancement\n - *no_source_code_files\n actions:\n label:\n add:\n - ci-passed\n\n - name: Blocking PR if missing a related issue or doesn't have kind/enhancement label\n conditions:\n # branch condition: in this pull request, the 
changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - and:\n - -body~=\#[0-9]{1,6}(\s+|$)\n - -body~=https://github.com/milvus-io/milvus/issues/[0-9]{1,6}(\s+|$)\n - or:\n - and:\n - label=kind/enhancement\n - or:\n - label=size/L\n - label=size/XL\n - label=size/XXL\n - label=kind/bug\n - label=kind/feature\n \n - -label=kind/doc\n - -label=kind/test\n - -title~=\[automated\]\n actions:\n label:\n add:\n - do-not-merge/missing-related-issue\n comment:\n message: |\n @{{author}} Please associate the related issue to the body of your Pull Request. (eg. “issue: #<xyz>”)\n\n - name: Dismiss block label if related issue be added into PR\n conditions:\n - and:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - or:\n - body~=\#[0-9]{1,6}(\s+|$)\n - body~=https://github.com/milvus-io/milvus/issues/[0-9]{1,6}(\s+|$)\n actions:\n label:\n remove:\n - do-not-merge/missing-related-issue\n\n - name: Blocking PR if missing a related master PR or doesn't have kind/branch-feature label\n conditions:\n - *2X_BRANCH\n - and:\n - -body~=pr\:\ \#[0-9]{1,6}(\s+|$)\n - -body~=https://github.com/milvus-io/milvus/pull/[0-9]{1,6}(\s+|$)\n - -label=kind/branch-feature\n - -title~=\[automated\]\n actions:\n label:\n add:\n - do-not-merge/missing-related-pr\n comment:\n message: |\n @{{author}} Please associate the related pr of master to the body of your Pull Request. (eg. 
“pr: #<xyz>”)\n\n - name: Dismiss block label if related pr be added into PR\n conditions:\n - *2X_BRANCH\n - or:\n - body~=pr\:\ \#[0-9]{1,6}(\s+|$)\n - body~=https://github.com/milvus-io/milvus/pull/[0-9]{1,6}(\s+|$)\n - label=kind/branch-feature\n actions:\n label:\n remove:\n - do-not-merge/missing-related-pr\n\n - name: Dismiss block label if automated create PR\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - title~=\[automated\]\n actions:\n label:\n remove:\n - do-not-merge/missing-related-issue\n - do-not-merge/missing-related-pr\n\n - name: Test passed for skip e2e-master\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - or: *Build_AND_TEST_STATUS_SUCESS_ON_UBUNTU_20_OR_UBUNTU_22\n - title~=\[skip e2e\]\n - or: *WHEN_CPP_UNIT_TEST_SUCCESS\n - or: *WHEN_GO_UNIT_TEST_SUCCESS\n - or: *WHEN_INTEGRATION_UNIT_TEST_SUCCESS\n - 'status-success=Code Checker AMD64 Ubuntu 22.04'\n - 'status-success=Code Checker MacOS 13'\n # - 'status-success=Code Checker Amazonlinux 2023'\n - *source_code_files\n actions:\n label:\n add:\n - ci-passed\n\n - name: Test passed for skip e2e - 2.2.*\n conditions:\n - *2X_BRANCH\n - or: *Build_AND_TEST_STATUS_SUCESS_ON_UBUNTU_20_OR_UBUNTU_22\n - title~=\[skip e2e\]\n - or: *WHEN_CPP_UNIT_TEST_SUCCESS\n - or: *WHEN_GO_UNIT_TEST_SUCCESS\n - or: *WHEN_INTEGRATION_UNIT_TEST_SUCCESS\n - 'status-success=Code Checker MacOS 13'\n - *source_code_files\n actions:\n label:\n add:\n - ci-passed\n\n - name: Assign the 'lgtm' and 'approved' labels following the successful testing of the 'Update Knowhere Commit'\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'title~=Update Knowhere Commit'\n - label=ci-passed\n actions:\n label:\n add:\n - lgtm\n - approved\n\n - name: master or 2.5 - 
Remove ci-passed label when status for code checker or ut is not success\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or:\n - *MASTER_BRANCH\n - *25_BRANCH\n - label!=manual-pass\n - *source_code_files\n - or:\n - *failed_on_ubuntu_20\n - *failed_on_ubuntu_22\n - 'status-success!=Code Checker AMD64 Ubuntu 22.04'\n - and:\n - 'status-success!=cpp-unit-test'\n - *morethan_go_unittest_files\n - 'status-success!=UT for Go'\n - 'status-success!=Integration Test'\n - 'status-success!=Code Checker MacOS 13'\n # - 'status-success!=Code Checker Amazonlinux 2023'\n actions:\n label:\n remove:\n - ci-passed\n\n - name: 2.3 or 2.4 - Remove ci-passed label when status for code checker or ut is not success\n conditions:\n - or:\n - *23_BRANCH\n - *24_BRANCH\n - label!=manual-pass\n - *source_code_files\n - or:\n - *failed_on_ubuntu_20\n - *failed_on_ubuntu_22\n # - 'status-success!=Code Checker AMD64 Ubuntu 20.04'\n - 'status-success!=UT for Cpp'\n - 'status-success!=UT for Go'\n - 'status-success!=Integration Test'\n - 'status-success!=Code Checker MacOS 13'\n # - 'status-success!=Code Checker CentOS 7'\n actions:\n label:\n remove:\n - ci-passed\n\n - name: Remove ci-passed label when status for jenkins job is not success\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - label!=manual-pass\n - -title~=\[skip e2e\]\n - files~=^(?!(.*_test\.go|.*\.md)).*$\n - 'status-success!=cpu-e2e'\n actions:\n label:\n remove:\n - ci-passed\n\n - name: Add comment when jenkins job failed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'check-failure=cpu-e2e'\n actions:\n comment:\n message: |\n @{{author}} E2e jenkins job failed, comment `/run-cpu-e2e` can trigger the job again.\n\n# when go-sdk check failed, prompt user to rerun go-sdk 
job\n - name: Add comment when go-sdk check failed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'check-failure=go-sdk'\n actions:\n comment:\n message: |\n @{{author}} go-sdk check failed, comment `rerun go-sdk` can trigger the job again.\n\n# when cpp-unit-test check failed, prompt user to rerun cpp-unit-test job\n - name: Add comment when cpp-unit-test check failed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'check-failure=cpp-unit-test'\n actions:\n comment:\n message: |\n @{{author}} cpp-unit-test check failed, comment `rerun cpp-unit-test` can trigger the job again.\n\n# when go-unit-test check failed, prompt user to rerun go-unit-test job\n - name: Add comment when go-unit-test check failed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'check-failure=go-unit-test'\n actions:\n comment:\n message: |\n @{{author}} go-unit-test check failed, comment `rerun go-unit-test` can trigger the job again.\n\n# when integration-test check failed, prompt user to rerun integration-test job\n - name: Add comment when integration-test check failed\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'check-failure=integration-test'\n actions:\n comment:\n message: |\n @{{author}} integration-test check failed, comment `rerun integration-test` can trigger the job again.\n\n - name: Add comment when code checker or ut failed -master\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - or:\n # - 'check-failure=Code Checker AMD64 Ubuntu 20.04'\n - 'check-failure=Build and test AMD64 Ubuntu 20.04'\n actions:\n comment:\n message: |\n 
@{{author}} ut workflow job failed, comment `rerun ut` can trigger the job again.\n\n - name: Add comment when code checker or ut failed -2.2.*\n conditions:\n - *2X_BRANCH\n - or:\n # - 'check-failure=Code Checker AMD64 Ubuntu 20.04'\n - 'check-failure=Build and test AMD64 Ubuntu 20.04'\n actions:\n comment:\n message: |\n @{{author}} ut workflow job failed, comment `rerun ut` can trigger the job again.\n\n - name: Add 'do-not-merge/invalid-pr-format' label for invalid PR titles\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - or:\n - '-title~=^(feat:|enhance:|fix:|test:|doc:|auto:|\[automated\])'\n - body=^$\n actions:\n label:\n add:\n - do-not-merge/invalid-pr-format\n comment:\n message: |\n @{{author}} \n\n **Invalid PR Title Format Detected**\n\n Your PR submission does not adhere to our required standards. To ensure clarity and consistency, please meet the following criteria:\n\n 1. **Title Format:** The PR title must begin with one of these prefixes:\n - `feat:` for introducing a new feature.\n - `fix:` for bug fixes.\n - `enhance:` for improvements to existing functionality.\n - `test`: for add tests to existing functionality.\n - `doc`: for modifying documentation.\n - `auto`: for the pull request from bot.\n\n 2. **Description Requirement:** The PR must include a non-empty description, detailing the changes and their impact.\n\n **Required Title Structure:**\n\n ```\n [Type]: [Description of the PR]\n ```\n\n Where `Type` is one of `feat`, `fix`, `enhance`, `test` or `doc`. 
\n\n **Example:**\n ```\n enhance: improve search performance significantly \n ```\n\n Please review and update your PR to comply with these guidelines.\n\n - name: Remove 'do-not-merge/invalid-pr-format' label for valid PRs\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'title~=^(feat:|enhance:|fix:|test:|doc:|auto:|\[automated\])'\n - '-body=^$'\n - 'label=do-not-merge/invalid-pr-format'\n actions:\n label:\n remove:\n - do-not-merge/invalid-pr-format\n\n - name: Label bug fix PRs\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'title~=^fix:'\n actions:\n label:\n add:\n - kind/bug\n\n - name: Label feature PRs\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'title~=^feat:'\n actions:\n label:\n add:\n - kind/feature\n\n - name: Label enhancement PRs\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'title~=^enhance:'\n actions:\n label:\n add:\n - kind/enhancement\n\n - name: Label test PRs\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'title~=^test:'\n actions:\n label:\n add:\n - kind/test\n\n - name: Label doc PRs\n conditions:\n # branch condition: in this pull request, the changes are based on any branch referenced by BRANCHES\n - or: *BRANCHES\n - 'title~=^doc:'\n actions:\n label:\n add:\n - kind/doc\n\n \n
dataset_sample\yaml\go\mergify.yml
mergify.yml
YAML
18,892
0.95
0.060662
0.100604
vue-tools
155
2025-04-09T11:54:15.900584
GPL-3.0
false
0a1280f645d39f92335ae72c1b86a060
metricbeat.config.modules:\n path: ${path.config}/modules.d/*.yml\n reload.enabled: false\n\nprocessors:\n - add_cloud_metadata: ~\n - add_docker_metadata: ~\n\noutput.elasticsearch:\n hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'\n username: '${ELASTICSEARCH_USERNAME:}'\n password: '${ELASTICSEARCH_PASSWORD:}'\n
dataset_sample\yaml\go\metricbeat.docker.yml
metricbeat.docker.yml
YAML
314
0.8
0
0
awesome-app
794
2024-08-21T04:45:02.205967
MIT
false
70d39bfc0074d8f6c7b88a9e4c352a96
###################### Metricbeat Configuration Example #######################\n\n# This file is an example configuration file highlighting only the most common\n# options. The metricbeat.reference.yml file from the same directory contains all the\n# supported options with more comments. You can use it as a reference.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/metricbeat/index.html\n\n# =========================== Modules configuration ============================\n\nmetricbeat.config.modules:\n # Glob pattern for configuration loading\n path: ${path.config}/modules.d/*.yml\n\n # Set to true to enable config reloading\n reload.enabled: false\n\n # Period on which files under path should be checked for changes\n #reload.period: 10s\n\n# ======================= Elasticsearch template setting =======================\n\nsetup.template.settings:\n index.number_of_shards: 1\n index.codec: best_compression\n #_source.enabled: false\n\n\n# ================================== General ===================================\n\n# The name of the shipper that publishes the network data. It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n#name:\n\n# The tags of the shipper are included in their field with each\n# transaction published.\n#tags: ["service-X", "web-tier"]\n\n# Optional fields that you can specify to add additional information to the\n# output.\n#fields:\n# env: staging\n\n# ================================= Dashboards =================================\n# These settings control loading the sample dashboards to the Kibana index. Loading\n# the dashboards is disabled by default and can be enabled either by setting the\n# options here or by using the `setup` command.\n#setup.dashboards.enabled: false\n\n# The URL from where to download the dashboard archive. By default, this URL\n# has a value that is computed based on the Beat name and version. 
For released\n# versions, this URL points to the dashboard archive on the artifacts.elastic.co\n# website.\n#setup.dashboards.url:\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify and additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Kibana Space ID\n # ID of the Kibana Space into which the dashboards should be loaded. By default,\n # the Default Space will be used.\n #space.id:\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Metricbeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. 
The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Array of hosts to connect to.\n hosts: ["localhost:9200"]\n\n # Performance preset - one of "balanced", "throughput", "scale",\n # "latency", or "custom".\n preset: balanced\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Optional SSL. By default is off.\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client Certificate Key\n #ssl.key: "/etc/pki/client/cert.key"\n\n# ================================= Processors =================================\n\n# Configure processors to enhance or manipulate events generated by the beat.\n\nprocessors:\n - add_host_metadata: ~\n - add_cloud_metadata: ~\n - add_docker_metadata: ~\n - add_kubernetes_metadata: ~\n\n\n# ================================== Logging ===================================\n\n# Sets log level. The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: debug\n\n# At debug level, you can selectively enable logging only for some components.\n# To enable all selectors, use ["*"]. 
Examples of other selectors are "beat",\n# "publisher", "service".\n#logging.selectors: ["*"]\n\n# ============================= X-Pack Monitoring ==============================\n# Metricbeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Metricbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. Most settings from the\n# Elasticsearch outputs are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the metricbeat.\n#instrumentation:\n # Set to true to enable instrumentation of metricbeat.\n #enabled: false\n\n # Environment in which metricbeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration 
aliases\n#migration.6_to_7.enabled: true\n\n
dataset_sample\yaml\go\metricbeat.yml
metricbeat.yml
YAML
7,011
0.95
0.051813
0.892857
awesome-app
438
2024-12-03T17:32:23.404684
GPL-3.0
false
327eb675080546d8293aadd2b3cf79e0
site_name: Traefik\nsite_description: Traefik Documentation\nsite_author: traefik.io\nsite_url: https://doc.traefik.io/traefik\ndev_addr: 0.0.0.0:8000\n\nrepo_name: 'GitHub'\nrepo_url: 'https://github.com/traefik/traefik'\n\ndocs_dir: 'content'\n\nproduct: proxy\n\n# https://squidfunk.github.io/mkdocs-material/\ntheme:\n name: 'traefik-labs'\n language: en\n include_sidebar: true\n favicon: assets/img/traefikproxy-icon-color.png\n logo: assets/img/traefikproxy-vertical-logo-color.svg\n feature:\n tabs: false\n palette:\n primary: 'cyan'\n accent: 'cyan'\n i18n:\n prev: 'Previous'\n next: 'Next'\n\ncopyright: 'Traefik Labs • Copyright &copy; 2016-2025'\n\nextra_javascript:\n - assets/js/hljs/highlight.pack.js # Download from https://highlightjs.org/download/ and enable YAML, TOML and Dockerfile\n - assets/js/extra.js\n\nplugins:\n - search\n - exclude:\n glob:\n - "**/include-*.md"\n\n# https://squidfunk.github.io/mkdocs-material/extensions/admonition/\n# https://facelessuser.github.io/pymdown-extensions/\nmarkdown_extensions:\n - meta\n - attr_list\n - admonition\n - footnotes\n - pymdownx.details\n - pymdownx.inlinehilite\n - pymdownx.highlight:\n use_pygments: false # hljs is used instead of pygment for TOML highlighting support\n - pymdownx.smartsymbols\n - pymdownx.superfences\n - pymdownx.tabbed\n - pymdownx.tasklist\n - pymdownx.snippets:\n check_paths: true\n - markdown_include.include:\n base_path: content/includes/\n encoding: utf-8\n - toc:\n permalink: true\n\n# Page tree\nnav:\n - 'What is Traefik': 'index.md'\n - 'Getting Started':\n - 'Concepts' : 'getting-started/concepts.md'\n - 'Quick Start':\n - 'Docker': 'getting-started/quick-start.md'\n - 'Kubernetes': 'getting-started/quick-start-with-kubernetes.md'\n - 'Configuration Introduction': 'getting-started/configuration-overview.md'\n - 'Install Traefik': 'getting-started/install-traefik.md'\n - 'Frequently Asked Questions': 'getting-started/faq.md'\n - 'Configuration Discovery':\n - 'Overview': 
'providers/overview.md'\n - 'Docker': 'providers/docker.md'\n - 'Swarm': 'providers/swarm.md'\n - 'Kubernetes IngressRoute': 'providers/kubernetes-crd.md'\n - 'Kubernetes Ingress': 'providers/kubernetes-ingress.md'\n - 'Kubernetes Gateway API': 'providers/kubernetes-gateway.md'\n - 'Consul Catalog': 'providers/consul-catalog.md'\n - 'Nomad': 'providers/nomad.md'\n - 'ECS': 'providers/ecs.md'\n - 'File': 'providers/file.md'\n - 'Consul': 'providers/consul.md'\n - 'Etcd': 'providers/etcd.md'\n - 'ZooKeeper': 'providers/zookeeper.md'\n - 'Redis': 'providers/redis.md'\n - 'HTTP': 'providers/http.md'\n - 'Routing & Load Balancing':\n - 'Overview': 'routing/overview.md'\n - 'EntryPoints': 'routing/entrypoints.md'\n - 'Routers': 'routing/routers/index.md'\n - 'Services': 'routing/services/index.md'\n - 'Providers':\n - 'Docker': 'routing/providers/docker.md'\n - 'Swarm': 'routing/providers/swarm.md'\n - 'Kubernetes IngressRoute': 'routing/providers/kubernetes-crd.md'\n - 'Kubernetes Ingress': 'routing/providers/kubernetes-ingress.md'\n - 'Kubernetes Gateway API': 'routing/providers/kubernetes-gateway.md'\n - 'Consul Catalog': 'routing/providers/consul-catalog.md'\n - 'Nomad': 'routing/providers/nomad.md'\n - 'ECS': 'routing/providers/ecs.md'\n - 'KV': 'routing/providers/kv.md'\n - 'HTTPS & TLS':\n - 'Overview': 'https/overview.md'\n - 'TLS': 'https/tls.md'\n - 'Let''s Encrypt': 'https/acme.md'\n - 'Tailscale': 'https/tailscale.md'\n - 'SPIFFE': 'https/spiffe.md'\n - 'Middlewares':\n - 'Overview': 'middlewares/overview.md'\n - 'HTTP':\n - 'Overview': 'middlewares/http/overview.md'\n - 'AddPrefix': 'middlewares/http/addprefix.md'\n - 'BasicAuth': 'middlewares/http/basicauth.md'\n - 'Buffering': 'middlewares/http/buffering.md'\n - 'Chain': 'middlewares/http/chain.md'\n - 'CircuitBreaker': 'middlewares/http/circuitbreaker.md'\n - 'Compress': 'middlewares/http/compress.md'\n - 'ContentType': 'middlewares/http/contenttype.md'\n - 'DigestAuth': 'middlewares/http/digestauth.md'\n 
- 'Errors': 'middlewares/http/errorpages.md'\n - 'ForwardAuth': 'middlewares/http/forwardauth.md'\n - 'GrpcWeb': 'middlewares/http/grpcweb.md'\n - 'Headers': 'middlewares/http/headers.md'\n - 'IPWhiteList': 'middlewares/http/ipwhitelist.md'\n - 'IPAllowList': 'middlewares/http/ipallowlist.md'\n - 'InFlightReq': 'middlewares/http/inflightreq.md'\n - 'PassTLSClientCert': 'middlewares/http/passtlsclientcert.md'\n - 'RateLimit': 'middlewares/http/ratelimit.md'\n - 'RedirectRegex': 'middlewares/http/redirectregex.md'\n - 'RedirectScheme': 'middlewares/http/redirectscheme.md'\n - 'ReplacePath': 'middlewares/http/replacepath.md'\n - 'ReplacePathRegex': 'middlewares/http/replacepathregex.md'\n - 'Retry': 'middlewares/http/retry.md'\n - 'StripPrefix': 'middlewares/http/stripprefix.md'\n - 'StripPrefixRegex': 'middlewares/http/stripprefixregex.md'\n - 'TCP':\n - 'Overview': 'middlewares/tcp/overview.md'\n - 'InFlightConn': 'middlewares/tcp/inflightconn.md'\n - 'IPWhiteList': 'middlewares/tcp/ipwhitelist.md'\n - 'IPAllowList': 'middlewares/tcp/ipallowlist.md'\n - 'Plugins & Plugin Catalog': 'plugins/index.md'\n - 'Operations':\n - 'CLI': 'operations/cli.md'\n - 'Dashboard' : 'operations/dashboard.md'\n - 'API': 'operations/api.md'\n - 'Ping': 'operations/ping.md'\n - 'Observability':\n - 'Overview': 'observability/overview.md'\n - 'Logs': 'observability/logs.md'\n - 'Access Logs': 'observability/access-logs.md'\n - 'Metrics':\n - 'Overview': 'observability/metrics/overview.md'\n - 'Datadog': 'observability/metrics/datadog.md'\n - 'InfluxDB2': 'observability/metrics/influxdb2.md'\n - 'OpenTelemetry': 'observability/metrics/opentelemetry.md'\n - 'Prometheus': 'observability/metrics/prometheus.md'\n - 'StatsD': 'observability/metrics/statsd.md'\n - 'Tracing':\n - 'Overview': 'observability/tracing/overview.md'\n - 'OpenTelemetry': 'observability/tracing/opentelemetry.md'\n - 'Security':\n - 'Best Practices':\n - 'security/best-practices/content-length.md'\n - 'User Guides':\n - 
'FastProxy': 'user-guides/fastproxy.md'\n - 'Kubernetes and Let''s Encrypt': 'user-guides/crd-acme/index.md'\n - 'Kubernetes and cert-manager': 'user-guides/cert-manager.md'\n - 'gRPC Examples': 'user-guides/grpc.md'\n - 'Docker':\n - 'Basic Example': 'user-guides/docker-compose/basic-example/index.md'\n - 'HTTPS with Let''s Encrypt':\n - 'TLS Challenge': 'user-guides/docker-compose/acme-tls/index.md'\n - 'HTTP Challenge': 'user-guides/docker-compose/acme-http/index.md'\n - 'DNS Challenge': 'user-guides/docker-compose/acme-dns/index.md'\n - 'Migration':\n - 'Traefik v3 minor migrations': 'migration/v3.md'\n - 'Traefik v2 to v3':\n - 'Migration guide': 'migration/v2-to-v3.md'\n - 'Configuration changes for v3': 'migration/v2-to-v3-details.md'\n - 'Traefik v2 minor migrations': 'migration/v2.md'\n - 'Traefik v1 to v2': 'migration/v1-to-v2.md'\n - 'Contributing':\n - 'Thank You!': 'contributing/thank-you.md'\n - 'Submitting Issues': 'contributing/submitting-issues.md'\n - 'Submitting PRs': 'contributing/submitting-pull-requests.md'\n - 'Security': 'contributing/submitting-security-issues.md'\n - 'Building and Testing': 'contributing/building-testing.md'\n - 'Documentation': 'contributing/documentation.md'\n - 'Data Collection': 'contributing/data-collection.md'\n - 'Advocating': 'contributing/advocating.md'\n - 'Maintainers': 'contributing/maintainers.md'\n - 'Reference':\n - 'Install Configuration':\n - 'Boot Environment': 'reference/install-configuration/boot-environment.md'\n - 'Configuration Discovery':\n - 'Overview' : 'reference/install-configuration/providers/overview.md'\n - 'Kubernetes':\n - 'Kubernetes Gateway API' : 'reference/install-configuration/providers/kubernetes/kubernetes-gateway.md'\n - 'Kubernetes CRD' : 'reference/install-configuration/providers/kubernetes/kubernetes-crd.md'\n - 'Kubernetes Ingress' : 'reference/install-configuration/providers/kubernetes/kubernetes-ingress.md'\n - 'Docker': 'reference/install-configuration/providers/docker.md'\n 
- 'Swarm': 'reference/install-configuration/providers/swarm.md'\n - 'Hashicorp':\n - 'Nomad': "reference/install-configuration/providers/hashicorp/nomad.md"\n - 'Consul': 'reference/install-configuration/providers/hashicorp/consul.md'\n - 'Consul Catalog': 'reference/install-configuration/providers/hashicorp/consul-catalog.md'\n - 'KV Stores':\n - 'Redis': 'reference/install-configuration/providers/kv/redis.md'\n - 'Consul': 'reference/install-configuration/providers/kv/consul.md'\n - 'etcd': 'reference/install-configuration/providers/kv/etcd.md'\n - 'ZooKeeper' : 'reference/install-configuration/providers/kv/zk.md'\n - 'Others':\n - 'File': 'reference/install-configuration/providers/others/file.md'\n - 'ECS': 'reference/install-configuration/providers/others/ecs.md'\n - 'HTTP': 'reference/install-configuration/providers/others/http.md'\n - 'EntryPoints': 'reference/install-configuration/entrypoints.md'\n - 'API & Dashboard': 'reference/install-configuration/api-dashboard.md'\n - 'TLS':\n - 'Certificate Resolvers':\n - "Overview" : 'reference/install-configuration/tls/certificate-resolvers/overview.md'\n - "ACME" : 'reference/install-configuration/tls/certificate-resolvers/acme.md'\n - "Tailscale" : 'reference/install-configuration/tls/certificate-resolvers/tailscale.md'\n - "SPIFFE" : 'reference/install-configuration/tls/spiffe.md'\n - 'Observability':\n - 'Metrics' : 'reference/install-configuration/observability/metrics.md'\n - 'Tracing': 'reference/install-configuration/observability/tracing.md'\n - 'Logs & AccessLogs': 'reference/install-configuration/observability/logs-and-accesslogs.md'\n - 'Health Check (CLI & Ping)': 'reference/install-configuration/observability/healthcheck.md'\n # - 'Options List': 'reference/install-configuration/cli-options-list.md' -- Todo\n - 'Routing Configuration':\n - 'General' :\n - 'Configuration Methods' : 'reference/routing-configuration/dynamic-configuration-methods.md'\n - 'HTTP' :\n - 'Router' :\n - 'Rules & Priority' : 
'reference/routing-configuration/http/router/rules-and-priority.md'\n - 'Observability': 'reference/routing-configuration/http/router/observability.md'\n - 'Load Balancing' :\n - 'Service' : 'reference/routing-configuration/http/load-balancing/service.md'\n - 'ServersTransport' : 'reference/routing-configuration/http/load-balancing/serverstransport.md'\n - 'TLS' :\n - 'Overview' : 'reference/routing-configuration/http/tls/overview.md'\n - 'TLS Certificates' : 'reference/routing-configuration/http/tls/tls-certificates.md'\n - 'TLS Options' : 'reference/routing-configuration/http/tls/tls-options.md'\n - 'Middlewares' :\n - 'Overview' : 'reference/routing-configuration/http/middlewares/overview.md'\n - 'AddPrefix' : 'reference/routing-configuration/http/middlewares/addprefix.md'\n - 'BasicAuth' : 'reference/routing-configuration/http/middlewares/basicauth.md'\n - 'Buffering': 'reference/routing-configuration/http/middlewares/buffering.md'\n - 'Chain': 'reference/routing-configuration/http/middlewares/chain.md'\n - 'Circuit Breaker' : 'reference/routing-configuration/http/middlewares/circuitbreaker.md'\n - 'Compress': 'reference/routing-configuration/http/middlewares/compress.md'\n - 'ContentType': 'reference/routing-configuration/http/middlewares/contenttype.md'\n - 'DigestAuth': 'reference/routing-configuration/http/middlewares/digestauth.md'\n - 'Errors': 'reference/routing-configuration/http/middlewares/errorpages.md'\n - 'ForwardAuth': 'reference/routing-configuration/http/middlewares/forwardauth.md'\n - 'GrpcWeb': 'reference/routing-configuration/http/middlewares/grpcweb.md'\n - 'Headers': 'reference/routing-configuration/http/middlewares/headers.md'\n - 'IPAllowList': 'reference/routing-configuration/http/middlewares/ipallowlist.md'\n - 'InFlightReq': 'reference/routing-configuration/http/middlewares/inflightreq.md'\n - 'PassTLSClientCert': 'reference/routing-configuration/http/middlewares/passtlsclientcert.md'\n - 'RateLimit': 
'reference/routing-configuration/http/middlewares/ratelimit.md'\n - 'RedirectRegex': 'reference/routing-configuration/http/middlewares/redirectregex.md'\n - 'RedirectScheme': 'reference/routing-configuration/http/middlewares/redirectscheme.md'\n - 'ReplacePath': 'reference/routing-configuration/http/middlewares/replacepath.md'\n - 'ReplacePathRegex': 'reference/routing-configuration/http/middlewares/replacepathregex.md'\n - 'Retry': 'reference/routing-configuration/http/middlewares/retry.md'\n - 'StripPrefix': 'reference/routing-configuration/http/middlewares/stripprefix.md'\n - 'StripPrefixRegex': 'reference/routing-configuration/http/middlewares/stripprefixregex.md'\n - 'TCP' :\n - 'Router' :\n - 'Rules & Priority' : 'reference/routing-configuration/tcp/router/rules-and-priority.md'\n - 'Service' : 'reference/routing-configuration/tcp/service.md'\n - 'ServersTransport' : 'reference/routing-configuration/tcp/serverstransport.md'\n - 'TLS' : 'reference/routing-configuration/tcp/tls.md'\n - 'Middlewares' :\n - 'Overview' : 'reference/routing-configuration/tcp/middlewares/overview.md'\n - 'InFlightConn' : 'reference/routing-configuration/tcp/middlewares/inflightconn.md'\n - 'IPAllowList' : 'reference/routing-configuration/tcp/middlewares/ipallowlist.md'\n - 'UDP' :\n - 'Router' :\n - 'Rules & Priority' : 'reference/routing-configuration/udp/router/rules-priority.md'\n - 'Service' : 'reference/routing-configuration/udp/service.md'\n - 'Kubernetes':\n - 'Gateway API' : 'reference/routing-configuration/kubernetes/gateway-api.md'\n - 'Kubernetes CRD' :\n - 'HTTP' :\n - 'IngressRoute' : 'reference/routing-configuration/kubernetes/crd/http/ingressroute.md'\n - 'TraefikService' : 'reference/routing-configuration/kubernetes/crd/http/traefikservice.md'\n - 'ServersTransport' : 'reference/routing-configuration/kubernetes/crd/http/serverstransport.md'\n - 'Middleware' : 'reference/routing-configuration/kubernetes/crd/http/middleware.md'\n - 'TLSOption' : 
'reference/routing-configuration/kubernetes/crd/http/tlsoption.md'\n - 'TLSStore' : 'reference/routing-configuration/kubernetes/crd/http/tlsstore.md'\n - 'TCP' :\n - 'IngressRouteTCP' : 'reference/routing-configuration/kubernetes/crd/tcp/ingressroutetcp.md'\n - 'ServersTransportTCP' : 'reference/routing-configuration/kubernetes/crd/tcp/serverstransporttcp.md'\n - 'MiddlewareTCP' : 'reference/routing-configuration/kubernetes/crd/tcp/middlewaretcp.md'\n - 'TLSOption' : 'reference/routing-configuration/kubernetes/crd/tcp/tlsoption.md'\n - 'TLSStore' : 'reference/routing-configuration/kubernetes/crd/tcp/tlsstore.md'\n - 'UDP' :\n - 'IngressRouteUDP' : 'reference/routing-configuration/kubernetes/crd/udp/ingressrouteudp.md'\n - 'Ingress' : 'reference/routing-configuration/kubernetes/ingress.md'\n - 'Label & Tag Providers' :\n - 'Docker' : 'reference/routing-configuration/other-providers/docker.md'\n - 'Swarm' : 'reference/routing-configuration/other-providers/swarm.md'\n - 'Consul Catalog' : 'reference/routing-configuration/other-providers/consul-catalog.md'\n - 'Nomad' : 'reference/routing-configuration/other-providers/nomad.md'\n - 'ECS' : 'reference/routing-configuration/other-providers/ecs.md'\n - 'KV' : 'reference/routing-configuration/other-providers/kv.md'\n - 'Deprecation Notices':\n - 'Releases': 'deprecation/releases.md'\n - 'Features': 'deprecation/features.md'\n
dataset_sample\yaml\go\mkdocs.yml
mkdocs.yml
YAML
17,085
0.8
0.006329
0.016287
python-kit
539
2024-04-24T20:46:00.268689
Apache-2.0
false
32c428a0a6710de9a7adc5ba7e2eeb49
- path: github.com/spf13/afero\n minVersion: v1.1.2\n validUntil: 2029-08-04T16:29:18+03:00\n- path: github.com/influxdata/influxdb1-client\n minVersion: v0.0.0-20190402204710-8ff2fc3824fc\n validUntil: 2029-08-04T16:29:55+03:00\n- path: gopkg.in/guregu/null.v3\n minVersion: v3.3.0\n validUntil: 2029-08-04T16:32:44+03:00\n- path: gopkg.in/yaml.v3\n minVersion: v3.0.0-20200313102051-9f266ea9e77c\n validUntil: 2029-08-04T16:32:56+03:00\n- path: github.com/spf13/cobra\n minVersion: v1.4.0\n validUntil: 2049-07-26T17:21:52+03:00\n- path: github.com/spf13/pflag\n minVersion: v1.0.5\n validUntil: 2049-07-26T17:21:57+03:00\n- path: github.com/andybalholm/cascadia\n minVersion: v1.1.0\n validUntil: 2049-07-26T17:24:57+03:00\n
dataset_sample\yaml\go\modtools_frozen.yml
modtools_frozen.yml
YAML
720
0.7
0
0
vue-tools
786
2024-06-25T13:08:42.965066
MIT
false
307da3fa24efb081b6a3eadc52ee72bb
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n - id: "mpl_busl"\n patterns:\n - pattern-either:\n - pattern-inside: |\n ...\n // SPDX-License-Identifier: MPL-2.0\n ...\n package $A\n ...\n import (\n ...\n ...\n )\n ...\n - pattern-inside: |\n ...\n // SPDX-License-Identifier: MPL-2.0\n ...\n package $A\n ...\n import ...\n ...\n - pattern: |\n "github.com/hashicorp/nomad/$...B"\n - metavariable-pattern:\n metavariable: $...B\n patterns:\n # List of MPL-2.0 packages that are allowed to be imported.\n # Command to find packages:\n # find . -name LICENSE ! -path '*node_modules*' | sort\n - pattern-not: "api..."\n - pattern-not: "demo..."\n - pattern-not: "drivers/shared..."\n - pattern-not: "helper/crypto..."\n - pattern-not: "helper/grpc-middleware..."\n - pattern-not: "helper/pluginutils/grpcutils..."\n - pattern-not: "helper/pluginutils/hclspecutils..."\n - pattern-not: "helper/pointer..."\n - pattern-not: "helper/testlog..."\n - pattern-not: "helper/uuid..."\n - pattern-not: "jobspec..."\n - pattern-not: "jobspec2..."\n - pattern-not: "plugins..."\n message: "BUSL package `github.com/hashicorp/nomad/$...B` imported in MPL package `$A`"\n languages:\n - "generic"\n paths:\n exclude:\n - "*_test.go"\n severity: "ERROR"\n
dataset_sample\yaml\go\mpl_busl.yml
mpl_busl.yml
YAML
1,678
0.95
0
0.132075
node-utils
226
2024-06-09T00:53:52.392600
GPL-3.0
false
a834aa056bbd7479dd0d6a0acbb958fe
# Number of days of inactivity before an Issue is closed for lack of response\ndaysUntilClose: 30\n# Label requiring a response\nresponseRequiredLabel: "need:more-information"\n# Comment to post when closing an Issue for lack of response. Set to `false` to disable\ncloseComment: >\n This issue has been automatically closed because there has been no response\n to our request for more information from the original author. With only the\n information that is currently in the issue, we don't have enough information\n to take action. Please reach out if you have more relevant information or\n answers to our questions so that we can investigate further.\n
dataset_sample\yaml\go\no-response.yml
no-response.yml
YAML
651
0.8
0.363636
0.272727
vue-tools
922
2024-02-01T09:57:51.260667
Apache-2.0
false
db3bd2d166faec162d47c0e3a1d5bbc2
# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json\n# TODO: Pre-cache beats-dev/golang-crossbuild container image\n\nenv:\n ASDF_MAGE_VERSION: 1.15.0\n AWS_ARM_INSTANCE_TYPE: "m6g.xlarge"\n AWS_IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64"\n GCP_DEFAULT_MACHINE_TYPE: "c2d-standard-8"\n IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204"\n\n PLATFORMS: "+all linux/amd64 windows/amd64 darwin/amd64"\n PLATFORMS_ARM: "+all linux/arm64 darwin/arm64"\n PLATFORMS_AMD64_FIPS: "+all linux/amd64"\n PLATFORMS_ARM64_FIPS: "+all linux/arm64"\n\nsteps:\n # we use concurrency gates (https://buildkite.com/blog/concurrency-gates)\n # to implement two FIFO queues for DRA-snapshot and DRA-staging\n # this prevents parallel builds and possibility of publishing out of order DRA artifacts if the first job takes longer than the second\n\n - name: Start of concurrency group for DRA Snapshot\n if: (build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.branch == 'main' || build.env('RUN_SNAPSHOT') == "true") && build.env('VERSION_QUALIFIER') == null\n command: echo "--> Start of concurrency gate dra-snapshot"\n concurrency_group: "dra-gate-snapshot-$BUILDKITE_BRANCH"\n concurrency: 1\n key: start-gate-snapshot\n\n - name: Start of concurrency group for DRA Staging\n # exceptionally allow building staging from main when VERSION_QUALIFIER is set, to allow prerelease testing\n # TODO remove OR clause below and above comment, and only allow matching /^[0-9]+\.[0-9x]+\$/ for build.branch\n if: build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.env('VERSION_QUALIFIER') != null\n command: echo "--> Start of concurrency gate dra-staging"\n concurrency_group: "dra-gate-staging-$BUILDKITE_BRANCH"\n concurrency: 1\n key: start-gate-staging\n\n - wait\n\n - group: Beats dashboards\n key: dashboards\n steps:\n - label: Snapshot dashboards\n if: (build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.branch == 'main' || 
build.env('RUN_SNAPSHOT') == "true") && build.env('VERSION_QUALIFIER') == null\n depends_on: start-gate-snapshot\n key: dashboards-snapshot\n # TODO: container with go and make\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "${GCP_DEFAULT_MACHINE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n commands:\n - make build/distributions/dependencies.csv\n - make beats-dashboards\n env:\n SNAPSHOT: true\n DEV: true\n artifact_paths:\n - build/distributions/**/*\n\n - label: Staging dashboards\n # TODO remove OR clause below (see earlier comment)\n if: (build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.env('VERSION_QUALIFIER') != null) || build.env('RUN_STAGING') == "true"\n depends_on: start-gate-staging\n key: dashboards-staging\n # TODO: container with go and make\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "${GCP_DEFAULT_MACHINE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n commands: |\n source .buildkite/scripts/version_qualifier.sh\n make build/distributions/dependencies.csv\n make beats-dashboards\n env:\n SNAPSHOT: false\n DEV: false\n artifact_paths:\n - build/distributions/**/*\n\n - group: Packaging snapshot\n if: (build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.branch == 'main' || build.env('RUN_SNAPSHOT') == "true") && build.env('VERSION_QUALIFIER') == null\n key: packaging-snapshot\n depends_on: start-gate-snapshot\n steps:\n - label: "SNAPSHOT: {{matrix}}"\n env:\n PLATFORMS: "${PLATFORMS}"\n SNAPSHOT: true\n # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270\n DEV: false\n command: ".buildkite/scripts/packaging/package-dra.sh {{matrix}}"\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "${GCP_DEFAULT_MACHINE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n matrix:\n - auditbeat\n - filebeat\n - 
heartbeat\n - metricbeat\n - packetbeat\n - winlogbeat\n - x-pack/auditbeat\n - x-pack/dockerlogbeat\n - x-pack/filebeat\n - x-pack/heartbeat\n - x-pack/metricbeat\n - x-pack/osquerybeat\n - x-pack/packetbeat\n - x-pack/winlogbeat\n\n - label: "SNAPSHOT: {{matrix}} Linux/arm64 and Darwin/arm64"\n env:\n PLATFORMS: "${PLATFORMS_ARM}"\n SNAPSHOT: true\n # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270\n DEV: false\n command: ".buildkite/scripts/packaging/package-dra.sh {{matrix}}"\n agents:\n provider: "aws"\n imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}"\n instanceType: "${AWS_ARM_INSTANCE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n matrix:\n - auditbeat\n - filebeat\n - heartbeat\n - metricbeat\n - packetbeat\n - x-pack/auditbeat\n - x-pack/dockerlogbeat\n - x-pack/filebeat\n - x-pack/heartbeat\n - x-pack/metricbeat\n - x-pack/packetbeat\n - x-pack/osquerybeat\n - x-pack/agentbeat\n\n ## Agentbeat needs more CPUs because it builds many other beats\n - label: "SNAPSHOT: x-pack/agentbeat all artifacts apart from linux/arm64 and darwin/arm64"\n env:\n PLATFORMS: "${PLATFORMS}"\n SNAPSHOT: true\n # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270\n DEV: false\n command: ".buildkite/scripts/packaging/package-dra.sh x-pack/agentbeat"\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "c2-standard-16"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n\n - label: "SNAPSHOT: {{matrix}} Linux/amd64 FIPS"\n env:\n PLATFORMS: "${PLATFORMS_AMD64_FIPS}"\n SNAPSHOT: true\n # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270\n DEV: false\n FIPS: true\n command: ".buildkite/scripts/packaging/package-dra.sh {{matrix}}"\n agents:\n 
provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "${GCP_DEFAULT_MACHINE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n matrix:\n - x-pack/auditbeat\n - x-pack/filebeat\n - x-pack/metricbeat\n\n - label: "SNAPSHOT: {{matrix}} Linux/arm64 FIPS"\n env:\n PLATFORMS: "${PLATFORMS_ARM64_FIPS}"\n SNAPSHOT: true\n # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270\n DEV: false\n FIPS: true\n command: ".buildkite/scripts/packaging/package-dra.sh {{matrix}}"\n agents:\n provider: "aws"\n imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}"\n instanceType: "${AWS_ARM_INSTANCE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n matrix:\n - x-pack/auditbeat\n - x-pack/filebeat\n - x-pack/metricbeat\n - x-pack/agentbeat\n\n ## Agentbeat needs more CPUs because it builds many other beats\n - label: "SNAPSHOT: x-pack/agentbeat Linux/amd64 FIPS"\n env:\n PLATFORMS: "${PLATFORMS_AMD64_FIPS}"\n SNAPSHOT: true\n # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270\n DEV: false\n FIPS: true\n command: ".buildkite/scripts/packaging/package-dra.sh x-pack/agentbeat"\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "c2-standard-16"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n\n - group: Packaging Staging\n key: packaging-staging\n depends_on: start-gate-staging\n # TODO remove OR clause below (see earlier comment)\n if: (build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.env('VERSION_QUALIFIER') != null) || build.env('RUN_STAGING') == "true"\n steps:\n - label: "STAGING: {{matrix}}"\n env:\n PLATFORMS: "${PLATFORMS}"\n SNAPSHOT: false\n DEV: false\n command: |\n source .buildkite/scripts/version_qualifier.sh\n 
.buildkite/scripts/packaging/package-dra.sh {{matrix}}\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "${GCP_DEFAULT_MACHINE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n matrix:\n - auditbeat\n - filebeat\n - heartbeat\n - metricbeat\n - packetbeat\n - winlogbeat\n - x-pack/auditbeat\n - x-pack/dockerlogbeat\n - x-pack/filebeat\n - x-pack/heartbeat\n - x-pack/metricbeat\n - x-pack/osquerybeat\n - x-pack/packetbeat\n - x-pack/winlogbeat\n\n - label: "STAGING: {{matrix}} Linux/arm64 and Darwin/arm64"\n env:\n PLATFORMS: "${PLATFORMS_ARM}"\n SNAPSHOT: false\n DEV: false\n command: |\n source .buildkite/scripts/version_qualifier.sh\n .buildkite/scripts/packaging/package-dra.sh {{matrix}}\n agents:\n provider: "aws"\n imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}"\n instanceType: "${AWS_ARM_INSTANCE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n matrix:\n - auditbeat\n - filebeat\n - heartbeat\n - metricbeat\n - packetbeat\n - x-pack/auditbeat\n - x-pack/dockerlogbeat\n - x-pack/filebeat\n - x-pack/heartbeat\n - x-pack/metricbeat\n - x-pack/packetbeat\n - x-pack/osquerybeat\n - x-pack/agentbeat\n\n ## Agentbeat needs more CPUs because it builds many other beats\n - label: "STAGING: x-pack/agentbeat all artifacts apart from linux/arm64 and darwin/arm64"\n env:\n PLATFORMS: "${PLATFORMS}"\n SNAPSHOT: false\n DEV: false\n command: |\n source .buildkite/scripts/version_qualifier.sh\n .buildkite/scripts/packaging/package-dra.sh x-pack/agentbeat\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "c2-standard-16"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n\n - label: "STAGING: {{matrix}} Linux/amd64 FIPS"\n env:\n PLATFORMS: "${PLATFORMS_AMD64_FIPS}"\n SNAPSHOT: false\n # packaging with `DEV=true` may cause linker issues while 
crosscompiling https://github.com/elastic/beats/issues/41270\n DEV: false\n FIPS: true\n command: |\n source .buildkite/scripts/version_qualifier.sh\n .buildkite/scripts/packaging/package-dra.sh {{matrix}}\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "${GCP_DEFAULT_MACHINE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n matrix:\n - x-pack/auditbeat\n - x-pack/filebeat\n - x-pack/metricbeat\n\n - label: "STAGING: {{matrix}} Linux/arm64 FIPS"\n env:\n PLATFORMS: "${PLATFORMS_ARM64_FIPS}"\n SNAPSHOT: false\n # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270\n DEV: false\n FIPS: true\n command: |\n source .buildkite/scripts/version_qualifier.sh\n .buildkite/scripts/packaging/package-dra.sh {{matrix}}\n agents:\n provider: "aws"\n imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}"\n instanceType: "${AWS_ARM_INSTANCE_TYPE}"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n matrix:\n - x-pack/auditbeat\n - x-pack/filebeat\n - x-pack/metricbeat\n - x-pack/agentbeat\n\n ## Agentbeat needs more CPUs because it builds many other beats\n - label: "STAGING: x-pack/agentbeat Linux/amd64 FIPS"\n env:\n PLATFORMS: "${PLATFORMS_AMD64_FIPS}"\n SNAPSHOT: false\n # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270\n DEV: false\n FIPS: true\n command: |\n source .buildkite/scripts/version_qualifier.sh\n .buildkite/scripts/packaging/package-dra.sh x-pack/agentbeat\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "c2-standard-16"\n timeout_in_minutes: 40\n retry:\n automatic:\n - limit: 1\n artifact_paths:\n - build/distributions/**/*\n\n - group: DRA publish\n key: dra\n steps:\n - label: DRA Snapshot\n ## Only for release branches and main\n if: (build.branch =~ 
/^[0-9]+\.[0-9x]+\$/ || build.branch == 'main' || build.env('RUN_SNAPSHOT') == "true") && build.env('VERSION_QUALIFIER') == null\n key: dra-snapshot\n env:\n DRA_WORKFLOW: snapshot\n depends_on:\n - start-gate-snapshot\n - packaging-snapshot\n - dashboards-snapshot\n command: |\n buildkite-agent artifact download "build/**/*" .\n .buildkite/scripts/packaging/prepare-release-manager.sh snapshot\n .buildkite/scripts/dra.sh\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "${GCP_DEFAULT_MACHINE_TYPE}"\n\n - label: DRA Staging\n # TODO remove OR clause below (see earlier comment)\n if: (build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.env('VERSION_QUALIFIER') != null) || build.env('RUN_STAGING') == "true"\n key: dra-staging\n env:\n DRA_WORKFLOW: staging\n depends_on:\n - start-gate-staging\n - packaging-staging\n - dashboards-staging\n command: |\n source .buildkite/scripts/version_qualifier.sh\n buildkite-agent artifact download "build/**" .\n .buildkite/scripts/packaging/prepare-release-manager.sh staging\n .buildkite/scripts/dra.sh\n agents:\n provider: gcp\n image: "${IMAGE_UBUNTU_X86_64}"\n machineType: "${GCP_DEFAULT_MACHINE_TYPE}"\n\n - wait\n\n - command: echo "End of concurrency gate dra-snapshot <--"\n if: (build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.branch == 'main' || build.env('RUN_SNAPSHOT') == "true") && build.env('VERSION_QUALIFIER') == null\n concurrency_group: "dra-gate-snapshot-$BUILDKITE_BRANCH"\n concurrency: 1\n key: end-gate-snapshot\n\n - command: echo "End of concurrency gate dra-staging <--"\n if: build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.env('VERSION_QUALIFIER') != null\n concurrency_group: "dra-gate-staging-$BUILDKITE_BRANCH"\n concurrency: 1\n key: end-gate-staging\n
dataset_sample\yaml\go\packaging.pipeline.yml
packaging.pipeline.yml
YAML
16,462
0.8
0.053879
0.059226
awesome-app
255
2023-09-30T17:33:17.602720
BSD-3-Clause
false
1e6617a26352afcc3f832eedc9bde9eb
packetbeat.interfaces.device: any\npacketbeat.interfaces.snaplen: 1514\npacketbeat.interfaces.type: af_packet\npacketbeat.interfaces.buffer_size_mb: 100\n\npacketbeat.flows:\n timeout: 30s\n period: 10s\n\npacketbeat.protocols.dns:\n ports: [53]\n\npacketbeat.protocols.http:\n ports: [80, 5601, 9200, 8080, 8081, 5000, 8002]\n\npacketbeat.protocols.memcache:\n ports: [11211]\n\npacketbeat.protocols.mysql:\n ports: [3306]\n\npacketbeat.protocols.pgsql:\n ports: [5432]\n\npacketbeat.protocols.redis:\n ports: [6379]\n\npacketbeat.protocols.thrift:\n ports: [9090]\n\npacketbeat.protocols.mongodb:\n ports: [27017]\n\npacketbeat.protocols.cassandra:\n ports: [9042]\n\npacketbeat.protocols.tls:\n ports: [443, 993, 995, 5223, 8443, 8883, 9243]\n\npacketbeat.protocols.sip:\n ports: [5060]\n\nprocessors:\n - add_cloud_metadata: ~\n - add_docker_metadata: ~\n\noutput.elasticsearch:\n hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'\n username: '${ELASTICSEARCH_USERNAME:}'\n password: '${ELASTICSEARCH_PASSWORD:}'\n
dataset_sample\yaml\go\packetbeat.docker.yml
packetbeat.docker.yml
YAML
987
0.7
0
0
vue-tools
334
2024-02-11T16:39:15.818085
MIT
false
f9298e6f3b7abe5e117bf2162fedb65b
###################### Packetbeat Configuration Example #######################\n\n# This file is a full configuration example documenting all non-deprecated\n# options in comments. For a shorter configuration example, that contains only\n# the most common options, please see packetbeat.yml in the same directory.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/packetbeat/index.html\n\n# =============================== Network device ===============================\n\n# Select the network interface to sniff the data. You can use the "any"\n# keyword to sniff on all connected interfaces. On all platforms, you\n# can use "default_route", "default_route_ipv4" or "default_route_ipv6"\n# to sniff on the device carrying the default route.\npacketbeat.interfaces.device: any\n\n# The network CIDR blocks are considered "internal" networks for\n# the purpose of network perimeter boundary classification. The valid\n# values for internal_networks are the same as those that can be used\n# with processor network conditions.\n#\n# For a list of available values see:\n# https://www.elastic.co/guide/en/beats/packetbeat/current/defining-processors.html#condition-network\npacketbeat.interfaces.internal_networks:\n - private\n\n# Packetbeat supports three sniffer types:\n# * pcap, which uses the libpcap library and works on most platforms, but it's\n# not the fastest option.\n# * af_packet, which uses memory-mapped sniffing. This option is faster than\n# libpcap and doesn't require a kernel module, but it's Linux-specific.\n#packetbeat.interfaces.type: pcap\n\n# The maximum size of the packets to capture. The default is 65535, which is\n# large enough for almost all networks and interface types. If you sniff on a\n# physical network interface, the optimal setting is the MTU size. 
On virtual\n# interfaces, however, it's safer to accept the default value.\n#packetbeat.interfaces.snaplen: 65535\n\n# The maximum size of the shared memory buffer to use between the kernel and\n# user space. A bigger buffer usually results in lower CPU usage but consumes\n# more memory. This setting is only available for the af_packet sniffer type.\n# The default is 30 MB.\n#packetbeat.interfaces.buffer_size_mb: 30\n\n# Set the polling frequency for interface metrics. This currently only applies\n# to the "afpacket" interface type.\n# The default is 5s (seconds).\n#packetbeat.interfaces.metrics_interval: 5s\n\n# To scale processing across multiple Packetbeat processes, a fanout group\n# identifier can be specified. When `fanout_group` is used the Linux kernel splits\n# packets across Packetbeat instances in the same group by using a flow hash. It\n# computes the flow hash modulo with the number of Packetbeat processes in order\n# to consistently route flows to the same Packetbeat instance.\n#\n# The value must be between 0 and 65535. By default, no value is set.\n#\n# This is only available on Linux and requires using `type: af_packet`. Each process\n# must be running in the same network namespace. All processes must use the same\n# interface settings. You must take responsibility for running multiple instances\n# of Packetbeat.\n#packetbeat.interfaces.fanout_group: ~\n\n# Packetbeat automatically generates a BPF for capturing only the traffic on\n# ports where it expects to find known protocols. 
Use this setting to tell\n# Packetbeat to generate a BPF filter that accepts VLAN tags.\n#packetbeat.interfaces.with_vlans: true\n\n# Use this setting to override the automatically generated BPF filter.\n#packetbeat.interfaces.bpf_filter:\n\n# With `auto_promisc_mode` Packetbeat puts the interface in promiscuous mode automatically on startup.\n# This option does not work with `any` interface device.\n# The default option is false and requires manual set-up of promiscuous mode.\n# Warning: under some circumstances (e.g., beat crash) promiscuous mode\n# can stay enabled even after beat is shut down.\n#packetbeat.interfaces.auto_promisc_mode: true\n\n# By default Ingest pipelines are not updated if a pipeline with the same ID\n# already exists. If this option is enabled Packetbeat overwrites pipelines\n# every time a new Elasticsearch connection is established.\n#packetbeat.overwrite_pipelines: false\n\n# =================================== Flows ====================================\n\npacketbeat.flows:\n # Enable Network flows. Default: true\n #enabled: true\n\n # Set network flow timeout. Flow is killed if no packet is received before being\n # timed out.\n timeout: 30s\n\n # Configure reporting period. If set to -1s, only killed flows will be reported\n period: 10s\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Overrides where flow events are indexed.\n #index: my-custom-flow-index\n\n# =========================== Transaction protocols ============================\n\npacketbeat.protocols:\n- type: icmp\n # Enable ICMPv4 and ICMPv6 monitoring. The default is true.\n #enabled: true\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-icmp-index\n\n- type: amqp\n # Enable AMQP monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for AMQP traffic. 
You can disable\n # the AMQP protocol by commenting out the list of ports.\n ports: [5672]\n # Truncate messages that are published and avoid huge messages being\n # indexed.\n # Default: 1000\n #max_body_length: 1000\n\n # Hide the header fields in header frames.\n # Default: false\n #parse_headers: false\n\n # Hide the additional arguments of method frames.\n # Default: false\n #parse_arguments: false\n\n # Hide all methods relative to connection negotiation between the server and\n # client.\n # Default: true\n #hide_connection_information: true\n\n # If this option is enabled, the raw message of the request (`request` field)\n # is sent to Elasticsearch. The default is false.\n #send_request: false\n\n # If this option is enabled, the raw message of the response (`response`\n # field) is sent to Elasticsearch. The default is false.\n #send_response: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-amqp-index\n\n- type: cassandra\n #Cassandra port for traffic monitoring.\n ports: [9042]\n\n # If this option is enabled, the raw message of the request (`cassandra_request` field)\n # is included in published events. The default is true.\n #send_request: true\n\n # If this option is enabled, the raw message of the request (`cassandra_request.request_headers` field)\n # is included in published events. The default is true. Enable `send_request` first before enabling this option.\n #send_request_header: true\n\n # If this option is enabled, the raw message of the response (`cassandra_response` field)\n # is included in published events. 
The default is true.\n #send_response: true\n\n # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field)\n # is included in published events. The default is true. Enable `send_response` first before enabling this option.\n #send_response_header: true\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Configures the default compression algorithm being used to uncompress compressed frames by name. Currently only `snappy` can be configured.\n # By default no compressor is configured.\n #compressor: "snappy"\n\n # This option indicates which Operator/Operators will be ignored.\n #ignored_ops: ["SUPPORTED","OPTIONS"]\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-cassandra-index\n\n- type: dhcpv4\n # Configure the DHCP for IPv4 ports.\n ports: [67, 68]\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n- type: dns\n # Enable DNS monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for DNS traffic. 
You can disable\n # the DNS protocol by commenting out the list of ports.\n ports: [53]\n\n # include_authorities controls whether or not the dns.authorities field\n # (authority resource records) is added to messages.\n # Default: false\n include_authorities: true\n # include_additionals controls whether or not the dns.additionals field\n # (additional resource records) is added to messages.\n # Default: false\n include_additionals: true\n\n # send_request and send_response control whether or not the stringified DNS\n # request and response message are added to the result.\n # Nearly all data about the request/response is available in the dns.*\n # fields, but this can be useful if you need visibility specifically\n # into the request or the response.\n # Default: false\n # send_request: true\n # send_response: true\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-dns-index\n\n- type: http\n # Enable HTTP monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for HTTP traffic. You can disable\n # the HTTP protocol by commenting out the list of ports.\n ports: [80, 8080, 8000, 5000, 8002]\n\n # Uncomment the following to hide certain parameters in the URL or forms attached\n # to HTTP requests. The names of the parameters are case-insensitive.\n # The value of the parameters will be replaced with the 'xxxxx' string.\n # This is generally useful for avoiding storing user passwords or other\n # sensitive information.\n # Only query parameters and top level form parameters are replaced.\n # hide_keywords: ['pass', 'password', 'passwd']\n\n # A list of header names to capture and send to Elasticsearch. 
These headers\n # are placed under the `headers` dictionary in the resulting JSON.\n #send_headers: false\n\n # Instead of sending a white list of headers to Elasticsearch, you can send\n # all headers by setting this option to true. The default is false.\n #send_all_headers: false\n\n # A list of headers to redact if present in the HTTP request. This will keep\n # the header field present, but will redact it's value to show the headers\n # presence.\n #redact_headers: []\n\n # The list of content types for which Packetbeat includes the full HTTP\n # payload. If the request's or response's Content-Type matches any on this\n # list, the full body will be included under the request or response field.\n #include_body_for: []\n\n # The list of content types for which Packetbeat includes the full HTTP\n # request payload.\n #include_request_body_for: []\n\n # The list of content types for which Packetbeat includes the full HTTP\n # response payload.\n #include_response_body_for: []\n\n # Whether the body of a request must be decoded when a content-encoding\n # or transfer-encoding has been applied.\n #decode_body: true\n\n # If the Cookie or Set-Cookie headers are sent, this option controls whether\n # they are split into individual values.\n #split_cookie: false\n\n # The header field to extract the real IP from. This setting is useful when\n # you want to capture traffic behind a reverse proxy, but you want to get the\n # geo-location information.\n #real_ip_header:\n\n # If this option is enabled, the raw message of the request (`request` field)\n # is sent to Elasticsearch. The default is false.\n #send_request: false\n\n # If this option is enabled, the raw message of the response (`response`\n # field) is sent to Elasticsearch. The default is false.\n #send_response: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. 
Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Maximum message size. If an HTTP message is larger than this, it will\n # be trimmed to this size. Default is 10 MB.\n #max_message_size: 10485760\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-http-index\n\n- type: memcache\n # Enable memcache monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for memcache traffic. You can disable\n # the Memcache protocol by commenting out the list of ports.\n ports: [11211]\n\n # Uncomment the parseunknown option to force the memcache text protocol parser\n # to accept unknown commands.\n # Note: All unknown commands MUST not contain any data parts!\n # Default: false\n # parseunknown: true\n\n # Update the maxvalue option to store the values - base64 encoded - in the\n # json output.\n # possible values:\n # maxvalue: -1 # store all values (text based protocol multi-get)\n # maxvalue: 0 # store no values at all\n # maxvalue: N # store up to N values\n # Default: 0\n # maxvalues: -1\n\n # Use maxbytespervalue to limit the number of bytes to be copied per value element.\n # Note: Values will be base64 encoded, so actual size in json document\n # will be 4 times maxbytespervalue.\n # Default: unlimited\n # maxbytespervalue: 100\n\n # UDP transaction timeout in milliseconds.\n # Note: Quiet messages in UDP binary protocol will get response only in error case.\n # The memcached analyzer will wait for udptransactiontimeout milliseconds\n # before publishing quiet messages. Non quiet messages or quiet requests with\n # error response will not have to wait for the timeout.\n # Default: 200\n # udptransactiontimeout: 1000\n\n # If this option is enabled, the raw message of the request (`request` field)\n # is sent to Elasticsearch. 
The default is false.\n #send_request: false\n\n # If this option is enabled, the raw message of the response (`response`\n # field) is sent to Elasticsearch. The default is false.\n #send_response: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-memcache-index\n\n- type: mysql\n # Enable mysql monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for MySQL traffic. You can disable\n # the MySQL protocol by commenting out the list of ports.\n ports: [3306,3307]\n\n # If this option is enabled, the raw message of the request (`request` field)\n # is sent to Elasticsearch. The default is false.\n #send_request: false\n\n # If this option is enabled, the raw message of the response (`response`\n # field) is sent to Elasticsearch. The default is false.\n #send_response: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-mysql-index\n\n- type: pgsql\n # Enable pgsql monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for Pgsql traffic. You can disable\n # the Pgsql protocol by commenting out the list of ports.\n ports: [5432]\n\n # If this option is enabled, the raw message of the request (`request` field)\n # is sent to Elasticsearch. The default is false.\n #send_request: false\n\n # If this option is enabled, the raw message of the response (`response`\n # field) is sent to Elasticsearch. 
The default is false.\n #send_response: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-pgsql-index\n\n- type: redis\n # Enable redis monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for Redis traffic. You can disable\n # the Redis protocol by commenting out the list of ports.\n ports: [6379]\n\n # If this option is enabled, the raw message of the request (`request` field)\n # is sent to Elasticsearch. The default is false.\n #send_request: false\n\n # If this option is enabled, the raw message of the response (`response`\n # field) is sent to Elasticsearch. The default is false.\n #send_response: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Max size for per-session message queue. This places a limit on the memory\n # that can be used to buffer requests and responses for correlation.\n #queue_max_bytes: 1048576\n\n # Max number of messages for per-session message queue. This limits the number\n # of requests or responses that can be buffered for correlation. Set a value\n # large enough to allow for pipelining.\n #queue_max_messages: 20000\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-redis-index\n\n- type: thrift\n # Enable thrift monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for Thrift-RPC traffic. You can disable\n # the Thrift-RPC protocol by commenting out the list of ports.\n ports: [9090]\n\n # The Thrift transport type. 
Currently this option accepts the values socket\n # for TSocket, which is the default Thrift transport, and framed for the\n # TFramed Thrift transport. The default is socket.\n #transport_type: socket\n\n # The Thrift protocol type. Currently the only accepted value is binary for\n # the TBinary protocol, which is the default Thrift protocol.\n #protocol_type: binary\n\n # The Thrift interface description language (IDL) files for the service that\n # Packetbeat is monitoring. Providing the IDL enables Packetbeat to include\n # parameter and exception names.\n #idl_files: []\n\n # The maximum length for strings in parameters or return values. If a string\n # is longer than this value, the string is automatically truncated to this\n # length.\n #string_max_size: 200\n\n # The maximum number of elements in a Thrift list, set, map, or structure.\n #collection_max_size: 15\n\n # If this option is set to false, Packetbeat decodes the method name from the\n # reply and simply skips the rest of the response message.\n #capture_reply: true\n\n # If this option is set to true, Packetbeat replaces all strings found in\n # method parameters, return codes, or exception structures with the "*"\n # string.\n #obfuscate_strings: false\n\n # The maximum number of fields that a structure can have before Packetbeat\n # ignores the whole transaction.\n #drop_after_n_struct_fields: 500\n\n # If this option is enabled, the raw message of the request (`request` field)\n # is sent to Elasticsearch. The default is false.\n #send_request: false\n\n # If this option is enabled, the raw message of the response (`response`\n # field) is sent to Elasticsearch. The default is false.\n #send_response: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. 
Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-thrift-index\n\n- type: mongodb\n # Enable mongodb monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for MongoDB traffic. You can disable\n # the MongoDB protocol by commenting out the list of ports.\n ports: [27017]\n\n\n # The maximum number of documents from the response to index in the `response`\n # field. The default is 10.\n #max_docs: 10\n\n # The maximum number of characters in a single document indexed in the\n # `response` field. The default is 5000. You can set this to 0 to index an\n # unlimited number of characters per document.\n #max_doc_length: 5000\n\n # If this option is enabled, the raw message of the request (`request` field)\n # is sent to Elasticsearch. The default is false.\n #send_request: false\n\n # If this option is enabled, the raw message of the response (`response`\n # field) is sent to Elasticsearch. The default is false.\n #send_response: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-mongodb-index\n\n- type: nfs\n # Enable NFS monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for NFS traffic. You can disable\n # the NFS protocol by commenting out the list of ports.\n ports: [2049]\n\n # If this option is enabled, the raw message of the request (`request` field)\n # is sent to Elasticsearch. The default is false.\n #send_request: false\n\n # If this option is enabled, the raw message of the response (`response`\n # field) is sent to Elasticsearch. 
The default is false.\n #send_response: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Transaction timeout. Expired transactions will no longer be correlated to\n # incoming responses, but sent to Elasticsearch immediately.\n #transaction_timeout: 10s\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-nfs-index\n\n- type: tls\n # Enable TLS monitoring. Default: true\n #enabled: true\n\n # Configure the ports where to listen for TLS traffic. You can disable\n # the TLS protocol by commenting out the list of ports.\n ports:\n - 443 # HTTPS\n - 993 # IMAPS\n - 995 # POP3S\n - 5223 # XMPP over SSL\n - 8443\n - 8883 # Secure MQTT\n - 9243 # Elasticsearch\n\n # List of hash algorithms to use to calculate certificates' fingerprints.\n # Valid values are `sha1`, `sha256` and `md5`.\n #fingerprints: [sha1]\n\n # If this option is enabled, the client and server certificates and\n # certificate chains are sent to Elasticsearch. The default is true.\n #send_certificates: true\n\n # If this option is enabled, the raw certificates will be stored\n # in PEM format under the `raw` key. The default is false.\n #include_raw_certificates: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-tls-index\n\n- type: sip\n # Configure the ports where to listen for SIP traffic. 
You can disable the SIP protocol by commenting out the list of ports.\n ports: [5060]\n\n # Parse the authorization headers\n parse_authorization: true\n\n # Parse body contents (only when body is SDP)\n parse_body: true\n\n # Preserve original contents in event.original\n keep_original: true\n\n # You can monitor tcp SIP traffic by setting the transport_protocol option\n # to tcp, it defaults to udp.\n #transport_protocol: tcp\n\n # Overrides where this protocol's events are indexed.\n #index: my-custom-sip-index\n\n# ============================ Monitored processes =============================\n\n# Packetbeat can enrich events with information about the process associated with\n# the socket that sent or received the packet if Packetbeat is monitoring\n# traffic from the host machine. By default process enrichment is disabled.\n# This feature works on Linux and Windows.\npacketbeat.procs.enabled: false\n\n# If you want to ignore transactions created by the server on which the shipper\n# is installed you can enable this option. This option is useful to remove\n# duplicates if shippers are installed on multiple servers. Default value is\n# false.\npacketbeat.ignore_outgoing: false\n\n# ================================== General ===================================\n\n# The name of the shipper that publishes the network data. It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n# If this option is not defined, the hostname is used.\n#name:\n\n# The tags of the shipper are included in their field with each\n# transaction published. Tags make it easy to group servers by different\n# logical properties.\n#tags: ["service-X", "web-tier"]\n\n# Optional fields that you can specify to add additional information to the\n# output. 
Fields can be scalar values, arrays, dictionaries, or any nested\n# combination of these.\n#fields:\n# env: staging\n\n# If this option is set to true, the custom fields are stored as top-level\n# fields in the output document instead of being grouped under a field\n# sub-dictionary. Default is false.\n#fields_under_root: false\n\n# Configure the precision of all timestamps in Packetbeat.\n# Available options: millisecond, microsecond, nanosecond\n#timestamp.precision: millisecond\n\n# Internal queue configuration for buffering events to be published.\n# Queue settings may be overridden by performance presets in the\n# Elasticsearch output. To configure them manually use "preset: custom".\n#queue:\n # Queue type by name (default 'mem')\n # The memory queue will present all available events (up to the outputs\n # bulk_max_size) to the output, the moment the output is ready to serve\n # another batch of events.\n #mem:\n # Max number of events the queue can buffer.\n #events: 3200\n\n # Hints the minimum number of events stored in the queue,\n # before providing a batch of events to the outputs.\n # The default value is set to 2048.\n # A value of 0 ensures events are immediately available\n # to be sent to the outputs.\n #flush.min_events: 1600\n\n # Maximum duration after which events are available to the outputs,\n # if the number of events stored in the queue is < `flush.min_events`.\n #flush.timeout: 10s\n\n # The disk queue stores incoming events on disk until the output is\n # ready for them. This allows a higher event limit than the memory-only\n # queue and lets pending events persist through a restart.\n #disk:\n # The directory path to store the queue's data.\n #path: "${path.data}/diskqueue"\n\n # The maximum space the queue should occupy on disk. Depending on\n # input settings, events that exceed this limit are delayed or discarded.\n #max_size: 10GB\n\n # The maximum size of a single queue data file. 
Data in the queue is\n # stored in smaller segments that are deleted after all their events\n # have been processed.\n #segment_size: 1GB\n\n # The number of events to read from disk to memory while waiting for\n # the output to request them.\n #read_ahead: 512\n\n # The number of events to accept from inputs while waiting for them\n # to be written to disk. If event data arrives faster than it\n # can be written to disk, this setting prevents it from overflowing\n # main memory.\n #write_ahead: 2048\n\n # The duration to wait before retrying when the queue encounters a disk\n # write error.\n #retry_interval: 1s\n\n # The maximum length of time to wait before retrying on a disk write\n # error. If the queue encounters repeated errors, it will double the\n # length of its retry interval each time, up to this maximum.\n #max_retry_interval: 30s\n\n# Sets the maximum number of CPUs that can be executed simultaneously. The\n# default is the number of logical CPUs available in the system.\n#max_procs:\n\n# ================================= Processors =================================\n\n# Processors are used to reduce the number of fields in the exported event or to\n# enhance the event with external metadata. 
This section defines a list of\n# processors that are applied one by one and the first one receives the initial\n# event:\n#\n# event -> filter1 -> event1 -> filter2 ->event2 ...\n#\n# The supported processors are drop_fields, drop_event, include_fields,\n# decode_json_fields, and add_cloud_metadata.\n#\n# For example, you can use the following processors to keep the fields that\n# contain CPU load percentages, but remove the fields that contain CPU ticks\n# values:\n#\n#processors:\n# - include_fields:\n# fields: ["cpu"]\n# - drop_fields:\n# fields: ["cpu.user", "cpu.system"]\n#\n# The following example drops the events that have the HTTP response code 200:\n#\n#processors:\n# - drop_event:\n# when:\n# equals:\n# http.code: 200\n#\n# The following example renames the field a to b:\n#\n#processors:\n# - rename:\n# fields:\n# - from: "a"\n# to: "b"\n#\n# The following example tokenizes the string into fields:\n#\n#processors:\n# - dissect:\n# tokenizer: "%{key1} - %{key2}"\n# field: "message"\n# target_prefix: "dissect"\n#\n# The following example enriches each event with metadata from the cloud\n# provider about the host machine. 
It works on EC2, GCE, DigitalOcean,\n# Tencent Cloud, and Alibaba Cloud.\n#\n#processors:\n# - add_cloud_metadata: ~\n#\n# The following example enriches each event with the machine's local time zone\n# offset from UTC.\n#\n#processors:\n# - add_locale:\n# format: offset\n#\n# The following example enriches each event with docker metadata, it matches\n# given fields to an existing container id and adds info from that container:\n#\n#processors:\n# - add_docker_metadata:\n# host: "unix:///var/run/docker.sock"\n# match_fields: ["system.process.cgroup.id"]\n# match_pids: ["process.pid", "process.parent.pid"]\n# match_source: true\n# match_source_index: 4\n# match_short_id: false\n# cleanup_timeout: 60\n# labels.dedot: false\n# # To connect to Docker over TLS you must specify a client and CA certificate.\n# #ssl:\n# # certificate_authority: "/etc/pki/root/ca.pem"\n# # certificate: "/etc/pki/client/cert.pem"\n# # key: "/etc/pki/client/cert.key"\n#\n# The following example enriches each event with docker metadata, it matches\n# container id from log path available in `source` field (by default it expects\n# it to be /var/lib/docker/containers/*/*.log).\n#\n#processors:\n# - add_docker_metadata: ~\n#\n# The following example enriches each event with host metadata.\n#\n#processors:\n# - add_host_metadata: ~\n#\n# The following example enriches each event with process metadata using\n# process IDs included in the event.\n#\n#processors:\n# - add_process_metadata:\n# match_pids: ["system.process.ppid"]\n# target: system.process.parent\n#\n# The following example decodes fields containing JSON strings\n# and replaces the strings with valid JSON objects.\n#\n#processors:\n# - decode_json_fields:\n# fields: ["field1", "field2", ...]\n# process_array: false\n# max_depth: 1\n# target: ""\n# overwrite_keys: false\n#\n#processors:\n# - decompress_gzip_field:\n# from: "field1"\n# to: "field2"\n# ignore_missing: false\n# fail_on_error: true\n#\n# The following example copies the 
value of the message to message_copied\n#\n#processors:\n# - copy_fields:\n# fields:\n# - from: message\n# to: message_copied\n# fail_on_error: true\n# ignore_missing: false\n#\n# The following example truncates the value of the message to 1024 bytes\n#\n#processors:\n# - truncate_fields:\n# fields:\n# - message\n# max_bytes: 1024\n# fail_on_error: false\n# ignore_missing: true\n#\n# The following example preserves the raw message under event.original\n#\n#processors:\n# - copy_fields:\n# fields:\n# - from: message\n# to: event.original\n# fail_on_error: false\n# ignore_missing: true\n# - truncate_fields:\n# fields:\n# - event.original\n# max_bytes: 1024\n# fail_on_error: false\n# ignore_missing: true\n#\n# The following example URL-decodes the value of field1 to field2\n#\n#processors:\n# - urldecode:\n# fields:\n# - from: "field1"\n# to: "field2"\n# ignore_missing: false\n# fail_on_error: true\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Packetbeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. 
The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Array of hosts to connect to.\n # Scheme and port can be left out and will be set to the default (http and 9200)\n # In case you specify an additional path, the scheme is required: http://localhost:9200/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200\n hosts: ["localhost:9200"]\n\n # Performance presets configure other output fields to recommended values\n # based on a performance priority.\n # Options are "balanced", "throughput", "scale", "latency" and "custom".\n # Default if unspecified: "custom"\n preset: balanced\n\n # Set gzip compression level. Set to 0 to disable compression.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 1.\n #compression_level: 1\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n # Dictionary of HTTP parameters to pass within the URL with index operations.\n #parameters:\n #param1: value1\n #param2: value2\n\n # Number of workers per Elasticsearch host.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n #worker: 1\n\n # If set to true and multiple hosts are configured, the output plugin load\n # balances published events onto all Elasticsearch hosts. 
If set to false,\n # the output plugin sends all events to only one host (determined at random)\n # and will switch to another host if the currently selected one becomes\n # unreachable. The default value is true.\n #loadbalance: true\n\n # Optional data stream or index name. The default is "packetbeat-%{[agent.version]}".\n # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.\n #index: "packetbeat-%{[agent.version]}"\n\n # Optional ingest pipeline. By default, no pipeline will be used.\n #pipeline: ""\n\n # Optional HTTP path\n #path: "/elasticsearch"\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Proxy server URL\n #proxy_url: http://proxy:3128\n\n # Whether to disable proxy settings for outgoing connections. If true, this\n # takes precedence over both the proxy_url field and any environment settings\n # (HTTP_PROXY, HTTPS_PROXY). The default is false.\n #proxy_disable: false\n\n # The number of times a particular Elasticsearch index operation is attempted. If\n # the indexing operation doesn't succeed after this many retries, the events are\n # dropped. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Elasticsearch bulk API index request.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 1600.\n #bulk_max_size: 1600\n\n # The number of seconds to wait before trying to reconnect to Elasticsearch\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Elasticsearch after a network error. 
The default is 60s.\n #backoff.max: 60s\n\n # The maximum amount of time an idle connection will remain idle\n # before closing itself. Zero means use the default of 60s. The\n # format is a Go language duration (example 60s is 60 seconds).\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 3s.\n # idle_connection_timeout: 3s\n\n # Configure HTTP request timeout before failing a request to Elasticsearch.\n #timeout: 90\n\n # Prevents packetbeat from connecting to older Elasticsearch versions when set to `false`\n #allow_older_versions: true\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n # Enables restarting packetbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. 
Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/elastic.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Number of workers per Logstash host.\n #worker: 1\n\n # Set gzip compression level.\n #compression_level: 3\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Optional maximum time to live for a connection to Logstash, after which the\n # connection will be re-established. A value of `0s` (the default) will\n # disable this feature.\n #\n # Not yet supported for async connections (i.e. with the "pipelining" option set)\n #ttl: 30s\n\n # Optionally load-balance events between Logstash hosts. Default is false.\n #loadbalance: false\n\n # Number of batches to be sent asynchronously to Logstash while processing\n # new batches.\n #pipelining: 2\n\n # If enabled only a subset of events in a batch of events is transferred per\n # transaction. The number of events to be sent increases up to `bulk_max_size`\n # if no error is encountered.\n #slow_start: false\n\n # The number of seconds to wait before trying to reconnect to Logstash\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. 
The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Logstash after a network error. The default is 60s.\n #backoff.max: 60s\n\n # Optional index name. The default index name is set to packetbeat\n # in all lowercase.\n #index: 'packetbeat'\n\n # SOCKS5 proxy server URL\n #proxy_url: socks5://user:password@socks5-server:2233\n\n # Resolve names locally when using a proxy server. Defaults to false.\n #proxy_use_local_resolver: false\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. 
Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enables restarting packetbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, the events are typically dropped.\n # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting\n # and retry until all events are published. Set max_retries to a value less\n # than 0 to retry until all events are published. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Logstash request. The\n # default is 2048.\n #bulk_max_size: 2048\n\n # The number of seconds to wait for responses from the Logstash server before\n # timing out. The default is 30s.\n #timeout: 30s\n\n# -------------------------------- Kafka Output --------------------------------\n#output.kafka:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # The list of Kafka broker addresses from which to fetch the cluster metadata.\n # The cluster metadata contain the actual Kafka brokers events are published\n # to.\n #hosts: ["localhost:9092"]\n\n # The Kafka topic used for produced events. The setting can be a format string\n # using any event field. To set the topic from document type use `%{[type]}`.\n #topic: beats\n\n # The Kafka event key setting. Use format string to create a unique event key.\n # By default no event key will be generated.\n #key: ''\n\n # The Kafka event partitioning strategy. 
Default hashing strategy is `hash`\n # using the `output.kafka.key` setting or randomly distributes events if\n # `output.kafka.key` is not configured.\n #partition.hash:\n # If enabled, events will only be published to partitions with reachable\n # leaders. Default is false.\n #reachable_only: false\n\n # Configure alternative event field names used to compute the hash value.\n # If empty `output.kafka.key` setting will be used.\n # Default value is empty list.\n #hash: []\n\n # Authentication details. Password is required if username is set.\n #username: ''\n #password: ''\n\n # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.\n # Defaults to PLAIN when `username` and `password` are configured.\n #sasl.mechanism: ''\n\n # Kafka version Packetbeat is assumed to run against. Defaults to the "1.0.0".\n #version: '1.0.0'\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Metadata update configuration. Metadata contains leader information\n # used to decide which broker to use when publishing.\n #metadata:\n # Max metadata request retry attempts when cluster is in middle of leader\n # election. Defaults to 3 retries.\n #retry.max: 3\n\n # Wait time between retries during leader elections. Default is 250ms.\n #retry.backoff: 250ms\n\n # Refresh metadata interval. Defaults to every 10 minutes.\n #refresh_frequency: 10m\n\n # Strategy for fetching the topics metadata from the broker. Default is false.\n #full: false\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, events are typically dropped.\n # Some Beats, such as Filebeat, ignore the max_retries setting and retry until\n # all events are published. Set max_retries to a value less than 0 to retry\n # until all events are published. 
The default is 3.\n #max_retries: 3\n\n # The number of seconds to wait before trying to republish to Kafka\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to republish. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful publish, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to republish to\n # Kafka after a network error. The default is 60s.\n #backoff.max: 60s\n\n # The maximum number of events to bulk in a single Kafka request. The default\n # is 2048.\n #bulk_max_size: 2048\n\n # Duration to wait before sending bulk Kafka request. 0 is no delay. The default\n # is 0.\n #bulk_flush_frequency: 0s\n\n # The number of seconds to wait for responses from the Kafka brokers before\n # timing out. The default is 30s.\n #timeout: 30s\n\n # The maximum duration a broker will wait for number of required ACKs. The\n # default is 10s.\n #broker_timeout: 10s\n\n # The number of messages buffered for each Kafka broker. The default is 256.\n #channel_buffer_size: 256\n\n # The keep-alive period for an active network connection. If 0s, keep-alives\n # are disabled. The default is 0 seconds.\n #keep_alive: 0\n\n # Sets the output compression codec. Must be one of none, snappy and gzip. The\n # default is gzip.\n #compression: gzip\n\n # Set the compression level. Currently only gzip provides a compression level\n # between 0 and 9. The default value is chosen by the compression algorithm.\n #compression_level: 4\n\n # The maximum permitted size of JSON-encoded messages. Bigger messages will be\n # dropped. The default value is 1000000 (bytes). This value should be equal to\n # or less than the broker's message.max.bytes.\n #max_message_bytes: 1000000\n\n # The ACK reliability level required from broker. 0=no response, 1=wait for\n # local commit, -1=wait for all replicas to commit. The default is 1. 
Note:\n # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently\n # on error.\n #required_acks: 1\n\n # The configurable ClientID used for logging, debugging, and auditing\n # purposes. The default is "beats".\n #client_id: beats\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enables restarting packetbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. 
Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/security/keytabs/kafka.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # The service name. Service principal name is constructed from\n # service_name/hostname@realm.\n #kerberos.service_name: kafka\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n # Enables Kerberos FAST authentication. This may\n # conflict with certain Active Directory configurations.\n #kerberos.enable_krb5_fast: false\n\n# -------------------------------- Redis Output --------------------------------\n#output.redis:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # The list of Redis servers to connect to. If load-balancing is enabled, the\n # events are distributed to the servers in the list. If one server becomes\n # unreachable, the events are distributed to the reachable servers only.\n # The hosts setting supports redis and rediss urls with custom password like\n # redis://:password@localhost:6379.\n #hosts: ["localhost:6379"]\n\n # The name of the Redis list or channel the events are published to. The\n # default is packetbeat.\n #key: packetbeat\n\n # The password to authenticate to Redis with. The default is no authentication.\n #password:\n\n # The Redis database number where the events are published. The default is 0.\n #db: 0\n\n # The Redis data type to use for publishing events. If the data type is list,\n # the Redis RPUSH command is used. If the data type is channel, the Redis\n # PUBLISH command is used. 
The default value is list.\n #datatype: list\n\n # The number of workers to use for each host configured to publish events to\n # Redis. Use this setting along with the loadbalance option. For example, if\n # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each\n # host).\n #worker: 1\n\n # If set to true and multiple hosts or workers are configured, the output\n # plugin load balances published events onto all Redis hosts. If set to false,\n # the output plugin sends all events to only one host (determined at random)\n # and will switch to another host if the currently selected one becomes\n # unreachable. The default value is true.\n #loadbalance: true\n\n # The Redis connection timeout in seconds. The default is 5 seconds.\n #timeout: 5s\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, the events are typically dropped.\n # Some Beats, such as Filebeat, ignore the max_retries setting and retry until\n # all events are published. Set max_retries to a value less than 0 to retry\n # until all events are published. The default is 3.\n #max_retries: 3\n\n # The number of seconds to wait before trying to reconnect to Redis\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Redis after a network error. The default is 60s.\n #backoff.max: 60s\n\n # The maximum number of events to bulk in a single Redis request or pipeline.\n # The default is 2048.\n #bulk_max_size: 2048\n\n # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. 
The\n # value must be a URL with a scheme of socks5://.\n #proxy_url:\n\n # This option determines whether Redis hostnames are resolved locally when\n # using a proxy. The default value is false, which means that name resolution\n # occurs on the proxy server.\n #proxy_use_local_resolver: false\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n# -------------------------------- File Output ---------------------------------\n#output.file:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Path to the directory where to save the generated files. The option is\n # mandatory.\n #path: "/tmp/packetbeat"\n\n # Name of the generated files. 
The default is `packetbeat` and it generates\n # files: `packetbeat-{datetime}.ndjson`, `packetbeat-{datetime}-1.ndjson`, etc.\n #filename: packetbeat\n\n # Maximum size in kilobytes of each file. When this size is reached, and on\n # every Packetbeat restart, the files are rotated. The default value is 10240\n # kB.\n #rotate_every_kb: 10000\n\n # Maximum number of files under path. When this number of files is reached,\n # the oldest file is deleted and the rest are shifted from last to first. The\n # default is 7 files.\n #number_of_files: 7\n\n # Permissions to use for file creation. The default is 0600.\n #permissions: 0600\n \n # Configure automatic file rotation on every startup. The default is true.\n #rotate_on_startup: true\n\n# ------------------------------- Console Output -------------------------------\n#output.console:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n# =================================== Paths ====================================\n\n# The home path for the Packetbeat installation. This is the default base path\n# for all other path settings and for miscellaneous files that come with the\n# distribution (for example, the sample dashboards).\n# If not set by a CLI flag or in the configuration file, the default for the\n# home path is the location of the binary.\n#path.home:\n\n# The configuration path for the Packetbeat installation. This is the default\n# base path for configuration files, including the main YAML configuration file\n# and the Elasticsearch template file. If not set by a CLI flag or in the\n# configuration file, the default for the configuration path is the home path.\n#path.config: ${path.home}\n\n# The data path for the Packetbeat installation. 
This is the default base path\n# for all the files in which Packetbeat needs to store its data. If not set by a\n# CLI flag or in the configuration file, the default for the data path is a data\n# subdirectory inside the home path.\n#path.data: ${path.home}/data\n\n# The logs path for a Packetbeat installation. This is the default location for\n# the Beat's log files. If not set by a CLI flag or in the configuration file,\n# the default for the logs path is a logs subdirectory inside the home path.\n#path.logs: ${path.home}/logs\n\n# ================================== Keystore ==================================\n\n# Location of the Keystore containing the keys and their sensitive values.\n#keystore.path: "${path.config}/beats.keystore"\n\n# ================================= Dashboards =================================\n\n# These settings control loading the sample dashboards to the Kibana index. Loading\n# the dashboards are disabled by default and can be enabled either by setting the\n# options here or by using the `-setup` CLI flag or the `setup` command.\n#setup.dashboards.enabled: false\n\n# The directory from where to read the dashboards. The default is the `kibana`\n# folder in the home path.\n#setup.dashboards.directory: ${path.home}/kibana\n\n# The URL from where to download the dashboard archive. It is used instead of\n# the directory if it has a value.\n#setup.dashboards.url:\n\n# The file archive (zip file) from where to read the dashboards. It is used instead\n# of the directory when it has a value.\n#setup.dashboards.file:\n\n# In case the archive contains the dashboards from multiple Beats, this lets you\n# select which one to load. You can load all the dashboards in the archive by\n# setting this to the empty string.\n#setup.dashboards.beat: packetbeat\n\n# The name of the Kibana index to use for setting the configuration. Default is ".kibana"\n#setup.dashboards.kibana_index: .kibana\n\n# The Elasticsearch index name. 
This overwrites the index name defined in the\n# dashboards and index pattern. Example: testbeat-*\n#setup.dashboards.index:\n\n# Always use the Kibana API for loading the dashboards instead of autodetecting\n# how to install the dashboards by first querying Elasticsearch.\n#setup.dashboards.always_kibana: false\n\n# If true and Kibana is not reachable at the time when dashboards are loaded,\n# it will retry to reconnect to Kibana instead of exiting with an error.\n#setup.dashboards.retry.enabled: false\n\n# Duration interval between Kibana connection retries.\n#setup.dashboards.retry.interval: 1s\n\n# Maximum number of retries before exiting with an error, 0 for unlimited retrying.\n#setup.dashboards.retry.maximum: 0\n\n# ================================== Template ==================================\n\n# A template is used to set the mapping in Elasticsearch\n# By default template loading is enabled and the template is loaded.\n# These settings can be adjusted to load your own template or overwrite existing ones.\n\n# Set to false to disable template loading.\n#setup.template.enabled: true\n\n# Template name. By default the template name is "packetbeat-%{[agent.version]}"\n# The template name and pattern has to be set in case the Elasticsearch index pattern is modified.\n#setup.template.name: "packetbeat-%{[agent.version]}"\n\n# Template pattern. By default the template pattern is "packetbeat-%{[agent.version]}" to apply to the default index settings.\n# The template name and pattern has to be set in case the Elasticsearch index pattern is modified.\n#setup.template.pattern: "packetbeat-%{[agent.version]}"\n\n# Path to fields.yml file to generate the template\n#setup.template.fields: "${path.config}/fields.yml"\n\n# A list of fields to be added to the template and Kibana index pattern. 
Also\n# specify setup.template.overwrite: true to overwrite the existing template.\n#setup.template.append_fields:\n#- name: field_name\n# type: field_type\n\n# Enable JSON template loading. If this is enabled, the fields.yml is ignored.\n#setup.template.json.enabled: false\n\n# Path to the JSON template file\n#setup.template.json.path: "${path.config}/template.json"\n\n# Name under which the template is stored in Elasticsearch\n#setup.template.json.name: ""\n\n# Set this option if the JSON template is a data stream.\n#setup.template.json.data_stream: false\n\n# Overwrite existing template\n# Do not enable this option for more than one instance of packetbeat as it might\n# overload your Elasticsearch with too many update requests.\n#setup.template.overwrite: false\n\n# Elasticsearch template settings\nsetup.template.settings:\n\n # A dictionary of settings to place into the settings.index dictionary\n # of the Elasticsearch template. For more details, please check\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html\n #index:\n #number_of_shards: 1\n #codec: best_compression\n\n # A dictionary of settings for the _source field. For more details, please check\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html\n #_source:\n #enabled: false\n\n# ====================== Index Lifecycle Management (ILM) ======================\n\n# Configure index lifecycle management (ILM) to manage the backing indices\n# of your data streams.\n\n# Enable ILM support. Valid values are true, or false.\n#setup.ilm.enabled: true\n\n# Set the lifecycle policy name. The default policy name is\n# 'beatname'.\n#setup.ilm.policy_name: "mypolicy"\n\n# The path to a JSON file that contains a lifecycle policy configuration. Used\n# to load your own lifecycle policy.\n#setup.ilm.policy_file:\n\n# Disable the check for an existing lifecycle policy. 
The default is true.\n# If you set this option to false, lifecycle policy will not be installed,\n# even if setup.ilm.overwrite is set to true.\n#setup.ilm.check_exists: true\n\n# Overwrite the lifecycle policy at startup. The default is false.\n#setup.ilm.overwrite: false\n\n# ======================== Data Stream Lifecycle (DSL) =========================\n\n# Configure Data Stream Lifecycle to manage data streams while connected to Serverless elasticsearch. \n# These settings are mutually exclusive with ILM settings which are not supported in Serverless projects.\n\n# Enable DSL support. Valid values are true, or false.\n#setup.dsl.enabled: true\n\n# Set the lifecycle policy name or pattern. For DSL, this name must match the data stream that the lifecycle is for.\n# The default data stream pattern is packetbeat-%{[agent.version]}"\n# The template string `%{[agent.version]}` will resolve to the current stack version. \n# The other possible template value is `%{[beat.name]}`.\n#setup.dsl.data_stream_pattern: "packetbeat-%{[agent.version]}"\n\n# The path to a JSON file that contains a lifecycle policy configuration. Used\n# to load your own lifecycle policy.\n# If no custom policy is specified, a default policy with a lifetime of 7 days will be created.\n#setup.dsl.policy_file:\n\n# Disable the check for an existing lifecycle policy. The default is true. If\n# you disable this check, set setup.dsl.overwrite: true so the lifecycle policy\n# can be installed.\n#setup.dsl.check_exists: true\n\n# Overwrite the lifecycle policy at startup. 
The default is false.\n#setup.dsl.overwrite: false\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify and additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Optional protocol and basic auth credentials.\n #protocol: "https"\n #username: "elastic"\n #password: "changeme"\n\n # Optional HTTP path\n #path: ""\n\n # Optional Kibana space ID.\n #space.id: ""\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. 
It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n# ================================== Logging ===================================\n\n# There are four options for the log output: file, stderr, syslog, eventlog\n# The file output is the default.\n\n# Sets log level. 
The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: info\n\n# Enable debug output for selected components. To enable all selectors use ["*"]\n# Other available selectors are "beat", "publisher", "service"\n# Multiple selectors can be chained.\n#logging.selectors: [ ]\n\n# Send all logging output to stderr. The default is false.\n#logging.to_stderr: false\n\n# Send all logging output to syslog. The default is false.\n#logging.to_syslog: false\n\n# Send all logging output to Windows Event Logs. The default is false.\n#logging.to_eventlog: false\n\n# If enabled, Packetbeat periodically logs its internal metrics that have changed\n# in the last period. For each metric that changed, the delta from the value at\n# the beginning of the period is logged. Also, the total values for\n# all non-zero internal metrics are logged on shutdown. The default is true.\n#logging.metrics.enabled: true\n\n# The period after which to log the internal metrics. The default is 30s.\n#logging.metrics.period: 30s\n\n# A list of metrics namespaces to report in the logs. Defaults to [stats].\n# `stats` contains general Beat metrics. `dataset` may be present in some\n# Beats and contains module or input metrics.\n#logging.metrics.namespaces: [stats]\n\n# Logging to rotating files. Set logging.to_files to false to disable logging to\n# files.\nlogging.to_files: true\nlogging.files:\n # Configure the path where the logs are written. The default is the logs directory\n # under the home path (the binary location).\n #path: /var/log/packetbeat\n\n # The name of the files where the logs are written to.\n #name: packetbeat\n\n # Configure log file size limit. If the limit is reached, log file will be\n # automatically rotated.\n #rotateeverybytes: 10485760 # = 10MB\n\n # Number of rotated log files to keep. The oldest files will be deleted first.\n #keepfiles: 7\n\n # The permissions mask to apply when rotating log files. 
The default value is 0600.\n # Must be a valid Unix-style file permissions mask expressed in octal notation.\n #permissions: 0600\n\n # Enable log file rotation on time intervals in addition to the size-based rotation.\n # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h\n # are boundary-aligned with minutes, hours, days, weeks, months, and years as\n # reported by the local system clock. All other intervals are calculated from the\n # Unix epoch. Defaults to disabled.\n #interval: 0\n\n # Rotate existing logs on startup rather than appending them to the existing\n # file. Defaults to true.\n # rotateonstartup: true\n\n#=============================== Events Logging ===============================\n# Some outputs will log raw events on errors like indexing errors in the\n# Elasticsearch output, to prevent logging raw events (that may contain\n# sensitive information) together with other log messages, a different\n# log file, only for log entries containing raw events, is used. It will\n# use the same level, selectors and all other configurations from the\n# default logger, but it will have it's own file configuration.\n#\n# Having a different log file for raw events also prevents event data\n# from drowning out the regular log files.\n#\n# IMPORTANT: No matter the default logger output configuration, raw events\n# will **always** be logged to a file configured by `logging.event_data.files`.\n\n# logging.event_data:\n# Logging to rotating files. Set logging.to_files to false to disable logging to\n# files.\n#logging.event_data.to_files: true\n#logging.event_data:\n # Configure the path where the logs are written. The default is the logs directory\n # under the home path (the binary location).\n #path: /var/log/packetbeat\n\n # The name of the files where the logs are written to.\n #name: packetbeat-events-data\n\n # Configure log file size limit. 
If the limit is reached, log file will be\n # automatically rotated.\n #rotateeverybytes: 5242880 # = 5MB\n\n # Number of rotated log files to keep. The oldest files will be deleted first.\n #keepfiles: 2\n\n # The permissions mask to apply when rotating log files. The default value is 0600.\n # Must be a valid Unix-style file permissions mask expressed in octal notation.\n #permissions: 0600\n\n # Enable log file rotation on time intervals in addition to the size-based rotation.\n # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h\n # are boundary-aligned with minutes, hours, days, weeks, months, and years as\n # reported by the local system clock. All other intervals are calculated from the\n # Unix epoch. Defaults to disabled.\n #interval: 0\n\n # Rotate existing logs on startup rather than appending them to the existing\n # file. Defaults to false.\n # rotateonstartup: false\n\n# ============================= X-Pack Monitoring ==============================\n# Packetbeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Packetbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. 
Most settings from the\n# Elasticsearch output are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n # Array of hosts to connect to.\n # Scheme and port can be left out and will be set to the default (http and 9200)\n # In case you specify an additional path, the scheme is required: http://localhost:9200/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200\n #hosts: ["localhost:9200"]\n\n # Set gzip compression level.\n #compression_level: 0\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "beats_system"\n #password: "changeme"\n\n # Dictionary of HTTP parameters to pass within the URL with index operations.\n #parameters:\n #param1: value1\n #param2: value2\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Proxy server url\n #proxy_url: http://proxy:3128\n\n # The number of times a particular Elasticsearch index operation is attempted. If\n # the indexing operation doesn't succeed after this many retries, the events are\n # dropped. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Elasticsearch bulk API index request.\n # The default is 50.\n #bulk_max_size: 50\n\n # The number of seconds to wait before trying to reconnect to Elasticsearch\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. 
After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Elasticsearch after a network error. The default is 60s.\n #backoff.max: 60s\n\n # Configure HTTP request timeout before failing a request to Elasticsearch.\n #timeout: 90\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. 
It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/elastic.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n #metrics.period: 10s\n #state.period: 1m\n\n# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`\n# setting. You can find the value for this setting in the Elastic Cloud web UI.\n#monitoring.cloud.id:\n\n# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`\n# and `monitoring.elasticsearch.password` settings. The format is `<user>:<pass>`.\n#monitoring.cloud.auth:\n\n# =============================== HTTP Endpoint ================================\n\n# Each beat can expose internal metrics through an HTTP endpoint. For security\n# reasons the endpoint is disabled by default. This feature is currently experimental.\n# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output\n# append ?pretty to the URL.\n\n# Defines if the HTTP endpoint is enabled.\n#http.enabled: false\n\n# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe.\n# When using IP addresses, it is recommended to only use localhost.\n#http.host: localhost\n\n# Port on which the HTTP endpoint will bind. Default is 5066.\n#http.port: 5066\n\n# Define which user should be owning the named pipe.\n#http.named_pipe.user:\n\n# Define which permissions should be applied to the named pipe, use the Security\n# Descriptor Definition Language (SDDL) to define the permission. 
This option cannot be used with\n# `http.user`.\n#http.named_pipe.security_descriptor:\n\n# Defines if the HTTP pprof endpoints are enabled.\n# It is recommended that this is only enabled on localhost as these endpoints may leak data.\n#http.pprof.enabled: false\n\n# Controls the fraction of goroutine blocking events that are reported in the\n# blocking profile.\n#http.pprof.block_profile_rate: 0\n\n# Controls the fraction of memory allocations that are recorded and reported in\n# the memory profile.\n#http.pprof.mem_profile_rate: 524288\n\n# Controls the fraction of mutex contention events that are reported in the\n# mutex profile.\n#http.pprof.mutex_profile_rate: 0\n\n# ============================== Process Security ==============================\n\n# Enable or disable seccomp system call filtering on Linux. Default is enabled.\n#seccomp.enabled: true\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the packetbeat.\n#instrumentation:\n # Set to true to enable instrumentation of packetbeat.\n #enabled: false\n\n # Environment in which packetbeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n # Enable profiling of the server, recording profile samples as events.\n #\n # This feature is experimental.\n #profiling:\n #cpu:\n # Set to true to enable CPU profiling.\n #enabled: false\n #interval: 60s\n #duration: 10s\n #heap:\n # Set to true to enable heap profiling.\n #enabled: false\n #interval: 60s\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration aliases\n#migration.6_to_7.enabled: false\n\n# =============================== Feature Flags 
================================\n\n# Enable and configure feature flags.\n#features:\n# fqdn:\n# enabled: true\n\n
dataset_sample\yaml\go\packetbeat.reference.yml
packetbeat.reference.yml
YAML
89,496
0.75
0.074441
0.968699
node-utils
492
2025-01-22T21:42:47.476136
GPL-3.0
false
c7508c62ea312c963f1eb57ecea4c24c
#################### Packetbeat Configuration Example #########################\n\n# This file is an example configuration file highlighting only the most common\n# options. The packetbeat.reference.yml file from the same directory contains all the\n# supported options with more comments. You can use it as a reference.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/packetbeat/index.html\n\n# =============================== Network device ===============================\n\n# Select the network interface to sniff the data. On Linux, you can use the\n# "any" keyword to sniff on all connected interfaces. On all platforms, you\n# can use "default_route", "default_route_ipv4" or "default_route_ipv6"\n# to sniff on the device carrying the default route. If you wish to sniff\n# on multiple network interfaces you may specify an array of distinct interfaces\n# as a YAML array with each device's configuration specified individually.\n# Each device may only appear once in the array of interfaces.\n#\n# packetbeat.interfaces:\n# - device: en0\n# internal_networks:\n# - private\n# - device: en1\n# internal_networks:\n# - private\n#\npacketbeat.interfaces.device: any\n\n# Specify the amount of time between polling for changes in the default\n# route. This option is only used when one of the default route devices\n# is specified.\npacketbeat.interfaces.poll_default_route: 1m\n\n# The network CIDR blocks that are considered "internal" networks for\n# the purpose of network perimeter boundary classification. 
The valid\n# values for internal_networks are the same as those that can be used\n# with processor network conditions.\n#\n# For a list of available values see:\n# https://www.elastic.co/guide/en/beats/packetbeat/current/defining-processors.html#condition-network\npacketbeat.interfaces.internal_networks:\n - private\n\n# =================================== Flows ====================================\n\n# Set `enabled: false` or comment out all options to disable flows reporting.\npacketbeat.flows:\n # Set network flow timeout. Flow is killed if no packet is received before being\n # timed out.\n timeout: 30s\n\n # Configure reporting period. If set to -1s, only killed flows will be reported\n period: 10s\n\n# =========================== Transaction protocols ============================\n\npacketbeat.protocols:\n- type: icmp\n # Enable ICMPv4 and ICMPv6 monitoring. The default is true.\n enabled: true\n\n- type: amqp\n # Configure the ports where to listen for AMQP traffic. You can disable\n # the AMQP protocol by commenting out the list of ports.\n ports: [5672]\n\n- type: cassandra\n # Configure the ports where to listen for Cassandra traffic. You can disable\n # the Cassandra protocol by commenting out the list of ports.\n ports: [9042]\n\n- type: dhcpv4\n # Configure the DHCP for IPv4 ports.\n ports: [67, 68]\n\n- type: dns\n # Configure the ports where to listen for DNS traffic. You can disable\n # the DNS protocol by commenting out the list of ports.\n ports: [53]\n\n- type: http\n # Configure the ports where to listen for HTTP traffic. You can disable\n # the HTTP protocol by commenting out the list of ports.\n ports: [80, 8080, 8000, 5000, 8002]\n\n- type: memcache\n # Configure the ports where to listen for memcache traffic. You can disable\n # the Memcache protocol by commenting out the list of ports.\n ports: [11211]\n\n- type: mysql\n # Configure the ports where to listen for MySQL traffic. 
You can disable\n # the MySQL protocol by commenting out the list of ports.\n ports: [3306,3307]\n\n- type: pgsql\n # Configure the ports where to listen for Pgsql traffic. You can disable\n # the Pgsql protocol by commenting out the list of ports.\n ports: [5432]\n\n- type: redis\n # Configure the ports where to listen for Redis traffic. You can disable\n # the Redis protocol by commenting out the list of ports.\n ports: [6379]\n\n- type: thrift\n # Configure the ports where to listen for Thrift-RPC traffic. You can disable\n # the Thrift-RPC protocol by commenting out the list of ports.\n ports: [9090]\n\n- type: mongodb\n # Configure the ports where to listen for MongoDB traffic. You can disable\n # the MongoDB protocol by commenting out the list of ports.\n ports: [27017]\n\n- type: nfs\n # Configure the ports where to listen for NFS traffic. You can disable\n # the NFS protocol by commenting out the list of ports.\n ports: [2049]\n\n- type: tls\n # Configure the ports where to listen for TLS traffic. You can disable\n # the TLS protocol by commenting out the list of ports.\n ports:\n - 443 # HTTPS\n - 993 # IMAPS\n - 995 # POP3S\n - 5223 # XMPP over SSL\n - 8443\n - 8883 # Secure MQTT\n - 9243 # Elasticsearch\n\n- type: sip\n # Configure the ports where to listen for SIP traffic. You can disable\n # the SIP protocol by commenting out the list of ports.\n ports: [5060]\n # You can monitor tcp SIP traffic by setting the transport_protocol option\n # to tcp, it defaults to udp.\n #transport_protocol: tcp\n\n# ======================= Elasticsearch template setting =======================\n\nsetup.template.settings:\n index.number_of_shards: 1\n #index.codec: best_compression\n #_source.enabled: false\n\n# ================================== General ===================================\n\n# The name of the shipper that publishes the network data. 
It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n#name:\n\n# A list of tags to include in every event. In the default configuration file\n# the forwarded tag causes Packetbeat to not add any host fields. If you are\n# monitoring a network tap or mirror port then add the forwarded tag.\n#tags: [forwarded]\n\n# Optional fields that you can specify to add additional information to the\n# output.\n#fields:\n# env: staging\n\n# ================================= Dashboards =================================\n# These settings control loading the sample dashboards to the Kibana index. Loading\n# the dashboards is disabled by default and can be enabled either by setting the\n# options here or by using the `setup` command.\n#setup.dashboards.enabled: false\n\n# The URL from where to download the dashboard archive. By default, this URL\n# has a value that is computed based on the Beat name and version. For released\n# versions, this URL points to the dashboard archive on the artifacts.elastic.co\n# website.\n#setup.dashboards.url:\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify and additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Kibana Space ID\n # ID of the Kibana Space into which the dashboards should be loaded. 
By default,\n # the Default Space will be used.\n #space.id:\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Packetbeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Array of hosts to connect to.\n hosts: ["localhost:9200"]\n\n # Performance preset - one of "balanced", "throughput", "scale",\n # "latency", or "custom".\n preset: balanced\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Optional SSL. 
By default is off.\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client Certificate Key\n #ssl.key: "/etc/pki/client/cert.key"\n\n# ================================= Processors =================================\n\nprocessors:\n - # Add forwarded to tags when processing data from a network tap or mirror.\n if.contains.tags: forwarded\n then:\n - drop_fields:\n fields: [host]\n else:\n - add_host_metadata: ~\n - add_cloud_metadata: ~\n - add_docker_metadata: ~\n - detect_mime_type:\n field: http.request.body.content\n target: http.request.mime_type\n - detect_mime_type:\n field: http.response.body.content\n target: http.response.mime_type\n\n# ================================== Logging ===================================\n\n# Sets log level. The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: debug\n\n# At debug level, you can selectively enable logging only for some components.\n# To enable all selectors, use ["*"]. Examples of other selectors are "beat",\n# "publisher", "service".\n#logging.selectors: ["*"]\n\n# ============================= X-Pack Monitoring ==============================\n# Packetbeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Packetbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. 
Most settings from the\n# Elasticsearch outputs are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the packetbeat.\n#instrumentation:\n # Set to true to enable instrumentation of packetbeat.\n #enabled: false\n\n # Environment in which packetbeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration aliases\n#migration.6_to_7.enabled: true\n\n
dataset_sample\yaml\go\packetbeat.yml
packetbeat.yml
YAML
11,602
0.95
0.084112
0.734127
python-kit
355
2025-02-22T06:49:22.066947
BSD-3-Clause
false
85b5512c66583cc4ee6e05851f2880b7
# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json\n\n# this intermediate pipeline is required because we can't specify a custom agent (k8s image) yet\n# in catalog-info: https://github.com/elastic/ci/blob/71e83d340e3b93ab43fcf16a7a70ac33bdeec6e9/terrazzo/terrazzo/constructs/buildkite/pipelines.py#L787-L842\n\nsteps:\n - label: ":pipeline: Generate trigger steps for $PIPELINES_TO_TRIGGER"\n command: |\n set -eo pipefail\n .buildkite/pipeline-scheduler.py >steps.yml\n echo "~~~ Printing pipeline steps"\n yq . steps.yml\n echo "~~~ Uploading steps"\n buildkite-agent pipeline upload steps.yml\n agents:\n image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-beats-ci-with-hooks:0.1"\n useCustomGlobalHooks: true\n
dataset_sample\yaml\go\pipeline-scheduler.yml
pipeline-scheduler.yml
YAML
826
0.95
0.058824
0.2
vue-tools
131
2025-02-07T09:05:06.151695
GPL-3.0
false
12535ea7798670eeb3ef3d24be0821b1
# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json\n\nsteps:\n - label: "Trigger Auditbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - auditbeat/\n - .buildkite/auditbeat/\n - .buildkite/scripts\n - .buildkite/hooks/\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/**\n - testing/**\n config:\n trigger: "auditbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for Auditbeat"\n if: build.pull_request.id == null\n trigger: "auditbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger Heartbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - heartbeat/\n - .buildkite/heartbeat/\n - .buildkite/scripts\n - .buildkite/hooks/\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/**\n - testing/**\n config:\n trigger: "heartbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for Heartbeat"\n if: build.pull_request.id == null\n trigger: "heartbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger Filebeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n 
interpolation: false\n watch:\n - path:\n - filebeat/\n - .buildkite/filebeat/\n # CI related scripts\n - .buildkite/scripts\n - .buildkite/hooks/\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools\n - libbeat/**\n - testing/**\n config:\n trigger: "filebeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for Filebeat"\n if: build.pull_request.id == null\n trigger: "filebeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger x-pack/filebeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - filebeat/\n - x-pack/filebeat/\n - x-pack/libbeat/\n - .buildkite/x-pack/pipeline.xpack.filebeat.yml\n - .buildkite/scripts\n - .buildkite/hooks/\n - .buildkite/deploy/docker/docker-compose.yml\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/**\n - testing/**\n config:\n trigger: "beats-xpack-filebeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for x-pack/filebeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-filebeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger x-pack/dockerlogbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - x-pack/dockerlogbeat/\n - .buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml\n - 
.buildkite/hooks/\n - .buildkite/scripts\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/**\n - testing/**\n config:\n trigger: "beats-xpack-dockerlogbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for x-pack/dockerlogbeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-dockerlogbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger Metricbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - metricbeat/\n - .buildkite/metricbeat/\n - .buildkite/scripts\n - .buildkite/hooks/\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/**\n - testing/**\n config:\n trigger: "beats-metricbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for Metricbeat"\n if: build.pull_request.id == null\n trigger: "beats-metricbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger x-pack/metricbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - metricbeat/\n - x-pack/metricbeat/\n - x-pack/libbeat/common/aws\n - .buildkite/x-pack/pipeline.xpack.metricbeat.yml\n - .buildkite/scripts\n - .buildkite/hooks/\n - .buildkite/deploy/docker/docker-compose.yml\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/**\n - 
testing/**\n config:\n trigger: "beats-xpack-metricbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for x-pack/metricbeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-metricbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger x-pack/osquerybeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - x-pack/osquerybeat/**\n - .buildkite/x-pack/pipeline.xpack.osquerybeat.yml\n - .buildkite/scripts/**\n - .buildkite/hooks/**\n # x-pack\n - libbeat/**\n - x-pack/libbeat/**\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/**\n - testing/**\n config:\n trigger: "beats-xpack-osquerybeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for x-pack/osquerybeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-osquerybeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger x-pack/winlogbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - winlogbeat/\n - x-pack/winlogbeat/\n - .buildkite/x-pack/pipeline.xpack.winlogbeat.yml\n - .buildkite/scripts\n - .buildkite/hooks/\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/**\n - libbeat/**\n - testing/**\n config:\n trigger: "beats-xpack-winlogbeat"\n build:\n commit: 
"${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for x-pack/winlogbeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-winlogbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger Deploy/K8S"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - .buildkite/deploy/kubernetes/**\n - .buildkite/hooks/\n - .buildkite/scripts\n - deploy/kubernetes/**\n - metricbeat/module/kubernetes/**\n - libbeat/docs/version.asciidoc\n config:\n trigger: "deploy-k8s"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for Deploy/K8S"\n if: build.pull_request.id == null\n trigger: "deploy-k8s"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger Libbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - libbeat/\n - .buildkite/libbeat/pipeline.libbeat.yml\n - .buildkite/scripts\n - .buildkite/hooks\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - testing/\n config:\n trigger: "beats-libbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for 
Libbeat"\n if: build.pull_request.id == null\n trigger: "beats-libbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger x-pack/libbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - libbeat/\n - x-pack/libbeat/\n - .buildkite/x-pack/pipeline.xpack.libbeat.yml\n - .buildkite/scripts\n - .buildkite/hooks\n # x-pack\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/\n - testing/\n config:\n trigger: "beats-xpack-libbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for x-pack/libbeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-libbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger x-pack/auditbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - auditbeat/\n - x-pack/auditbeat/\n - .buildkite/x-pack/pipeline.xpack.auditbeat.yml\n - .buildkite/scripts/\n - .buildkite/hooks/\n # x-pack\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/\n - testing/\n - x-pack/libbeat/\n config:\n trigger: "beats-xpack-auditbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for x-pack/auditbeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-auditbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: 
"${BUILDKITE_BRANCH}"\n\n - label: "Trigger x-pack/heartbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - heartbeat/\n - x-pack/heartbeat/\n - .buildkite/x-pack/pipeline.xpack.heartbeat.yml\n - .buildkite/scripts/\n - .buildkite/hooks/\n # x-pack\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/\n - testing/\n - x-pack/libbeat/\n config:\n trigger: "beats-xpack-heartbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for x-pack/heartbeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-heartbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger x-pack/packetbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - packetbeat/\n - x-pack/packetbeat/\n - .buildkite/x-pack/pipeline.xpack.packetbeat.yml\n - .buildkite/scripts/\n - .buildkite/hooks/\n # x-pack\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/\n - testing/\n - x-pack/libbeat/\n config:\n trigger: "beats-xpack-packetbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for x-pack/packetbeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-packetbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger Winlogbeat"\n if: build.pull_request.id != 
null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - winlogbeat/\n - .buildkite/winlogbeat/pipeline.winlogbeat.yml\n - .buildkite/scripts\n - .buildkite/hooks\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/\n - testing/\n config:\n trigger: "beats-winlogbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for Winlogbeat"\n if: build.pull_request.id == null\n trigger: "beats-winlogbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger Packetbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - packetbeat/\n - .buildkite/packetbeat/pipeline.packetbeat.yml\n - .buildkite/scripts/\n - .buildkite/hooks/\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/\n - testing/\n config:\n trigger: "beats-packetbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for Packetbeat"\n if: build.pull_request.id == null\n trigger: "beats-packetbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n - label: "Trigger Agentbeat"\n if: build.pull_request.id != null\n plugins:\n - monorepo-diff#v1.0.1:\n diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"\n interpolation: false\n watch:\n - path:\n - auditbeat/\n - filebeat/\n - heartbeat/\n - metricbeat/\n - osquerybeat/\n 
- packetbeat/\n\n - x-pack/agentbeat/\n - x-pack/auditbeat/\n - x-pack/filebeat/\n - x-pack/heartbeat/\n - x-pack/metricbeat/\n - x-pack/osquerybeat/\n - x-pack/packetbeat/\n\n - .buildkite/x-pack/pipeline.xpack.agentbeat.yml\n - .buildkite/scripts/\n - .buildkite/hooks/\n #OSS\n - go.mod\n - pytest.ini\n - dev-tools/\n - libbeat/\n - testing/\n config:\n trigger: "beats-xpack-agentbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n env:\n - BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST}\n - BUILDKITE_PULL_REQUEST_BASE_BRANCH=${BUILDKITE_PULL_REQUEST_BASE_BRANCH}\n - GITHUB_PR_LABELS=${GITHUB_PR_LABELS}\n\n - label: "Triggering Build for Agentbeat"\n if: build.pull_request.id == null\n trigger: "beats-xpack-agentbeat"\n build:\n commit: "${BUILDKITE_COMMIT}"\n branch: "${BUILDKITE_BRANCH}"\n\n # Triggers Serverless Beats tests in the elastic-agent repo for main/release branches\n - label: "Serverless beats tests(elastic-agent)"\n if: build.pull_request.id == null\n trigger: "beats-agent-serverless-tests"\n build:\n branch: "main"\n
dataset_sample\yaml\go\pipeline.yml
pipeline.yml
YAML
23,441
0.8
0.084084
0.033493
react-lib
339
2025-03-16T18:58:37.996379
GPL-3.0
false
a89a14f5ec6af75451f3ec58183db007
- github.com/prometheus/prometheus/discovery/aws\n- github.com/prometheus/prometheus/discovery/azure\n- github.com/prometheus/prometheus/discovery/consul\n- github.com/prometheus/prometheus/discovery/digitalocean\n- github.com/prometheus/prometheus/discovery/dns\n- github.com/prometheus/prometheus/discovery/eureka\n- github.com/prometheus/prometheus/discovery/gce\n- github.com/prometheus/prometheus/discovery/hetzner\n- github.com/prometheus/prometheus/discovery/ionos\n- github.com/prometheus/prometheus/discovery/kubernetes\n- github.com/prometheus/prometheus/discovery/linode\n- github.com/prometheus/prometheus/discovery/marathon\n- github.com/prometheus/prometheus/discovery/moby\n- github.com/prometheus/prometheus/discovery/nomad\n- github.com/prometheus/prometheus/discovery/openstack\n- github.com/prometheus/prometheus/discovery/ovhcloud\n- github.com/prometheus/prometheus/discovery/puppetdb\n- github.com/prometheus/prometheus/discovery/scaleway\n- github.com/prometheus/prometheus/discovery/triton\n- github.com/prometheus/prometheus/discovery/uyuni\n- github.com/prometheus/prometheus/discovery/vultr\n- github.com/prometheus/prometheus/discovery/xds\n- github.com/prometheus/prometheus/discovery/zookeeper\n
dataset_sample\yaml\go\plugins.yml
plugins.yml
YAML
1,202
0.7
0
0
awesome-app
375
2024-11-20T09:37:17.955680
GPL-3.0
false
3f617a90eca780694dc6be70ab791336
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\npr/dependencies:\n - changed-files:\n - any-glob-to-any-file:\n - vendor/**/*\n - go.*\ntheme/acls:\n - changed-files:\n - any-glob-to-any-file:\n - acl/**/*\ntheme/agent-cache:\n - changed-files:\n - any-glob-to-any-file:\n - agent/cache/**/*\ntheme/api:\n - changed-files:\n - any-glob-to-any-file:\n - api/**/*\ntheme/catalog:\n - changed-files:\n - any-glob-to-any-file:\n - agent/catalog/**/*\ntheme/certificates:\n - changed-files:\n - any-glob-to-any-file:\n - tlsutil/**/*\ntheme/cli:\n - changed-files:\n - any-glob-to-any-file:\n - command/**/*\ntheme/config:\n - changed-files:\n - any-glob-to-any-file:\n - agent/config/**/*\ntheme/connect:\n - changed-files:\n - any-glob-to-any-file:\n - connect/**/*\n - agent/connect/**/*\n# theme/consul-nomad:\ntheme/consul-terraform-sync:\n - changed-files:\n - any-glob-to-any-file:\n - website/content/docs/nia/**/*\n - website/content/docs/integrate/nia*\n# theme/consul-vault:\ntheme/contributing:\n - changed-files:\n - any-glob-to-any-file:\n - .github/**/*\ntheme/dns:\n - changed-files:\n - any-glob-to-any-file:\n - dns/**/*\ntheme/envoy/xds:\n - changed-files:\n - any-glob-to-any-file:\n - agent/xds/**/*\n# theme/federation-usability:\ntheme/health-checks:\n - changed-files:\n - any-glob-to-any-file:\n - agent/health*\n - api/health*\n# theme/ingress-gw:\n# theme/internal-cleanup:\ntheme/internals:\n - changed-files:\n - any-glob-to-any-file:\n - lib/**/*\n - types/**/*\n# theme/kubernetes:\n# theme/mesh-gw:\n# theme/operator-usability:\n# theme/performance:\n# theme/service-metadata:\n# theme/streaming:\ntheme/telemetry:\n - changed-files:\n - any-glob-to-any-file:\n - logging/**/*\n# theme/terminating-gw:\ntheme/testing:\n - changed-files:\n - any-glob-to-any-file:\n - ./*test*/**/*\ntheme/tls:\n - changed-files:\n - any-glob-to-any-file:\n - tlsutil/**/*\ntheme/ui:\n - changed-files:\n - any-glob-to-any-file:\n - ui/**/*\n# theme/windows:\n# 
thinking:\n# type/bug:\ntype/ci:\n - changed-files:\n - any-glob-to-any-file:\n - .github/workflows/*\n# type/crash:\ntype/docs:\n - changed-files:\n - any-glob-to-any-file:\n - website/**/*\n
dataset_sample\yaml\go\pr-labeler.yml
pr-labeler.yml
YAML
2,293
0.8
0
0.168224
react-lib
800
2024-07-03T13:07:38.932966
GPL-3.0
false
a065da1cceb32bbf911dd16551c53aba
global:\n scrape_interval: 60s\n scrape_timeout: 10s\nscrape_configs:\n - job_name: "photoprism"\n metrics_path: "/api/v1/metrics"\n oauth2:\n client_id: "cs5cpu17n6gj2qo5"\n client_secret: "xcCbOrw6I0vcoXzhnOmXhjpVSyFq0l0e"\n token_url: "http://photoprism:2342/api/v1/oauth/token"\n scopes:\n - 'metrics'\n endpoint_params:\n grant_type: "client_credentials"\n static_configs:\n - targets: ["photoprism:2342"]
dataset_sample\yaml\go\prometheus.yml
prometheus.yml
YAML
451
0.8
0
0
node-utils
276
2025-02-24T05:57:08.517790
MIT
false
0b7d2554e0225bc4a80d5a09ded21af7
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n - id: "no-overriding-struct-globals"\n patterns:\n - pattern: |\n structs.$A = ...\n message: "Mutating global structs is never safe"\n languages:\n - "go"\n severity: "ERROR"\n fix: " "\n paths:\n # including tests!\n include: ["*"]\n
dataset_sample\yaml\go\protect_globals.yml
protect_globals.yml
YAML
348
0.8
0
0.2
vue-tools
16
2024-02-27T07:41:18.781640
MIT
false
de51dc1237e270f04024e75c35f2d1cb
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n hydra-migrate:\n environment:\n - DSN=cockroach://root@cockroachd:26257/defaultdb?sslmode=disable&max_conns=20&max_idle_conns=4\n hydra:\n environment:\n - DSN=cockroach://root@cockroachd:26257/defaultdb?sslmode=disable&max_conns=20&max_idle_conns=4\n cockroachd:\n image: cockroachdb/cockroach:latest-v24.1\n ports:\n - "26257:26257"\n command: start-single-node --insecure\n networks:\n - intranet\n
dataset_sample\yaml\go\quickstart-cockroach.yml
quickstart-cockroach.yml
YAML
1,276
0.8
0.08
0.44
vue-tools
258
2025-05-06T07:22:10.291201
GPL-3.0
false
93a6ab235e77f10a646935961602d5ed
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n hydra:\n environment:\n - SERVE_PUBLIC_CORS_ENABLED=true\n - SERVE_PUBLIC_CORS_ALLOWED_METHODS=POST,GET,PUT,DELETE\n - SERVE_ADMIN_CORS_ENABLED=true\n - SERVE_ADMIN_CORS_ALLOWED_METHODS=POST,GET,PUT,DELETE\n
dataset_sample\yaml\go\quickstart-cors.yml
quickstart-cors.yml
YAML
1,072
0.8
0.111111
0.611111
vue-tools
903
2024-10-04T10:50:49.208372
Apache-2.0
false
804dd673d722e95d65fdbca2ddde072f
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n hydra:\n environment:\n - LOG_LEVEL=debug\n - OAUTH2_EXPOSE_INTERNAL_ERRORS=1\n
dataset_sample\yaml\go\quickstart-debug.yml
quickstart-debug.yml
YAML
936
0.8
0.125
0.6875
vue-tools
848
2024-03-19T03:15:49.237536
BSD-3-Clause
false
4260b28b46b18b8886115cbf69535d7d
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n hydra:\n build:\n context: .\n dockerfile: .docker/Dockerfile-hsm\n environment:\n - HSM_ENABLED=true\n - HSM_LIBRARY=/usr/lib/softhsm/libsofthsm2.so\n - HSM_TOKEN_LABEL=hydra\n - HSM_PIN=1234\n
dataset_sample\yaml\go\quickstart-hsm.yml
quickstart-hsm.yml
YAML
1,069
0.8
0.095238
0.52381
node-utils
821
2024-01-10T18:07:13.665360
MIT
false
8faf1f607cb6076d8cf40dcb59f66a9f
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n hydra:\n environment:\n - STRATEGIES_ACCESS_TOKEN=jwt\n - OIDC_SUBJECT_IDENTIFIERS_SUPPORTED_TYPES=public\n
dataset_sample\yaml\go\quickstart-jwt.yml
quickstart-jwt.yml
YAML
964
0.8
0.125
0.6875
python-kit
103
2024-07-18T05:28:42.452407
BSD-3-Clause
false
29658b87f55776c03fc01558edcd515e
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n hydra-migrate:\n environment:\n - DSN=mysql://root:secret@tcp(mysqld:3306)/mysql?max_conns=20&max_idle_conns=4\n hydra:\n environment:\n - DSN=mysql://root:secret@tcp(mysqld:3306)/mysql?max_conns=20&max_idle_conns=4\n mysqld:\n image: mysql:8.0\n ports:\n - "3306:3306"\n environment:\n - MYSQL_ROOT_PASSWORD=secret\n networks:\n - intranet\n
dataset_sample\yaml\go\quickstart-mysql.yml
quickstart-mysql.yml
YAML
1,221
0.8
0.076923
0.423077
awesome-app
557
2023-08-15T14:33:48.557711
Apache-2.0
false
481a4d0cb212ce4625229063593456d2
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n hydra-migrate:\n environment:\n - DSN=postgres://hydra:secret@postgresd:5432/hydra?sslmode=disable&max_conns=20&max_idle_conns=4\n hydra:\n environment:\n - DSN=postgres://hydra:secret@postgresd:5432/hydra?sslmode=disable&max_conns=20&max_idle_conns=4\n postgresd:\n image: postgres:16\n ports:\n - "5432:5432"\n environment:\n - POSTGRES_USER=hydra\n - POSTGRES_PASSWORD=secret\n - POSTGRES_DB=hydra\n networks:\n - intranet\n
dataset_sample\yaml\go\quickstart-postgres.yml
quickstart-postgres.yml
YAML
1,314
0.8
0.071429
0.392857
vue-tools
146
2023-12-30T05:46:00.518522
GPL-3.0
false
0ffbe5e3ce9f31ce3ee249602cc88394
global:\n scrape_interval: 15s # By default, scrape targets every 15 seconds.\n\n external_labels:\n monitor: "codelab-monitor"\n\nscrape_configs:\n - job_name: "prometheus"\n\n scrape_interval: 5s\n\n static_configs:\n - targets: ["localhost:9090"]\n - job_name: "hydra"\n\n # Override the global default and scrape targets from this job every 5 seconds.\n scrape_interval: 5s\n metrics_path: /metrics/prometheus\n static_configs:\n - targets: ["hydra:4445"]\n
dataset_sample\yaml\go\quickstart-prometheus-config.yml
quickstart-prometheus-config.yml
YAML
476
0.8
0
0.066667
python-kit
423
2025-04-08T02:52:15.412711
Apache-2.0
false
e53d3a6c9549ab0334e02d7778b8d3ad
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n prometheus:\n image: prom/prometheus:v2.12.0\n ports:\n - "9090:9090"\n depends_on:\n - hydra\n command: --config.file=/etc/prometheus/prometheus.yml\n volumes:\n - ./quickstart-prometheus-config.yml:/etc/prometheus/prometheus.yml\n networks:\n - intranet\n
dataset_sample\yaml\go\quickstart-prometheus.yml
quickstart-prometheus.yml
YAML
1,132
0.8
0.086957
0.478261
python-kit
679
2025-07-08T17:04:21.197070
GPL-3.0
false
1b4f49dc643439044b50d9fe66f08e5a
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n hydra:\n depends_on:\n - jaeger\n # - zipkin\n # - datadog\n environment:\n # - TRACING_SERVICE_NAME="Ory Hydra"\n - TRACING_PROVIDER=jaeger\n # - TRACING_PROVIDER=zipkin\n # - TRACING_PROVIDER=otel # datadog\n # - TRACING_PROVIDER=elastic-apm\n ### Jaeger ###\n - TRACING_PROVIDERS_JAEGER_SAMPLING_SERVER_URL=http://jaeger:5778/sampling\n - TRACING_PROVIDERS_JAEGER_LOCAL_AGENT_ADDRESS=jaeger:6831\n - TRACING_PROVIDERS_JAEGER_SAMPLING_TRACE_ID_RATIO=1\n ### Zipkin ###\n # - TRACING_PROVIDERS_ZIPKIN_SERVER_URL=http://zipkin:9411/api/v2/spans\n ### DataDog ###\n ### See env vars here: https://docs.datadoghq.com/tracing/setup/go/#configuration) ###\n # - TRACING_PROVIDERS_OTLP_INSECURE=true\n # - TRACING_PROVIDERS_OTLP_SAMPLING_SAMPLING_RATIO=1.0\n # - TRACING_PROVIDERS_OTLP_SERVER_URL=localhost:4318\n ### Elastic APM ###\n ### See env vars here: https://www.elastic.co/guide/en/apm/agent/go/1.x/configuration.html) ###\n # - ELASTIC_APM_SERVER_URL="http://apm-server:8200"\n # - ELASTIC_APM_SERVICE_NAME="Ory Hydra"\n # - ELASTIC_APM_SERVICE_VERSION="1.9.0"\n # - ELASTIC_APM_ENVIRONMENT="devel"\n ### Opentelemetry ###\n ### See env vars here: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md ###\n jaeger:\n image: jaegertracing/all-in-one:1.19.2\n ports:\n - "16686:16686" # The UI port\n networks:\n - intranet\n# zipkin:\n# image: openzipkin/zipkin:2\n# environment:\n# - 
STORAGE_TYPE=mem\n# ports:\n# - "9411:9411" # The UI/API port\n\n# datadog:\n# image: datadog/agent:7\n# environment:\n# - DD_API_KEY=<YOUR_API_KEY> # Replace it with your DataDog API key\n# - DD_APM_ENABLED=true\n# - DD_APM_NON_LOCAL_TRAFFIC=true\n# - DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_HTTP_ENDPOINT=0.0.0.0:4318\n
dataset_sample\yaml\go\quickstart-tracing.yml
quickstart-tracing.yml
YAML
2,772
0.8
0.032258
0.754098
vue-tools
782
2025-04-09T16:34:58.836828
GPL-3.0
false
af9d861d3500dae6709da8cfa264305d
###########################################################################\n####### FOR DEMONSTRATION PURPOSES ONLY #######\n###########################################################################\n# #\n# If you have not yet read the tutorial, do so now: #\n# https://www.ory.sh/docs/hydra/5min-tutorial #\n# #\n# This set up is only for demonstration purposes. The login #\n# endpoint can only be used if you follow the steps in the tutorial. #\n# #\n###########################################################################\nservices:\n sqlite:\n image: busybox\n volumes:\n - hydra-sqlite:/mnt/sqlite\n command: "chmod -R 777 /mnt/sqlite"\n hydra:\n image: oryd/hydra:v2.3.0\n build:\n context: .\n dockerfile: .docker/Dockerfile-local-build\n ports:\n - "4444:4444" # Public port\n - "4445:4445" # Admin port\n - "5555:5555" # Port for hydra token user\n command: serve -c /etc/config/hydra/hydra.yml all --dev\n volumes:\n - hydra-sqlite:/mnt/sqlite:rw\n - type: bind\n source: ./contrib/quickstart/5-min\n target: /etc/config/hydra\n pull_policy: missing\n environment:\n - DSN=sqlite:///mnt/sqlite/db.sqlite?_fk=true&mode=rwc\n restart: unless-stopped\n depends_on:\n - hydra-migrate\n - sqlite\n networks:\n - intranet\n hydra-migrate:\n image: oryd/hydra:v2.3.0\n build:\n context: .\n dockerfile: .docker/Dockerfile-local-build\n environment:\n - DSN=sqlite:///mnt/sqlite/db.sqlite?_fk=true&mode=rwc\n command: migrate -c /etc/config/hydra/hydra.yml sql up -e --yes\n pull_policy: missing\n volumes:\n - hydra-sqlite:/mnt/sqlite:rw\n - type: bind\n source: ./contrib/quickstart/5-min\n target: /etc/config/hydra\n restart: on-failure\n networks:\n - intranet\n depends_on:\n - sqlite\n consent:\n environment:\n - HYDRA_ADMIN_URL=http://hydra:4445\n image: oryd/hydra-login-consent-node:v2.3.0\n ports:\n - "3000:3000"\n restart: unless-stopped\n networks:\n - intranet\nnetworks:\n intranet:\nvolumes:\n hydra-sqlite:\n
dataset_sample\yaml\go\quickstart.yml
quickstart.yml
YAML
2,402
0.8
0.041096
0.150685
vue-tools
680
2024-12-25T04:38:28.567528
MIT
false
9eb7c2dacd50db068d92d0786fe15d3c
version: 0.1\nlog:\n fields:\n service: registry\nstorage:\n cache:\n blobdescriptor: inmemory\n filesystem:\n rootdirectory: /storage\nhttp:\n addr: :5000\n headers:\n X-Content-Type-Options: [nosniff]\nhealth:\n storagedriver:\n enabled: true\n interval: 10s\n threshold: 3\n
dataset_sample\yaml\go\reg_config.yml
reg_config.yml
YAML
286
0.7
0
0
react-lib
13
2023-11-30T20:06:58.042581
GPL-3.0
false
9aed9c621fc1359026d8f02198e6a2d6
---\nissues:\n - missingLabel: needs_team\n regex: Team:.+\npulls:\n - missingLabel: needs_team\n regex: Team:.+\n
dataset_sample\yaml\go\relabel.yml
relabel.yml
YAML
115
0.7
0
0
awesome-app
947
2025-06-19T21:09:55.638174
GPL-3.0
false
898d5221ed5c1877a3ccbd4c3d6ff2c6
changelog:\n exclude:\n labels:\n - ignore-for-release\n categories:\n - title: Features ✨\n labels:\n - feature\n - title: Enhancements 🔥\n labels:\n - enhancement\n - title: Fixes 🔧\n labels:\n - bug\n - title: Maintenance ⚙️\n labels:\n - maintenance\n - title: Docs 📖\n labels:\n - docs\n - title: I18n 🌎\n labels:\n - i18n\n - title: Performance Improvements 📊\n labels:\n - performance\n - title: Other Changes\n labels:\n - "*"\n
dataset_sample\yaml\go\release.yml
release.yml
YAML
558
0.7
0.034483
0
node-utils
676
2024-06-30T23:45:01.977827
Apache-2.0
false
de2e4c8b0568315765724598d4add70d
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n # Check potentially RPC endpoints with missing authentication/authorization.\n - id: "rpc-potentially-unauthenticated"\n patterns:\n - pattern: |\n if done, err := $A.$B.forward($METHOD, ...); done {\n return err\n }\n # Pattern used by typical endpoints that take an auth token or workload\n # identity. Some of these endpoints have no context for Authenticate\n - pattern-not-inside: |\n authErr := $A.$B.Authenticate(...)\n ...\n if done, err := $A.$B.forward($METHOD, ...); done {\n return err\n }\n ...\n ... := $A.$B.ResolveACL(...)\n ...\n\n # Pattern used by endpoints that are used only for server-to-server. The\n # authentication and authorization check must be done together before\n # forwarding to prevent the risk of confused deputy when RPCs are\n # forwarded.\n - pattern-not-inside: |\n\n aclObj, err := $A.srv.AuthenticateServerOnly($A.ctx, args)\n ...\n if err != nil || !aclObj.AllowServerOp() {\n return structs.ErrPermissionDenied\n }\n\n if done, err := $A.srv.forward($METHOD, ...); done {\n return err\n }\n ...\n\n # Pattern used by endpoints that are used only for client-to-server.\n # Authorization can be done after forwarding, but must check the\n # AllowClientOp policy; the AllowClientOp condition is left open so that\n # additional ACL checks can be made (ex. to scope to a given node/pool).\n - pattern-not-inside: |\n aclObj, err := $A.$B.AuthenticateClientOnly($A.ctx, args)\n ...\n if done, err := $A.$B.forward($METHOD, ...); done {\n return err\n }\n ...\n if !aclObj.AllowClientOp() {\n return structs.ErrPermissionDenied\n }\n ...\n\n # Pattern used by endpoints that are used only for client-to-server.\n # Authorization can be done after forwarding, but must check the\n # AllowClientOp policy. 
This should not be added to any new endpoints.\n - pattern-not-inside: |\n aclObj, err := $A.$B.AuthenticateClientOnlyLegacy($A.ctx, args)\n ...\n if done, err := $A.$B.forward($METHOD, ...); done {\n return err\n }\n ...\n if !aclObj.AllowClientOp() {\n return structs.ErrPermissionDenied\n }\n ...\n\n # Pattern used by ACL endpoints that need to interact with the token\n # directly.\n - pattern-not-inside: |\n authErr := $A.$B.Authenticate($A.ctx, args)\n ...\n if done, err := $A.$B.forward($METHOD, ...); done {\n return err\n }\n ...\n ... := args.GetIdentity().GetACLToken()\n ...\n\n - metavariable-pattern:\n metavariable: $METHOD\n patterns:\n # Endpoints that are expected not to have authentication.\n - pattern-not: '"ACL.Bootstrap"'\n - pattern-not: '"ACL.GetClaimPolicies"'\n - pattern-not: '"ACL.ResolveToken"'\n - pattern-not: '"ACL.UpsertOneTimeToken"'\n - pattern-not: '"ACL.ExchangeOneTimeToken"'\n - pattern-not: '"ACL.WhoAmI"'\n - pattern-not: 'structs.ACLListAuthMethodsRPCMethod'\n - pattern-not: 'structs.ACLOIDCAuthURLRPCMethod'\n - pattern-not: 'structs.ACLOIDCCompleteAuthRPCMethod'\n - pattern-not: 'structs.ACLLoginRPCMethod'\n - pattern-not: '"Status.Leader"'\n - pattern-not: '"Status.Peers"'\n - pattern-not: '"Status.Version"'\n - pattern-not: '"Keyring.ListPublic"'\n - pattern-not: '"Keyring.GetConfig"'\n message: "RPC method $METHOD appears to be unauthenticated"\n languages:\n - "go"\n severity: "WARNING"\n paths:\n include:\n - "nomad/*_endpoint.go"\n\n\n # ACL objects should never be nil-checked in RPC handlers before checking\n # authorization, as nil ACLs are always programmer errors.\n - id: "rpc-authz-bypass"\n patterns:\n # Pattern that may accidentally bypass authorization checks.\n - pattern: |\n aclObj == nil\n\n message: "RPC method ACL check $ACL_CHECK appears to bypass authorization by first checking for nil ACLs"\n languages:\n - "go"\n severity: "WARNING"\n paths:\n include:\n - "nomad/*_endpoint.go"\n
dataset_sample\yaml\go\rpc_endpoint.yml
rpc_endpoint.yml
YAML
4,507
0.8
0.111111
0.191304
awesome-app
347
2025-03-18T08:23:44.557704
GPL-3.0
false
b3c7b671aac6eb5c07a8cb45cfa344a7
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n # Check for server RPC endpoints without metrics\n - id: "rpc-missing-metrics"\n patterns:\n - pattern: |\n authErr := $A.$B.Authenticate($A.ctx, args)\n - pattern-not-inside: |\n authErr := $A.$B.Authenticate($A.ctx, args)\n ...\n $T.srv.MeasureRPCRate(...)\n ...\n message: "RPC method appears to be missing metrics"\n languages:\n - "go"\n severity: "WARNING"\n paths:\n include:\n - "nomad/*_endpoint.go"\n
dataset_sample\yaml\go\rpc_metrics.yml
rpc_metrics.yml
YAML
559
0.8
0.047619
0.15
react-lib
565
2023-11-24T22:42:39.089922
BSD-3-Clause
false
f4e1edf8e51ae2fe8d5c21bc429a0f56
version: '3.9'\n\nservices:\n master:\n image: chrislusf/seaweedfs # use a remote image\n ports:\n - 9333:9333\n - 19333:19333\n - 9324:9324\n command: "master -ip=master -ip.bind=0.0.0.0 -metricsPort=9324"\n volume:\n image: chrislusf/seaweedfs # use a remote image\n ports:\n - 8080:8080\n - 18080:18080\n - 9325:9325\n command: 'volume -mserver="master:9333" -ip.bind=0.0.0.0 -port=8080 -metricsPort=9325'\n depends_on:\n - master\n filer:\n image: chrislusf/seaweedfs # use a remote image\n ports:\n - 8888:8888\n - 18888:18888\n - 9326:9326\n command: 'filer -master="master:9333" -ip.bind=0.0.0.0 -metricsPort=9326'\n tty: true\n stdin_open: true\n depends_on:\n - master\n - volume\n s3:\n image: chrislusf/seaweedfs # use a remote image\n ports:\n - 8333:8333\n - 9327:9327\n command: 's3 -filer="filer:8888" -ip.bind=0.0.0.0 -metricsPort=9327'\n depends_on:\n - master\n - volume\n - filer\n webdav:\n image: chrislusf/seaweedfs # use a remote image\n ports:\n - 7333:7333\n command: 'webdav -filer="filer:8888"'\n depends_on:\n - master\n - volume\n - filer\n prometheus:\n image: prom/prometheus:v2.21.0\n ports:\n - 9000:9090\n volumes:\n - ./prometheus:/etc/prometheus\n command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml\n depends_on:\n - s3\n
dataset_sample\yaml\go\seaweedfs-compose.yml
seaweedfs-compose.yml
YAML
1,430
0.8
0
0
react-lib
920
2023-07-16T07:19:39.155616
BSD-3-Clause
false
792d25fcdbcdf58beee88527cabc0506
version: '3.9'\n\nservices:\n master:\n image: chrislusf/seaweedfs:dev # use a remote dev image\n ports:\n - 9333:9333\n - 19333:19333\n command: "master -ip=master"\n volume:\n image: chrislusf/seaweedfs:dev # use a remote dev image\n ports:\n - 8080:8080\n - 18080:18080\n command: 'volume -mserver="master:9333" -port=8080 -ip=volume'\n depends_on:\n - master\n filer:\n image: chrislusf/seaweedfs:dev # use a remote dev image\n ports:\n - 8888:8888\n - 18888:18888\n command: 'filer -master="master:9333" -ip.bind=0.0.0.0'\n depends_on:\n - master\n - volume\n s3:\n image: chrislusf/seaweedfs:dev # use a remote dev image\n ports:\n - 8333:8333\n command: 's3 -filer="filer:8888" -ip.bind=0.0.0.0'\n depends_on:\n - master\n - volume\n - filer\n webdav:\n image: chrislusf/seaweedfs:dev # use a remote dev image\n ports:\n - 7333:7333\n command: 'webdav -filer="filer:8888"'\n depends_on:\n - master\n - volume\n - filer\n
dataset_sample\yaml\go\seaweedfs-dev-compose.yml
seaweedfs-dev-compose.yml
YAML
1,030
0.8
0
0
python-kit
229
2025-03-27T03:26:41.449220
GPL-3.0
false
76a00268db47c7aba408c85687cb1be4
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\npaths-ignore:\n - "website/content/*"\n
dataset_sample\yaml\go\secret-scanning.yml
secret-scanning.yml
YAML
107
0.8
0
0.5
react-lib
577
2024-10-02T12:24:08.064066
Apache-2.0
false
29dc769db82cf491bcfbfca56e3bbf0e
header:\n schema-version: '1.0.0'\n expiration-date: '2025-07-30T01:00:00.000Z'\n last-updated: '2024-07-30'\n last-reviewed: '2024-07-30'\n project-url: https://github.com/prometheus/prometheus\n changelog: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md\n license: https://github.com/prometheus/prometheus/blob/main/LICENSE\nproject-lifecycle:\n status: active\n bug-fixes-only: false\n core-maintainers:\n - https://github.com/prometheus/prometheus/blob/main/MAINTAINERS.md\ncontribution-policy:\n accepts-pull-requests: true\n accepts-automated-pull-requests: true\ndependencies:\n third-party-packages: true\n dependencies-lists:\n - https://github.com/prometheus/prometheus/blob/main/go.mod\n - https://github.com/prometheus/prometheus/blob/main/web/ui/package.json\n env-dependencies-policy:\n policy-url: https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md#dependency-management\ndistribution-points:\n - https://github.com/prometheus/prometheus/releases\ndocumentation:\n - https://prometheus.io/docs/introduction/overview/\nsecurity-contacts:\n - type: email\n value: prometheus-team@googlegroups.com\nsecurity-testing:\n - tool-type: sca\n tool-name: Dependabot\n tool-version: latest\n integration:\n ad-hoc: false\n ci: true\n before-release: true\n - tool-type: sast\n tool-name: CodeQL\n tool-version: latest\n integration:\n ad-hoc: false\n ci: true\n before-release: true\nvulnerability-reporting:\n accepts-vulnerability-reports: true\n security-policy: https://github.com/prometheus/prometheus/security/policy\n
dataset_sample\yaml\go\SECURITY-INSIGHTS.yml
SECURITY-INSIGHTS.yml
YAML
1,598
0.8
0
0
node-utils
313
2023-08-07T22:38:05.830320
Apache-2.0
false
8922548ca1e8fb2969375385ca35bb29
enabled: true\n\n# Only check PR title\ntitleOnly: true\n\ntypes:\n - feat\n - fix\n - refactor\n - docs\n - style\n - test\n - chore\n - cherrypick\n
dataset_sample\yaml\go\semantic.yml
semantic.yml
YAML
144
0.8
0
0.083333
awesome-app
704
2023-08-24T00:53:58.354968
BSD-3-Clause
false
0bdad41b5fdfc21fdc243858cd448ee0
version: v1.0\nname: Traefik Release - deprecated\nagent:\n machine:\n type: f1-standard-2\n os_image: ubuntu2204\nblocks:\n - name: 'Do nothing'\n task:\n jobs:\n - name: 'Do nothing'\n commands:\n - echo "Do nothing"\n
dataset_sample\yaml\go\semaphore.yml
semaphore.yml
YAML
250
0.7
0
0
awesome-app
346
2023-10-31T00:03:09.825385
Apache-2.0
false
6b252369cc9104b837425f93472467ec
#\n# SPDX-License-Identifier: Apache-2.0\n#\n\nrepository:\n name: fabric\n description: Hyperledger Fabric is an enterprise-grade permissioned distributed\n ledger framework for developing solutions and applications. Its modular and\n versatile design satisfies a broad range of industry use cases. It offers\n a unique approach to consensus that enables performance at scale while preserving\n privacy.\n homepage: https://lf-hyperledger.atlassian.net/wiki/spaces/fabric\n default_branch: main\n has_downloads: true\n has_issues: false\n has_projects: false\n has_wiki: false\n archived: false\n private: false\n allow_squash_merge: true\n allow_merge_commit: false\n allow_rebase_merge: true\n
dataset_sample\yaml\go\settings.yml
settings.yml
YAML
699
0.8
0.090909
0.142857
awesome-app
204
2025-05-17T08:40:48.105151
MIT
false
b74fa40d27bb603d1b671c62652be79d
wtf:\n grid:\n columns: [20, 20]\n rows: [3, 3]\n refreshInterval: 1\n mods:\n uptime:\n type: cmdrunner\n args: []\n cmd: "uptime"\n enabled: true\n position:\n top: 0\n left: 0\n height: 1\n width: 1\n refreshInterval: 30\n
dataset_sample\yaml\go\small_config.yml
small_config.yml
YAML
278
0.7
0
0
python-kit
916
2025-06-29T12:13:25.340433
GPL-3.0
false
eaddaba7003e0564855b273a5a178c2a
template: bb.sql-review.prod\nruleList:\n - type: naming.index.pk\n level: WARNING\n - type: naming.index.fk\n level: WARNING\n - type: naming.index.uk\n payload:\n format: "^idx_{{table}}_unique_{{column_list}}$"\n - type: column.no-null\n level: WARNING\n - type: table.no-foreign-key\n level: DISABLED\n
dataset_sample\yaml\go\sql-review-override.yml
sql-review-override.yml
YAML
318
0.7
0
0
python-kit
583
2024-03-15T04:02:46.908756
BSD-3-Clause
false
8f432902657431169731831697cae6be
# Configuration for probot-stale - https://github.com/probot/stale\n\n# Number of days of inactivity before an Issue or Pull Request becomes stale\ndaysUntilStale: 180\n\n# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.\n# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.\ndaysUntilClose: false\n\n# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)\nonlyLabels: []\n\n# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable\nexemptLabels:\n - pinned\n - security\n - "[Status] Maybe Later"\n\n# Set to true to ignore issues in a project (defaults to false)\nexemptProjects: false\n\n# Set to true to ignore issues in a milestone (defaults to false)\nexemptMilestones: false\n\n# Set to true to ignore issues with an assignee (defaults to false)\nexemptAssignees: true\n\n# Label to use when marking as stale\nstaleLabel: wontfix\n\n# Comment to post when marking as stale. Set to `false` to disable\nmarkComment: >\n This issue has been automatically marked as stale because it has not had\n recent activity. It will be closed if no further activity occurs. Thank you\n for your contributions.\n\n# Comment to post when removing the stale label.\n# unmarkComment: >\n# Your comment here.\n\n# Comment to post when closing a stale Issue or Pull Request.\n# closeComment: >\n# Your comment here.\n\n# Limit the number of actions per hour, from 1-30. Default is 30\nlimitPerRun: 30\n\n# Limit to only `issues` or `pulls`\n# only: issues\n\n# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':\n# pulls:\n# daysUntilStale: 30\n# markComment: >\n# This pull request has been automatically marked as stale because it has not had\n# recent activity. It will be closed if no further activity occurs. 
Thank you\n# for your contributions.\n\n# issues:\n# exemptLabels:\n# - confirmed\n
dataset_sample\yaml\go\stale.yml
stale.yml
YAML
1,990
0.8
0.098361
0.652174
awesome-app
705
2023-07-31T15:26:02.033168
Apache-2.0
false
443f13113c61b18e5693805477a5b765
name: Sync OpenAPI\n\non:\n workflow_call:\n inputs:\n project-name:\n required: true\n type: string\n push:\n branches:\n - main\n\njobs:\n sync:\n uses: IceWhaleTech/github/.github/workflows/sync_openapi.yml@main\n with:\n project-name: casaos\n secrets:\n API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}\n
dataset_sample\yaml\go\sync_openapi.yml
sync_openapi.yml
YAML
343
0.85
0
0
awesome-app
609
2024-08-27T03:24:54.130561
BSD-3-Clause
false
b4863680b1c1426a2642be0722327702
# https://taskfile.dev\n\nversion: "3"\n\ntasks:\n install:\n desc: Install Dependencies\n aliases: [i]\n cmds:\n - corepack enable\n - corepack prepare pnpm@8.3.1 --activate\n - pnpm install\n sources:\n - package.json\n - pnpm-lock.yaml\n\n default:\n desc: Start Website\n deps: [install]\n aliases: [s, start]\n cmds:\n - npx docusaurus start\n\n build:\n desc: Build Website\n deps: [install]\n cmds:\n - npx docusaurus build\n\n preview:\n desc: Preview Website\n deps: [build]\n aliases: [serve]\n cmds:\n - npx docusaurus serve\n\n crowdin:push:\n desc: Upload source files to Crowdin\n deps: [install]\n cmds:\n - npx crowdin push -b v2\n\n crowdin:pull:\n desc: Download approved translation files from Crowdin to local\n deps: [install]\n cmds:\n - npx crowdin pull -b v2 --export-only-approved\n\n format:md:\n cmds:\n - npx prettier --write "**/*.{md,mdx}"\n\n format:\n cmds:\n - task: format:md\n
dataset_sample\yaml\go\Taskfile.yml
Taskfile.yml
YAML
993
0.8
0
0.021739
node-utils
55
2024-12-17T05:09:05.642283
MIT
false
4fe7f9cd15b15f4c433ebfb5a8046a74
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n - id: "time-after-leak"\n patterns:\n - pattern: |\n select {\n case <- time.After(...): ...\n }\n message: "Potential leak of time.Timer, consider using NewSafeTimer instead"\n languages:\n - "go"\n severity: "WARNING"\n paths:\n exclude:\n - "testutil/*"\n - "*testing.go"\n - "*_test.go"\n
dataset_sample\yaml\go\time_after.yml
time_after.yml
YAML
433
0.8
0
0.111111
awesome-app
104
2024-09-14T04:52:11.936501
Apache-2.0
false
56df170cf7445cdc7b12638d6f75081f
Trivy_container_scanning:\n stage: test\n image:\n name: alpine:3.11\n variables:\n # Override the GIT_STRATEGY variable in your `.gitlab-ci.yml` file and set it to `fetch` if you want to provide a `clair-whitelist.yml`\n # file. See https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#overriding-the-container-scanning-template\n # for details\n GIT_STRATEGY: none\n IMAGE: "$CI_REGISTRY_IMAGE:$CI_COMMIT_SHA"\n allow_failure: true\n before_script:\n - export TRIVY_VERSION=${TRIVY_VERSION:-v0.19.2}\n - apk add --no-cache curl docker-cli\n - curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin ${TRIVY_VERSION}\n - curl -sSL -o /tmp/trivy-gitlab.tpl https://github.com/aquasecurity/trivy/raw/${TRIVY_VERSION}/contrib/gitlab.tpl\n - trivy registry login --username "$CI_REGISTRY_USER" --password "$CI_REGISTRY_PASSWORD" $CI_REGISTRY\n script:\n - trivy --exit-code 0 --cache-dir .trivycache/ --no-progress --format template --template "@/tmp/trivy-gitlab.tpl" -o gl-container-scanning-report.json $IMAGE\n cache:\n paths:\n - .trivycache/\n artifacts:\n reports:\n container_scanning: gl-container-scanning-report.json\n dependencies: []\n only:\n refs:\n - branches\n
dataset_sample\yaml\go\Trivy.gitlab-ci.yml
Trivy.gitlab-ci.yml
YAML
1,305
0.8
0.068966
0.103448
python-kit
52
2024-01-10T04:12:21.349549
GPL-3.0
false
e12b518edb172df7b6685d1b9ee81fda
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n - id: ui-no-string-extensions\n patterns:\n - pattern: "$S.$FUNC()"\n - metavariable-pattern:\n metavariable: $FUNC\n pattern-either:\n - pattern: "w"\n - pattern: "loc"\n - pattern: "camelize"\n - pattern: "decamelize"\n - pattern: "dasherize"\n - pattern: "underscore"\n - pattern: "classify"\n - pattern: "capitalize"\n message: "Invalid call to string extension `$FUNC` in `$S.$FUNC()`"\n languages:\n - javascript\n severity: ERROR\n paths:\n include:\n - "ui/**/*.js"\n exclude:\n - "ui/node_modules"\n
dataset_sample\yaml\go\ui.yml
ui.yml
YAML
723
0.8
0
0.076923
awesome-app
647
2025-01-09T07:24:36.215478
Apache-2.0
false
b2c27b00c4da60213dffbbb8c262f0c6
wtf:\n colors:\n background: black\n border:\n focusable: darkslateblue\n grid:\n columns: [40, 40]\n rows: [16]\n refreshInterval: 1\n mods:\n americas_time:\n title: "Americas"\n type: clocks\n enabled: true\n locations:\n UTC: "Etc/UTC"\n Vancouver: "America/Vancouver"\n New_York: "America/New_York"\n Sao_Paolo: "America/Sao_Paulo"\n Denver: "America/Denver"\n Iqaluit: "America/Iqaluit"\n Bahamas: "America/Nassau"\n Chicago: "America/Chicago"\n position:\n top: 0\n left: 0\n height: 1\n width: 1\n refreshInterval: 15\n sort: "chronological"\n textfile:\n enabled: true\n filePaths:\n - "~/.config/wtf/config.yml"\n format: true\n formatStyle: "vim"\n position:\n top: 0\n left: 1\n height: 1\n width: 1\n refreshInterval: 15\n
dataset_sample\yaml\go\uniconfig.yml
uniconfig.yml
YAML
904
0.7
0
0
vue-tools
880
2024-02-05T02:20:45.979904
BSD-3-Clause
false
1d59810606fe9fc5a815d32a87ce4bd1
apiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: cattle-cleanup-sa\n namespace: default\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: cattle-cleanup-binding\n namespace: default\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: cattle-cleanup-role\nsubjects:\n- kind: ServiceAccount\n name: cattle-cleanup-sa\n namespace: default\n\n---\n\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: cattle-cleanup-job\n namespace: default\n labels:\n rancher-cleanup: "true"\nspec:\n backoffLimit: 6\n completions: 1\n parallelism: 1\n selector:\n template:\n metadata:\n creationTimestamp: null\n spec:\n containers:\n - env:\n - name: CLUSTER_CLEANUP\n value: "true"\n # - name: DRY_RUN\n # value: "true"\n image: agent_image\n imagePullPolicy: Always\n name: cleanup-agent\n resources: {}\n terminationMessagePath: /dev/termination-log\n terminationMessagePolicy: File\n dnsPolicy: ClusterFirst\n restartPolicy: OnFailure\n schedulerName: default-scheduler\n securityContext: {}\n serviceAccountName: cattle-cleanup-sa\n terminationGracePeriodSeconds: 30\n\n---\n\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: cattle-cleanup-role\n namespace: default\nrules:\n- apiGroups:\n - ""\n resources:\n - namespaces\n verbs:\n - list\n - get\n - update\n - delete\n- apiGroups:\n - rbac.authorization.k8s.io\n resources:\n - roles\n - rolebindings\n - clusterroles\n - clusterrolebindings\n verbs:\n - list\n - get\n - delete\n- apiGroups:\n - batch\n resources:\n - jobs\n verbs:\n - list\n - get\n - delete\n- apiGroups:\n - ""\n resources:\n - services\n verbs:\n - get\n- apiGroups:\n - admissionregistration.k8s.io\n resources:\n - mutatingwebhookconfigurations\n - validatingwebhookconfigurations\n verbs:\n - get\n - delete
dataset_sample\yaml\go\user-cluster.yml
user-cluster.yml
YAML
1,910
0.8
0
0.019417
vue-tools
537
2024-11-19T15:43:18.031520
GPL-3.0
false
da5459a08bea98c8cdd29c80fecfd9ab
# Configuration for weekly-digest - https://github.com/apps/weekly-digest\npublishDay: "thursday"\ncanPublishIssues: true\ncanPublishPullRequests: true\ncanPublishContributors: true\ncanPublishStargazers: true\ncanPublishCommits: true
dataset_sample\yaml\go\weekly-digest.yml
weekly-digest.yml
YAML
228
0.8
0.166667
0.142857
react-lib
431
2023-10-17T02:26:08.676371
MIT
false
ea183a67251aacb795092741a10badba
###################### Winlogbeat Configuration Example ########################\n\n# This file is an example configuration file highlighting only the most common\n# options. The winlogbeat.reference.yml file from the same directory contains\n# all the supported options with more comments. You can use it as a reference.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/winlogbeat/index.html\n\n# ======================== Winlogbeat specific options =========================\n\n# The registry file is where Winlogbeat persists its state so that the beat can\n# resume after shutdown or an outage. The default is .winlogbeat.yml in the\n# directory in which it was started.\n#winlogbeat.registry_file: .winlogbeat.yml\n\n# The timeout value that controls when registry entries are written to disk\n# (flushed). When an unwritten update exceeds this value, it triggers a write\n# to disk. When flush is set to 0s, the registry is written to disk after each\n# batch of events has been published successfully. The default value is 5s.\n#winlogbeat.registry_flush: 5s\n\n# By default Ingest pipelines are not updated if a pipeline with the same ID\n# already exists. If this option is enabled Winlogbeat overwrites pipelines\n# every time a new Elasticsearch connection is established.\n#winlogbeat.overwrite_pipelines: false\n\n# event_logs specifies a list of event logs to monitor as well as any\n# accompanying options. The YAML data type of event_logs is a list of\n# dictionaries.\n#\n# The supported keys are name, id, xml_query, tags, fields, fields_under_root,\n# forwarded, ignore_older, level, event_id, provider, and include_xml.\n# The xml_query key requires an id and must not be used with the name,\n# ignore_older, level, event_id, or provider keys. 
Please visit the\n# documentation for the complete details of each option.\n# https://go.es.io/WinlogbeatConfig\n\nwinlogbeat.event_logs:\n - name: Application\n ignore_older: 72h\n\n - name: System\n\n - name: Security\n\n - name: ForwardedEvents\n tags: [forwarded]\n\n - name: Windows PowerShell\n event_id: 400, 403, 600, 800\n\n - name: Microsoft-Windows-PowerShell/Operational\n event_id: 4103, 4104, 4105, 4106\n\n\n# ================================== General ===================================\n\n# The name of the shipper that publishes the network data. It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n# If this option is not defined, the hostname is used.\n#name:\n\n# The tags of the shipper are included in their field with each\n# transaction published. Tags make it easy to group servers by different\n# logical properties.\n#tags: ["service-X", "web-tier"]\n\n# Optional fields that you can specify to add additional information to the\n# output. Fields can be scalar values, arrays, dictionaries, or any nested\n# combination of these.\n#fields:\n# env: staging\n\n# If this option is set to true, the custom fields are stored as top-level\n# fields in the output document instead of being grouped under a field\n# sub-dictionary. Default is false.\n#fields_under_root: false\n\n# Configure the precision of all timestamps in Winlogbeat.\n# Available options: millisecond, microsecond, nanosecond\n#timestamp.precision: millisecond\n\n# Internal queue configuration for buffering events to be published.\n# Queue settings may be overridden by performance presets in the\n# Elasticsearch output. 
To configure them manually use "preset: custom".\n#queue:\n # Queue type by name (default 'mem')\n # The memory queue will present all available events (up to the outputs\n # bulk_max_size) to the output, the moment the output is ready to serve\n # another batch of events.\n #mem:\n # Max number of events the queue can buffer.\n #events: 3200\n\n # Hints the minimum number of events stored in the queue,\n # before providing a batch of events to the outputs.\n # The default value is set to 2048.\n # A value of 0 ensures events are immediately available\n # to be sent to the outputs.\n #flush.min_events: 1600\n\n # Maximum duration after which events are available to the outputs,\n # if the number of events stored in the queue is < `flush.min_events`.\n #flush.timeout: 10s\n\n # The disk queue stores incoming events on disk until the output is\n # ready for them. This allows a higher event limit than the memory-only\n # queue and lets pending events persist through a restart.\n #disk:\n # The directory path to store the queue's data.\n #path: "${path.data}/diskqueue"\n\n # The maximum space the queue should occupy on disk. Depending on\n # input settings, events that exceed this limit are delayed or discarded.\n #max_size: 10GB\n\n # The maximum size of a single queue data file. Data in the queue is\n # stored in smaller segments that are deleted after all their events\n # have been processed.\n #segment_size: 1GB\n\n # The number of events to read from disk to memory while waiting for\n # the output to request them.\n #read_ahead: 512\n\n # The number of events to accept from inputs while waiting for them\n # to be written to disk. 
If event data arrives faster than it\n # can be written to disk, this setting prevents it from overflowing\n # main memory.\n #write_ahead: 2048\n\n # The duration to wait before retrying when the queue encounters a disk\n # write error.\n #retry_interval: 1s\n\n # The maximum length of time to wait before retrying on a disk write\n # error. If the queue encounters repeated errors, it will double the\n # length of its retry interval each time, up to this maximum.\n #max_retry_interval: 30s\n\n# Sets the maximum number of CPUs that can be executed simultaneously. The\n# default is the number of logical CPUs available in the system.\n#max_procs:\n\n# ================================= Processors =================================\n\n# Processors are used to reduce the number of fields in the exported event or to\n# enhance the event with external metadata. This section defines a list of\n# processors that are applied one by one and the first one receives the initial\n# event:\n#\n# event -> filter1 -> event1 -> filter2 ->event2 ...\n#\n# The supported processors are drop_fields, drop_event, include_fields,\n# decode_json_fields, and add_cloud_metadata.\n#\n# For example, you can use the following processors to keep the fields that\n# contain CPU load percentages, but remove the fields that contain CPU ticks\n# values:\n#\n#processors:\n# - include_fields:\n# fields: ["cpu"]\n# - drop_fields:\n# fields: ["cpu.user", "cpu.system"]\n#\n# The following example drops the events that have the HTTP response code 200:\n#\n#processors:\n# - drop_event:\n# when:\n# equals:\n# http.code: 200\n#\n# The following example renames the field a to b:\n#\n#processors:\n# - rename:\n# fields:\n# - from: "a"\n# to: "b"\n#\n# The following example tokenizes the string into fields:\n#\n#processors:\n# - dissect:\n# tokenizer: "%{key1} - %{key2}"\n# field: "message"\n# target_prefix: "dissect"\n#\n# The following example enriches each event with metadata from the cloud\n# provider about the 
host machine. It works on EC2, GCE, DigitalOcean,\n# Tencent Cloud, and Alibaba Cloud.\n#\n#processors:\n# - add_cloud_metadata: ~\n#\n# The following example enriches each event with the machine's local time zone\n# offset from UTC.\n#\n#processors:\n# - add_locale:\n# format: offset\n#\n# The following example enriches each event with docker metadata, it matches\n# given fields to an existing container id and adds info from that container:\n#\n#processors:\n# - add_docker_metadata:\n# host: "unix:///var/run/docker.sock"\n# match_fields: ["system.process.cgroup.id"]\n# match_pids: ["process.pid", "process.parent.pid"]\n# match_source: true\n# match_source_index: 4\n# match_short_id: false\n# cleanup_timeout: 60\n# labels.dedot: false\n# # To connect to Docker over TLS you must specify a client and CA certificate.\n# #ssl:\n# # certificate_authority: "/etc/pki/root/ca.pem"\n# # certificate: "/etc/pki/client/cert.pem"\n# # key: "/etc/pki/client/cert.key"\n#\n# The following example enriches each event with docker metadata, it matches\n# container id from log path available in `source` field (by default it expects\n# it to be /var/lib/docker/containers/*/*.log).\n#\n#processors:\n# - add_docker_metadata: ~\n#\n# The following example enriches each event with host metadata.\n#\n#processors:\n# - add_host_metadata: ~\n#\n# The following example enriches each event with process metadata using\n# process IDs included in the event.\n#\n#processors:\n# - add_process_metadata:\n# match_pids: ["system.process.ppid"]\n# target: system.process.parent\n#\n# The following example decodes fields containing JSON strings\n# and replaces the strings with valid JSON objects.\n#\n#processors:\n# - decode_json_fields:\n# fields: ["field1", "field2", ...]\n# process_array: false\n# max_depth: 1\n# target: ""\n# overwrite_keys: false\n#\n#processors:\n# - decompress_gzip_field:\n# from: "field1"\n# to: "field2"\n# ignore_missing: false\n# fail_on_error: true\n#\n# The following example 
copies the value of the message to message_copied\n#\n#processors:\n# - copy_fields:\n# fields:\n# - from: message\n# to: message_copied\n# fail_on_error: true\n# ignore_missing: false\n#\n# The following example truncates the value of the message to 1024 bytes\n#\n#processors:\n# - truncate_fields:\n# fields:\n# - message\n# max_bytes: 1024\n# fail_on_error: false\n# ignore_missing: true\n#\n# The following example preserves the raw message under event.original\n#\n#processors:\n# - copy_fields:\n# fields:\n# - from: message\n# to: event.original\n# fail_on_error: false\n# ignore_missing: true\n# - truncate_fields:\n# fields:\n# - event.original\n# max_bytes: 1024\n# fail_on_error: false\n# ignore_missing: true\n#\n# The following example URL-decodes the value of field1 to field2\n#\n#processors:\n# - urldecode:\n# fields:\n# - from: "field1"\n# to: "field2"\n# ignore_missing: false\n# fail_on_error: true\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Winlogbeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. 
The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Array of hosts to connect to.\n # Scheme and port can be left out and will be set to the default (http and 9200)\n # In case you specify an additional path, the scheme is required: http://localhost:9200/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200\n hosts: ["localhost:9200"]\n\n # Performance presets configure other output fields to recommended values\n # based on a performance priority.\n # Options are "balanced", "throughput", "scale", "latency" and "custom".\n # Default if unspecified: "custom"\n preset: balanced\n\n # Set gzip compression level. Set to 0 to disable compression.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 1.\n #compression_level: 1\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n # Dictionary of HTTP parameters to pass within the URL with index operations.\n #parameters:\n #param1: value1\n #param2: value2\n\n # Number of workers per Elasticsearch host.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n #worker: 1\n\n # If set to true and multiple hosts are configured, the output plugin load\n # balances published events onto all Elasticsearch hosts. 
If set to false,\n # the output plugin sends all events to only one host (determined at random)\n # and will switch to another host if the currently selected one becomes\n # unreachable. The default value is true.\n #loadbalance: true\n\n # Optional data stream or index name. The default is "winlogbeat-%{[agent.version]}".\n # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.\n #index: "winlogbeat-%{[agent.version]}"\n\n # Optional ingest pipeline. By default, no pipeline will be used.\n #pipeline: ""\n\n # Optional HTTP path\n #path: "/elasticsearch"\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Proxy server URL\n #proxy_url: http://proxy:3128\n\n # Whether to disable proxy settings for outgoing connections. If true, this\n # takes precedence over both the proxy_url field and any environment settings\n # (HTTP_PROXY, HTTPS_PROXY). The default is false.\n #proxy_disable: false\n\n # The number of times a particular Elasticsearch index operation is attempted. If\n # the indexing operation doesn't succeed after this many retries, the events are\n # dropped. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Elasticsearch bulk API index request.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 1600.\n #bulk_max_size: 1600\n\n # The number of seconds to wait before trying to reconnect to Elasticsearch\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Elasticsearch after a network error. 
The default is 60s.\n #backoff.max: 60s\n\n # The maximum amount of time an idle connection will remain idle\n # before closing itself. Zero means use the default of 60s. The\n # format is a Go language duration (example 60s is 60 seconds).\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 3s.\n # idle_connection_timeout: 3s\n\n # Configure HTTP request timeout before failing a request to Elasticsearch.\n #timeout: 90\n\n # Prevents winlogbeat from connecting to older Elasticsearch versions when set to `false`\n #allow_older_versions: true\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allows you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n # Enables restarting winlogbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. 
Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/elastic.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Number of workers per Logstash host.\n #worker: 1\n\n # Set gzip compression level.\n #compression_level: 3\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Optional maximum time to live for a connection to Logstash, after which the\n # connection will be re-established. A value of `0s` (the default) will\n # disable this feature.\n #\n # Not yet supported for async connections (i.e. with the "pipelining" option set)\n #ttl: 30s\n\n # Optionally load-balance events between Logstash hosts. Default is false.\n #loadbalance: false\n\n # Number of batches to be sent asynchronously to Logstash while processing\n # new batches.\n #pipelining: 2\n\n # If enabled only a subset of events in a batch of events is transferred per\n # transaction. The number of events to be sent increases up to `bulk_max_size`\n # if no error is encountered.\n #slow_start: false\n\n # The number of seconds to wait before trying to reconnect to Logstash\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. 
The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Logstash after a network error. The default is 60s.\n #backoff.max: 60s\n\n # Optional index name. The default index name is set to winlogbeat\n # in all lowercase.\n #index: 'winlogbeat'\n\n # SOCKS5 proxy server URL\n #proxy_url: socks5://user:password@socks5-server:2233\n\n # Resolve names locally when using a proxy server. Defaults to false.\n #proxy_use_local_resolver: false\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. 
Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enables restarting winlogbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, the events are typically dropped.\n # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting\n # and retry until all events are published. Set max_retries to a value less\n # than 0 to retry until all events are published. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Logstash request. The\n # default is 2048.\n #bulk_max_size: 2048\n\n # The number of seconds to wait for responses from the Logstash server before\n # timing out. The default is 30s.\n #timeout: 30s\n\n# -------------------------------- Kafka Output --------------------------------\n#output.kafka:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # The list of Kafka broker addresses from which to fetch the cluster metadata.\n # The cluster metadata contain the actual Kafka brokers events are published\n # to.\n #hosts: ["localhost:9092"]\n\n # The Kafka topic used for produced events. The setting can be a format string\n # using any event field. To set the topic from document type use `%{[type]}`.\n #topic: beats\n\n # The Kafka event key setting. Use format string to create a unique event key.\n # By default no event key will be generated.\n #key: ''\n\n # The Kafka event partitioning strategy. 
Default hashing strategy is `hash`\n # using the `output.kafka.key` setting or randomly distributes events if\n # `output.kafka.key` is not configured.\n #partition.hash:\n # If enabled, events will only be published to partitions with reachable\n # leaders. Default is false.\n #reachable_only: false\n\n # Configure alternative event field names used to compute the hash value.\n # If empty `output.kafka.key` setting will be used.\n # Default value is empty list.\n #hash: []\n\n # Authentication details. Password is required if username is set.\n #username: ''\n #password: ''\n\n # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.\n # Defaults to PLAIN when `username` and `password` are configured.\n #sasl.mechanism: ''\n\n # Kafka version Winlogbeat is assumed to run against. Defaults to the "1.0.0".\n #version: '1.0.0'\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Metadata update configuration. Metadata contains leader information\n # used to decide which broker to use when publishing.\n #metadata:\n # Max metadata request retry attempts when cluster is in middle of leader\n # election. Defaults to 3 retries.\n #retry.max: 3\n\n # Wait time between retries during leader elections. Default is 250ms.\n #retry.backoff: 250ms\n\n # Refresh metadata interval. Defaults to every 10 minutes.\n #refresh_frequency: 10m\n\n # Strategy for fetching the topics metadata from the broker. Default is false.\n #full: false\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, events are typically dropped.\n # Some Beats, such as Filebeat, ignore the max_retries setting and retry until\n # all events are published. Set max_retries to a value less than 0 to retry\n # until all events are published. 
The default is 3.\n #max_retries: 3\n\n # The number of seconds to wait before trying to republish to Kafka\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to republish. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful publish, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to republish to\n # Kafka after a network error. The default is 60s.\n #backoff.max: 60s\n\n # The maximum number of events to bulk in a single Kafka request. The default\n # is 2048.\n #bulk_max_size: 2048\n\n # Duration to wait before sending bulk Kafka request. 0 is no delay. The default\n # is 0.\n #bulk_flush_frequency: 0s\n\n # The number of seconds to wait for responses from the Kafka brokers before\n # timing out. The default is 30s.\n #timeout: 30s\n\n # The maximum duration a broker will wait for number of required ACKs. The\n # default is 10s.\n #broker_timeout: 10s\n\n # The number of messages buffered for each Kafka broker. The default is 256.\n #channel_buffer_size: 256\n\n # The keep-alive period for an active network connection. If 0s, keep-alives\n # are disabled. The default is 0 seconds.\n #keep_alive: 0\n\n # Sets the output compression codec. Must be one of none, snappy and gzip. The\n # default is gzip.\n #compression: gzip\n\n # Set the compression level. Currently only gzip provides a compression level\n # between 0 and 9. The default value is chosen by the compression algorithm.\n #compression_level: 4\n\n # The maximum permitted size of JSON-encoded messages. Bigger messages will be\n # dropped. The default value is 1000000 (bytes). This value should be equal to\n # or less than the broker's message.max.bytes.\n #max_message_bytes: 1000000\n\n # The ACK reliability level required from broker. 0=no response, 1=wait for\n # local commit, -1=wait for all replicas to commit. The default is 1. 
Note:\n # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently\n # on error.\n #required_acks: 1\n\n # The configurable ClientID used for logging, debugging, and auditing\n # purposes. The default is "beats".\n #client_id: beats\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enables restarting winlogbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. 
Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/security/keytabs/kafka.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # The service name. Service principal name is constructed from\n # service_name/hostname@realm.\n #kerberos.service_name: kafka\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n # Enables Kerberos FAST authentication. This may\n # conflict with certain Active Directory configurations.\n #kerberos.enable_krb5_fast: false\n\n# -------------------------------- Redis Output --------------------------------\n#output.redis:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # The list of Redis servers to connect to. If load-balancing is enabled, the\n # events are distributed to the servers in the list. If one server becomes\n # unreachable, the events are distributed to the reachable servers only.\n # The hosts setting supports redis and rediss urls with custom password like\n # redis://:password@localhost:6379.\n #hosts: ["localhost:6379"]\n\n # The name of the Redis list or channel the events are published to. The\n # default is winlogbeat.\n #key: winlogbeat\n\n # The password to authenticate to Redis with. The default is no authentication.\n #password:\n\n # The Redis database number where the events are published. The default is 0.\n #db: 0\n\n # The Redis data type to use for publishing events. If the data type is list,\n # the Redis RPUSH command is used. If the data type is channel, the Redis\n # PUBLISH command is used. 
The default value is list.\n #datatype: list\n\n # The number of workers to use for each host configured to publish events to\n # Redis. Use this setting along with the loadbalance option. For example, if\n # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each\n # host).\n #worker: 1\n\n # If set to true and multiple hosts or workers are configured, the output\n # plugin load balances published events onto all Redis hosts. If set to false,\n # the output plugin sends all events to only one host (determined at random)\n # and will switch to another host if the currently selected one becomes\n # unreachable. The default value is true.\n #loadbalance: true\n\n # The Redis connection timeout in seconds. The default is 5 seconds.\n #timeout: 5s\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, the events are typically dropped.\n # Some Beats, such as Filebeat, ignore the max_retries setting and retry until\n # all events are published. Set max_retries to a value less than 0 to retry\n # until all events are published. The default is 3.\n #max_retries: 3\n\n # The number of seconds to wait before trying to reconnect to Redis\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Redis after a network error. The default is 60s.\n #backoff.max: 60s\n\n # The maximum number of events to bulk in a single Redis request or pipeline.\n # The default is 2048.\n #bulk_max_size: 2048\n\n # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. 
The\n # value must be a URL with a scheme of socks5://.\n #proxy_url:\n\n # This option determines whether Redis hostnames are resolved locally when\n # using a proxy. The default value is false, which means that name resolution\n # occurs on the proxy server.\n #proxy_use_local_resolver: false\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n# -------------------------------- File Output ---------------------------------\n#output.file:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Path to the directory where to save the generated files. The option is\n # mandatory.\n #path: "/tmp/winlogbeat"\n\n # Name of the generated files. 
The default is `winlogbeat` and it generates\n # files: `winlogbeat-{datetime}.ndjson`, `winlogbeat-{datetime}-1.ndjson`, etc.\n #filename: winlogbeat\n\n # Maximum size in kilobytes of each file. When this size is reached, and on\n # every Winlogbeat restart, the files are rotated. The default value is 10240\n # kB.\n #rotate_every_kb: 10000\n\n # Maximum number of files under path. When this number of files is reached,\n # the oldest file is deleted and the rest are shifted from last to first. The\n # default is 7 files.\n #number_of_files: 7\n\n # Permissions to use for file creation. The default is 0600.\n #permissions: 0600\n \n # Configure automatic file rotation on every startup. The default is true.\n #rotate_on_startup: true\n\n# ------------------------------- Console Output -------------------------------\n#output.console:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n# =================================== Paths ====================================\n\n# The home path for the Winlogbeat installation. This is the default base path\n# for all other path settings and for miscellaneous files that come with the\n# distribution (for example, the sample dashboards).\n# If not set by a CLI flag or in the configuration file, the default for the\n# home path is the location of the binary.\n#path.home:\n\n# The configuration path for the Winlogbeat installation. This is the default\n# base path for configuration files, including the main YAML configuration file\n# and the Elasticsearch template file. If not set by a CLI flag or in the\n# configuration file, the default for the configuration path is the home path.\n#path.config: ${path.home}\n\n# The data path for the Winlogbeat installation. 
This is the default base path\n# for all the files in which Winlogbeat needs to store its data. If not set by a\n# CLI flag or in the configuration file, the default for the data path is a data\n# subdirectory inside the home path.\n#path.data: ${path.home}/data\n\n# The logs path for a Winlogbeat installation. This is the default location for\n# the Beat's log files. If not set by a CLI flag or in the configuration file,\n# the default for the logs path is a logs subdirectory inside the home path.\n#path.logs: ${path.home}/logs\n\n# ================================== Keystore ==================================\n\n# Location of the Keystore containing the keys and their sensitive values.\n#keystore.path: "${path.config}/beats.keystore"\n\n# ================================= Dashboards =================================\n\n# These settings control loading the sample dashboards to the Kibana index. Loading\n# the dashboards are disabled by default and can be enabled either by setting the\n# options here or by using the `-setup` CLI flag or the `setup` command.\n#setup.dashboards.enabled: false\n\n# The directory from where to read the dashboards. The default is the `kibana`\n# folder in the home path.\n#setup.dashboards.directory: ${path.home}/kibana\n\n# The URL from where to download the dashboard archive. It is used instead of\n# the directory if it has a value.\n#setup.dashboards.url:\n\n# The file archive (zip file) from where to read the dashboards. It is used instead\n# of the directory when it has a value.\n#setup.dashboards.file:\n\n# In case the archive contains the dashboards from multiple Beats, this lets you\n# select which one to load. You can load all the dashboards in the archive by\n# setting this to the empty string.\n#setup.dashboards.beat: winlogbeat\n\n# The name of the Kibana index to use for setting the configuration. Default is ".kibana"\n#setup.dashboards.kibana_index: .kibana\n\n# The Elasticsearch index name. 
This overwrites the index name defined in the\n# dashboards and index pattern. Example: testbeat-*\n#setup.dashboards.index:\n\n# Always use the Kibana API for loading the dashboards instead of autodetecting\n# how to install the dashboards by first querying Elasticsearch.\n#setup.dashboards.always_kibana: false\n\n# If true and Kibana is not reachable at the time when dashboards are loaded,\n# it will retry to reconnect to Kibana instead of exiting with an error.\n#setup.dashboards.retry.enabled: false\n\n# Duration interval between Kibana connection retries.\n#setup.dashboards.retry.interval: 1s\n\n# Maximum number of retries before exiting with an error, 0 for unlimited retrying.\n#setup.dashboards.retry.maximum: 0\n\n# ================================== Template ==================================\n\n# A template is used to set the mapping in Elasticsearch\n# By default template loading is enabled and the template is loaded.\n# These settings can be adjusted to load your own template or overwrite existing ones.\n\n# Set to false to disable template loading.\n#setup.template.enabled: true\n\n# Template name. By default the template name is "winlogbeat-%{[agent.version]}"\n# The template name and pattern has to be set in case the Elasticsearch index pattern is modified.\n#setup.template.name: "winlogbeat-%{[agent.version]}"\n\n# Template pattern. By default the template pattern is "winlogbeat-%{[agent.version]}" to apply to the default index settings.\n# The template name and pattern has to be set in case the Elasticsearch index pattern is modified.\n#setup.template.pattern: "winlogbeat-%{[agent.version]}"\n\n# Path to fields.yml file to generate the template\n#setup.template.fields: "${path.config}/fields.yml"\n\n# A list of fields to be added to the template and Kibana index pattern. 
Also\n# specify setup.template.overwrite: true to overwrite the existing template.\n#setup.template.append_fields:\n#- name: field_name\n# type: field_type\n\n# Enable JSON template loading. If this is enabled, the fields.yml is ignored.\n#setup.template.json.enabled: false\n\n# Path to the JSON template file\n#setup.template.json.path: "${path.config}/template.json"\n\n# Name under which the template is stored in Elasticsearch\n#setup.template.json.name: ""\n\n# Set this option if the JSON template is a data stream.\n#setup.template.json.data_stream: false\n\n# Overwrite existing template\n# Do not enable this option for more than one instance of winlogbeat as it might\n# overload your Elasticsearch with too many update requests.\n#setup.template.overwrite: false\n\n# Elasticsearch template settings\nsetup.template.settings:\n\n # A dictionary of settings to place into the settings.index dictionary\n # of the Elasticsearch template. For more details, please check\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html\n #index:\n #number_of_shards: 1\n #codec: best_compression\n\n # A dictionary of settings for the _source field. For more details, please check\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html\n #_source:\n #enabled: false\n\n# ====================== Index Lifecycle Management (ILM) ======================\n\n# Configure index lifecycle management (ILM) to manage the backing indices\n# of your data streams.\n\n# Enable ILM support. Valid values are true, or false.\n#setup.ilm.enabled: true\n\n# Set the lifecycle policy name. The default policy name is\n# 'beatname'.\n#setup.ilm.policy_name: "mypolicy"\n\n# The path to a JSON file that contains a lifecycle policy configuration. Used\n# to load your own lifecycle policy.\n#setup.ilm.policy_file:\n\n# Disable the check for an existing lifecycle policy. 
The default is true.\n# If you set this option to false, lifecycle policy will not be installed,\n# even if setup.ilm.overwrite is set to true.\n#setup.ilm.check_exists: true\n\n# Overwrite the lifecycle policy at startup. The default is false.\n#setup.ilm.overwrite: false\n\n# ======================== Data Stream Lifecycle (DSL) =========================\n\n# Configure Data Stream Lifecycle to manage data streams while connected to Serverless elasticsearch. \n# These settings are mutually exclusive with ILM settings which are not supported in Serverless projects.\n\n# Enable DSL support. Valid values are true, or false.\n#setup.dsl.enabled: true\n\n# Set the lifecycle policy name or pattern. For DSL, this name must match the data stream that the lifecycle is for.\n# The default data stream pattern is winlogbeat-%{[agent.version]}"\n# The template string `%{[agent.version]}` will resolve to the current stack version. \n# The other possible template value is `%{[beat.name]}`.\n#setup.dsl.data_stream_pattern: "winlogbeat-%{[agent.version]}"\n\n# The path to a JSON file that contains a lifecycle policy configuration. Used\n# to load your own lifecycle policy.\n# If no custom policy is specified, a default policy with a lifetime of 7 days will be created.\n#setup.dsl.policy_file:\n\n# Disable the check for an existing lifecycle policy. The default is true. If\n# you disable this check, set setup.dsl.overwrite: true so the lifecycle policy\n# can be installed.\n#setup.dsl.check_exists: true\n\n# Overwrite the lifecycle policy at startup. 
The default is false.\n#setup.dsl.overwrite: false\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify and additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Optional protocol and basic auth credentials.\n #protocol: "https"\n #username: "elastic"\n #password: "changeme"\n\n # Optional HTTP path\n #path: ""\n\n # Optional Kibana space ID.\n #space.id: ""\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. 
It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n# ================================== Logging ===================================\n\n# There are four options for the log output: file, stderr, syslog, eventlog\n# The file output is the default.\n\n# Sets log level. 
The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: info\n\n# Enable debug output for selected components. To enable all selectors use ["*"]\n# Other available selectors are "beat", "publisher", "service"\n# Multiple selectors can be chained.\n#logging.selectors: [ ]\n\n# Send all logging output to stderr. The default is false.\n#logging.to_stderr: false\n\n# Send all logging output to syslog. The default is false.\n#logging.to_syslog: false\n\n# Send all logging output to Windows Event Logs. The default is false.\n#logging.to_eventlog: false\n\n# If enabled, Winlogbeat periodically logs its internal metrics that have changed\n# in the last period. For each metric that changed, the delta from the value at\n# the beginning of the period is logged. Also, the total values for\n# all non-zero internal metrics are logged on shutdown. The default is true.\n#logging.metrics.enabled: true\n\n# The period after which to log the internal metrics. The default is 30s.\n#logging.metrics.period: 30s\n\n# A list of metrics namespaces to report in the logs. Defaults to [stats].\n# `stats` contains general Beat metrics. `dataset` may be present in some\n# Beats and contains module or input metrics.\n#logging.metrics.namespaces: [stats]\n\n# Logging to rotating files. Set logging.to_files to false to disable logging to\n# files.\nlogging.to_files: true\nlogging.files:\n # Configure the path where the logs are written. The default is the logs directory\n # under the home path (the binary location).\n #path: /var/log/winlogbeat\n\n # The name of the files where the logs are written to.\n #name: winlogbeat\n\n # Configure log file size limit. If the limit is reached, log file will be\n # automatically rotated.\n #rotateeverybytes: 10485760 # = 10MB\n\n # Number of rotated log files to keep. The oldest files will be deleted first.\n #keepfiles: 7\n\n # The permissions mask to apply when rotating log files. 
The default value is 0600.\n # Must be a valid Unix-style file permissions mask expressed in octal notation.\n #permissions: 0600\n\n # Enable log file rotation on time intervals in addition to the size-based rotation.\n # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h\n # are boundary-aligned with minutes, hours, days, weeks, months, and years as\n # reported by the local system clock. All other intervals are calculated from the\n # Unix epoch. Defaults to disabled.\n #interval: 0\n\n # Rotate existing logs on startup rather than appending them to the existing\n # file. Defaults to true.\n # rotateonstartup: true\n\n#=============================== Events Logging ===============================\n# Some outputs will log raw events on errors like indexing errors in the\n# Elasticsearch output, to prevent logging raw events (that may contain\n# sensitive information) together with other log messages, a different\n# log file, only for log entries containing raw events, is used. It will\n# use the same level, selectors and all other configurations from the\n# default logger, but it will have it's own file configuration.\n#\n# Having a different log file for raw events also prevents event data\n# from drowning out the regular log files.\n#\n# IMPORTANT: No matter the default logger output configuration, raw events\n# will **always** be logged to a file configured by `logging.event_data.files`.\n\n# logging.event_data:\n# Logging to rotating files. Set logging.to_files to false to disable logging to\n# files.\n#logging.event_data.to_files: true\n#logging.event_data:\n # Configure the path where the logs are written. The default is the logs directory\n # under the home path (the binary location).\n #path: /var/log/winlogbeat\n\n # The name of the files where the logs are written to.\n #name: winlogbeat-events-data\n\n # Configure log file size limit. 
If the limit is reached, log file will be\n # automatically rotated.\n #rotateeverybytes: 5242880 # = 5MB\n\n # Number of rotated log files to keep. The oldest files will be deleted first.\n #keepfiles: 2\n\n # The permissions mask to apply when rotating log files. The default value is 0600.\n # Must be a valid Unix-style file permissions mask expressed in octal notation.\n #permissions: 0600\n\n # Enable log file rotation on time intervals in addition to the size-based rotation.\n # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h\n # are boundary-aligned with minutes, hours, days, weeks, months, and years as\n # reported by the local system clock. All other intervals are calculated from the\n # Unix epoch. Defaults to disabled.\n #interval: 0\n\n # Rotate existing logs on startup rather than appending them to the existing\n # file. Defaults to false.\n # rotateonstartup: false\n\n# ============================= X-Pack Monitoring ==============================\n# Winlogbeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Winlogbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. 
Most settings from the\n# Elasticsearch output are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n # Array of hosts to connect to.\n # Scheme and port can be left out and will be set to the default (http and 9200)\n # In case you specify an additional path, the scheme is required: http://localhost:9200/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200\n #hosts: ["localhost:9200"]\n\n # Set gzip compression level.\n #compression_level: 0\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "beats_system"\n #password: "changeme"\n\n # Dictionary of HTTP parameters to pass within the URL with index operations.\n #parameters:\n #param1: value1\n #param2: value2\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Proxy server url\n #proxy_url: http://proxy:3128\n\n # The number of times a particular Elasticsearch index operation is attempted. If\n # the indexing operation doesn't succeed after this many retries, the events are\n # dropped. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Elasticsearch bulk API index request.\n # The default is 50.\n #bulk_max_size: 50\n\n # The number of seconds to wait before trying to reconnect to Elasticsearch\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. 
After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Elasticsearch after a network error. The default is 60s.\n #backoff.max: 60s\n\n # Configure HTTP request timeout before failing a request to Elasticsearch.\n #timeout: 90\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. 
It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/elastic.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n #metrics.period: 10s\n #state.period: 1m\n\n# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`\n# setting. You can find the value for this setting in the Elastic Cloud web UI.\n#monitoring.cloud.id:\n\n# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`\n# and `monitoring.elasticsearch.password` settings. The format is `<user>:<pass>`.\n#monitoring.cloud.auth:\n\n# =============================== HTTP Endpoint ================================\n\n# Each beat can expose internal metrics through an HTTP endpoint. For security\n# reasons the endpoint is disabled by default. This feature is currently experimental.\n# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output\n# append ?pretty to the URL.\n\n# Defines if the HTTP endpoint is enabled.\n#http.enabled: false\n\n# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe.\n# When using IP addresses, it is recommended to only use localhost.\n#http.host: localhost\n\n# Port on which the HTTP endpoint will bind. Default is 5066.\n#http.port: 5066\n\n# Define which user should be owning the named pipe.\n#http.named_pipe.user:\n\n# Define which permissions should be applied to the named pipe, use the Security\n# Descriptor Definition Language (SDDL) to define the permission. 
This option cannot be used with\n# `http.user`.\n#http.named_pipe.security_descriptor:\n\n# Defines if the HTTP pprof endpoints are enabled.\n# It is recommended that this is only enabled on localhost as these endpoints may leak data.\n#http.pprof.enabled: false\n\n# Controls the fraction of goroutine blocking events that are reported in the\n# blocking profile.\n#http.pprof.block_profile_rate: 0\n\n# Controls the fraction of memory allocations that are recorded and reported in\n# the memory profile.\n#http.pprof.mem_profile_rate: 524288\n\n# Controls the fraction of mutex contention events that are reported in the\n# mutex profile.\n#http.pprof.mutex_profile_rate: 0\n\n# ============================== Process Security ==============================\n\n# Enable or disable seccomp system call filtering on Linux. Default is enabled.\n#seccomp.enabled: true\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the winlogbeat.\n#instrumentation:\n # Set to true to enable instrumentation of winlogbeat.\n #enabled: false\n\n # Environment in which winlogbeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n # Enable profiling of the server, recording profile samples as events.\n #\n # This feature is experimental.\n #profiling:\n #cpu:\n # Set to true to enable CPU profiling.\n #enabled: false\n #interval: 60s\n #duration: 10s\n #heap:\n # Set to true to enable heap profiling.\n #enabled: false\n #interval: 60s\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration aliases\n#migration.6_to_7.enabled: false\n\n# =============================== Feature Flags 
================================\n\n# Enable and configure feature flags.\n#features:\n# fqdn:\n# enabled: true\n\n
dataset_sample\yaml\go\winlogbeat.reference.yml
winlogbeat.reference.yml
YAML
67,464
0.75
0.076081
0.986861
node-utils
517
2025-01-09T17:44:56.875082
GPL-3.0
false
a3fb48d0f4e3e1df118a629987f714d7
###################### Winlogbeat Configuration Example ########################\n\n# This file is an example configuration file highlighting only the most common\n# options. The winlogbeat.reference.yml file from the same directory contains\n# all the supported options with more comments. You can use it as a reference.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/winlogbeat/index.html\n\n# ======================== Winlogbeat specific options =========================\n\n# event_logs specifies a list of event logs to monitor as well as any\n# accompanying options. The YAML data type of event_logs is a list of\n# dictionaries.\n#\n# The supported keys are name, id, xml_query, tags, fields, fields_under_root,\n# forwarded, ignore_older, level, event_id, provider, and include_xml.\n# The xml_query key requires an id and must not be used with the name,\n# ignore_older, level, event_id, or provider keys. Please visit the\n# documentation for the complete details of each option.\n# https://go.es.io/WinlogbeatConfig\n\nwinlogbeat.event_logs:\n - name: Application\n ignore_older: 72h\n\n - name: System\n\n - name: Security\n\n - name: ForwardedEvents\n tags: [forwarded]\n\n - name: Windows PowerShell\n event_id: 400, 403, 600, 800\n\n - name: Microsoft-Windows-PowerShell/Operational\n event_id: 4103, 4104, 4105, 4106\n\n# ====================== Elasticsearch template settings =======================\n\nsetup.template.settings:\n index.number_of_shards: 1\n #index.codec: best_compression\n #_source.enabled: false\n\n\n# ================================== General ===================================\n\n# The name of the shipper that publishes the network data. 
It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n#name:\n\n# The tags of the shipper are included in their field with each\n# transaction published.\n#tags: ["service-X", "web-tier"]\n\n# Optional fields that you can specify to add additional information to the\n# output.\n#fields:\n# env: staging\n\n# ================================= Dashboards =================================\n# These settings control loading the sample dashboards to the Kibana index. Loading\n# the dashboards is disabled by default and can be enabled either by setting the\n# options here or by using the `setup` command.\n#setup.dashboards.enabled: false\n\n# The URL from where to download the dashboard archive. By default, this URL\n# has a value that is computed based on the Beat name and version. For released\n# versions, this URL points to the dashboard archive on the artifacts.elastic.co\n# website.\n#setup.dashboards.url:\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify and additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Kibana Space ID\n # ID of the Kibana Space into which the dashboards should be loaded. 
By default,\n # the Default Space will be used.\n #space.id:\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Winlogbeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Array of hosts to connect to.\n hosts: ["localhost:9200"]\n\n # Performance preset - one of "balanced", "throughput", "scale",\n # "latency", or "custom".\n preset: balanced\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Optional SSL. 
By default is off.\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client Certificate Key\n #ssl.key: "/etc/pki/client/cert.key"\n\n# ================================= Processors =================================\nprocessors:\n - add_host_metadata:\n when.not.contains.tags: forwarded\n - add_cloud_metadata: ~\n\n# ================================== Logging ===================================\n\n# Sets log level. The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: debug\n\n# At debug level, you can selectively enable logging only for some components.\n# To enable all selectors, use ["*"]. Examples of other selectors are "beat",\n# "publisher", "service".\n#logging.selectors: ["*"]\n\n# ============================= X-Pack Monitoring ==============================\n# Winlogbeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Winlogbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. 
Most settings from the\n# Elasticsearch outputs are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the winlogbeat.\n#instrumentation:\n # Set to true to enable instrumentation of winlogbeat.\n #enabled: false\n\n # Environment in which winlogbeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration aliases\n#migration.6_to_7.enabled: true\n\n
dataset_sample\yaml\go\winlogbeat.yml
winlogbeat.yml
YAML
7,492
0.95
0.043689
0.861842
python-kit
688
2025-02-19T11:35:30.973548
BSD-3-Clause
false
2a76818b0f49c9c7b2d818ec4b48966a
# The full repository name\nrepo: go-gitea/gitea\n\n# Service type (gitea or github)\nservice: github\n\n# Base URL for Gitea instance if using gitea service type (optional)\n# Default: https://gitea.com\nbase-url:\n\n# Changelog groups and which labeled PRs to add to each group\ngroups:\n -\n name: BREAKING\n labels:\n - pr/breaking\n -\n name: SECURITY\n labels:\n - topic/security\n -\n name: FEATURES\n labels:\n - type/feature\n -\n name: ENHANCEMENTS\n labels:\n - type/enhancement\n -\n name: PERFORMANCE\n labels:\n - performance/memory\n - performance/speed\n - performance/bigrepo\n - performance/cpu\n -\n name: BUGFIXES\n labels:\n - type/bug\n -\n name: API\n labels:\n - modifies/api\n -\n name: TESTING\n labels:\n - type/testing\n -\n name: BUILD\n labels:\n - topic/build\n - topic/code-linting\n -\n name: DOCS\n labels:\n - type/docs\n -\n name: MISC\n default: true\n\n# regex indicating which labels to skip for the changelog\nskip-labels: skip-changelog|backport\/.+\n
dataset_sample\yaml\go-gitea_gitea\.changelog.yml
.changelog.yml
YAML
1,077
0.8
0.048387
0.103448
python-kit
635
2025-01-02T09:38:07.938942
BSD-3-Clause
false
3b972a232f6bd3c454bfe32197dc9e36
tasks:\n - name: Setup\n init: |\n cp -r contrib/ide/vscode .vscode\n make deps\n make build\n command: |\n gp sync-done setup\n exit 0\n - name: Run backend\n command: |\n gp sync-await setup\n\n # Get the URL and extract the domain\n url=$(gp url 3000)\n domain=$(echo $url | awk -F[/:] '{print $4}')\n\n if [ -f custom/conf/app.ini ]; then\n sed -i "s|^ROOT_URL =.*|ROOT_URL = ${url}/|" custom/conf/app.ini\n sed -i "s|^DOMAIN =.*|DOMAIN = ${domain}|" custom/conf/app.ini\n sed -i "s|^SSH_DOMAIN =.*|SSH_DOMAIN = ${domain}|" custom/conf/app.ini\n sed -i "s|^NO_REPLY_ADDRESS =.*|SSH_DOMAIN = noreply.${domain}|" custom/conf/app.ini\n else\n mkdir -p custom/conf/\n echo -e "[server]\nROOT_URL = ${url}/" > custom/conf/app.ini\n echo -e "\n[database]\nDB_TYPE = sqlite3\nPATH = $GITPOD_REPO_ROOT/data/gitea.db" >> custom/conf/app.ini\n fi\n export TAGS="sqlite sqlite_unlock_notify"\n make watch-backend\n - name: Run frontend\n command: |\n gp sync-await setup\n make watch-frontend\n openMode: split-right\n\nvscode:\n extensions:\n - editorconfig.editorconfig\n - dbaeumer.vscode-eslint\n - golang.go\n - stylelint.vscode-stylelint\n - DavidAnson.vscode-markdownlint\n - Vue.volar\n - ms-azuretools.vscode-docker\n - vitest.explorer\n - cweijan.vscode-database-client2\n - GitHub.vscode-pull-request-github\n\nports:\n - name: Gitea\n port: 3000\n
dataset_sample\yaml\go-gitea_gitea\.gitpod.yml
.gitpod.yml
YAML
1,483
0.8
0.019608
0.021277
python-kit
823
2023-11-01T09:18:14.752754
MIT
false
ed73c42d31c18ea73871a4157acdc201
version: "2"\noutput:\n sort-order:\n - file\nlinters:\n default: none\n enable:\n - bidichk\n - depguard\n - dupl\n - errcheck\n - forbidigo\n - gocritic\n - govet\n - ineffassign\n - mirror\n - nakedret\n - nolintlint\n - perfsprint\n - revive\n - staticcheck\n - testifylint\n - unconvert\n - unparam\n - unused\n - usestdlibvars\n - usetesting\n - wastedassign\n settings:\n depguard:\n rules:\n main:\n deny:\n - pkg: encoding/json\n desc: use gitea's modules/json instead of encoding/json\n - pkg: github.com/unknwon/com\n desc: use gitea's util and replacements\n - pkg: io/ioutil\n desc: use os or io instead\n - pkg: golang.org/x/exp\n desc: it's experimental and unreliable\n - pkg: code.gitea.io/gitea/modules/git/internal\n desc: do not use the internal package, use AddXxx function instead\n - pkg: gopkg.in/ini.v1\n desc: do not use the ini package, use gitea's config system instead\n - pkg: gitea.com/go-chi/cache\n desc: do not use the go-chi cache package, use gitea's cache system\n gocritic:\n disabled-checks:\n - ifElseChain\n - singleCaseSwitch # Every time this occurred in the code, there was no other way.\n revive:\n severity: error\n rules:\n - name: atomic\n - name: bare-return\n - name: blank-imports\n - name: constant-logical-expr\n - name: context-as-argument\n - name: context-keys-type\n - name: dot-imports\n - name: duplicated-imports\n - name: empty-lines\n - name: error-naming\n - name: error-return\n - name: error-strings\n - name: errorf\n - name: exported\n - name: identical-branches\n - name: if-return\n - name: increment-decrement\n - name: indent-error-flow\n - name: modifies-value-receiver\n - name: package-comments\n - name: range\n - name: receiver-naming\n - name: redefines-builtin-id\n - name: string-of-int\n - name: superfluous-else\n - name: time-naming\n - name: unconditional-recursion\n - name: unexported-return\n - name: unreachable-code\n - name: var-declaration\n - name: var-naming\n staticcheck:\n checks:\n - all\n - -ST1003\n - 
-ST1005\n - -QF1001\n - -QF1006\n - -QF1008\n testifylint:\n disable:\n - go-require\n - require-error\n usetesting:\n os-temp-dir: true\n exclusions:\n generated: lax\n presets:\n - comments\n - common-false-positives\n - legacy\n - std-error-handling\n rules:\n - linters:\n - dupl\n - errcheck\n - gocyclo\n - gosec\n - staticcheck\n - unparam\n path: _test\.go\n - linters:\n - dupl\n - errcheck\n - gocyclo\n - gosec\n path: models/migrations/v\n - linters:\n - forbidigo\n path: cmd\n - linters:\n - dupl\n text: (?i)webhook\n - linters:\n - gocritic\n text: (?i)`ID' should not be capitalized\n - linters:\n - deadcode\n - unused\n text: (?i)swagger\n - linters:\n - staticcheck\n text: (?i)argument x is overwritten before first use\n - linters:\n - gocritic\n text: '(?i)commentFormatting: put a space between `//` and comment text'\n - linters:\n - gocritic\n text: '(?i)exitAfterDefer:'\n paths:\n - node_modules\n - public\n - web_src\n - third_party$\n - builtin$\n - examples$\nissues:\n max-issues-per-linter: 0\n max-same-issues: 0\nformatters:\n enable:\n - gofmt\n - gofumpt\n settings:\n gofumpt:\n extra-rules: true\n exclusions:\n generated: lax\n paths:\n - node_modules\n - public\n - web_src\n - third_party$\n - builtin$\n - examples$\n\nrun:\n timeout: 10m\n
dataset_sample\yaml\go-gitea_gitea\.golangci.yml
.golangci.yml
YAML
4,095
0.95
0.011628
0
vue-tools
999
2025-04-02T04:46:42.863743
GPL-3.0
false
8d95d77eb6888cab18d30c1f1eaadcbd
project_id_env: CROWDIN_PROJECT_ID\napi_token_env: CROWDIN_KEY\nbase_path: "."\nbase_url: "https://api.crowdin.com"\npreserve_hierarchy: true\nfiles:\n - source: "/options/locale/locale_en-US.ini"\n translation: "/options/locale/locale_%locale%.ini"\n type: "ini"\n skip_untranslated_strings: true\n export_only_approved: true\n update_option: "update_as_unapproved"\n
dataset_sample\yaml\go-gitea_gitea\crowdin.yml
crowdin.yml
YAML
372
0.8
0
0
vue-tools
794
2025-01-13T08:51:25.156913
BSD-3-Clause
false
639dbd864a2e4dfdaa44ad36875a6ab4
modifies/docs:\n - changed-files:\n - any-glob-to-any-file:\n - "**/*.md"\n - "docs/**"\n\nmodifies/templates:\n - changed-files:\n - all-globs-to-any-file:\n - "templates/**"\n - "!templates/swagger/v1_json.tmpl"\n\nmodifies/api:\n - changed-files:\n - any-glob-to-any-file:\n - "routers/api/**"\n - "templates/swagger/v1_json.tmpl"\n\nmodifies/cli:\n - changed-files:\n - any-glob-to-any-file:\n - "cmd/**"\n\nmodifies/translation:\n - changed-files:\n - any-glob-to-any-file:\n - "options/locale/*.ini"\n\nmodifies/migrations:\n - changed-files:\n - any-glob-to-any-file:\n - "models/migrations/**"\n\nmodifies/internal:\n - changed-files:\n - any-glob-to-any-file:\n - ".air.toml"\n - "Makefile"\n - "Dockerfile"\n - "Dockerfile.rootless"\n - ".dockerignore"\n - "docker/**"\n - ".editorconfig"\n - ".eslintrc.cjs"\n - ".golangci.yml"\n - ".gitpod.yml"\n - ".markdownlint.yaml"\n - ".spectral.yaml"\n - "stylelint.config.js"\n - ".yamllint.yaml"\n - ".github/**"\n - ".gitea/**"\n - ".devcontainer/**"\n - "build.go"\n - "build/**"\n - "contrib/**"\n\nmodifies/dependencies:\n - changed-files:\n - any-glob-to-any-file:\n - "package.json"\n - "package-lock.json"\n - "pyproject.toml"\n - "poetry.lock"\n - "go.mod"\n - "go.sum"\n\nmodifies/go:\n - changed-files:\n - any-glob-to-any-file:\n - "**/*.go"\n\nmodifies/frontend:\n - changed-files:\n - any-glob-to-any-file:\n - "*.js"\n - "*.ts"\n - "web_src/**"\n\ndocs-update-needed:\n - changed-files:\n - any-glob-to-any-file:\n - "custom/conf/app.example.ini"\n
dataset_sample\yaml\go-gitea_gitea\.github\labeler.yml
labeler.yml
YAML
1,859
0.8
0
0
vue-tools
188
2025-03-09T08:10:59.783563
Apache-2.0
false
470dfe7c0eb94f1b0a82d75dde9220d8
blank_issues_enabled: false\ncontact_links:\n - name: Security Concern\n url: https://tinyurl.com/security-gitea\n about: For security concerns, please send a mail to security@gitea.io instead of opening a public issue.\n - name: Discord Server\n url: https://discord.gg/Gitea\n about: Please ask questions and discuss configuration or deployment problems here.\n - name: Discourse Forum\n url: https://forum.gitea.com\n about: Questions and configuration or deployment problems can also be discussed on our forum.\n - name: Frequently Asked Questions\n url: https://docs.gitea.com/help/faq\n about: Please check if your question isn't mentioned here.\n - name: Crowdin Translations\n url: https://translate.gitea.com\n about: Translations are managed here.\n
dataset_sample\yaml\go-gitea_gitea\.github\ISSUE_TEMPLATE\config.yml
config.yml
YAML
777
0.8
0.058824
0
vue-tools
124
2025-01-31T01:23:43.913449
BSD-3-Clause
false
858d1df7eb32a5015835a7a0120b8cca
name: cron-licenses\n\non:\n # schedule:\n # - cron: "7 0 * * 1" # every Monday at 00:07 UTC\n workflow_dispatch:\n\njobs:\n cron-licenses:\n runs-on: ubuntu-latest\n if: github.repository == 'go-gitea/gitea'\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - run: make generate-gitignore\n timeout-minutes: 40\n - name: push translations to repo\n uses: appleboy/git-push-action@v0.0.3\n with:\n author_email: "teabot@gitea.io"\n author_name: GiteaBot\n branch: main\n commit: true\n commit_message: "[skip ci] Updated licenses and gitignores"\n remote: "git@github.com:go-gitea/gitea.git"\n ssh_key: ${{ secrets.DEPLOY_KEY }}\n
dataset_sample\yaml\go-gitea_gitea\.github\workflows\cron-licenses.yml
cron-licenses.yml
YAML
821
0.8
0.034483
0.074074
node-utils
210
2025-07-08T01:15:13.160780
GPL-3.0
false
3488f08bf7fe6944d30ffbcfba54dccd
name: cron-translations\n\non:\n schedule:\n - cron: "7 0 * * *" # every day at 00:07 UTC\n workflow_dispatch:\n\njobs:\n crowdin-pull:\n runs-on: ubuntu-latest\n if: github.repository == 'go-gitea/gitea'\n steps:\n - uses: actions/checkout@v4\n - uses: crowdin/github-action@v1\n with:\n upload_sources: true\n upload_translations: false\n download_sources: false\n download_translations: true\n push_translations: false\n push_sources: false\n create_pull_request: false\n config: crowdin.yml\n env:\n CROWDIN_PROJECT_ID: ${{ secrets.CROWDIN_PROJECT_ID }}\n CROWDIN_KEY: ${{ secrets.CROWDIN_KEY }}\n - name: update locales\n run: ./build/update-locales.sh\n - name: push translations to repo\n uses: appleboy/git-push-action@v0.0.3\n with:\n author_email: "teabot@gitea.io"\n author_name: GiteaBot\n branch: main\n commit: true\n commit_message: "[skip ci] Updated translations via Crowdin"\n remote: "git@github.com:go-gitea/gitea.git"\n ssh_key: ${{ secrets.DEPLOY_KEY }}\n
dataset_sample\yaml\go-gitea_gitea\.github\workflows\cron-translations.yml
cron-translations.yml
YAML
1,162
0.8
0.026316
0
python-kit
682
2025-06-02T06:10:27.144623
MIT
false
493067d5cd3c68fed7a165e1e93dd26a
name: files-changed\n\non:\n workflow_call:\n outputs:\n backend:\n value: ${{ jobs.detect.outputs.backend }}\n frontend:\n value: ${{ jobs.detect.outputs.frontend }}\n docs:\n value: ${{ jobs.detect.outputs.docs }}\n actions:\n value: ${{ jobs.detect.outputs.actions }}\n templates:\n value: ${{ jobs.detect.outputs.templates }}\n docker:\n value: ${{ jobs.detect.outputs.docker }}\n swagger:\n value: ${{ jobs.detect.outputs.swagger }}\n yaml:\n value: ${{ jobs.detect.outputs.yaml }}\n\njobs:\n detect:\n runs-on: ubuntu-latest\n timeout-minutes: 3\n outputs:\n backend: ${{ steps.changes.outputs.backend }}\n frontend: ${{ steps.changes.outputs.frontend }}\n docs: ${{ steps.changes.outputs.docs }}\n actions: ${{ steps.changes.outputs.actions }}\n templates: ${{ steps.changes.outputs.templates }}\n docker: ${{ steps.changes.outputs.docker }}\n swagger: ${{ steps.changes.outputs.swagger }}\n yaml: ${{ steps.changes.outputs.yaml }}\n steps:\n - uses: actions/checkout@v4\n - uses: dorny/paths-filter@v3\n id: changes\n with:\n filters: |\n backend:\n - "**/*.go"\n - "templates/**/*.tmpl"\n - "assets/emoji.json"\n - "go.mod"\n - "go.sum"\n - "Makefile"\n - ".golangci.yml"\n - ".editorconfig"\n - "options/locale/locale_en-US.ini"\n\n frontend:\n - "*.js"\n - "*.ts"\n - "web_src/**"\n - "tools/*.js"\n - "tools/*.ts"\n - "assets/emoji.json"\n - "package.json"\n - "package-lock.json"\n - "Makefile"\n - ".eslintrc.cjs"\n - ".npmrc"\n\n docs:\n - "**/*.md"\n - ".markdownlint.yaml"\n - "package.json"\n - "package-lock.json"\n\n actions:\n - ".github/workflows/*"\n - "Makefile"\n\n templates:\n - "tools/lint-templates-*.js"\n - "templates/**/*.tmpl"\n - "pyproject.toml"\n - "poetry.lock"\n\n docker:\n - "Dockerfile"\n - "Dockerfile.rootless"\n - "docker/**"\n - "Makefile"\n\n swagger:\n - "templates/swagger/v1_json.tmpl"\n - "templates/swagger/v1_input.json"\n - "Makefile"\n - "package.json"\n - "package-lock.json"\n - ".spectral.yaml"\n\n yaml:\n - "**/*.yml"\n - "**/*.yaml"\n - 
".yamllint.yaml"\n - "pyproject.toml"\n - "poetry.lock"\n
dataset_sample\yaml\go-gitea_gitea\.github\workflows\files-changed.yml
files-changed.yml
YAML
2,812
0.8
0
0
vue-tools
981
2025-04-08T11:42:55.966854
MIT
false
69a0789ddf6fe7fc7119344c965b9568
name: compliance\n\non:\n pull_request:\n\nconcurrency:\n group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}\n cancel-in-progress: true\n\njobs:\n files-changed:\n uses: ./.github/workflows/files-changed.yml\n\n lint-backend:\n if: needs.files-changed.outputs.backend == 'true' || needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - run: make deps-backend deps-tools\n - run: make lint-backend\n env:\n TAGS: bindata sqlite sqlite_unlock_notify\n\n lint-templates:\n if: needs.files-changed.outputs.templates == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-python@v5\n with:\n python-version: "3.12"\n - uses: actions/setup-node@v4\n with:\n node-version: 22\n cache: npm\n cache-dependency-path: package-lock.json\n - run: pip install poetry\n - run: make deps-py\n - run: make deps-frontend\n - run: make lint-templates\n\n lint-yaml:\n if: needs.files-changed.outputs.yaml == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-python@v5\n with:\n python-version: "3.12"\n - run: pip install poetry\n - run: make deps-py\n - run: make lint-yaml\n\n lint-swagger:\n if: needs.files-changed.outputs.swagger == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-node@v4\n with:\n node-version: 22\n cache: npm\n cache-dependency-path: package-lock.json\n - run: make deps-frontend\n - run: make lint-swagger\n\n lint-spell:\n if: needs.files-changed.outputs.backend == 'true' || needs.files-changed.outputs.frontend == 'true' || needs.files-changed.outputs.actions == 'true' || needs.files-changed.outputs.docs == 'true' || needs.files-changed.outputs.templates == 'true'\n needs: files-changed\n 
runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - run: make lint-spell\n\n lint-go-windows:\n if: needs.files-changed.outputs.backend == 'true' || needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - run: make deps-backend deps-tools\n - run: make lint-go-windows lint-go-gitea-vet\n env:\n TAGS: bindata sqlite sqlite_unlock_notify\n GOOS: windows\n GOARCH: amd64\n\n lint-go-gogit:\n if: needs.files-changed.outputs.backend == 'true' || needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - run: make deps-backend deps-tools\n - run: make lint-go\n env:\n TAGS: bindata gogit sqlite sqlite_unlock_notify\n\n checks-backend:\n if: needs.files-changed.outputs.backend == 'true' || needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - run: make deps-backend deps-tools\n - run: make --always-make checks-backend # ensure the "go-licenses" make target runs\n\n frontend:\n if: needs.files-changed.outputs.frontend == 'true' || needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-node@v4\n with:\n node-version: 22\n cache: npm\n cache-dependency-path: package-lock.json\n - run: make deps-frontend\n - run: make lint-frontend\n - run: make checks-frontend\n - run: make test-frontend\n - run: make frontend\n\n backend:\n if: needs.files-changed.outputs.backend == 'true' || 
needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n # no frontend build here as backend should be able to build\n # even without any frontend files\n - run: make deps-backend\n - run: go build -o gitea_no_gcc # test if build succeeds without the sqlite tag\n - name: build-backend-arm64\n run: make backend # test cross compile\n env:\n GOOS: linux\n GOARCH: arm64\n TAGS: bindata gogit\n - name: build-backend-windows\n run: go build -o gitea_windows\n env:\n GOOS: windows\n GOARCH: amd64\n TAGS: bindata gogit\n - name: build-backend-386\n run: go build -o gitea_linux_386 # test if compatible with 32 bit\n env:\n GOOS: linux\n GOARCH: 386\n\n docs:\n if: needs.files-changed.outputs.docs == 'true' || needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-node@v4\n with:\n node-version: 22\n cache: npm\n cache-dependency-path: package-lock.json\n - run: make deps-frontend\n - run: make lint-md\n\n actions:\n if: needs.files-changed.outputs.actions == 'true' || needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - run: make lint-actions\n
dataset_sample\yaml\go-gitea_gitea\.github\workflows\pull-compliance.yml
pull-compliance.yml
YAML
6,234
0.8
0.068293
0.010526
awesome-app
187
2025-04-29T16:24:43.930547
GPL-3.0
false
f841d7d6d8048533d334db08f6d91463
name: docker-dryrun\n\non:\n pull_request:\n\nconcurrency:\n group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}\n cancel-in-progress: true\n\njobs:\n files-changed:\n uses: ./.github/workflows/files-changed.yml\n\n regular:\n if: needs.files-changed.outputs.docker == 'true' || needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: docker/setup-buildx-action@v3\n - uses: docker/build-push-action@v5\n with:\n push: false\n tags: gitea/gitea:linux-amd64\n\n rootless:\n if: needs.files-changed.outputs.docker == 'true' || needs.files-changed.outputs.actions == 'true'\n needs: files-changed\n runs-on: ubuntu-latest\n steps:\n - uses: docker/setup-buildx-action@v3\n - uses: docker/build-push-action@v5\n with:\n push: false\n file: Dockerfile.rootless\n tags: gitea/gitea:linux-amd64\n
dataset_sample\yaml\go-gitea_gitea\.github\workflows\pull-docker-dryrun.yml
pull-docker-dryrun.yml
YAML
941
0.7
0.057143
0
node-utils
306
2025-06-17T21:08:10.241339
BSD-3-Clause
false
5a58b2f26bb49c49dbde77b3d4b938b4
name: labeler\n\non:\n pull_request_target:\n types: [opened, synchronize, reopened]\n\nconcurrency:\n group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}\n cancel-in-progress: true\n\njobs:\n labeler:\n runs-on: ubuntu-latest\n permissions:\n contents: read\n pull-requests: write\n steps:\n - uses: actions/labeler@v5\n with:\n sync-labels: true\n
dataset_sample\yaml\go-gitea_gitea\.github\workflows\pull-labeler.yml
pull-labeler.yml
YAML
394
0.7
0
0
awesome-app
60
2024-12-10T16:12:59.797785
BSD-3-Clause
false
f3f7aa26779bb5d2ca086588d8ab5284
name: release-nightly\n\non:\n push:\n branches: [main, release/v*]\n\nconcurrency:\n group: ${{ github.workflow }}-${{ github.ref }}\n cancel-in-progress: true\n\njobs:\n nightly-binary:\n runs-on: namespace-profile-gitea-release-binary\n steps:\n - uses: actions/checkout@v4\n # fetch all commits instead of only the last as some branches are long lived and could have many between versions\n # fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567\n - run: git fetch --unshallow --quiet --tags --force\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - uses: actions/setup-node@v4\n with:\n node-version: 22\n cache: npm\n cache-dependency-path: package-lock.json\n - run: make deps-frontend deps-backend\n # xgo build\n - run: make release\n env:\n TAGS: bindata sqlite sqlite_unlock_notify\n - name: import gpg key\n id: import_gpg\n uses: crazy-max/ghaction-import-gpg@v6\n with:\n gpg_private_key: ${{ secrets.GPGSIGN_KEY }}\n passphrase: ${{ secrets.GPGSIGN_PASSPHRASE }}\n - name: sign binaries\n run: |\n for f in dist/release/*; do\n echo '${{ secrets.GPGSIGN_PASSPHRASE }}' | gpg --pinentry-mode loopback --passphrase-fd 0 --batch --yes --detach-sign -u ${{ steps.import_gpg.outputs.fingerprint }} --output "$f.asc" "$f"\n done\n # clean branch name to get the folder name in S3\n - name: Get cleaned branch name\n id: clean_name\n run: |\n REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\///' -e 's/release\/v//')\n echo "Cleaned name is ${REF_NAME}"\n echo "branch=${REF_NAME}-nightly" >> "$GITHUB_OUTPUT"\n - name: configure aws\n uses: aws-actions/configure-aws-credentials@v4\n with:\n aws-region: ${{ secrets.AWS_REGION }}\n aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}\n aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}\n - name: upload binaries to s3\n run: |\n aws s3 sync dist/release s3://${{ secrets.AWS_S3_BUCKET }}/gitea/${{ 
steps.clean_name.outputs.branch }} --no-progress\n nightly-docker-rootful:\n runs-on: namespace-profile-gitea-release-docker\n permissions:\n packages: write # to publish to ghcr.io\n steps:\n - uses: actions/checkout@v4\n # fetch all commits instead of only the last as some branches are long lived and could have many between versions\n # fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567\n - run: git fetch --unshallow --quiet --tags --force\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - uses: docker/setup-qemu-action@v3\n - uses: docker/setup-buildx-action@v3\n - name: Get cleaned branch name\n id: clean_name\n run: |\n # if main then say nightly otherwise cleanup name\n if [ "${{ github.ref }}" = "refs/heads/main" ]; then\n echo "branch=nightly" >> "$GITHUB_OUTPUT"\n exit 0\n fi\n REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\///' -e 's/release\/v//')\n echo "branch=${REF_NAME}-nightly" >> "$GITHUB_OUTPUT"\n - name: Login to Docker Hub\n uses: docker/login-action@v3\n with:\n username: ${{ secrets.DOCKERHUB_USERNAME }}\n password: ${{ secrets.DOCKERHUB_TOKEN }}\n - name: Login to GHCR using PAT\n uses: docker/login-action@v3\n with:\n registry: ghcr.io\n username: ${{ github.repository_owner }}\n password: ${{ secrets.GITHUB_TOKEN }}\n - name: fetch go modules\n run: make vendor\n - name: build rootful docker image\n uses: docker/build-push-action@v5\n with:\n context: .\n platforms: linux/amd64,linux/arm64,linux/riscv64\n push: true\n tags: |-\n gitea/gitea:${{ steps.clean_name.outputs.branch }}\n ghcr.io/go-gitea/gitea:${{ steps.clean_name.outputs.branch }}\n nightly-docker-rootless:\n runs-on: namespace-profile-gitea-release-docker\n permissions:\n packages: write # to publish to ghcr.io\n steps:\n - uses: actions/checkout@v4\n # fetch all commits instead of only the last as some branches are long lived and could have many between 
versions\n # fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567\n - run: git fetch --unshallow --quiet --tags --force\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - uses: docker/setup-qemu-action@v3\n - uses: docker/setup-buildx-action@v3\n - name: Get cleaned branch name\n id: clean_name\n run: |\n # if main then say nightly otherwise cleanup name\n if [ "${{ github.ref }}" = "refs/heads/main" ]; then\n echo "branch=nightly" >> "$GITHUB_OUTPUT"\n exit 0\n fi\n REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\///' -e 's/release\/v//')\n echo "branch=${REF_NAME}-nightly" >> "$GITHUB_OUTPUT"\n - name: Login to Docker Hub\n uses: docker/login-action@v3\n with:\n username: ${{ secrets.DOCKERHUB_USERNAME }}\n password: ${{ secrets.DOCKERHUB_TOKEN }}\n - name: Login to GHCR using PAT\n uses: docker/login-action@v3\n with:\n registry: ghcr.io\n username: ${{ github.repository_owner }}\n password: ${{ secrets.GITHUB_TOKEN }}\n - name: fetch go modules\n run: make vendor\n - name: build rootless docker image\n uses: docker/build-push-action@v5\n with:\n context: .\n platforms: linux/amd64,linux/arm64\n push: true\n file: Dockerfile.rootless\n tags: |-\n gitea/gitea:${{ steps.clean_name.outputs.branch }}-rootless\n ghcr.io/go-gitea/gitea:${{ steps.clean_name.outputs.branch }}-rootless\n
dataset_sample\yaml\go-gitea_gitea\.github\workflows\release-nightly.yml
release-nightly.yml
YAML
6,272
0.95
0.032468
0.066225
node-utils
876
2024-09-15T17:16:14.555412
GPL-3.0
false
13e4eca71ebf01b12a54ffe0ec6ca346
name: release-tag-rc\n\non:\n push:\n tags:\n - "v1*-rc*"\n\nconcurrency:\n group: ${{ github.workflow }}-${{ github.ref }}\n cancel-in-progress: false\n\njobs:\n binary:\n runs-on: namespace-profile-gitea-release-binary\n steps:\n - uses: actions/checkout@v4\n # fetch all commits instead of only the last as some branches are long lived and could have many between versions\n # fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567\n - run: git fetch --unshallow --quiet --tags --force\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - uses: actions/setup-node@v4\n with:\n node-version: 22\n cache: npm\n cache-dependency-path: package-lock.json\n - run: make deps-frontend deps-backend\n # xgo build\n - run: make release\n env:\n TAGS: bindata sqlite sqlite_unlock_notify\n - name: import gpg key\n id: import_gpg\n uses: crazy-max/ghaction-import-gpg@v6\n with:\n gpg_private_key: ${{ secrets.GPGSIGN_KEY }}\n passphrase: ${{ secrets.GPGSIGN_PASSPHRASE }}\n - name: sign binaries\n run: |\n for f in dist/release/*; do\n echo '${{ secrets.GPGSIGN_PASSPHRASE }}' | gpg --pinentry-mode loopback --passphrase-fd 0 --batch --yes --detach-sign -u ${{ steps.import_gpg.outputs.fingerprint }} --output "$f.asc" "$f"\n done\n # clean branch name to get the folder name in S3\n - name: Get cleaned branch name\n id: clean_name\n run: |\n REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\/v//' -e 's/release\/v//')\n echo "Cleaned name is ${REF_NAME}"\n echo "branch=${REF_NAME}" >> "$GITHUB_OUTPUT"\n - name: configure aws\n uses: aws-actions/configure-aws-credentials@v4\n with:\n aws-region: ${{ secrets.AWS_REGION }}\n aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}\n aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}\n - name: upload binaries to s3\n run: |\n aws s3 sync dist/release s3://${{ secrets.AWS_S3_BUCKET }}/gitea/${{ steps.clean_name.outputs.branch 
}} --no-progress\n - name: Install GH CLI\n uses: dev-hanz-ops/install-gh-cli-action@v0.1.0\n with:\n gh-cli-version: 2.39.1\n - name: create github release\n run: |\n gh release create ${{ github.ref_name }} --title ${{ github.ref_name }} --draft --notes-from-tag dist/release/*\n env:\n GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}\n docker-rootful:\n runs-on: namespace-profile-gitea-release-docker\n permissions:\n packages: write # to publish to ghcr.io\n steps:\n - uses: actions/checkout@v4\n # fetch all commits instead of only the last as some branches are long lived and could have many between versions\n # fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567\n - run: git fetch --unshallow --quiet --tags --force\n - uses: docker/setup-qemu-action@v3\n - uses: docker/setup-buildx-action@v3\n - uses: docker/metadata-action@v5\n id: meta\n with:\n images: |-\n gitea/gitea\n ghcr.io/go-gitea/gitea\n flavor: |\n latest=false\n # 1.2.3-rc0\n tags: |\n type=semver,pattern={{version}}\n - name: Login to Docker Hub\n uses: docker/login-action@v3\n with:\n username: ${{ secrets.DOCKERHUB_USERNAME }}\n password: ${{ secrets.DOCKERHUB_TOKEN }}\n - name: Login to GHCR using PAT\n uses: docker/login-action@v3\n with:\n registry: ghcr.io\n username: ${{ github.repository_owner }}\n password: ${{ secrets.GITHUB_TOKEN }}\n - name: build rootful docker image\n uses: docker/build-push-action@v5\n with:\n context: .\n platforms: linux/amd64,linux/arm64,linux/riscv64\n push: true\n tags: ${{ steps.meta.outputs.tags }}\n labels: ${{ steps.meta.outputs.labels }}\n docker-rootless:\n runs-on: namespace-profile-gitea-release-docker\n permissions:\n packages: write # to publish to ghcr.io\n steps:\n - uses: actions/checkout@v4\n # fetch all commits instead of only the last as some branches are long lived and could have many between versions\n # fetch all tags to ensure that "git describe" reports expected Gitea version, eg. 
v1.21.0-dev-1-g1234567\n - run: git fetch --unshallow --quiet --tags --force\n - uses: docker/setup-qemu-action@v3\n - uses: docker/setup-buildx-action@v3\n - uses: docker/metadata-action@v5\n id: meta\n with:\n images: |-\n gitea/gitea\n ghcr.io/go-gitea/gitea\n # each tag below will have the suffix of -rootless\n flavor: |\n latest=false\n suffix=-rootless\n # 1.2.3-rc0\n tags: |\n type=semver,pattern={{version}}\n - name: Login to Docker Hub\n uses: docker/login-action@v3\n with:\n username: ${{ secrets.DOCKERHUB_USERNAME }}\n password: ${{ secrets.DOCKERHUB_TOKEN }}\n - name: Login to GHCR using PAT\n uses: docker/login-action@v3\n with:\n registry: ghcr.io\n username: ${{ github.repository_owner }}\n password: ${{ secrets.GITHUB_TOKEN }}\n - name: build rootless docker image\n uses: docker/build-push-action@v5\n with:\n context: .\n platforms: linux/amd64,linux/arm64,linux/riscv64\n push: true\n file: Dockerfile.rootless\n tags: ${{ steps.meta.outputs.tags }}\n labels: ${{ steps.meta.outputs.labels }}\n
dataset_sample\yaml\go-gitea_gitea\.github\workflows\release-tag-rc.yml
release-tag-rc.yml
YAML
5,869
0.95
0.006494
0.072848
node-utils
563
2025-03-23T08:53:40.104779
BSD-3-Clause
false
2be8359f4f06df74806489c8fd96552e
name: release-tag-version\n\non:\n push:\n tags:\n - "v1.*"\n - "!v1*-rc*"\n - "!v1*-dev"\n\nconcurrency:\n group: ${{ github.workflow }}-${{ github.ref }}\n cancel-in-progress: false\n\njobs:\n binary:\n runs-on: namespace-profile-gitea-release-binary\n permissions:\n packages: write # to publish to ghcr.io\n steps:\n - uses: actions/checkout@v4\n # fetch all commits instead of only the last as some branches are long lived and could have many between versions\n # fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567\n - run: git fetch --unshallow --quiet --tags --force\n - uses: actions/setup-go@v5\n with:\n go-version-file: go.mod\n check-latest: true\n - uses: actions/setup-node@v4\n with:\n node-version: 22\n cache: npm\n cache-dependency-path: package-lock.json\n - run: make deps-frontend deps-backend\n # xgo build\n - run: make release\n env:\n TAGS: bindata sqlite sqlite_unlock_notify\n - name: import gpg key\n id: import_gpg\n uses: crazy-max/ghaction-import-gpg@v6\n with:\n gpg_private_key: ${{ secrets.GPGSIGN_KEY }}\n passphrase: ${{ secrets.GPGSIGN_PASSPHRASE }}\n - name: sign binaries\n run: |\n for f in dist/release/*; do\n echo '${{ secrets.GPGSIGN_PASSPHRASE }}' | gpg --pinentry-mode loopback --passphrase-fd 0 --batch --yes --detach-sign -u ${{ steps.import_gpg.outputs.fingerprint }} --output "$f.asc" "$f"\n done\n # clean branch name to get the folder name in S3\n - name: Get cleaned branch name\n id: clean_name\n run: |\n REF_NAME=$(echo "${{ github.ref }}" | sed -e 's/refs\/heads\///' -e 's/refs\/tags\/v//' -e 's/release\/v//')\n echo "Cleaned name is ${REF_NAME}"\n echo "branch=${REF_NAME}" >> "$GITHUB_OUTPUT"\n - name: configure aws\n uses: aws-actions/configure-aws-credentials@v4\n with:\n aws-region: ${{ secrets.AWS_REGION }}\n aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}\n aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}\n - name: upload binaries to s3\n run: |\n aws s3 sync 
dist/release s3://${{ secrets.AWS_S3_BUCKET }}/gitea/${{ steps.clean_name.outputs.branch }} --no-progress\n - name: Install GH CLI\n uses: dev-hanz-ops/install-gh-cli-action@v0.1.0\n with:\n gh-cli-version: 2.39.1\n - name: create github release\n run: |\n gh release create ${{ github.ref_name }} --title ${{ github.ref_name }} --notes-from-tag dist/release/*\n env:\n GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}\n docker-rootful:\n runs-on: namespace-profile-gitea-release-docker\n permissions:\n packages: write # to publish to ghcr.io\n steps:\n - uses: actions/checkout@v4\n # fetch all commits instead of only the last as some branches are long lived and could have many between versions\n # fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567\n - run: git fetch --unshallow --quiet --tags --force\n - uses: docker/setup-qemu-action@v3\n - uses: docker/setup-buildx-action@v3\n - uses: docker/metadata-action@v5\n id: meta\n with:\n images: |-\n gitea/gitea\n ghcr.io/go-gitea/gitea\n # this will generate tags in the following format:\n # latest\n # 1\n # 1.2\n # 1.2.3\n tags: |\n type=semver,pattern={{version}}\n type=semver,pattern={{major}}\n type=semver,pattern={{major}}.{{minor}}\n - name: Login to Docker Hub\n uses: docker/login-action@v3\n with:\n username: ${{ secrets.DOCKERHUB_USERNAME }}\n password: ${{ secrets.DOCKERHUB_TOKEN }}\n - name: Login to GHCR using PAT\n uses: docker/login-action@v3\n with:\n registry: ghcr.io\n username: ${{ github.repository_owner }}\n password: ${{ secrets.GITHUB_TOKEN }}\n - name: build rootful docker image\n uses: docker/build-push-action@v5\n with:\n context: .\n platforms: linux/amd64,linux/arm64,linux/riscv64\n push: true\n tags: ${{ steps.meta.outputs.tags }}\n labels: ${{ steps.meta.outputs.labels }}\n docker-rootless:\n runs-on: namespace-profile-gitea-release-docker\n steps:\n - uses: actions/checkout@v4\n # fetch all commits instead of only the last as some branches are 
long lived and could have many between versions\n # fetch all tags to ensure that "git describe" reports expected Gitea version, eg. v1.21.0-dev-1-g1234567\n - run: git fetch --unshallow --quiet --tags --force\n - uses: docker/setup-qemu-action@v3\n - uses: docker/setup-buildx-action@v3\n - uses: docker/metadata-action@v5\n id: meta\n with:\n images: |-\n gitea/gitea\n ghcr.io/go-gitea/gitea\n # each tag below will have the suffix of -rootless\n flavor: |\n suffix=-rootless,onlatest=true\n # this will generate tags in the following format (with -rootless suffix added):\n # latest\n # 1\n # 1.2\n # 1.2.3\n tags: |\n type=semver,pattern={{version}}\n type=semver,pattern={{major}}\n type=semver,pattern={{major}}.{{minor}}\n - name: Login to Docker Hub\n uses: docker/login-action@v3\n with:\n username: ${{ secrets.DOCKERHUB_USERNAME }}\n password: ${{ secrets.DOCKERHUB_TOKEN }}\n - name: Login to GHCR using PAT\n uses: docker/login-action@v3\n with:\n registry: ghcr.io\n username: ${{ github.repository_owner }}\n password: ${{ secrets.GITHUB_TOKEN }}\n - name: build rootless docker image\n uses: docker/build-push-action@v5\n with:\n context: .\n platforms: linux/amd64,linux/arm64,linux/riscv64\n push: true\n file: Dockerfile.rootless\n tags: ${{ steps.meta.outputs.tags }}\n labels: ${{ steps.meta.outputs.labels }}\n
dataset_sample\yaml\go-gitea_gitea\.github\workflows\release-tag-version.yml
release-tag-version.yml
YAML
6,275
0.95
0.006061
0.117284
vue-tools
545
2023-10-16T23:13:43.827483
Apache-2.0
false
792bfff52d163e177d3b602fcd021c17
-\n id: 1\n user_id: 2\n repo_id: 3\n mode: 4\n\n-\n id: 2\n user_id: 2\n repo_id: 5\n mode: 4\n\n-\n id: 3\n user_id: 2\n repo_id: 24\n mode: 2\n\n-\n id: 4\n user_id: 2\n repo_id: 32\n mode: 4\n\n-\n id: 5\n user_id: 4\n repo_id: 3\n mode: 2\n\n-\n id: 6\n user_id: 4\n repo_id: 4\n mode: 2\n\n-\n id: 7\n user_id: 4\n repo_id: 40\n mode: 2\n\n-\n id: 8\n user_id: 10\n repo_id: 21\n mode: 2\n\n-\n id: 9\n user_id: 10\n repo_id: 32\n mode: 2\n\n-\n id: 10\n user_id: 15\n repo_id: 21\n mode: 2\n\n-\n id: 11\n user_id: 15\n repo_id: 22\n mode: 2\n\n-\n id: 12\n user_id: 15\n repo_id: 23\n mode: 4\n\n-\n id: 13\n user_id: 15\n repo_id: 24\n mode: 4\n\n-\n id: 14\n user_id: 15\n repo_id: 32\n mode: 2\n\n-\n id: 15\n user_id: 18\n repo_id: 21\n mode: 2\n\n-\n id: 16\n user_id: 18\n repo_id: 22\n mode: 2\n\n-\n id: 17\n user_id: 18\n repo_id: 23\n mode: 4\n\n-\n id: 18\n user_id: 18\n repo_id: 24\n mode: 4\n\n-\n id: 19\n user_id: 20\n repo_id: 24\n mode: 1\n\n-\n id: 20\n user_id: 20\n repo_id: 27\n mode: 4\n\n-\n id: 21\n user_id: 20\n repo_id: 28\n mode: 4\n\n-\n id: 22\n user_id: 29\n repo_id: 4\n mode: 2\n\n-\n id: 23\n user_id: 29\n repo_id: 24\n mode: 1\n\n-\n id: 24\n user_id: 31\n repo_id: 27\n mode: 4\n\n-\n id: 25\n user_id: 31\n repo_id: 28\n mode: 4\n\n-\n id: 26\n user_id: 38\n repo_id: 60\n mode: 2\n\n-\n id: 27\n user_id: 38\n repo_id: 61\n mode: 1\n\n-\n id: 28\n user_id: 39\n repo_id: 61\n mode: 1\n\n-\n id: 29\n user_id: 40\n repo_id: 61\n mode: 4\n\n-\n id: 30\n user_id: 40\n repo_id: 1\n mode: 2\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\access.yml
access.yml
YAML
1,477
0.7
0
0
react-lib
441
2024-11-25T07:56:31.692066
MIT
false
d36cf76ef89432422cedd5bd9272aa41
-\n id: 1\n uid: 1\n name: Token A\n # token: d2c6c1ba3890b309189a8e618c72a162e4efbf36\n token_hash: 2b3668e11cb82d3af8c6e4524fc7841297668f5008d1626f0ad3417e9fa39af84c268248b78c481daa7e5dc437784003494f\n token_salt: QuSiZr1byZ\n token_last_eight: e4efbf36\n created_unix: 946687980\n updated_unix: 946687980\n\n-\n id: 2\n uid: 1\n name: Token B\n # token: 4c6f36e6cf498e2a448662f915d932c09c5a146c\n token_hash: 1a0e32a231ebbd582dc626c1543a42d3c63d4fa76c07c72862721467c55e8f81c923d60700f0528b5f5f443f055559d3a279\n token_salt: Lfwopukrq5\n token_last_eight: 9c5a146c\n created_unix: 946687980\n updated_unix: 946687980\n\n-\n id: 3\n uid: 2\n name: Token A\n # token: 90a18faa671dc43924b795806ffe4fd169d28c91\n token_hash: d6d404048048812d9e911d93aefbe94fc768d4876fdf75e3bef0bdc67828e0af422846d3056f2f25ec35c51dc92075685ec5\n token_salt: 99ArgXKlQQ\n token_last_eight: 69d28c91\n created_unix: 946687980\n updated_unix: 946687980\n # commented out tokens so you can see what they are in plaintext\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\access_token.yml
access_token.yml
YAML
993
0.8
0
0.129032
python-kit
292
2024-04-12T11:29:41.659278
GPL-3.0
false
e5954424f7db80f85a102b25b1163ed4
-\n id: 1\n user_id: 2\n op_type: 12 # close issue\n act_user_id: 2\n repo_id: 2 # private\n is_private: true\n created_unix: 1603228283\n\n-\n id: 2\n user_id: 3\n op_type: 2 # rename repo\n act_user_id: 2\n repo_id: 3 # private\n is_private: true\n content: oldRepoName\n\n-\n id: 3\n user_id: 11\n op_type: 1 # create repo\n act_user_id: 11\n repo_id: 9 # public\n is_private: false\n\n-\n id: 4\n user_id: 16\n op_type: 12 # close issue\n act_user_id: 16\n repo_id: 22 # private\n is_private: true\n created_unix: 1603267920\n\n- id: 5\n user_id: 10\n op_type: 1 # create repo\n act_user_id: 10\n repo_id: 6 # private\n is_private: true\n created_unix: 1603010100\n\n- id: 6\n user_id: 10\n op_type: 1 # create repo\n act_user_id: 10\n repo_id: 7 # private\n is_private: true\n created_unix: 1603011300\n\n- id: 7\n user_id: 10\n op_type: 1 # create repo\n act_user_id: 10\n repo_id: 8 # public\n is_private: false\n created_unix: 1603011540 # grouped with id:7\n\n- id: 8\n user_id: 1\n op_type: 12 # close issue\n act_user_id: 1\n repo_id: 1700 # dangling intentional\n is_private: false\n created_unix: 1603011541\n\n- id: 9\n user_id: 34\n op_type: 12 # close issue\n act_user_id: 34\n repo_id: 1 # public\n is_private: false\n created_unix: 1680454039\n content: '4|' # issueId 5\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\action.yml
action.yml
YAML
1,273
0.8
0
0
vue-tools
64
2024-05-06T21:32:08.002368
GPL-3.0
false
40ccdd3eb3920515072cdf555beafe1c
-\n id: 1\n run_id: 791\n runner_id: 1\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n storage_path: "26/1/1712166500347189545.chunk"\n file_size: 1024\n file_compressed_size: 1024\n content_encoding: ""\n artifact_path: "abc.txt"\n artifact_name: "artifact-download"\n status: 1\n created_unix: 1712338649\n updated_unix: 1712338649\n expired_unix: 1720114649\n\n-\n id: 19\n run_id: 791\n runner_id: 1\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n storage_path: "26/19/1712348022422036662.chunk"\n file_size: 1024\n file_compressed_size: 1024\n content_encoding: ""\n artifact_path: "abc.txt"\n artifact_name: "multi-file-download"\n status: 2\n created_unix: 1712348022\n updated_unix: 1712348022\n expired_unix: 1720124022\n\n-\n id: 20\n run_id: 791\n runner_id: 1\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n storage_path: "26/20/1712348022423431524.chunk"\n file_size: 1024\n file_compressed_size: 1024\n content_encoding: ""\n artifact_path: "xyz/def.txt"\n artifact_name: "multi-file-download"\n status: 2\n created_unix: 1712348022\n updated_unix: 1712348022\n expired_unix: 1720124022\n\n-\n id: 22\n run_id: 792\n runner_id: 1\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n storage_path: "27/5/1730330775594233150.chunk"\n file_size: 1024\n file_compressed_size: 1024\n content_encoding: "application/zip"\n artifact_path: "artifact-v4-download.zip"\n artifact_name: "artifact-v4-download"\n status: 2\n created_unix: 1730330775\n updated_unix: 1730330775\n expired_unix: 1738106775\n\n-\n id: 23\n run_id: 793\n runner_id: 1\n repo_id: 2\n owner_id: 2\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n storage_path: "27/5/1730330775594233150.chunk"\n file_size: 1024\n file_compressed_size: 1024\n content_encoding: "application/zip"\n artifact_path: "artifact-v4-download.zip"\n artifact_name: "artifact-v4-download"\n status: 
2\n created_unix: 1730330775\n updated_unix: 1730330775\n expired_unix: 1738106775\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\action_artifact.yml
action_artifact.yml
YAML
2,073
0.7
0.011236
0
python-kit
841
2024-02-19T22:01:16.883259
GPL-3.0
false
1a4510fa5cef45c41c7eb0abd5180af8
-\n id: 791\n title: "update actions"\n repo_id: 4\n owner_id: 1\n workflow_id: "artifact.yaml"\n index: 187\n trigger_user_id: 1\n ref: "refs/heads/master"\n commit_sha: "c2d72f548424103f01ee1dc02889c1e2bff816b0"\n event: "push"\n is_fork_pull_request: 0\n status: 1\n started: 1683636528\n stopped: 1683636626\n created: 1683636108\n updated: 1683636626\n need_approval: 0\n approved_by: 0\n-\n id: 792\n title: "update actions"\n repo_id: 4\n owner_id: 1\n workflow_id: "artifact.yaml"\n index: 188\n trigger_user_id: 1\n ref: "refs/heads/master"\n commit_sha: "c2d72f548424103f01ee1dc02889c1e2bff816b0"\n event: "push"\n is_fork_pull_request: 0\n status: 1\n started: 1683636528\n stopped: 1683636626\n created: 1683636108\n updated: 1683636626\n need_approval: 0\n approved_by: 0\n-\n id: 793\n title: "job output"\n repo_id: 4\n owner_id: 1\n workflow_id: "test.yaml"\n index: 189\n trigger_user_id: 1\n ref: "refs/heads/master"\n commit_sha: "c2d72f548424103f01ee1dc02889c1e2bff816b0"\n event: "push"\n is_fork_pull_request: 0\n status: 1\n started: 1683636528\n stopped: 1683636626\n created: 1683636108\n updated: 1683636626\n need_approval: 0\n approved_by: 0\n-\n id: 794\n title: "job output"\n repo_id: 4\n owner_id: 1\n workflow_id: "test.yaml"\n index: 190\n trigger_user_id: 1\n ref: "refs/heads/test"\n commit_sha: "c2d72f548424103f01ee1dc02889c1e2bff816b0"\n event: "push"\n is_fork_pull_request: 0\n status: 1\n started: 1683636528\n stopped: 1683636626\n created: 1683636108\n updated: 1683636626\n need_approval: 0\n approved_by: 0\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\action_run.yml
action_run.yml
YAML
1,550
0.7
0
0
react-lib
72
2024-07-07T20:08:35.490732
GPL-3.0
false
6734fd3aef3ed32c91ac055b0baad1ac
-\n id: 34346\n name: runner_to_be_deleted-user\n uuid: 3EF231BD-FBB7-4E4B-9602-E6F28363EF18\n token_hash: 3EF231BD-FBB7-4E4B-9602-E6F28363EF18\n version: "1.0.0"\n owner_id: 1\n repo_id: 0\n description: "This runner is going to be deleted"\n agent_labels: '["runner_to_be_deleted","linux"]'\n-\n id: 34347\n name: runner_to_be_deleted-org\n uuid: 3EF231BD-FBB7-4E4B-9602-E6F28363EF19\n token_hash: 3EF231BD-FBB7-4E4B-9602-E6F28363EF19\n version: "1.0.0"\n owner_id: 3\n repo_id: 0\n description: "This runner is going to be deleted"\n agent_labels: '["runner_to_be_deleted","linux"]'\n-\n id: 34348\n name: runner_to_be_deleted-repo1\n uuid: 3EF231BD-FBB7-4E4B-9602-E6F28363EF20\n token_hash: 3EF231BD-FBB7-4E4B-9602-E6F28363EF20\n version: "1.0.0"\n owner_id: 0\n repo_id: 1\n description: "This runner is going to be deleted"\n agent_labels: '["runner_to_be_deleted","linux"]'\n-\n id: 34349\n name: runner_to_be_deleted\n uuid: 3EF231BD-FBB7-4E4B-9602-E6F28363EF17\n token_hash: 3EF231BD-FBB7-4E4B-9602-E6F28363EF17\n version: "1.0.0"\n owner_id: 0\n repo_id: 0\n description: "This runner is going to be deleted"\n agent_labels: '["runner_to_be_deleted","linux"]'\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\action_runner.yml
action_runner.yml
YAML
1,167
0.7
0
0
vue-tools
414
2024-07-13T11:34:10.949711
BSD-3-Clause
false
6e1561a38ddef465a579a3558f99e0a9
-\n id: 1 # instance scope\n token: xeiWBL5kuTYxGPynHCqQdoeYmJAeG3IzGXCYTrDX\n owner_id: 0\n repo_id: 0\n is_active: 1\n created: 1695617748\n updated: 1695617748\n\n-\n id: 2 # user scope and can't be used\n token: vohJB9QcZuSv1gAXESTk2uqpSjHhsKT9j4zYF84x\n owner_id: 1\n repo_id: 0\n is_active: 0\n created: 1695617749\n updated: 1695617749\n\n-\n id: 3 # user scope and can be used\n token: gjItAeJ3CA74hNPmPPo0Zco8I1eMaNcP1jVifjOE\n owner_id: 1\n repo_id: 0\n is_active: 1\n created: 1695617750\n updated: 1695617750\n\n-\n id: 4 # repo scope\n token: NOjLubxzFxPGhPXflZknys0gjVvQNhomFbAYuhbH\n owner_id: 0\n repo_id: 1\n is_active: 1\n created: 1695617751\n updated: 1695617751\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\action_runner_token.yml
action_runner_token.yml
YAML
677
0.8
0
0
vue-tools
109
2023-07-15T05:42:14.631583
GPL-3.0
false
21b5204fbcc52877c6ac61044dacadeb
-\n id: 192\n run_id: 791\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n name: job_2\n attempt: 1\n job_id: job_2\n task_id: 47\n status: 1\n started: 1683636528\n stopped: 1683636626\n-\n id: 193\n run_id: 792\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n name: job_2\n attempt: 1\n job_id: job_2\n task_id: 48\n status: 1\n started: 1683636528\n stopped: 1683636626\n-\n id: 194\n run_id: 793\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n name: job1 (1)\n attempt: 1\n job_id: job1\n task_id: 49\n status: 1\n started: 1683636528\n stopped: 1683636626\n-\n id: 195\n run_id: 793\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n name: job1 (2)\n attempt: 1\n job_id: job1\n task_id: 50\n status: 1\n started: 1683636528\n stopped: 1683636626\n-\n id: 196\n run_id: 793\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n name: job2\n attempt: 1\n job_id: job2\n needs: '["job1"]'\n task_id: 51\n status: 5\n started: 1683636528\n stopped: 1683636626\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\action_run_job.yml
action_run_job.yml
YAML
1,257
0.7
0
0
awesome-app
70
2024-01-31T08:02:17.970454
GPL-3.0
false
8ec10e74a40e12ae75775eed9ec470a0
-\n id: 46\n attempt: 3\n runner_id: 1\n status: 3 # 3 is the status code for "cancelled"\n started: 1683636528\n stopped: 1683636626\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n token_hash: 6d8ef48297195edcc8e22c70b3020eaa06c52976db67d39b4260c64a69a2cc1508825121b7b8394e48e00b1bf8718b2aaaaa\n token_salt: eeeeeeee\n token_last_eight: eeeeeeee\n log_filename: artifact-test2/2f/47.log\n log_in_storage: 1\n log_length: 707\n log_size: 90179\n log_expired: 0\n-\n id: 47\n job_id: 192\n attempt: 3\n runner_id: 1\n status: 6 # 6 is the status code for "running", running task can upload artifacts\n started: 1683636528\n stopped: 1683636626\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n token_hash: 6d8ef48297195edcc8e22c70b3020eaa06c52976db67d39b4260c64a69a2cc1508825121b7b8394e48e00b1bf8718b2a867e\n token_salt: jVuKnSPGgy\n token_last_eight: eeb1a71a\n log_filename: artifact-test2/2f/47.log\n log_in_storage: 1\n log_length: 707\n log_size: 90179\n log_expired: 0\n-\n id: 48\n job_id: 193\n attempt: 1\n runner_id: 1\n status: 6 # 6 is the status code for "running", running task can upload artifacts\n started: 1683636528\n stopped: 1683636626\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n token_hash: ffffcfffffffbffffffffffffffffefffffffafffffffffffffffffffffffffffffdffffffffffffffffffffffffffffffff\n token_salt: ffffffffff\n token_last_eight: ffffffff\n log_filename: artifact-test2/2f/47.log\n log_in_storage: 1\n log_length: 707\n log_size: 90179\n log_expired: 0\n-\n id: 49\n job_id: 194\n attempt: 1\n runner_id: 1\n status: 1 # success\n started: 1683636528\n stopped: 1683636626\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n token_hash: 
b8d3962425466b6709b9ac51446f93260c54afe8e7b6d3686e34f991fb8a8953822b0deed86fe41a103f34bc48dbc4784220\n token_salt: ffffffffff\n token_last_eight: ffffffff\n log_filename: artifact-test2/2f/47.log\n log_in_storage: 1\n log_length: 707\n log_size: 90179\n log_expired: 0\n-\n id: 50\n job_id: 195\n attempt: 1\n runner_id: 1\n status: 1 # success\n started: 1683636528\n stopped: 1683636626\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n token_hash: b8d3962425466b6709b9ac51446f93260c54afe8e7b6d3686e34f991fb8a8953822b0deed86fe41a103f34bc48dbc4784221\n token_salt: ffffffffff\n token_last_eight: ffffffff\n log_filename: artifact-test2/2f/47.log\n log_in_storage: 1\n log_length: 707\n log_size: 90179\n log_expired: 0\n-\n id: 51\n job_id: 196\n attempt: 1\n runner_id: 1\n status: 6 # running\n started: 1683636528\n stopped: 1683636626\n repo_id: 4\n owner_id: 1\n commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0\n is_fork_pull_request: 0\n token_hash: b8d3962425466b6709b9ac51446f93260c54afe8e7b6d3686e34f991fb8a8953822b0deed86fe41a103f34bc48dbc4784222\n token_salt: ffffffffff\n token_last_eight: ffffffff\n log_filename: artifact-test2/2f/47.log\n log_in_storage: 1\n log_length: 707\n log_size: 90179\n log_expired: 0\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\action_task.yml
action_task.yml
YAML
3,197
0.8
0.02521
0
node-utils
277
2024-11-03T06:26:19.561981
Apache-2.0
false
009271746e6a7db7a65318853db5aed5
-\n id: 1\n task_id: 49\n output_key: output_a\n output_value: abc\n-\n id: 2\n task_id: 49\n output_key: output_b\n output_value: ''\n-\n id: 3\n task_id: 50\n output_key: output_a\n output_value: ''\n-\n id: 4\n task_id: 50\n output_key: output_b\n output_value: bbb\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\action_task_output.yml
action_task_output.yml
YAML
266
0.7
0
0
awesome-app
464
2024-09-04T10:50:39.213688
Apache-2.0
false
5f0384c82345a9cdde3aabebd26bb63f
-\n id: 1\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11\n repo_id: 1\n issue_id: 1\n release_id: 0\n uploader_id: 0\n comment_id: 0\n name: attach1\n download_count: 0\n size: 0\n created_unix: 946684800\n\n-\n id: 2\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a12\n repo_id: 2\n issue_id: 4\n release_id: 0\n uploader_id: 0\n comment_id: 0\n name: attach2\n download_count: 1\n size: 0\n created_unix: 946684800\n\n-\n id: 3\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a13\n repo_id: 1\n issue_id: 2\n release_id: 0\n uploader_id: 0\n comment_id: 1\n name: attach1\n download_count: 0\n size: 0\n created_unix: 946684800\n\n-\n id: 4\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14\n repo_id: 1\n issue_id: 3\n release_id: 0\n uploader_id: 0\n comment_id: 1\n name: attach2\n download_count: 1\n size: 0\n created_unix: 946684800\n\n-\n id: 5\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a15\n repo_id: 2\n issue_id: 4\n release_id: 0\n uploader_id: 0\n comment_id: 0\n name: attach1\n download_count: 0\n size: 0\n created_unix: 946684800\n\n-\n id: 6\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a16\n repo_id: 1\n issue_id: 5\n release_id: 0\n uploader_id: 0\n comment_id: 2\n name: attach1\n download_count: 0\n size: 0\n created_unix: 946684800\n\n-\n id: 7\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17\n repo_id: 1\n issue_id: 5\n release_id: 0\n uploader_id: 0\n comment_id: 2\n name: attach1\n download_count: 0\n size: 0\n created_unix: 946684800\n\n-\n id: 8\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a18\n repo_id: 3\n issue_id: 6\n release_id: 0\n uploader_id: 0\n comment_id: 0\n name: attach1\n download_count: 0\n size: 0\n created_unix: 946684800\n\n-\n id: 9\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a19\n repo_id: 1\n issue_id: 0\n release_id: 1\n uploader_id: 0\n comment_id: 0\n name: attach1\n download_count: 0\n size: 0\n created_unix: 946684800\n\n-\n id: 10\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a20\n repo_id: 0 # TestGetAttachment/NotLinked\n issue_id: 0\n release_id: 0\n uploader_id: 
8\n comment_id: 0\n name: attach1\n download_count: 0\n size: 0\n created_unix: 946684800\n\n-\n id: 11\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a21\n repo_id: 40\n issue_id: 0\n release_id: 2\n uploader_id: 0\n comment_id: 0\n name: attach1\n download_count: 0\n size: 0\n created_unix: 946684800\n\n-\n id: 12\n uuid: a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a22\n repo_id: 2\n issue_id: 0\n release_id: 11\n uploader_id: 2\n comment_id: 0\n name: README.md\n download_count: 0\n size: 0\n created_unix: 946684800\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\attachment.yml
attachment.yml
YAML
2,484
0.8
0
0
vue-tools
791
2024-08-03T06:47:31.330925
MIT
false
794661a9de8611e152081d3d4914587c
-\n id: 1\n repo_id: 1\n name: 'foo'\n commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'\n commit_message: 'first commit'\n commit_time: 978307100\n pusher_id: 1\n is_deleted: true\n deleted_by_id: 1\n deleted_unix: 978307200\n\n-\n id: 2\n repo_id: 1\n name: 'bar'\n commit_id: '62fb502a7172d4453f0322a2cc85bddffa57f07a'\n commit_message: 'second commit'\n commit_time: 978307100\n pusher_id: 1\n is_deleted: true\n deleted_by_id: 99\n deleted_unix: 978307200\n\n-\n id: 3\n repo_id: 1\n name: 'branch2'\n commit_id: '985f0301dba5e7b34be866819cd15ad3d8f508ee'\n commit_message: 'make pull5 outdated'\n commit_time: 1579166279\n pusher_id: 1\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 4\n repo_id: 1\n name: 'master'\n commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'\n commit_message: 'Initial commit'\n commit_time: 1489927679\n pusher_id: 1\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 5\n repo_id: 10\n name: 'master'\n commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'\n commit_message: 'Initial commit'\n commit_time: 1489927679\n pusher_id: 12\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 6\n repo_id: 10\n name: 'outdated-new-branch'\n commit_id: 'cb24c347e328d83c1e0c3c908a6b2c0a2fcb8a3d'\n commit_message: 'add'\n commit_time: 1489927679\n pusher_id: 12\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 14\n repo_id: 11\n name: 'master'\n commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'\n commit_message: 'Initial commit'\n commit_time: 1489927679\n pusher_id: 13\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 15\n repo_id: 4\n name: 'master'\n commit_id: 'c7cd3cd144e6d23c9d6f3d07e52b2c1a956e0338'\n commit_message: 'add Readme'\n commit_time: 1588147171\n pusher_id: 13\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 16\n repo_id: 16\n name: 'master'\n commit_id: '69554a64c1e6030f051e5c3f94bfbd773cd6a324'\n commit_message: 'not 
signed commit'\n commit_time: 1502042309\n pusher_id: 2\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 17\n repo_id: 16\n name: 'not-signed'\n commit_id: '69554a64c1e6030f051e5c3f94bfbd773cd6a324'\n commit_message: 'not signed commit'\n commit_time: 1502042309\n pusher_id: 2\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 18\n repo_id: 16\n name: 'good-sign-not-yet-validated'\n commit_id: '27566bd5738fc8b4e3fef3c5e72cce608537bd95'\n commit_message: 'good signed commit (with not yet validated email)'\n commit_time: 1502042234\n pusher_id: 2\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 19\n repo_id: 16\n name: 'good-sign'\n commit_id: 'f27c2b2b03dcab38beaf89b0ab4ff61f6de63441'\n commit_message: 'good signed commit'\n commit_time: 1502042101\n pusher_id: 2\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 20\n repo_id: 1\n name: 'feature/1'\n commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'\n commit_message: 'Initial commit'\n commit_time: 1489950479\n pusher_id: 2\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 21\n repo_id: 49\n name: 'master'\n commit_id: 'aacbdfe9e1c4b47f60abe81849045fa4e96f1d75'\n commit_message: "Add 'test/test.txt'"\n commit_time: 1572535577\n pusher_id: 2\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 22\n repo_id: 1\n name: 'develop'\n commit_id: '65f1bf27bc3bf70f64657658635e66094edbcb4d'\n commit_message: "Initial commit"\n commit_time: 1489927679\n pusher_id: 1\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 23\n repo_id: 3\n name: 'master'\n commit_id: '2a47ca4b614a9f5a43abbd5ad851a54a616ffee6'\n commit_message: "init project"\n commit_time: 1497448461\n pusher_id: 1\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n\n-\n id: 24\n repo_id: 3\n name: 'test_branch'\n commit_id: 'd22b4d4daa5be07329fcef6ed458f00cf3392da0'\n commit_message: "test commit"\n commit_time: 1602935385\n 
pusher_id: 1\n is_deleted: false\n deleted_by_id: 0\n deleted_unix: 0\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\branch.yml
branch.yml
YAML
4,022
0.7
0
0
python-kit
248
2025-06-18T16:17:29.549284
Apache-2.0
false
c7f361fc84d783fcb71eb13314eebe0d
-\n id: 1\n repo_id: 3\n user_id: 2\n mode: 2 # write\n\n-\n id: 2\n repo_id: 4\n user_id: 4\n mode: 2 # write\n\n-\n id: 3\n repo_id: 40\n user_id: 4\n mode: 2 # write\n\n-\n id: 4\n repo_id: 4\n user_id: 29\n mode: 2 # write\n\n-\n id: 5\n repo_id: 21\n user_id: 15\n mode: 2 # write\n\n-\n id: 6\n repo_id: 21\n user_id: 18\n mode: 2 # write\n\n-\n id: 7\n repo_id: 22\n user_id: 15\n mode: 2 # write\n\n-\n id: 8\n repo_id: 22\n user_id: 18\n mode: 2 # write\n\n-\n id: 9\n repo_id: 60\n user_id: 38\n mode: 2 # write\n\n-\n id: 10\n repo_id: 21\n user_id: 10\n mode: 2 # write\n\n-\n id: 11\n repo_id: 32\n user_id: 10\n mode: 2 # write\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\collaboration.yml
collaboration.yml
YAML
622
0.8
0
0
vue-tools
474
2024-11-09T21:14:32.568066
Apache-2.0
false
0e6db898ebe1bc353a1c012da26990ce
-\n id: 1\n type: 7 # label\n poster_id: 2\n issue_id: 1 # in repo_id 1\n label_id: 1\n content: "1"\n created_unix: 946684810\n-\n id: 2\n type: 0 # comment\n poster_id: 3 # user not watching (see watch.yml)\n issue_id: 1 # in repo_id 1\n content: "good work!"\n created_unix: 946684811\n updated_unix: 946684811\n-\n id: 3\n type: 0 # comment\n poster_id: 5 # user not watching (see watch.yml)\n issue_id: 1 # in repo_id 1\n content: "meh..."\n created_unix: 946684812\n updated_unix: 946684812\n-\n id: 4\n type: 21 # code comment\n poster_id: 1\n issue_id: 2\n content: "meh..."\n review_id: 4\n line: 4\n tree_path: "README.md"\n created_unix: 946684812\n invalidated: false\n-\n id: 5\n type: 21 # code comment\n poster_id: 1\n issue_id: 2\n content: "meh..."\n line: -4\n tree_path: "README.md"\n created_unix: 946684812\n invalidated: false\n\n-\n id: 6\n type: 21 # code comment\n poster_id: 1\n issue_id: 2\n content: "it's already invalidated. boring..."\n line: -4\n tree_path: "README.md"\n created_unix: 946684812\n invalidated: true\n\n-\n id: 7\n type: 21 # code comment\n poster_id: 100\n issue_id: 3\n content: "a review from a deleted user"\n line: -4\n review_id: 10\n tree_path: "README.md"\n created_unix: 946684812\n invalidated: true\n\n-\n id: 8\n type: 0 # comment\n poster_id: 2\n issue_id: 4 # in repo_id 2\n content: "comment in private pository"\n created_unix: 946684811\n updated_unix: 946684811\n\n-\n id: 9\n type: 22 # review\n poster_id: 2\n issue_id: 2 # in repo_id 1\n review_id: 20\n created_unix: 946684810\n\n-\n id: 10\n type: 22 # review\n poster_id: 5\n issue_id: 3 # in repo_id 1\n content: "reviewed by user5"\n review_id: 21\n created_unix: 946684816\n\n-\n id: 11\n type: 27 # review request\n poster_id: 2\n issue_id: 3 # in repo_id 1\n content: "review request for user5"\n review_id: 22\n assignee_id: 5\n created_unix: 946684817\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\comment.yml
comment.yml
YAML
1,865
0.8
0.009615
0
node-utils
619
2024-04-19T18:40:18.926622
Apache-2.0
false
93533e72a60a26f371964f9e006bf62b
-\n id: 1\n index: 1\n repo_id: 1\n state: "pending"\n sha: "1234123412341234123412341234123412341234"\n target_url: https://example.com/builds/\n description: My awesome CI-service\n context: ci/awesomeness\n creator_id: 2\n\n-\n id: 2\n index: 2\n repo_id: 1\n state: "warning"\n sha: "1234123412341234123412341234123412341234"\n target_url: https://example.com/converage/\n description: My awesome Coverage service\n context: cov/awesomeness\n creator_id: 2\n\n-\n id: 3\n index: 3\n repo_id: 1\n state: "success"\n sha: "1234123412341234123412341234123412341234"\n target_url: https://example.com/converage/\n description: My awesome Coverage service\n context: cov/awesomeness\n creator_id: 2\n\n-\n id: 4\n index: 4\n repo_id: 1\n state: "failure"\n sha: "1234123412341234123412341234123412341234"\n target_url: https://example.com/builds/\n description: My awesome CI-service\n context: ci/awesomeness\n creator_id: 2\n\n-\n id: 5\n index: 5\n repo_id: 1\n state: "error"\n sha: "1234123412341234123412341234123412341234"\n target_url: https://example.com/builds/\n description: My awesome deploy service\n context: deploy/awesomeness\n creator_id: 2\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\commit_status.yml
commit_status.yml
YAML
1,150
0.8
0
0
react-lib
461
2024-01-09T22:59:42.555080
GPL-3.0
false
90360478ab53ea3d93f878406176cce1
-\n id: 1\n uid: 11\n email: user11@example.com\n lower_email: user11@example.com\n is_activated: false\n is_primary: true\n\n-\n id: 2\n uid: 12\n email: user12@example.com\n lower_email: user12@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 3\n uid: 2\n email: user2@example.com\n lower_email: user2@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 4\n uid: 21\n email: user21@example.com\n lower_email: user21@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 5\n uid: 9999999\n email: user9999999@example.com\n lower_email: user9999999@example.com\n is_activated: true\n is_primary: false\n\n-\n id: 6\n uid: 10\n email: user10@example.com\n lower_email: user10@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 7\n uid: 10\n email: user101@example.com\n lower_email: user101@example.com\n is_activated: true\n is_primary: false\n\n-\n id: 8\n uid: 9\n email: user9@example.com\n lower_email: user9@example.com\n is_activated: false\n is_primary: true\n\n-\n id: 9\n uid: 1\n email: user1@example.com\n lower_email: user1@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 10\n uid: 3\n email: org3@example.com\n lower_email: org3@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 11\n uid: 4\n email: user4@example.com\n lower_email: user4@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 12\n uid: 5\n email: user5@example.com\n lower_email: user5@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 13\n uid: 6\n email: org6@example.com\n lower_email: org6@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 14\n uid: 7\n email: org7@example.com\n lower_email: org7@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 15\n uid: 8\n email: user8@example.com\n lower_email: user8@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 16\n uid: 13\n email: user13@example.com\n lower_email: user13@example.com\n is_activated: true\n is_primary: true\n\n-\n 
id: 17\n uid: 14\n email: user14@example.com\n lower_email: user14@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 18\n uid: 15\n email: user15@example.com\n lower_email: user15@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 19\n uid: 16\n email: user16@example.com\n lower_email: user16@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 20\n uid: 17\n email: org17@example.com\n lower_email: org17@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 21\n uid: 18\n email: user18@example.com\n lower_email: user18@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 22\n uid: 19\n email: org19@example.com\n lower_email: org19@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 23\n uid: 20\n email: user20@example.com\n lower_email: user20@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 24\n uid: 22\n email: limited_org@example.com\n lower_email: limited_org@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 25\n uid: 23\n email: privated_org@example.com\n lower_email: privated_org@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 26\n uid: 24\n email: user24@example.com\n lower_email: user24@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 27\n uid: 25\n email: org25@example.com\n lower_email: org25@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 28\n uid: 26\n email: org26@example.com\n lower_email: org26@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 29\n uid: 27\n email: user27@example.com\n lower_email: user27@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 30\n uid: 28\n email: user28@example.com\n lower_email: user28@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 31\n uid: 29\n email: user29@example.com\n lower_email: user29@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 32\n uid: 30\n email: user30@example.com\n lower_email: user30@example.com\n is_activated: 
true\n is_primary: true\n\n-\n id: 33\n uid: 1\n email: user1-2@example.com\n lower_email: user1-2@example.com\n is_activated: true\n is_primary: false\n\n-\n id: 34\n uid: 1\n email: user1-3@example.com\n lower_email: user1-3@example.com\n is_activated: true\n is_primary: false\n\n-\n id: 35\n uid: 2\n email: user2-2@example.com\n lower_email: user2-2@example.com\n is_activated: false\n is_primary: false\n\n-\n id: 36\n uid: 36\n email: abcde@gitea.com\n lower_email: abcde@gitea.com\n is_activated: true\n is_primary: false\n\n-\n id: 37\n uid: 37\n email: user37@example.com\n lower_email: user37@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 38\n uid: 38\n email: user38@example.com\n lower_email: user38@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 39\n uid: 39\n email: user39@example.com\n lower_email: user39@example.com\n is_activated: true\n is_primary: true\n\n-\n id: 40\n uid: 40\n email: user40@example.com\n lower_email: user40@example.com\n is_activated: true\n is_primary: true\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\email_address.yml
email_address.yml
YAML
4,954
0.7
0
0
node-utils
683
2025-03-29T16:08:06.063305
BSD-3-Clause
false
77d834dc14841649bb2c8c079ede7a20
-\n id: 1\n user_id: 4\n follow_id: 2\n\n-\n id: 2\n user_id: 8\n follow_id: 2\n\n-\n id: 3\n user_id: 2\n follow_id: 8\n\n-\n id: 4\n user_id: 31\n follow_id: 33\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\follow.yml
follow.yml
YAML
157
0.7
0
0
node-utils
932
2024-06-17T16:53:18.320875
Apache-2.0
false
ecabd03fe370c69ceb47f66233654f1a
-\n id: 5\n owner_id: 36\n key_id: B15431642629B826\n primary_key_id:\n content: xsDNBGTrY3UBDAC2HLBqmMplAV15qSnC7g1c4dV406f5EHNhFr95Nup2My6b2eafTlvedv77s8PT/I7F3fy4apOZs5A7w2SsPlLMcQ3ev4uGOsxRtkq5RLy1Yb6SNueX0Da2UVKR5KTC5Q6BWaqxwS0IjKOLZ/xz0Pbe/ClV3bZSKBEY2omkVo3Z0HZ771vB2clPRvGJ/IdeKOsZ3ZytSFXfyiJBdARmeSPmydXLil8+Ibq5iLAeow5PK8hK1TCOnKHzLWNqcNq70tyjoHvcGi70iGjoVEEUgPCLLuU8WmzTJwlvA3BuDzjtaO7TLo/jdE6iqkHtMSS8x+43sAH6hcFRCWAVh/0Uq7n36uGDfNxGnX3YrmX3LR9x5IsBES1rGGWbpxio4o5GIf/Xd+JgDd9rzJCqRuZ3/sW/TxK38htWaVNZV0kMkHUCTc1ctzWpCm635hbFCHBhPYIp+/z206khkAKDbz/CNuU91Wazsh7KO07wrwDtxfDDbInJ8TfHE2TGjzjQzgChfmcAEQEAAQ==\n verified: true\n can_sign: true\n can_encrypt_comms: true\n can_encrypt_storage: true\n can_certify: true\n\n-\n id: 6\n owner_id: 36\n key_id: EE3AF48454AFD619\n primary_key_id: B15431642629B826\n content: zsDNBGTrY3UBDADsHrzuOicQaPdUQm0+0UNrs92cESm/j/4yBBUk+sfLZAo6J99c4eh4nAQzzZ7al080rYKB0G+7xoRz1eHcQH6zrVcqB8KYtf/sdY47WaMiMyxM+kTSvzp7tsv7QuSQZ0neUEXRyYMz5ttBfIjWUd+3NDItuHyB+MtNWlS3zXgaUbe5VifqKaNmzN0Ye4yXTKcpypE3AOqPVz+iIFv3c6TmsqLHJaR4VoicCleAqLyF/28WsJO7M9dDW+EM3MZVnsVpycTURyHAJGfSk10waQZAaRwmarCN/q0KEJ+aEAK/SRliUneBZoMO5hY5iBeG432tofwaQqAahPv9uXIb1n2JEMKwnMlMA9UGD1AcDbywfj1m/ZGBBw95i4Ekkfn43RvV3THr7uJU/dRqqP+iic4MwpUrOxqELW/kmeHXlBcNbZZhEEvwRoW7U2/9eeuog4nRleRJ0pi/xOP9wmxkKjaIPIK3phdBtEpVk4w/UTAWNdyIIrFggukeAnZFyGJwlm8AEQEAAQ==\n verified: true\n can_sign: true\n can_encrypt_comms: true\n can_encrypt_storage: true\n can_certify: true\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\gpg_key.yml
gpg_key.yml
YAML
1,470
0.7
0
0
react-lib
362
2024-10-07T17:55:24.681643
GPL-3.0
false
b697a3a0ef64f885b06363d1f4878a9c
-\n id: 1\n hook_id: 1\n uuid: uuid1\n is_delivered: true\n is_succeed: false\n request_content: >\n {\n "url": "/matrix-delivered",\n "http_method":"PUT",\n "headers": {\n "X-Head": "42"\n },\n "body": "{}"\n }\n\n-\n id: 2\n hook_id: 1\n uuid: uuid2\n is_delivered: false\n\n-\n id: 3\n hook_id: 1\n uuid: uuid3\n is_delivered: true\n is_succeed: true\n payload_content: '{"key":"value"}' # legacy task, payload saved in payload_content (and not in request_content)\n request_content: >\n {\n "url": "/matrix-success",\n "http_method":"PUT",\n "headers": {\n "X-Head": "42"\n }\n }\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\hook_task.yml
hook_task.yml
YAML
636
0.8
0
0
awesome-app
147
2025-04-09T18:32:32.914671
Apache-2.0
false
1800db702ded43f68cefbccd741daee5
-\n id: 1\n repo_id: 1\n index: 1\n poster_id: 1\n original_author_id: 0\n name: issue1\n content: content for the first issue\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: false\n num_comments: 2\n created_unix: 946684800\n updated_unix: 978307200\n is_locked: false\n\n-\n id: 2\n repo_id: 1\n index: 2\n poster_id: 1\n original_author_id: 0\n name: issue2\n content: content for the second issue\n milestone_id: 1\n priority: 0\n is_closed: false\n is_pull: true\n num_comments: 0\n created_unix: 946684810\n updated_unix: 978307190\n is_locked: false\n\n-\n id: 3\n repo_id: 1\n index: 3\n poster_id: 1\n original_author_id: 0\n name: issue3\n content: content for the third issue\n milestone_id: 3\n priority: 0\n is_closed: false\n is_pull: true\n num_comments: 0\n created_unix: 946684820\n updated_unix: 978307180\n is_locked: false\n\n-\n id: 4\n repo_id: 2\n index: 1\n poster_id: 2\n original_author_id: 0\n name: issue4\n content: content for the fourth issue\n milestone_id: 0\n priority: 0\n is_closed: true\n is_pull: false\n num_comments: 1\n created_unix: 946684830\n updated_unix: 978307200\n is_locked: false\n\n-\n id: 5\n repo_id: 1\n index: 4\n poster_id: 2\n original_author_id: 0\n name: issue5\n content: content for the fifth issue\n milestone_id: 0\n priority: 0\n is_closed: true\n is_pull: false\n num_comments: 0\n created_unix: 946684840\n updated_unix: 978307200\n is_locked: false\n\n-\n id: 6\n repo_id: 3\n index: 1\n poster_id: 1\n original_author_id: 0\n name: issue6\n content: content6\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: false\n num_comments: 0\n created_unix: 946684850\n updated_unix: 978307200\n is_locked: false\n\n-\n id: 7\n repo_id: 2\n index: 2\n poster_id: 2\n original_author_id: 0\n name: issue7\n content: content for the seventh issue\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: false\n num_comments: 0\n created_unix: 946684830\n updated_unix: 978307200\n is_locked: false\n\n-\n id: 
8\n repo_id: 10\n index: 1\n poster_id: 11\n original_author_id: 0\n name: pr2\n content: a pull request\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: true\n num_comments: 0\n created_unix: 946684820\n updated_unix: 978307180\n is_locked: false\n\n-\n id: 9\n repo_id: 48\n index: 1\n poster_id: 11\n original_author_id: 0\n name: pr1\n content: a pull request\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: true\n num_comments: 0\n created_unix: 946684820\n updated_unix: 978307180\n is_locked: false\n\n-\n id: 10\n repo_id: 42\n index: 1\n poster_id: 500\n original_author_id: 0\n name: issue from deleted account\n content: content from deleted account\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: false\n num_comments: 0\n deadline_unix: 1019307200\n created_unix: 946684830\n updated_unix: 999307200\n is_locked: false\n\n-\n id: 11\n repo_id: 1\n index: 5\n poster_id: 1\n original_author_id: 0\n name: pull5\n content: content for the a pull request\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: true\n num_comments: 0\n created_unix: 1579194806\n updated_unix: 1579194806\n is_locked: false\n\n-\n id: 12\n repo_id: 3\n index: 2\n poster_id: 2\n original_author_id: 0\n name: pull6\n content: content for the a pull request\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: true\n num_comments: 0\n created_unix: 1602935696\n updated_unix: 1602935696\n is_locked: false\n\n-\n id: 13\n repo_id: 50\n index: 1\n poster_id: 2\n original_author_id: 0\n name: issue in active repo\n content: we'll be testing github issue 13171 with this.\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: false\n num_comments: 0\n created_unix: 1602935696\n updated_unix: 1602935696\n is_locked: false\n\n-\n id: 14\n repo_id: 51\n index: 1\n poster_id: 2\n original_author_id: 0\n name: issue in archived repo\n content: we'll be testing github issue 13171 with this.\n milestone_id: 0\n priority: 0\n is_closed: false\n 
is_pull: false\n num_comments: 0\n created_unix: 1602935696\n updated_unix: 1602935696\n is_locked: false\n\n-\n id: 15\n repo_id: 5\n index: 1\n poster_id: 2\n original_author_id: 0\n name: issue in repo not linked to team1\n content: content\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: false\n num_comments: 0\n created_unix: 1602935696\n updated_unix: 1602935696\n is_locked: false\n\n-\n id: 16\n repo_id: 32\n index: 1\n poster_id: 2\n original_author_id: 0\n name: just a normal issue\n content: content\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: false\n num_comments: 0\n created_unix: 1602935696\n updated_unix: 1602935696\n is_locked: false\n\n-\n id: 17\n repo_id: 32\n index: 2\n poster_id: 15\n original_author_id: 0\n name: a issue with a assignment\n content: content\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: false\n num_comments: 0\n created_unix: 1602935696\n updated_unix: 1602935696\n is_locked: false\n\n-\n id: 18\n repo_id: 55\n index: 1\n poster_id: 2\n original_author_id: 0\n name: issue for scoped labels\n content: content\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: false\n num_comments: 0\n created_unix: 946684830\n updated_unix: 978307200\n is_locked: false\n\n-\n id: 19\n repo_id: 58\n index: 1\n poster_id: 2\n original_author_id: 0\n name: issue for pr\n content: content\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: true\n num_comments: 0\n created_unix: 946684830\n updated_unix: 978307200\n is_locked: false\n\n-\n id: 20\n repo_id: 23\n index: 1\n poster_id: 2\n original_author_id: 0\n name: issue for pr\n content: content\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: true\n num_comments: 0\n created_unix: 978307210\n updated_unix: 978307210\n is_locked: false\n\n-\n id: 21\n repo_id: 60\n index: 1\n poster_id: 39\n original_author_id: 0\n name: repo60 pull1\n content: content for the 1st issue\n milestone_id: 0\n priority: 0\n is_closed: 
false\n is_pull: true\n num_comments: 0\n created_unix: 1707270422\n updated_unix: 1707270422\n is_locked: false\n\n-\n id: 22\n repo_id: 61\n index: 1\n poster_id: 40\n original_author_id: 0\n name: repo61 pull1\n content: content for the 1st issue\n milestone_id: 0\n priority: 0\n is_closed: false\n is_pull: true\n num_comments: 0\n created_unix: 1707270422\n updated_unix: 1707270422\n is_locked: false\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\issue.yml
issue.yml
YAML
6,357
0.7
0.034759
0
python-kit
150
2023-12-01T02:57:09.907231
GPL-3.0
false
2cd06b2d7ce5a80eb2959f666ef30dd3
-\n id: 1\n assignee_id: 1\n issue_id: 1\n-\n id: 2\n assignee_id: 1\n issue_id: 6\n-\n id: 3\n assignee_id: 2\n issue_id: 6\n-\n id: 4\n assignee_id: 2\n issue_id: 17\n-\n id: 5\n assignee_id: 10\n issue_id: 6\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\issue_assignees.yml
issue_assignees.yml
YAML
207
0.7
0
0
react-lib
334
2025-02-12T17:06:42.956282
GPL-3.0
false
d8644127193693a82ba47c1ebd6b51be
-\n group_id: 1\n max_index: 5\n\n-\n group_id: 2\n max_index: 2\n\n-\n group_id: 3\n max_index: 2\n\n-\n group_id: 10\n max_index: 1\n\n-\n group_id: 32\n max_index: 2\n\n-\n group_id: 48\n max_index: 1\n\n-\n group_id: 42\n max_index: 1\n\n-\n group_id: 50\n max_index: 1\n\n-\n group_id: 51\n max_index: 1\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\issue_index.yml
issue_index.yml
YAML
293
0.7
0
0
python-kit
4
2024-10-26T08:37:46.984411
GPL-3.0
false
d7dddd327a1c06517d52f20bffdacb6b
-\n id: 1\n issue_id: 1\n label_id: 1\n\n-\n id: 2\n issue_id: 5\n label_id: 2\n\n-\n id: 3\n issue_id: 2\n label_id: 1\n\n-\n id: 4\n issue_id: 2\n label_id: 4\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\issue_label.yml
issue_label.yml
YAML
155
0.7
0
0
react-lib
84
2024-05-01T07:25:06.471489
GPL-3.0
false
de6a7fb73179e4958178f26889a11084
-\n id: 1\n uid: 1\n issue_id: 1\n is_read: true\n is_mentioned: false\n\n-\n id: 2\n uid: 2\n issue_id: 1\n is_read: true\n is_mentioned: false\n\n-\n id: 3\n uid: 4\n issue_id: 1\n is_read: false\n is_mentioned: true\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\issue_user.yml
issue_user.yml
YAML
215
0.7
0
0
vue-tools
864
2024-09-05T09:14:45.849923
Apache-2.0
false
48f69e20837a9f7e26ad94397f53281b
-\n id: 1\n user_id: 9\n issue_id: 1\n is_watching: true\n created_unix: 946684800\n updated_unix: 946684800\n\n-\n id: 2\n user_id: 2\n issue_id: 2\n is_watching: false\n created_unix: 946684800\n updated_unix: 946684800\n\n-\n id: 3\n user_id: 2\n issue_id: 7\n is_watching: true\n created_unix: 946684800\n updated_unix: 946684800\n\n-\n id: 4\n user_id: 1\n issue_id: 7\n is_watching: false\n created_unix: 946684800\n updated_unix: 946684800\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\issue_watch.yml
issue_watch.yml
YAML
441
0.7
0
0
react-lib
355
2025-04-02T10:28:31.516722
GPL-3.0
false
4526868683b3f9a7a30ecf06d939dd5f
-\n id: 1\n repo_id: 1\n org_id: 0\n name: label1\n color: '#abcdef'\n exclusive: false\n num_issues: 2\n num_closed_issues: 0\n archived_unix: 0\n\n-\n id: 2\n repo_id: 1\n org_id: 0\n name: label2\n color: '#000000'\n exclusive: false\n num_issues: 1\n num_closed_issues: 1\n archived_unix: 0\n\n-\n id: 3\n repo_id: 0\n org_id: 3\n name: orglabel3\n color: '#abcdef'\n exclusive: false\n num_issues: 0\n num_closed_issues: 0\n archived_unix: 0\n\n-\n id: 4\n repo_id: 0\n org_id: 3\n name: orglabel4\n color: '#000000'\n exclusive: false\n num_issues: 1\n num_closed_issues: 0\n archived_unix: 0\n\n-\n id: 5\n repo_id: 10\n org_id: 0\n name: pull-test-label\n color: '#000000'\n exclusive: false\n num_issues: 0\n num_closed_issues: 0\n archived_unix: 0\n\n-\n id: 6\n repo_id: 55\n org_id: 0\n name: unscoped_label\n color: '#000000'\n exclusive: false\n num_issues: 0\n num_closed_issues: 0\n archived_unix: 0\n\n-\n id: 7\n repo_id: 55\n org_id: 0\n name: scope/label1\n color: '#000000'\n exclusive: true\n num_issues: 0\n num_closed_issues: 0\n archived_unix: 0\n\n-\n id: 8\n repo_id: 55\n org_id: 0\n name: scope/label2\n color: '#000000'\n exclusive: true\n num_issues: 0\n num_closed_issues: 0\n archived_unix: 0\n\n-\n id: 9\n repo_id: 55\n org_id: 0\n name: scope/subscope/label2\n color: '#000000'\n exclusive: true\n num_issues: 0\n num_closed_issues: 0\n archived_unix: 0\n\n-\n id: 10\n repo_id: 3\n org_id: 0\n name: repo3label1\n color: '#112233'\n exclusive: false\n num_issues: 0\n num_closed_issues: 0\n archived_unix: 0\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\label.yml
label.yml
YAML
1,527
0.8
0
0
awesome-app
448
2025-01-26T04:11:50.428037
BSD-3-Clause
false
8e035f00c4be6cdd7165e1cae5593e08
# These are the LFS objects in user2/lfs.git\n-\n\n id: 1\n oid: 0b8d8b5f15046343fd32f451df93acc2bdd9e6373be478b968e4cad6b6647351\n size: 107\n repository_id: 54\n created_unix: 1671607299\n\n-\n\n id: 2\n oid: 2eccdb43825d2a49d99d542daa20075cff1d97d9d2349a8977efe9c03661737c\n size: 2048\n repository_id: 54\n created_unix: 1671607299\n\n-\n\n id: 3\n oid: 7b6b2c88dba9f760a1a58469b67fee2b698ef7e9399c4ca4f34a14ccbe39f623\n size: 27\n repository_id: 54\n created_unix: 1671607299\n\n-\n\n id: 4\n oid: 9d172e5c64b4f0024b9901ec6afe9ea052f3c9b6ff9f4b07956d8c48c86fca82\n size: 25\n repository_id: 54\n created_unix: 1671607299\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\lfs_meta_object.yml
lfs_meta_object.yml
YAML
615
0.8
0
0.04
awesome-app
990
2025-06-30T01:45:26.018210
BSD-3-Clause
false
69a9fc6d8bd4d053c9b0a76270bdbf26
-\n id: 1\n repo_id: 1\n name: milestone1\n content: content1\n is_closed: false\n num_issues: 1\n num_closed_issues: 0\n completeness: 0\n deadline_unix: 253370764800\n\n-\n id: 2\n repo_id: 1\n name: milestone2\n content: content2\n is_closed: false\n num_issues: 0\n num_closed_issues: 0\n completeness: 0\n deadline_unix: 253370764800\n\n-\n id: 3\n repo_id: 1\n name: milestone3\n content: content3\n is_closed: true\n num_issues: 1\n num_closed_issues: 0\n completeness: 0\n deadline_unix: 253370764800\n\n-\n id: 4\n repo_id: 42\n name: milestone of repo42\n content: content random\n is_closed: false\n num_issues: 0\n num_closed_issues: 0\n completeness: 0\n deadline_unix: 253370764800\n\n-\n id: 5\n repo_id: 10\n name: milestone of repo 10\n content: for testing with PRs\n is_closed: false\n num_issues: 0\n num_closed_issues: 0\n completeness: 0\n deadline_unix: 253370764800\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\milestone.yml
milestone.yml
YAML
882
0.7
0.018519
0
vue-tools
546
2023-10-02T15:06:42.386171
MIT
false
07a1b5f2d0ddc1f19f20ae9587d14fa6
-\n id: 1\n repo_id: 5\n interval: 3600\n enable_prune: false\n updated_unix: 0\n next_update_unix: 0\n lfs_enabled: false\n lfs_endpoint: ""\n\n-\n id: 2\n repo_id: 25\n interval: 3600\n enable_prune: false\n updated_unix: 0\n next_update_unix: 0\n lfs_enabled: false\n lfs_endpoint: ""\n\n-\n id: 3\n repo_id: 26\n interval: 3600\n enable_prune: false\n updated_unix: 0\n next_update_unix: 0\n lfs_enabled: false\n lfs_endpoint: ""\n\n-\n id: 4\n repo_id: 27\n interval: 3600\n enable_prune: false\n updated_unix: 0\n next_update_unix: 0\n lfs_enabled: false\n lfs_endpoint: ""\n\n-\n id: 5\n repo_id: 28\n interval: 3600\n enable_prune: false\n updated_unix: 0\n next_update_unix: 0\n lfs_enabled: false\n lfs_endpoint: ""\n
dataset_sample\yaml\go-gitea_gitea\models\fixtures\mirror.yml
mirror.yml
YAML
718
0.7
0
0
awesome-app
420
2024-07-10T07:30:32.923386
GPL-3.0
false
d3e56ca0dfc6f0a645d579231ec35583