content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
# Much of this workflow is adapted from the ripgrep release workflow.\n# https://github.com/BurntSushi/ripgrep/blob/master/.github/workflows/release.yml\n\nname: release\n\non:\n push:\n # Enable when testing release infrastructure on a branch.\n # branches:\n # - fix-releases\n tags:\n - 'v*'\n workflow_dispatch:\n\npermissions:\n contents: read # This is set more permissively in jobs that need `write`.\n\ndefaults:\n run:\n shell: bash # Use `bash` even in the Windows jobs.\n\njobs:\n # Create a draft release, initially with no binary assets attached.\n create-release:\n runs-on: ubuntu-latest\n\n permissions:\n contents: write\n\n# env:\n# # Set to force version number, e.g., when no tag exists.\n# VERSION: TEST-0.0.0\n\n steps:\n - name: Checkout repository\n uses: actions/checkout@v4\n\n - name: Get the release version from the tag\n if: env.VERSION == ''\n run: echo "VERSION=$REF_NAME" >> "$GITHUB_ENV"\n env:\n REF_NAME: ${{ github.ref_name }}\n\n - name: Validate version against Cargo.toml\n run: |\n manifest_version="$(yq -r .package.version Cargo.toml)"\n echo "version to name the release: $VERSION"\n echo "version Cargo.toml suggests: v$manifest_version"\n\n case "$VERSION" in\n "v$manifest_version" )\n echo 'OK: Release name/version agrees with Cargo.toml version.'\n ;;\n TEST-* | *-DO-NOT-USE ) # NOTE: If changed, change it in `announce-release` below, too.\n echo 'OK: Release name/version is strange but marked as such.'\n ;;\n "$manifest_version" )\n echo 'STOPPING: Release name/version is missing the leading "v".'\n exit 1\n ;;\n * )\n echo 'STOPPING: Release name/version and Cargo.toml version do not match.'\n echo 'STOPPING: Usually this means either a wrong tag name or wrong version in Cargo.toml.'\n echo 'STOPPING: If intended, prepend `TEST-` or append `-DO-NOT-USE` to the release name.'\n exit 1\n ;;\n esac\n\n - name: Create GitHub release\n run: gh release create "$VERSION" --title="$VERSION" --draft\n env:\n GITHUB_TOKEN: ${{ 
secrets.GITHUB_TOKEN }}\n\n outputs:\n version: ${{ env.VERSION }}\n\n # Build for a particular feature and target, and attach an archive for it.\n build-release:\n needs: [ create-release ]\n\n strategy:\n matrix:\n target:\n - x86_64-unknown-linux-musl\n - x86_64-unknown-linux-gnu\n - i686-unknown-linux-musl\n - i686-unknown-linux-gnu\n - aarch64-unknown-linux-musl\n - aarch64-unknown-linux-gnu\n - arm-unknown-linux-musleabihf\n - arm-unknown-linux-gnueabihf\n - powerpc64le-unknown-linux-gnu\n - riscv64gc-unknown-linux-gnu\n - s390x-unknown-linux-gnu\n - x86_64-apple-darwin\n - aarch64-apple-darwin\n - x86_64-pc-windows-msvc\n - x86_64-pc-windows-gnu\n - i686-pc-windows-msvc\n - aarch64-pc-windows-msvc\n # When changing these features, make the same change in build-macos-universal2-release.\n feature: [ small, lean, max, max-pure ]\n include:\n - rust: stable\n - target: x86_64-unknown-linux-musl\n os: ubuntu-latest\n - target: x86_64-unknown-linux-gnu\n os: ubuntu-latest\n - target: i686-unknown-linux-musl\n os: ubuntu-latest\n - target: i686-unknown-linux-gnu\n os: ubuntu-latest\n - target: aarch64-unknown-linux-musl\n os: ubuntu-latest\n - target: aarch64-unknown-linux-gnu\n os: ubuntu-latest\n - target: arm-unknown-linux-musleabihf\n os: ubuntu-latest\n - target: arm-unknown-linux-gnueabihf\n os: ubuntu-latest\n - target: powerpc64le-unknown-linux-gnu\n os: ubuntu-latest\n - target: riscv64gc-unknown-linux-gnu\n os: ubuntu-latest\n - target: s390x-unknown-linux-gnu\n os: ubuntu-latest\n - target: x86_64-apple-darwin\n os: macos-latest\n - target: aarch64-apple-darwin\n os: macos-latest\n - target: x86_64-pc-windows-msvc\n os: windows-latest\n - target: x86_64-pc-windows-gnu\n os: windows-latest\n rust: stable-x86_64-gnu\n - target: i686-pc-windows-msvc\n os: windows-latest\n - target: aarch64-pc-windows-msvc\n os: windows-latest\n # on linux we build with musl which causes trouble with open-ssl. 
For now, just build max-pure there.\n # It's a TODO.\n exclude:\n - target: x86_64-unknown-linux-musl\n feature: small\n - target: x86_64-unknown-linux-musl\n feature: lean\n - target: x86_64-unknown-linux-musl\n feature: max\n - target: x86_64-unknown-linux-gnu\n feature: small\n - target: x86_64-unknown-linux-gnu\n feature: lean\n - target: x86_64-unknown-linux-gnu\n feature: max\n - target: i686-unknown-linux-musl\n feature: small\n - target: i686-unknown-linux-musl\n feature: lean\n - target: i686-unknown-linux-musl\n feature: max\n - target: i686-unknown-linux-gnu\n feature: small\n - target: i686-unknown-linux-gnu\n feature: lean\n - target: i686-unknown-linux-gnu\n feature: max\n - target: aarch64-unknown-linux-musl\n feature: small\n - target: aarch64-unknown-linux-musl\n feature: lean\n - target: aarch64-unknown-linux-musl\n feature: max\n - target: aarch64-unknown-linux-gnu\n feature: small\n - target: aarch64-unknown-linux-gnu\n feature: lean\n - target: aarch64-unknown-linux-gnu\n feature: max\n - target: arm-unknown-linux-musleabihf\n feature: small\n - target: arm-unknown-linux-musleabihf\n feature: lean\n - target: arm-unknown-linux-musleabihf\n feature: max\n - target: arm-unknown-linux-gnueabihf\n feature: small\n - target: arm-unknown-linux-gnueabihf\n feature: lean\n - target: arm-unknown-linux-gnueabihf\n feature: max\n - target: powerpc64le-unknown-linux-gnu\n feature: small\n - target: powerpc64le-unknown-linux-gnu\n feature: lean\n - target: powerpc64le-unknown-linux-gnu\n feature: max\n - target: riscv64gc-unknown-linux-gnu\n feature: small\n - target: riscv64gc-unknown-linux-gnu\n feature: lean\n - target: riscv64gc-unknown-linux-gnu\n feature: max\n - target: s390x-unknown-linux-gnu\n feature: small\n - target: s390x-unknown-linux-gnu\n feature: lean\n - target: s390x-unknown-linux-gnu\n feature: max\n\n runs-on: ${{ matrix.os }}\n\n permissions:\n contents: write\n\n env:\n RUST_BACKTRACE: '1' # Emit backtraces on panics.\n 
CARGO_TERM_COLOR: always\n CLICOLOR: '1'\n CARGO: cargo # On Linux, this will be changed to `cross` in a later step.\n FEATURE: ${{ matrix.feature }}\n VERSION: ${{ needs.create-release.outputs.version }}\n TARGET: ${{ matrix.target }}\n TARGET_FLAGS: --target=${{ matrix.target }}\n TARGET_DIR: target/${{ matrix.target }}\n PROFILE: release-github\n\n steps:\n - name: Checkout repository\n uses: actions/checkout@v4\n\n - name: Install packages (Ubuntu)\n # Because openssl doesn't work on musl by default, we resort to max-pure.\n # And that won't need any dependency, so we can skip this or use `continue-on-error`.\n if: matrix.os == 'ubuntu-latest-disabled'\n run: |\n sudo apt-get update\n sudo apt-get install -y --no-install-recommends xz-utils liblz4-tool musl-tools\n\n - name: Install Rust\n uses: dtolnay/rust-toolchain@master\n with:\n toolchain: ${{ matrix.rust }}\n targets: ${{ matrix.target }}\n\n - name: Use Cross\n if: matrix.os == 'ubuntu-latest'\n run: |\n cargo install cross\n echo 'CARGO=cross' >> "$GITHUB_ENV"\n\n - name: Show command used for Cargo\n run: |\n echo "cargo command is: $CARGO"\n echo "target flag is: $TARGET_FLAGS"\n echo "target dir is: $TARGET_DIR"\n\n - name: Build release binary (with extra optimizations)\n run: |\n "$CARGO" build --verbose --profile="$PROFILE" "$TARGET_FLAGS" --no-default-features --features="$FEATURE"\n\n - name: Determine archive basename\n run: echo "ARCHIVE=gitoxide-$FEATURE-$VERSION-$TARGET" >> "$GITHUB_ENV"\n\n - name: Pre-populate directory for archive\n run: |\n mkdir -- "$ARCHIVE"\n cp -- {README.md,LICENSE-*,CHANGELOG.md} "$ARCHIVE/"\n\n - name: Build archive (Windows)\n if: matrix.os == 'windows-latest'\n run: |\n file -- "$TARGET_DIR/$PROFILE"/{ein,gix}.exe\n cp -- "$TARGET_DIR/$PROFILE"/{ein,gix}.exe "$ARCHIVE/"\n 7z a "$ARCHIVE.zip" "$ARCHIVE"\n /usr/bin/core_perl/shasum --algorithm=256 --binary -- "$ARCHIVE.zip" > "$ARCHIVE.zip.sha256"\n echo "ASSET=$ARCHIVE.zip" >> "$GITHUB_ENV"\n echo 
"ASSET_SUM=$ARCHIVE.zip.sha256" >> "$GITHUB_ENV"\n\n - name: Build archive (Unix)\n if: matrix.os != 'windows-latest'\n run: |\n file -- "$TARGET_DIR/$PROFILE"/{ein,gix}\n cp -- "$TARGET_DIR/$PROFILE"/{ein,gix} "$ARCHIVE/"\n tar czf "$ARCHIVE.tar.gz" -- "$ARCHIVE"\n shasum --algorithm=256 --binary -- "$ARCHIVE.tar.gz" > "$ARCHIVE.tar.gz.sha256"\n echo "ASSET=$ARCHIVE.tar.gz" >> "$GITHUB_ENV"\n echo "ASSET_SUM=$ARCHIVE.tar.gz.sha256" >> "$GITHUB_ENV"\n\n - name: Upload release archive\n run: gh release upload "$VERSION" "$ASSET" "$ASSET_SUM"\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n # Add a macOS universal binary archive for a feature using its built aarch64 and x86_64 assets.\n build-macos-universal2-release:\n runs-on: macos-latest\n\n needs: [ create-release, build-release ]\n\n strategy:\n matrix:\n # These features need to be exactly the same as the features in build-release.\n feature: [ small, lean, max, max-pure ]\n\n permissions:\n contents: write\n\n env:\n BASH_ENV: ./helpers.sh\n REPOSITORY: ${{ github.repository }}\n FEATURE: ${{ matrix.feature }}\n VERSION: ${{ needs.create-release.outputs.version }}\n\n steps:\n - name: Define helper function\n run: |\n name() { echo "gitoxide-$FEATURE-$VERSION-$1-apple-darwin"; }\n declare -f name >> "$BASH_ENV"\n\n - name: Obtain single-architecture releases\n run: |\n gh release --repo="$REPOSITORY" download "$VERSION" \\n --pattern="$(name aarch64).tar.gz" --pattern="$(name aarch64).tar.gz.sha256" \\n --pattern="$(name x86_64).tar.gz" --pattern="$(name x86_64).tar.gz.sha256"\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n - name: Unpack single-architecture releases\n run: |\n shasum --check -- "$(name aarch64).tar.gz.sha256" "$(name x86_64).tar.gz.sha256"\n tar xf "$(name aarch64).tar.gz"\n tar xf "$(name x86_64).tar.gz"\n\n - name: Determine archive basename\n run: echo "ARCHIVE=$(name universal)" >> "$GITHUB_ENV"\n\n - name: Pre-populate directory for archive\n run: |\n cp -R -- "$(name 
aarch64)" "$ARCHIVE"\n rm -- "$ARCHIVE"/{ein,gix}\n\n - name: Create Universal 2 binaries\n run: |\n for bin in ein gix; do\n lipo -create "$(name aarch64)/$bin" "$(name x86_64)/$bin" -output "$ARCHIVE/$bin"\n file -- "$ARCHIVE/$bin"\n done\n\n - name: Build archive\n run: |\n tar czf "$ARCHIVE.tar.gz" -- "$ARCHIVE"\n shasum --algorithm=256 --binary -- "$ARCHIVE.tar.gz" > "$ARCHIVE.tar.gz.sha256"\n echo "ASSET=$ARCHIVE.tar.gz" >> "$GITHUB_ENV"\n echo "ASSET_SUM=$ARCHIVE.tar.gz.sha256" >> "$GITHUB_ENV"\n\n - name: Upload release archive\n run: gh release --repo="$REPOSITORY" upload "$VERSION" "$ASSET" "$ASSET_SUM"\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n # Check for some problems, consolidate checksum files into one, and mark the release non-draft.\n publish-release:\n runs-on: ubuntu-latest\n\n needs: [ create-release, build-release, build-macos-universal2-release ]\n\n permissions:\n contents: write\n\n env:\n REPOSITORY: ${{ github.repository }}\n VERSION: ${{ needs.create-release.outputs.version }}\n\n steps:\n - name: Discover assets\n run: |\n gh release --repo="$REPOSITORY" view "$VERSION" --json assets --jq '.assets.[].name' > assets.txt\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n - name: Show all individual asset names\n run: cat assets.txt\n\n # The `features` array is repeated because GHA doesn't support YAML anchors.\n # We will check that the macOS `universal` features match the others exactly.\n # In the future this and the next step may be removed, or expanded to do more validation.\n - name: Extract macOS asset names by architecture\n run: |\n for arch in aarch64 x86_64 universal; do\n grep -Fwe "$arch-apple-darwin" assets.txt | sort | tee -- "$arch.txt"\n done\n\n - name: Check macOS archive features\n run: |\n mask() { sed -E 's/\w+-apple-darwin/<arch>-apple-darwin/' -- "$1.txt"; }\n diff -- <(mask aarch64) <(mask universal)\n diff -- <(mask x86_64) <(mask universal)\n\n - name: Clean up local temporary macOS asset list 
files\n run: rm {assets,aarch64,x86_64,universal}.txt\n\n - name: Retrieve all individual checksums\n run: gh release --repo="$REPOSITORY" download "$VERSION" --pattern='gitoxide-*.sha256'\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n - name: Concatenate checksums into one file\n run: cat gitoxide-*.sha256 > hashes.sha256\n\n - name: Upload the combined checksum file\n run: gh release --repo="$REPOSITORY" upload "$VERSION" hashes.sha256\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n # If any step of any job fails before this, the draft still has the individual checksum files.\n - name: Remove the individual checksum file assets\n run: |\n for sumfile in gitoxide-*.sha256; do\n gh release --repo="$REPOSITORY" delete-asset "$VERSION" "$sumfile" --yes\n done\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n - name: Publish the release\n if: vars.DRY_RUN_RELEASE != 'true' && vars.DRY_RUN_RELEASE != 'yes' && vars.DRY_RUN_RELEASE != '1'\n run: gh release --repo="$REPOSITORY" edit "$VERSION" --draft=false\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n # Comment in a locked discussion that notifies about only `gitoxide` (e.g. not `gix-*`) releases.\n announce-release:\n runs-on: ubuntu-latest\n\n needs: [ create-release, publish-release ]\n\n permissions:\n contents: write # Needed to distinguish unpublished (still draft) from missing releases.\n discussions: write\n\n env:\n REPOSITORY: ${{ github.repository }}\n VERSION: ${{ needs.create-release.outputs.version }}\n DISCUSSION_URL: ${{ vars.RELEASE_ANNOUNCEMENTS_URL }}\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n steps:\n - name: Find the discussion ID\n run: |\n [[ "$DISCUSSION_URL" =~ ^https://github\.com/([^/:@]+)/([^/:@]+)/discussions/([0-9]+)$ ]]\n owner="${BASH_REMATCH[1]}"\n name="${BASH_REMATCH[2]}"\n number="${BASH_REMATCH[3]}"\n\n id="$(gh api graphql -f query='\n query GetDiscussionId($owner: String!, $name: String!, $number: Int!) 
{\n repository(owner: $owner, name: $name) {\n discussion(number: $number) {\n id\n }\n }\n }' -F owner="$owner" -F name="$name" -F number="$number" --jq .data.repository.discussion.id)"\n\n echo "DISCUSSION_ID=$id" >> "$GITHUB_ENV"\n\n - name: Avoid announcing a test in a non-test thread\n run: |\n case "$VERSION" in\n TEST-* | *-DO-NOT-USE ) # NOTE: Should be the same pattern as in `create-release` above.\n echo "The release name indicates testing, so we'll only post if the thread is for that."\n ;;\n * )\n is_draft="$(gh release --repo="$REPOSITORY" view "$VERSION" --json isDraft --jq .isDraft)"\n if [ "$is_draft" = false ]; then\n exit 0 # OK to post in a non-test announcement thread.\n fi\n echo "The release is not published, so we'll only post if the thread is for testing."\n ;;\n esac\n\n title="$(gh api graphql -f query='\n query($id: ID!) {\n node(id: $id) {\n ... on Discussion {\n title\n }\n }\n }' -F id="$DISCUSSION_ID" --jq .data.node.title)"\n\n grep -Eiqz '^[[(]?test\b' <<<"$title"\n\n - name: Post the comment\n run: |\n grep -Eqx '[[:alnum:]._+-]+' <<<"$VERSION" # Ensure the version needs no sanitization.\n release_url="https://github.com/$REPOSITORY/releases/tag/$VERSION"\n comment_body="\`gitoxide\` [$VERSION]($release_url) has been released."\n\n gh api graphql -f query='\n mutation PostComment($discussionId: ID!, $body: String!) 
{\n addDiscussionComment(input: {discussionId: $discussionId, body: $body}) {\n comment {\n id\n body\n }\n }\n }' -F discussionId="$DISCUSSION_ID" -F body="$comment_body"\n\n installation:\n strategy:\n matrix:\n build: [ win-msvc, win-gnu, win32-msvc, win32-gnu ]\n include:\n - build: win-msvc\n os: windows-latest\n rust: stable\n target: x86_64-pc-windows-msvc\n - build: win-gnu\n os: windows-latest\n rust: stable-x86_64-gnu\n target: x86_64-pc-windows-gnu\n - build: win32-msvc\n os: windows-latest\n rust: stable\n target: i686-pc-windows-msvc\n - build: win32-gnu\n os: windows-latest\n rust: stable\n target: i686-pc-windows-gnu\n\n runs-on: ${{ matrix.os }}\n\n steps:\n - uses: actions/checkout@v4\n - name: Install Rust\n uses: dtolnay/rust-toolchain@master\n with:\n toolchain: ${{ matrix.rust }}\n targets: ${{ matrix.target }}\n - uses: msys2/setup-msys2@v2\n with:\n msystem: MINGW${{ startsWith(matrix.target, 'i686-') && '32' || '64' }}\n pacboy: cc:p\n path-type: inherit\n - name: 'Installation from crates.io: gitoxide'\n run: cargo +${{ matrix.rust }} install --target ${{ matrix.target }} --no-default-features --features max-pure --target-dir install-artifacts --debug --force gitoxide\n shell: msys2 {0}\n
dataset_sample\yaml\GitoxideLabs_gitoxide\.github\workflows\release.yml
release.yml
YAML
19,563
0.95
0.040367
0.052743
python-kit
72
2025-04-08T22:25:16.660917
GPL-3.0
false
7dea0ae06d6ab54c65048469fd671d14
governance:\n type: BDFL\n BDFL: byron\n open for alternatives: yes, as project matures\n\nmaintainers:\n privileges:\n - push directly to 'main' branch\n - reviews or suggestions by other maintainers if PRs are used\n - co-ownership of contributed crates\n - receive sponsorship via GitHub from project page\n \n\n
dataset_sample\yaml\GitoxideLabs_gitoxide\.gov\info.yml
info.yml
YAML
319
0.7
0.153846
0
node-utils
307
2023-09-17T22:40:30.502531
GPL-3.0
false
e70595d224ab70bd5f84426e1d6e14a9
version: 2\nupdates:\n- package-ecosystem: cargo\n directory: "/"\n schedule:\n interval: daily\n open-pull-requests-limit: 10\n
dataset_sample\yaml\gitui-org_gitui\.github\dependabot.yml
dependabot.yml
YAML
127
0.7
0
0
python-kit
485
2023-09-05T14:12:37.389751
Apache-2.0
false
8c0950a887b7a973dbbbe9a50b09443c
# Number of days of inactivity before an issue becomes stale\ndaysUntilStale: 180\n# Number of days of inactivity before a stale issue is closed\ndaysUntilClose: 14\n# Issues with these labels will never be considered stale\nexemptLabels:\n - pinned\n - security\n - nostale\n# Label to use when marking an issue as stale\nstaleLabel: dormant\n# Comment to post when marking an issue as stale. Set to `false` to disable\nmarkComment: >\n This issue has been automatically marked as stale because it has not had\n any activity half a year. It will be closed in 14 days if no further activity occurs. Thank you\n for your contributions.\n# Comment to post when closing a stale issue. Set to `false` to disable\ncloseComment: false\n
dataset_sample\yaml\gitui-org_gitui\.github\stale.yml
stale.yml
YAML
718
0.8
0.111111
0.333333
awesome-app
634
2024-10-21T17:37:50.849937
BSD-3-Clause
false
d8713abef1676a557f1895d77b10748f
name: brew update\n\non:\n # only manually\n workflow_dispatch:\n inputs:\n tag-name:\n required: true\n description: 'release tag'\n\njobs:\n update_brew:\n runs-on: ubuntu-latest\n steps:\n - name: Bump homebrew-core formula\n uses: mislav/bump-homebrew-formula-action@v3\n env:\n COMMITTER_TOKEN: ${{ secrets.BREW_TOKEN }}\n with:\n formula-name: gitui\n # https://github.com/mislav/bump-homebrew-formula-action/issues/58\n formula-path: Formula/g/gitui.rb\n tag-name: ${{ github.event.inputs.tag-name }}\n
dataset_sample\yaml\gitui-org_gitui\.github\workflows\brew.yml
brew.yml
YAML
570
0.95
0
0.095238
awesome-app
254
2024-10-11T04:21:16.878657
Apache-2.0
false
bd58b10d11fb15382a304597f2c6d49e
name: CD\n\non:\n push:\n tags:\n - '*'\n workflow_dispatch:\n\njobs:\n release:\n strategy:\n fail-fast: false\n matrix:\n os: [ubuntu-latest, macos-latest, windows-latest, ubuntu-22.04]\n runs-on: ${{ matrix.os }}\n\n steps:\n - uses: actions/checkout@v4\n\n - name: Get version\n id: get_version\n run: echo "version=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT\n\n - name: Restore cargo cache\n uses: Swatinem/rust-cache@v2\n env:\n cache-name: ci\n with:\n shared-key: ${{ matrix.os }}-${{ env.cache-name }}-stable\n\n - name: Install Rust\n uses: dtolnay/rust-toolchain@stable\n with:\n components: clippy\n\n - name: Build\n if: matrix.os != 'ubuntu-22.04'\n env:\n GITUI_RELEASE: 1\n run: cargo build\n - name: Run tests\n if: matrix.os != 'ubuntu-22.04'\n run: make test\n - name: Run clippy\n if: matrix.os != 'ubuntu-22.04'\n run: |\n cargo clean\n make clippy\n\n - name: Setup MUSL\n if: matrix.os == 'ubuntu-latest'\n run: |\n rustup target add x86_64-unknown-linux-musl\n sudo apt-get -qq install musl-tools\n\n - name: Setup ARM toolchain\n if: matrix.os == 'ubuntu-22.04'\n run: |\n rustup target add aarch64-unknown-linux-gnu\n rustup target add armv7-unknown-linux-gnueabihf\n rustup target add arm-unknown-linux-gnueabihf\n\n curl -o $GITHUB_WORKSPACE/aarch64.tar.xz https://armkeil.blob.core.windows.net/developer/Files/downloads/gnu-a/8.2-2018.08/gcc-arm-8.2-2018.08-x86_64-aarch64-linux-gnu.tar.xz\n curl -o $GITHUB_WORKSPACE/arm.tar.xz https://armkeil.blob.core.windows.net/developer/Files/downloads/gnu-a/8.2-2018.08/gcc-arm-8.2-2018.08-x86_64-arm-linux-gnueabihf.tar.xz\n\n tar xf $GITHUB_WORKSPACE/aarch64.tar.xz\n tar xf $GITHUB_WORKSPACE/arm.tar.xz\n\n echo "$GITHUB_WORKSPACE/gcc-arm-8.2-2018.08-x86_64-aarch64-linux-gnu/bin" >> $GITHUB_PATH\n echo "$GITHUB_WORKSPACE/gcc-arm-8.2-2018.08-x86_64-arm-linux-gnueabihf/bin" >> $GITHUB_PATH\n\n - name: Build Release Mac\n if: matrix.os == 'macos-latest'\n env:\n GITUI_RELEASE: 1\n run: make release-mac\n - name: Build 
Release Mac x86\n if: matrix.os == 'macos-latest'\n env:\n GITUI_RELEASE: 1\n run: |\n rustup target add x86_64-apple-darwin\n make release-mac-x86\n - name: Build Release Linux\n if: matrix.os == 'ubuntu-latest'\n env:\n GITUI_RELEASE: 1\n run: make release-linux-musl\n - name: Build Release Win\n if: matrix.os == 'windows-latest'\n env:\n GITUI_RELEASE: 1\n run: make release-win\n - name: Build Release Linux ARM\n if: matrix.os == 'ubuntu-22.04'\n env:\n GITUI_RELEASE: 1\n run: make release-linux-arm\n\n - name: Set SHA\n if: matrix.os == 'macos-latest'\n id: shasum\n run: |\n echo sha="$(shasum -a 256 ./release/gitui-mac.tar.gz | awk '{printf $1}')" >> $GITHUB_OUTPUT\n\n - name: Extract release notes\n if: matrix.os == 'ubuntu-latest'\n id: release_notes\n uses: ffurrer2/extract-release-notes@v2\n - name: Release\n uses: softprops/action-gh-release@v2\n env:\n GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n with:\n body: ${{ steps.release_notes.outputs.release_notes }}\n prerelease: ${{ contains(github.ref, '-') }}\n files: |\n ./release/*.tar.gz\n ./release/*.zip\n ./release/*.msi\n\n - name: Bump homebrew-core formula\n uses: mislav/bump-homebrew-formula-action@v3\n if: "matrix.os == 'macos-latest' && !contains(github.ref, '-')" # skip prereleases\n env:\n COMMITTER_TOKEN: ${{ secrets.BREW_TOKEN }}\n with:\n formula-name: gitui\n # https://github.com/mislav/bump-homebrew-formula-action/issues/58\n formula-path: Formula/g/gitui.rb\n
dataset_sample\yaml\gitui-org_gitui\.github\workflows\cd.yml
cd.yml
YAML
3,901
0.8
0.1
0.008772
python-kit
945
2024-10-22T13:36:24.079606
GPL-3.0
false
d6c0979bf8666babf79941f590b6cc66
name: CI\n\non:\n schedule:\n - cron: '0 2 * * *'\n push:\n branches: [ '*' ]\n pull_request:\n branches: [ master ]\n\nenv:\n CARGO_TERM_COLOR: always\n\njobs:\n build:\n strategy:\n fail-fast: false\n matrix:\n os: [ubuntu-latest, macos-latest, windows-latest]\n rust: [nightly, stable, '1.81']\n runs-on: ${{ matrix.os }}\n continue-on-error: ${{ matrix.rust == 'nightly' }}\n\n steps:\n - uses: actions/checkout@v4\n\n - name: Restore cargo cache\n uses: Swatinem/rust-cache@v2\n env:\n cache-name: ci\n with:\n shared-key: ${{ matrix.os }}-${{ env.cache-name }}-${{ matrix.rust }}\n\n - name: MacOS Workaround\n if: matrix.os == 'macos-latest'\n run: cargo clean -p serde_derive -p thiserror\n\n - name: Install Rust\n uses: dtolnay/rust-toolchain@master\n with:\n toolchain: ${{ matrix.rust }}\n components: clippy\n\n - name: Override rust toolchain\n run: rustup override set ${{ matrix.rust }}\n\n - name: Rustup Show\n run: rustup show\n\n - name: Build Debug\n run: |\n cargo build\n\n - name: Run tests\n run: make test\n\n - name: Run clippy\n run: |\n make clippy\n\n - name: Build Release\n run: make build-release\n\n - name: Test Install\n run: cargo install --path "." 
--force --locked\n\n - name: Binary Size (unix)\n if: matrix.os != 'windows-latest'\n run: |\n ls -l ./target/release/gitui\n\n - name: Binary Size (win)\n if: matrix.os == 'windows-latest'\n run: |\n ls -l ./target/release/gitui.exe\n\n - name: Binary dependencies (mac)\n if: matrix.os == 'macos-latest'\n run: |\n otool -L ./target/release/gitui\n\n - name: Build MSI (windows)\n if: matrix.os == 'windows-latest'\n run: |\n cargo install cargo-wix --version 0.3.3\n cargo wix --version\n cargo wix -p gitui --no-build --nocapture --output ./target/wix/gitui-win.msi\n ls -l ./target/wix/gitui-win.msi\n\n build-linux-musl:\n runs-on: ubuntu-latest\n strategy:\n fail-fast: false\n matrix:\n rust: [nightly, stable, '1.81']\n continue-on-error: ${{ matrix.rust == 'nightly' }}\n steps:\n - uses: actions/checkout@v4\n\n - name: Restore cargo cache\n uses: Swatinem/rust-cache@v2\n env:\n cache-name: ci\n with:\n key: ubuntu-latest-${{ env.cache-name }}-${{ matrix.rust }}\n\n - name: Install Rust\n uses: dtolnay/rust-toolchain@master\n with:\n toolchain: ${{ matrix.rust }}\n targets: x86_64-unknown-linux-musl\n\n # The build would fail without manually installing the target.\n # https://github.com/dtolnay/rust-toolchain/issues/83\n - name: Manually install target\n run: rustup target add x86_64-unknown-linux-musl\n\n - name: Override rust toolchain\n run: rustup override set ${{ matrix.rust }}\n\n - name: Rustup Show\n run: rustup show\n\n - name: Setup MUSL\n run: |\n sudo apt-get -qq install musl-tools\n - name: Build Debug\n run: |\n make build-linux-musl-debug\n ./target/x86_64-unknown-linux-musl/debug/gitui --version\n - name: Build Release\n run: |\n make build-linux-musl-release\n ./target/x86_64-unknown-linux-musl/release/gitui --version\n ls -l ./target/x86_64-unknown-linux-musl/release/gitui\n - name: Test\n run: |\n make test-linux-musl\n - name: Test Install\n run: cargo install --path "." 
--force --locked\n\n build-linux-arm:\n runs-on: ubuntu-latest\n strategy:\n fail-fast: false\n matrix:\n rust: [nightly, stable, '1.81']\n continue-on-error: ${{ matrix.rust == 'nightly' }}\n steps:\n - uses: actions/checkout@v4\n\n - name: Restore cargo cache\n uses: Swatinem/rust-cache@v2\n env:\n cache-name: ci\n with:\n key: ubuntu-latest-${{ env.cache-name }}-${{ matrix.rust }}\n\n - name: Install Rust\n uses: dtolnay/rust-toolchain@master\n with:\n toolchain: ${{ matrix.rust }}\n\n - name: Override rust toolchain\n run: rustup override set ${{ matrix.rust }}\n\n - name: Setup ARM toolchain\n run: |\n rustup target add aarch64-unknown-linux-gnu\n rustup target add armv7-unknown-linux-gnueabihf\n rustup target add arm-unknown-linux-gnueabihf\n\n curl -o $GITHUB_WORKSPACE/aarch64.tar.xz https://armkeil.blob.core.windows.net/developer/Files/downloads/gnu-a/8.2-2018.08/gcc-arm-8.2-2018.08-x86_64-aarch64-linux-gnu.tar.xz\n curl -o $GITHUB_WORKSPACE/arm.tar.xz https://armkeil.blob.core.windows.net/developer/Files/downloads/gnu-a/8.2-2018.08/gcc-arm-8.2-2018.08-x86_64-arm-linux-gnueabihf.tar.xz\n\n tar xf $GITHUB_WORKSPACE/aarch64.tar.xz\n tar xf $GITHUB_WORKSPACE/arm.tar.xz\n\n echo "$GITHUB_WORKSPACE/gcc-arm-8.2-2018.08-x86_64-aarch64-linux-gnu/bin" >> $GITHUB_PATH\n echo "$GITHUB_WORKSPACE/gcc-arm-8.2-2018.08-x86_64-arm-linux-gnueabihf/bin" >> $GITHUB_PATH\n\n - name: Rustup Show\n run: rustup show\n\n - name: Build Debug\n run: |\n make build-linux-arm-debug\n - name: Build Release\n run: |\n make build-linux-arm-release\n ls -l ./target/aarch64-unknown-linux-gnu/release/gitui || ls -l ./target/armv7-unknown-linux-gnueabihf/release/gitui || ls -l ./target/arm-unknown-linux-gnueabihf/release/gitui\n\n build-apple-x86:\n runs-on: macos-latest\n strategy:\n fail-fast: false\n matrix:\n rust: [nightly, stable, '1.81']\n continue-on-error: ${{ matrix.rust == 'nightly' }}\n steps:\n - uses: actions/checkout@v4\n\n - name: Restore cargo cache\n uses: 
Swatinem/rust-cache@v2\n env:\n cache-name: ci\n with:\n key: apple-x86-${{ env.cache-name }}-${{ matrix.rust }}\n\n - name: Install Rust\n uses: dtolnay/rust-toolchain@master\n with:\n toolchain: ${{ matrix.rust }}\n\n - name: Override rust toolchain\n run: rustup override set ${{ matrix.rust }}\n\n - name: Setup target\n run: rustup target add x86_64-apple-darwin\n\n - name: Rustup Show\n run: rustup show\n\n - name: Build Debug\n run: |\n make build-apple-x86-debug\n - name: Build Release\n run: |\n make build-apple-x86-release\n ls -l ./target/x86_64-apple-darwin/release/gitui\n\n linting:\n name: Lints\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n\n - name: Restore cargo cache\n uses: Swatinem/rust-cache@v2\n env:\n cache-name: ci\n with:\n key: ubuntu-latest-${{ env.cache-name }}-stable\n\n - name: Install Rust\n uses: dtolnay/rust-toolchain@stable\n with:\n components: rustfmt\n\n - run: cargo fmt -- --check\n\n - name: cargo-sort\n run: |\n cargo install cargo-sort --force\n cargo sort -c -w\n\n - name: cargo-deny install\n run: |\n cargo install --locked cargo-deny\n\n - name: cargo-deny checks\n run: |\n cargo deny check\n\n udeps:\n name: udeps\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n\n - name: Restore cargo cache\n uses: Swatinem/rust-cache@v2\n env:\n cache-name: ci\n with:\n key: ubuntu-latest-${{ env.cache-name }}-nightly\n\n - name: Install Rust\n uses: dtolnay/rust-toolchain@nightly\n\n - name: build cargo-udeps\n run: cargo install --git https://github.com/est31/cargo-udeps --locked\n\n - name: run cargo-udeps\n run: cargo +nightly udeps --all-targets\n\n log-test:\n name: Changelog Test\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - name: Extract release notes\n id: extract_release_notes\n uses: ffurrer2/extract-release-notes@v2\n with:\n release_notes_file: ./release-notes.txt\n - uses: actions/upload-artifact@v4\n with:\n name: release-notes.txt\n path: ./release-notes.txt\n\n 
test-homebrew:\n name: Test Homebrew Formula (macOS)\n runs-on: macos-latest\n steps:\n - name: Checkout code\n uses: actions/checkout@v4\n\n - name: Install stable Rust\n uses: actions-rs/toolchain@v1\n with:\n toolchain: stable\n\n - name: Install Homebrew\n run: /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"\n\n - name: Set up Homebrew in PATH\n run: |\n echo "$HOMEBREW_PREFIX/bin:$HOMEBREW_PREFIX/sbin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin" >> $GITHUB_PATH\n\n - name: Update Homebrew\n run: brew update\n\n - name: Let Homebrew build gitui from source\n run: brew install --head --build-from-source gitui\n\n - name: Run Homebrew test\n run: brew test gitui\n
dataset_sample\yaml\gitui-org_gitui\.github\workflows\ci.yml
ci.yml
YAML
8,714
0.8
0.015244
0.007519
node-utils
356
2025-05-25T22:05:43.226427
MIT
false
9ef88ffaabb8baf2dd904da240a9d815
name: Build Nightly Releases\n\non:\n schedule:\n - cron: '0 3 * * *'\n workflow_dispatch:\n\nenv:\n CARGO_TERM_COLOR: always\n AWS_BUCKET_NAME: s3://gitui/nightly/\n\njobs:\n release:\n strategy:\n fail-fast: false\n matrix:\n os: [\n ubuntu-latest, macos-latest, windows-latest, ubuntu-22.04\n ]\n runs-on: ${{ matrix.os }}\n\n steps:\n - uses: actions/checkout@v4\n\n - name: Restore cargo cache\n uses: Swatinem/rust-cache@v2\n env:\n cache-name: ci\n with:\n shared-key: ${{ matrix.os }}-${{ env.cache-name }}-stable\n\n - name: Install Rust\n uses: dtolnay/rust-toolchain@stable\n with:\n components: clippy\n\n # ideally we trigger the nightly build/deploy only if the normal nightly CI finished successfully\n - name: Run tests\n if: matrix.os != 'ubuntu-22.04'\n run: make test\n - name: Run clippy\n if: matrix.os != 'ubuntu-22.04'\n run: |\n cargo clean\n make clippy\n\n - name: Setup MUSL\n if: matrix.os == 'ubuntu-latest'\n run: |\n rustup target add x86_64-unknown-linux-musl\n sudo apt-get -qq install musl-tools\n\n - name: Setup ARM toolchain\n if: matrix.os == 'ubuntu-22.04'\n run: |\n rustup target add aarch64-unknown-linux-gnu\n rustup target add armv7-unknown-linux-gnueabihf\n rustup target add arm-unknown-linux-gnueabihf\n\n curl -o $GITHUB_WORKSPACE/aarch64.tar.xz https://armkeil.blob.core.windows.net/developer/Files/downloads/gnu-a/8.2-2018.08/gcc-arm-8.2-2018.08-x86_64-aarch64-linux-gnu.tar.xz\n curl -o $GITHUB_WORKSPACE/arm.tar.xz https://armkeil.blob.core.windows.net/developer/Files/downloads/gnu-a/8.2-2018.08/gcc-arm-8.2-2018.08-x86_64-arm-linux-gnueabihf.tar.xz\n\n tar xf $GITHUB_WORKSPACE/aarch64.tar.xz\n tar xf $GITHUB_WORKSPACE/arm.tar.xz\n\n echo "$GITHUB_WORKSPACE/gcc-arm-8.2-2018.08-x86_64-aarch64-linux-gnu/bin" >> $GITHUB_PATH\n echo "$GITHUB_WORKSPACE/gcc-arm-8.2-2018.08-x86_64-arm-linux-gnueabihf/bin" >> $GITHUB_PATH\n\n - name: Build Release Mac\n if: matrix.os == 'macos-latest'\n run: make release-mac\n - name: Build Release Mac x86\n if: 
matrix.os == 'macos-latest'\n run: |\n rustup target add x86_64-apple-darwin\n make release-mac-x86\n - name: Build Release Linux\n if: matrix.os == 'ubuntu-latest'\n run: make release-linux-musl\n - name: Build Release Win\n if: matrix.os == 'windows-latest'\n run: make release-win\n - name: Build Release Linux ARM\n if: matrix.os == 'ubuntu-22.04'\n run: make release-linux-arm\n\n - name: Ubuntu 22.04 Upload Artifact\n if: matrix.os == 'ubuntu-22.04'\n env:\n AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}\n AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_KEY_SECRET }}\n AWS_DEFAULT_REGION: ${{ secrets.AWS_REGION }}\n run: |\n aws s3 cp ./release/gitui-linux-armv7.tar.gz $AWS_BUCKET_NAME\n aws s3 cp ./release/gitui-linux-arm.tar.gz $AWS_BUCKET_NAME\n aws s3 cp ./release/gitui-linux-aarch64.tar.gz $AWS_BUCKET_NAME\n\n - name: Ubuntu Latest Upload Artifact\n if: matrix.os == 'ubuntu-latest'\n env:\n AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}\n AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_KEY_SECRET }}\n AWS_DEFAULT_REGION: ${{ secrets.AWS_REGION }}\n run: |\n aws s3 cp ./release/gitui-linux-x86_64.tar.gz $AWS_BUCKET_NAME\n\n - name: MacOS Upload Artifact\n if: matrix.os == 'macos-latest'\n env:\n AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}\n AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_KEY_SECRET }}\n AWS_DEFAULT_REGION: ${{ secrets.AWS_REGION }}\n run: |\n aws s3 cp ./release/gitui-mac.tar.gz $AWS_BUCKET_NAME\n aws s3 cp ./release/gitui-mac-x86.tar.gz $AWS_BUCKET_NAME\n\n - name: Windows Upload Artifact\n if: matrix.os == 'windows-latest'\n env:\n AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}\n AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_KEY_SECRET }}\n AWS_DEFAULT_REGION: ${{ secrets.AWS_REGION }}\n run: |\n aws s3 cp ./release/gitui-win.msi $env:AWS_BUCKET_NAME\n aws s3 cp ./release/gitui-win.tar.gz $env:AWS_BUCKET_NAME\n
dataset_sample\yaml\gitui-org_gitui\.github\workflows\nightly.yml
nightly.yml
YAML
4,247
0.8
0.112
0.009259
react-lib
913
2024-12-23T21:16:37.276168
BSD-3-Clause
false
10a3fc8e32d4ba472b416ed7c4516693
version: 2\n\nupdates:\n # Keep Github Actions dependencies up-to-date\n - package-ecosystem: "github-actions"\n directory: "/"\n schedule:\n interval: "weekly"\n\n\n # Update development dependencies on current release branch (10.0/bugfixes).\n - package-ecosystem: "npm"\n directory: "/"\n schedule:\n interval: "monthly"\n allow:\n - dependency-type: "development"\n open-pull-requests-limit: 100\n target-branch: "10.0/bugfixes"\n versioning-strategy: "increase"\n rebase-strategy: "disabled"\n groups:\n dev-dependencies:\n patterns:\n - "*"\n\n - package-ecosystem: "composer"\n directory: "/"\n schedule:\n interval: "monthly"\n allow:\n - dependency-type: "development"\n open-pull-requests-limit: 100\n target-branch: "10.0/bugfixes"\n versioning-strategy: "increase"\n rebase-strategy: "disabled"\n groups:\n dev-dependencies:\n patterns:\n - "*"\n\n\n # Update production dependencies on future release branch (main).\n - package-ecosystem: "npm"\n directory: "/"\n schedule:\n interval: "monthly"\n allow:\n - dependency-type: "production"\n open-pull-requests-limit: 100\n target-branch: "main"\n versioning-strategy: "increase"\n rebase-strategy: "disabled"\n groups:\n cytoscape:\n patterns:\n - "cytoscape"\n - "cytoscape-*"\n leaflet:\n patterns:\n - "leaflet"\n - "leaflet-*"\n - "leaflet.*"\n fullcalendar:\n patterns:\n - "@fullcalendar/*"\n tabler:\n patterns:\n - "@tabler/*"\n - "bootstrap"\n\n - package-ecosystem: "composer"\n directory: "/"\n schedule:\n interval: "monthly"\n allow:\n - dependency-type: "production"\n open-pull-requests-limit: 100\n target-branch: "main"\n versioning-strategy: "increase"\n rebase-strategy: "disabled"\n groups:\n guzzlehttp:\n patterns:\n - "guzzlehttp/*"\n sabre:\n patterns:\n - "sabre/*"\n symfony:\n patterns:\n - "symfony/*"\n exclude-patterns:\n - "symfony/polyfill-*"\n symfony-polyfills:\n patterns:\n - "symfony/polyfill-*"\n twig:\n patterns:\n - "twig/*"\n ignore:\n - dependency-name: "symfony/*"\n versions: [">= 7"]\n
dataset_sample\yaml\glpi-project_glpi\.github\dependabot.yml
dependabot.yml
YAML
2,345
0.8
0
0.031579
python-kit
885
2023-12-17T10:28:08.940397
Apache-2.0
false
4561041ae8085a78c0206aefd703b000
# These are supported funding model platforms\n\nopen_collective: glpi-project\nko_fi: glpi_teclib\n\n
dataset_sample\yaml\glpi-project_glpi\.github\FUNDING.yml
FUNDING.yml
YAML
97
0.6
0
0.333333
python-kit
479
2024-12-24T00:16:28.759628
BSD-3-Clause
false
92cf73c74222857c98cba742a0feed2d
labels:\n - name: "invalid"\n labeled:\n issue:\n body: |\n This issue has been closed because you did not provide the requested information.\n action: "close"\n - name: "support"\n labeled:\n issue:\n body: |\n This issue has been closed as we only track bugs here.\n \n You can get community support on [forums](https://forum.glpi-project.org/) or you can consider [taking a subscription](https://glpi-project.org/subscriptions/) to get professional support.\n You can also [contact GLPI editor team](https://portal.glpi-network.com/contact-us) directly.\n action: close\n - name: "feature suggestion"\n labeled:\n issue:\n body: |\n This issue has been closed as we only track bugs here.\n \n You can open a topic to discuss with community about this enhancement on [suggestion website](https://glpi.userecho.com/).\n You can also [contact GLPI editor team](https://portal.glpi-network.com/contact-us) directly if you are willing to sponsor this feature.\n action: close\n
dataset_sample\yaml\glpi-project_glpi\.github\label-commenter-config.yml
label-commenter-config.yml
YAML
1,118
0.8
0.04
0
node-utils
982
2025-05-30T14:57:26.763529
GPL-3.0
false
27e9ec080a90b6b75b948021b754136c
services:\n app:\n container_name: "glpi-test-app"\n image: "ghcr.io/glpi-project/${PHP_IMAGE:-githubactions-php-apache:8.3}"\n environment:\n CODE_COVERAGE: "${CODE_COVERAGE:-false}"\n volumes:\n - type: "bind"\n source: "${APPLICATION_ROOT}"\n target: "/var/www/glpi"\n - type: "bind"\n source: "${APP_CONTAINER_HOME}"\n target: "/home/www-data"\n
dataset_sample\yaml\glpi-project_glpi\.github\actions\docker-compose-app.yml
docker-compose-app.yml
YAML
392
0.7
0
0
awesome-app
453
2024-12-03T12:35:57.175888
MIT
false
71d71552f1f2df57efdec19facd9a4ce
services:\n db:\n container_name: "glpi-test-db"\n image: "ghcr.io/glpi-project/${DB_IMAGE:-githubactions-mariadb:10.11}"\n environment:\n MYSQL_ALLOW_EMPTY_PASSWORD: "yes"\n MYSQL_DATABASE: "glpi"\n shm_size: '1gb'\n dovecot:\n container_name: "glpi-test-dovecot"\n image: "ghcr.io/glpi-project/githubactions-dovecot"\n memcached:\n container_name: "glpi-test-memcached"\n image: "ghcr.io/glpi-project/githubactions-memcached"\n openldap:\n container_name: "glpi-test-openldap"\n image: "ghcr.io/glpi-project/githubactions-openldap"\n redis:\n container_name: "glpi-test-redis"\n image: "ghcr.io/glpi-project/githubactions-redis"\n
dataset_sample\yaml\glpi-project_glpi\.github\actions\docker-compose-services.yml
docker-compose-services.yml
YAML
663
0.7
0
0
python-kit
721
2024-03-28T18:48:13.025633
GPL-3.0
false
c8d62dbcbe1d57f8bd455d3e6cd7e751
name: Bug Report\ndescription: Create a report to help us improve GLPI\nbody:\n - type: markdown\n attributes:\n value: |\n\n Dear GLPI user.\n\n **⚠️ Please never use standard issues to report security problems. See [security policy](https://github.com/glpi-project/glpi/security/policy) for more details. ⚠️**\n\n BEFORE SUBMITTING YOUR ISSUE, please make sure to read and follow these steps:\n\n * We do not support community plugins. Contact directly their authors, or use [the community forum](https://forum.glpi-project.org).\n * We do not track feature requests nor enhancements here. Propose them on the [suggest dedicated site](https://suggest.glpi-project.org).\n * Keep this tracker in ENGLISH. If you want support in your language, the [community forum](https://forum.glpi-project.org) is the best place.\n * Always try to reproduce your issue at least on latest stable release.\n\n The GLPI team.\n - type: markdown\n attributes:\n value: |\n ## Professional Support\n\n We do not guarantee any processing / resolution time for community issues.\n\n If you need a quick fix or any guarantee, you should consider to buy a GLPI Network Subscription.\n\n More information here: https://glpi-project.org/subscriptions/\n - type: checkboxes\n id: terms\n attributes:\n label: Code of Conduct\n description: By submitting this issue, you agree to follow hereinabove rules and [Contribution guide](https://github.com/glpi-project/glpi/blob/main/CONTRIBUTING.md)\n options:\n - label: I agree to follow this project's Code of Conduct\n validations:\n required: true\n - type: checkboxes\n attributes:\n label: Is there an existing issue for this?\n description: Please search to see if an issue already exists for the bug you encountered.\n options:\n - label: I have searched the existing issues\n validations:\n required: true\n - type: input\n id: version\n attributes:\n label: Version\n description: What version of our GLPI are you running?\n validations:\n required: true\n - type: 
textarea\n attributes:\n label: Bug description\n description: A concise description of the problem you are experiencing and what you expected to happen.\n validations:\n required: false\n - type: textarea\n id: logs\n attributes:\n label: Relevant log output\n description: |\n Please copy and paste any relevant log output. Find them in `*-error.log` files under `glpi/files/_log/`.\n\n Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.\n render: shell\n - type: input\n id: url\n attributes:\n label: Page URL\n description: If applicable, page URL where the bug happens.\n validations:\n required: false\n - type: textarea\n attributes:\n label: Steps To reproduce\n description: Steps to reproduce the behavior.\n placeholder: |\n 1. With this config...\n 2. Go to...\n 3. Scroll down to...\n 4. See error...\n validations:\n required: false\n - type: textarea\n attributes:\n label: Your GLPI setup information\n description: Please copy and paste information you will find in GLPI in `Setup > General` menu, `System` tab.\n validations:\n required: false\n - type: textarea\n attributes:\n label: Anything else?\n description: Add any other context about the problem here.\n validations:\n required: false\n
dataset_sample\yaml\glpi-project_glpi\.github\ISSUE_TEMPLATE\bug_report.yml
bug_report.yml
YAML
3,554
0.95
0.061224
0.067416
vue-tools
650
2023-07-25T06:21:30.322002
BSD-3-Clause
false
058bf394d706ea362412f8aa33b68fe1
contact_links:\n - name: GLPI Community Forum\n url: https://forum.glpi-project.org\n about: Ask questions and get help from the community.\n - name: GLPI Professionnal Support\n url: https://services.glpi-network.com\n about: Get professionnal support from the editor and a network of local partners.\n - name: Find an official Partner\n url: https://glpi-project.org/partners/\n about: Get support to deploy GLPI in a professional manner.\n - name: Feature request\n url: https://suggest.glpi-project.org\n about: Suggest a new feature to the GLPI team\n
dataset_sample\yaml\glpi-project_glpi\.github\ISSUE_TEMPLATE\config.yml
config.yml
YAML
571
0.8
0
0
python-kit
753
2024-10-17T23:13:57.139105
MIT
false
23f6ef795b1c550c2dd11a153c87f92f
name: Contribution request\ndescription: Ask and discuss about a feature/evolution contribution\nbody:\n - type: markdown\n attributes:\n value: |\n\n Dear GLPI user.\n\n **⚠️ Please never use standard issues to report security problems. See [security policy](https://github.com/glpi-project/glpi/security/policy) for more details. ⚠️**\n\n BEFORE SUBMITTING YOUR ISSUE, please make sure to read and follow these steps:\n\n * We do not support community plugins. Contact directly their authors, or use [the community forum](https://forum.glpi-project.org).\n * This form is discussing about your possible contributions, not requesting we develop ourselves new features. Propose the latter on the [suggest dedicated site](https://suggest.glpi-project.org).\n * Do not use this form to request support. There is a [dedicated form](https://github.com/glpi-project/glpi/issues/new?assignees=&labels=&projects=&template=bug_report.yml) for that.\n * Keep this tracker in ENGLISH. If you want support in your language, the [community forum](https://forum.glpi-project.org) is the best place.\n\n The GLPI team.\n - type: checkboxes\n id: terms\n attributes:\n label: Code of Conduct\n description: By submitting this issue, you agree to follow hereinabove rules and [Contribution guide](https://github.com/glpi-project/glpi/blob/main/CONTRIBUTING.md)\n options:\n - label: I agree to follow this project's Code of Conduct\n - type: markdown\n attributes:\n value: |\n - Any contribution of code (new feature or evolution) must be done on main branch.\n - Any bugfixes should target the current bugfixes branch (usually the default)\n - type: textarea\n attributes:\n label: Contribution description\n description: A detailled description of the contribution you want to make.\n
dataset_sample\yaml\glpi-project_glpi\.github\ISSUE_TEMPLATE\contributing.yml
contributing.yml
YAML
1,875
0.8
0.057143
0.166667
react-lib
346
2024-05-30T07:14:35.058094
GPL-3.0
false
fd06cdf5b4914e6fd6385d0ca5bfe029
name: "Audit dependencies"\n\non:\n # Runs audit every monday\n schedule:\n - cron: '0 10 * * 1'\n # Enable manual run\n workflow_dispatch:\n\njobs:\n audit:\n # Do not run scheduled audit on tier repositories\n if: github.repository == 'glpi-project/glpi' || github.event_name != 'schedule'\n permissions:\n issues: "write"\n name: "Audit dependencies (${{ matrix.branch }})"\n runs-on: "ubuntu-latest"\n strategy:\n fail-fast: false\n matrix:\n include:\n - {branch: "10.0/bugfixes", php-version: "7.4"}\n env:\n COMPOSE_FILE: ".github/actions/docker-compose-app.yml"\n APPLICATION_ROOT: "${{ github.workspace }}"\n PHP_IMAGE: "githubactions-php:${{ matrix.php-version }}"\n UPDATE_FILES_ACL: true\n steps:\n - name: "Set env"\n run: |\n echo "APP_CONTAINER_HOME=${{ runner.temp }}/app_home" >> $GITHUB_ENV\n - name: "Checkout"\n uses: "actions/checkout@v4"\n with:\n ref: ${{ matrix.branch }}\n - name: "Restore dependencies cache"\n uses: actions/cache@v4\n with:\n path: |\n ${{ env.APP_CONTAINER_HOME }}/.composer/cache/\n ${{ env.APP_CONTAINER_HOME }}/.npm/_cacache/\n key: "app_home_deps-${{ matrix.php-version }}-${{ hashFiles('composer.lock', 'package-lock.json') }}"\n restore-keys: |\n app_home_deps-${{ matrix.php-version }}-\n app_home_deps-\n - name: "Initialize containers"\n run: |\n .github/actions/init_containers-start.sh\n - name: "Show versions"\n run: |\n .github/actions/init_show-versions.sh\n - name: "Audit npm dependencies"\n continue-on-error: true\n id: "npmaudit"\n run: |\n set -o pipefail\n CODE=0\n LOG=$( npm audit package-lock-only 2>&1 | tee /dev/stderr ) || CODE=$?\n echo "CODE=$CODE" >> $GITHUB_OUTPUT\n printf "LOG<<EOF\n$LOG\nEOF" >> $GITHUB_OUTPUT\n - name: "Create issue if npm audit fails"\n if: "${{ steps.npmaudit.outputs.CODE != '0' }}"\n uses: "actions/github-script@v7"\n with:\n script: |\n const result = await github.rest.issues.create({\n owner: context.repo.owner,\n repo: context.repo.repo,\n title: 'npm audit failed (${{ matrix.branch 
}})',\n body: '```\n' + ${{ toJSON(steps.npmaudit.outputs.LOG) }} + '\n```',\n });\n - name: "Audit composer dependencies"\n continue-on-error: true\n id: "composeraudit"\n run: |\n set -o pipefail\n CODE=0\n LOG=$( composer audit --locked 2>&1 | tee /dev/stderr ) || CODE=$?\n echo "CODE=$CODE" >> $GITHUB_OUTPUT\n printf "LOG<<EOF\n$LOG\nEOF" >> $GITHUB_OUTPUT\n - name: "Create issue if composer audit fails"\n if: "${{ steps.composeraudit.outputs.CODE != '0' }}"\n uses: "actions/github-script@v7"\n with:\n script: |\n const result = await github.rest.issues.create({\n owner: context.repo.owner,\n repo: context.repo.repo,\n title: 'composer audit failed (${{ matrix.branch }})',\n body: '```\n# composer audit report\n\n' + ${{ toJSON(steps.composeraudit.outputs.LOG) }} + '\n```',\n });\n
dataset_sample\yaml\glpi-project_glpi\.github\workflows\audit_dependencies.yml
audit_dependencies.yml
YAML
3,323
0.8
0.054945
0.033708
node-utils
797
2025-01-06T04:09:06.439974
GPL-3.0
false
eada37783569ed82773960992a25e2d5
name: "GLPI branch build"\n\non:\n workflow_dispatch:\n inputs:\n php-version:\n description: "PHP version"\n required: true\n default: "8.0"\n\njobs:\n build:\n name: "Build GLPI"\n runs-on: "ubuntu-latest"\n services:\n app:\n image: "ghcr.io/glpi-project/githubactions-php:${{ github.event.inputs.php-version }}"\n options: >-\n --volume /glpi:/var/www/glpi\n steps:\n - name: "Checkout"\n uses: "actions/checkout@v4"\n - name: "Deploy source into app container"\n run: |\n sudo cp --no-target-directory --preserve --recursive `pwd` /glpi\n sudo chown -R 1000:1000 /glpi\n - name: "Install dependencies"\n run: |\n docker exec ${{ job.services.app.id }} composer install --optimize-autoloader --prefer-dist --no-interaction --no-progress --no-suggest\n - name: "Define release name"\n run: |\n REF_NAME=$(echo ${{ github.ref_name }} | sed -E 's|/|-|g')\n SHA=$(git rev-parse --short HEAD)\n echo "release_name=$REF_NAME.$SHA" >> $GITHUB_ENV\n - name: "Build"\n run: |\n docker exec --interactive ${{ job.services.app.id }} tools/make_release.sh -y . ${{ env.release_name }}\n docker cp ${{ job.services.app.id }}:/tmp/glpi-${{ env.release_name }}.tgz ${{ github.workspace }}/${{ env.release_name }}.tar.gz\n - name: "Store archive"\n uses: actions/upload-artifact@v4\n with:\n name: ${{ env.release_name }}.tar.gz\n path: ${{ github.workspace }}/${{ env.release_name }}.tar.gz\n
dataset_sample\yaml\glpi-project_glpi\.github\workflows\branch_build.yml
branch_build.yml
YAML
1,584
0.85
0
0
node-utils
233
2023-11-14T11:52:53.486328
Apache-2.0
false
9b1bc4b407e523a981a37822abda3be7
name: "Bump version after release"\n\non:\n release:\n types:\n - "published"\n\njobs:\n bump-version:\n permissions:\n contents: "write" # needed to create a new branch and push commits\n pull-requests: "write" # needed to create a pull request\n name: "Bump version"\n runs-on: "ubuntu-latest"\n steps:\n - name: "Compute variables"\n run: |\n TAG="${GITHUB_REF#refs/tags/}"\n MAJOR_VERSION="$( echo "$TAG" | cut -d '.' -f 1 )"\n MINOR_VERSION="$( echo "$TAG" | cut -d '.' -f 2 )"\n BUGFIX_VERSION="$( echo "$TAG" | cut -d '.' -f 3 | cut -d '-' -f 1 )"\n STABILITY_FLAG="$( echo "$TAG" | grep -Po '(\-\w+)?$' )"\n NEXT_VERSION="$MAJOR_VERSION.$MINOR_VERSION.$(($BUGFIX_VERSION+1))"\n echo "TAG=$TAG" >> $GITHUB_ENV\n echo "IS_STABLE_RELEASE=$( [[ -z "$STABILITY_FLAG" ]] && echo "yes" || echo "no" )" >> $GITHUB_ENV\n echo "NEXT_VERSION=$NEXT_VERSION" >> $GITHUB_ENV\n echo "BASE_BRANCH=$MAJOR_VERSION.$MINOR_VERSION/bugfixes" >> $GITHUB_ENV\n echo "BUMP_BRANCH=bump-version/$NEXT_VERSION" >> $GITHUB_ENV\n CHANGELOG_ENTRY="## [$NEXT_VERSION] unreleased\n \n ### Added\n \n ### Changed\n \n ### Deprecated\n \n ### Removed\n \n ### API changes\n \n #### Added\n \n #### Changes\n \n #### Deprecated\n \n #### Removed\n \n "\n echo "CHANGELOG_ENTRY=${CHANGELOG_ENTRY//$'\n'/'\n'}" >> $GITHUB_ENV\n - name: "Checkout"\n if: ${{ env.IS_STABLE_RELEASE == 'yes' }}\n uses: "actions/checkout@v4"\n - name: "Update codebase"\n if: ${{ env.IS_STABLE_RELEASE == 'yes' }}\n run: |\n git config --local user.email "$(git log --format='%ae' HEAD^!)"\n git config --local user.name "$(git log --format='%an' HEAD^!)"\n git checkout -b ${{ env.BUMP_BRANCH }}\n echo "Renaming version file..."\n git mv version/${{ env.TAG }} version/${{ env.NEXT_VERSION }}\n echo "Replacing version in src/autoload/constants.php..."\n sed -i "s/define('GLPI_VERSION', '[^)]*');/define('GLPI_VERSION', '${{ env.NEXT_VERSION }}-dev');/g" src/autoload/constants.php\n echo "Archiving MySQL empty schema file..."\n cp 
install/mysql/glpi-empty.sql install/mysql/glpi-${{ env.TAG }}-empty.sql\n echo "Updating CHANGELOG.md..."\n sed -i "0,/##/s//${{ env.CHANGELOG_ENTRY }}\n##/" CHANGELOG.md\n git add .\n git commit -m "Bump version"\n git push origin ${{ env.BUMP_BRANCH }}\n - name: "Create Pull Request"\n if: ${{ env.IS_STABLE_RELEASE == 'yes' }}\n uses: "actions/github-script@v7"\n with:\n script: |\n const { repo, owner } = context.repo;\n const result = await github.rest.pulls.create({\n title: 'Bump version to ${{ env.NEXT_VERSION }}-dev',\n owner,\n repo,\n head: '${{ env.BUMP_BRANCH }}',\n base: '${{ env.BASE_BRANCH }}',\n body: 'Bump version to ensure next nightly build will use not use same version string as the version that was just released.'\n });\n
dataset_sample\yaml\glpi-project_glpi\.github\workflows\bump_version_after_release.yml
bump_version_after_release.yml
YAML
3,306
0.8
0.035714
0.125
node-utils
450
2025-02-08T05:48:19.139757
MIT
false
49aa5048d57afe83dfca084f41f090ae
name: "GLPI CI"\n\non:\n # Runs test suite when a new commit is pushed on "main" and "*/bugfixes" branches\n # and when a new tag is created\n push:\n branches:\n - main\n - '*/bugfixes'\n - 'feature/*'\n - 'fix/*'\n - 'security/*'\n tags:\n - '*'\n # Runs test suite when a PR is opened or synchronyzed\n pull_request:\n # Runs test suite every night\n schedule:\n - cron: '0 0 * * *'\n # Enable manual run\n workflow_dispatch:\n\nconcurrency:\n group: "${{ github.workflow }}-${{ github.ref }}"\n cancel-in-progress: true\n\njobs:\n lint:\n # Do not run scheduled lint on tier repositories\n if: github.repository == 'glpi-project/glpi' || github.event_name != 'schedule'\n name: "Lint on PHP ${{ matrix.php-version }}"\n runs-on: "ubuntu-latest"\n strategy:\n fail-fast: false\n matrix:\n include:\n - {php-version: "8.2"} # Lint on lower PHP version to detected too early usage of new syntaxes\n - {php-version: "8.4"} # Lint on higher PHP version to detected deprecated elements usage\n env:\n COMPOSE_FILE: ".github/actions/docker-compose-app.yml"\n APPLICATION_ROOT: "${{ github.workspace }}"\n PHP_IMAGE: "githubactions-php-apache:${{ matrix.php-version }}"\n UPDATE_FILES_ACL: true\n steps:\n - name: "Set env"\n run: |\n echo "APP_CONTAINER_HOME=${{ runner.temp }}/app_home" >> $GITHUB_ENV\n - name: "Checkout"\n uses: "actions/checkout@v4"\n - name: "Restore dependencies cache"\n uses: actions/cache@v4\n with:\n path: |\n ${{ env.APP_CONTAINER_HOME }}/.composer/cache/\n ${{ env.APP_CONTAINER_HOME }}/.npm/_cacache/\n key: "app_home_deps-${{ matrix.php-version }}-${{ hashFiles('composer.lock', 'package-lock.json') }}"\n restore-keys: |\n app_home_deps-${{ matrix.php-version }}-\n app_home_deps-\n - name: "Restore lint cache"\n uses: actions/cache@v4\n with:\n path: |\n ${{ env.APP_CONTAINER_HOME }}/phpcs.cache\n /tmp/phpstan\n key: "app_home_lint-${{ matrix.php-version }}-${{ github.base_ref || github.ref }}"\n restore-keys: |\n app_home_lint-${{ matrix.php-version }}\n 
app_home_lint-\n - name: "Initialize containers"\n run: |\n .github/actions/init_containers-start.sh\n - name: "Show versions"\n run: |\n .github/actions/init_show-versions.sh\n - name: "Force used PHP version"\n run: |\n docker compose exec -T app composer config --unset platform.php\n docker compose exec -T app composer require "php:>=${{ matrix.php-version }}" --ignore-platform-req=php+ --no-install --no-scripts\n - name: "Build dependencies / translations"\n run: |\n docker compose exec -T app .github/actions/init_build.sh\n - name: "PHP lint"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/lint_php-lint.sh\n - name: "Twig lint"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/lint_twig-lint.sh\n - name: "JS lint"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/lint_js-lint.sh\n - name: "SCSS lint"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/lint_scss-lint.sh\n - name: "Misc lint"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/lint_misc-lint.sh\n\n generate-tests-matrix:\n # Do not run scheduled tests on tier repositories\n if: github.repository == 'glpi-project/glpi' || github.event_name != 'schedule'\n name: "Generate tests matrix"\n runs-on: "ubuntu-latest"\n outputs:\n matrix: ${{ steps.generate-matrix.outputs.matrix }}\n steps:\n - name: "Generate matrix"\n id: "generate-matrix"\n run: |\n if [[ "${{ github.event_name }}" = "workflow_dispatch" || ( "${{ github.ref_type }}" = "branch" && "${{ github.ref_name }}" =~ ^(main|.*/bugfixes|security/.*)$ ) || "${{ github.ref_type }}" = "tag" ]]; then\n MATRIX='\n {\n "include": [\n {"php-version": "8.4", "db-image": "mariadb:11.4"},\n {"php-version": "8.4", "db-image": "mysql:8.4"},\n {"php-version": "8.2", "db-image": "mariadb:11.4"},\n {"php-version": "8.4", "db-image": "mariadb:10.5"},\n 
{"php-version": "8.4", "db-image": "mysql:8.0"},\n {"php-version": "8.4", "db-image": "percona:8.0"},\n {"php-version": "8.4", "db-image": "percona:8.4"}\n ]\n }\n '\n else\n MATRIX='\n {\n "include": [\n {"php-version": "8.4", "db-image": "mariadb:11.4"},\n {"php-version": "8.4", "db-image": "mysql:8.4"}\n ]\n }\n '\n fi\n echo "matrix=$(echo $MATRIX | jq -c .)" >> $GITHUB_OUTPUT\n tests:\n # Do not run scheduled tests on tier repositories\n if: github.repository == 'glpi-project/glpi' || github.event_name != 'schedule'\n name: "Test on PHP ${{ matrix.php-version }} using ${{ matrix.db-image }}"\n needs: "generate-tests-matrix"\n runs-on: "ubuntu-latest"\n timeout-minutes: 60\n strategy:\n fail-fast: false\n matrix: ${{ fromJson(needs.generate-tests-matrix.outputs.matrix) }}\n env:\n COMPOSE_FILE: ".github/actions/docker-compose-app.yml:.github/actions/docker-compose-services.yml"\n APPLICATION_ROOT: "${{ github.workspace }}"\n DB_IMAGE: "githubactions-${{ matrix.db-image }}"\n PHP_IMAGE: "githubactions-php-apache:${{ matrix.php-version }}"\n UPDATE_FILES_ACL: true\n steps:\n - name: "Set env"\n run: |\n echo "APP_CONTAINER_HOME=${{ runner.temp }}/app_home" >> $GITHUB_ENV\n - name: "Checkout"\n uses: "actions/checkout@v4"\n - name: "Restore dependencies cache"\n uses: actions/cache@v4\n with:\n path: |\n ${{ env.APP_CONTAINER_HOME }}/.composer/cache/\n ${{ env.APP_CONTAINER_HOME }}/.npm/_cacache/\n key: "app_home_deps-${{ matrix.php-version }}-${{ hashFiles('composer.lock', 'package-lock.json') }}"\n restore-keys: |\n app_home_deps-${{ matrix.php-version }}-\n app_home_deps-\n - name: "Initialize containers"\n run: |\n .github/actions/init_containers-start.sh\n - name: "Show versions"\n run: |\n .github/actions/init_show-versions.sh\n - name: "Build dependencies / translations"\n run: |\n docker compose exec -T app .github/actions/init_build.sh\n - name: "Install DB tests"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app 
.github/actions/test_install.sh\n - name: "Update DB tests (from 0.85.5, not using utf8mb4)"\n if: "${{ success() || failure() }}"\n run: |\n .github/actions/init_initialize-0.85.5-db.sh\n docker compose exec -T app .github/actions/test_update-from-older-version.sh\n - name: "Update DB tests (from 9.5, using utf8mb4)"\n if: "${{ success() || failure() }}"\n run: |\n .github/actions/init_initialize-9.5-db.sh\n docker compose exec -T app .github/actions/test_update-from-9.5.sh\n - name: "PHPUnit tests"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/test_tests-phpunit.sh\n - name: "Functional tests"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/test_tests-functional.sh\n - name: "Cache tests"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/test_tests-cache.sh\n - name: "LDAP tests"\n if: "${{ success() || failure() }}"\n run: |\n .github/actions/init_initialize-ldap-fixtures.sh\n docker compose exec -T app .github/actions/test_tests-ldap.sh\n - name: "IMAP tests"\n if: "${{ success() || failure() }}"\n run: |\n .github/actions/init_initialize-imap-fixtures.sh\n docker compose exec -T app .github/actions/test_tests-imap.sh\n - name: "Javascript tests"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/test_javascript.sh\n\n e2e:\n # Do not run scheduled tests on tier repositories\n if: github.repository == 'glpi-project/glpi' || github.event_name != 'schedule'\n name: "E2E and web tests using latest PHP and MariaDB versions"\n runs-on: "ubuntu-latest"\n timeout-minutes: 60\n env:\n COMPOSE_FILE: ".github/actions/docker-compose-app.yml:.github/actions/docker-compose-services.yml"\n APPLICATION_ROOT: "${{ github.workspace }}"\n DB_IMAGE: "githubactions-mariadb:10.11"\n PHP_IMAGE: "githubactions-php-apache:8.3"\n UPDATE_FILES_ACL: true\n steps:\n - name: "Set env"\n run: |\n echo 
"APP_CONTAINER_HOME=${{ runner.temp }}/app_home" >> $GITHUB_ENV\n - name: "Checkout"\n uses: "actions/checkout@v4"\n - name: "Restore dependencies cache"\n uses: actions/cache@v4\n with:\n path: |\n ${{ env.APP_CONTAINER_HOME }}/.composer/cache/\n ${{ env.APP_CONTAINER_HOME }}/.npm/_cacache/\n key: "app_home_deps-8.3-${{ hashFiles('composer.lock', 'package-lock.json') }}"\n restore-keys: |\n app_home_deps-8.3-\n app_home_deps-\n - name: "Initialize containers"\n run: |\n .github/actions/init_containers-start.sh\n - name: "Show versions"\n run: |\n .github/actions/init_show-versions.sh\n - name: "Build dependencies / translations"\n run: |\n docker compose exec -T app .github/actions/init_build.sh\n - name: "Install DB tests"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/test_install.sh\n - name: "WEB tests"\n if: "${{ success() || failure() }}"\n run: |\n docker compose exec -T app .github/actions/test_tests-web.sh\n - name: "E2E tests"\n if: "${{ success() || failure() }}"\n id: "e2e"\n run: |\n docker compose exec -T app .github/actions/test_tests-e2e.sh\n - name: "Upload Cypress screenshots"\n if: "${{ failure() && steps.e2e.conclusion == 'failure' }}"\n uses: actions/upload-artifact@v4\n with:\n name: cypress-screenshots\n path: tests/cypress/screenshots\n
dataset_sample\yaml\glpi-project_glpi\.github\workflows\ci.yml
ci.yml
YAML
10,868
0.95
0.083032
0.033088
react-lib
272
2024-10-11T12:02:58.344791
Apache-2.0
false
bf837e48f0c8160ecb857167f85efe03
name: 'Close stale issues'\non:\n schedule:\n - cron: '0 8 * * *'\n\njobs:\n stale:\n if: github.repository == 'glpi-project/glpi'\n permissions:\n issues: write # for actions/stale to close stale issues\n runs-on: ubuntu-latest\n steps:\n - uses: actions/stale@v9\n with:\n stale-issue-message: >-\n There has been no activity on this issue for some time and therefore it is considered stale\n and will be closed automatically in 10 days.\n \n \n If this issue is related to a bug, please try to reproduce on latest release. If the problem persist,\n feel free to add a comment to revive this issue.\n \n If it is related to a new feature, please open a topic to discuss with community about this enhancement\n on [suggestion website](https://glpi.userecho.com/).\n \n \n You may also consider taking a [subscription](https://glpi-project.org/subscriptions/) to get professionnal\n support or [contact GLPI editor team](https://portal.glpi-network.com/contact-us) directly.\n days-before-issue-stale: 60\n days-before-pr-stale: -1 # PR will be marked as stale manually.\n days-before-close: 10\n exempt-issue-labels: "bug,enhancement,question,security" # Issues with "bug", "enhancement", "question" or "security" labels will not be marked as stale\n exempt-all-milestones: true # Do not check issues/PR with defined milestone.\n ascending: true # First check older issues/PR.\n operations-per-run: 750 # Max API calls per run.\n
dataset_sample\yaml\glpi-project_glpi\.github\workflows\close_stale_issues.yml
close_stale_issues.yml
YAML
1,645
0.8
0.114286
0
awesome-app
810
2024-10-11T10:20:59.677067
GPL-3.0
false
5c1c854067b63c60575b0d7771e874e8
name: "Code coverage schedule"\n\non:\n schedule:\n - cron: '0 0 * * *'\n\njobs:\n run-coverage-workflow:\n name: "Run coverage workflow"\n uses: "glpi-project/glpi/.github/workflows/coverage.yml@main"\n with:\n branch: "main"\n
dataset_sample\yaml\glpi-project_glpi\.github\workflows\coverage-schedule.yml
coverage-schedule.yml
YAML
236
0.7
0
0
node-utils
465
2024-05-05T21:50:01.032952
GPL-3.0
false
fe74d852a742294fbe30f3b2d9c899a9
name: "GLPI test code coverage"\n\non:\n # Enable execution from the "Code coverage schedule" workflow\n workflow_call:\n inputs:\n branch:\n required: true\n type: string\n # Enable manual run\n workflow_dispatch:\n inputs:\n branch:\n description: "Target branch"\n required: true\n type: string\n\njobs:\n coverage:\n name: "Code coverage"\n runs-on: "ubuntu-latest"\n strategy:\n fail-fast: false\n matrix:\n include:\n - {php-version: "8.4", db-image: "mariadb:11.4"}\n env:\n COMPOSE_FILE: ".github/actions/docker-compose-app.yml:.github/actions/docker-compose-services.yml"\n APPLICATION_ROOT: "${{ github.workspace }}"\n DB_IMAGE: "githubactions-${{ matrix.db-image }}"\n PHP_IMAGE: "githubactions-php-coverage:${{ matrix.php-version }}"\n UPDATE_FILES_ACL: true\n CODE_COVERAGE: true\n steps:\n - name: "Set env"\n run: |\n echo "APP_CONTAINER_HOME=${{ runner.temp }}/app_home" >> $GITHUB_ENV\n - name: "Checkout"\n uses: "actions/checkout@v4"\n with:\n ref: ${{ inputs.branch }}\n - name: "Restore dependencies cache"\n uses: actions/cache@v4\n with:\n path: |\n ${{ env.APP_CONTAINER_HOME }}/.composer/cache/\n ${{ env.APP_CONTAINER_HOME }}/.npm/_cacache/\n key: "app_home_deps-${{ matrix.php-version }}-${{ hashFiles('composer.lock', 'package-lock.json') }}"\n restore-keys: |\n app_home_deps-${{ matrix.php-version }}-\n app_home_deps-\n - name: "Initialize containers"\n run: |\n .github/actions/init_containers-start.sh\n - name: "Show versions"\n run: |\n .github/actions/init_show-versions.sh\n - name: "Build dependencies / translations"\n run: |\n docker compose exec -T app .github/actions/init_build.sh\n - name: "Install database"\n run: |\n docker compose exec -T app .github/actions/test_install.sh\n - name: "PHPUnit tests"\n run: |\n docker compose exec -T app .github/actions/test_tests-phpunit.sh\n - name: "Codecov"\n uses: "codecov/codecov-action@v5"\n env:\n CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}\n with:\n files: ./phpunit/coverage/clover.xml\n override_branch: 
${{ inputs.branch }}\n
dataset_sample\yaml\glpi-project_glpi\.github\workflows\coverage.yml
coverage.yml
YAML
2,373
0.95
0
0.028169
react-lib
567
2025-03-16T02:14:45.055221
GPL-3.0
false
75171ca4f111b0927c6862ad818fec75
name: "Label commenter"\n\non:\n issues:\n types:\n - "labeled"\n - "unlabeled"\n\njobs:\n comment:\n permissions:\n contents: "read"\n issues: "write"\n runs-on: "ubuntu-latest"\n steps:\n - name: "Checkout"\n uses: "actions/checkout@v4"\n\n - name: "Label commenter"\n uses: "peaceiris/actions-label-commenter@v1"\n
dataset_sample\yaml\glpi-project_glpi\.github\workflows\label-commenter.yml
label-commenter.yml
YAML
355
0.7
0
0
react-lib
152
2024-05-11T21:01:08.684473
GPL-3.0
false
c867745fec4e2e96c114bd7f7f1d990c
name: "GLPI nightly build"\n\non:\n # Runs test suite every night\n schedule:\n - cron: '0 0 * * *'\n # Enable manual run\n workflow_dispatch:\n\njobs:\n build:\n permissions:\n contents: write # for Git to git push\n if: github.repository == 'glpi-project/glpi'\n name: "Build ${{ matrix.branch }}"\n runs-on: "ubuntu-latest"\n strategy:\n fail-fast: false\n max-parallel: 1\n matrix:\n include:\n # build on lower supported version to ensure building tools are compatible with this version\n - {branch: "10.0/bugfixes", php-version: "7.4"}\n - {branch: "main", php-version: "8.2"}\n services:\n app:\n image: "ghcr.io/glpi-project/githubactions-php:${{ matrix.php-version }}"\n options: >-\n --volume /glpi:/var/www/glpi\n steps:\n - name: "Checkout"\n uses: "actions/checkout@v4"\n with:\n ref: ${{ matrix.branch }}\n - name: "Build if updated during the last 24h"\n id: updated\n run: |\n SHA=$( git rev-parse HEAD )\n if [ -n "$( git rev-list -n 1 --after="24 hours" $SHA )" ]; then\n echo "build=yes" >> $GITHUB_OUTPUT\n else\n echo "build=no" >> $GITHUB_OUTPUT\n fi\n - name: "Deploy source into app container"\n if: ${{ steps.updated.outputs.build == 'yes' }}\n run: |\n sudo cp --no-target-directory --preserve --recursive `pwd` /glpi\n sudo chown -R 1000:1000 /glpi\n - name: "Install dependencies"\n if: ${{ steps.updated.outputs.build == 'yes' }}\n run: |\n docker exec ${{ job.services.app.id }} composer install --optimize-autoloader --prefer-dist --no-interaction --no-progress --no-suggest\n - name: "Define release name"\n id: release-name\n if: ${{ steps.updated.outputs.build == 'yes' }}\n run: |\n REF_NAME=$(echo ${{ matrix.branch }} | sed -E 's|/bugfixes||')\n echo "basename=$REF_NAME" >> $GITHUB_OUTPUT\n SHA=$(git rev-parse --short HEAD)\n echo "release_name=$REF_NAME-$SHA" >> $GITHUB_ENV\n - name: "Build"\n if: ${{ steps.updated.outputs.build == 'yes' }}\n run: |\n docker exec --interactive ${{ job.services.app.id }} tools/make_release.sh -y . 
${{ env.release_name }}\n docker cp ${{ job.services.app.id }}:/tmp/glpi-${{ env.release_name }}.tgz ${{ github.workspace }}/${{ env.release_name }}.tar.gz\n cp ${{ github.workspace }}/${{ env.release_name }}.tar.gz ${{ github.workspace }}/${{ steps.release-name.outputs.basename }}.tar.gz\n - uses: actions/checkout@v4\n if: ${{ steps.updated.outputs.build == 'yes' }}\n with:\n repository: ${{ github.repository_owner }}/glpi-project.github.io\n token: ${{ secrets.GH_PUBLISHING_TOKEN }}\n fetch-depth: 0\n path: nightly\n - name: "Publish nightly"\n if: ${{ steps.updated.outputs.build == 'yes' }}\n id: publish-nightly\n run: |\n EMAIL="$(git log --format='%ae' HEAD^!)"\n NAME="$(git log --format='%an' HEAD^!)"\n chmod +x tools/github-nightly-description.sh\n cd nightly\n git config --local user.email "$EMAIL"\n git config --local user.name "$NAME"\n echo "Removing previous ${{ matrix.branch }} builds"\n git filter-branch --prune-empty -f --index-filter 'git rm --cached --ignore-unmatch "glpi/${{ steps.release-name.outputs.basename }}.tar.gz"' HEAD\n git filter-branch --prune-empty -f --index-filter 'git rm --cached --ignore-unmatch "glpi/${{ steps.release-name.outputs.basename }}-*.tar.gz"' HEAD\n cp -vf ${{ github.workspace }}/${{ steps.release-name.outputs.basename }}.tar.gz glpi\n cp -vf ${{ github.workspace }}/${{ env.release_name }}.tar.gz glpi\n # script argument should be the ordered list of builds to index\n ../tools/github-nightly-description.sh `ls -r glpi/*-*.tar.gz` > glpi/index.md\n echo "Repository status:"\n git status\n ls -lt glpi\n # Prepare commit\n echo "Adding GLPI ${{ env.release_name }} build"\n git add glpi/*\n git commit -m "Add GLPI ${{ env.release_name }} nightly build"\n # Force commit\n git push --force\n git status\n shell: bash\n env:\n FILTER_BRANCH_SQUELCH_WARNING: 1\n
dataset_sample\yaml\glpi-project_glpi\.github\workflows\nightly_build.yml
nightly_build.yml
YAML
4,402
0.8
0.097087
0.059406
vue-tools
434
2024-05-21T06:15:51.168451
BSD-3-Clause
false
9595c6bea75e830cd2a3f15eae5c24fe
# The full repository name\nrepo: go-gitea/gitea\n\n# Service type (gitea or github)\nservice: github\n\n# Base URL for Gitea instance if using gitea service type (optional)\n# Default: https://gitea.com\nbase-url:\n\n# Changelog groups and which labeled PRs to add to each group\ngroups:\n -\n name: BREAKING\n labels:\n - pr/breaking\n -\n name: SECURITY\n labels:\n - topic/security\n -\n name: FEATURES\n labels:\n - type/feature\n -\n name: ENHANCEMENTS\n labels:\n - type/enhancement\n -\n name: PERFORMANCE\n labels:\n - performance/memory\n - performance/speed\n - performance/bigrepo\n - performance/cpu\n -\n name: BUGFIXES\n labels:\n - type/bug\n -\n name: API\n labels:\n - modifies/api\n -\n name: TESTING\n labels:\n - type/testing\n -\n name: BUILD\n labels:\n - topic/build\n - topic/code-linting\n -\n name: DOCS\n labels:\n - type/docs\n -\n name: MISC\n default: true\n\n# regex indicating which labels to skip for the changelog\nskip-labels: skip-changelog|backport\/.+\n
dataset_sample\yaml\go\.changelog.yml
.changelog.yml
YAML
1,077
0.8
0.048387
0.103448
react-lib
570
2024-10-20T11:39:25.130268
GPL-3.0
false
3b972a232f6bd3c454bfe32197dc9e36
---\n\n# Main collection of env. vars to set for all tasks and scripts.\nenv:\n ####\n #### Global variables used for all tasks\n ####\n # Sane (default) value for GOPROXY and GOSUMDB.\n GOPROXY: "https://proxy.golang.org,direct"\n GOSUMDB: "sum.golang.org"\n # Overrides default location (/tmp/cirrus) for repo clone\n GOPATH: &gopath "/var/tmp/go"\n GOCACHE: "${GOPATH}/cache"\n GOSRC: &gosrc "/var/tmp/go/src/github.com/containers/podman"\n CIRRUS_WORKING_DIR: *gosrc\n # The default is 'sh' if unspecified\n CIRRUS_SHELL: "/bin/bash"\n # Save a little typing (path relative to $CIRRUS_WORKING_DIR)\n SCRIPT_BASE: "./contrib/cirrus"\n # Runner statistics log file path/name\n STATS_LOGFILE_SFX: 'runner_stats.log'\n STATS_LOGFILE: '$GOSRC/${CIRRUS_TASK_NAME}-${STATS_LOGFILE_SFX}'\n\n ####\n #### Cache-image names to test with (double-quotes around names are critical)\n ####\n FEDORA_NAME: "fedora-42"\n FEDORA_AARCH64_NAME: "${FEDORA_NAME}-aarch64"\n PRIOR_FEDORA_NAME: "fedora-41"\n RAWHIDE_NAME: "rawhide"\n DEBIAN_NAME: "debian-13"\n\n # Image identifiers\n IMAGE_SUFFIX: "c20250422t130822z-f42f41d13"\n\n # EC2 images\n FEDORA_AMI: "fedora-aws-${IMAGE_SUFFIX}"\n FEDORA_AARCH64_AMI: "fedora-podman-aws-arm64-${IMAGE_SUFFIX}"\n # GCP Images\n FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"\n PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"\n RAWHIDE_CACHE_IMAGE_NAME: "rawhide-${IMAGE_SUFFIX}"\n DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"\n # Container FQIN's\n FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"\n PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"\n WINDOWS_AMI: "win-server-wsl-${IMAGE_SUFFIX}"\n ####\n #### Control variables that determine what to run and how to run it.\n #### N/B: Required ALL of these are set for every single task.\n ####\n TEST_FLAVOR: # int, sys, ext_svc, validate, automation, etc.\n TEST_ENVIRON: host # 'host', or 'container'.\n PODBIN_NAME: podman # 'podman' or 
'remote'\n PRIV_NAME: root # 'root' or 'rootless'\n DISTRO_NV: # any {PRIOR_,}{FEDORA,DEBIAN}_NAME value\n VM_IMAGE_NAME: # One of the "Google-cloud VM Images" (above)\n CTR_FQIN: # One of the "Container FQIN's" (above)\n CI_DESIRED_RUNTIME: crun # As of 2024-05-28 there are no other supported runtimes\n CI_DESIRED_DATABASE: sqlite # 'sqlite' or 'boltdb'\n CI_DESIRED_STORAGE: overlay # overlay, vfs, or composefs (which is actually overlay)\n\n # Curl-command prefix for downloading task artifacts, simply add the\n # the url-encoded task name, artifact name, and path as a suffix.\n ART_URL: https://api.cirrus-ci.com/v1/artifact/build/${CIRRUS_BUILD_ID}\n ARTCURL: >-\n curl --retry 5 --retry-delay 8 --fail --location -O\n --url ${ART_URL}\n\n\n# Default timeout for each task\ntimeout_in: 20m\n\n\ngcp_credentials: ENCRYPTED[a28959877b2c9c36f151781b0a05407218cda646c7d047fc556e42f55e097e897ab63ee78369dae141dcf0b46a9d0cdd]\n\naws_credentials: ENCRYPTED[4ca070bffe28eb9b27d63c568b52970dd46f119c3a83b8e443241e895dbf1737580b4d84eed27a311a2b74287ef9f79f]\n\n\nvalidate-source_task:\n name: "Validate source code changes"\n alias: validate-source\n # This task is primarily intended to catch human-errors early on, in a\n # PR context. Skip running it everywhere else.\n only_if: &is_pr "$CIRRUS_PR != ''"\n gce_instance:\n image_project: libpod-218412\n zone: "us-central1-a"\n # golangci-lint is a very, very hungry beast.\n cpu: 8\n memory: "16Gb"\n # Required to be 200gig, do not modify - has i/o performance impact\n # according to gcloud CLI tool warning messages.\n disk: 200\n image_name: "${FEDORA_CACHE_IMAGE_NAME}" # from stdenvars\n env:\n TEST_FLAVOR: validate-source\n # NOTE: The default way Cirrus-CI clones is *NOT* compatible with\n # environment expectations in contrib/cirrus/lib.sh. 
Specifically\n # the 'origin' remote must be defined, and all remote branches/tags\n # must be available for reference from CI scripts.\n clone_script: &full_clone |\n set -exo pipefail\n cd /\n rm -rf $CIRRUS_WORKING_DIR\n mkdir -p $CIRRUS_WORKING_DIR\n if [[ -z "$CIRRUS_PR" ]]; then\n DEST_BRANCH="$CIRRUS_BRANCH"\n else\n DEST_BRANCH="$CIRRUS_BASE_BRANCH"\n fi\n git clone --recursive --branch=$DEST_BRANCH https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR\n cd $CIRRUS_WORKING_DIR\n git remote update origin\n if [[ -n "$CIRRUS_PR" ]]; then # running for a PR\n git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR\n git checkout pull/$CIRRUS_PR\n else\n git reset --hard $CIRRUS_CHANGE_IN_REPO\n fi\n # Standard setup stage call, used by nearly every task in CI.\n setup_script: &setup '$GOSRC/$SCRIPT_BASE/setup_environment.sh'\n golangci-lint_cache:\n folder: /root/.cache/golangci-lint\n reupload_on_changes: true\n fingerprint_script:\n - go version\n - grep GOLANGCI_LINT_VERSION Makefile | head -1\n # Standard main execution stage call, used by nearly every task in CI.\n main_script: &main '/usr/bin/time --verbose --output="$STATS_LOGFILE" $GOSRC/$SCRIPT_BASE/runner.sh'\n\n\n# N/B: This matrix of build tasks are critical to CI, along with the following\n# aarch64 task. They build binaries for all CI platforms, and versions. On\n# success, the contents of the repository are preserved as an artifact for\n# consumption by most subsequent CI tasks. 
This saves about 3-5 minutes of\n# otherwise duplicative effort in most tasks.\nbuild_task:\n alias: 'build'\n name: 'Build for $DISTRO_NV' # N/B: Referenced by URLencoded strings elsewhere\n gce_instance: &fastvm\n image_project: libpod-218412\n zone: "us-central1-a"\n cpu: 4\n memory: "4Gb"\n # Required to be 200gig, do not modify - has i/o performance impact\n # according to gcloud CLI tool warning messages.\n disk: 200\n image_name: "${VM_IMAGE_NAME}" # from stdenvars\n matrix: &platform_axis\n # Ref: https://cirrus-ci.org/guide/writing-tasks/#matrix-modification\n - env: &stdenvars\n DISTRO_NV: ${FEDORA_NAME}\n # Not used here, is used in other tasks\n VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}\n CTR_FQIN: ${FEDORA_CONTAINER_FQIN}\n - env:\n DISTRO_NV: ${PRIOR_FEDORA_NAME}\n VM_IMAGE_NAME: ${PRIOR_FEDORA_CACHE_IMAGE_NAME}\n CTR_FQIN: ${PRIOR_FEDORA_CONTAINER_FQIN}\n CI_DESIRED_DATABASE: boltdb\n CI_DESIRED_STORAGE: vfs\n - env:\n <<: *stdenvars\n DISTRO_NV: ${RAWHIDE_NAME}\n VM_IMAGE_NAME: ${RAWHIDE_CACHE_IMAGE_NAME}\n CI_DESIRED_STORAGE: composefs\n CTR_FQIN: ""\n - env:\n DISTRO_NV: ${DEBIAN_NAME}\n VM_IMAGE_NAME: ${DEBIAN_CACHE_IMAGE_NAME}\n env:\n TEST_FLAVOR: build\n clone_script: *full_clone\n # Attempt to prevent flakes by confirming basic environment expectations,\n # network service connectivity and essential container image availability.\n # TODO: Rename to "ci-sanity" and move into task that runs in parallel to build\n prebuild_script: &prebuild $SCRIPT_BASE/prebuild.sh\n setup_script: *setup\n # Attempt to prevent flakes by confirming automation environment and\n # all required external/3rd-party services are available and functional.\n main_script: *main\n # Attempt to catch code-quality and vendoring problems early.\n postbuild_script: &postbuild $SCRIPT_BASE/postbuild.sh\n # Cirrus-CI is very slow uploading one file at time, and the repo contains\n # thousands of files. 
Speed this up by archiving into tarball first.\n repo_prep_script: &repo_prep >-\n tar --zstd -cf /tmp/repo.tar.zst -C $GOSRC . && mv /tmp/repo.tar.zst $GOSRC/\n repo_artifacts: &repo_artifacts\n path: ./repo.tar.zst\n type: application/octet-stream\n always: &runner_stats\n runner_stats_artifacts:\n path: ./*-${STATS_LOGFILE_SFX}\n type: text/plain\n\n\nbuild_aarch64_task:\n alias: 'build_aarch64'\n name: 'Build for $DISTRO_NV'\n ec2_instance: &standard_build_ec2_aarch64\n image: ${VM_IMAGE_NAME}\n type: ${EC2_INST_TYPE}\n region: us-east-1\n architecture: arm64 # CAUTION: This has to be "arm64", not "aarch64".\n env: &stdenvars_aarch64\n EC2_INST_TYPE: "t4g.xlarge"\n DISTRO_NV: ${FEDORA_AARCH64_NAME}\n VM_IMAGE_NAME: ${FEDORA_AARCH64_AMI}\n CTR_FQIN: ${FEDORA_CONTAINER_FQIN}\n TEST_FLAVOR: build\n clone_script: *full_clone\n # TODO: Rename to "ci-sanity" and move into task that runs in parallel to build\n prebuild_script: *prebuild\n setup_script: *setup\n postbuild_script: *postbuild\n main_script: *main\n # Cirrus-CI is very slow uploading one file at time, and the repo contains\n # thousands of files. Speed this up by archiving into tarball first.\n repo_prep_script: *repo_prep\n repo_artifacts: *repo_artifacts\n always: *runner_stats\n\n\n# There are several other important variations of podman which\n# must always build successfully. 
Most of them are handled in\n# this task, though a few need dedicated tasks which follow.\nalt_build_task:\n name: "$ALT_NAME"\n alias: alt_build\n # Don't create task on rhel-release builds\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: &no_rhel_release |\n $CIRRUS_BRANCH !=~ 'v[0-9\.]+-rhel' &&\n $CIRRUS_BASE_BRANCH !=~ 'v[0-9\.]+-rhel'\n env:\n <<: *stdenvars\n TEST_FLAVOR: "altbuild"\n gce_instance: *fastvm\n matrix:\n - env:\n ALT_NAME: 'Build Each Commit'\n - env:\n # TODO: Replace with task using `winmake` to build\n # binary and archive installation zip file.\n ALT_NAME: 'Windows Cross' # N/B: Referenced by URLencoded strings elsewhere\n - env:\n ALT_NAME: 'Alt Arch. x86 Cross'\n - env:\n ALT_NAME: 'Alt Arch. ARM Cross'\n - env:\n ALT_NAME: 'Alt Arch. MIPS Cross'\n - env:\n ALT_NAME: 'Alt Arch. MIPS64 Cross'\n - env:\n ALT_NAME: 'Alt Arch. Other Cross'\n # This task cannot make use of the shared repo.tar.zst artifact.\n clone_script: *full_clone\n setup_script: *setup\n main_script: *main\n # Produce a new repo.tar.zst artifact for consumption by 'artifacts' task.\n repo_prep_script: *repo_prep\n repo_artifacts: *repo_artifacts\n always: *runner_stats\n\n\n# Confirm building the remote client, natively on a Mac OS-X VM.\nosx_alt_build_task:\n name: "Build for MacOS amd64+arm64" # N/B: Referenced by URLencoded strings elsewhere\n alias: osx_alt_build\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: *no_rhel_release # RHEL never releases podman mac installer binary\n persistent_worker: &mac_pw\n labels:\n os: darwin\n arch: arm64\n purpose: prod\n env: &mac_env\n CIRRUS_SHELL: "/bin/bash" # sh is the default\n CIRRUS_WORKING_DIR: "$HOME/ci/task-${CIRRUS_TASK_ID}" # Isolation: $HOME will be set to "ci" dir.\n # Prevent cache-pollution fron one task to the next.\n GOPATH: "$CIRRUS_WORKING_DIR/.go"\n GOCACHE: "$CIRRUS_WORKING_DIR/.go/cache"\n GOENV: "$CIRRUS_WORKING_DIR/.go/support"\n GOSRC: "$HOME/ci/task-${CIRRUS_TASK_ID}"\n clone_script: *full_clone\n # 
This host is/was shared with potentially many other CI tasks.\n # The previous task may have been canceled or aborted.\n prep_script: &mac_cleanup "contrib/cirrus/mac_cleanup.sh"\n lint_script:\n - make golangci-lint\n basic_build_script:\n - make .install.ginkgo\n - make podman-remote\n - make podman-mac-helper\n build_pkginstaller_script:\n - pushd contrib/pkginstaller\n - make ARCH=amd64 NO_CODESIGN=1 pkginstaller\n - make ARCH=aarch64 NO_CODESIGN=1 pkginstaller\n - make ARCH=universal NO_CODESIGN=1 pkginstaller\n - popd\n build_amd64_script:\n - make podman-remote-release-darwin_amd64.zip\n # Building arm podman needs to be the last thing built in this task\n # The Mac tests rely this Podman binary to run, and the CI Mac is ARM-based\n build_arm64_script:\n - make podman-remote-release-darwin_arm64.zip\n # Produce a new repo.tar.zst artifact for consumption by dependent tasks.\n repo_prep_script: *repo_prep\n repo_artifacts: *repo_artifacts\n # This host is/was shared with potentially many other CI tasks.\n # Ensure nothing is left running while waiting for the next task.\n always:\n task_cleanup_script: *mac_cleanup\n\n\n# Build freebsd release natively on a FreeBSD VM.\nfreebsd_alt_build_task:\n name: "FreeBSD Cross"\n alias: freebsd_alt_build\n # Only run on 'main' and PRs against 'main'\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: $CIRRUS_BRANCH == 'main' || $CIRRUS_BASE_BRANCH == 'main'\n env:\n <<: *stdenvars\n # Functional FreeBSD builds must be built natively since they depend on CGO\n DISTRO_NV: freebsd-13\n VM_IMAGE_NAME: notyet\n CTR_FQIN: notyet\n CIRRUS_SHELL: "/bin/sh"\n TEST_FLAVOR: "altbuild"\n ALT_NAME: 'FreeBSD Cross'\n freebsd_instance:\n image_family: freebsd-13-4\n # golangci-lint is a very, very hungry beast.\n cpu: 4\n memory: 8Gb\n setup_script:\n - pkg install -y gpgme bash go-md2man gmake gsed gnugrep go pkgconf zstd\n - go version # Downloads a new go version based on go.mod's go directive.\n golint_cache:\n folder: 
~/.cache/golangci-lint\n reupload_on_changes: true\n fingerprint_script:\n - go version\n - grep GOLANGCI_LINT_VERSION Makefile | head -1\n lint_script:\n - gmake golangci-lint\n build_amd64_script:\n - gmake podman-release\n # This task cannot make use of the shared repo.tar.zst artifact and must\n # produce a new repo.tar.zst artifact for consumption by 'artifacts' task.\n repo_prep_script: *repo_prep\n repo_artifacts: *repo_artifacts\n\n\n# Status aggregator for all builds. This task simply makes dependency\n# management easier, and results in a simpler graph that using YAML\n# anchors/aliases.\nbuild_success_task:\n name: "Total Build Success"\n alias: build_success\n depends_on:\n - validate-source\n - build\n - build_aarch64\n - alt_build\n - osx_alt_build\n - freebsd_alt_build\n env:\n CTR_FQIN: ${FEDORA_CONTAINER_FQIN}\n container: &smallcontainer\n image: ${CTR_FQIN}\n # Resources are limited across ALL currently executing tasks\n # ref: https://cirrus-ci.org/guide/linux/#linux-containers\n cpu: 1\n memory: 1\n clone_script: &noop mkdir -p "$CIRRUS_WORKING_DIR"\n script: *noop\n\n\n# Exercise the "libpod" API with a small set of common\n# operations to ensure they are functional.\nbindings_task:\n name: "Test Bindings"\n alias: bindings\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - bindings test code is changed; or\n # - actual source code changed\n only_if: >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('pkg/bindings/test/**') ||\n (changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n depends_on: &build\n - build_success\n gce_instance: &standardvm\n <<: *fastvm\n cpu: 2\n env:\n <<: *stdenvars\n TEST_FLAVOR: bindings\n # N/B: 
This script depends on ${DISTRO_NV} being defined for the task.\n clone_script: &get_gosrc |\n cd /tmp\n echo "$ARTCURL/Build%20for%20${DISTRO_NV}/repo/repo.tar.zst"\n time $ARTCURL/Build%20for%20${DISTRO_NV}/repo/repo.tar.zst\n time tar -xf /tmp/repo.tar.zst -C $GOSRC\n setup_script: *setup\n main_script: *main\n always: &logs_artifacts\n <<: *runner_stats\n # Required for `contrib/cirrus/logformatter` to work properly\n html_artifacts:\n path: ./*.html\n type: text/html\n server_log_artifacts:\n path: ./podman-server.log\n type: text/plain\n cleanup_tracer_artifacts:\n path: ./podman-cleanup-tracer.log\n type: text/plain\n df_script: '$SCRIPT_BASE/logcollector.sh df'\n audit_log_script: '$SCRIPT_BASE/logcollector.sh audit'\n journal_script: '$SCRIPT_BASE/logcollector.sh journal'\n podman_system_info_script: '$SCRIPT_BASE/logcollector.sh podman'\n time_script: '$SCRIPT_BASE/logcollector.sh time'\n\n\n# Build the "libpod" API documentation `swagger.yaml` and\n# publish it to google-cloud-storage (GCS).\nswagger_task:\n name: "Test Swagger"\n alias: swagger\n depends_on: *build\n gce_instance: *standardvm\n env:\n <<: *stdenvars\n TEST_FLAVOR: swagger\n CTR_FQIN: 'quay.io/libpod/gcsupld:${IMAGE_SUFFIX}'\n GCPJSON: ENCRYPTED[927dc01e755eaddb4242b0845cf86c9098d1e3dffac38c70aefb1487fd8b4fe6dd6ae627b3bffafaba70e2c63172664e]\n GCPNAME: ENCRYPTED[c145e9c16b6fb88d476944a454bf4c1ccc84bb4ecaca73bdd28bdacef0dfa7959ebc8171a27b2e4064d66093b2cdba49]\n GCPPROJECT: 'libpod-218412'\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always:\n <<: *runner_stats\n swagger_artifacts:\n path: ./swagger.yaml\n type: text/plain\n\n\nwin_installer_task:\n name: "Verify Win Installer Build"\n matrix:\n - env:\n CONTAINERS_MACHINE_PROVIDER: 'wsl'\n - env:\n CONTAINERS_MACHINE_PROVIDER: 'hyperv'\n alias: win_installer\n only_if: *no_rhel_release\n depends_on: *build\n ec2_instance: &windows\n image: "${WINDOWS_AMI}"\n type: m5.large\n region: us-east-1\n platform: 
windows\n env: &winenv\n CIRRUS_WORKING_DIR: &wincwd "${LOCALAPPDATA}\\cirrus-ci-build"\n CIRRUS_SHELL: powershell\n PATH: "${PATH};C:\\ProgramData\\chocolatey\\bin"\n DISTRO_NV: "windows"\n PRIV_NAME: "rootless"\n # Fake version, we are only testing the installer functions, so version doesn't matter\n WIN_INST_VER: 9.9.9\n # It's HIGHLY desireable to use the same binary throughout CI. Otherwise, if\n # there's a toolchain or build-environment specific problem, it can be incredibly\n # difficult (and non-obvious) to debug.\n clone_script: &winclone |\n $ErrorActionPreference = 'Stop'\n $ProgressPreference = 'SilentlyContinue'\n New-Item -ItemType Directory -Force -Path "$ENV:CIRRUS_WORKING_DIR"\n Set-Location "$ENV:CIRRUS_WORKING_DIR"\n $uri = "${ENV:ART_URL}/Windows Cross/repo/repo.tar.zst"\n Write-Host "Downloading $uri"\n For($i = 0;;) {\n Try {\n Invoke-WebRequest -UseBasicParsing -ErrorAction Stop -OutFile "repo.tar.zst" `\n -Uri "$uri"\n Break\n } Catch {\n if (++$i -gt 6) {\n throw $_.Exception\n }\n Write-Host "Download failed - retrying:" $_.Exception.Response.StatusCode\n Start-Sleep -Seconds 10\n }\n }\n Write-Host "zstd -d repo.tar.zst"\n zstd -d repo.tar.zst\n if ($LASTEXITCODE -ne 0) {\n throw "Extract repo.tar.zst failed"\n Exit 1\n }\n Write-Host "arc unarchive repo.tar .\"\n arc unarchive repo.tar .\repo\n if ($LASTEXITCODE -ne 0) {\n throw "Unarchive repo.tar failed"\n Exit 1\n }\n Get-ChildItem -Path .\n Get-ChildItem -Path .\repo\n main_script: ".\\repo\\contrib\\cirrus\\win-installer-main.ps1"\n\n\n# Verify podman is compatible with the docker python-module.\ndocker-py_test_task:\n name: Docker-py Compat.\n alias: docker-py_test\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - docker-py test code is changed; or\n # - actual source code changed\n only_if: >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 
'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('test/python/**') ||\n (changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n depends_on: *build\n gce_instance: *standardvm\n env:\n <<: *stdenvars\n TEST_FLAVOR: docker-py\n TEST_ENVIRON: container\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *runner_stats\n\n\n# Does exactly what it says, execute the podman unit-tests on Fedora.\nunit_test_task:\n name: "Unit tests on $DISTRO_NV"\n alias: unit_test\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - unit test files are changed (contains a false positves such as test/e2e/\n # but that should not be an issue, it only runs when it doesn't have to)\n # - actual source code changed\n only_if: >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('**/*_test.go') ||\n (changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n depends_on: *build\n matrix:\n - env: *stdenvars\n # Special-case: Rootless on latest Fedora (standard) VM\n - name: "Rootless unit on $DISTRO_NV"\n env:\n <<: *stdenvars\n PRIV_NAME: rootless\n gce_instance: *standardvm\n env:\n TEST_FLAVOR: unit\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *logs_artifacts\n\n\nunit_test_windows_task:\n name: "Unit tests on Windows"\n alias: unit_test_windows\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - unit test files are changed (contains a false positves such as test/e2e/\n # but that should not be an 
issue, it only runs when it doesn't have to)\n # - actual source code changed\n only_if: >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('winmake.ps1') ||\n changesInclude('**/*_test.go') ||\n (changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n # Special case, we do not run macos/windows builds on rhel branches.\n # Thus the machine task should not be run too, while we use only_if\n # everywhere to do so here it would mean we would need duplicate the\n # full big only_if condition which is more difficult to maintain so\n # use the skip here.\n skip: &skip_rhel_release |\n $CIRRUS_BRANCH =~ 'v[0-9\.]+-rhel' ||\n $CIRRUS_BASE_BRANCH =~ 'v[0-9\.]+-rhel'\n depends_on: *build\n ec2_instance: *windows\n timeout_in: 20m\n env:\n <<: *winenv\n TEST_FLAVOR: unit\n clone_script: *winclone\n main_script: ".\\repo\\contrib\\cirrus\\win-unit-main.ps1"\n always:\n # Required for `contrib/cirrus/logformatter` to work properly\n html_artifacts:\n path: ./*.html\n type: text/html\n\n\napiv2_test_task:\n name: "APIv2 test on $DISTRO_NV ($PRIV_NAME)"\n alias: apiv2_test\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - apiv2 test code is changed; or\n # - actual source code changed\n only_if: >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('test/apiv2/**', 'test/python/**') ||\n (changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n depends_on: *build\n gce_instance: *standardvm\n env:\n <<: *stdenvars\n TEST_FLAVOR: 
apiv2\n matrix:\n - env:\n PRIV_NAME: root\n - env:\n PRIV_NAME: rootless\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *logs_artifacts\n\n\ncompose_test_task:\n name: "$TEST_FLAVOR test on $DISTRO_NV ($PRIV_NAME)"\n alias: compose_test\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - compose test code is changed; or\n # - actual source code changed\n only_if: >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('test/compose/**') ||\n (changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n depends_on: *build\n gce_instance: *standardvm\n matrix:\n - env:\n PRIV_NAME: root\n - env:\n PRIV_NAME: rootless\n env:\n <<: *stdenvars\n TEST_FLAVOR: compose_v2\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *logs_artifacts\n\n\n# versions, as root, without involving the podman-remote client.\nlocal_integration_test_task: &local_integration_test_task\n # Integration-test task name convention:\n # <int.|sys.> <podman|remote> <Distro NV> <root|rootless>\n name: &std_name_fmt "$TEST_FLAVOR $PODBIN_NAME $DISTRO_NV $PRIV_NAME $TEST_ENVIRON ${CI_DESIRED_DATABASE}"\n alias: local_integration_test\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - int test code is changed; or\n # - actual source code changed\n only_if: &only_if_int_test >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('test/e2e/**', 'test/utils/**') ||\n 
(changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n depends_on: *build\n matrix: *platform_axis\n # integration tests scale well with cpu as they are parallelized\n # so we give these tests 4 cores to make them faster\n gce_instance: *fastvm\n env:\n TEST_FLAVOR: int\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: &int_logs_artifacts\n <<: *logs_artifacts\n ginkgo_node_logs_artifacts:\n path: ./test/e2e/ginkgo-node-*.log\n type: text/plain\n\n\n# Nearly identical to `local_integration_test` except all operations\n# are performed through the podman-remote client vs a podman "server"\n# running on the same host.\nremote_integration_test_task:\n <<: *local_integration_test_task\n alias: remote_integration_test\n env:\n TEST_FLAVOR: int\n PODBIN_NAME: remote\n\n\n# Run the complete set of integration tests from inside a container.\n# This verifies all/most operations function with "podman-in-podman".\ncontainer_integration_test_task:\n name: *std_name_fmt\n alias: container_integration_test\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: *only_if_int_test\n depends_on: *build\n matrix: &fedora_vm_axis\n - env:\n DISTRO_NV: ${FEDORA_NAME}\n VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}\n CTR_FQIN: ${FEDORA_CONTAINER_FQIN}\n - env:\n DISTRO_NV: ${PRIOR_FEDORA_NAME}\n VM_IMAGE_NAME: ${PRIOR_FEDORA_CACHE_IMAGE_NAME}\n CTR_FQIN: ${PRIOR_FEDORA_CONTAINER_FQIN}\n CI_DESIRED_DATABASE: boltdb\n gce_instance: *fastvm\n env:\n TEST_FLAVOR: int\n TEST_ENVIRON: container\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *int_logs_artifacts\n\n\n# Execute most integration tests as a regular (non-root) user.\nrootless_integration_test_task:\n name: *std_name_fmt\n alias: rootless_integration_test\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: *only_if_int_test\n depends_on: *build\n matrix: *platform_axis\n gce_instance: *fastvm\n env:\n TEST_FLAVOR: int\n PRIV_NAME: 
rootless\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *int_logs_artifacts\n\n\npodman_machine_task:\n name: *std_name_fmt\n alias: podman_machine\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - machine code files are changed\n only_if: &only_if_machine_test >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('cmd/podman/machine/**', 'pkg/machine/**', '**/*machine*.go')\n depends_on: *build\n ec2_instance:\n image: "${VM_IMAGE_NAME}"\n type: "${EC2_INST_TYPE}"\n region: us-east-1\n timeout_in: 30m\n env:\n EC2_INST_TYPE: "m5zn.metal" # Bare-metal instance is required\n TEST_FLAVOR: "machine-linux"\n PRIV_NAME: "rootless" # intended use-case\n DISTRO_NV: "${FEDORA_NAME}"\n VM_IMAGE_NAME: "${FEDORA_AMI}"\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *int_logs_artifacts\n\n\npodman_machine_aarch64_task:\n name: *std_name_fmt\n alias: podman_machine_aarch64\n only_if: *only_if_machine_test\n depends_on: *build\n ec2_instance:\n <<: *standard_build_ec2_aarch64\n timeout_in: 30m\n env:\n TEST_FLAVOR: "machine-linux"\n EC2_INST_TYPE: c6g.metal\n PRIV_NAME: "rootless" # intended use-case\n DISTRO_NV: "${FEDORA_AARCH64_NAME}"\n VM_IMAGE_NAME: "${FEDORA_AARCH64_AMI}"\n clone_script: &get_gosrc_aarch64 |\n cd /tmp\n echo "$ARTCURL/build_aarch64/repo/repo.tar.zst"\n time $ARTCURL/build_aarch64/repo/repo.tar.zst\n time tar -xf /tmp/repo.tar.zst -C $GOSRC\n setup_script: *setup\n main_script: *main\n always: *int_logs_artifacts\n\n\npodman_machine_windows_task:\n name: *std_name_fmt\n alias: podman_machine_windows\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: *only_if_machine_test\n # Special case, we do not run macos/windows builds on 
rhel branches.\n # Thus the machine task should not be run too, while we use only_if\n # everywhere to do so here it would mean we would need duplicate the\n # full big only_if condition which is more difficult to maintain so\n # use the skip here.\n skip: *skip_rhel_release\n depends_on: *build\n ec2_instance:\n <<: *windows\n type: z1d.metal\n platform: windows\n timeout_in: 60m\n env: *winenv\n matrix:\n - env:\n TEST_FLAVOR: "machine-wsl"\n - env:\n TEST_FLAVOR: "machine-hyperv"\n clone_script: *winclone\n # This depends on an instance with an local NVMe storage so we can make use of fast IO\n # Our machine tests are IO bound so this is rather imporant to speed them up a lot.\n setup_disk_script: |\n echo "Get-Disk"\n Get-Disk | Ft -autosize | out-string -width 4096\n # Hard coded to disk 0, assume that this is always the case for our ec2 instance.\n # It is not clear to me how I would filter by name because we still have two disks\n # with the same name.\n echo "Format and mount disk 0"\n $disk = Get-Disk 0\n $disk | Initialize-Disk -PartitionStyle MBR\n $disk | New-Partition -UseMaximumSize -MbrType IFS\n $Partition = Get-Partition -DiskNumber $disk.Number\n $Partition | Format-Volume -FileSystem NTFS -Confirm:$false\n $Partition | Add-PartitionAccessPath -AccessPath "Z:\"\n echo "Get-Volume"\n Get-Volume\n main_script: ".\\repo\\contrib\\cirrus\\win-podman-machine-main.ps1"\n always:\n # Required for `contrib/cirrus/logformatter` to work properly\n html_artifacts:\n path: ./*.html\n type: text/html\n\n\npodman_machine_mac_task:\n name: *std_name_fmt\n alias: podman_machine_mac\n only_if: *only_if_machine_test\n skip: *skip_rhel_release\n depends_on: *build\n persistent_worker: *mac_pw\n timeout_in: 60m\n env:\n <<: *mac_env\n DISTRO_NV: "darwin"\n PRIV_NAME: "rootless" # intended use-case\n matrix:\n - env:\n # See contrib/cirrus/mac_runner.sh\n TEST_FLAVOR: "machine-applehv"\n - env:\n # See contrib/cirrus/mac_runner.sh\n TEST_FLAVOR: "machine-libkrun"\n 
clone_script: # artifacts from osx_alt_build_task\n - mkdir -p $CIRRUS_WORKING_DIR\n - cd $CIRRUS_WORKING_DIR\n - $ARTCURL/Build%20for%20MacOS%20amd64%2Barm64/repo/repo.tar.zst\n - tar -xf repo.tar.zst\n # This host is/was shared with potentially many other CI tasks.\n # The previous task may have been canceled or aborted.\n prep_script: *mac_cleanup\n setup_script: "contrib/cirrus/mac_setup.sh"\n env_script: "contrib/cirrus/mac_env.sh"\n # TODO: Timeout bumped b/c initial image download (~5min) and VM\n # resize (~2min) causes test-timeout (90s default). Should\n # tests deal with this internally?\n test_script:\n - "contrib/cirrus/mac_runner.sh"\n # This host is/was shared with potentially many other CI tasks.\n # Ensure nothing is left running while waiting for the next task.\n always:\n # Required for `contrib/cirrus/logformatter` to work properly\n html_artifacts:\n path: ./*.html\n type: text/html\n task_cleanup_script: *mac_cleanup\n\n\n# Always run subsequent to integration tests. While parallelism is lost\n# with runtime, debugging system-test failures can be more challenging\n# for some golang developers. 
Otherwise the following tasks run across\n# the same matrix as the integration-tests (above).\nlocal_system_test_task: &local_system_test_task\n name: *std_name_fmt\n alias: local_system_test\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - sys test code is changed; or\n # - actual source code changed\n only_if: &only_if_system_test >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('test/system/**') ||\n (changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n depends_on: *build\n matrix: *platform_axis\n gce_instance: *fastvm\n timeout_in: 25m\n env:\n TEST_FLAVOR: sys\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *logs_artifacts\n\n\nlocal_system_test_aarch64_task: &local_system_test_task_aarch64\n name: *std_name_fmt\n alias: local_system_test_aarch64\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: *only_if_system_test\n depends_on: *build\n ec2_instance: *standard_build_ec2_aarch64\n timeout_in: 35m\n env:\n <<: *stdenvars_aarch64\n TEST_FLAVOR: sys\n DISTRO_NV: ${FEDORA_AARCH64_NAME}\n clone_script: *get_gosrc_aarch64\n setup_script: *setup\n main_script: *main\n always: *logs_artifacts\n\n\nremote_system_test_task:\n <<: *local_system_test_task\n alias: remote_system_test\n env:\n TEST_FLAVOR: sys\n PODBIN_NAME: remote\n\n\nremote_system_test_aarch64_task:\n <<: *local_system_test_task_aarch64\n alias: remote_system_test_aarch64\n env:\n TEST_FLAVOR: sys\n PODBIN_NAME: remote\n\n\nrootless_remote_system_test_task:\n matrix:\n # Minimal sanity testing: only the latest Fedora\n - env:\n DISTRO_NV: ${FEDORA_NAME}\n # Not used here, is used in other tasks\n VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}\n 
CTR_FQIN: ${FEDORA_CONTAINER_FQIN}\n <<: *local_system_test_task\n alias: rootless_remote_system_test\n gce_instance: *fastvm\n timeout_in: 25m\n env:\n TEST_FLAVOR: sys\n PODBIN_NAME: remote\n PRIV_NAME: rootless\n\n\nrootless_system_test_task:\n name: *std_name_fmt\n alias: rootless_system_test\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: *only_if_system_test\n depends_on: *build\n matrix: *platform_axis\n gce_instance: *fastvm\n timeout_in: 25m\n env:\n TEST_FLAVOR: sys\n PRIV_NAME: rootless\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *logs_artifacts\n\n\nfarm_test_task:\n name: *std_name_fmt\n alias: farm_test\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - farm test code is changed or one of the shared helper import files from the system test; or\n # - actual source code changed\n only_if: >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('test/farm/**', 'test/system/*.bash') ||\n (changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n depends_on: *build\n gce_instance: *standardvm\n env:\n <<: *stdenvars\n TEST_FLAVOR: farm\n PRIV_NAME: rootless\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *logs_artifacts\n\nbuildah_bud_test_task:\n name: *std_name_fmt\n alias: buildah_bud_test\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - podman build source code files or bud tests files are changed\n # (vendor updates, i.e. 
buildah, are already covered in the main rules)\n only_if: >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('**/*build*.go', 'test/buildah-bud/**')\n depends_on: *build\n env:\n <<: *stdenvars\n TEST_FLAVOR: bud\n matrix:\n - env:\n PODBIN_NAME: podman\n - env:\n PODBIN_NAME: remote\n gce_instance: *fastvm\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *int_logs_artifacts\n\nupgrade_test_task:\n name: "Upgrade test: from $PODMAN_UPGRADE_FROM"\n alias: upgrade_test\n # Docs: ./contrib/cirrus/CIModes.md (Cirrus Task contexts and runtime modes)\n # only when: - main rules (see doc above); or\n # - upgrade test code is changed or one of the shared helper import files from the system test; or\n # - actual source code changed\n only_if: >-\n $CIRRUS_PR == '' ||\n $CIRRUS_CHANGE_TITLE =~ '.*CI:ALL.*' ||\n changesInclude('.cirrus.yml', 'Makefile', 'contrib/cirrus/**', 'vendor/**', 'test/tools/**', 'test/registries*.conf', 'hack/**', 'version/rawversion/*') ||\n changesInclude('test/upgrade/**', 'test/system/*.bash') ||\n (changesInclude('**/*.go', '**/*.c', '**/*.h') && !changesIncludeOnly('test/**', 'pkg/machine/e2e/**'))\n depends_on: *build\n matrix:\n - env:\n # 2024-02: as long as possible/reasonable, try to keep\n # one version < 4.8 so we can test boltdb. 
v4.3.1 is\n # the lowest we can go right now, builds before that\n # have netavark <1.4 which hangs on f39 kernel (#21863).\n PODMAN_UPGRADE_FROM: v4.3.1\n - env:\n PODMAN_UPGRADE_FROM: v4.8.0\n gce_instance: *standardvm\n env:\n TEST_FLAVOR: upgrade_test\n DISTRO_NV: ${FEDORA_NAME}\n VM_IMAGE_NAME: ${FEDORA_CACHE_IMAGE_NAME}\n # Never force a DB, let the old version decide its default\n CI_DESIRED_DATABASE:\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n always: *logs_artifacts\n\n\n# This task is critical. It updates the "last-used by" timestamp stored\n# in metadata for all VM images. This mechanism functions in tandem with\n# an out-of-band pruning operation to remove disused VM images.\nmeta_task:\n name: "VM img. keepalive"\n alias: meta\n container:\n cpu: 2\n memory: 2\n image: quay.io/libpod/imgts:latest\n env:\n # Space-separated list of images used by this repository state\n IMGNAMES: >-\n ${FEDORA_CACHE_IMAGE_NAME}\n ${PRIOR_FEDORA_CACHE_IMAGE_NAME}\n ${RAWHIDE_CACHE_IMAGE_NAME}\n ${DEBIAN_CACHE_IMAGE_NAME}\n EC2IMGNAMES: >-\n ${FEDORA_AARCH64_AMI}\n ${FEDORA_AMI}\n ${WINDOWS_AMI}\n BUILDID: "${CIRRUS_BUILD_ID}"\n REPOREF: "${CIRRUS_REPO_NAME}"\n AWSINI: ENCRYPTED[21b2db557171b11eb5abdbccae593f48c9caeba86dfcc4d4ff109edee9b4656ab6720a110dadfcd51e88cc59a71cc7af]\n GCPJSON: ENCRYPTED[3a198350077849c8df14b723c0f4c9fece9ebe6408d35982e7adf2105a33f8e0e166ed3ed614875a0887e1af2b8775f4]\n GCPNAME: ENCRYPTED[2f9738ef295a706f66a13891b40e8eaa92a89e0e87faf8bed66c41eca72bf76cfd190a6f2d0e8444c631fdf15ed32ef6]\n GCPPROJECT: libpod-218412\n clone_script: *noop\n script: /usr/local/bin/entrypoint.sh\n\n\n# Status aggregator for all tests. 
This task ensures a defined set of tasks\n# all passed, and allows confirming that based on the status of this task.\nsuccess_task:\n # N/B: The prow merge-bot (tide) is sensitized to this exact name, DO NOT CHANGE IT.\n # Ref: https://github.com/openshift/release/pull/48855\n name: "Total Success"\n alias: success\n # N/B: ALL tasks must be listed here, minus their '_task' suffix.\n depends_on:\n - build_success\n - bindings\n - swagger\n - win_installer\n - docker-py_test\n - unit_test\n - unit_test_windows\n - apiv2_test\n - compose_test\n - local_integration_test\n - remote_integration_test\n - container_integration_test\n - rootless_integration_test\n - podman_machine\n - podman_machine_aarch64\n - podman_machine_windows\n - podman_machine_mac\n - local_system_test\n - local_system_test_aarch64\n - remote_system_test\n - remote_system_test_aarch64\n - rootless_remote_system_test\n - rootless_system_test\n - farm_test\n - buildah_bud_test\n - upgrade_test\n - meta\n env:\n CTR_FQIN: ${FEDORA_CONTAINER_FQIN}\n container: *smallcontainer\n clone_script: *noop\n script: *noop\n\n# WARNING: Most of the artifacts captured here are also have their\n# permalinks present in the `DOWNLOADS.md` file. 
Any changes made\n# here, should probably be reflected in that document.\nartifacts_task:\n name: "Artifacts"\n alias: artifacts\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: *no_rhel_release\n depends_on:\n - success\n # This task is a secondary/convenience for downstream consumers, don't\n # block development progress if there is a failure in a PR, only break\n # when running on branches or tags.\n allow_failures: $CIRRUS_PR != ''\n container: *smallcontainer\n env:\n CTR_FQIN: ${FEDORA_CONTAINER_FQIN}\n TEST_ENVIRON: container\n # In order to keep the download URL and Cirrus-CI artifact.zip contents\n # simple, nothing should exist in $CIRRUS_WORKING_DIR except for artifacts.\n clone_script: *noop\n fedora_binaries_script:\n - mkdir -p /tmp/fed\n - cd /tmp/fed\n - $ARTCURL/Build%20for%20${FEDORA_NAME}/repo/repo.tar.zst\n - tar -xf repo.tar.zst\n - cp ./bin/* $CIRRUS_WORKING_DIR/\n win_binaries_script:\n - mkdir -p /tmp/win\n - cd /tmp/win\n - $ARTCURL/Windows%20Cross/repo/repo.tar.zst\n - tar -xf repo.tar.zst\n - mv ./podman-remote*.zip $CIRRUS_WORKING_DIR/\n osx_binaries_script:\n - mkdir -p /tmp/osx\n - cd /tmp/osx\n - $ARTCURL/Build%20for%20MacOS%20amd64%2Barm64/repo/repo.tar.zst\n - tar -xf repo.tar.zst\n - mv ./podman-remote-release-darwin_*.zip $CIRRUS_WORKING_DIR/\n - mv ./contrib/pkginstaller/out/podman-installer-macos-*.pkg $CIRRUS_WORKING_DIR/\n always:\n contents_script: ls -la $CIRRUS_WORKING_DIR\n # Produce downloadable files and an automatic zip-file accessible\n # by a consistent URL, based on contents of $CIRRUS_WORKING_DIR\n # Ref: https://cirrus-ci.org/guide/writing-tasks/#latest-build-artifacts\n binary_artifacts:\n path: ./*\n type: application/octet-stream\n\n\n# When a new tag is pushed, confirm that the code and commits\n# meet criteria for an official release.\nrelease_task:\n name: "Verify Release"\n alias: release\n # This should _only_ run for new tags\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: $CIRRUS_TAG != ''\n 
depends_on:\n - build_success\n - success\n gce_instance: *standardvm\n env:\n <<: *stdenvars\n TEST_FLAVOR: release\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n\n\n# When preparing to release a new version, this task may be manually\n# activated at the PR stage to verify the build is proper for a potential\n# podman release.\n#\n# Note: This cannot use a YAML alias on 'release_task' as of this\n# comment, it is incompatible with 'trigger_type: manual'\nrelease_test_task:\n name: "Optional Release Test"\n alias: release_test\n # Release-PRs always include "release" or "Bump" in the title\n # Docs: ./contrib/cirrus/CIModes.md\n only_if: $CIRRUS_CHANGE_TITLE =~ '.*((release)|(bump)).*'\n # Allow running manually only as part of release-related builds\n # see RELEASE_PROCESS.md\n trigger_type: manual\n depends_on:\n - build_success\n - success\n gce_instance: *standardvm\n env:\n <<: *stdenvars\n TEST_FLAVOR: release\n clone_script: *get_gosrc\n setup_script: *setup\n main_script: *main\n
dataset_sample\yaml\go\.cirrus.yml
.cirrus.yml
YAML
48,000
0.95
0.03962
0.199831
python-kit
101
2025-02-21T02:52:22.781497
Apache-2.0
false
1a48a9ba33cd3881a0a8ead872f866b1
# CLOMonitor metadata file\n# This file must be located at the root of the repository\n\n# Checks exemptions\nexemptions:\n - check: slack_presence # Check identifier (see https://github.com/cncf/clomonitor/blob/main/docs/checks.md#exemptions)\n reason: "This project has a separate Slack workspace that predates its donation to CNCF. A link to it is present in the README." # Justification of this exemption (mandatory, it will be displayed on the UI)\n\n
dataset_sample\yaml\go\.clomonitor.yml
.clomonitor.yml
YAML
452
0.8
0
0.5
python-kit
394
2024-09-20T19:21:21.250172
GPL-3.0
false
145de146563b1559650a3bd9798fa921
# see https://docs.codecov.com/docs/ignoring-paths\nignore:\n - "**/*_gen.go"\n - "**/*_mock.go"\n - "**/*.pb.go"\n - "**/pb.mock.go"\n - "api"\n - "cmd"\n - "common/testing"\n - "tests"\n - "tools"\n\n# disable pull request comments\ncomment: false\n\n# disable GitHub checks\ngithub_checks: false\n\n# disable GitHub status\ncoverage:\n status:\n project: off\n patch: off
dataset_sample\yaml\go\.codecov.yml
.codecov.yml
YAML
368
0.8
0
0.2
awesome-app
906
2024-08-02T22:28:15.515380
BSD-3-Clause
false
6ca6eb7f54d41024bd4ccf84974499b9
# Copyright 2017 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nchart-repos:\n - metrics-server=https://kubernetes-sigs.github.io/metrics-server/\n - cert-manager=https://charts.jetstack.io\n - ingress-nginx=https://kubernetes.github.io/ingress-nginx\n - kong=https://charts.konghq.com\ndebug: false\n# additional-commands:\n# - helm kubeconform {{ .Path }} --config charts/helm-chart/.kubeconform\n
dataset_sample\yaml\go\.ct.yml
.ct.yml
YAML
918
0.95
0.043478
0.727273
python-kit
303
2023-08-06T02:51:47.450184
MIT
false
b1d1824ef5fc9b144459078770337bc3
# The golangci-lint version used to build the custom binary.\n# Required.\nversion: v2.0.0\n\n# The name of the custom binary.\n# Optional.\n# Default: custom-gcl\nname: custom-golangci-lint\n\n# The directory path used to store the custom binary.\n# Optional.\n# Default: .\ndestination: ./my/path/\n\n# The list of the plugins to integrate inside the custom binary.\nplugins:\n # a plugin from a Go proxy\n - module: 'github.com/example/plugin3'\n version: v1.2.3\n\n # a plugin from a Go proxy (with a specific import path)\n - module: 'github.com/example/plugin4'\n import: 'github.com/example/plugin4/foo'\n version: v1.0.0\n\n # a plugin from local source (with absolute path)\n - module: 'github.com/example/plugin2'\n path: /my/local/path/plugin2\n\n # a plugin from local source (with relative path)\n - module: 'github.com/example/plugin1'\n path: ./my/local/path/plugin1\n\n # a plugin from local source (with absolute path and a specific import path)\n - module: 'github.com/example/plugin2'\n import: 'github.com/example/plugin4/foo'\n path: /my/local/path/plugin2\n
dataset_sample\yaml\go\.custom-gcl.reference.yml
.custom-gcl.reference.yml
YAML
1,073
0.95
0
0.466667
react-lib
498
2024-09-11T12:39:12.417887
MIT
false
3a4ad3577cec69655a95c95e7350c9e2
# yaml-language-server: $schema=https://golangci-lint.run/jsonschema/custom-gcl.jsonschema.json\n\nversion: v2.0.2\n\ndestination: ./_tools\n\nplugins:\n - module: 'github.com/microsoft/typescript-go/_tools'\n import: 'github.com/microsoft/typescript-go/_tools/customlint'\n path: ./_tools\n
dataset_sample\yaml\go\.custom-gcl.yml
.custom-gcl.yml
YAML
288
0.95
0
0.142857
react-lib
19
2023-11-27T14:12:02.764172
GPL-3.0
false
1587a292ff12b43fded9a668694efe78
---\nkind: pipeline\nname: amd64\n\nplatform:\n os: linux\n arch: amd64\n\ntrigger:\n event:\n exclude:\n - cron\n - pull_request\n\nclone:\n retries: 3\n\nsteps:\n- name: build\n image: rancher/dapper:v0.6.0\n secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader, unprivileged_github_token ]\n environment:\n GITHUB_TOKEN:\n from_secret: unprivileged_github_token\n AWS_SECRET_ACCESS_KEY:\n from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader\n AWS_ACCESS_KEY_ID:\n from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader\n commands:\n - dapper ci\n - echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags\n volumes:\n - name: docker\n path: /var/run/docker.sock\n\n- name: fossa\n image: rancher/drone-fossa:latest\n failure: ignore\n settings:\n api_key:\n from_secret: FOSSA_API_KEY\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n include:\n - "refs/heads/master"\n - "refs/heads/release-*"\n event:\n - push\n - tag\n\n- name: github_binary_release\n image: plugins/github-release\n settings:\n api_key:\n from_secret: github_token\n prerelease: true\n checksum:\n - sha256\n checksum_file: CHECKSUMsum-amd64.txt\n checksum_flatten: true\n files:\n - "dist/artifacts/*"\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - tag\n\n- name: docker-publish\n image: plugins/docker\n settings:\n dockerfile: package/Dockerfile\n password:\n from_secret: docker_password\n repo: "rancher/k3s"\n username:\n from_secret: docker_username\n build_args_from_env:\n - DRONE_TAG\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - tag\n\n- name: ghcr-publish\n image: plugins/docker\n settings:\n registry: ghcr.io\n dockerfile: package/Dockerfile\n username: "k3s-io"\n password:\n from_secret: github_token\n repo: "ghcr.io/k3s-io/k3s"\n build_args_from_env:\n - DRONE_TAG\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - 
tag\n\n- name: test\n image: rancher/dapper:v0.6.0\n secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]\n environment:\n ENABLE_REGISTRY: 'true'\n AWS_SECRET_ACCESS_KEY:\n from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader\n AWS_ACCESS_KEY_ID:\n from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader\n commands:\n - docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .\n - >\n docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY\n -v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}\n volumes:\n - name: docker\n path: /var/run/docker.sock\n\nvolumes:\n- name: docker\n host:\n path: /var/run/docker.sock\n\n---\nkind: pipeline\nname: conformance\n\nplatform:\n os: linux\n arch: amd64\n\ntrigger:\n event:\n - cron\n cron:\n - nightly\n\nsteps:\n- name: build\n image: rancher/dapper:v0.6.0\n commands:\n - dapper ci\n - echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags\n volumes:\n - name: docker\n path: /var/run/docker.sock\n\n- name: test\n image: rancher/dapper:v0.6.0\n environment:\n ENABLE_REGISTRY: 'true'\n commands:\n - docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .\n - >\n docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e SONOBUOY_VERSION -e ENABLE_REGISTRY\n -v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}\n volumes:\n - name: docker\n path: /var/run/docker.sock\n\nvolumes:\n- name: docker\n host:\n path: /var/run/docker.sock\n\n---\nkind: pipeline\nname: arm64\n\nplatform:\n os: linux\n arch: arm64\n\ntrigger:\n event:\n exclude:\n - cron\n - pull_request\n\nclone:\n retries: 3\n\nsteps:\n- name: build\n image: rancher/dapper:v0.6.0\n 
secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]\n environment:\n AWS_SECRET_ACCESS_KEY:\n from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader\n AWS_ACCESS_KEY_ID:\n from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader\n commands:\n - dapper ci\n - echo "${DRONE_TAG}-arm64" | sed -e 's/+/-/g' >.tags\n volumes:\n - name: docker\n path: /var/run/docker.sock\n\n- name: github_binary_release\n image: plugins/github-release\n settings:\n api_key:\n from_secret: github_token\n prerelease: true\n checksum:\n - sha256\n checksum_file: CHECKSUMsum-arm64.txt\n checksum_flatten: true\n files:\n - "dist/artifacts/*"\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - tag\n\n- name: docker-publish\n image: plugins/docker\n settings:\n dockerfile: package/Dockerfile\n password:\n from_secret: docker_password\n repo: "rancher/k3s"\n username:\n from_secret: docker_username\n build_args_from_env:\n - DRONE_TAG\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - tag\n\n- name: ghcr-publish\n image: plugins/docker\n settings:\n registry: ghcr.io\n dockerfile: package/Dockerfile\n username: "k3s-io"\n password:\n from_secret: github_token\n repo: "ghcr.io/k3s-io/k3s"\n build_args_from_env:\n - DRONE_TAG\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - tag\n\n- name: test\n image: rancher/dapper:v0.6.0\n secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]\n environment:\n ENABLE_REGISTRY: 'true'\n AWS_SECRET_ACCESS_KEY:\n from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader\n AWS_ACCESS_KEY_ID:\n from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader\n commands:\n - docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .\n - >\n docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e 
AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY\n -v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}\n volumes:\n - name: docker\n path: /var/run/docker.sock\n\nvolumes:\n- name: docker\n host:\n path: /var/run/docker.sock\n\n---\nkind: pipeline\nname: arm\n\nplatform:\n os: linux\n arch: arm\n\ntrigger:\n event:\n exclude:\n - cron\n\nclone:\n retries: 3\n\nsteps:\n- name: skipfiles\n image: plugins/git\n commands:\n - export NAME=$(test $DRONE_BUILD_EVENT = pull_request && echo remotes/origin/${DRONE_COMMIT_BRANCH:-master} || echo ${DRONE_COMMIT_SHA}~)\n - export DIFF=$(git --no-pager diff --name-only $NAME | grep -v -f .droneignore);\n - if [ -z "$DIFF" ]; then\n echo "All files in PR are on ignore list";\n exit 78;\n else\n echo "Some files in PR are not ignored, $DIFF";\n fi;\n when:\n event:\n - pull_request\n\n- name: build\n # Keeping Dapper at v0.5.0 for armv7, as newer versions fails with\n # Bad system call on this architecture. 
xref:\n #\n # https://github.com/k3s-io/k3s/pull/8959#discussion_r1439736566\n # https://drone-pr.k3s.io/k3s-io/k3s/7922/3/3\n image: rancher/dapper:v0.5.0\n secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]\n environment:\n AWS_SECRET_ACCESS_KEY:\n from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader\n AWS_ACCESS_KEY_ID:\n from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader\n commands:\n - dapper ci\n - echo "${DRONE_TAG}-arm" | sed -e 's/+/-/g' >.tags\n volumes:\n - name: docker\n path: /var/run/docker.sock\n\n- name: github_binary_release\n image: plugins/github-release:linux-arm\n settings:\n api_key:\n from_secret: github_token\n prerelease: true\n checksum:\n - sha256\n checksum_file: CHECKSUMsum-arm.txt\n checksum_flatten: true\n files:\n - "dist/artifacts/*"\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - tag\n\n- name: docker-publish\n image: plugins/docker:linux-arm\n settings:\n dockerfile: package/Dockerfile\n password:\n from_secret: docker_password\n repo: "rancher/k3s"\n username:\n from_secret: docker_username\n build_args_from_env:\n - DRONE_TAG\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - tag\n\n- name: ghcr-publish\n image: plugins/docker:linux-arm\n settings:\n registry: ghcr.io\n dockerfile: package/Dockerfile\n username: "k3s-io"\n password:\n from_secret: github_token\n repo: "ghcr.io/k3s-io/k3s"\n build_args_from_env:\n - DRONE_TAG\n when:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - tag\n\n- name: test\n # Refer to comment for arm/build.\n image: rancher/dapper:v0.5.0\n secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]\n environment:\n ENABLE_REGISTRY: 'true'\n AWS_SECRET_ACCESS_KEY:\n from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader\n AWS_ACCESS_KEY_ID:\n from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader\n commands:\n - 
docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .\n - >\n docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY\n -v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}\n volumes:\n - name: docker\n path: /var/run/docker.sock\n\nvolumes:\n- name: docker\n host:\n path: /var/run/docker.sock\n\n---\nkind: pipeline\nname: manifest\n\nplatform:\n os: linux\n arch: amd64\n\nsteps:\n- name: skipfiles\n image: plugins/git\n commands:\n - export NAME=$(test $DRONE_BUILD_EVENT = pull_request && echo remotes/origin/${DRONE_COMMIT_BRANCH:-master} || echo ${DRONE_COMMIT_SHA}~)\n - export DIFF=$(git --no-pager diff --name-only $NAME | grep -v -f .droneignore);\n - if [ -z "$DIFF" ]; then\n echo "All files in PR are on ignore list";\n exit 78;\n else\n echo "Some files in PR are not ignored, $DIFF";\n fi;\n when:\n event:\n - push\n - pull_request\n\n- name: manifest\n image: plugins/docker\n environment:\n DOCKER_USERNAME:\n from_secret: docker_username\n DOCKER_PASSWORD:\n from_secret: docker_password\n settings:\n dry_run: true\n dockerfile: Dockerfile.manifest\n repo: "rancher/k3s-manifest"\n build_args_from_env:\n - DOCKER_USERNAME\n - DOCKER_PASSWORD\n - DRONE_TAG\ntrigger:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n include:\n - tag\n exclude:\n - cron\n\ndepends_on:\n- amd64\n- arm64\n- arm\n\n---\nkind: pipeline\nname: dispatch\n\nplatform:\n os: linux\n arch: amd64\n\nclone:\n retries: 3\n\nsteps:\n- name: skipfiles\n image: plugins/git\n commands:\n - export NAME=$(test $DRONE_BUILD_EVENT = pull_request && echo remotes/origin/${DRONE_COMMIT_BRANCH:-master} || echo ${DRONE_COMMIT_SHA}~)\n - export DIFF=$(git --no-pager diff --name-only $NAME | grep -v -f .droneignore);\n - if [ -z "$DIFF" ]; 
then\n echo "All files in PR are on ignore list";\n exit 78;\n else\n echo "Some files in PR are not ignored, $DIFF";\n fi;\n when:\n event:\n - push\n - pull_request\n\n- name: dispatch\n image: curlimages/curl:7.74.0\n secrets: [ pat_username, github_token, release_token_k3s ] \n user: root\n environment:\n PAT_USERNAME:\n from_secret: pat_username\n PAT_TOKEN:\n from_secret: github_token\n K3S_RELEASE_TOKEN:\n from_secret: release_token_k3s\n commands:\n - apk -U --no-cache add bash\n - scripts/dispatch\n\ntrigger:\n instance:\n - drone-publish.k3s.io\n ref:\n - refs/head/master\n - refs/tags/*\n event:\n - tag\n\ndepends_on:\n- manifest\n\n---\nkind: pipeline\nname: e2e\ntype: docker\n\nplatform:\n os: linux\n arch: amd64\n\nclone:\n retries: 3\n\nsteps:\n- name: skipfiles\n image: plugins/git\n commands:\n - export NAME=$(test $DRONE_BUILD_EVENT = pull_request && echo remotes/origin/${DRONE_COMMIT_BRANCH:-master} || echo ${DRONE_COMMIT_SHA}~)\n - export DIFF=$(git --no-pager diff --name-only $NAME | grep -v -f .droneignore);\n - if [ -z "$DIFF" ]; then\n echo "All files in PR are on ignore list";\n exit 78;\n else\n echo "Some files in PR are not ignored, $DIFF";\n fi;\n when:\n event:\n - push\n - pull_request\n\n- name: build-e2e-image\n image: docker:25.0.5\n commands:\n - DOCKER_BUILDKIT=1 docker build --target test-e2e -t test-e2e -f Dockerfile.test .\n - apk add make git bash\n - GOCOVER=1 make local\n - cp dist/artifacts/* /tmp/artifacts/\n volumes:\n - name: cache\n path: /tmp/artifacts\n - name: docker\n path: /var/run/docker.sock\n\n- name: test-e2e-validatecluster\n depends_on:\n - build-e2e-image\n image: test-e2e\n pull: never\n resources:\n cpu: 6000\n memory: 10Gi\n environment:\n E2E_REGISTRY: 'true'\n E2E_GOCOVER: 'true'\n commands:\n - mkdir -p dist/artifacts\n - cp /tmp/artifacts/* dist/artifacts/\n # Cleanup VMs that are older than 2h. 
Happens if a previous test panics or is canceled\n - tests/e2e/scripts/cleanup_vms.sh\n - tests/e2e/scripts/drone_registries.sh\n # Stagger the launch of this test with the parallel splitserver test\n # to prevent conflicts over libvirt network interfaces\n - |\n cd tests/e2e/validatecluster\n ../scripts/cleanup_vms.sh 'validatecluster_([0-9]+)_(server|agent)'\n sleep 15\n go test -v -timeout=45m ./validatecluster_test.go -ci -local\n cp ./coverage.out /tmp/artifacts/validate-coverage.out\n volumes:\n - name: libvirt\n path: /var/run/libvirt/\n - name: docker\n path: /var/run/docker.sock\n - name: cache\n path: /tmp/artifacts\n\n- name: test-e2e-splitserver\n depends_on:\n - build-e2e-image\n image: test-e2e\n pull: never\n resources:\n cpu: 6000\n memory: 10Gi\n environment:\n E2E_REGISTRY: 'true'\n E2E_GOCOVER: 'true'\n commands:\n - mkdir -p dist/artifacts\n - cp /tmp/artifacts/* dist/artifacts/\n - tests/e2e/scripts/drone_registries.sh\n - |\n cd tests/e2e/splitserver\n ../scripts/cleanup_vms.sh 'splitserver_([0-9]+)'\n go test -v -timeout=30m ./splitserver_test.go -ci -local\n cp ./coverage.out /tmp/artifacts/split-coverage.out\n - |\n if [ "$DRONE_BUILD_EVENT" = "pull_request" ]; then\n cd ../upgradecluster\n ../scripts/cleanup_vms.sh 'upgradecluster_([0-9]+)_(server|agent)'\n # Convert release-1.XX branch to v1.XX channel\n if [ "$DRONE_BRANCH" = "master" ]; then\n UPGRADE_CHANNEL="latest"\n else\n UPGRADE_CHANNEL=$(echo $DRONE_BRANCH | sed 's/release-/v/')\n # Check if the UPGRADE_CHANNEL exists, in the case of new minor releases it won't\n if ! 
curl --head --silent --fail https://update.k3s.io/v1-release/channels/$UPGRADE_CHANNEL; then\n UPGRADE_CHANNEL="latest"\n fi\n fi\n E2E_RELEASE_CHANNEL=$UPGRADE_CHANNEL go test -v -timeout=45m ./upgradecluster_test.go -ci -local -ginkgo.v\n cp ./coverage.out /tmp/artifacts/upgrade-coverage.out\n fi\n \n volumes:\n - name: libvirt\n path: /var/run/libvirt/\n - name: docker\n path: /var/run/docker.sock\n - name: cache\n path: /tmp/artifacts\n\n- name: upload to codecov\n depends_on:\n - test-e2e-validatecluster\n - test-e2e-splitserver\n image: robertstettner/drone-codecov\n settings:\n token: \n from_secret: codecov_token\n files:\n - /tmp/artifacts/validate-coverage.out\n - /tmp/artifacts/split-coverage.out\n - /tmp/artifacts/upgrade-coverage.out\n flags: \n - e2etests\n when:\n event:\n - push\n\n volumes:\n - name: cache\n path: /tmp/artifacts\n\nvolumes:\n- name: docker\n host:\n path: /var/run/docker.sock\n- name: libvirt\n host:\n path: /var/run/libvirt/\n- name: cache\n temp: {}\n
dataset_sample\yaml\go\.drone.yml
.drone.yml
YAML
16,192
0.8
0.015805
0.017296
vue-tools
4
2025-04-28T05:23:04.393813
MIT
false
d7141770a54de72cd59874917a81ef88
---\nparser: '@typescript-eslint/parser'\nparserOptions:\n ecmaVersion: 2020\n sourceType: module\n ecmaFeatures:\n jsx: true\n impliedStrict: true\n project: ./tsconfig-eslint.json\nplugins:\n - react\n - '@typescript-eslint/eslint-plugin'\n - react-hooks\n - jest\n - import\nenv:\n browser: true\n node: true\n shared-node-browser: true\n es6: true\n jest: true\nglobals:\n __DEV__: readonly\nextends:\n - eslint:recommended\n - plugin:react/recommended\n - plugin:@typescript-eslint/recommended\n - plugin:import/errors\n - plugin:import/typescript\n - prettier\nsettings:\n react:\n version: detect\n import/resolver:\n typescript:\n alwaysTryTypes: true\nrules:\n '@typescript-eslint/ban-types':\n - error\n - extendDefaults: true\n types:\n '{}': false\n\n # custom rules\n no-document-body-snapshot: 2\n duplicate-data-tooltip-id: 'warn'\n jest-no-mock:\n - 2\n - module:\n react-router-dom: 'react-router-dom should not be mocked. Wrap the component inside TestWrapper instead'\n\n # built-in\n no-console: 2\n semi: 0\n no-await-in-loop: 2\n no-shadow: 0\n\n # react hooks\n react-hooks/rules-of-hooks: 2\n react-hooks/exhaustive-deps: 1\n\n # react\n react/prop-types: 0\n react/display-name: 1\n\n #typescript\n '@typescript-eslint/no-use-before-define': 0\n '@typescript-eslint/explicit-function-return-type': 0\n no-unused-vars: 0\n '@typescript-eslint/no-unused-vars':\n - 2\n - vars: all\n args: after-used\n ignoreRestSiblings: true\n argsIgnorePattern: ^_\n '@typescript-eslint/member-delimiter-style': 0\n '@typescript-eslint/no-shadow': 2\n '@typescript-eslint/no-extra-semi': 0\n '@typescript-eslint/explicit-module-boundary-types': 0\n\n #import\n import/order:\n - error\n - groups:\n - builtin\n - external\n - internal\n - - parent\n - sibling\n pathGroups:\n - pattern: '*.scss'\n group: index\n position: after\n patternOptions:\n matchBase: true\n import/no-useless-path-segments: 2\n\n no-restricted-imports:\n - error\n - patterns:\n - lodash.*\n paths:\n - 
lodash\n\noverrides:\n - files:\n - '**/*.test.ts'\n - '**/*.test.tsx'\n rules:\n '@typescript-eslint/no-magic-numbers': 0\n '@typescript-eslint/no-non-null-assertion': 'off'\n '@typescript-eslint/no-non-null-asserted-optional-chain': 0\n '@typescript-eslint/no-explicit-any': 0\n no-await-in-loop: 0\n jest/consistent-test-it:\n - 2\n - fn: test\n withinDescribe: test\n jest/expect-expect: 2\n jest/no-disabled-tests: 2\n jest/no-commented-out-tests: 2\n - files:\n - services.tsx\n rules:\n '@typescript-eslint/explicit-function-return-type': 0\n '@typescript-eslint/no-explicit-any': 0\n
dataset_sample\yaml\go\.eslintrc.yml
.eslintrc.yml
YAML
2,793
0.95
0.015873
0.050847
node-utils
775
2025-06-28T17:10:28.719312
Apache-2.0
false
846cb01579b8104bf43c8697c8d9a6af
version: 3\n\nproject:\n id: vitess\n name: vitess\n\n# Exclude the maven based scanning of our java client until we can get it working again.\ntargets:\n exclude:\n - type: maven\n path: java\n
dataset_sample\yaml\go\.fossa.yml
.fossa.yml
YAML
194
0.8
0
0.111111
awesome-app
49
2024-11-17T03:15:16.094952
Apache-2.0
false
5047607d4e35deef0edad4da98d33fde
tasks:\n - init: >\n test -f go.mod && go get -v ./...;\n wget https://github.com/prometheus/prometheus/releases/download/v2.22.2/prometheus-2.22.2.linux-amd64.tar.gz --directory-prefix=/tmp;\n cd /tmp && tar xvfz prometheus-*.tar.gz && rm prometheus-*.tar.gz;\n mv prometheus-*/prometheus /workspace/go/bin/;\n wget https://raw.githubusercontent.com/vishnubob/wait-for-it/master/wait-for-it.sh && chmod u+x ./wait-for-it.sh && cd /workspace/thanos/scripts && mv /tmp/wait-for-it.sh .;\n ./quickstart.sh;\n for i in {0..2};do ./wait-for-it.sh -q localhost:909"$i" -- && echo "Prometheus instance no.$((i+1)) localhost:909$i is up"; done;\n ./wait-for-it.sh localhost:10914 -- && echo "Thanos frontend at localhost:10914 is up";\nports:\n - port: 10914\n onOpen: open-preview\n
dataset_sample\yaml\go\.gitpod.yml
.gitpod.yml
YAML
810
0.8
0.538462
0
node-utils
711
2024-12-02T11:07:23.269394
GPL-3.0
false
0104b64b4d0197c49591702d06f85f0d
run:\n timeout: 3m\n\nlinters:\n enable:\n - vet\n - errcheck\n - staticcheck\n# - dupl\n# - funlen\n# - goconst\n# - gocritic\n - gofmt\n# - golint\n# - misspell\n# - stylecheck\n - unconvert\n# - whitespace\n
dataset_sample\yaml\go\.golangci.yml
.golangci.yml
YAML
225
0.8
0
0.470588
awesome-app
182
2023-09-10T16:46:38.012910
GPL-3.0
false
63dc0e770f4f6484f9cc4ea19825b17b
version: 2\n\nproject_name: dagger\n\nbuilds:\n - builder: prebuilt\n binary: ./dagger\n goos:\n - linux\n - windows\n - darwin\n goarch:\n - amd64\n - arm64\n - arm\n goarm:\n - "7"\n\n prebuilt:\n path: build/dagger_{{ .Env.ENGINE_VERSION }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}_v{{ . }}{{ end }}/dagger\n\nchecksum:\n name_template: "checksums.txt"\n
dataset_sample\yaml\go\.goreleaser.common.yml
.goreleaser.common.yml
YAML
387
0.7
0
0
python-kit
247
2024-03-07T11:45:15.411264
BSD-3-Clause
false
1af3d4425f03dd36841f7988f50f0208
version: 2\n\nincludes:\n - from_file:\n path: ./.goreleaser.common.yml\n\nnightly:\n # version_template will override .Version for nightly builds:\n # https://goreleaser.com/customization/nightlies/#how-it-works\n version_template: "{{ .FullCommit }}"\n\narchives:\n - name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}{{ if .Mips }}_{{ .Mips }}{{ end }}"\n id: sha\n files:\n - LICENSE\n format_overrides:\n - goos: windows\n formats: [zip]\n - name_template: "{{ .ProjectName }}_head_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}{{ if .Mips }}_{{ .Mips }}{{ end }}"\n id: head\n files:\n - LICENSE\n format_overrides:\n - goos: windows\n formats: [zip]\n\nblobs:\n - provider: s3\n ids:\n - sha\n region: "{{ .Env.AWS_REGION }}"\n bucket: "{{ .Env.AWS_BUCKET }}"\n directory: "dagger/main/{{ .Version }}"\n - provider: s3\n ids:\n - head\n region: "{{ .Env.AWS_REGION }}"\n bucket: "{{ .Env.AWS_BUCKET }}"\n directory: "dagger/main/head"\n
dataset_sample\yaml\go\.goreleaser.nightly.yml
.goreleaser.nightly.yml
YAML
1,064
0.8
0.125
0.055556
awesome-app
578
2025-06-08T17:17:06.120549
Apache-2.0
false
04fc446628aeb7a70a43341817e8ed54
version: 2\n\nincludes:\n - from_file:\n path: ./.goreleaser.common.yml\n\nnightly:\n # version_template will override .Version for nightly builds:\n # https://goreleaser.com/customization/nightlies/#how-it-works\n # version should *not* have a v prefix\n version_template: "{{ trimprefix .Tag \"v\" }}"\n\narchives:\n - name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}{{ if .Mips }}_{{ .Mips }}{{ end }}"\n id: sha\n files:\n - LICENSE\n format_overrides:\n - goos: windows\n formats: [zip]\n\nblobs:\n - provider: s3\n region: "{{ .Env.AWS_REGION }}"\n bucket: "{{ .Env.AWS_BUCKET }}"\n directory: "dagger/releases/{{ .Version }}"\n
dataset_sample\yaml\go\.goreleaser.prerelease.yml
.goreleaser.prerelease.yml
YAML
712
0.8
0.115385
0.136364
python-kit
787
2023-08-09T10:16:43.071053
MIT
false
08d23202fa2620ee56b950b04a114976
env:\n - GO111MODULE=on\n - GOPROXY="https://proxy.golang.org,direct"\n\narchives:\n - id: default\n wrap_in_directory: true\n\nbuilds:\n - binary: wtfutil\n goos:\n - darwin\n - linux\n goarch:\n - amd64\n - arm\n - arm64\n\nbefore:\n hooks:\n - make install\n\nbrews:\n -\n name: wtfutil\n homepage: 'https://wtfutil.com'\n description: 'The personal information dashboard for your terminal.'\n tap:\n owner: wtfutil\n name: homebrew-wtfutil\n
dataset_sample\yaml\go\.goreleaser.yml
.goreleaser.yml
YAML
480
0.8
0.033333
0
python-kit
492
2024-06-09T03:49:43.247236
MIT
false
db7f65aa8369d5e3f60691780a34f33d
# Apply checks for artifact registry (ar) code\n'src/ar/**/*': 'sh src/ar/scripts/license/stamp.sh'\n'src/ar/**/*.{ts,tsx,p1}': 'prettier --check'\n'src/ar/**/*.{ts,tsx,p2}': 'eslint --rulesdir src/ar/scripts/eslint-rules'\n'src/ar/**/*.{ts,tsx,p3}': 'sh src/ar/scripts/typecheck-staged.sh'\n'src/ar/**/*.scss,p1': 'stylelint --max-warnings 0'\n'src/ar/**/*.scss,p2': 'prettier --check'\n'src/ar/**/*.i18n.ts': 'exit 1'\n
dataset_sample\yaml\go\.lintstagedrc.yml
.lintstagedrc.yml
YAML
413
0.8
0.125
0.125
vue-tools
604
2024-08-07T03:01:56.443289
BSD-3-Clause
false
52a7e3c3755cbab132648c03d222ae95
pull_request_rules:\n # if there is a conflict in a backport PR, ping the author to send a proper backport PR\n - name: ping author on conflicts\n conditions:\n - conflict\n actions:\n comment:\n message: This pull request has merge conflicts that must be resolved before it can be merged. @{{author}} please rebase it. https://rook.io/docs/rook/latest/Contributing/development-flow/#updating-your-fork\n\n - name: ping author on direct push to release branch\n conditions:\n - base~=^release-\n - author!=mergify[bot]\n actions:\n comment:\n message: Hi @{{author}}, this pull request was opened against a release branch, is it expected? Normally patches should go in the master branch first and then be backported to release branches.\n\n # release-1.14 branch\n - name: automerge backport release-1.14\n conditions:\n - author=mergify[bot]\n - base=release-1.14\n - label!=do-not-merge\n - "status-success=DCO"\n - "check-success=linux-build-all (1.21)"\n - "check-success=linux-build-all (1.22)"\n - "check-success=unittests"\n - "check-success=golangci-lint"\n - "check-success=codegen"\n - "check-success=codespell"\n - "check-success=lint"\n - "check-success=modcheck"\n - "check-success=Shellcheck"\n - "check-success=yaml-linter"\n - "check-success=lint-test"\n - "check-success=gen-rbac"\n - "check-success=crds-gen"\n - "check-success=docs-check"\n - "check-success=pylint"\n - "check-success=canary-tests / canary (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / raw-disk-with-object (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / two-osds-in-device (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / osd-with-metadata-partition-device (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / osd-with-metadata-device (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / lvm (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / pvc (quay.io/ceph/ceph:v18)"\n - 
"check-success=canary-tests / pvc-db (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / pvc-db-wal (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-db (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-db-wal (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-kms-vault-token-auth (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-kms-vault-k8s-auth (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / lvm-pvc (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / multi-cluster-mirroring (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / rgw-multisite-testing (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-kms-ibm-kp (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / multus-cluster-network (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / csi-hostnetwork-disabled (quay.io/ceph/ceph:v18)"\n - "check-success=TestCephSmokeSuite (v1.25.16)"\n - "check-success=TestCephSmokeSuite (v1.30.0)"\n - "check-success=TestCephHelmSuite (v1.25.16)"\n - "check-success=TestCephHelmSuite (v1.30.0)"\n - "check-success=TestCephMultiClusterDeploySuite (v1.30.0)"\n - "check-success=TestCephObjectSuite (v1.30.0)"\n - "check-success=TestCephUpgradeSuite (v1.25.16)"\n - "check-success=TestCephUpgradeSuite (v1.30.0)"\n - "check-success=TestHelmUpgradeSuite (v1.25.16)"\n - "check-success=TestHelmUpgradeSuite (v1.30.0)"\n actions:\n merge:\n method: merge\n dismiss_reviews: {}\n delete_head_branch: {}\n\n # release-1.15 branch\n - name: automerge backport release-1.15\n conditions:\n - author=mergify[bot]\n - base=release-1.15\n - label!=do-not-merge\n - "status-success=DCO"\n - "check-success=linux-build-all (1.22)"\n - "check-success=unittests"\n - "check-success=golangci-lint"\n - "check-success=codegen"\n - "check-success=codespell"\n - "check-success=lint"\n 
- "check-success=modcheck"\n - "check-success=Shellcheck"\n - "check-success=yaml-linter"\n - "check-success=lint-test"\n - "check-success=gen-rbac"\n - "check-success=crds-gen"\n - "check-success=docs-check"\n - "check-success=pylint"\n - "check-success=canary-tests / canary (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / raw-disk-with-object (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / two-osds-in-device (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / osd-with-metadata-partition-device (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / osd-with-metadata-device (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / lvm (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / pvc (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / pvc-db (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / pvc-db-wal (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-db (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-db-wal (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-kms-vault-token-auth (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-kms-vault-k8s-auth (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / lvm-pvc (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / multi-cluster-mirroring (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / rgw-multisite-testing (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / encryption-pvc-kms-ibm-kp (quay.io/ceph/ceph:v18)"\n - "check-success=canary-tests / multus-cluster-network (quay.io/ceph/ceph:v18)" # note: changed name for 1.16 (multus-public-and-cluster)\n - "check-success=canary-tests / csi-hostnetwork-disabled (quay.io/ceph/ceph:v18)"\n - "check-success=TestCephSmokeSuite (v1.26.15)"\n - 
"check-success=TestCephSmokeSuite (v1.31.0)"\n - "check-success=TestCephHelmSuite (v1.26.15)"\n - "check-success=TestCephHelmSuite (v1.31.0)"\n - "check-success=TestCephMultiClusterDeploySuite (v1.31.0)"\n - "check-success=TestCephObjectSuite (v1.26.15)"\n - "check-success=TestCephObjectSuite (v1.31.0)"\n - "check-success=TestCephUpgradeSuite (v1.26.15)"\n - "check-success=TestCephUpgradeSuite (v1.31.0)"\n - "check-success=TestHelmUpgradeSuite (v1.26.15)"\n - "check-success=TestHelmUpgradeSuite (v1.31.0)"\n actions:\n merge:\n method: merge\n dismiss_reviews: {}\n delete_head_branch: {}\n\n # release-1.16 branch\n - name: automerge backport release-1.16\n conditions:\n - author=mergify[bot]\n - base=release-1.16\n - label!=do-not-merge\n - "status-success=DCO"\n - "check-success=linux-build-all (1.22)"\n - "check-success=linux-build-all (1.23)"\n - "check-success=unittests"\n - "check-success=golangci-lint"\n - "check-success=codegen"\n - "check-success=codespell"\n - "check-success=lint"\n - "check-success=modcheck"\n - "check-success=Shellcheck"\n - "check-success=yaml-linter"\n - "check-success=lint-test"\n - "check-success=gen-rbac"\n - "check-success=crds-gen"\n - "check-success=docs-check"\n - "check-success=pylint"\n - "check-success=canary-tests / canary (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / raw-disk-with-object (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / two-osds-in-device (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / osd-with-metadata-partition-device (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / osd-with-metadata-device (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / lvm (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / pvc (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / pvc-db (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / pvc-db-wal (quay.io/ceph/ceph:v19)"\n - 
"check-success=canary-tests / encryption-pvc (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-db (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-db-wal (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-kms-vault-token-auth (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-kms-vault-k8s-auth (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / lvm-pvc (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / multi-cluster-mirroring (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-kms-ibm-kp (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / multus-public-and-cluster (quay.io/ceph/ceph:v19)"\n - "check-success=TestCephSmokeSuite (v1.27.16)"\n - "check-success=TestCephSmokeSuite (v1.32.0)"\n - "check-success=TestCephHelmSuite (v1.27.16)"\n - "check-success=TestCephHelmSuite (v1.32.0)"\n - "check-success=TestCephMultiClusterDeploySuite (v1.32.0)"\n - "check-success=TestCephObjectSuite (v1.27.16)"\n - "check-success=TestCephObjectSuite (v1.32.0)"\n - "check-success=TestCephUpgradeSuite (v1.27.16)"\n - "check-success=TestCephUpgradeSuite (v1.32.0)"\n - "check-success=TestHelmUpgradeSuite (v1.27.16)"\n - "check-success=TestHelmUpgradeSuite (v1.32.0)"\n actions:\n merge:\n method: merge\n dismiss_reviews: {}\n delete_head_branch: {}\n\n # release-1.17 branch\n - name: automerge backport release-1.17\n conditions:\n - author=mergify[bot]\n - base=release-1.17\n - label!=do-not-merge\n - "status-success=DCO"\n - "check-success=codegen"\n - "check-success=codespell"\n - "check-success=crds-gen"\n - "check-success=docs-check"\n - "check-success=gen-rbac"\n - "check-success=golangci-lint"\n - "check-success=govulncheck"\n - "check-success=lint"\n - "check-success=lint-test"\n - "check-success=linux-build-all (1.23)"\n - "check-success=linux-build-all (1.24)"\n - "check-success=misspell"\n - "check-success=modcheck"\n - 
"check-success=pylint"\n - "check-success=Shellcheck"\n - "check-success=unittests"\n - "check-success=yaml-linter"\n - "check-success=canary-tests / canary (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-db (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-db-wal (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-kms-ibm-kp (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-kms-vault-k8s-auth (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / encryption-pvc-kms-vault-token-auth (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / lvm (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / lvm-pvc (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / multi-cluster-mirroring (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / multus-public-and-cluster (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / osd-with-metadata-device (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / osd-with-metadata-partition-device (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / pvc (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / pvc-db (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / pvc-db-wal (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / raw-disk-with-object (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / two-object-one-zone (quay.io/ceph/ceph:v19)"\n - "check-success=canary-tests / two-osds-in-device (quay.io/ceph/ceph:v19)"\n - "check-success=TestCephHelmSuite (v1.28.15)"\n - "check-success=TestCephHelmSuite (v1.32.3)"\n - "check-success=TestCephMultiClusterDeploySuite (v1.32.3)"\n - "check-success=TestCephObjectSuite (v1.28.15)"\n - "check-success=TestCephObjectSuite (v1.32.3)"\n - "check-success=TestCephSmokeSuite (v1.28.15)"\n - 
"check-success=TestCephSmokeSuite (v1.32.3)"\n - "check-success=TestCephUpgradeSuite (v1.28.15)"\n - "check-success=TestCephUpgradeSuite (v1.32.3)"\n - "check-success=TestHelmUpgradeSuite (v1.28.15)"\n - "check-success=TestHelmUpgradeSuite (v1.32.3)"\n actions:\n merge:\n method: merge\n dismiss_reviews: {}\n delete_head_branch: {}\n\n # release-1.14 branch\n - actions:\n backport:\n branches:\n - release-1.14\n conditions:\n - label=backport-release-1.14\n name: backport release-1.14\n\n # release-1.15 branch\n - actions:\n backport:\n branches:\n - release-1.15\n conditions:\n - label=backport-release-1.15\n name: backport release-1.15\n\n # release-1.16 branch\n - actions:\n backport:\n branches:\n - release-1.16\n conditions:\n - label=backport-release-1.16\n name: backport release-1.16\n\n # release-1.17 branch\n - actions:\n backport:\n branches:\n - release-1.17\n conditions:\n - label=backport-release-1.17\n name: backport release-1.17\n
dataset_sample\yaml\go\.mergify.yml
.mergify.yml
YAML
13,896
0.8
0.00692
0.032143
vue-tools
517
2024-03-02T06:59:37.713932
Apache-2.0
false
26f058cfa0a8f8f5c4ae317025481fc6
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nversion: 1\nsnapshot:\n # Hide high-variability data from Percy snapshots; helps make sure that randomized data doesn't cause a visual diff.\n percy-css: | \n .topo-viz { \n display: none;\n }\n .related-evaluations path,\n .related-evaluations circle,\n .dashboard-metric {\n visibility: hidden;\n }
dataset_sample\yaml\go\.percy.yml
.percy.yml
YAML
388
0.8
0
0.214286
vue-tools
199
2024-06-27T03:23:20.482424
BSD-3-Clause
false
5b15e5910a88849cc1cade7831ebffe7
targets:\n debian-10: &debian\n build_dependencies:\n - libpam0g-dev\n dependencies:\n - libpam0g\n - git\n debian-11:\n <<: *debian\n debian-12:\n <<: *debian\n ubuntu-18.04:\n <<: *debian\n ubuntu-20.04:\n <<: *debian\n ubuntu-22.04:\n <<: *debian\n centos-9:\n build_dependencies:\n - pam-devel\n # required for Go buildpack\n - perl-Digest-SHA\n dependencies:\n - pam\n - git\nbefore:\n - mv .packager/Procfile .\nafter:\n - mv bin/gogs gogs\nafter_install: ./.packager/hooks/postinst\nbuildpack: https://github.com/heroku/heroku-buildpack-go.git#main\n
dataset_sample\yaml\go\.pkgr.yml
.pkgr.yml
YAML
602
0.95
0.032258
0.032258
python-kit
837
2023-08-01T23:43:46.021766
BSD-3-Clause
false
24a78c671eadfa9bd70fa1aa89768ea2
# This is the policy-bot configuration for this repository. It controls\n# which approvals are required for any given pull request. The format is\n# described at https://github.com/palantir/policy-bot. The syntax of the\n# policy can be verified by the bot:\n# curl https://pb.syncthing.net/api/validate -X PUT -T .policy.yml\n\n# The policy below is what is required for any pull request.\npolicy:\n approval:\n - subject is conventional commit\n - project metadata requires maintainer approval\n - or:\n - is approved by a syncthing contributor\n - is a translation or dependency update by a contributor\n - is a trivial change by a contributor\n\n # Additionally, contributors can disapprove of a PR\n disapproval:\n requires:\n teams:\n - syncthing/contributors\n\n# The rules for the policy are described below.\n\napproval_rules:\n\n # All commits (PRs before squashing) should have a valid conventional\n # commit type subject.\n - name: subject is conventional commit\n requires:\n conditions:\n title:\n matches:\n - '^(feat|fix|docs|chore|refactor|build): [a-z].+'\n - '^(feat|fix|docs|chore|refactor|build)\(\w+(, \w+)*\): [a-z].+'\n\n # Changes to important project metadata and documentation, including this\n # policy, require signoff by a maintainer\n - name: project metadata requires maintainer approval\n if:\n changed_files:\n paths:\n - ^[^/]+\.md\n - ^\.policy\.yml\n - ^LICENSE\n requires:\n count: 1\n teams:\n - syncthing/maintainers\n options:\n ignore_update_merges: true\n allow_contributor: true\n\n # Regular pull requests require approval by an active contributor\n - name: is approved by a syncthing contributor\n requires:\n count: 1\n teams:\n - syncthing/contributors\n options:\n ignore_update_merges: true\n allow_contributor: true\n\n # Changes to some files (translations, dependencies, compatibility) do not\n # require approval if they were proposed by a contributor and have a\n # matching commit subject\n - name: is a translation or dependency update by a 
contributor\n if:\n only_changed_files:\n paths:\n - ^gui/default/assets/lang/\n - ^go\.mod$\n - ^go\.sum$\n - ^compat\.yaml$\n title:\n matches:\n - '^chore\(gui\):'\n - '^build\(deps\):'\n - '^build\(compat\):'\n has_author_in:\n teams:\n - syncthing/contributors\n\n # If the change is small and the label "trivial" is added, we accept that\n # on trust. These PRs can be audited after the fact as appropriate.\n # Features are not trivial.\n - name: is a trivial change by a contributor\n if:\n modified_lines:\n total: "< 25"\n title:\n not_matches:\n - '^feat'\n has_labels:\n - trivial\n has_author_in:\n teams:\n - syncthing/contributors\n
dataset_sample\yaml\go\.policy.yml
.policy.yml
YAML
2,940
0.95
0.081633
0.213483
react-lib
964
2024-03-22T19:09:34.224672
GPL-3.0
false
4d28d1237aa8b2a3a8703924ff04303c
overrides:\n - files:\n - "**/*.md"\n options:\n printWidth: 80\n proseWrap: always\n
dataset_sample\yaml\go\.prettierrc.yml
.prettierrc.yml
YAML
98
0.6
0
0
vue-tools
682
2025-03-26T23:45:00.421234
GPL-3.0
false
abeaf3b62b9db7ee2d4077099ac4acc5
go:\n version: 1.23\nrepository:\n path: github.com/thanos-io/thanos\nbuild:\n binaries:\n - name: thanos\n path: ./cmd/thanos\n flags: -a -tags netgo\n ldflags: |\n -X github.com/prometheus/common/version.Version={{.Version}}\n -X github.com/prometheus/common/version.Revision={{.Revision}}\n -X github.com/prometheus/common/version.Branch={{.Branch}}\n -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}\n -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}\ncrossbuild:\n platforms:\n - linux/amd64\n - darwin/amd64\n - linux/arm64\n - windows/amd64\n - freebsd/amd64\n - linux/ppc64le\n
dataset_sample\yaml\go\.promu.yml
.promu.yml
YAML
658
0.7
0
0
awesome-app
807
2024-03-30T01:00:42.809780
MIT
false
72155f467be59f577898e9400e3c86f7
version: 2\nformats: all\nmkdocs:\n fail_on_warning: false\n configuration: mkdocs.yml\npython:\n install:\n - requirements: docs/requirements.txt\nbuild:\n os: "ubuntu-22.04"\n tools:\n python: "3.12"\n
dataset_sample\yaml\go\.readthedocs.yml
.readthedocs.yml
YAML
202
0.85
0
0
node-utils
831
2024-01-12T20:07:57.717669
BSD-3-Clause
false
a5eed05f722cab769aae945f0fbce7e2
---\nrunner:\n golangci:\n cmd: golangci-lint run\n errorformat:\n - '%E%f:%l:%c: %m'\n - '%E%f:%l: %m'\n - '%C%.%#'\n level: error\n eslint:\n cmd: cd web && eslint -f rdjson '*/**/*.{js,ts,tsx}'\n format: rdjson\n level: error\n yamllint:\n cmd: yamllint --format parsable .\n errorformat:\n - '%f:%l:%c: %m'\n level: warning\n...\n
dataset_sample\yaml\go\.reviewdog.yml
.reviewdog.yml
YAML
365
0.8
0
0
python-kit
523
2024-08-11T06:27:20.860057
GPL-3.0
false
c3a51dff3650c486a83f576dadeb5613
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\npaths-ignore:\n - '**/*.mdx' # any file ending in .mdx\n - '**/*.md' # any file ending in .md\n - '**/*_test.go' # any file ending in _test.go\n
dataset_sample\yaml\go\.secret_scanning.yml
.secret_scanning.yml
YAML
224
0.8
0
0.333333
react-lib
613
2025-01-17T08:50:54.771806
Apache-2.0
false
be6985d0669f7fd4ce2e1c2c85f6cb8d
---\nlinters:\n golint:\n min_confidence: 0.85\n fixer: true\n\nfiles:\n ignore:\n - 'vendor/*'\n - 'pb/*'\n
dataset_sample\yaml\go\.stickler.yml
.stickler.yml
YAML
112
0.8
0
0
node-utils
712
2024-10-18T09:32:02.335817
BSD-3-Clause
false
e4059abdc3840ae4300394ae18d5d847
language: go\n\ngo:\n - "1.19.x"\n\nbefore_install:\n # Make sure travis builds work for forks\n - mkdir -p $TRAVIS_BUILD_DIR $GOPATH/src/github.com/wtfutil\n - test ! -d $GOPATH/src/github.com/wtfutil/wtf && mv $TRAVIS_BUILD_DIR $GOPATH/src/github.com/wtfutil/wtf || true\n - export TRAVIS_BUILD_DIR=$HOME/gopath/src/github.com/wtfutil/wtf\n - cd $HOME/gopath/src/github.com/wtfutil/wtf\n - export GOPROXY="https://proxy.golang.org,direct"\n\nscript: go get ./... && ./scripts/check-uncommitted-vendor-files.sh && go test -v github.com/wtfutil/wtf/...\n
dataset_sample\yaml\go\.travis.yml
.travis.yml
YAML
547
0.8
0.071429
0.090909
awesome-app
293
2025-03-10T00:42:36.881347
MIT
false
10c2b30e97ab00e566b36911aa871a05
---\nextends: default\n\nlocale: en_US.UTF-8\n\nyaml-files:\n - '*.yaml'\n - '*.yml'\n - '.yamllint'\n\nignore: |\n api/openapi.yml\n docs/pnpm-lock.yaml\n docs/node_modules/\n internal/configuration/test_resources/config_bad_quoting.yml\n internal/templates/src/pnpm-lock.yaml\n web/pnpm-lock.yaml\n web/node_modules/\n .github/ISSUE_TEMPLATE/feature-request.yml\n .github/ISSUE_TEMPLATE/bug-report.yml\n internal/suites/\nrules:\n document-end:\n level: warning\n empty-values:\n level: warning\n indentation:\n spaces: 2\n check-multi-line-strings: true\n line-length:\n max: 120\n octal-values: enable\n...\n
dataset_sample\yaml\go\.yamllint.yml
.yamllint.yml
YAML
612
0.7
0
0
vue-tools
457
2024-07-22T17:37:43.736759
GPL-3.0
false
73a8844c726e66d210a3942ec4284025
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nnodeLinker: node-modules\n\nyarnPath: .yarn/releases/yarn-3.5.0.cjs\n
dataset_sample\yaml\go\.yarnrc.yml
.yarnrc.yml
YAML
135
0.8
0
0.5
node-utils
291
2025-05-26T19:51:32.389133
MIT
false
4cdb265f997485d07f5a6857884fcc9c
self-hosted-runner:\n # Labels of self-hosted runner in array of string\n labels:\n - warp-ubuntu-latest-arm64-4x\n - warp-ubuntu-latest-x64-4x\n - warp-ubuntu-latest-arm64-16x\n - warp-ubuntu-latest-x64-16x\n
dataset_sample\yaml\go\actionlint.yml
actionlint.yml
YAML
216
0.8
0
0.142857
vue-tools
575
2025-03-25T02:17:10.441626
MIT
false
a3654c1ce293dba1b956a869f20762d2
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n - id: "fmt_errorf_unformatted_use"\n patterns:\n - pattern: fmt.Errorf("...")\n message: "Use of fmt.Errorf without formatting. Please use errors.New"\n languages:\n - "go"\n severity: "WARNING"\n paths:\n include:\n - "./api/*"\n
dataset_sample\yaml\go\api_errorf.yml
api_errorf.yml
YAML
335
0.8
0
0.153846
react-lib
312
2024-11-16T04:22:43.998798
BSD-3-Clause
false
893ceaab02591778973e85dbac0306ae
# Notes:\n# - Minimal appveyor.yml file is an empty file. All sections are optional.\n# - Indent each level of configuration with 2 spaces. Do not use tabs!\n# - All section names are case-sensitive.\n# - Section names should be unique on each level.\n\nversion: "{build}"\n\nos: Windows Server 2012 R2\n\nclone_folder: c:\gopath\src\github.com\ipfs\go-ipfs\n\nenvironment:\n GOPATH: c:\gopath\n TEST_VERBOSE: 1\n #TEST_NO_FUSE: 1\n #TEST_SUITE: test_sharness\n #GOFLAGS: -tags nofuse\n global:\n BASH: C:\cygwin\bin\bash\n matrix:\n - GOARCH: amd64\n GOVERSION: 1.5.1\n GOROOT: c:\go\n DOWNLOADPLATFORM: "x64"\n\ninstall:\n # Enable make\n #- SET PATH=c:\MinGW\bin;%PATH%\n #- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe\n - go version\n - go env\n\n# Cygwin build script\n#\n# NOTES:\n#\n# The stdin/stdout file descriptor appears not to be valid for the Appveyor\n# build which causes failures as certain functions attempt to redirect\n# default file handles. Ensure a dummy file descriptor is opened with 'exec'.\n#\nbuild_script:\n - '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; export PATH=$GOPATH/bin:$PATH; make nofuse"'\n\ntest_script:\n - '%BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0</dev/null; export PATH=$GOPATH/bin:$PATH; export GOFLAGS=''-tags nofuse''; export TEST_NO_FUSE=1; export TEST_VERBOSE=1; export TEST_EXPENSIVE=1; export TEST_SUITE=test_sharness; make $TEST_SUITE"'\n\n#build:\n# parallel: true\n
dataset_sample\yaml\go\appveyor.yml
appveyor.yml
YAML
1,437
0.95
0.020408
0.512195
awesome-app
890
2023-08-31T17:47:31.432916
GPL-3.0
false
e565db3a2942ef4dc040e81b4a87aa25
auditbeat.modules:\n\n- module: auditd\n audit_rules: |\n -w /etc/passwd -p wa -k identity\n -a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access\n\n- module: file_integrity\n paths:\n - /bin\n - /usr/bin\n - /sbin\n - /usr/sbin\n - /etc\n\nprocessors:\n - add_cloud_metadata: ~\n - add_docker_metadata: ~\n\noutput.elasticsearch:\n hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'\n username: '${ELASTICSEARCH_USERNAME:}'\n password: '${ELASTICSEARCH_PASSWORD:}'\n
dataset_sample\yaml\go\auditbeat.docker.yml
auditbeat.docker.yml
YAML
529
0.7
0
0
react-lib
889
2025-03-20T05:56:34.250229
BSD-3-Clause
false
59a0124574ac1d274546117d588fd97a
########################## Auditbeat Configuration #############################\n\n# This is a reference configuration file documenting all non-deprecated options\n# in comments. For a shorter configuration example that contains only the most\n# common options, please see auditbeat.yml in the same directory.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/auditbeat/index.html\n\n# ============================== Config Reloading ==============================\n\n# Config reloading allows to dynamically load modules. Each file that is\n# monitored must contain one or multiple modules as a list.\nauditbeat.config.modules:\n\n # Glob pattern for configuration reloading\n path: ${path.config}/modules.d/*.yml\n\n # Period on which files under path should be checked for changes\n reload.period: 10s\n\n # Set to true to enable config reloading\n reload.enabled: false\n\n# Maximum amount of time to randomly delay the start of a dataset. Use 0 to\n# disable startup delay.\nauditbeat.max_start_delay: 10s\n\n# =========================== Modules configuration ============================\nauditbeat.modules:\n\n# The auditd module collects events from the audit framework in the Linux\n# kernel. You need to specify audit rules for the events that you want to audit.\n- module: auditd\n resolve_ids: true\n failure_mode: silent\n backlog_limit: 8196\n rate_limit: 0\n include_raw_message: false\n include_warnings: false\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Load audit rules from separate files. Same format as audit.rules(7).\n audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ]\n audit_rules: |\n ## Define audit rules here.\n ## Create file watches (-w) or syscall audits (-a or -A). Uncomment these\n ## examples or add your own rules.\n\n ## If you are on a 64 bit platform, everything should be running\n ## in 64 bit mode. 
This rule will detect any use of the 32 bit syscalls\n ## because this might be a sign of someone exploiting a hole in the 32\n ## bit API.\n #-a always,exit -F arch=b32 -S all -F key=32bit-abi\n\n ## Executions.\n #-a always,exit -F arch=b64 -S execve,execveat -k exec\n\n ## External access (warning: these can be expensive to audit).\n #-a always,exit -F arch=b64 -S accept,bind,connect -F key=external-access\n\n ## Identity changes.\n #-w /etc/group -p wa -k identity\n #-w /etc/passwd -p wa -k identity\n #-w /etc/gshadow -p wa -k identity\n\n ## Unauthorized access attempts.\n #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access\n #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access\n\n# The file integrity module sends events when files are changed (created,\n# updated, deleted). The events contain file metadata and hashes.\n- module: file_integrity\n paths:\n - /bin\n - /usr/bin\n - /sbin\n - /usr/sbin\n - /etc\n\n # Detect changes to files included in subdirectories. Disabled by default.\n recursive: false\n \n # List of regular expressions to filter out notifications for unwanted files.\n # Wrap in single quotes to workaround YAML escaping rules. By default no files\n # are ignored.\n exclude_files:\n - '(?i)\.sw[nop]$'\n - '~$'\n - '/\.git($|/)'\n\n # If recursive is set to true, subdirectories can also be excluded here by\n # specifying them.\n # - '(?i)/tmp/'\n\n # List of regular expressions used to explicitly include files. 
When configured,\n # Auditbeat will ignore files unless they match a pattern.\n #include_files:\n #- '/\.ssh($|/)'\n # Select the backend which will be used to source events.\n # "fsnotify" doesn't have the ability to associate user data to file events.\n # Valid values: auto, fsnotify, kprobes, ebpf.\n # Default: fsnotify.\n backend: fsnotify\n\n # Scan over the configured file paths at startup and send events for new or\n # modified files since the last time Auditbeat was running.\n scan_at_start: true\n\n # Average scan rate. This throttles the amount of CPU and I/O that Auditbeat\n # consumes at startup while scanning. Default is "50 MiB".\n scan_rate_per_sec: 50 MiB\n\n # Limit on the size of files that will be hashed. Default is "100 MiB".\n max_file_size: 100 MiB\n\n # Hash types to compute when the file changes. Supported types are\n # blake2b_256, blake2b_384, blake2b_512, md5, sha1, sha224, sha256, sha384,\n # sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384, sha3_512, and xxh64.\n # Default is sha1.\n hash_types: [sha1]\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n # Parse detailed information for the listed fields. Field paths in the list below\n # that are a prefix of other field paths imply the longer field path. A set of\n # fields may be specified using an RE2 regular expression quoted in //. For example\n # /^file\.pe\./ will match all file.pe.* fields. 
Note that the expression is not\n # implicitly anchored, so the empty expression will match all fields.\n # file_parsers:\n # - file.elf.sections\n # - file.elf.sections.name\n # - file.elf.sections.physical_size\n # - file.elf.sections.virtual_size\n # - file.elf.sections.entropy\n # - file.elf.sections.var_entropy\n # - file.elf.import_hash\n # - file.elf.imports\n # - file.elf.imports_names_entropy\n # - file.elf.imports_names_var_entropy\n # - file.elf.go_import_hash\n # - file.elf.go_imports\n # - file.elf.go_imports_names_entropy\n # - file.elf.go_imports_names_var_entropy\n # - file.elf.go_stripped\n # - file.macho.sections\n # - file.macho.sections.name\n # - file.macho.sections.physical_size\n # - file.macho.sections.virtual_size\n # - file.macho.sections.entropy\n # - file.macho.sections.var_entropy\n # - file.macho.import_hash\n # - file.macho.symhash\n # - file.macho.imports\n # - file.macho.imports_names_entropy\n # - file.macho.imports_names_var_entropy\n # - file.macho.go_import_hash\n # - file.macho.go_imports\n # - file.macho.go_imports_names_entropy\n # - file.macho.go_imports_names_var_entropy\n # - file.macho.go_stripped\n # - file.pe.sections\n # - file.pe.sections.name\n # - file.pe.sections.physical_size\n # - file.pe.sections.virtual_size\n # - file.pe.sections.entropy\n # - file.pe.sections.var_entropy\n # - file.pe.import_hash\n # - file.pe.imphash\n # - file.pe.imports\n # - file.pe.imports_names_entropy\n # - file.pe.imports_names_var_entropy\n # - file.pe.go_import_hash\n # - file.pe.go_imports\n # - file.pe.go_imports_names_entropy\n # - file.pe.go_imports_names_var_entropy\n # - file.pe.go_stripped\n \n\n\n# ================================== General ===================================\n\n# The name of the shipper that publishes the network data. 
It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n# If this option is not defined, the hostname is used.\n#name:\n\n# The tags of the shipper are included in their field with each\n# transaction published. Tags make it easy to group servers by different\n# logical properties.\n#tags: ["service-X", "web-tier"]\n\n# Optional fields that you can specify to add additional information to the\n# output. Fields can be scalar values, arrays, dictionaries, or any nested\n# combination of these.\n#fields:\n# env: staging\n\n# If this option is set to true, the custom fields are stored as top-level\n# fields in the output document instead of being grouped under a field\n# sub-dictionary. Default is false.\n#fields_under_root: false\n\n# Configure the precision of all timestamps in Auditbeat.\n# Available options: millisecond, microsecond, nanosecond\n#timestamp.precision: millisecond\n\n# Internal queue configuration for buffering events to be published.\n# Queue settings may be overridden by performance presets in the\n# Elasticsearch output. To configure them manually use "preset: custom".\n#queue:\n # Queue type by name (default 'mem')\n # The memory queue will present all available events (up to the outputs\n # bulk_max_size) to the output, the moment the output is ready to serve\n # another batch of events.\n #mem:\n # Max number of events the queue can buffer.\n #events: 3200\n\n # Hints the minimum number of events stored in the queue,\n # before providing a batch of events to the outputs.\n # The default value is set to 2048.\n # A value of 0 ensures events are immediately available\n # to be sent to the outputs.\n #flush.min_events: 1600\n\n # Maximum duration after which events are available to the outputs,\n # if the number of events stored in the queue is < `flush.min_events`.\n #flush.timeout: 10s\n\n # The disk queue stores incoming events on disk until the output is\n # ready for them. 
This allows a higher event limit than the memory-only\n # queue and lets pending events persist through a restart.\n #disk:\n # The directory path to store the queue's data.\n #path: "${path.data}/diskqueue"\n\n # The maximum space the queue should occupy on disk. Depending on\n # input settings, events that exceed this limit are delayed or discarded.\n #max_size: 10GB\n\n # The maximum size of a single queue data file. Data in the queue is\n # stored in smaller segments that are deleted after all their events\n # have been processed.\n #segment_size: 1GB\n\n # The number of events to read from disk to memory while waiting for\n # the output to request them.\n #read_ahead: 512\n\n # The number of events to accept from inputs while waiting for them\n # to be written to disk. If event data arrives faster than it\n # can be written to disk, this setting prevents it from overflowing\n # main memory.\n #write_ahead: 2048\n\n # The duration to wait before retrying when the queue encounters a disk\n # write error.\n #retry_interval: 1s\n\n # The maximum length of time to wait before retrying on a disk write\n # error. If the queue encounters repeated errors, it will double the\n # length of its retry interval each time, up to this maximum.\n #max_retry_interval: 30s\n\n# Sets the maximum number of CPUs that can be executed simultaneously. The\n# default is the number of logical CPUs available in the system.\n#max_procs:\n\n# ================================= Processors =================================\n\n# Processors are used to reduce the number of fields in the exported event or to\n# enhance the event with external metadata. 
This section defines a list of\n# processors that are applied one by one and the first one receives the initial\n# event:\n#\n# event -> filter1 -> event1 -> filter2 ->event2 ...\n#\n# The supported processors are drop_fields, drop_event, include_fields,\n# decode_json_fields, and add_cloud_metadata.\n#\n# For example, you can use the following processors to keep the fields that\n# contain CPU load percentages, but remove the fields that contain CPU ticks\n# values:\n#\n#processors:\n# - include_fields:\n# fields: ["cpu"]\n# - drop_fields:\n# fields: ["cpu.user", "cpu.system"]\n#\n# The following example drops the events that have the HTTP response code 200:\n#\n#processors:\n# - drop_event:\n# when:\n# equals:\n# http.code: 200\n#\n# The following example renames the field a to b:\n#\n#processors:\n# - rename:\n# fields:\n# - from: "a"\n# to: "b"\n#\n# The following example tokenizes the string into fields:\n#\n#processors:\n# - dissect:\n# tokenizer: "%{key1} - %{key2}"\n# field: "message"\n# target_prefix: "dissect"\n#\n# The following example enriches each event with metadata from the cloud\n# provider about the host machine. 
It works on EC2, GCE, DigitalOcean,\n# Tencent Cloud, and Alibaba Cloud.\n#\n#processors:\n# - add_cloud_metadata: ~\n#\n# The following example enriches each event with the machine's local time zone\n# offset from UTC.\n#\n#processors:\n# - add_locale:\n# format: offset\n#\n# The following example enriches each event with docker metadata, it matches\n# given fields to an existing container id and adds info from that container:\n#\n#processors:\n# - add_docker_metadata:\n# host: "unix:///var/run/docker.sock"\n# match_fields: ["system.process.cgroup.id"]\n# match_pids: ["process.pid", "process.parent.pid"]\n# match_source: true\n# match_source_index: 4\n# match_short_id: false\n# cleanup_timeout: 60\n# labels.dedot: false\n# # To connect to Docker over TLS you must specify a client and CA certificate.\n# #ssl:\n# # certificate_authority: "/etc/pki/root/ca.pem"\n# # certificate: "/etc/pki/client/cert.pem"\n# # key: "/etc/pki/client/cert.key"\n#\n# The following example enriches each event with docker metadata, it matches\n# container id from log path available in `source` field (by default it expects\n# it to be /var/lib/docker/containers/*/*.log).\n#\n#processors:\n# - add_docker_metadata: ~\n#\n# The following example enriches each event with host metadata.\n#\n#processors:\n# - add_host_metadata: ~\n#\n# The following example enriches each event with process metadata using\n# process IDs included in the event.\n#\n#processors:\n# - add_process_metadata:\n# match_pids: ["system.process.ppid"]\n# target: system.process.parent\n#\n# The following example decodes fields containing JSON strings\n# and replaces the strings with valid JSON objects.\n#\n#processors:\n# - decode_json_fields:\n# fields: ["field1", "field2", ...]\n# process_array: false\n# max_depth: 1\n# target: ""\n# overwrite_keys: false\n#\n#processors:\n# - decompress_gzip_field:\n# from: "field1"\n# to: "field2"\n# ignore_missing: false\n# fail_on_error: true\n#\n# The following example copies the 
value of the message to message_copied\n#\n#processors:\n# - copy_fields:\n# fields:\n# - from: message\n# to: message_copied\n# fail_on_error: true\n# ignore_missing: false\n#\n# The following example truncates the value of the message to 1024 bytes\n#\n#processors:\n# - truncate_fields:\n# fields:\n# - message\n# max_bytes: 1024\n# fail_on_error: false\n# ignore_missing: true\n#\n# The following example preserves the raw message under event.original\n#\n#processors:\n# - copy_fields:\n# fields:\n# - from: message\n# to: event.original\n# fail_on_error: false\n# ignore_missing: true\n# - truncate_fields:\n# fields:\n# - event.original\n# max_bytes: 1024\n# fail_on_error: false\n# ignore_missing: true\n#\n# The following example URL-decodes the value of field1 to field2\n#\n#processors:\n# - urldecode:\n# fields:\n# - from: "field1"\n# to: "field2"\n# ignore_missing: false\n# fail_on_error: true\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Auditbeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. 
The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Array of hosts to connect to.\n # Scheme and port can be left out and will be set to the default (http and 9200)\n # In case you specify and additional path, the scheme is required: http://localhost:9200/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200\n hosts: ["localhost:9200"]\n\n # Performance presets configure other output fields to recommended values\n # based on a performance priority.\n # Options are "balanced", "throughput", "scale", "latency" and "custom".\n # Default if unspecified: "custom"\n preset: balanced\n\n # Set gzip compression level. Set to 0 to disable compression.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 1.\n #compression_level: 1\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n # Dictionary of HTTP parameters to pass within the URL with index operations.\n #parameters:\n #param1: value1\n #param2: value2\n\n # Number of workers per Elasticsearch host.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n #worker: 1\n\n # If set to true and multiple hosts are configured, the output plugin load\n # balances published events onto all Elasticsearch hosts. 
If set to false,\n # the output plugin sends all events to only one host (determined at random)\n # and will switch to another host if the currently selected one becomes\n # unreachable. The default value is true.\n #loadbalance: true\n\n # Optional data stream or index name. The default is "auditbeat-%{[agent.version]}".\n # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.\n #index: "auditbeat-%{[agent.version]}"\n\n # Optional ingest pipeline. By default, no pipeline will be used.\n #pipeline: ""\n\n # Optional HTTP path\n #path: "/elasticsearch"\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Proxy server URL\n #proxy_url: http://proxy:3128\n\n # Whether to disable proxy settings for outgoing connections. If true, this\n # takes precedence over both the proxy_url field and any environment settings\n # (HTTP_PROXY, HTTPS_PROXY). The default is false.\n #proxy_disable: false\n\n # The number of times a particular Elasticsearch index operation is attempted. If\n # the indexing operation doesn't succeed after this many retries, the events are\n # dropped. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Elasticsearch bulk API index request.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 1600.\n #bulk_max_size: 1600\n\n # The number of seconds to wait before trying to reconnect to Elasticsearch\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Elasticsearch after a network error. 
The default is 60s.\n #backoff.max: 60s\n\n # The maximum amount of time an idle connection will remain idle\n # before closing itself. Zero means use the default of 60s. The\n # format is a Go language duration (example 60s is 60 seconds).\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 3s.\n # idle_connection_timeout: 3s\n\n # Configure HTTP request timeout before failing a request to Elasticsearch.\n #timeout: 90\n\n # Prevents auditbeat from connecting to older Elasticsearch versions when set to `false`\n #allow_older_versions: true\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n # Enables restarting auditbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. 
Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/elastic.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Number of workers per Logstash host.\n #worker: 1\n\n # Set gzip compression level.\n #compression_level: 3\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Optional maximum time to live for a connection to Logstash, after which the\n # connection will be re-established. A value of `0s` (the default) will\n # disable this feature.\n #\n # Not yet supported for async connections (i.e. with the "pipelining" option set)\n #ttl: 30s\n\n # Optionally load-balance events between Logstash hosts. Default is false.\n #loadbalance: false\n\n # Number of batches to be sent asynchronously to Logstash while processing\n # new batches.\n #pipelining: 2\n\n # If enabled only a subset of events in a batch of events is transferred per\n # transaction. The number of events to be sent increases up to `bulk_max_size`\n # if no error is encountered.\n #slow_start: false\n\n # The number of seconds to wait before trying to reconnect to Logstash\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. 
The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Logstash after a network error. The default is 60s.\n #backoff.max: 60s\n\n # Optional index name. The default index name is set to auditbeat\n # in all lowercase.\n #index: 'auditbeat'\n\n # SOCKS5 proxy server URL\n #proxy_url: socks5://user:password@socks5-server:2233\n\n # Resolve names locally when using a proxy server. Defaults to false.\n #proxy_use_local_resolver: false\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. 
Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enables restarting auditbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, the events are typically dropped.\n # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting\n # and retry until all events are published. Set max_retries to a value less\n # than 0 to retry until all events are published. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Logstash request. The\n # default is 2048.\n #bulk_max_size: 2048\n\n # The number of seconds to wait for responses from the Logstash server before\n # timing out. The default is 30s.\n #timeout: 30s\n\n# -------------------------------- Kafka Output --------------------------------\n#output.kafka:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # The list of Kafka broker addresses from which to fetch the cluster metadata.\n # The cluster metadata contain the actual Kafka brokers events are published\n # to.\n #hosts: ["localhost:9092"]\n\n # The Kafka topic used for produced events. The setting can be a format string\n # using any event field. To set the topic from document type use `%{[type]}`.\n #topic: beats\n\n # The Kafka event key setting. Use format string to create a unique event key.\n # By default no event key will be generated.\n #key: ''\n\n # The Kafka event partitioning strategy. 
Default hashing strategy is `hash`\n # using the `output.kafka.key` setting or randomly distributes events if\n # `output.kafka.key` is not configured.\n #partition.hash:\n # If enabled, events will only be published to partitions with reachable\n # leaders. Default is false.\n #reachable_only: false\n\n # Configure alternative event field names used to compute the hash value.\n # If empty `output.kafka.key` setting will be used.\n # Default value is empty list.\n #hash: []\n\n # Authentication details. Password is required if username is set.\n #username: ''\n #password: ''\n\n # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.\n # Defaults to PLAIN when `username` and `password` are configured.\n #sasl.mechanism: ''\n\n # Kafka version Auditbeat is assumed to run against. Defaults to the "1.0.0".\n #version: '1.0.0'\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Metadata update configuration. Metadata contains leader information\n # used to decide which broker to use when publishing.\n #metadata:\n # Max metadata request retry attempts when cluster is in middle of leader\n # election. Defaults to 3 retries.\n #retry.max: 3\n\n # Wait time between retries during leader elections. Default is 250ms.\n #retry.backoff: 250ms\n\n # Refresh metadata interval. Defaults to every 10 minutes.\n #refresh_frequency: 10m\n\n # Strategy for fetching the topics metadata from the broker. Default is false.\n #full: false\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, events are typically dropped.\n # Some Beats, such as Filebeat, ignore the max_retries setting and retry until\n # all events are published. Set max_retries to a value less than 0 to retry\n # until all events are published. 
The default is 3.\n #max_retries: 3\n\n # The number of seconds to wait before trying to republish to Kafka\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to republish. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful publish, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to republish to\n # Kafka after a network error. The default is 60s.\n #backoff.max: 60s\n\n # The maximum number of events to bulk in a single Kafka request. The default\n # is 2048.\n #bulk_max_size: 2048\n\n # Duration to wait before sending bulk Kafka request. 0 is no delay. The default\n # is 0.\n #bulk_flush_frequency: 0s\n\n # The number of seconds to wait for responses from the Kafka brokers before\n # timing out. The default is 30s.\n #timeout: 30s\n\n # The maximum duration a broker will wait for number of required ACKs. The\n # default is 10s.\n #broker_timeout: 10s\n\n # The number of messages buffered for each Kafka broker. The default is 256.\n #channel_buffer_size: 256\n\n # The keep-alive period for an active network connection. If 0s, keep-alives\n # are disabled. The default is 0 seconds.\n #keep_alive: 0\n\n # Sets the output compression codec. Must be one of none, snappy and gzip. The\n # default is gzip.\n #compression: gzip\n\n # Set the compression level. Currently only gzip provides a compression level\n # between 0 and 9. The default value is chosen by the compression algorithm.\n #compression_level: 4\n\n # The maximum permitted size of JSON-encoded messages. Bigger messages will be\n # dropped. The default value is 1000000 (bytes). This value should be equal to\n # or less than the broker's message.max.bytes.\n #max_message_bytes: 1000000\n\n # The ACK reliability level required from broker. 0=no response, 1=wait for\n # local commit, -1=wait for all replicas to commit. The default is 1. 
Note:\n # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently\n # on error.\n #required_acks: 1\n\n # The configurable ClientID used for logging, debugging, and auditing\n # purposes. The default is "beats".\n #client_id: beats\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enables restarting auditbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. 
Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/security/keytabs/kafka.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # The service name. Service principal name is constructed from\n # service_name/hostname@realm.\n #kerberos.service_name: kafka\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n # Enables Kerberos FAST authentication. This may\n # conflict with certain Active Directory configurations.\n #kerberos.enable_krb5_fast: false\n\n# -------------------------------- Redis Output --------------------------------\n#output.redis:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty print json event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # The list of Redis servers to connect to. If load-balancing is enabled, the\n # events are distributed to the servers in the list. If one server becomes\n # unreachable, the events are distributed to the reachable servers only.\n # The hosts setting supports redis and rediss urls with custom password like\n # redis://:password@localhost:6379.\n #hosts: ["localhost:6379"]\n\n # The name of the Redis list or channel the events are published to. The\n # default is auditbeat.\n #key: auditbeat\n\n # The password to authenticate to Redis with. The default is no authentication.\n #password:\n\n # The Redis database number where the events are published. The default is 0.\n #db: 0\n\n # The Redis data type to use for publishing events. If the data type is list,\n # the Redis RPUSH command is used. If the data type is channel, the Redis\n # PUBLISH command is used. 
The default value is list.\n #datatype: list\n\n # The number of workers to use for each host configured to publish events to\n # Redis. Use this setting along with the loadbalance option. For example, if\n # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each\n # host).\n #worker: 1\n\n # If set to true and multiple hosts or workers are configured, the output\n # plugin load balances published events onto all Redis hosts. If set to false,\n # the output plugin sends all events to only one host (determined at random)\n # and will switch to another host if the currently selected one becomes\n # unreachable. The default value is true.\n #loadbalance: true\n\n # The Redis connection timeout in seconds. The default is 5 seconds.\n #timeout: 5s\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, the events are typically dropped.\n # Some Beats, such as Filebeat, ignore the max_retries setting and retry until\n # all events are published. Set max_retries to a value less than 0 to retry\n # until all events are published. The default is 3.\n #max_retries: 3\n\n # The number of seconds to wait before trying to reconnect to Redis\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Redis after a network error. The default is 60s.\n #backoff.max: 60s\n\n # The maximum number of events to bulk in a single Redis request or pipeline.\n # The default is 2048.\n #bulk_max_size: 2048\n\n # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. 
The\n # value must be a URL with a scheme of socks5://.\n #proxy_url:\n\n # This option determines whether Redis hostnames are resolved locally when\n # using a proxy. The default value is false, which means that name resolution\n # occurs on the proxy server.\n #proxy_use_local_resolver: false\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n# -------------------------------- File Output ---------------------------------\n#output.file:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Path to the directory where to save the generated files. The option is\n # mandatory.\n #path: "/tmp/auditbeat"\n\n # Name of the generated files. 
The default is `auditbeat` and it generates\n # files: `auditbeat-{datetime}.ndjson`, `auditbeat-{datetime}-1.ndjson`, etc.\n #filename: auditbeat\n\n # Maximum size in kilobytes of each file. When this size is reached, and on\n # every Auditbeat restart, the files are rotated. The default value is 10240\n # kB.\n #rotate_every_kb: 10000\n\n # Maximum number of files under path. When this number of files is reached,\n # the oldest file is deleted and the rest are shifted from last to first. The\n # default is 7 files.\n #number_of_files: 7\n\n # Permissions to use for file creation. The default is 0600.\n #permissions: 0600\n \n # Configure automatic file rotation on every startup. The default is true.\n #rotate_on_startup: true\n\n# ------------------------------- Console Output -------------------------------\n#output.console:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n# =================================== Paths ====================================\n\n# The home path for the Auditbeat installation. This is the default base path\n# for all other path settings and for miscellaneous files that come with the\n# distribution (for example, the sample dashboards).\n# If not set by a CLI flag or in the configuration file, the default for the\n# home path is the location of the binary.\n#path.home:\n\n# The configuration path for the Auditbeat installation. This is the default\n# base path for configuration files, including the main YAML configuration file\n# and the Elasticsearch template file. If not set by a CLI flag or in the\n# configuration file, the default for the configuration path is the home path.\n#path.config: ${path.home}\n\n# The data path for the Auditbeat installation. This is the default base path\n# for all the files in which Auditbeat needs to store its data. 
If not set by a\n# CLI flag or in the configuration file, the default for the data path is a data\n# subdirectory inside the home path.\n#path.data: ${path.home}/data\n\n# The logs path for an Auditbeat installation. This is the default location for\n# the Beat's log files. If not set by a CLI flag or in the configuration file,\n# the default for the logs path is a logs subdirectory inside the home path.\n#path.logs: ${path.home}/logs\n\n# ================================== Keystore ==================================\n\n# Location of the Keystore containing the keys and their sensitive values.\n#keystore.path: "${path.config}/beats.keystore"\n\n# ================================= Dashboards =================================\n\n# These settings control loading the sample dashboards to the Kibana index. Loading\n# the dashboards is disabled by default and can be enabled either by setting the\n# options here or by using the `-setup` CLI flag or the `setup` command.\n#setup.dashboards.enabled: false\n\n# The directory from where to read the dashboards. The default is the `kibana`\n# folder in the home path.\n#setup.dashboards.directory: ${path.home}/kibana\n\n# The URL from where to download the dashboard archive. It is used instead of\n# the directory if it has a value.\n#setup.dashboards.url:\n\n# The file archive (zip file) from where to read the dashboards. It is used instead\n# of the directory when it has a value.\n#setup.dashboards.file:\n\n# In case the archive contains the dashboards from multiple Beats, this lets you\n# select which one to load. You can load all the dashboards in the archive by\n# setting this to the empty string.\n#setup.dashboards.beat: auditbeat\n\n# The name of the Kibana index to use for setting the configuration. Default is ".kibana"\n#setup.dashboards.kibana_index: .kibana\n\n# The Elasticsearch index name. This overwrites the index name defined in the\n# dashboards and index pattern. 
Example: testbeat-*\n#setup.dashboards.index:\n\n# Always use the Kibana API for loading the dashboards instead of autodetecting\n# how to install the dashboards by first querying Elasticsearch.\n#setup.dashboards.always_kibana: false\n\n# If true and Kibana is not reachable at the time when dashboards are loaded,\n# it will retry to reconnect to Kibana instead of exiting with an error.\n#setup.dashboards.retry.enabled: false\n\n# Duration interval between Kibana connection retries.\n#setup.dashboards.retry.interval: 1s\n\n# Maximum number of retries before exiting with an error, 0 for unlimited retrying.\n#setup.dashboards.retry.maximum: 0\n\n# ================================== Template ==================================\n\n# A template is used to set the mapping in Elasticsearch\n# By default template loading is enabled and the template is loaded.\n# These settings can be adjusted to load your own template or overwrite existing ones.\n\n# Set to false to disable template loading.\n#setup.template.enabled: true\n\n# Template name. By default the template name is "auditbeat-%{[agent.version]}"\n# The template name and pattern has to be set in case the Elasticsearch index pattern is modified.\n#setup.template.name: "auditbeat-%{[agent.version]}"\n\n# Template pattern. By default the template pattern is "auditbeat-%{[agent.version]}" to apply to the default index settings.\n# The template name and pattern has to be set in case the Elasticsearch index pattern is modified.\n#setup.template.pattern: "auditbeat-%{[agent.version]}"\n\n# Path to fields.yml file to generate the template\n#setup.template.fields: "${path.config}/fields.yml"\n\n# A list of fields to be added to the template and Kibana index pattern. Also\n# specify setup.template.overwrite: true to overwrite the existing template.\n#setup.template.append_fields:\n#- name: field_name\n# type: field_type\n\n# Enable JSON template loading. 
If this is enabled, the fields.yml is ignored.\n#setup.template.json.enabled: false\n\n# Path to the JSON template file\n#setup.template.json.path: "${path.config}/template.json"\n\n# Name under which the template is stored in Elasticsearch\n#setup.template.json.name: ""\n\n# Set this option if the JSON template is a data stream.\n#setup.template.json.data_stream: false\n\n# Overwrite existing template\n# Do not enable this option for more than one instance of auditbeat as it might\n# overload your Elasticsearch with too many update requests.\n#setup.template.overwrite: false\n\n# Elasticsearch template settings\nsetup.template.settings:\n\n # A dictionary of settings to place into the settings.index dictionary\n # of the Elasticsearch template. For more details, please check\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html\n #index:\n #number_of_shards: 1\n #codec: best_compression\n\n # A dictionary of settings for the _source field. For more details, please check\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html\n #_source:\n #enabled: false\n\n# ====================== Index Lifecycle Management (ILM) ======================\n\n# Configure index lifecycle management (ILM) to manage the backing indices\n# of your data streams.\n\n# Enable ILM support. Valid values are true, or false.\n#setup.ilm.enabled: true\n\n# Set the lifecycle policy name. The default policy name is\n# 'beatname'.\n#setup.ilm.policy_name: "mypolicy"\n\n# The path to a JSON file that contains a lifecycle policy configuration. Used\n# to load your own lifecycle policy.\n#setup.ilm.policy_file:\n\n# Disable the check for an existing lifecycle policy. The default is true.\n# If you set this option to false, lifecycle policy will not be installed,\n# even if setup.ilm.overwrite is set to true.\n#setup.ilm.check_exists: true\n\n# Overwrite the lifecycle policy at startup. 
The default is false.\n#setup.ilm.overwrite: false\n\n# ======================== Data Stream Lifecycle (DSL) =========================\n\n# Configure Data Stream Lifecycle to manage data streams while connected to Serverless elasticsearch. \n# These settings are mutually exclusive with ILM settings which are not supported in Serverless projects.\n\n# Enable DSL support. Valid values are true, or false.\n#setup.dsl.enabled: true\n\n# Set the lifecycle policy name or pattern. For DSL, this name must match the data stream that the lifecycle is for.\n# The default data stream pattern is auditbeat-%{[agent.version]}"\n# The template string `%{[agent.version]}` will resolve to the current stack version. \n# The other possible template value is `%{[beat.name]}`.\n#setup.dsl.data_stream_pattern: "auditbeat-%{[agent.version]}"\n\n# The path to a JSON file that contains a lifecycle policy configuration. Used\n# to load your own lifecycle policy.\n# If no custom policy is specified, a default policy with a lifetime of 7 days will be created.\n#setup.dsl.policy_file:\n\n# Disable the check for an existing lifecycle policy. The default is true. If\n# you disable this check, set setup.dsl.overwrite: true so the lifecycle policy\n# can be installed.\n#setup.dsl.check_exists: true\n\n# Overwrite the lifecycle policy at startup. 
The default is false.\n#setup.dsl.overwrite: false\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify an additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Optional protocol and basic auth credentials.\n #protocol: "https"\n #username: "elastic"\n #password: "changeme"\n\n # Optional HTTP path\n #path: ""\n\n # Optional Kibana space ID.\n #space.id: ""\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. 
It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n# ================================== Logging ===================================\n\n# There are four options for the log output: file, stderr, syslog, eventlog\n# The file output is the default.\n\n# Sets log level. 
The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: info\n\n# Enable debug output for selected components. To enable all selectors use ["*"]\n# Other available selectors are "beat", "publisher", "service"\n# Multiple selectors can be chained.\n#logging.selectors: [ ]\n\n# Send all logging output to stderr. The default is false.\n#logging.to_stderr: false\n\n# Send all logging output to syslog. The default is false.\n#logging.to_syslog: false\n\n# Send all logging output to Windows Event Logs. The default is false.\n#logging.to_eventlog: false\n\n# If enabled, Auditbeat periodically logs its internal metrics that have changed\n# in the last period. For each metric that changed, the delta from the value at\n# the beginning of the period is logged. Also, the total values for\n# all non-zero internal metrics are logged on shutdown. The default is true.\n#logging.metrics.enabled: true\n\n# The period after which to log the internal metrics. The default is 30s.\n#logging.metrics.period: 30s\n\n# A list of metrics namespaces to report in the logs. Defaults to [stats].\n# `stats` contains general Beat metrics. `dataset` may be present in some\n# Beats and contains module or input metrics.\n#logging.metrics.namespaces: [stats]\n\n# Logging to rotating files. Set logging.to_files to false to disable logging to\n# files.\nlogging.to_files: true\nlogging.files:\n # Configure the path where the logs are written. The default is the logs directory\n # under the home path (the binary location).\n #path: /var/log/auditbeat\n\n # The name of the files where the logs are written to.\n #name: auditbeat\n\n # Configure log file size limit. If the limit is reached, log file will be\n # automatically rotated.\n #rotateeverybytes: 10485760 # = 10MB\n\n # Number of rotated log files to keep. The oldest files will be deleted first.\n #keepfiles: 7\n\n # The permissions mask to apply when rotating log files. 
The default value is 0600.\n # Must be a valid Unix-style file permissions mask expressed in octal notation.\n #permissions: 0600\n\n # Enable log file rotation on time intervals in addition to the size-based rotation.\n # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h\n # are boundary-aligned with minutes, hours, days, weeks, months, and years as\n # reported by the local system clock. All other intervals are calculated from the\n # Unix epoch. Defaults to disabled.\n #interval: 0\n\n # Rotate existing logs on startup rather than appending them to the existing\n # file. Defaults to true.\n # rotateonstartup: true\n\n#=============================== Events Logging ===============================\n# Some outputs will log raw events on errors like indexing errors in the\n# Elasticsearch output, to prevent logging raw events (that may contain\n# sensitive information) together with other log messages, a different\n# log file, only for log entries containing raw events, is used. It will\n# use the same level, selectors and all other configurations from the\n# default logger, but it will have its own file configuration.\n#\n# Having a different log file for raw events also prevents event data\n# from drowning out the regular log files.\n#\n# IMPORTANT: No matter the default logger output configuration, raw events\n# will **always** be logged to a file configured by `logging.event_data.files`.\n\n# logging.event_data:\n# Logging to rotating files. Set logging.to_files to false to disable logging to\n# files.\n#logging.event_data.to_files: true\n#logging.event_data:\n # Configure the path where the logs are written. The default is the logs directory\n # under the home path (the binary location).\n #path: /var/log/auditbeat\n\n # The name of the files where the logs are written to.\n #name: auditbeat-events-data\n\n # Configure log file size limit. 
If the limit is reached, log file will be\n # automatically rotated.\n #rotateeverybytes: 5242880 # = 5MB\n\n # Number of rotated log files to keep. The oldest files will be deleted first.\n #keepfiles: 2\n\n # The permissions mask to apply when rotating log files. The default value is 0600.\n # Must be a valid Unix-style file permissions mask expressed in octal notation.\n #permissions: 0600\n\n # Enable log file rotation on time intervals in addition to the size-based rotation.\n # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h\n # are boundary-aligned with minutes, hours, days, weeks, months, and years as\n # reported by the local system clock. All other intervals are calculated from the\n # Unix epoch. Defaults to disabled.\n #interval: 0\n\n # Rotate existing logs on startup rather than appending them to the existing\n # file. Defaults to false.\n # rotateonstartup: false\n\n# ============================= X-Pack Monitoring ==============================\n# Auditbeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Auditbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. 
Most settings from the\n# Elasticsearch output are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n # Array of hosts to connect to.\n # Scheme and port can be left out and will be set to the default (http and 9200)\n # In case you specify an additional path, the scheme is required: http://localhost:9200/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200\n #hosts: ["localhost:9200"]\n\n # Set gzip compression level.\n #compression_level: 0\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "beats_system"\n #password: "changeme"\n\n # Dictionary of HTTP parameters to pass within the URL with index operations.\n #parameters:\n #param1: value1\n #param2: value2\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Proxy server url\n #proxy_url: http://proxy:3128\n\n # The number of times a particular Elasticsearch index operation is attempted. If\n # the indexing operation doesn't succeed after this many retries, the events are\n # dropped. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Elasticsearch bulk API index request.\n # The default is 50.\n #bulk_max_size: 50\n\n # The number of seconds to wait before trying to reconnect to Elasticsearch\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. 
After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Elasticsearch after a network error. The default is 60s.\n #backoff.max: 60s\n\n # Configure HTTP request timeout before failing a request to Elasticsearch.\n #timeout: 90\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. 
It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/elastic.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n #metrics.period: 10s\n #state.period: 1m\n\n# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`\n# setting. You can find the value for this setting in the Elastic Cloud web UI.\n#monitoring.cloud.id:\n\n# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`\n# and `monitoring.elasticsearch.password` settings. The format is `<user>:<pass>`.\n#monitoring.cloud.auth:\n\n# =============================== HTTP Endpoint ================================\n\n# Each beat can expose internal metrics through an HTTP endpoint. For security\n# reasons the endpoint is disabled by default. This feature is currently experimental.\n# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output\n# append ?pretty to the URL.\n\n# Defines if the HTTP endpoint is enabled.\n#http.enabled: false\n\n# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe.\n# When using IP addresses, it is recommended to only use localhost.\n#http.host: localhost\n\n# Port on which the HTTP endpoint will bind. Default is 5066.\n#http.port: 5066\n\n# Define which user should be owning the named pipe.\n#http.named_pipe.user:\n\n# Define which permissions should be applied to the named pipe, use the Security\n# Descriptor Definition Language (SDDL) to define the permission. 
This option cannot be used with\n# `http.user`.\n#http.named_pipe.security_descriptor:\n\n# Defines if the HTTP pprof endpoints are enabled.\n# It is recommended that this is only enabled on localhost as these endpoints may leak data.\n#http.pprof.enabled: false\n\n# Controls the fraction of goroutine blocking events that are reported in the\n# blocking profile.\n#http.pprof.block_profile_rate: 0\n\n# Controls the fraction of memory allocations that are recorded and reported in\n# the memory profile.\n#http.pprof.mem_profile_rate: 524288\n\n# Controls the fraction of mutex contention events that are reported in the\n# mutex profile.\n#http.pprof.mutex_profile_rate: 0\n\n# ============================== Process Security ==============================\n\n# Enable or disable seccomp system call filtering on Linux. Default is enabled.\n#seccomp.enabled: true\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the auditbeat.\n#instrumentation:\n # Set to true to enable instrumentation of auditbeat.\n #enabled: false\n\n # Environment in which auditbeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n # Enable profiling of the server, recording profile samples as events.\n #\n # This feature is experimental.\n #profiling:\n #cpu:\n # Set to true to enable CPU profiling.\n #enabled: false\n #interval: 60s\n #duration: 10s\n #heap:\n # Set to true to enable heap profiling.\n #enabled: false\n #interval: 60s\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration aliases\n#migration.6_to_7.enabled: false\n\n# =============================== Feature Flags 
================================\n\n# Enable and configure feature flags.\n#features:\n# fqdn:\n# enabled: true\n\n
dataset_sample\yaml\go\auditbeat.reference.yml
auditbeat.reference.yml
YAML
71,846
0.75
0.073577
0.973666
react-lib
643
2023-10-26T02:23:33.010634
BSD-3-Clause
false
929656bb8bc02157ce0d17e3734c5b7d
###################### Auditbeat Configuration Example #########################\n\n# This is an example configuration file highlighting only the most common\n# options. The auditbeat.reference.yml file from the same directory contains all\n# the supported options with more comments. You can use it as a reference.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/auditbeat/index.html\n\n# =========================== Modules configuration ============================\nauditbeat.modules:\n\n- module: auditd\n # Load audit rules from separate files. Same format as audit.rules(7).\n audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ]\n audit_rules: |\n ## Define audit rules here.\n ## Create file watches (-w) or syscall audits (-a or -A). Uncomment these\n ## examples or add your own rules.\n\n ## If you are on a 64 bit platform, everything should be running\n ## in 64 bit mode. This rule will detect any use of the 32 bit syscalls\n ## because this might be a sign of someone exploiting a hole in the 32\n ## bit API.\n #-a always,exit -F arch=b32 -S all -F key=32bit-abi\n\n ## Executions.\n #-a always,exit -F arch=b64 -S execve,execveat -k exec\n\n ## External access (warning: these can be expensive to audit).\n #-a always,exit -F arch=b64 -S accept,bind,connect -F key=external-access\n\n ## Identity changes.\n #-w /etc/group -p wa -k identity\n #-w /etc/passwd -p wa -k identity\n #-w /etc/gshadow -p wa -k identity\n\n ## Unauthorized access attempts.\n #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access\n #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access\n\n- module: file_integrity\n paths:\n - /bin\n - /usr/bin\n - /sbin\n - /usr/sbin\n - /etc\n\n\n# ======================= Elasticsearch template setting =======================\nsetup.template.settings:\n index.number_of_shards: 1\n 
#index.codec: best_compression\n #_source.enabled: false\n\n\n# ================================== General ===================================\n\n# The name of the shipper that publishes the network data. It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n#name:\n\n# The tags of the shipper are included in their field with each\n# transaction published.\n#tags: ["service-X", "web-tier"]\n\n# Optional fields that you can specify to add additional information to the\n# output.\n#fields:\n# env: staging\n\n# ================================= Dashboards =================================\n# These settings control loading the sample dashboards to the Kibana index. Loading\n# the dashboards is disabled by default and can be enabled either by setting the\n# options here or by using the `setup` command.\n#setup.dashboards.enabled: false\n\n# The URL from where to download the dashboard archive. By default, this URL\n# has a value that is computed based on the Beat name and version. For released\n# versions, this URL points to the dashboard archive on the artifacts.elastic.co\n# website.\n#setup.dashboards.url:\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify and additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Kibana Space ID\n # ID of the Kibana Space into which the dashboards should be loaded. 
By default,\n # the Default Space will be used.\n #space.id:\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Auditbeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Array of hosts to connect to.\n hosts: ["localhost:9200"]\n\n # Performance preset - one of "balanced", "throughput", "scale",\n # "latency", or "custom".\n preset: balanced\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Optional SSL. 
By default is off.\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client Certificate Key\n #ssl.key: "/etc/pki/client/cert.key"\n\n# ================================= Processors =================================\n\n# Configure processors to enhance or manipulate events generated by the beat.\n\nprocessors:\n - add_host_metadata: ~\n - add_cloud_metadata: ~\n - add_docker_metadata: ~\n\n\n# ================================== Logging ===================================\n\n# Sets log level. The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: debug\n\n# At debug level, you can selectively enable logging only for some components.\n# To enable all selectors, use ["*"]. Examples of other selectors are "beat",\n# "publisher", "service".\n#logging.selectors: ["*"]\n\n# ============================= X-Pack Monitoring ==============================\n# Auditbeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Auditbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. 
Most settings from the\n# Elasticsearch outputs are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the auditbeat.\n#instrumentation:\n # Set to true to enable instrumentation of auditbeat.\n #enabled: false\n\n # Environment in which auditbeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration aliases\n#migration.6_to_7.enabled: true\n\n
dataset_sample\yaml\go\auditbeat.yml
auditbeat.yml
YAML
8,056
0.95
0.036364
0.871166
vue-tools
520
2025-06-28T10:39:46.796365
BSD-3-Clause
false
43f3f0cd0ec6ef514aa96eabc814b55c
---\n# This assigns a PR to its author\naddAssignees: true\n\naddReviewers: false\n\nassignees:\n - wy65701436\n - Vad1mo\n - OrlinVasilev\n - stonezdj\n - chlins\n - MinerYang\n\nnumberOfAssignees: 3\n
dataset_sample\yaml\go\auto-assignees.yml
auto-assignees.yml
YAML
193
0.8
0
0.083333
node-utils
500
2024-12-20T17:04:14.063316
MIT
false
7349641fd94e5eae003131cd3d6be01f
# AUTO-GENERATED, DO NOT EDIT!\n# Please edit the original at https://github.com/ory/meta/blob/master/templates/repository/common/.github/auto_assign.yml\n\n# Set to true to add reviewers to pull requests\naddReviewers: true\n\n# Set to true to add assignees to pull requests\naddAssignees: true\n\n# A list of reviewers to be added to pull requests (GitHub user name)\nassignees:\n - ory/maintainers\n\n# A number of reviewers added to the pull request\n# Set 0 to add all the reviewers (default: 0)\nnumberOfReviewers: 0\n
dataset_sample\yaml\go\auto_assign.yml
auto_assign.yml
YAML
509
0.8
0
0.583333
awesome-app
349
2024-03-10T03:36:49.839666
BSD-3-Clause
false
970d62e32379dd087764a270a5d484f7
trigger:\n batch: true\n branches:\n include:\n - master\n - dev*\n - refs/tags/*\n\npool:\n vmImage: "ubuntu-latest"\n\nvariables:\n - group: GithubToken\n\nsteps:\n - checkout: self\n - task: GoTool@0\n inputs:\n version: "1.15.2"\n - script: |\n go version\n go mod download\n workingDirectory: $(system.defaultWorkingDirectory)\n displayName: "Fetch sources"\n - script: |\n bazel build --action_env=PATH=$PATH --action_env=GOPATH=$(go env GOPATH) --action_env=GOCACHE=$(go env GOCACHE) --action_env=SPWD=$(pwd) --spawn_strategy local //release:all\n workingDirectory: $(system.defaultWorkingDirectory)\n displayName: "Build Binaries"\n - script: |\n echo $RELEASE_TAG\n ./release/bleedingrelease.sh\n workingDirectory: $(system.defaultWorkingDirectory)\n displayName: "Generate Bleeding Edge Release"\n env:\n WORKDIR: $(system.defaultWorkingDirectory)\n GITHUB_TOKEN: $(GITHUB_TOKEN)\n PRERELEASE: true\n RELEASE_TAG: unstable-$(Build.SourceVersion)\n RELEASE_SHA: $(Build.SourceVersion)\n TRIGGER_REASON: $(Build.SourceBranch)\n GITHUB_REPO_OWNER: v2fly\n GITHUB_REPO_NAME: v2ray-core\n - script: |\n echo $RELEASE_TAG\n ./release/tagrelease.sh\n workingDirectory: $(system.defaultWorkingDirectory)\n displayName: "Generate Tag Release"\n env:\n WORKDIR: $(system.defaultWorkingDirectory)\n GITHUB_TOKEN: $(GITHUB_TOKEN)\n PRERELEASE: true\n RELEASE_TAG: unstable-$(Build.SourceVersion)\n RELEASE_SHA: $(Build.SourceVersion)\n TRIGGER_REASON: $(Build.SourceBranch)\n
dataset_sample\yaml\go\azure-pipelines.yml
azure-pipelines.yml
YAML
1,590
0.8
0
0
vue-tools
517
2024-08-22T15:55:49.114755
BSD-3-Clause
false
88572555e8a212cb5269319986b5b8d8
wtf:\n colors:\n border:\n focusable: darkslateblue\n focused: orange\n normal: gray\n grid:\n columns: [40, 40]\n rows: [13, 13, 4]\n refreshInterval: 1\n mods:\n bargraph:\n enabled: true\n graphIcon: "💀"\n graphStars: 25\n position:\n top: 1\n left: 0\n height: 2\n width: 2\n refreshInterval: 30
dataset_sample\yaml\go\bargraph_config.yml
bargraph_config.yml
YAML
366
0.7
0
0
python-kit
934
2024-08-19T05:01:59.119248
MIT
false
faa9dafb08227c7f9f1a2b6324ec5d4a
firstPRWelcomeComment: Hey, thank you for opening your first Pull Request !\n\n# Comment to be posted to on first time issues\nfirstIssueWelcomeComment: >\n Hey, thank you for opening your first Issue ! 🙂\n If you would like to contribute we have a [guide for contributors](https://golangci-lint.run/contributing/quick-start/).\n
dataset_sample\yaml\go\boring-cyborg.yml
boring-cyborg.yml
YAML
328
0.8
0.5
0.2
node-utils
640
2023-11-26T11:21:40.749706
GPL-3.0
false
f6a91ede3febaf0c11440b09363d8cc8
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n # Check `release-note` in changelog entries.\n # https://semgrep.dev/s/DyRW\n - id: "changelog-release-note"\n patterns:\n - pattern: "```$CHANGE_TYPE"\n - pattern-not-inside: "```release-note"\n # This is probably the right way to write this rule, but semgrep doesn't\n # like it: https://github.com/returntocorp/semgrep/issues/4565\n # - pattern-not: "```release-note:..."\n message: "Missing `relelease-note`"\n languages:\n - "generic"\n severity: "ERROR"\n fix: "```release-note:$CHANGE_TYPE"\n paths:\n include:\n - ".changelog"\n
dataset_sample\yaml\go\changelog.yml
changelog.yml
YAML
659
0.8
0
0.35
python-kit
802
2023-11-05T10:03:20.909009
GPL-3.0
false
48378d1081ed674c842973823b91f62c
machine:\n services:\n - docker\n\ndependencies:\n cache_directories:\n - "~/.ethash" # Cache the ethash DAG generated by hive for consecutive builds\n - "~/.docker" # Cache all docker images manually to avoid lengthy rebuilds\n override:\n # Restore all previously cached docker images\n - mkdir -p ~/.docker\n - for img in `ls ~/.docker`; do docker load -i ~/.docker/$img; done\n\n # Pull in and hive, restore cached ethash DAGs and do a dry run\n - go get -u github.com/karalabe/hive\n - (cd ~/.go_workspace/src/github.com/karalabe/hive && mkdir -p workspace/ethash/ ~/.ethash)\n - (cd ~/.go_workspace/src/github.com/karalabe/hive && cp -r ~/.ethash/. workspace/ethash/)\n - (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=NONE --test=. --sim=. --loglevel=6)\n\n # Cache all the docker images and the ethash DAGs\n - for img in `docker images | grep -v "^<none>" | tail -n +2 | awk '{print $1}'`; do docker save $img > ~/.docker/`echo $img | tr '/' ':'`.tar; done\n - cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/ethash/. ~/.ethash\n\ntest:\n override:\n # Build Geth and move into a known folder\n - make geth\n - cp ./build/bin/geth $HOME/geth\n\n # Run hive and move all generated logs into the public artifacts folder\n - (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=go-ethereum:local --override=$HOME/geth --test=. --sim=.)\n - cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/logs/* $CIRCLE_ARTIFACTS\n
dataset_sample\yaml\go\circle.yml
circle.yml
YAML
1,544
0.8
0.09375
0.185185
python-kit
585
2024-09-17T11:48:47.421494
Apache-2.0
false
f79b26bcda2ec60fa5876b9e868fcc42
coverage:\n status:\n project:\n default:\n informational: true\n patch:\n default:\n informational: true\ncomment: false\n
dataset_sample\yaml\go\codecov.yml
codecov.yml
YAML
145
0.7
0
0
python-kit
989
2024-04-07T19:41:20.420263
BSD-3-Clause
false
38f9d90e8e9eaea2872d024d6bf0e081
---\nissues:\n - label: needs_team\n comment: |\n This issue doesn't have a `Team:<team>` label.\n - label: request-discuss\n comment: |\n Thank you very much for creating this issue. However, we would kindly like to ask you to post all questions and issues on the [Discuss forum](https://discuss.elastic.co/c/beats) first. In addition to awesome, knowledgeable community contributors, core Beats developers are on the forums every single day to help you out as well. So, your questions will reach a wider audience there, and if we confirm that there is a bug, then you can reopen this issue with the new information or open a new one.\npulls:\n - label: needs_team\n comment: |\n This pull request doesn't have a `Team:<team>` label.\n
dataset_sample\yaml\go\comment.yml
comment.yml
YAML
751
0.8
0.166667
0
python-kit
807
2024-10-29T11:54:27.669932
MIT
false
f85dfb976c62c6d4968d3e776025f2fb
# NOTE: Current plan gives 1500 build minutes per month.\nversion: 2.1\n\norbs:\n go: circleci/go@1.7.3\n git-shallow-clone: guitarrapc/git-shallow-clone@2.5.0\n\nexecutors:\n golang:\n docker:\n - image: cimg/go:1.24.0-node\n golang-test:\n docker:\n - image: cimg/go:1.24.0-node\n - image: quay.io/thanos/docker-swift-onlyone-authv2-keystone:v0.1\n\njobs:\n # Cross build is needed for publish_release but needs to be done outside of docker.\n cross_build:\n machine: true\n working_directory: /home/circleci/.go_workspace/src/github.com/thanos-io/thanos\n environment:\n GOBIN: "/home/circleci/.go_workspace/go/bin"\n PROMU_VERSION: "0.5.0"\n steps:\n - git-shallow-clone/checkout\n - run: mkdir -p ${GOBIN}\n - run: curl -L "https://github.com/prometheus/promu/releases/download/v${PROMU_VERSION}/promu-${PROMU_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz" | tar --strip-components=1 -xzf - -C ${GOBIN}\n - run: mv -f ${GOBIN}/promu "${GOBIN}/promu-v${PROMU_VERSION}"\n - run: make crossbuild -W ${GOBIN}/promu-v${PROMU_VERSION} # Ignore make dependency, it needs to be enforced somehow.\n - persist_to_workspace:\n root: .\n paths:\n - .build\n\n publish_main:\n executor: golang\n steps:\n - git-shallow-clone/checkout\n - go/mod-download-cached\n - setup_remote_docker:\n version: docker24\n - attach_workspace:\n at: .\n # Register qemu to support multi-arch.\n - run: docker run --privileged tonistiigi/binfmt:qemu-v6.1.0 --install all\n - run: make crossbuild\n - run: make docker-build\n - run: make docker-test\n # Upload to both dockerhub and quay.io.\n - run: echo "${DOCKERHUB_PASSWORD}" | docker login -u="${DOCKERHUB_USERNAME}" --password-stdin\n - run: make docker-push DOCKER_IMAGE_REPO=thanosio/thanos\n - run: make docker-manifest DOCKER_IMAGE_REPO=thanosio/thanos\n - run: echo "${QUAY_PASSWORD}" | docker login -u="${QUAY_USERNAME}" quay.io --password-stdin\n - run: make docker-push\n - run: make docker-manifest\n\n publish_release:\n executor: golang\n steps:\n - 
git-shallow-clone/checkout\n - go/mod-download-cached\n - setup_remote_docker:\n version: docker24\n - attach_workspace:\n at: .\n - run: make tarballs-release\n - store_artifacts:\n path: .tarballs\n destination: releases\n # Register qemu to support multi-arch.\n - run: docker run --privileged tonistiigi/binfmt:qemu-v6.1.0 --install all\n - run: make docker-build\n - run: make docker-test\n # Upload to both dockerhub and quay.io.\n - run: echo "${DOCKERHUB_PASSWORD}" | docker login -u="${DOCKERHUB_USERNAME}" --password-stdin\n - run: make docker-push DOCKER_IMAGE_REPO=thanosio/thanos DOCKER_IMAGE_TAG=$CIRCLE_TAG\n - run: make docker-manifest DOCKER_IMAGE_REPO=thanosio/thanos DOCKER_IMAGE_TAG=$CIRCLE_TAG\n - run: echo "${QUAY_PASSWORD}" | docker login -u="${QUAY_USERNAME}" quay.io --password-stdin\n - run: make docker-push DOCKER_IMAGE_TAG=$CIRCLE_TAG\n - run: make docker-manifest DOCKER_IMAGE_TAG=$CIRCLE_TAG\n\nworkflows:\n version: 2\n thanos:\n jobs:\n - publish_main:\n filters:\n branches:\n only: main\n - cross_build:\n filters:\n tags:\n only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/\n branches:\n ignore: /.*/\n - publish_release:\n requires:\n - cross_build\n filters:\n tags:\n only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/\n branches:\n ignore: /.*/\n
dataset_sample\yaml\go\config.yml
config.yml
YAML
3,689
0.95
0.009615
0.061224
python-kit
485
2023-07-20T15:07:58.344855
BSD-3-Clause
false
a0c80b9f0e4ff909b3bfc7991c18463b
project_id: "531392"\napi_token_env: CROWDIN_PERSONAL_TOKEN\npreserve_hierarchy: true\ncommit_message: "[ci skip]"\nfiles:\n - source: /docs/**/*\n translation: /i18n/%two_letters_code%/docusaurus-plugin-content-docs/current/**/%original_file_name%\n ignore:\n - /**/*.json\n - source: /blog/**/*\n translation: /i18n/%two_letters_code%/docusaurus-plugin-content-blog/**/%original_file_name%\n - source: /src/pages/**/*\n translation: /i18n/%two_letters_code%/docusaurus-plugin-content-pages/**/%original_file_name%\n ignore:\n - /**/*.js\n - /**/*.jsx\n - /**/*.ts\n - /**/*.tsx\n - /**/*.css\n - source: /i18n/en/**/*.json\n translation: /i18n/%two_letters_code%/**/%original_file_name%\n
dataset_sample\yaml\go\crowdin.yml
crowdin.yml
YAML
721
0.8
0
0
node-utils
871
2023-12-13T12:44:16.654268
Apache-2.0
false
a0efb1200c32889a25b6013dda8e802f
dashboards:\n- id: Packetbeat-Cassandra\n file: Packetbeat-cassandra.json\n- id: a7b35890-8baa-11e8-9676-ef67484126fb\n file: Packetbeat-dhcpv4.json\n- id: 65120940-1454-11e9-9de0-f98d1808db8e\n file: Packetbeat-dns-overview.json\n- id: DNS-Unique-Domains\n file: Packetbeat-dns-tunneling.json\n- id: Packetbeat-Flows\n file: Packetbeat-flows.json\n- id: Packetbeat-HTTP\n file: Packetbeat-http.json\n- id: Packetbeat-MongoDB-performance\n file: Packetbeat-mongodb.json\n- id: Packetbeat-MySQL-performance\n file: Packetbeat-mysql.json\n- id: Packetbeat-NFS\n file: Packetbeat-nfs.json\n- id: Packetbeat-Dashboard\n file: Packetbeat-overview.json\n- id: Packetbeat-PgSQL-performance\n file: Packetbeat-pgsql.json\n- id: Packetbeat-Thrift-performance\n file: Packetbeat-thrift.json\n- id: tls-sessions\n file: Packetbeat-tls.json\n
dataset_sample\yaml\go\dashboards.yml
dashboards.yml
YAML
816
0.7
0
0
awesome-app
241
2024-06-14T07:29:02.695029
BSD-3-Clause
false
603d62f3415d4aee949948784355b39e
version: 2\nupdates:\n- package-ecosystem: gomod\n directory: "/"\n schedule:\n interval: daily\n open-pull-requests-limit: 10\n reviewers:\n - senorprogrammer\n assignees:\n - senorprogrammer\n- package-ecosystem: "github-actions"\n directory: "/"\n schedule:\n interval: daily\n open-pull-requests-limit: 10\n reviewers:\n - senorprogrammer\n assignees:\n - senorprogrammer\n
dataset_sample\yaml\go\dependabot.yml
dependabot.yml
YAML
377
0.7
0
0
vue-tools
12
2024-04-08T09:02:36.718522
BSD-3-Clause
false
7efbd4d0322ae58b207c93672049ae12
deprecations:\n - id: rest-meta-prop\n status: deprecated # switch to removed once feature is completely removed\n apiType: REST\n locations: \n - GET /v1/thing/{id}\n - GET /v1/things\n - GET /v1/action/{id}\n - GET /v1/actions\n msg: "use of deprecated property ?meta=true/false"\n mitigation: "Use ?include=<propName>, e.g. ?include=_classification for classification meta or ?include=_vector to show the vector position or ?include=_classification,_vector for both. When consuming the response use the additional fields such as _vector, as the meta object in the response, such as meta.vector will be removed."\n sinceVersion: "0.22.8"\n sinceTime: "2020-06-15T16:18:06+00:00"\n plannedRemovalVersion: "0.23.0"\n removedIn: "0.23.0"\n removedTime: "2020-12-18T18:00:00+00:00"\n - id: config-files\n status: deprecated # switch to removed once feature is completely removed\n apiType: Configuration\n locations: \n - --config-file=""\n msg: "use of deprecated command line argument --config-file"\n mitigation: "Configure Weaviate using environment variables."\n sinceVersion: "0.22.16"\n sinceTime: "2020-09-08T09:46:00+00:00"\n plannedRemovalVersion: "0.23.0"\n removedIn: null\n removedTime: null\n - id: cardinality\n status: deprecated # switch to removed once feature is completely removed\n apiType: REST\n locations:\n - GET /v1/schema\n - POST /v1/schema/things\n - POST /v1/schema/actions\n - POST /v1/schema/things/{className}/properties\n - POST /v1/schema/actions/{className}/properties\n msg: "use of deprecated property option 'cardinality'"\n mitigation: "Omit this field. 
Starting in 0.22.7 it no longer has any effect."\n sinceVersion: "0.22.17"\n sinceTime: "2020-09-16T09:06:00+00:00"\n plannedRemovalVersion: "0.23.0"\n removedIn: "0.23.0"\n removedTime: "2020-12-18T18:00:00+00:00"\n - id: ref-meta-deprecated-fields\n status: deprecated # switch to removed once feature is completely removed\n apiType: REST\n locations:\n - GET /v1/thing/{id}\n - GET /v1/things\n - GET /v1/action/{id}\n - GET /v1/actions\n msg: "response contains deprecated fields winningDistance and losingDistance"\n mitigation: |-\n when using _classification the reference meta after a successful\n classification contains various counts and distances. Starting in 0.22.20\n the fields winningDistance and losingDistance are considered deprecated.\n New fields were added and they have more descriptive names. User\n meanWinningDistance instead of winningDistance and use meanLosingDistance\n instead of losingDistance\n sinceVersion: "0.22.20"\n sinceTime: "2020-11-26T14:58:00+00:00"\n plannedRemovalVersion: "0.23.0"\n removedIn: "0.23.0"\n removedTime: "2020-12-18T18:00:00+00:00"\n
dataset_sample\yaml\go\deprecations.yml
deprecations.yml
YAML
2,817
0.8
0.092308
0
react-lib
397
2023-10-24T19:34:19.452165
GPL-3.0
false
4f4a634f7e2c4b399100fb83968feba0
version: '3.4'\nservices:\n weaviate:\n image: weaviate/debug-server\n build:\n context: .\n dockerfile: debug.Dockerfile\n target: weaviate_debug\n restart: on-failure:0\n ports:\n - "2345:2345"\n - "8080:8080"\n - "6060:6060"\n - "2112:2112"\n - "7101:7101"\n - "50051:50051"\n environment:\n LOG_LEVEL: "debug"\n CONTEXTIONARY_URL: contextionary:9999\n QUERY_DEFAULTS_LIMIT: 20\n AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true'\n PERSISTENCE_DATA_PATH: "./data"\n DEFAULT_VECTORIZER_MODULE: text2vec-contextionary\n ENABLE_MODULES: text2vec-contextionary\n PROMETHEUS_MONITORING_ENABLED: 'true'\n PROMETHEUS_MONITORING_GROUP_CLASSES: 'true'\n CLUSTER_GOSSIP_BIND_PORT: "7100"\n CLUSTER_DATA_BIND_PORT: "7101"\n ASYNC_INDEXING: ${ASYNC_INDEXING:-false}\n\n # necessary for the metrics tests, some metrics only exist once segments\n # are flushed. If we wait to long the before run is completely in\n # memtables, the after run has some flushed which leads to some metrics\n # diffs in the before and after\n PERSISTENCE_MEMTABLES_FLUSH_DIRTY_AFTER_SECONDS: 2\n contextionary:\n image: semitechnologies/contextionary:en0.16.0-v1.2.1\n ports:\n - "9999:9999"\n environment:\n OCCURRENCE_WEIGHT_LINEAR_FACTOR: 0.75\n EXTENSIONS_STORAGE_MODE: weaviate\n EXTENSIONS_STORAGE_ORIGIN: http://weaviate:8080\n
dataset_sample\yaml\go\docker-compose-debug.yml
docker-compose-debug.yml
YAML
1,434
0.8
0.023256
0.095238
node-utils
328
2025-03-22T17:26:34.511358
GPL-3.0
false
575d3ec092436f40cae9972983a73ec4
# Copyright (c) The OpenTofu Authors\n# SPDX-License-Identifier: MPL-2.0\n# Copyright (c) 2023 HashiCorp, Inc.\n# SPDX-License-Identifier: MPL-2.0\n\nversion: '3.8'\nservices:\n website:\n build: .\n ports:\n - "3000:3000"\n volumes:\n - ../:/work/opentofu-repo/main\n command:\n - npm\n - run\n - build\n restart: no
dataset_sample\yaml\go\docker-compose.build.yml
docker-compose.build.yml
YAML
341
0.8
0
0.235294
node-utils
760
2024-04-20T15:25:34.722414
GPL-3.0
false
711af7633a6b5541e98bf3a1c0ed21bf
##\n# NOTE:\n# This docker-compose file is intended only for Weaviate development by contributors and is not meant for end-users.\n# Additionally, it should not be used directly with `docker compose up`; instead, please utilize the scripts provided\n# under tools/dev for execution.\n\n# To get a docker-compose file to run Weaviate, please follow the instructions at\n# https://weaviate.io/developers/weaviate/installation/docker-compose\n##\nversion: '3.4'\nservices:\n contextionary:\n image: semitechnologies/contextionary:en0.16.0-v1.2.1\n ports:\n - "9999:9999"\n environment:\n EXTENSIONS_STORAGE_MODE: weaviate\n EXTENSIONS_STORAGE_ORIGIN: http://host.docker.internal:8080\n OCCURRENCE_WEIGHT_LINEAR_FACTOR: 0.75\n LOG_LEVEL: debug\n prometheus:\n image: prom/prometheus:v2.46.0\n volumes:\n - ./tools/dev/prometheus_config/:/etc/prometheus/\n - ./data/prometheus:/prometheus\n command:\n - '--config.file=/etc/prometheus/prometheus.yml'\n - '--storage.tsdb.path=/prometheus'\n - '--web.console.libraries=/usr/share/prometheus/console_libraries'\n - '--web.console.templates=/usr/share/prometheus/consoles'\n ports:\n - "9090:9090"\n grafana:\n image: grafana/grafana-oss\n ports:\n - "3000:3000"\n volumes:\n - ./tools/dev/grafana/grafana.ini:/etc/grafana/grafana.ini\n - ./tools/dev/grafana/datasource.yml:/etc/grafana/provisioning/datasources/prometheus.yml\n - ./tools/dev/grafana/dashboard_provider.yml:/etc/grafana/provisioning/dashboards/dashboards.yml\n - ./tools/dev/grafana/dashboards:/var/lib/grafana/dashboards\n keycloak:\n image: jboss/keycloak:5.0.0\n environment:\n KEYCLOAK_USER: admin\n KEYCLOAK_PASSWORD: admin\n KEYCLOAK_IMPORT: /tmp/weaviate-realm.json\n volumes:\n - ./tools/dev/keycloak/weaviate-realm.json:/tmp/weaviate-realm.json\n ports:\n - "9090:8080"\n t2v-transformers:\n image: semitechnologies/transformers-inference:baai-bge-small-en-v1.5-onnx\n ports:\n - "8000:8080"\n qna-transformers:\n image: 
semitechnologies/qna-transformers:distilbert-base-uncased-distilled-squad\n ports:\n - "8001:8080"\n i2v-neural:\n image: semitechnologies/img2vec-pytorch:resnet50\n ports:\n - "8002:8080"\n ner-transformers:\n image: semitechnologies/ner-transformers:latest\n ports:\n - "8003:8080"\n text-spellcheck:\n image: semitechnologies/text-spellcheck-model:pyspellchecker-en\n ports:\n - "8004:8080"\n multi2vec-clip:\n image: semitechnologies/multi2vec-clip:sentence-transformers-clip-ViT-B-32-multilingual-v1\n ports:\n - "8005:8080"\n t2v-transformers-passage:\n image: semitechnologies/transformers-inference:facebook-dpr-ctx_encoder-single-nq-base\n ports:\n - "8006:8080"\n t2v-transformers-query:\n image: semitechnologies/transformers-inference:facebook-dpr-question_encoder-single-nq-base\n ports:\n - "8007:8080"\n sum-transformers:\n image: semitechnologies/sum-transformers:facebook-bart-large-cnn\n ports:\n - "8008:8080"\n reranker-transformers:\n image: semitechnologies/reranker-transformers:cross-encoder-ms-marco-MiniLM-L-6-v2\n ports:\n - "8009:8080"\n t2v-gpt4all:\n image: semitechnologies/gpt4all-inference:all-MiniLM-L6-v2\n ports:\n - "8010:8080"\n multi2vec-bind:\n image: semitechnologies/multi2vec-bind:imagebind\n ports:\n - "8011:8080"\n text2vec-model2vec:\n image: semitechnologies/model2vec-inference:minishlab-potion-base-32M\n ports:\n - "8012:8080"\n backup-s3:\n image: minio/minio\n ports:\n - "9000:9000"\n volumes:\n - ./backups-s3:/data\n environment:\n MINIO_ROOT_USER: aws_access_key\n MINIO_ROOT_PASSWORD: aws_secret_key\n entrypoint: sh\n command: -c 'mkdir -p /data/weaviate-backups && minio server /data'\n backup-gcs:\n image: oittaa/gcp-storage-emulator\n ports:\n - "9090:8080"\n volumes:\n - ./backups-gcs:/storage\n backup-azure:\n image: mcr.microsoft.com/azure-storage/azurite\n ports:\n - "10000:10000"\n volumes:\n - ./backups-azure:/data\n command: "azurite --blobHost 0.0.0.0 --blobPort 10000"\n ollama:\n image: ollama/ollama:latest\n ports:\n - 
"11435:11434"\n volumes:\n - ./_local/ollama:/root/.ollama\n - ./tools/dev/ollama_startup.sh:/ollama_startup.sh\n environment:\n - MODELS_TO_PULL=nomic-embed-text\n - OLLAMA_HOST=0.0.0.0\n entrypoint: ["/bin/sh", "/ollama_startup.sh"]\n
dataset_sample\yaml\go\docker-compose.yml
docker-compose.yml
YAML
4,470
0.8
0.021739
0.058394
node-utils
56
2025-04-01T15:34:50.183947
GPL-3.0
false
451ebc832ebf52afb82947b13e1ac92d
project: 'Beats docs'\ncross_links:\n - docs-content\n - ecs\n - elasticsearch\n - integration-docs\n - logstash\n - logstash-docs-md\ntoc:\n - toc: reference\n - toc: release-notes\n - toc: extend\nsubs:\n beats-ref: "https://www.elastic.co/guide/en/beats/libbeat/current"\n ecloud: "Elastic Cloud"\n ess: "Elasticsearch Service"\n es-serverless: "Elasticsearch Serverless"\n uptime-app: "Uptime app"\n logs-app: "Logs app"\n metrics-app: "Metrics app"\n siem-app: "SIEM app"\n stack-monitor-app: "Stack Monitoring"\n agent: "Elastic Agent"\n agents: "Elastic Agents"\n fleet: "Fleet"\n stack: "Elastic Stack"\n xpack: "X-Pack"\n es: "Elasticsearch"\n kib: "Kibana"\n ls: "Logstash"\n beats: "Beats"\n auditbeat: "Auditbeat"\n filebeat: "Filebeat"\n heartbeat: "Heartbeat"\n metricbeat: "Metricbeat"\n packetbeat: "Packetbeat"\n winlogbeat: "Winlogbeat"\n security-features: "security features"\n es-security-features: "Elasticsearch security features"\n elastic-sec: "Elastic Security"\n monitoring: "X-Pack monitoring"\n monitor-features: "monitoring features"\n stack-version: "9.0.0"\n major-version: "9.0"\n major-release: "9.x"\n beats-pull: "https://github.com/elastic/beats/pull/"\n beats-issue: "https://github.com/elastic/beats/issue/"\n
dataset_sample\yaml\go\docset.yml
docset.yml
YAML
1,302
0.8
0
0
python-kit
421
2024-08-23T23:06:13.355104
GPL-3.0
false
1d0e081b5ef3b7acdda8eee6b1eef41e
wtf:\n mods:\n battery:\n type: power\n title: "⚡️"\n enabled: true\n position:\n top: 0\n left: 0\n height: 1\n width: 1\n refreshInterval: 15\n security_info:\n type: security\n enabled: true\n position:\n top: 0\n left: 1\n height: 1\n width: 1\n refreshInterval: 3600
dataset_sample\yaml\go\dynamic_sizing.yml
dynamic_sizing.yml
YAML
363
0.7
0
0
awesome-app
191
2023-09-29T23:55:57.766770
Apache-2.0
false
f974c69a86285d6048fecbe1eaa0b531
# The ECS migration file contains the information about all the Beats fields\n# which were changed between 7.x and 8.0.\n#\n# This file will be used to generate breaking change documentation and migrate\n# Kibana dashboards. See script/renamed_fields.py and script/kibana-migration.py.\n#\n# The format of the file is as following:\n#\n# - from: <original-name-in-7x> # Field name in 7.x.\n# to: <new-name-in-8x> # Field name in 8.0.\n# alias: <true|false> # True if a migration alias exists in fields.yml.\n# beat: <beatname> # Omit for libbeat fields.\n\n## Libbeat\n- from: agent.hostname\n to: agent.name\n alias: true\n
dataset_sample\yaml\go\ecs-migration-8x.yml
ecs-migration-8x.yml
YAML
641
0.8
0.117647
0.8125
awesome-app
374
2024-10-08T17:48:52.715301
BSD-3-Clause
false
76e5a7a291e0b0e3ceee76336ce1c4bb
# The ECS migration file contains the information about all the Beats fields which are migrated to ECS in 7.0.\n# The goal of the file is to potentially have scripts on top of this information to convert visualisations and templates\n# based on this information in an automated way and to keep track of all changes which were applied.\n#\n# The format of the file is as following:\n#\n# - from: source-field-in-6.x\n# to: target-filed-in-ECS\n# # Alias field is useful for fields where many-1 mapping from new to old are needed\n# alias: true-if-alias-is-required-in-7\n# # Alias6 field is useful for fields where there is a 1-1 mapping from old to new. If absent, defaults to false.\n# alias6: true-if-alias-is-required-in-6\n# # Copy to is useful for fields where multiple fields map to the same ECS field\n# copy_to: true-if-field-should-be-copied-to-target-in-6x\n# # Beat name this applies to, if not beat is set, applies to all Beats\n# beat: filebeat\n\n## Libbeat\n- from: beat.name\n to: host.name\n alias6: true\n alias: true\n\n- from: beat.hostname\n to: agent.hostname\n alias6: true\n alias: true\n\n- from: beat.version\n to: agent.version\n alias6: true\n alias: true\n\n- from: beat.timezone\n to: event.timezone\n alias6: true\n alias: true\n\n# Processor fields\n\n# Docker processor\n- from: docker.container.id\n to: container.id\n alias: true\n alias6: true\n\n- from: docker.container.image\n to: container.image.name\n alias6: true\n alias: true\n\n- from: docker.container.name\n to: container.name\n alias6: true\n alias: true\n\n- from: docker.container.labels # TODO: How to map these?\n to: container.labels\n alias6: false\n alias: true\n\n# Cloud\n- from: meta.cloud.provider\n to: cloud.provider\n alias: true\n alias6: true\n\n- from: meta.cloud.instance_id\n to: cloud.instance.id\n alias: true\n alias6: true\n\n- from: meta.cloud.instance_name\n to: cloud.instance.name\n alias: true\n alias6: true\n\n- from: meta.cloud.machine_type\n to: cloud.machine.type\n 
alias: true\n alias6: true\n\n- from: meta.cloud.availability_zone\n to: cloud.availability_zone\n alias: true\n alias6: true\n\n- from: meta.cloud.project_id\n to: cloud.project.id\n alias: true\n alias6: true\n\n- from: meta.cloud.region\n to: cloud.region\n alias: true\n alias6: true\n\n# process processor\n- from: process.exe\n to: process.executable\n alias: true\n\n## Filebeat\n- from: offset\n to: log.offset\n alias6: true\n alias: true\n beat: filebeat\n\n- from: fileset.module\n to: event.module\n alias6: true\n alias: true\n beat: filebeat\n\n- from: source\n to:\n - log.file.path\n - log.source.address\n alias: false\n beat: filebeat\n\n# Filebeat modules\n\n# Auditd module\n\n- from: auditd.log.acct\n to: user.name\n alias: true\n beat: filebeat\n\n- from: auditd.log.pid\n to: process.pid\n alias: true\n beat: filebeat\n\n- from: auditd.log.ppid\n to: process.ppid\n alias: true\n beat: filebeat\n\n- from: auditd.log.res\n to: event.outcome\n alias: true\n beat: filebeat\n\n- from: auditd.log.record_type\n to: event.action\n alias: true\n beat: filebeat\n\n- from: auditd.log.arch\n to: host.architecture\n alias: true\n beat: filebeat\n\n- from: auditd.log.gid\n to: user.group.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.uid\n to: user.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.agid\n to: user.audit.group.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.auid\n to: user.audit.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.fsgid\n to: user.filesystem.group.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.egid\n to: user.effective.group.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.euid\n to: user.effective.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.sgid\n to: user.saved.group.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.suid\n to: user.saved.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.ogid\n to: user.owner.group.id\n alias: true\n beat: filebeat\n\n- from: 
auditd.log.ouid\n to: user.owner.id\n alias: true\n beat: filebeat\n\n- from: auditd.log.terminal\n to: user.terminal\n alias: true\n beat: filebeat\n\n- from: auditd.log.comm\n to: process.name\n alias: true\n beat: filebeat\n\n- from: auditd.log.cmd\n to: process.args\n alias: false\n beat: filebeat\n comment: Was a cmdline string, whereas args is an array of keywords.\n\n- from: auditd.log.exe\n to: process.executable\n alias: true\n beat: filebeat\n\n- from: auditd.log.msg\n to: message\n alias: true\n beat: filebeat\n\n- from: auditd.log.src\n to: source.address\n alias: true\n beat: filebeat\n\n- from: auditd.log.dst\n to: destination.address\n alias: true\n beat: filebeat\n\n- from: auditd.log.geoip.continent_name\n to: source.geo.continent_name\n alias: true\n beat: filebeat\n\n- from: auditd.log.geoip.country_iso_code\n to: source.geo.country_iso_code\n alias: true\n beat: filebeat\n\n- from: auditd.log.geoip.location\n to: source.geo.location\n alias: true\n beat: filebeat\n\n- from: auditd.log.geoip.region_name\n to: source.geo.region_name\n alias: true\n beat: filebeat\n\n- from: auditd.log.geoip.city_name\n to: source.geo.city_name\n alias: true\n beat: filebeat\n\n- from: auditd.log.geoip.region_iso_code\n to: source.geo.region_iso_code\n alias: true\n beat: filebeat\n\n# Suricata module\n\n- from: source_ecs.ip\n to: source.ip\n alias: true\n beat: filebeat\n\n- from: source_ecs.port\n to: source.port\n alias: true\n beat: filebeat\n\n- from: source_ecs.geo.continent_name\n to: source.geo.continent_name\n alias: true\n beat: filebeat\n\n- from: source_ecs.geo.country_iso_code\n to: source.geo.country_iso_code\n alias: true\n beat: filebeat\n\n- from: source_ecs.geo.location\n to: source.geo.location\n alias: true\n beat: filebeat\n\n- from: source_ecs.geo.region_name\n to: source.geo.region_name\n alias: true\n beat: filebeat\n\n- from: source_ecs.geo.city_name\n to: source.geo.city_name\n alias: true\n beat: filebeat\n\n- from: 
source_ecs.geo.region_iso_code\n to: source.geo.region_iso_code\n alias: true\n beat: filebeat\n\n- from: suricata.eve.alert.action\n to: event.outcome\n alias: true\n beat: filebeat\n\n- from: suricata.eve.alert.severity\n to: event.severity\n alias: true\n beat: filebeat\n\n- from: suricata.eve.app_proto\n to: network.protocol\n alias: true\n beat: filebeat\n\n- from: suricata.eve.dest_ip\n to: destination.ip\n alias: true\n beat: filebeat\n\n- from: suricata.eve.dest_port\n to: destination.port\n alias: true\n beat: filebeat\n\n- from: suricata.eve.fileinfo.filename\n to: file.path\n alias: true\n beat: filebeat\n\n- from: suricata.eve.fileinfo.size\n to: file.size\n alias: true\n beat: filebeat\n\n- from: suricata.eve.flow.start\n to: event.start\n alias: true\n beat: filebeat\n\n- from: suricata.eve.flow.bytes_toclient\n to: destination.bytes\n alias: true\n beat: filebeat\n\n- from: suricata.eve.flow.bytes_toserver\n to: source.bytes\n alias: true\n beat: filebeat\n\n- from: suricata.eve.flow.pkts_toclient\n to: destination.packets\n alias: true\n beat: filebeat\n\n- from: suricata.eve.flow.pkts_toserver\n to: source.packets\n alias: true\n beat: filebeat\n\n- from: suricata.eve.http.hostname\n to: url.domain\n alias: true\n beat: filebeat\n\n- from: suricata.eve.http.http_method\n to: http.request.method\n alias: true\n beat: filebeat\n\n- from: suricata.eve.http.http_refer\n to: http.request.referrer\n alias: true\n beat: filebeat\n\n- from: suricata.eve.http.http_user_agent\n to: user_agent.original\n alias: true\n beat: filebeat\n\n- from: suricata.eve.http.length\n to: http.response.body.bytes\n alias: true\n beat: filebeat\n\n- from: suricata.eve.http.status\n to: http.response.status_code\n alias: true\n beat: filebeat\n\n- from: suricata.eve.http.url\n to: url.original\n alias: true\n beat: filebeat\n\n- from: suricata.eve.proto\n to: network.transport\n alias: true\n beat: filebeat\n\n- from: suricata.eve.src_ip\n to: source.ip\n alias: true\n beat: 
filebeat\n\n- from: suricata.eve.src_port\n to: source.port\n alias: true\n beat: filebeat\n\n- from: suricata.eve.timestamp\n to: '@timestamp'\n alias: true\n beat: filebeat\n\n- from: event.type\n to: suricata.eve.event_type\n alias: false\n beat: auditbeat\n comment: event.type is reserved for future use by ECS.\n\n## System module\n\n- from: system.syslog.hostname\n to: host.hostname\n alias: true\n beat: filebeat\n\n- from: system.syslog.program\n to: process.name\n alias: true\n beat: filebeat\n\n- from: system.syslog.pid\n to: process.pid\n alias: true\n beat: filebeat\n\n- from: system.syslog.message\n to: message\n alias: true\n beat: filebeat\n\n- from: system.auth.hostname\n to: host.hostname\n alias: true\n beat: filebeat\n\n- from: system.auth.message\n to: message\n alias: true\n beat: filebeat\n\n- from: system.auth.program\n to: process.name\n alias: true\n beat: filebeat\n\n- from: system.auth.timestamp\n to: '@timestamp'\n alias: true\n beat: filebeat\n\n- from: system.auth.user\n to: user.name\n alias: true\n beat: filebeat\n\n- from: system.auth.pid\n to: process.pid\n alias: true\n beat: filebeat\n\n- from: system.auth.groupadd.gid\n to: group.id\n alias: true\n beat: filebeat\n\n- from: system.auth.groupadd.name\n to: group.name\n alias: true\n beat: filebeat\n\n- from: system.auth.useradd.gid\n to: group.id\n alias: true\n beat: filebeat\n\n- from: system.auth.useradd.uid\n to: user.id\n alias: true\n beat: filebeat\n\n- from: system.auth.useradd.name\n to: user.name\n alias: true\n beat: filebeat\n\n- from: system.auth.program\n to: process.name\n alias: true\n beat: filebeat\n\n- from: system.auth.ssh.ip\n to: source.ip\n alias: true\n beat: filebeat\n\n- from: system.auth.ssh.port\n to: source.port\n alias: true\n beat: filebeat\n\n- from: system.auth.ssh.geoip.continent_name\n to: source.geo.continent_name\n alias: true\n beat: filebeat\n\n- from: system.auth.ssh.geoip.country_iso_code\n to: source.geo.country_iso_code\n alias: true\n 
beat: filebeat\n\n- from: system.auth.ssh.geoip.location\n to: source.geo.location\n alias: true\n beat: filebeat\n\n- from: system.auth.ssh.geoip.region_name\n to: source.geo.region_name\n alias: true\n beat: filebeat\n\n- from: system.auth.ssh.geoip.city_name\n to: source.geo.city_name\n alias: true\n beat: filebeat\n\n- from: system.auth.ssh.geoip.region_iso_code\n to: source.geo.region_iso_code\n alias: true\n beat: filebeat\n\n## Elasticsearch module\n\n- from: elasticsearch.audit.origin_address\n to: source.ip\n alias: true\n beat: filebeat\n\n- from: elasticsearch.audit.principal\n to: user.name\n alias: true\n beat: filebeat\n\n- from: elasticsearch.audit.request_body\n to: http.request.body.content\n alias: true\n beat: filebeat\n\n- from: elasticsearch.audit.uri\n to: url.original\n alias: true\n beat: filebeat\n\n- from: elasticsearch.slowlog.took_millis\n to: event.duration\n alias: false\n scale: 1000000\n beat: filebeat\n\n## IIS module\n\n- from: iis.access.server_ip\n to: destination.address\n alias: true\n beat: filebeat\n\n- from: iis.access.remote_ip\n to: source.address\n alias: true\n beat: filebeat\n\n- from: iis.access.url\n to: url.path\n alias: true\n beat: filebeat\n\n- from: iis.access.query_string\n to: url.query\n alias: true\n beat: filebeat\n\n- from: iis.access.port\n to: destination.port\n alias: true\n beat: filebeat\n\n- from: iis.access.user_name\n to: user.name\n alias: true\n beat: filebeat\n\n- from: iis.access.hostname\n to: destination.domain\n alias: true\n beat: filebeat\n\n- from: iis.access.user_agent.original\n to: user_agent.original\n alias: true\n beat: filebeat\n- from: iis.access.user_agent.device\n to: user_agent.device.name\n alias: true\n beat: filebeat\n- from: iis.access.user_agent.name\n to: user_agent.name\n alias: true\n beat: filebeat\n- from: iis.access.user_agent.os\n to: user_agent.os.full_name\n alias: true\n beat: filebeat\n- from: iis.access.user_agent.os_name\n to: user_agent.os.name\n alias: true\n 
beat: filebeat\n\n- from: iis.access.user_agent.major\n to: user_agent.version\n alias: false\n beat: filebeat\n- from: iis.access.user_agent.minor\n to: user_agent.version\n alias: false\n beat: filebeat\n- from: iis.access.user_agent.patch\n to: user_agent.version\n alias: false\n beat: filebeat\n- from: iis.access.user_agent.os_major\n to: user_agent.os.version\n alias: false\n beat: filebeat\n- from: iis.access.user_agent.os_minor\n to: user_agent.os.version\n alias: false\n beat: filebeat\n- from: iis.access.user_agent.os_patch\n to: user_agent.os.version\n alias: false\n beat: filebeat\n\n- from: iis.access.geoip.continent_name\n to: source.geo.continent_name\n alias: true\n beat: filebeat\n\n- from: iis.access.geoip.country_iso_code\n to: source.geo.country_iso_code\n alias: true\n beat: filebeat\n\n- from: iis.access.geoip.location\n to: source.geo.location\n alias: true\n beat: filebeat\n\n- from: iis.access.geoip.region_name\n to: source.geo.region_name\n alias: true\n beat: filebeat\n\n- from: iis.access.geoip.city_name\n to: source.geo.city_name\n alias: true\n beat: filebeat\n\n- from: iis.access.geoip.region_iso_code\n to: source.geo.region_iso_code\n alias: true\n beat: filebeat\n\n- from: iis.access.method\n to: http.request.method\n alias: true\n beat: filebeat\n\n- from: iis.access.response_code\n to: http.response.status_code\n alias: true\n beat: filebeat\n\n- from: iis.access.referrer\n to: http.request.referrer\n alias: true\n beat: filebeat\n\n- from: iis.error.remote_ip\n to: source.address\n alias: true\n beat: filebeat\n\n- from: iis.error.remote_port\n to: source.port\n alias: true\n beat: filebeat\n\n- from: iis.error.server_ip\n to: destination.address\n alias: true\n beat: filebeat\n\n- from: iis.error.server_port\n to: destination.port\n alias: true\n beat: filebeat\n\n- from: iis.error.http_version\n to: http.version\n alias: true\n beat: filebeat\n\n- from: iis.access.body_received.bytes\n to: http.request.body.bytes\n alias: true\n 
beat: filebeat\n\n- from: iis.access.body_sent.bytes\n to: http.response.body.bytes\n alias: true\n beat: filebeat\n\n- from: iis.access.request_time_ms\n to: event.duration\n alias: false\n scale: 1000000\n beat: filebeat\n\n- from: iis.error.method\n to: http.request.method\n alias: true\n beat: filebeat\n\n- from: iis.error.url\n to: url.original\n alias: true\n beat: filebeat\n\n- from: iis.error.response_code\n to: http.response.status_code\n alias: true\n beat: filebeat\n\n- from: iis.error.geoip.continent_name\n to: source.geo.continent_name\n alias: true\n beat: filebeat\n\n- from: iis.error.geoip.country_iso_code\n to: source.geo.country_iso_code\n alias: true\n beat: filebeat\n\n- from: iis.error.geoip.location\n to: source.geo.location\n alias: true\n beat: filebeat\n\n- from: iis.error.geoip.region_name\n to: source.geo.region_name\n alias: true\n beat: filebeat\n\n- from: iis.error.geoip.city_name\n to: source.geo.city_name\n alias: true\n beat: filebeat\n\n- from: iis.error.geoip.region_iso_code\n to: source.geo.region_iso_code\n alias: true\n beat: filebeat\n\n## HAProxy module\n\n- from: haproxy.total_waiting_time_ms\n to: event.duration\n alias: false\n scale: 1000000\n beat: filebeat\n\n- from: haproxy.http.request.time_active_ms\n to: event.duration\n alias: false\n scale: 1000000\n beat: filebeat\n\n- from: haproxy.client.ip\n to: source.address\n alias: true\n beat: filebeat\n\n- from: haproxy.client.port\n to: source.port\n alias: true\n beat: filebeat\n\n- from: haproxy.process_name\n to: process.name\n alias: true\n beat: filebeat\n\n- from: haproxy.pid\n to: process.pid\n alias: true\n beat: filebeat\n\n- from: haproxy.destination.ip\n to: destination.ip\n alias: true\n beat: filebeat\n\n- from: haproxy.destination.port\n to: destination.port\n alias: true\n beat: filebeat\n\n- from: haproxy.http.response.status_code\n to: http.response.status_code\n alias: true\n beat: filebeat\n\n- from: haproxy.geoip.continent_name\n to: 
source.geo.continent_name\n alias: true\n beat: filebeat\n\n- from: haproxy.geoip.country_iso_code\n to: source.geo.country_iso_code\n alias: true\n beat: filebeat\n\n- from: haproxy.geoip.location\n to: source.geo.location\n alias: true\n beat: filebeat\n\n- from: haproxy.geoip.region_name\n to: source.geo.region_name\n alias: true\n beat: filebeat\n\n- from: haproxy.geoip.city_name\n to: source.geo.city_name\n alias: true\n beat: filebeat\n\n- from: haproxy.geoip.region_iso_code\n to: source.geo.region_iso_code\n alias: true\n beat: filebeat\n\n## Kafka module\n\n- from: kafka.log.level\n to: log.level\n alias: true\n beat: filebeat\n\n- from: kafka.log.message\n to: message\n alias: true\n beat: filebeat\n\n## Kibana module\n\n- from: http.response.content_length\n to: http.response.body.bytes\n alias: true\n alias6: true\n beat: filebeat\n comment: >\n Very general name that was only used by the Kibana module.\n\n- from: http.response.elapsed_time\n to: event.duration\n alias: false\n alias6: false\n scale: 1000000\n beat: filebeat\n comment: >\n Very general name that was only used by the Kibana module.\n\n- from: kibana.log.meta.req.headers.referer\n to: http.request.referrer\n alias: true\n beat: filebeat\n\n- from: kibana.log.meta.req.referer\n to: http.request.referrer\n alias: true\n beat: filebeat\n\n- from: kibana.log.meta.req.headers.user-agent\n to: user_agent.original\n alias: true\n beat: filebeat\n\n- from: kibana.log.meta.req.remoteAddress\n to: source.address\n alias: true\n beat: filebeat\n\n- from: kibana.log.meta.req.url\n to: url.original\n alias: true\n beat: filebeat\n\n- from: kibana.log.meta.meta.statusCode\n to: http.response.status_code\n alias: true\n beat: filebeat\n\n- from: kibana.log.meta.method\n to: http.request.method\n alias: true\n beat: filebeat\n\n## Logstash module\n\n- from: logstash.log.level\n to: log.level\n alias: true\n beat: filebeat\n\n- from: logstash.log.message\n to: message\n alias: true\n beat: filebeat\n\n- 
from: logstash.slowlog.level\n to: log.level\n alias: true\n beat: filebeat\n\n- from: logstash.slowlog.took_in_nanos\n to: event.duration\n alias: true\n beat: filebeat\n\n## MongoDB module\n\n- from: mongodb.log.severity\n to: log.level\n alias: true\n beat: filebeat\n\n- from: mongodb.log.message\n to: message\n alias: true\n beat: filebeat\n\n## MySQL module\n\n- from: mysql.error.message\n to: message\n alias: true\n beat: filebeat\n\n- from: mysql.error.level\n to: log.level\n alias: true\n beat: filebeat\n\n- from: mysql.error.thread_id\n to: mysql.thread_id\n alias: true\n beat: filebeat\n\n- from: mysql.slowlog.ip\n to: source.ip\n alias: true\n beat: filebeat\n\n- from: mysql.slowlog.host\n to: source.domain\n alias: true\n beat: filebeat\n\n- from: mysql.slowlog.user\n to: user.name\n alias: true\n beat: filebeat\n\n- from: mysql.slowlog.id\n to: mysql.thread_id\n alias: true\n beat: filebeat\n\n- from: mysql.slowlog.query_time.sec\n to: event.duration\n alias: false\n scale: 1000000000\n beat: filebeat\n\n## NGINX module\n\n- from: nginx.access.remote_ip\n to: source.address\n alias: true\n beat: filebeat\n\n- from: nginx.access.user_name\n to: user.name\n alias: true\n beat: filebeat\n\n- from: nginx.access.url\n to: url.original\n alias: true\n beat: filebeat\n\n- from: nginx.access.agent\n to: user_agent.original\n alias: true\n beat: filebeat\n\n- from: nginx.access.response_code\n to: http.response.status_code\n alias: true\n beat: filebeat\n\n- from: nginx.access.referrer\n to: http.request.referrer\n alias: true\n beat: filebeat\n\n- from: nginx.access.method\n to: http.request.method\n alias: true\n beat: filebeat\n\n- from: nginx.access.http_version\n to: http.version\n alias: true\n beat: filebeat\n\n- from: nginx.access.body_sent.bytes\n to: http.response.body.bytes\n alias: true\n beat: filebeat\n\n- from: nginx.access.geoip.continent_name\n to: source.geo.continent_name\n alias: true\n beat: filebeat\n\n- from: 
nginx.access.geoip.country_iso_code\n to: source.geo.country_iso_code\n alias: true\n beat: filebeat\n\n- from: nginx.access.geoip.location\n to: source.geo.location\n alias: true\n beat: filebeat\n\n- from: nginx.access.geoip.region_name\n to: source.geo.region_name\n alias: true\n beat: filebeat\n\n- from: nginx.access.geoip.city_name\n to: source.geo.city_name\n alias: true\n beat: filebeat\n\n- from: nginx.access.geoip.region_iso_code\n to: source.geo.region_iso_code\n alias: true\n beat: filebeat\n\n- from: nginx.access.agent\n to: user_agent.original\n alias: true\n beat: filebeat\n\n- from: nginx.access.user_agent.device\n to: user_agent.device.name\n alias: true\n beat: filebeat\n- from: nginx.access.user_agent.name\n to: user_agent.name\n alias: true\n beat: filebeat\n- from: nginx.access.user_agent.os\n to: user_agent.os.full_name\n alias: true\n beat: filebeat\n- from: nginx.access.user_agent.os_name\n to: user_agent.os.name\n alias: true\n beat: filebeat\n\n- from: nginx.access.user_agent.major\n to: user_agent.version\n alias: false\n beat: filebeat\n- from: nginx.access.user_agent.minor\n to: user_agent.version\n alias: false\n beat: filebeat\n- from: nginx.access.user_agent.patch\n to: user_agent.version\n alias: false\n beat: filebeat\n- from: nginx.access.user_agent.os_major\n to: user_agent.os.version\n alias: false\n beat: filebeat\n- from: nginx.access.user_agent.os_minor\n to: user_agent.os.version\n alias: false\n beat: filebeat\n- from: nginx.access.user_agent.os_patch\n to: user_agent.os.version\n alias: false\n beat: filebeat\n\n- from: nginx.error.level\n to: log.level\n alias: true\n beat: filebeat\n\n- from: nginx.error.pid\n to: process.pid\n alias: true\n beat: filebeat\n\n- from: nginx.error.tid\n to: process.thread.id\n alias: true\n beat: filebeat\n\n- from: nginx.error.message\n to: message\n alias: true\n beat: filebeat\n\n## PostgreSQL module\n\n- from: postgresql.log.duration\n to: event.duration\n alias: false\n scale: 
1000000\n beat: filebeat\n\n- from: postgresql.log.timezone\n to: event.timezone\n alias: true\n beat: filebeat\n\n- from: postgresql.log.thread_id\n to: process.pid\n alias: true\n beat: filebeat\n\n- from: postgresql.log.user\n to: user.name\n alias: true\n beat: filebeat\n\n- from: postgresql.log.level\n to: log.level\n alias: true\n beat: filebeat\n\n- from: postgresql.log.message\n to: message\n alias: true\n beat: filebeat\n\n## Redis module\n\n- from: redis.log.pid\n to: process.pid\n alias: true\n beat: filebeat\n\n- from: redis.log.level\n to: log.level\n alias: true\n beat: filebeat\n\n- from: redis.log.message\n to: message\n alias: true\n beat: filebeat\n\n## Icinga module\n\n- from: icinga.debug.message\n to: message\n alias: true\n beat: filebeat\n\n- from: icinga.debug.severity\n to: log.level\n alias: true\n beat: filebeat\n\n- from: icinga.main.message\n to: message\n alias: true\n beat: filebeat\n\n- from: icinga.main.severity\n to: log.level\n alias: true\n beat: filebeat\n\n- from: icinga.startup.message\n to: message\n alias: true\n beat: filebeat\n\n- from: icinga.startup.severity\n to: log.level\n alias: true\n beat: filebeat\n\n## Traefik module\n\n- from: traefik.access.remote_ip\n to: source.address\n alias: true\n beat: filebeat\n\n- from: traefik.access.url\n to: url.original\n alias: true\n beat: filebeat\n\n- from: traefik.access.user_name\n to: user.name\n alias: true\n beat: filebeat\n\n- from: traefik.access.agent\n to: user_agent.original\n alias: true\n beat: filebeat\n\n- from: traefik.access.user_agent.original\n to: user_agent.original\n alias: true\n beat: filebeat\n- from: traefik.access.user_agent.device\n to: user_agent.device.name\n alias: true\n beat: filebeat\n- from: traefik.access.user_agent.name\n to: user_agent.name\n alias: true\n beat: filebeat\n- from: traefik.access.user_agent.os\n to: user_agent.os.full_name\n alias: true\n beat: filebeat\n- from: traefik.access.user_agent.os_name\n to: user_agent.os.name\n 
alias: true\n beat: filebeat\n\n- from: traefik.access.user_agent.major\n to: user_agent.version\n alias: false\n beat: filebeat\n- from: traefik.access.user_agent.minor\n to: user_agent.version\n alias: false\n beat: filebeat\n- from: traefik.access.user_agent.patch\n to: user_agent.version\n alias: false\n beat: filebeat\n- from: traefik.access.user_agent.os_major\n to: user_agent.os.version\n alias: false\n beat: filebeat\n- from: traefik.access.user_agent.os_minor\n to: user_agent.os.version\n alias: false\n beat: filebeat\n- from: traefik.access.user_agent.os_patch\n to: user_agent.os.version\n alias: false\n beat: filebeat\n\n- from: traefik.access.geoip.continent_name\n to: source.geo.continent_name\n alias: true\n beat: filebeat\n\n- from: traefik.access.geoip.country_iso_code\n to: source.geo.country_iso_code\n alias: true\n beat: filebeat\n\n- from: traefik.access.geoip.location\n to: source.geo.location\n alias: true\n beat: filebeat\n\n- from: traefik.access.geoip.region_name\n to: source.geo.region_name\n alias: true\n beat: filebeat\n\n- from: traefik.access.geoip.city_name\n to: source.geo.city_name\n alias: true\n beat: filebeat\n\n- from: traefik.access.geoip.region_iso_code\n to: source.geo.region_iso_code\n alias: true\n beat: filebeat\n\n- from: traefik.access.method\n to: http.request.method\n alias: true\n beat: filebeat\n\n- from: traefik.access.response_code\n to: http.response.status_code\n alias: true\n beat: filebeat\n\n- from: traefik.access.referrer\n to: http.request.referrer\n alias: true\n beat: filebeat\n\n- from: traefik.access.http_version\n to: http.version\n alias: true\n beat: filebeat\n\n- from: traefik.access.body_sent.bytes\n to: http.response.body.bytes\n alias: true\n beat: filebeat\n\n- from: traefik.access.duration\n to: event.duration\n alias: false\n scale: 1000000\n beat: filebeat\n\n# Filebeat Redis Input\n\n- from: read_timestamp\n to: event.created\n alias: false\n beat: filebeat\n\n# Auditbeat\n\n## From 
Auditbeat's auditd module.\n- from: auditd.messages\n to: event.original\n alias: true\n beat: auditbeat\n\n- from: auditd.warnings\n to: error.message\n alias: true\n beat: auditbeat\n\n- from: source.hostname\n to: source.domain\n alias: true\n beat: auditbeat\n\n- from: process.exe\n to: process.executable\n alias: true\n beat: auditbeat\n\n- from: process.cwd\n to: process.working_directory\n alias: true\n beat: auditbeat\n\n- from: user.auid\n to: user.audit.id\n alias: true\n beat: auditbeat\n\n- from: user.uid\n to: user.id\n alias: true\n beat: auditbeat\n\n- from: user.euid\n to: user.effective.id\n alias: true\n beat: auditbeat\n\n- from: user.fsuid\n to: user.filesystem.id\n alias: true\n beat: auditbeat\n\n- from: user.suid\n to: user.saved.id\n alias: true\n beat: auditbeat\n\n- from: user.gid\n to: user.group.id\n alias: true\n beat: auditbeat\n\n- from: user.egid\n to: user.effective.group.id\n alias: true\n beat: auditbeat\n\n- from: user.sgid\n to: user.saved.group.id\n alias: true\n beat: auditbeat\n\n- from: user.fsgid\n to: user.filesystem.group.id\n alias: true\n beat: auditbeat\n\n- from: user.name_map.auid\n to: user.audit.name\n alias: true\n beat: auditbeat\n\n- from: user.name_map.uid\n to: user.name\n alias: true\n beat: auditbeat\n\n- from: user.name_map.euid\n to: user.effective.name\n alias: true\n beat: auditbeat\n\n- from: user.name_map.fsuid\n to: user.filesystem.name\n alias: true\n beat: auditbeat\n\n- from: user.name_map.suid\n to: user.saved.name\n alias: true\n beat: auditbeat\n\n- from: user.name_map.gid\n to: user.group.name\n alias: true\n beat: auditbeat\n\n- from: user.name_map.egid\n to: user.effective.group.name\n alias: true\n beat: auditbeat\n\n- from: user.name_map.sgid\n to: user.saved.group.name\n alias: true\n beat: auditbeat\n\n- from: user.name_map.fsgid\n to: user.filesystem.group.name\n alias: true\n beat: auditbeat\n\n- from: event.type\n to: auditd.message_type\n alias: false\n beat: auditbeat\n comment: 
event.type is reserved for future use by ECS.\n\n# Metricbeat\n\n## Metricbeat base fields\n- from: metricset.module\n to: event.module\n alias: true\n beat: metricbeat\n\n- from: metricset.rrt\n to: event.duration\n alias: false\n comment: Unit changed, old field will stay around if needed\n breaking: true\n beat: metricbeat\n\n- from: metricset.host\n to: service.address\n alias: true\n beat: metricbeat\n\n- from: metricset.namespace\n to: event.dataset\n alias: false\n comment: No alias mapping as field did not always exist\n beat: metricbeat\n\n## Modules\n\n### HAProxy\n\n- from: haproxy.stat.process_id\n to: process.pid\n alias: true\n beat: metricbeat\n\n- from: haproxy.info.pid\n to: process.pid\n alias: true\n beat: metricbeat\n\n### Mongodb\n\n- from: mongodb.status.version\n to: service.version\n alias: true\n beat: metricbeat\n\n- from: mongodb.status.process\n to: process.name\n alias: true\n beat: metricbeat\n\n### Rabbitmq\n\n- from: rabbitmq.connection.node\n to: rabbitmq.node.name\n alias: true\n beat: metricbeat\n\n- from: rabbitmq.connection.vhost\n to: rabbitmq.vhost\n alias: true\n beat: metricbeat\n\n- from: rabbitmq.connection.user\n to: user.name\n alias: true\n beat: metricbeat\n\n- from: rabbitmq.exchange.vhost\n to: rabbitmq.vhost\n alias: true\n beat: metricbeat\n\n- from: rabbitmq.exchange.user\n to: user.name\n alias: true\n beat: metricbeat\n\n- from: rabbitmq.queue.node\n to: rabbitmq.node.name\n alias: true\n beat: metricbeat\n\n- from: rabbitmq.queue.vhost\n to: rabbitmq.vhost\n alias: true\n beat: metricbeat\n\n\n### Redis\n\n- from: php_fpm.status.pid\n to: process.pid\n alias: true\n beat: metricbeat\n\n- from: php_fpm.status.request_method\n to: http.request.method\n alias: true\n beat: metricbeat\n\n- from: php_fpm.status.request_uri\n to: url.original\n alias: true\n beat: metricbeat\n\n- from: php_fpm.status.content_length\n to: http.response.body.bytes\n alias: true\n beat: metricbeat\n\n- from: php_fpm.status.user\n to: 
http.response.user.name\n alias: true\n beat: metricbeat\n\n### Redis\n\n- from: redis.info.server.version\n to: service.version\n alias: true\n beat: metricbeat\n\n- from: redis.info.server.process_id\n to: process.pid\n alias: true\n beat: metricbeat\n\n- from: redis.info.server.os\n to: os.full\n alias: true\n beat: metricbeat\n\n### HTTP\n- from: http.request.body\n to: http.request.body.content\n alias: false\n beat: metricbeat\n\n### System\n\n- from: system.process.name\n to: process.name\n alias: true\n beat: metricbeat\n\n- from: system.process.pid\n to: process.pid\n alias: true\n beat: metricbeat\n\n- from: system.process.ppid\n to: process.ppid\n alias: true\n beat: metricbeat\n\n- from: system.process.pgid\n to: process.pgid\n alias: true\n beat: metricbeat\n\n- from: system.process.cwd\n to: process.working_directory\n alias: true\n beat: metricbeat\n\n- from: system.process.username\n to: user.name\n alias: true\n beat: metricbeat\n\n- from: system.socket.direction\n to: network.direction\n alias: true\n beat: metricbeat\n\n- from: system.socket.family\n to: network.type\n alias: true\n beat: metricbeat\n\n- from: system.socket.process.command\n to: process.name\n alias: true\n beat: metricbeat\n\n- from: system.socket.process.exe\n to: process.executable\n alias: true\n beat: metricbeat\n\n- from: system.socket.process.pid\n to: process.pid\n alias: true\n beat: metricbeat\n\n- from: system.socket.user.id\n to: user.id\n alias: true\n beat: metricbeat\n\n- from: system.socket.user.name\n to: user.full_name\n alias: true\n beat: metricbeat\n\n### Kibana\n\n- from: kibana.stats.uuid\n to: service.id\n alias: true\n beat: metricbeat\n\n- from: kibana.stats.transport_address\n to: service.address\n alias: true\n beat: metricbeat\n\n- from: kibana.stats.version\n to: service.version\n alias: true\n beat: metricbeat\n\n- from: kibana.status.uuid\n to: service.id\n alias: true\n beat: metricbeat\n\n- from: kibana.status.version.number\n to: 
service.version\n alias: true\n beat: metricbeat\n\n### Logstash\n\n- from: logstash.node.host\n to: service.hostname\n alias: true\n beat: metricbeat\n\n- from: logstash.node.version\n to: service.version\n alias: true\n beat: metricbeat\n\n- from: logstash.node.jvm.pid\n to: process.pid\n alias: true\n beat: metricbeat\n\n### Zookeeper\n\n- from: zookeeper.mntr.version\n to: service.version\n alias: true\n beat: metricbeat\n\n# Packetbeat\n\n## Shared\n- from: bytes_in\n to: source.bytes\n alias: true\n beat: packetbeat\n\n- from: bytes_out\n to: destination.bytes\n alias: true\n beat: packetbeat\n\n- from: notes\n to: error.message\n alias: true\n beat: packetbeat\n\n- from: responsetime\n to: event.duration\n alias: false\n scale: 1000000\n comment: The units changed so no alias was added.\n beat: packetbeat\n\n- from: transport\n to: network.transport\n alias: true\n beat: packetbeat\n\n- from: real_ip\n to: network.forwarded_ip\n alias: true\n beat: packetbeat\n\n## Flows\n- from: flow_id\n to: flow.id\n alias: true\n beat: packetbeat\n\n- from: final\n to: flow.final\n alias: true\n beat: packetbeat\n\n- from: start_time\n to: event.start\n alias: true\n beat: packetbeat\n\n- from: last_time\n to: event.end\n alias: true\n beat: packetbeat\n\n- from: source.stats.net_bytes_total\n to: source.bytes\n alias: true\n beat: packetbeat\n\n- from: source.stats.net_packets_total\n to: source.packets\n alias: true\n beat: packetbeat\n\n- from: dest.stats.net_bytes_total\n to: destination.bytes\n alias: true\n beat: packetbeat\n\n- from: dest.stats.net_packets_total\n to: destination.packets\n alias: true\n beat: packetbeat\n\n## HTTP\n- from: http.request.body\n to: http.request.body.content\n alias: false\n beat: packetbeat\n\n- from: http.response.body\n to: http.response.body.content\n alias: false\n beat: packetbeat\n\n- from: http.response.code\n to: http.response.status_code\n alias: true\n beat: packetbeat\n\n- from: http.response.phrase\n to: 
http.response.status_phrase\n alias: true\n beat: packetbeat\n\n- from: http.request.params\n to: url.query\n alias: true\n beat: packetbeat\n\n- from: method\n to: http.request.method\n alias: false\n comment: Field is used by several protocols.\n beat: packetbeat\n\n- from: path\n to: url.path\n alias: false\n beat: packetbeat\n\n- from: real_ip\n to: network.forwarded_ip\n alias: false\n comment: Field is used by several protocols.\n beat: packetbeat\n\n## MySQL\n- from: mysql.iserror\n to: status\n alias: false\n comment: >\n Status reflects whether or not an error occurred. Its values are either\n OK or Error.\n beat: packetbeat\n\n## NFS\n- from: rpc.time\n to: event.duration\n alias: false\n comment: >\n Cannot add alias at this time because the field is used by MongoDB which\n has not been updated yet.\n beat: packetbeat\n\n- from: rpc.call_size\n to: source.bytes\n alias: false\n comment: >\n Cannot add alias at this time because the field is used by MongoDB which\n has not been updated yet.\n beat: packetbeat\n\n- from: rpc.reply_size\n to: destination.bytes\n alias: false\n comment: >\n Cannot add alias at this time because the field is used by MongoDB which\n has not been updated yet.\n beat: packetbeat\n\n## Cassandra\n- from: no_request\n to: cassandra.no_request\n alias: true\n beat: packetbeat\n\n# Heartbeat\n\n- from: monitor.scheme\n to: url.scheme\n alias: true\n beat: heartbeat\n\n- from: monitor.host\n to: url.domain\n alias: true\n beat: heartbeat\n\n- from: resolve.host\n to: url.domain\n alias: true\n beat: heartbeat\n\n- from: http.url\n to: url.full\n alias: true\n beat: heartbeat\n\n- from: tcp.port\n to: url.port\n alias: true\n beat: heartbeat\n\n## Winlogbeat\n\n# Alias to ECS fields\n\n- from: xml\n to: event.original\n alias: true\n beat: winlogbeat\n\n- from: level\n to: log.level\n alias: true\n beat: winlogbeat\n\n# Move fields to winlog.* section, to reduce chances of top level field name conflicts.\n\n# renames to match Windows 
Event Log naming\n\n- from: log_name\n to: winlog.channel\n alias: true\n beat: winlogbeat\n\n- from: record_number\n to: winlog.record_id\n alias: true\n beat: winlogbeat\n\n- from: process_id\n to: winlog.process.pid\n alias: true\n beat: winlogbeat\n\n- from: source_name\n to: winlog.provider_name\n alias: true\n beat: winlogbeat\n\n- from: thread_id\n to: winlog.process.thread.id\n alias: true\n beat: winlogbeat\n\n# Fields moved without adjusting the name\n\n- from: activity_id\n to: winlog.activity_id\n alias: true\n beat: winlogbeat\n\n- from: computer_name\n to: winlog.computer_name\n alias: true\n beat: winlogbeat\n\n- from: event_id\n to: winlog.event_id\n alias: true\n beat: winlogbeat\n\n- from: keywords\n to: winlog.keywords\n alias: true\n beat: winlogbeat\n\n- from: message_error\n to: error.message\n alias: true\n beat: winlogbeat\n\n- from: related_activity_id\n to: winlog.related_activity_id\n alias: true\n beat: winlogbeat\n\n- from: opcode\n to: winlog.opcode\n alias: true\n beat: winlogbeat\n\n- from: provider_guid\n to: winlog.provider_guid\n alias: true\n beat: winlogbeat\n\n- from: task\n to: winlog.task\n alias: true\n beat: winlogbeat\n\n- from: user.identifier\n to: winlog.user.identifier\n alias: true\n beat: winlogbeat\n\n- from: user.domain\n to: winlog.user.domain\n alias: true\n beat: winlogbeat\n\n- from: user.type\n to: winlog.user.type\n alias: true\n beat: winlogbeat\n\n- from: version\n to: winlog.version\n alias: true\n beat: winlogbeat\n rename: false\n
dataset_sample\yaml\go\ecs-migration.yml
ecs-migration.yml
YAML
36,540
0.95
0.00484
0.040865
node-utils
183
2023-09-18T02:29:02.223932
Apache-2.0
false
ea259e1a14a3f90f16a7ed48350a0f57
filebeat.config:\n modules:\n path: ${path.config}/modules.d/*.yml\n reload.enabled: false\n\nprocessors:\n - add_cloud_metadata: ~\n - add_docker_metadata: ~\n\noutput.elasticsearch:\n hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'\n username: '${ELASTICSEARCH_USERNAME:}'\n password: '${ELASTICSEARCH_PASSWORD:}'\n
dataset_sample\yaml\go\filebeat.docker.yml
filebeat.docker.yml
YAML
319
0.8
0
0
vue-tools
994
2024-09-17T16:35:33.697182
GPL-3.0
false
1821be6ae87724dcb8bcb79132eb528c
###################### Filebeat Configuration Example #########################\n\n# This file is an example configuration file highlighting only the most common\n# options. The filebeat.reference.yml file from the same directory contains all the\n# supported options with more comments. You can use it as a reference.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/filebeat/index.html\n\n# For more available modules and options, please see the filebeat.reference.yml sample\n# configuration file.\n\n# ============================== Filebeat inputs ===============================\n\nfilebeat.inputs:\n\n# Each - is an input. Most options can be set at the input level, so\n# you can use different inputs for various configurations.\n# Below are the input-specific configurations.\n\n# filestream is an input for collecting log messages from files.\n- type: filestream\n\n # Unique ID among all inputs, an ID is required.\n id: my-filestream-id\n\n # Change to true to enable this input configuration.\n enabled: false\n\n # Paths that should be crawled and fetched. Glob based paths.\n paths:\n - /var/log/*.log\n #- c:\programdata\elasticsearch\logs\*\n\n # Exclude lines. A list of regular expressions to match. It drops the lines that are\n # matching any regular expression from the list.\n # Line filtering happens after the parsers pipeline. If you would like to filter lines\n # before parsers, use include_message parser.\n #exclude_lines: ['^DBG']\n\n # Include lines. A list of regular expressions to match. It exports the lines that are\n # matching any regular expression from the list.\n # Line filtering happens after the parsers pipeline. If you would like to filter lines\n # before parsers, use include_message parser.\n #include_lines: ['^ERR', '^WARN']\n\n # Exclude files. A list of regular expressions to match. Filebeat drops the files that\n # are matching any regular expression from the list. 
By default, no files are dropped.\n #prospector.scanner.exclude_files: ['.gz$']\n\n # Optional additional fields. These fields can be freely picked\n # to add additional information to the crawled log files for filtering\n #fields:\n # level: debug\n # review: 1\n\n# journald is an input for collecting logs from Journald\n#- type: journald\n\n # Unique ID among all inputs, if the ID changes, all entries\n # will be re-ingested\n #id: my-journald-id\n\n # The position to start reading from the journal, valid options are:\n # - head: Starts reading at the beginning of the journal.\n # - tail: Starts reading at the end of the journal.\n # This means that no events will be sent until a new message is written.\n # - since: Use also the `since` option to determine when to start reading from.\n #seek: head\n\n # A time offset from the current time to start reading from.\n # To use since, seek option must be set to since.\n #since: -24h\n\n # Collect events from the service and messages about the service,\n # including coredumps.\n #units:\n #- docker.service\n\n# ============================== Filebeat modules ==============================\n\nfilebeat.config.modules:\n # Glob pattern for configuration loading\n path: ${path.config}/modules.d/*.yml\n\n # Set to true to enable config reloading\n reload.enabled: false\n\n # Period on which files under path should be checked for changes\n #reload.period: 10s\n\n# ======================= Elasticsearch template setting =======================\n\nsetup.template.settings:\n index.number_of_shards: 1\n #index.codec: best_compression\n #_source.enabled: false\n\n\n# ================================== General ===================================\n\n# The name of the shipper that publishes the network data. 
It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n#name:\n\n# The tags of the shipper are included in their field with each\n# transaction published.\n#tags: ["service-X", "web-tier"]\n\n# Optional fields that you can specify to add additional information to the\n# output.\n#fields:\n# env: staging\n\n# ================================= Dashboards =================================\n# These settings control loading the sample dashboards to the Kibana index. Loading\n# the dashboards is disabled by default and can be enabled either by setting the\n# options here or by using the `setup` command.\n#setup.dashboards.enabled: false\n\n# The URL from where to download the dashboard archive. By default, this URL\n# has a value that is computed based on the Beat name and version. For released\n# versions, this URL points to the dashboard archive on the artifacts.elastic.co\n# website.\n#setup.dashboards.url:\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify and additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Kibana Space ID\n # ID of the Kibana Space into which the dashboards should be loaded. 
By default,\n # the Default Space will be used.\n #space.id:\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Array of hosts to connect to.\n hosts: ["localhost:9200"]\n\n # Performance preset - one of "balanced", "throughput", "scale",\n # "latency", or "custom".\n preset: balanced\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Optional SSL. 
By default is off.\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client Certificate Key\n #ssl.key: "/etc/pki/client/cert.key"\n\n# ================================= Processors =================================\nprocessors:\n - add_host_metadata:\n when.not.contains.tags: forwarded\n - add_cloud_metadata: ~\n - add_docker_metadata: ~\n - add_kubernetes_metadata: ~\n\n# ================================== Logging ===================================\n\n# Sets log level. The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: debug\n\n# At debug level, you can selectively enable logging only for some components.\n# To enable all selectors, use ["*"]. Examples of other selectors are "beat",\n# "publisher", "service".\n#logging.selectors: ["*"]\n\n# ============================= X-Pack Monitoring ==============================\n# Filebeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. 
Most settings from the\n# Elasticsearch outputs are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the filebeat.\n#instrumentation:\n # Set to true to enable instrumentation of filebeat.\n #enabled: false\n\n # Environment in which filebeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration aliases\n#migration.6_to_7.enabled: true\n\n
dataset_sample\yaml\go\filebeat.yml
filebeat.yml
YAML
9,540
0.95
0.057692
0.891192
awesome-app
586
2023-08-12T22:02:39.262635
BSD-3-Clause
false
494ee205c337cdfe5bbc772efd3dc6df
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n - id: "no-time-in-fsm"\n patterns:\n - pattern: time.Now()\n\n # Metric state is local to the server and therefore must use time.\n - pattern-not-inside: |\n defer metrics.MeasureSince(...)\n\n # Metric state is local to the server and therefore must use time.\n - pattern-not-inside: |\n defer metrics.MeasureSinceWithLabels(...)\n\n # The timetable's whole point is to roughly track timestamps for Raft log\n # indexes, so it must use time.\n - pattern-not-inside: |\n $N.timetable.Witness(...)\n message: |\n time.Now() should not be called from within the Server's FSM. Apply Raft\n log messages to the State Store must be deterministic so that each server\n contains exactly the same state. Since time drifts between nodes, it must\n be set before the Raft log message is applied so that all Raft members\n see the same timestamp.\n languages:\n - "go"\n severity: "WARNING"\n paths:\n include:\n - "nomad/fsm*"\n - "nomad/state/state_store*"\n exclude:\n - "*_test.go"\n
dataset_sample\yaml\go\fsm_time.yml
fsm_time.yml
YAML
1,166
0.8
0.028571
0.193548
awesome-app
705
2025-02-02T10:13:32.980791
GPL-3.0
false
cc9da66f8f572c4b705fe9ee5fc99482
# These are supported funding model platforms\n\ngithub: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]\npatreon: # Replace with a single Patreon username\nopen_collective: # Replace with a single Open Collective username\nko_fi: yorukot # Replace with a single Ko-fi username\ntidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel\ncommunity_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry\nliberapay: # Replace with a single Liberapay username\nissuehunt: # Replace with a single IssueHunt username\nlfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry\npolar: # Replace with a single Polar username\nbuy_me_a_coffee: # Replace with a single Buy Me a Coffee username\ncustom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']\n
dataset_sample\yaml\go\FUNDING.yml
FUNDING.yml
YAML
873
0.8
0
0.076923
react-lib
737
2025-02-06T21:44:31.396520
BSD-3-Clause
false
f3f1b30fc9059f69006e3d5d34b79fe4
---\n# This action is synced from https://github.com/prometheus/prometheus\nname: golangci-lint\non:\n push:\n paths:\n - "go.sum"\n - "go.mod"\n - "**.go"\n - "scripts/errcheck_excludes.txt"\n - ".github/workflows/golangci-lint.yml"\n - ".golangci.yml"\n pull_request:\n\npermissions: # added using https://github.com/step-security/secure-repo\n contents: read\n\njobs:\n golangci:\n permissions:\n contents: read # for actions/checkout to fetch code\n pull-requests: read # for golangci/golangci-lint-action to fetch pull requests\n name: lint\n runs-on: ubuntu-latest\n steps:\n - name: Checkout repository\n uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2\n - name: Install Go\n uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0\n with:\n go-version: 1.24.x\n - name: Install snmp_exporter/generator dependencies\n run: sudo apt-get update && sudo apt-get -y install libsnmp-dev\n if: github.repository == 'prometheus/snmp_exporter'\n - name: Lint\n uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0\n with:\n args: --verbose\n version: v2.0.2\n
dataset_sample\yaml\go\golangci-lint.yml
golangci-lint.yml
YAML
1,252
0.8
0.076923
0.027027
vue-tools
372
2024-10-27T22:07:53.590425
Apache-2.0
false
1bc3d39debc62dcb9f3a329a26a400ad
version: "2"\nrun:\n go: 1.23\n concurrency: 4\n timeout: 10m\nlinters:\n default: none\n enable:\n - asasalint\n - asciicheck\n - gomodguard\n - goprintffuncname\n - govet\n - ineffassign\n - misspell\n - nakedret\n - nolintlint\n - prealloc\n - reassign\n - staticcheck\n - unconvert\n - unused\n - whitespace\nformatters:\n enable:\n - gofmt\n - goimports\n
dataset_sample\yaml\go\golangci.yml
golangci.yml
YAML
390
0.85
0
0
python-kit
147
2025-03-08T23:25:27.221643
BSD-3-Clause
false
4349bc21a5c26f86c6f1a1aec71c4571
version: 2\n\nproject_name: trivy_canary_build\nbuilds:\n -\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - darwin\n - linux\n - windows\n goarch:\n - amd64\n - arm64\n ignore:\n - goos: windows\n goarch: arm64\n\narchives:\n -\n format: tar.gz\n name_template: >-\n {{ .ProjectName }}_{{ .Version }}_\n {{- if eq .Os "darwin" }}macOS\n {{- else}}{{- title .Os }}{{ end }}-\n {{- if eq .Arch "amd64" }}64bit\n {{- else if eq .Arch "arm64" }}ARM64\n {{- else }}{{ .Arch }}{{ end }}\n files:\n - README.md\n - LICENSE\n - contrib/*.tpl\n format_overrides:\n - goos: windows\n format: zip\n
dataset_sample\yaml\go\goreleaser-canary.yml
goreleaser-canary.yml
YAML
840
0.8
0.073171
0
vue-tools
108
2024-02-18T09:01:26.377299
BSD-3-Clause
false
9043654b802f88b66e47b762d971c932
version: 2\n\nproject_name: trivy\nbuilds:\n - id: build-linux\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - linux\n goarch:\n - 386\n - arm\n - amd64\n - arm64\n - s390x\n - ppc64le\n goarm:\n - 7\n - id: build-bsd\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - freebsd\n goarch:\n # modernc.org/sqlite doesn't support freebsd/arm64, etc.\n - 386\n - amd64\n - id: build-macos\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - darwin\n goarch:\n - amd64\n - arm64\n goarm:\n - 7\n - id: build-windows\n main: ./cmd/trivy/\n binary: trivy\n ldflags:\n - -s -w\n - "-extldflags '-static'"\n - -X github.com/aquasecurity/trivy/pkg/version/app.ver={{.Version}}\n env:\n - CGO_ENABLED=0\n goos:\n - windows\n goarch:\n # modernc.org/sqlite doesn't support windows/386 and windows/arm, etc.\n - amd64\n goarm:\n - 7\n\nrelease:\n extra_files:\n - glob: ./bom.json\n discussion_category_name: Announcements\n\nnfpms:\n -\n formats:\n - deb\n - rpm\n vendor: "aquasecurity"\n homepage: "https://github.com/aquasecurity"\n maintainer: "Teppei Fukuda <knqyf263@gmail.com>"\n description: "A Fast Vulnerability Scanner for Containers"\n license: "Apache-2.0"\n file_name_template: >-\n {{ .ProjectName }}_{{ .Version }}_\n {{- if eq .Os "darwin" }}macOS\n {{- else if eq .Os "openbsd" }}OpenBSD\n {{- else if eq .Os "netbsd" }}NetBSD\n {{- else if eq .Os "freebsd" }}FreeBSD\n {{- else if eq .Os "dragonfly" }}DragonFlyBSD\n {{- else}}{{- title .Os }}{{ end }}-\n {{- if eq .Arch "amd64" }}64bit\n {{- else if eq .Arch "386" }}32bit\n {{- else if eq .Arch "arm" }}ARM\n {{- 
else if eq .Arch "arm64" }}ARM64\n {{- else if eq .Arch "ppc64le" }}PPC64LE\n {{- else }}{{ .Arch }}{{ end }}\n contents:\n - src: contrib/*.tpl\n dst: /usr/local/share/trivy/templates\n rpm:\n signature:\n key_file: '{{ .Env.GPG_FILE }}'\n\narchives:\n - id: archive\n format: tar.gz\n name_template: >-\n {{ .ProjectName }}_{{ .Version }}_\n {{- if eq .Os "darwin" }}macOS\n {{- else if eq .Os "linux" }}Linux\n {{- else if eq .Os "openbsd" }}OpenBSD\n {{- else if eq .Os "netbsd" }}NetBSD\n {{- else if eq .Os "freebsd" }}FreeBSD\n {{- else if eq .Os "dragonfly" }}DragonFlyBSD\n {{- else}}{{- .Os }}{{ end }}-\n {{- if eq .Arch "amd64" }}64bit\n {{- else if eq .Arch "386" }}32bit\n {{- else if eq .Arch "arm" }}ARM\n {{- else if eq .Arch "arm64" }}ARM64\n {{- else if eq .Arch "ppc64le" }}PPC64LE\n {{- else }}{{ .Arch }}{{ end }}\n files:\n - README.md\n - LICENSE\n - contrib/*.tpl\n format_overrides:\n - goos: windows\n format: zip\n\n\nbrews:\n -\n repository:\n owner: aquasecurity\n name: homebrew-trivy\n homepage: "https://github.com/aquasecurity/trivy"\n description: "Scanner for vulnerabilities in container images, file systems, and Git repositories, as well as for configuration issues"\n test: |\n system "#{bin}/trivy", "--version"\n\ndockers:\n - image_templates:\n - "docker.io/aquasec/trivy:{{ .Version }}-amd64"\n - "docker.io/aquasec/trivy:latest-amd64"\n - "ghcr.io/aquasecurity/trivy:{{ .Version }}-amd64"\n - "ghcr.io/aquasecurity/trivy:latest-amd64"\n - "public.ecr.aws/aquasecurity/trivy:latest-amd64"\n - "public.ecr.aws/aquasecurity/trivy:{{ .Version }}-amd64"\n use: buildx\n goos: linux\n goarch: amd64\n ids:\n - build-linux\n build_flag_templates:\n - "--label=org.opencontainers.image.title={{ .ProjectName }}"\n - "--label=org.opencontainers.image.description=A Fast Vulnerability Scanner for Containers"\n - "--label=org.opencontainers.image.vendor=Aqua Security"\n - "--label=org.opencontainers.image.version={{ .Version }}"\n - 
"--label=org.opencontainers.image.created={{ .Date }}"\n - "--label=org.opencontainers.image.source=https://github.com/aquasecurity/trivy"\n - "--label=org.opencontainers.image.revision={{ .FullCommit }}"\n - "--label=org.opencontainers.image.url=https://www.aquasec.com/products/trivy/"\n - "--label=org.opencontainers.image.documentation=https://trivy.dev/v{{ .Version }}/"\n - "--platform=linux/amd64"\n extra_files:\n - contrib/\n - image_templates:\n - "docker.io/aquasec/trivy:{{ .Version }}-arm64"\n - "docker.io/aquasec/trivy:latest-arm64"\n - "ghcr.io/aquasecurity/trivy:{{ .Version }}-arm64"\n - "ghcr.io/aquasecurity/trivy:latest-arm64"\n - "public.ecr.aws/aquasecurity/trivy:latest-arm64"\n - "public.ecr.aws/aquasecurity/trivy:{{ .Version }}-arm64"\n use: buildx\n goos: linux\n goarch: arm64\n ids:\n - build-linux\n build_flag_templates:\n - "--label=org.opencontainers.image.title={{ .ProjectName }}"\n - "--label=org.opencontainers.image.description=A Fast Vulnerability Scanner for Containers"\n - "--label=org.opencontainers.image.vendor=Aqua Security"\n - "--label=org.opencontainers.image.version={{ .Version }}"\n - "--label=org.opencontainers.image.created={{ .Date }}"\n - "--label=org.opencontainers.image.source=https://github.com/aquasecurity/trivy"\n - "--label=org.opencontainers.image.revision={{ .FullCommit }}"\n - "--label=org.opencontainers.image.url=https://www.aquasec.com/products/trivy/"\n - "--label=org.opencontainers.image.documentation=https://trivy.dev/v{{ .Version }}/"\n - "--platform=linux/arm64"\n extra_files:\n - contrib/\n - image_templates:\n - "docker.io/aquasec/trivy:{{ .Version }}-s390x"\n - "docker.io/aquasec/trivy:latest-s390x"\n - "ghcr.io/aquasecurity/trivy:{{ .Version }}-s390x"\n - "ghcr.io/aquasecurity/trivy:latest-s390x"\n - "public.ecr.aws/aquasecurity/trivy:latest-s390x"\n - "public.ecr.aws/aquasecurity/trivy:{{ .Version }}-s390x"\n use: buildx\n goos: linux\n goarch: s390x\n ids:\n - build-linux\n build_flag_templates:\n - 
"--label=org.opencontainers.image.title={{ .ProjectName }}"\n - "--label=org.opencontainers.image.description=A Fast Vulnerability Scanner for Containers"\n - "--label=org.opencontainers.image.vendor=Aqua Security"\n - "--label=org.opencontainers.image.version={{ .Version }}"\n - "--label=org.opencontainers.image.created={{ .Date }}"\n - "--label=org.opencontainers.image.source=https://github.com/aquasecurity/trivy"\n - "--label=org.opencontainers.image.revision={{ .FullCommit }}"\n - "--label=org.opencontainers.image.url=https://www.aquasec.com/products/trivy/"\n - "--label=org.opencontainers.image.documentation=https://trivy.dev/v{{ .Version }}/"\n - "--platform=linux/s390x"\n extra_files:\n - contrib/\n - image_templates:\n - "docker.io/aquasec/trivy:{{ .Version }}-ppc64le"\n - "docker.io/aquasec/trivy:latest-ppc64le"\n - "ghcr.io/aquasecurity/trivy:{{ .Version }}-ppc64le"\n - "ghcr.io/aquasecurity/trivy:latest-ppc64le"\n - "public.ecr.aws/aquasecurity/trivy:latest-ppc64le"\n - "public.ecr.aws/aquasecurity/trivy:{{ .Version }}-ppc64le"\n use: buildx\n goos: linux\n goarch: ppc64le\n ids:\n - build-linux\n build_flag_templates:\n - "--label=org.opencontainers.image.title={{ .ProjectName }}"\n - "--label=org.opencontainers.image.description=A Fast Vulnerability Scanner for Containers"\n - "--label=org.opencontainers.image.vendor=Aqua Security"\n - "--label=org.opencontainers.image.version={{ .Version }}"\n - "--label=org.opencontainers.image.created={{ .Date }}"\n - "--label=org.opencontainers.image.source=https://github.com/aquasecurity/trivy"\n - "--label=org.opencontainers.image.revision={{ .FullCommit }}"\n - "--label=org.opencontainers.image.url=https://www.aquasec.com/products/trivy/"\n - "--label=org.opencontainers.image.documentation=https://trivy.dev/v{{ .Version }}/"\n - "--platform=linux/ppc64le"\n extra_files:\n - contrib/\n\ndocker_manifests:\n - name_template: 'aquasec/trivy:{{ .Version }}'\n image_templates:\n - 'aquasec/trivy:{{ .Version 
}}-amd64'\n - 'aquasec/trivy:{{ .Version }}-arm64'\n - 'aquasec/trivy:{{ .Version }}-s390x'\n - 'aquasec/trivy:{{ .Version }}-ppc64le'\n - name_template: 'ghcr.io/aquasecurity/trivy:{{ .Version }}'\n image_templates:\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-amd64'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-arm64'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-s390x'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-ppc64le'\n - name_template: 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}'\n image_templates:\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-amd64'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-arm64'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-s390x'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-ppc64le'\n - name_template: 'aquasec/trivy:latest'\n image_templates:\n - 'aquasec/trivy:{{ .Version }}-amd64'\n - 'aquasec/trivy:{{ .Version }}-arm64'\n - 'aquasec/trivy:{{ .Version }}-s390x'\n - 'aquasec/trivy:{{ .Version }}-ppc64le'\n - name_template: 'ghcr.io/aquasecurity/trivy:latest'\n image_templates:\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-amd64'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-arm64'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-s390x'\n - 'ghcr.io/aquasecurity/trivy:{{ .Version }}-ppc64le'\n - name_template: 'public.ecr.aws/aquasecurity/trivy:latest'\n image_templates:\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-amd64'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-arm64'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-s390x'\n - 'public.ecr.aws/aquasecurity/trivy:{{ .Version }}-ppc64le'\n\nsigns:\n- cmd: cosign\n env:\n - COSIGN_EXPERIMENTAL=1\n signature: "${artifact}.sig"\n certificate: "${artifact}.pem"\n args:\n - "sign-blob"\n - "--oidc-issuer=https://token.actions.githubusercontent.com"\n - "--output-certificate=${certificate}"\n - "--output-signature=${signature}"\n - "${artifact}"\n - "--yes"\n artifacts: all\n output: true\n\ndocker_signs:\n- cmd: 
cosign\n env:\n - COSIGN_EXPERIMENTAL=1\n artifacts: manifests\n output: true\n args:\n - 'sign'\n - '${artifact}'\n - '--yes'\n
dataset_sample\yaml\go\goreleaser.yml
goreleaser.yml
YAML
10,861
0.8
0.090032
0.006645
vue-tools
587
2023-12-10T02:01:05.338959
GPL-3.0
false
b861bf47dafbbd42fc82a29c64ae2d90
# Define a directory to load monitor definitions from. Definitions take the form\n# of individual yaml files.\nheartbeat.config.monitors:\n # Directory + glob pattern to search for configuration files\n path: ${path.config}/monitors.d/*.yml\n # If enabled, heartbeat will periodically check the config.monitors path for changes\n reload.enabled: false\n # How often to check for changes\n reload.period: 5s\n\n\nheartbeat.monitors:\n- type: http\n schedule: '@every 5s'\n urls:\n - http://elasticsearch:9200\n - http://kibana:5601\n\n- type: icmp\n schedule: '@every 5s'\n hosts:\n - elasticsearch\n - kibana\n\nprocessors:\n - add_cloud_metadata: ~\n - add_docker_metadata: ~\n\noutput.elasticsearch:\n hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'\n username: '${ELASTICSEARCH_USERNAME:}'\n password: '${ELASTICSEARCH_PASSWORD:}'\n
dataset_sample\yaml\go\heartbeat.docker.yml
heartbeat.docker.yml
YAML
833
0.8
0.09375
0.185185
vue-tools
862
2023-09-10T21:43:17.722351
BSD-3-Clause
false
a782a8303a3e4f70219a5328d47dba36
################### Heartbeat Configuration Example #########################\n\n# This file is a full configuration example documenting all non-deprecated\n# options in comments. For a shorter configuration example, that contains\n# only some common options, please see heartbeat.yml in the same directory.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/heartbeat/index.html\n\n############################# Heartbeat ######################################\n\n\n# Define a directory from which to load monitor definitions. Definitions take the form\n# of individual yaml files.\nheartbeat.config.monitors:\n # Directory + glob pattern to search for configuration files\n path: ${path.config}/monitors.d/*.yml\n # If enabled, heartbeat will periodically check the config.monitors path for changes\n reload.enabled: false\n # How often to check for changes\n reload.period: 5s\n\n# Configure monitors\nheartbeat.monitors:\n- type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping\n # configured hosts\n\n # ID used to uniquely identify this monitor in Elasticsearch even if the config changes\n id: my-monitor\n\n # Human readable display name for this service in Uptime UI and elsewhere\n name: my-icmp-monitor\n\n # Name of corresponding APM service, if Elastic APM is in use for the monitored service.\n # service.name: my-apm-service-name\n\n # Enable/Disable monitor\n #enabled: true\n\n # Configure task schedule using cron-like syntax\n schedule: '*/5 * * * * * *' # exactly every 5 seconds like 10:00:00, 10:00:05, ...\n\n # List of hosts to ping\n hosts: ["localhost"]\n\n # Configure IP protocol types to ping if hostnames are configured.\n # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`.\n ipv4: true\n ipv6: true\n mode: any\n\n # Total running time per ping test.\n timeout: 16s\n\n # Waiting duration until another ICMP Echo Request is emitted.\n wait: 1s\n\n # The tags of 
the monitors are included in their field with each\n # transaction published. Tags make it easy to group servers by different\n # logical properties.\n #tags: ["service-X", "web-tier"]\n\n # Optional fields that you can specify to add additional information to the\n # monitor output. Fields can be scalar values, arrays, dictionaries, or any nested\n # combination of these.\n #fields:\n # env: staging\n\n # If this option is set to true, the custom fields are stored as top-level\n # fields in the output document instead of being grouped under a fields\n # sub-dictionary. Default is false.\n #fields_under_root: false\n\n# Define a directory to load monitor definitions from. Definitions take the form\n# of individual yaml files.\n# heartbeat.config.monitors:\n # Directory + glob pattern to search for configuration files\n #path: /path/to/my/monitors.d/*.yml\n # If enabled, heartbeat will periodically check the config.monitors path for changes\n #reload.enabled: true\n # How often to check for changes\n #reload.period: 1s\n\n- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify the endpoint\n # by sending/receiving a custom payload\n # ID used to uniquely identify this monitor in Elasticsearch even if the config changes\n id: my-monitor\n\n # Human readable display name for this service in Uptime UI and elsewhere\n name: my-tcp-monitor\n\n # Enable/Disable monitor\n #enabled: true\n\n # Configure task schedule\n schedule: '@every 5s' # every 5 seconds from start of beat\n\n # configure hosts to ping.\n # Entries can be:\n # - plain hostname or IP like `localhost`:\n # Requires ports configs to be checked. If ssl is configured,\n # an SSL/TLS based connection will be established. Otherwise plain tcp connection\n # will be established\n # - hostname + port like `localhost:12345`:\n # Connect to port on a given host. If ssl is configured,\n # an SSL/TLS based connection will be established. 
Otherwise plain tcp connection\n # will be established\n # - full url syntax. `scheme://<host>:[port]`. The `<scheme>` can be one of\n # `tcp`, `plain`, `ssl`, and `tls`. If `tcp`, `plain` is configured, a plain\n # tcp connection will be established, even if ssl is configured.\n # Using `tls`/`ssl`, an SSL connection is established. If no ssl is configured,\n # system defaults will be used (not supported on windows).\n # If `port` is missing in url, the port setting is required.\n hosts: ["localhost:9200"]\n\n # Configure IP protocol types to ping if hostnames are configured.\n # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`.\n ipv4: true\n ipv6: true\n mode: any\n\n # List of ports to ping if host does not contain a port number\n # ports: [80, 9200, 5044]\n\n # Total test connection and data exchange timeout\n #timeout: 16s\n\n # Optional payload string to send to remote and expected answer. If none is\n # configured, the endpoint is expected to be up if a connection attempt was\n # successful. If only `send_string` is configured, any response will be\n # accepted as ok. If only `receive_string` is configured, no payload will be\n # send, but the client expects to receive the expected payload on connect.\n #check:\n #send: ''\n #receive: ''\n\n # SOCKS5 proxy url\n # proxy_url: ''\n\n # Resolve hostnames locally instead on SOCKS5 server:\n #proxy_use_local_resolver: false\n\n # TLS/SSL connection settings:\n #ssl:\n # Certificate Authorities\n #certificate_authorities: ['']\n\n # Required TLS protocols\n #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"]\n\n # The ingest pipeline ID associated with this input. If this is set, it\n # overwrites the pipeline option from the Elasticsearch output.\n #pipeline:\n\n # The index name associated with this input. 
If this is set, it\n # overwrites the index option from the Elasticsearch output.\n #index:\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\n- type: http # monitor type `http`. Connect via HTTP and optionally verify the response\n # ID used to uniquely identify this monitor in Elasticsearch even if the config changes.\n id: my-http-monitor\n\n # Human readable display name for this service in Uptime UI and elsewhere\n name: My Monitor\n\n # Enable/Disable monitor\n #enabled: true\n\n # Configure task schedule\n schedule: '@every 5s' # every 5 seconds from the start of beat\n\n # Configure URLs to ping\n urls: ["http://localhost:9200"]\n\n # Configure IP protocol types to ping if hostnames are configured.\n # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`.\n ipv4: true\n ipv6: true\n mode: any\n\n # Optional HTTP proxy url.\n #proxy_url: ''\n\n # Total test connection and data exchange timeout\n #timeout: 16s\n\n # Optional Authentication Credentials\n #username: ''\n #password: ''\n\n # TLS/SSL connection settings for use with HTTPS endpoint. If not configured,\n # system defaults will be used.\n #ssl:\n # Certificate Authorities\n #certificate_authorities: ['']\n\n # Required TLS protocols\n #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"]\n\n # Request settings:\n #check.request:\n # Configure HTTP method to use. Only 'HEAD', 'GET', and 'POST' methods are allowed.\n #method: "GET"\n\n # Dictionary of additional HTTP headers to send:\n #headers:\n\n # Optional request body content\n #body:\n\n # Expected response settings\n #check.response:\n # Expected status code. 
If not configured or set to 0 any status code not\n # being 404 is accepted.\n #status: 0\n\n # Required response headers.\n #headers:\n\n # Required response contents.\n #body:\n\n # Parses the body as JSON, then checks against the given expression\n #json:\n #- description: Explanation of what the check does\n # expression: 'myField == "expectedValue"'\n\n # (Deprecated: see 'expression' above) Parses the body as JSON, then checks against the given condition expression\n #json:\n #- description: Explanation of what the check does\n # condition:\n # equals:\n # myField: expectedValue\n\n # The ingest pipeline ID associated with this input. If this is set, it\n # overwrites the pipeline option from the Elasticsearch output.\n #pipeline:\n\n # The index name associated with this input. If this is set, it\n # overwrites the index option from the Elasticsearch output.\n #index:\n\n # Set to true to publish fields with null values in events.\n #keep_null: false\n\nheartbeat.scheduler:\n # Limit the number of concurrent tasks executed by heartbeat. The task limit if\n # disabled if set to 0. The default is 0.\n #limit: 0\n\n # Set the scheduler to its time zone\n #location: ''\n\nheartbeat.jobs:\n # Limit the number of concurrent monitors executed by heartbeat. This differs from\n # heartbeat.scheduler.limit in that it maps to individual monitors rather than the \n # subtasks of monitors. For non-browser monitors, a subtask usually corresponds to a\n # single file descriptor.\n # This feature is most useful for the browser type\n #browser.limit: 1\n #http.limit: 10\n #tcp.limit: 10\n #icmp.limit: 10\n# ================================== General ===================================\n\n# The name of the shipper that publishes the network data. 
It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n# If this option is not defined, the hostname is used.\n#name:\n\n# The tags of the shipper are included in their field with each\n# transaction published. Tags make it easy to group servers by different\n# logical properties.\n#tags: ["service-X", "web-tier"]\n\n# Optional fields that you can specify to add additional information to the\n# output. Fields can be scalar values, arrays, dictionaries, or any nested\n# combination of these.\n#fields:\n# env: staging\n\n# If this option is set to true, the custom fields are stored as top-level\n# fields in the output document instead of being grouped under a field\n# sub-dictionary. Default is false.\n#fields_under_root: false\n\n# Configure the precision of all timestamps in Heartbeat.\n# Available options: millisecond, microsecond, nanosecond\n#timestamp.precision: millisecond\n\n# Internal queue configuration for buffering events to be published.\n# Queue settings may be overridden by performance presets in the\n# Elasticsearch output. To configure them manually use "preset: custom".\n#queue:\n # Queue type by name (default 'mem')\n # The memory queue will present all available events (up to the outputs\n # bulk_max_size) to the output, the moment the output is ready to serve\n # another batch of events.\n #mem:\n # Max number of events the queue can buffer.\n #events: 3200\n\n # Hints the minimum number of events stored in the queue,\n # before providing a batch of events to the outputs.\n # The default value is set to 2048.\n # A value of 0 ensures events are immediately available\n # to be sent to the outputs.\n #flush.min_events: 1600\n\n # Maximum duration after which events are available to the outputs,\n # if the number of events stored in the queue is < `flush.min_events`.\n #flush.timeout: 10s\n\n # The disk queue stores incoming events on disk until the output is\n # ready for them. 
This allows a higher event limit than the memory-only\n # queue and lets pending events persist through a restart.\n #disk:\n # The directory path to store the queue's data.\n #path: "${path.data}/diskqueue"\n\n # The maximum space the queue should occupy on disk. Depending on\n # input settings, events that exceed this limit are delayed or discarded.\n #max_size: 10GB\n\n # The maximum size of a single queue data file. Data in the queue is\n # stored in smaller segments that are deleted after all their events\n # have been processed.\n #segment_size: 1GB\n\n # The number of events to read from disk to memory while waiting for\n # the output to request them.\n #read_ahead: 512\n\n # The number of events to accept from inputs while waiting for them\n # to be written to disk. If event data arrives faster than it\n # can be written to disk, this setting prevents it from overflowing\n # main memory.\n #write_ahead: 2048\n\n # The duration to wait before retrying when the queue encounters a disk\n # write error.\n #retry_interval: 1s\n\n # The maximum length of time to wait before retrying on a disk write\n # error. If the queue encounters repeated errors, it will double the\n # length of its retry interval each time, up to this maximum.\n #max_retry_interval: 30s\n\n# Sets the maximum number of CPUs that can be executed simultaneously. The\n# default is the number of logical CPUs available in the system.\n#max_procs:\n\n# ================================= Processors =================================\n\n# Processors are used to reduce the number of fields in the exported event or to\n# enhance the event with external metadata. 
This section defines a list of\n# processors that are applied one by one and the first one receives the initial\n# event:\n#\n# event -> filter1 -> event1 -> filter2 ->event2 ...\n#\n# The supported processors are drop_fields, drop_event, include_fields,\n# decode_json_fields, and add_cloud_metadata.\n#\n# For example, you can use the following processors to keep the fields that\n# contain CPU load percentages, but remove the fields that contain CPU ticks\n# values:\n#\n#processors:\n# - include_fields:\n# fields: ["cpu"]\n# - drop_fields:\n# fields: ["cpu.user", "cpu.system"]\n#\n# The following example drops the events that have the HTTP response code 200:\n#\n#processors:\n# - drop_event:\n# when:\n# equals:\n# http.code: 200\n#\n# The following example renames the field a to b:\n#\n#processors:\n# - rename:\n# fields:\n# - from: "a"\n# to: "b"\n#\n# The following example tokenizes the string into fields:\n#\n#processors:\n# - dissect:\n# tokenizer: "%{key1} - %{key2}"\n# field: "message"\n# target_prefix: "dissect"\n#\n# The following example enriches each event with metadata from the cloud\n# provider about the host machine. 
It works on EC2, GCE, DigitalOcean,\n# Tencent Cloud, and Alibaba Cloud.\n#\n#processors:\n# - add_cloud_metadata: ~\n#\n# The following example enriches each event with the machine's local time zone\n# offset from UTC.\n#\n#processors:\n# - add_locale:\n# format: offset\n#\n# The following example enriches each event with docker metadata, it matches\n# given fields to an existing container id and adds info from that container:\n#\n#processors:\n# - add_docker_metadata:\n# host: "unix:///var/run/docker.sock"\n# match_fields: ["system.process.cgroup.id"]\n# match_pids: ["process.pid", "process.parent.pid"]\n# match_source: true\n# match_source_index: 4\n# match_short_id: false\n# cleanup_timeout: 60\n# labels.dedot: false\n# # To connect to Docker over TLS you must specify a client and CA certificate.\n# #ssl:\n# # certificate_authority: "/etc/pki/root/ca.pem"\n# # certificate: "/etc/pki/client/cert.pem"\n# # key: "/etc/pki/client/cert.key"\n#\n# The following example enriches each event with docker metadata, it matches\n# container id from log path available in `source` field (by default it expects\n# it to be /var/lib/docker/containers/*/*.log).\n#\n#processors:\n# - add_docker_metadata: ~\n#\n# The following example enriches each event with host metadata.\n#\n#processors:\n# - add_host_metadata: ~\n#\n# The following example enriches each event with process metadata using\n# process IDs included in the event.\n#\n#processors:\n# - add_process_metadata:\n# match_pids: ["system.process.ppid"]\n# target: system.process.parent\n#\n# The following example decodes fields containing JSON strings\n# and replaces the strings with valid JSON objects.\n#\n#processors:\n# - decode_json_fields:\n# fields: ["field1", "field2", ...]\n# process_array: false\n# max_depth: 1\n# target: ""\n# overwrite_keys: false\n#\n#processors:\n# - decompress_gzip_field:\n# from: "field1"\n# to: "field2"\n# ignore_missing: false\n# fail_on_error: true\n#\n# The following example copies the 
value of the message to message_copied\n#\n#processors:\n# - copy_fields:\n# fields:\n# - from: message\n# to: message_copied\n# fail_on_error: true\n# ignore_missing: false\n#\n# The following example truncates the value of the message to 1024 bytes\n#\n#processors:\n# - truncate_fields:\n# fields:\n# - message\n# max_bytes: 1024\n# fail_on_error: false\n# ignore_missing: true\n#\n# The following example preserves the raw message under event.original\n#\n#processors:\n# - copy_fields:\n# fields:\n# - from: message\n# to: event.original\n# fail_on_error: false\n# ignore_missing: true\n# - truncate_fields:\n# fields:\n# - event.original\n# max_bytes: 1024\n# fail_on_error: false\n# ignore_missing: true\n#\n# The following example URL-decodes the value of field1 to field2\n#\n#processors:\n# - urldecode:\n# fields:\n# - from: "field1"\n# to: "field2"\n# ignore_missing: false\n# fail_on_error: true\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Heartbeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. 
The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Array of hosts to connect to.\n # Scheme and port can be left out and will be set to the default (http and 9200)\n # In case you specify and additional path, the scheme is required: http://localhost:9200/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200\n hosts: ["localhost:9200"]\n\n # Performance presets configure other output fields to recommended values\n # based on a performance priority.\n # Options are "balanced", "throughput", "scale", "latency" and "custom".\n # Default if unspecified: "custom"\n preset: balanced\n\n # Set gzip compression level. Set to 0 to disable compression.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 1.\n #compression_level: 1\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n # Dictionary of HTTP parameters to pass within the URL with index operations.\n #parameters:\n #param1: value1\n #param2: value2\n\n # Number of workers per Elasticsearch host.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n #worker: 1\n\n # If set to true and multiple hosts are configured, the output plugin load\n # balances published events onto all Elasticsearch hosts. 
If set to false,\n # the output plugin sends all events to only one host (determined at random)\n # and will switch to another host if the currently selected one becomes\n # unreachable. The default value is true.\n #loadbalance: true\n\n # Optional data stream or index name. The default is "heartbeat-%{[agent.version]}".\n # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.\n #index: "heartbeat-%{[agent.version]}"\n\n # Optional ingest pipeline. By default, no pipeline will be used.\n #pipeline: ""\n\n # Optional HTTP path\n #path: "/elasticsearch"\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Proxy server URL\n #proxy_url: http://proxy:3128\n\n # Whether to disable proxy settings for outgoing connections. If true, this\n # takes precedence over both the proxy_url field and any environment settings\n # (HTTP_PROXY, HTTPS_PROXY). The default is false.\n #proxy_disable: false\n\n # The number of times a particular Elasticsearch index operation is attempted. If\n # the indexing operation doesn't succeed after this many retries, the events are\n # dropped. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Elasticsearch bulk API index request.\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 1600.\n #bulk_max_size: 1600\n\n # The number of seconds to wait before trying to reconnect to Elasticsearch\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Elasticsearch after a network error. 
The default is 60s.\n #backoff.max: 60s\n\n # The maximum amount of time an idle connection will remain idle\n # before closing itself. Zero means use the default of 60s. The\n # format is a Go language duration (example 60s is 60 seconds).\n # This field may conflict with performance presets. To set it\n # manually use "preset: custom".\n # The default is 3s.\n # idle_connection_timeout: 3s\n\n # Configure HTTP request timeout before failing a request to Elasticsearch.\n #timeout: 90\n\n # Prevents heartbeat from connecting to older Elasticsearch versions when set to `false`\n #allow_older_versions: true\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n # Enables restarting heartbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. 
Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/elastic.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Number of workers per Logstash host.\n #worker: 1\n\n # Set gzip compression level.\n #compression_level: 3\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Optional maximum time to live for a connection to Logstash, after which the\n # connection will be re-established. A value of `0s` (the default) will\n # disable this feature.\n #\n # Not yet supported for async connections (i.e. with the "pipelining" option set)\n #ttl: 30s\n\n # Optionally load-balance events between Logstash hosts. Default is false.\n #loadbalance: false\n\n # Number of batches to be sent asynchronously to Logstash while processing\n # new batches.\n #pipelining: 2\n\n # If enabled only a subset of events in a batch of events is transferred per\n # transaction. The number of events to be sent increases up to `bulk_max_size`\n # if no error is encountered.\n #slow_start: false\n\n # The number of seconds to wait before trying to reconnect to Logstash\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. 
The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Logstash after a network error. The default is 60s.\n #backoff.max: 60s\n\n # Optional index name. The default index name is set to heartbeat\n # in all lowercase.\n #index: 'heartbeat'\n\n # SOCKS5 proxy server URL\n #proxy_url: socks5://user:password@socks5-server:2233\n\n # Resolve names locally when using a proxy server. Defaults to false.\n #proxy_use_local_resolver: false\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. 
Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enables restarting heartbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, the events are typically dropped.\n # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting\n # and retry until all events are published. Set max_retries to a value less\n # than 0 to retry until all events are published. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Logstash request. The\n # default is 2048.\n #bulk_max_size: 2048\n\n # The number of seconds to wait for responses from the Logstash server before\n # timing out. The default is 30s.\n #timeout: 30s\n\n# -------------------------------- Kafka Output --------------------------------\n#output.kafka:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # The list of Kafka broker addresses from which to fetch the cluster metadata.\n # The cluster metadata contain the actual Kafka brokers events are published\n # to.\n #hosts: ["localhost:9092"]\n\n # The Kafka topic used for produced events. The setting can be a format string\n # using any event field. To set the topic from document type use `%{[type]}`.\n #topic: beats\n\n # The Kafka event key setting. Use format string to create a unique event key.\n # By default no event key will be generated.\n #key: ''\n\n # The Kafka event partitioning strategy. 
Default hashing strategy is `hash`\n # using the `output.kafka.key` setting or randomly distributes events if\n # `output.kafka.key` is not configured.\n #partition.hash:\n # If enabled, events will only be published to partitions with reachable\n # leaders. Default is false.\n #reachable_only: false\n\n # Configure alternative event field names used to compute the hash value.\n # If empty `output.kafka.key` setting will be used.\n # Default value is empty list.\n #hash: []\n\n # Authentication details. Password is required if username is set.\n #username: ''\n #password: ''\n\n # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.\n # Defaults to PLAIN when `username` and `password` are configured.\n #sasl.mechanism: ''\n\n # Kafka version Heartbeat is assumed to run against. Defaults to the "1.0.0".\n #version: '1.0.0'\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Metadata update configuration. Metadata contains leader information\n # used to decide which broker to use when publishing.\n #metadata:\n # Max metadata request retry attempts when cluster is in middle of leader\n # election. Defaults to 3 retries.\n #retry.max: 3\n\n # Wait time between retries during leader elections. Default is 250ms.\n #retry.backoff: 250ms\n\n # Refresh metadata interval. Defaults to every 10 minutes.\n #refresh_frequency: 10m\n\n # Strategy for fetching the topics metadata from the broker. Default is false.\n #full: false\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, events are typically dropped.\n # Some Beats, such as Filebeat, ignore the max_retries setting and retry until\n # all events are published. Set max_retries to a value less than 0 to retry\n # until all events are published. 
The default is 3.\n #max_retries: 3\n\n # The number of seconds to wait before trying to republish to Kafka\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to republish. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful publish, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to republish to\n # Kafka after a network error. The default is 60s.\n #backoff.max: 60s\n\n # The maximum number of events to bulk in a single Kafka request. The default\n # is 2048.\n #bulk_max_size: 2048\n\n # Duration to wait before sending bulk Kafka request. 0 is no delay. The default\n # is 0.\n #bulk_flush_frequency: 0s\n\n # The number of seconds to wait for responses from the Kafka brokers before\n # timing out. The default is 30s.\n #timeout: 30s\n\n # The maximum duration a broker will wait for number of required ACKs. The\n # default is 10s.\n #broker_timeout: 10s\n\n # The number of messages buffered for each Kafka broker. The default is 256.\n #channel_buffer_size: 256\n\n # The keep-alive period for an active network connection. If 0s, keep-alives\n # are disabled. The default is 0 seconds.\n #keep_alive: 0\n\n # Sets the output compression codec. Must be one of none, snappy and gzip. The\n # default is gzip.\n #compression: gzip\n\n # Set the compression level. Currently only gzip provides a compression level\n # between 0 and 9. The default value is chosen by the compression algorithm.\n #compression_level: 4\n\n # The maximum permitted size of JSON-encoded messages. Bigger messages will be\n # dropped. The default value is 1000000 (bytes). This value should be equal to\n # or less than the broker's message.max.bytes.\n #max_message_bytes: 1000000\n\n # The ACK reliability level required from broker. 0=no response, 1=wait for\n # local commit, -1=wait for all replicas to commit. The default is 1. 
Note:\n # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently\n # on error.\n #required_acks: 1\n\n # The configurable ClientID used for logging, debugging, and auditing\n # purposes. The default is "beats".\n #client_id: beats\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enables restarting heartbeat if any file listed by `key`,\n # `certificate`, or `certificate_authorities` is modified.\n # This feature IS NOT supported on Windows.\n #ssl.restart_on_cert_change.enabled: false\n\n # Period to scan for changes on CA certificate files\n #ssl.restart_on_cert_change.period: 1m\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. 
Available options: keytab, password.\n  #kerberos.auth_type: password\n\n  # Path to the keytab file. It is used when auth_type is set to keytab.\n  #kerberos.keytab: /etc/security/keytabs/kafka.keytab\n\n  # Path to the Kerberos configuration.\n  #kerberos.config_path: /etc/krb5.conf\n\n  # The service name. Service principal name is constructed from\n  # service_name/hostname@realm.\n  #kerberos.service_name: kafka\n\n  # Name of the Kerberos user.\n  #kerberos.username: elastic\n\n  # Password of the Kerberos user. It is used when auth_type is set to password.\n  #kerberos.password: changeme\n\n  # Kerberos realm.\n  #kerberos.realm: ELASTIC\n\n  # Enables Kerberos FAST authentication. This may\n  # conflict with certain Active Directory configurations.\n  #kerberos.enable_krb5_fast: false\n\n# -------------------------------- Redis Output --------------------------------\n#output.redis:\n  # Boolean flag to enable or disable the output module.\n  #enabled: true\n\n  # Configure JSON encoding\n  #codec.json:\n    # Pretty-print JSON event\n    #pretty: false\n\n    # Configure escaping HTML symbols in strings.\n    #escape_html: false\n\n  # The list of Redis servers to connect to. If load-balancing is enabled, the\n  # events are distributed to the servers in the list. If one server becomes\n  # unreachable, the events are distributed to the reachable servers only.\n  # The hosts setting supports redis and rediss urls with custom password like\n  # redis://:password@localhost:6379.\n  #hosts: ["localhost:6379"]\n\n  # The name of the Redis list or channel the events are published to. The\n  # default is heartbeat.\n  #key: heartbeat\n\n  # The password to authenticate to Redis with. The default is no authentication.\n  #password:\n\n  # The Redis database number where the events are published. The default is 0.\n  #db: 0\n\n  # The Redis data type to use for publishing events. If the data type is list,\n  # the Redis RPUSH command is used. If the data type is channel, the Redis\n  # PUBLISH command is used. 
The default value is list.\n #datatype: list\n\n # The number of workers to use for each host configured to publish events to\n # Redis. Use this setting along with the loadbalance option. For example, if\n # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each\n # host).\n #worker: 1\n\n # If set to true and multiple hosts or workers are configured, the output\n # plugin load balances published events onto all Redis hosts. If set to false,\n # the output plugin sends all events to only one host (determined at random)\n # and will switch to another host if the currently selected one becomes\n # unreachable. The default value is true.\n #loadbalance: true\n\n # The Redis connection timeout in seconds. The default is 5 seconds.\n #timeout: 5s\n\n # The number of times to retry publishing an event after a publishing failure.\n # After the specified number of retries, the events are typically dropped.\n # Some Beats, such as Filebeat, ignore the max_retries setting and retry until\n # all events are published. Set max_retries to a value less than 0 to retry\n # until all events are published. The default is 3.\n #max_retries: 3\n\n # The number of seconds to wait before trying to reconnect to Redis\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Redis after a network error. The default is 60s.\n #backoff.max: 60s\n\n # The maximum number of events to bulk in a single Redis request or pipeline.\n # The default is 2048.\n #bulk_max_size: 2048\n\n # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. 
The\n # value must be a URL with a scheme of socks5://.\n #proxy_url:\n\n # This option determines whether Redis hostnames are resolved locally when\n # using a proxy. The default value is false, which means that name resolution\n # occurs on the proxy server.\n #proxy_use_local_resolver: false\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n# -------------------------------- File Output ---------------------------------\n#output.file:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n # Path to the directory where to save the generated files. The option is\n # mandatory.\n #path: "/tmp/heartbeat"\n\n # Name of the generated files. 
The default is `heartbeat` and it generates\n # files: `heartbeat-{datetime}.ndjson`, `heartbeat-{datetime}-1.ndjson`, etc.\n #filename: heartbeat\n\n # Maximum size in kilobytes of each file. When this size is reached, and on\n # every Heartbeat restart, the files are rotated. The default value is 10240\n # kB.\n #rotate_every_kb: 10000\n\n # Maximum number of files under path. When this number of files is reached,\n # the oldest file is deleted and the rest are shifted from last to first. The\n # default is 7 files.\n #number_of_files: 7\n\n # Permissions to use for file creation. The default is 0600.\n #permissions: 0600\n \n # Configure automatic file rotation on every startup. The default is true.\n #rotate_on_startup: true\n\n# ------------------------------- Console Output -------------------------------\n#output.console:\n # Boolean flag to enable or disable the output module.\n #enabled: true\n\n # Configure JSON encoding\n #codec.json:\n # Pretty-print JSON event\n #pretty: false\n\n # Configure escaping HTML symbols in strings.\n #escape_html: false\n\n# =================================== Paths ====================================\n\n# The home path for the Heartbeat installation. This is the default base path\n# for all other path settings and for miscellaneous files that come with the\n# distribution (for example, the sample dashboards).\n# If not set by a CLI flag or in the configuration file, the default for the\n# home path is the location of the binary.\n#path.home:\n\n# The configuration path for the Heartbeat installation. This is the default\n# base path for configuration files, including the main YAML configuration file\n# and the Elasticsearch template file. If not set by a CLI flag or in the\n# configuration file, the default for the configuration path is the home path.\n#path.config: ${path.home}\n\n# The data path for the Heartbeat installation. This is the default base path\n# for all the files in which Heartbeat needs to store its data. 
If not set by a\n# CLI flag or in the configuration file, the default for the data path is a data\n# subdirectory inside the home path.\n#path.data: ${path.home}/data\n\n# The logs path for a Heartbeat installation. This is the default location for\n# the Beat's log files. If not set by a CLI flag or in the configuration file,\n# the default for the logs path is a logs subdirectory inside the home path.\n#path.logs: ${path.home}/logs\n\n# ================================== Keystore ==================================\n\n# Location of the Keystore containing the keys and their sensitive values.\n#keystore.path: "${path.config}/beats.keystore"\n\n# ================================= Dashboards =================================\n\n# These settings control loading the sample dashboards to the Kibana index. Loading\n# the dashboards are disabled by default and can be enabled either by setting the\n# options here or by using the `-setup` CLI flag or the `setup` command.\n#setup.dashboards.enabled: false\n\n# The directory from where to read the dashboards. The default is the `kibana`\n# folder in the home path.\n#setup.dashboards.directory: ${path.home}/kibana\n\n# The URL from where to download the dashboard archive. It is used instead of\n# the directory if it has a value.\n#setup.dashboards.url:\n\n# The file archive (zip file) from where to read the dashboards. It is used instead\n# of the directory when it has a value.\n#setup.dashboards.file:\n\n# In case the archive contains the dashboards from multiple Beats, this lets you\n# select which one to load. You can load all the dashboards in the archive by\n# setting this to the empty string.\n#setup.dashboards.beat: heartbeat\n\n# The name of the Kibana index to use for setting the configuration. Default is ".kibana"\n#setup.dashboards.kibana_index: .kibana\n\n# The Elasticsearch index name. This overwrites the index name defined in the\n# dashboards and index pattern. 
Example: testbeat-*\n#setup.dashboards.index:\n\n# Always use the Kibana API for loading the dashboards instead of autodetecting\n# how to install the dashboards by first querying Elasticsearch.\n#setup.dashboards.always_kibana: false\n\n# If true and Kibana is not reachable at the time when dashboards are loaded,\n# it will retry to reconnect to Kibana instead of exiting with an error.\n#setup.dashboards.retry.enabled: false\n\n# Duration interval between Kibana connection retries.\n#setup.dashboards.retry.interval: 1s\n\n# Maximum number of retries before exiting with an error, 0 for unlimited retrying.\n#setup.dashboards.retry.maximum: 0\n\n# ================================== Template ==================================\n\n# A template is used to set the mapping in Elasticsearch\n# By default template loading is enabled and the template is loaded.\n# These settings can be adjusted to load your own template or overwrite existing ones.\n\n# Set to false to disable template loading.\n#setup.template.enabled: true\n\n# Template name. By default the template name is "heartbeat-%{[agent.version]}"\n# The template name and pattern has to be set in case the Elasticsearch index pattern is modified.\n#setup.template.name: "heartbeat-%{[agent.version]}"\n\n# Template pattern. By default the template pattern is "heartbeat-%{[agent.version]}" to apply to the default index settings.\n# The template name and pattern has to be set in case the Elasticsearch index pattern is modified.\n#setup.template.pattern: "heartbeat-%{[agent.version]}"\n\n# Path to fields.yml file to generate the template\n#setup.template.fields: "${path.config}/fields.yml"\n\n# A list of fields to be added to the template and Kibana index pattern. Also\n# specify setup.template.overwrite: true to overwrite the existing template.\n#setup.template.append_fields:\n#- name: field_name\n# type: field_type\n\n# Enable JSON template loading. 
If this is enabled, the fields.yml is ignored.\n#setup.template.json.enabled: false\n\n# Path to the JSON template file\n#setup.template.json.path: "${path.config}/template.json"\n\n# Name under which the template is stored in Elasticsearch\n#setup.template.json.name: ""\n\n# Set this option if the JSON template is a data stream.\n#setup.template.json.data_stream: false\n\n# Overwrite existing template\n# Do not enable this option for more than one instance of heartbeat as it might\n# overload your Elasticsearch with too many update requests.\n#setup.template.overwrite: false\n\n# Elasticsearch template settings\nsetup.template.settings:\n\n # A dictionary of settings to place into the settings.index dictionary\n # of the Elasticsearch template. For more details, please check\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html\n #index:\n #number_of_shards: 1\n #codec: best_compression\n\n # A dictionary of settings for the _source field. For more details, please check\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html\n #_source:\n #enabled: false\n\n# ====================== Index Lifecycle Management (ILM) ======================\n\n# Configure index lifecycle management (ILM) to manage the backing indices\n# of your data streams.\n\n# Enable ILM support. Valid values are true, or false.\n#setup.ilm.enabled: true\n\n# Set the lifecycle policy name. The default policy name is\n# 'beatname'.\n#setup.ilm.policy_name: "mypolicy"\n\n# The path to a JSON file that contains a lifecycle policy configuration. Used\n# to load your own lifecycle policy.\n#setup.ilm.policy_file:\n\n# Disable the check for an existing lifecycle policy. The default is true.\n# If you set this option to false, lifecycle policy will not be installed,\n# even if setup.ilm.overwrite is set to true.\n#setup.ilm.check_exists: true\n\n# Overwrite the lifecycle policy at startup. 
The default is false.\n#setup.ilm.overwrite: false\n\n# ======================== Data Stream Lifecycle (DSL) =========================\n\n# Configure Data Stream Lifecycle to manage data streams while connected to Serverless elasticsearch. \n# These settings are mutually exclusive with ILM settings which are not supported in Serverless projects.\n\n# Enable DSL support. Valid values are true, or false.\n#setup.dsl.enabled: true\n\n# Set the lifecycle policy name or pattern. For DSL, this name must match the data stream that the lifecycle is for.\n# The default data stream pattern is heartbeat-%{[agent.version]}"\n# The template string `%{[agent.version]}` will resolve to the current stack version. \n# The other possible template value is `%{[beat.name]}`.\n#setup.dsl.data_stream_pattern: "heartbeat-%{[agent.version]}"\n\n# The path to a JSON file that contains a lifecycle policy configuration. Used\n# to load your own lifecycle policy.\n# If no custom policy is specified, a default policy with a lifetime of 7 days will be created.\n#setup.dsl.policy_file:\n\n# Disable the check for an existing lifecycle policy. The default is true. If\n# you disable this check, set setup.dsl.overwrite: true so the lifecycle policy\n# can be installed.\n#setup.dsl.check_exists: true\n\n# Overwrite the lifecycle policy at startup. 
The default is false.\n#setup.dsl.overwrite: false\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify and additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Optional protocol and basic auth credentials.\n #protocol: "https"\n #username: "elastic"\n #password: "changeme"\n\n # Optional HTTP path\n #path: ""\n\n # Optional Kibana space ID.\n #space.id: ""\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. 
It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n\n# ================================== Logging ===================================\n\n# There are four options for the log output: file, stderr, syslog, eventlog\n# The file output is the default.\n\n# Sets log level. 
The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: info\n\n# Enable debug output for selected components. To enable all selectors use ["*"]\n# Other available selectors are "beat", "publisher", "service"\n# Multiple selectors can be chained.\n#logging.selectors: [ ]\n\n# Send all logging output to stderr. The default is false.\n#logging.to_stderr: false\n\n# Send all logging output to syslog. The default is false.\n#logging.to_syslog: false\n\n# Send all logging output to Windows Event Logs. The default is false.\n#logging.to_eventlog: false\n\n# If enabled, Heartbeat periodically logs its internal metrics that have changed\n# in the last period. For each metric that changed, the delta from the value at\n# the beginning of the period is logged. Also, the total values for\n# all non-zero internal metrics are logged on shutdown. The default is true.\n#logging.metrics.enabled: true\n\n# The period after which to log the internal metrics. The default is 30s.\n#logging.metrics.period: 30s\n\n# A list of metrics namespaces to report in the logs. Defaults to [stats].\n# `stats` contains general Beat metrics. `dataset` may be present in some\n# Beats and contains module or input metrics.\n#logging.metrics.namespaces: [stats]\n\n# Logging to rotating files. Set logging.to_files to false to disable logging to\n# files.\nlogging.to_files: true\nlogging.files:\n # Configure the path where the logs are written. The default is the logs directory\n # under the home path (the binary location).\n #path: /var/log/heartbeat\n\n # The name of the files where the logs are written to.\n #name: heartbeat\n\n # Configure log file size limit. If the limit is reached, log file will be\n # automatically rotated.\n #rotateeverybytes: 10485760 # = 10MB\n\n # Number of rotated log files to keep. The oldest files will be deleted first.\n #keepfiles: 7\n\n # The permissions mask to apply when rotating log files. 
The default value is 0600.\n # Must be a valid Unix-style file permissions mask expressed in octal notation.\n #permissions: 0600\n\n # Enable log file rotation on time intervals in addition to the size-based rotation.\n # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h\n # are boundary-aligned with minutes, hours, days, weeks, months, and years as\n # reported by the local system clock. All other intervals are calculated from the\n # Unix epoch. Defaults to disabled.\n #interval: 0\n\n # Rotate existing logs on startup rather than appending them to the existing\n # file. Defaults to true.\n # rotateonstartup: true\n\n#=============================== Events Logging ===============================\n# Some outputs will log raw events on errors like indexing errors in the\n# Elasticsearch output, to prevent logging raw events (that may contain\n# sensitive information) together with other log messages, a different\n# log file, only for log entries containing raw events, is used. It will\n# use the same level, selectors and all other configurations from the\n# default logger, but it will have it's own file configuration.\n#\n# Having a different log file for raw events also prevents event data\n# from drowning out the regular log files.\n#\n# IMPORTANT: No matter the default logger output configuration, raw events\n# will **always** be logged to a file configured by `logging.event_data.files`.\n\n# logging.event_data:\n# Logging to rotating files. Set logging.to_files to false to disable logging to\n# files.\n#logging.event_data.to_files: true\n#logging.event_data:\n # Configure the path where the logs are written. The default is the logs directory\n # under the home path (the binary location).\n #path: /var/log/heartbeat\n\n # The name of the files where the logs are written to.\n #name: heartbeat-events-data\n\n # Configure log file size limit. 
If the limit is reached, log file will be\n # automatically rotated.\n #rotateeverybytes: 5242880 # = 5MB\n\n # Number of rotated log files to keep. The oldest files will be deleted first.\n #keepfiles: 2\n\n # The permissions mask to apply when rotating log files. The default value is 0600.\n # Must be a valid Unix-style file permissions mask expressed in octal notation.\n #permissions: 0600\n\n # Enable log file rotation on time intervals in addition to the size-based rotation.\n # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h\n # are boundary-aligned with minutes, hours, days, weeks, months, and years as\n # reported by the local system clock. All other intervals are calculated from the\n # Unix epoch. Defaults to disabled.\n #interval: 0\n\n # Rotate existing logs on startup rather than appending them to the existing\n # file. Defaults to false.\n # rotateonstartup: false\n\n# ============================= X-Pack Monitoring ==============================\n# Heartbeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Heartbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. 
Most settings from the\n# Elasticsearch output are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n # Array of hosts to connect to.\n # Scheme and port can be left out and will be set to the default (http and 9200)\n # In case you specify an additional path, the scheme is required: http://localhost:9200/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200\n #hosts: ["localhost:9200"]\n\n # Set gzip compression level.\n #compression_level: 0\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "beats_system"\n #password: "changeme"\n\n # Dictionary of HTTP parameters to pass within the URL with index operations.\n #parameters:\n #param1: value1\n #param2: value2\n\n # Custom HTTP headers to add to each request\n #headers:\n # X-My-Header: Contents of the header\n\n # Proxy server url\n #proxy_url: http://proxy:3128\n\n # The number of times a particular Elasticsearch index operation is attempted. If\n # the indexing operation doesn't succeed after this many retries, the events are\n # dropped. The default is 3.\n #max_retries: 3\n\n # The maximum number of events to bulk in a single Elasticsearch bulk API index request.\n # The default is 50.\n #bulk_max_size: 50\n\n # The number of seconds to wait before trying to reconnect to Elasticsearch\n # after a network error. After waiting backoff.init seconds, the Beat\n # tries to reconnect. If the attempt fails, the backoff timer is increased\n # exponentially up to backoff.max. 
After a successful connection, the backoff\n # timer is reset. The default is 1s.\n #backoff.init: 1s\n\n # The maximum number of seconds to wait before attempting to connect to\n # Elasticsearch after a network error. The default is 60s.\n #backoff.max: 60s\n\n # Configure HTTP request timeout before failing a request to Elasticsearch.\n #timeout: 90\n\n # Use SSL settings for HTTPS.\n #ssl.enabled: true\n\n # Controls the verification of certificates. Valid values are:\n # * full, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate.\n # * strict, which verifies that the provided certificate is signed by a trusted\n # authority (CA) and also verifies that the server's hostname (or IP address)\n # matches the names identified within the certificate. If the Subject Alternative\n # Name is empty, it returns an error.\n # * certificate, which verifies that the provided certificate is signed by a\n # trusted authority (CA), but does not perform any hostname verification.\n # * none, which performs no verification of the server's certificate. This\n # mode disables many of the security benefits of SSL/TLS and should only be used\n # after very careful consideration. It is primarily intended as a temporary\n # diagnostic mechanism when attempting to resolve TLS errors; its use in\n # production environments is strongly discouraged.\n # The default value is full.\n #ssl.verification_mode: full\n\n # List of supported/valid TLS versions. 
By default all TLS versions from 1.1\n # up to 1.3 are enabled.\n #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]\n\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client certificate key\n #ssl.key: "/etc/pki/client/cert.key"\n\n # Optional passphrase for decrypting the certificate key.\n #ssl.key_passphrase: ''\n\n # Configure cipher suites to be used for SSL connections\n #ssl.cipher_suites: []\n\n # Configure curve types for ECDHE-based cipher suites\n #ssl.curve_types: []\n\n # Configure what types of renegotiation are supported. Valid options are\n # never, once, and freely. Default is never.\n #ssl.renegotiation: never\n\n # Configure a pin that can be used to do extra validation of the verified certificate chain,\n # this allow you to ensure that a specific certificate is used to validate the chain of trust.\n #\n # The pin is a base64 encoded string of the SHA-256 fingerprint.\n #ssl.ca_sha256: ""\n\n # A root CA HEX encoded fingerprint. During the SSL handshake if the\n # fingerprint matches the root CA certificate, it will be added to\n # the provided list of root CAs (`certificate_authorities`), if the\n # list is empty or not defined, the matching certificate will be the\n # only one in the list. Then the normal SSL validation happens.\n #ssl.ca_trusted_fingerprint: ""\n\n # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.\n #kerberos.enabled: true\n\n # Authentication type to use with Kerberos. Available options: keytab, password.\n #kerberos.auth_type: password\n\n # Path to the keytab file. 
It is used when auth_type is set to keytab.\n #kerberos.keytab: /etc/elastic.keytab\n\n # Path to the Kerberos configuration.\n #kerberos.config_path: /etc/krb5.conf\n\n # Name of the Kerberos user.\n #kerberos.username: elastic\n\n # Password of the Kerberos user. It is used when auth_type is set to password.\n #kerberos.password: changeme\n\n # Kerberos realm.\n #kerberos.realm: ELASTIC\n\n #metrics.period: 10s\n #state.period: 1m\n\n# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`\n# setting. You can find the value for this setting in the Elastic Cloud web UI.\n#monitoring.cloud.id:\n\n# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`\n# and `monitoring.elasticsearch.password` settings. The format is `<user>:<pass>`.\n#monitoring.cloud.auth:\n\n# =============================== HTTP Endpoint ================================\n\n# Each beat can expose internal metrics through an HTTP endpoint. For security\n# reasons the endpoint is disabled by default. This feature is currently experimental.\n# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output\n# append ?pretty to the URL.\n\n# Defines if the HTTP endpoint is enabled.\n#http.enabled: false\n\n# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe.\n# When using IP addresses, it is recommended to only use localhost.\n#http.host: localhost\n\n# Port on which the HTTP endpoint will bind. Default is 5066.\n#http.port: 5066\n\n# Define which user should be owning the named pipe.\n#http.named_pipe.user:\n\n# Define which permissions should be applied to the named pipe, use the Security\n# Descriptor Definition Language (SDDL) to define the permission. 
This option cannot be used with\n# `http.user`.\n#http.named_pipe.security_descriptor:\n\n# Defines if the HTTP pprof endpoints are enabled.\n# It is recommended that this is only enabled on localhost as these endpoints may leak data.\n#http.pprof.enabled: false\n\n# Controls the fraction of goroutine blocking events that are reported in the\n# blocking profile.\n#http.pprof.block_profile_rate: 0\n\n# Controls the fraction of memory allocations that are recorded and reported in\n# the memory profile.\n#http.pprof.mem_profile_rate: 524288\n\n# Controls the fraction of mutex contention events that are reported in the\n# mutex profile.\n#http.pprof.mutex_profile_rate: 0\n\n# ============================== Process Security ==============================\n\n# Enable or disable seccomp system call filtering on Linux. Default is enabled.\n#seccomp.enabled: true\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the heartbeat.\n#instrumentation:\n # Set to true to enable instrumentation of heartbeat.\n #enabled: false\n\n # Environment in which heartbeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n # Enable profiling of the server, recording profile samples as events.\n #\n # This feature is experimental.\n #profiling:\n #cpu:\n # Set to true to enable CPU profiling.\n #enabled: false\n #interval: 60s\n #duration: 10s\n #heap:\n # Set to true to enable heap profiling.\n #enabled: false\n #interval: 60s\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration aliases\n#migration.6_to_7.enabled: false\n\n# =============================== Feature Flags 
================================\n\n# Enable and configure feature flags.\n#features:\n# fqdn:\n# enabled: true\n\n
dataset_sample\yaml\go\heartbeat.reference.yml
heartbeat.reference.yml
YAML
74,352
0.75
0.082262
0.973941
vue-tools
141
2023-07-22T07:32:48.043140
GPL-3.0
false
b6d164a76dafcc56aa00f9d83d91a95e
################### Heartbeat Configuration Example #########################\n\n# This file is an example configuration file highlighting only some common options.\n# The heartbeat.reference.yml file in the same directory contains all the supported options\n# with detailed comments. You can use it for reference.\n#\n# You can find the full configuration reference here:\n# https://www.elastic.co/guide/en/beats/heartbeat/index.html\n\n############################# Heartbeat ######################################\n\n# Define a directory from which to load monitor definitions. Definitions take the form\n# of individual yaml files.\nheartbeat.config.monitors:\n # Directory + glob pattern to search for configuration files\n path: ${path.config}/monitors.d/*.yml\n # If enabled, heartbeat will periodically check the config.monitors path for changes\n reload.enabled: false\n # How often to check for changes\n reload.period: 5s\n\n# Configure monitors inline\nheartbeat.monitors:\n- type: http\n # Set enabled to true (or delete the following line) to enable this monitor\n enabled: false\n # ID used to uniquely identify this monitor in Elasticsearch even if the config changes\n id: my-monitor\n # Human readable display name for this service in Uptime UI and elsewhere\n name: My Monitor\n # List of URLs to query\n urls: ["http://localhost:9200"]\n # Configure task schedule\n schedule: '@every 10s'\n # Total test connection and data exchange timeout\n #timeout: 16s\n # Name of corresponding APM service, if Elastic APM is in use for the monitored service.\n #service.name: my-apm-service-name\n\n# Experimental: Set this to true to run heartbeat monitors exactly once at startup\n#heartbeat.run_once: true\n\n# ======================= Elasticsearch template setting =======================\n\nsetup.template.settings:\n index.number_of_shards: 1\n index.codec: best_compression\n #_source.enabled: false\n\n# ================================== General 
===================================\n\n# The name of the shipper that publishes the network data. It can be used to group\n# all the transactions sent by a single shipper in the web interface.\n#name:\n\n# The tags of the shipper are included in their field with each\n# transaction published.\n#tags: ["service-X", "web-tier"]\n\n# Optional fields that you can specify to add additional information to the\n# output.\n#fields:\n# env: staging\n\n\n# =================================== Kibana ===================================\n\n# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.\n# This requires a Kibana endpoint configuration.\nsetup.kibana:\n\n # Kibana Host\n # Scheme and port can be left out and will be set to the default (http and 5601)\n # In case you specify and additional path, the scheme is required: http://localhost:5601/path\n # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601\n #host: "localhost:5601"\n\n # Kibana Space ID\n # ID of the Kibana Space into which the dashboards should be loaded. By default,\n # the Default Space will be used.\n #space.id:\n\n# =============================== Elastic Cloud ================================\n\n# These settings simplify using Heartbeat with the Elastic Cloud (https://cloud.elastic.co/).\n\n# The cloud.id setting overwrites the `output.elasticsearch.hosts` and\n# `setup.kibana.host` options.\n# You can find the `cloud.id` in the Elastic Cloud web UI.\n#cloud.id:\n\n# The cloud.auth setting overwrites the `output.elasticsearch.username` and\n# `output.elasticsearch.password` settings. 
The format is `<user>:<pass>`.\n#cloud.auth:\n\n# ================================== Outputs ===================================\n\n# Configure what output to use when sending the data collected by the beat.\n\n# ---------------------------- Elasticsearch Output ----------------------------\noutput.elasticsearch:\n # Array of hosts to connect to.\n hosts: ["localhost:9200"]\n\n # Performance preset - one of "balanced", "throughput", "scale",\n # "latency", or "custom".\n preset: balanced\n\n # Protocol - either `http` (default) or `https`.\n #protocol: "https"\n\n # Authentication credentials - either API key or username/password.\n #api_key: "id:api_key"\n #username: "elastic"\n #password: "changeme"\n\n# ------------------------------ Logstash Output -------------------------------\n#output.logstash:\n # The Logstash hosts\n #hosts: ["localhost:5044"]\n\n # Optional SSL. By default is off.\n # List of root certificates for HTTPS server verifications\n #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]\n\n # Certificate for SSL client authentication\n #ssl.certificate: "/etc/pki/client/cert.pem"\n\n # Client Certificate Key\n #ssl.key: "/etc/pki/client/cert.key"\n\n# ================================= Processors =================================\n\nprocessors:\n - add_observer_metadata:\n # Optional, but recommended geo settings for the location Heartbeat is running in\n #geo:\n # Token describing this location\n #name: us-east-1a\n # Lat, Lon "\n #location: "37.926868, -78.024902"\n\n\n# ================================== Logging ===================================\n\n# Sets log level. The default log level is info.\n# Available log levels are: error, warning, info, debug\n#logging.level: debug\n\n# At debug level, you can selectively enable logging only for some components.\n# To enable all selectors, use ["*"]. 
Examples of other selectors are "beat",\n# "publisher", "service".\n#logging.selectors: ["*"]\n\n# ============================= X-Pack Monitoring ==============================\n# Heartbeat can export internal metrics to a central Elasticsearch monitoring\n# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The\n# reporting is disabled by default.\n\n# Set to true to enable the monitoring reporter.\n#monitoring.enabled: false\n\n# Sets the UUID of the Elasticsearch cluster under which monitoring data for this\n# Heartbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch\n# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.\n#monitoring.cluster_uuid:\n\n# Uncomment to send the metrics to Elasticsearch. Most settings from the\n# Elasticsearch outputs are accepted here as well.\n# Note that the settings should point to your Elasticsearch *monitoring* cluster.\n# Any setting that is not set is automatically inherited from the Elasticsearch\n# output configuration, so if you have the Elasticsearch output configured such\n# that it is pointing to your Elasticsearch monitoring cluster, you can simply\n# uncomment the following line.\n#monitoring.elasticsearch:\n\n# ============================== Instrumentation ===============================\n\n# Instrumentation support for the heartbeat.\n#instrumentation:\n # Set to true to enable instrumentation of heartbeat.\n #enabled: false\n\n # Environment in which heartbeat is running on (eg: staging, production, etc.)\n #environment: ""\n\n # APM Server hosts to report instrumentation results to.\n #hosts:\n # - http://localhost:8200\n\n # API Key for the APM Server(s).\n # If api_key is set then secret_token will be ignored.\n #api_key:\n\n # Secret token for the APM Server(s).\n #secret_token:\n\n\n# ================================= Migration ==================================\n\n# This allows to enable 6.7 migration 
aliases\n#migration.6_to_7.enabled: true\n\n
dataset_sample\yaml\go\heartbeat.yml
heartbeat.yml
YAML
7,387
0.95
0.083744
0.869281
node-utils
785
2024-08-19T09:17:36.888651
MIT
false
eafb9f1a5cccd1698e9d9df740f0fe6d
organization: dapr\ndefaultSticker: clrqh1xny39170fl75cawk0h5\nstickers:\n -\n id: clrqh1xny39170fl75cawk0h5\n alias: runtime-badge\n
dataset_sample\yaml\go\holopin.yml
holopin.yml
YAML
134
0.7
0
0
react-lib
336
2023-09-11T20:12:48.310471
Apache-2.0
false
bff089b3b71792be800fb696e4aea2d3
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: BUSL-1.1\n\nrules:\n - id: "http-endpoint-request-decode-error-code"\n patterns:\n - pattern: |\n if err := decodeBody(...); err != nil {\n return nil, CodedError(...)\n }\n - pattern-not-inside: |\n if err := decodeBody(...); err != nil {\n return nil, CodedError(400, ...)\n }\n - pattern-not-inside: |\n if err := decodeBody(...); err != nil {\n return nil, CodedError(http.StatusBadRequest, ...)\n }\n message: "HTTP endpoint request decode should return http.StatusBadRequest"\n languages:\n - "go"\n severity: "ERROR"\n paths:\n include:\n - "command/agent/*_endpoint.go"\n
dataset_sample\yaml\go\http_endpoint.yml
http_endpoint.yml
YAML
746
0.8
0.12
0.083333
react-lib
735
2024-10-06T12:01:20.497130
GPL-3.0
false
92197363ae037021bde6e9567f75ddb4
# Copyright (c) HashiCorp, Inc.\n# SPDX-License-Identifier: MPL-2.0\n\nrules:\n - id: "disallow-imports"\n patterns:\n - pattern: '"github.com/boltdb/bolt"'\n - pattern: '"github.com/pkg/errors"'\n - pattern: '"github.com/hashicorp/consul"'\n - pattern: '"github.com/hashicorp/consul/command/flags"'\n - pattern: '"github.com/hashicorp/consul/sdk"'\n - pattern: '"github.com/hashicorp/go-set"'\n - pattern: '"github.com/mitchellh/cli"'\n - pattern: '"golang.org/x/exp/slices"'\n - pattern: '"golang.org/x/exp/maps"'\n - pattern: '"golang.org/x/exp/constraints"'\n message: "Import of this package has been disallowed"\n languages:\n - "generic"\n severity: "ERROR"\n paths:\n include:\n - "*.go"\n
dataset_sample\yaml\go\imports.yml
imports.yml
YAML
757
0.95
0
0.090909
python-kit
963
2025-07-05T12:17:17.946018
MIT
false
40c59a6ce1410fa683c48e48b9f925c5
# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json\n\nenv:\n # Other deps\n ASDF_MAGE_VERSION: 1.15.0\n\nsteps:\n - label: ":package: :test_tube: Package and Validate Iron Bank for x-pack/heartbeat"\n command: |\n set -euo pipefail\n export BEAT_VERSION=$$(make get-version)\n echo "~~~ Using BEAT_VERSION: $$BEAT_VERSION"\n cd x-pack/heartbeat\n echo "--- Package x-pack/heartbeat for the artifacts consumed by the Iron Bank docker context"\n make -C ironbank package\n echo "--- Prepare the Iron Bank container context"\n mage ironbank\n echo "--- Build and validate the Iron Bank docker context for x-pack/heartbeat"\n make -C ironbank validate-ironbank\n agents:\n provider: "gcp"\n image: "family/platform-ingest-beats-ubuntu-2204"\n machineType: "n2-standard-8"\n artifact_paths:\n - "x-pack/heartbeat/build/distributions/*"\n - "x-pack/heartbeat/build/heartbeat-ironbank-*-docker-build-context/*"\n
dataset_sample\yaml\go\ironbank-validation.yml
ironbank-validation.yml
YAML
1,024
0.8
0.115385
0.083333
vue-tools
189
2024-08-31T18:10:44.455964
MIT
false
2a3097974d5ff1d7177855da6ceba2fe
# List of labels which should be assigned to issues based on a regex\nwindows:\n # info prints OsArch: ...\n # version prints OS/Arch: ...\n - 'O[Ss]\/?Arch:\s*windows'\nmacos:\n # info prints OsArch: ...\n # version prints OS/Arch: ...\n - 'O[Ss]\/?Arch:\s*darwin'\n\nremote:\n # we cannot use multiline regex so we check for serviceIsRemote in podman info\n - 'serviceIsRemote:\strue'\n
dataset_sample\yaml\go\issue-labeler.yml
issue-labeler.yml
YAML
383
0.8
0.076923
0.5
react-lib
86
2023-08-15T19:11:47.609979
GPL-3.0
false
7b09efba240afe0d978d97487c717cd4
when:\n branches: true ## for all the branches\n changeset: ## when PR contains any of those entries in the changeset\n - "^winlogbeat/.*"\n - "@ci" ## special token regarding the changeset for the ci\n - "@oss" ## special token regarding the changeset for the oss\n comments: ## when PR comment contains any of those entries\n - "/test winlogbeat"\n labels: ## when PR labels matches any of those entries\n - "winlogbeat"\n parameters: ## when parameter was selected in the UI.\n - "winlogbeat"\n tags: true ## for all the tags\nplatform: "immutable && ubuntu-22" ## default label for all the stages\nstages:\n crosscompile:\n make: "make -C winlogbeat crosscompile"\n stage: mandatory\n windows-2022:\n mage: "mage build unitTest"\n platforms: ## override default labels in this specific stage.\n - "windows-2022"\n stage: mandatory\n windows-2019:\n mage: "mage build unitTest"\n platforms: ## override default labels in this specific stage.\n - "windows-2019"\n stage: mandatory\n windows-2016:\n mage: "mage build unitTest"\n platforms: ## override default labels in this specific stage.\n - "windows-2016"\n stage: mandatory\n windows-2012:\n mage: "mage build unitTest"\n platforms: ## override default labels in this specific stage.\n - "windows-2012-r2"\n stage: extended_win\n windows-11:\n mage: "mage build unitTest"\n platforms: ## override default labels in this specific stage.\n - "windows-11"\n stage: extended_win\n windows-10:\n mage: "mage build unitTest"\n platforms: ## override default labels in this specific stage.\n - "windows-10"\n stage: extended_win\n windows-8:\n mage: "mage build unitTest"\n platforms: ## override default labels in this specific stage.\n - "windows-8"\n stage: extended_win\n packaging-linux:\n packaging-linux: "mage package"\n e2e:\n enabled: false\n stage: packaging\n when:\n branches: false ## Only on a PR basis for the time being\n tags: false ## packaging on branches/tags is already in place with the downstream build.\n changeset: ## when 
PR contains any of those entries in the changeset\n - "^winlogbeat/.*"\n - "@oss" ## special token regarding the changeset for the oss\n
dataset_sample\yaml\go\Jenkinsfile.yml
Jenkinsfile.yml
YAML
2,712
0.8
0.109375
0
vue-tools
891
2024-05-07T22:24:29.103924
Apache-2.0
false
0cc3f6e2e7d005d4dafb4974ad6e6d85
wtf:\n colors:\n border:\n focusable: darkslateblue\n focused: orange\n normal: gray\n grid:\n columns: [32, 32, 32, 32, 32, 32]\n rows: [10, 10, 10, 10, 10, 10]\n refreshInterval: 2\n mods:\n kubernetes:\n enabled: true\n kubeconfig: /Users/testuser/.kube/config\n namespaces: ["demo", "kube-system"]\n objects: ["nodes","deployments", "pods"]\n position:\n top: 0\n left: 0\n height: 6\n width: 3\n
dataset_sample\yaml\go\kubernetes_config.yml
kubernetes_config.yml
YAML
463
0.7
0
0
python-kit
84
2024-09-02T06:08:54.058009
GPL-3.0
false
93b41a2c69e663059cccd51d2a7dcdda