| code | repo_path |
|---|---|
---
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-trigger-git_poll/146/
timestamp: 2016-03-22 11:40:10 UTC
duration: 49m27s
active_duration: 1h24m18s
parameters: {}
change:
git_remote: <EMAIL>:chef/chef-backend.git
git_commit: <PASSWORD>
project: chef-backend
version: 0.2.0+20160322114020.git.239.52e7416
stages:
chef-backend-build/architecture=x86_64,platform=ubuntu-10.04,project=chef-backend,role=builder:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-build/architecture=x86_64,platform=ubuntu-10.04,project=chef-backend,role=builder/174/
duration: 18m58s
chef-backend-test/architecture=x86_64,platform=ubuntu-10.04,project=chef-backend,role=tester:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-test/architecture=x86_64,platform=ubuntu-10.04,project=chef-backend,role=tester/167/
duration: 16m14s
steps:
total: 16m14s
before (setup time): 10m29s
chef-client private-chef::default: 1m55s
after (cleanup time): 1m43s
chef-backend-promote:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-promote/137/
duration: 7s
chef-backend-test:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-test/167/
duration: 20m47s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-test/architecture=x86_64,platform=el-5,project=chef-backend,role=tester/167/
duration: 19m53s
steps:
total: 19m53s
before (setup time): 12m37s
chef-client private-chef::default: 3m16s
after (cleanup time): 1m51s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-test/architecture=x86_64,platform=el-6,project=chef-backend,role=tester/167/
duration: 20m46s
steps:
total: 20m46s
before (setup time): 13m53s
chef-client private-chef::default: 4m25s
after (cleanup time): 19s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-test/architecture=x86_64,platform=el-7,project=chef-backend,role=tester/167/
duration: 16m20s
steps:
total: 16m20s
before (setup time): 10m
chef-client private-chef::default: 2m41s
after (cleanup time): 1m33s
ubuntu-10.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-test/architecture=x86_64,platform=ubuntu-10.04,project=chef-backend,role=tester/167/
duration: 16m14s
steps:
total: 16m14s
before (setup time): 10m29s
chef-client private-chef::default: 1m55s
after (cleanup time): 1m43s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-test/architecture=x86_64,platform=ubuntu-12.04,project=chef-backend,role=tester/167/
duration: 18m
steps:
total: 18m
before (setup time): 11m4s
chef-client private-chef::default: 2m58s
after (cleanup time): 1m51s
ubuntu-14.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-test/architecture=x86_64,platform=ubuntu-14.04,project=chef-backend,role=tester/167/
duration: 16m28s
steps:
total: 16m28s
before (setup time): 10m38s
chef-client private-chef::default: 1m56s
after (cleanup time): 1m47s
chef-backend-build:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-build/174/
duration: 28m6s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-build/architecture=x86_64,platform=el-5,project=chef-backend,role=builder/174/
duration: 27m59s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-build/architecture=x86_64,platform=el-6,project=chef-backend,role=builder/174/
duration: 26m20s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-build/architecture=x86_64,platform=el-7,project=chef-backend,role=builder/174/
duration: 17m51s
ubuntu-10.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-build/architecture=x86_64,platform=ubuntu-10.04,project=chef-backend,role=builder/174/
duration: 18m58s
chef-backend-trigger-git_poll:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-backend-trigger-git_poll/146/
duration: 3s
|
reports/wilson.ci.chef.co/job/chef-backend-trigger-git_poll/146.yaml
|
author: Arfoire#9219
config: "options swap_delay=12 debug=true iteration=1000 duration=110 workers=30 mode=sl;\n\n#Character
builds:\nxiao char lvl=90/90 cons=0 talent=9,9,9;\nxiao add weapon=\"favoniuslance\"
refine=3 lvl=90/90;\nxiao add set=\"viridescentvenerer\" count=4;\nxiao add stats
hp=4780 atk=311 atk%=0.466 anemo%=0.466 cd=0.622; #Main stats\nxiao add stats def%=0.124
def=39.36 hp=507.88 hp%=0.0992 atk=33.08 atk%=0.3968 er=0.3306 em=39.64 cr=0.3972
cd=0.1324;\n\nyaemiko char lvl=90/90 cons=0 talent=9,9,9; \nyaemiko add weapon=\"thewidsith\"
refine=3 lvl=90/90;\nyaemiko add set=\"shimenawasreminiscence\" count=2;\nyaemiko
add set=\"thunderingfury\" count=2;\nyaemiko add stats hp=4780 atk=311 atk%=0.466
cr=0.311 electro%=0.466 ; #main\nyaemiko add stats def%=0.124 def=39.36 hp=507.88
hp%=0.0992 atk=33.08 atk%=0.0992 er=0.4408 em=39.64 cr=0.2648 cd=0.662;\n\nfischl
char lvl=90/90 cons=6 talent=9,9,9;\nfischl add weapon=\"thestringless\" refine=3
lvl=90/90;\nfischl add set=\"thunderingfury\" count=2;\nfischl add set=\"shimenawasreminiscence\"
count=2;\nfischl add stats hp=4780 atk=311 atk%=0.466 cr=0.311 electro%=0.466 ;
#main\nfischl add stats def%=0.124 def=39.36 hp=507.88 hp%=0.0992 atk=33.08 atk%=0.1984
er=0.1102 em=39.64 cr=0.331 cd=0.7944;\n\nalbedo char lvl=90/90 cons=0 talent=9,9,9;
\nalbedo add weapon=\"cinnabarspindle\" refine=5 lvl=90/90;\nalbedo add set=\"huskofopulentdreams\"
count=4 +params=[stacks=4];\nalbedo add stats hp=4780 atk=311 def%=0.583 geo%=0.466
cr=0.311 ; #main\nalbedo add stats def%=0.248 def=39.36 hp=507.88 hp%=0.0992 atk=33.08
atk%=0.0992 er=0.1102 em=39.64 cr=0.331 cd=0.7944;\n\n#Enemies and Particles:\ntarget
lvl=100 resist=.1;\nenergy every interval=480,720 amount=1;\n\n#Action List:\n\nactive
xiao;\n\nyae skill:3;\nfischl attack:2, skill;\nalbedo skill;\nyae burst, skill:3;\nfischl
attack:2, burst;\nxiao skill, skill, burst,\n high_plunge[plunge_hits=1],\n high_plunge[plunge_hits=1],\n
\ high_plunge[plunge_hits=1],\n high_plunge[plunge_hits=1],\n high_plunge[plunge_hits=1],\n
\ high_plunge[plunge_hits=1],\n high_plunge[plunge_hits=1],\n high_plunge[plunge_hits=1],\n
\ high_plunge[plunge_hits=1],\n high_plunge[plunge_hits=1],\n high_plunge[plunge_hits=1]\n
\ ;\nrestart;\n"
description: Xiao Double Geo, but you forgot the second Geo, so now you have Yae instead
of heals and shields.
hash: 24e9cf95fe2842b70f9881e8fd3b801d89519e05
team:
- name: albedo
con: 0
weapon: cinnabarspindle
refine: 5
er: 0.1102
talents:
attack: 9
skill: 9
burst: 9
- name: fischl
con: 6
weapon: thestringless
refine: 3
er: 0.1102
talents:
attack: 9
skill: 9
burst: 9
- name: xiao
con: 0
weapon: favoniuslance
refine: 3
er: 0.3306
talents:
attack: 9
skill: 9
burst: 9
- name: yaemiko
con: 0
weapon: thewidsith
refine: 3
er: 0.4408
talents:
attack: 9
skill: 9
burst: 9
dps: 32002.50702934092
mode: sl
duration: 109.99999999999949
target_count: 1
viewer_key: <KEY>
|
db/Xiao/abfsxaya.yaml
|
nameWithType: KeyVaultClient.backupKeyAsync
type: method
members:
- fullName: com.microsoft.azure.keyvault.KeyVaultClient.backupKeyAsync(String vaultBaseUrl, String keyName, final ServiceCallback<BackupKeyResult> serviceCallback)
name: backupKeyAsync(String vaultBaseUrl, String keyName, final ServiceCallback<BackupKeyResult> serviceCallback)
nameWithType: KeyVaultClient.backupKeyAsync(String vaultBaseUrl, String keyName, final ServiceCallback<BackupKeyResult> serviceCallback)
parameters:
- description: <p>The vault name, e.g. <a href="https://myvault.vault.azure.net">https://myvault.vault.azure.net</a></p>
name: vaultBaseUrl
type: <xref href="String?alt=String&text=String" data-throw-if-not-resolved="False"/>
- description: <p>The name of the key </p>
name: keyName
type: <xref href="String?alt=String&text=String" data-throw-if-not-resolved="False"/>
- description: <p>the async ServiceCallback to handle successful and failed responses. </p>
name: serviceCallback
type: final ServiceCallback<<xref href="com.microsoft.azure.keyvault.models.BackupKeyResult?alt=com.microsoft.azure.keyvault.models.BackupKeyResult&text=BackupKeyResult" data-throw-if-not-resolved="False"/>>
returns:
description: <p>the <xref uid="" data-throw-if-not-resolved="false" data-raw-source="ServiceFuture"></xref> object </p>
type: ServiceFuture<<xref href="com.microsoft.azure.keyvault.models.BackupKeyResult?alt=com.microsoft.azure.keyvault.models.BackupKeyResult&text=BackupKeyResult" data-throw-if-not-resolved="False"/>>
summary: >-
<p>Requests that a backup of the specified key be downloaded to the client.</p>
<p></p>
syntax: public ServiceFuture<BackupKeyResult> backupKeyAsync(String vaultBaseUrl, String keyName, final ServiceCallback<BackupKeyResult> serviceCallback)
uid: com.microsoft.azure.keyvault.KeyVaultClient.backupKeyAsync(String,String,final ServiceCallback<BackupKeyResult>)
uid: com.microsoft.azure.keyvault.KeyVaultClient.backupKeyAsync*
fullName: com.microsoft.azure.keyvault.KeyVaultClient.backupKeyAsync
name: backupKeyAsync(String vaultBaseUrl, String keyName, final ServiceCallback<BackupKeyResult> serviceCallback)
package: com.microsoft.azure.keyvault
metadata: {}
|
legacy/docs-ref-autogen/com.microsoft.azure.keyvault.KeyVaultClient.backupKeyAsync.yml
|
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "konga.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "konga.name" . }}
helm.sh/chart: {{ include "konga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "konga.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "konga.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
volumes:
- configMap:
defaultMode: 420
name: {{ include "konga.fullname" . }}-snapshot
name: {{ include "konga.fullname" . }}-snapshot
{{- if .Values.extraVolumes }}
{{ toYaml .Values.extraVolumes | indent 8 }}
{{- end }}
containers:
- name: konga
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 1337
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
envFrom:
- configMapRef:
name: {{ include "konga.fullname" . }}-config
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- mountPath: /opt/clabs/files/
name: {{ include "konga.fullname" . }}-snapshot
{{- if .Values.extraVolumeMounts }}
{{ toYaml .Values.extraVolumeMounts | nindent 12 }}
{{- end }}
- name: restore-snapshot
image: jcortejoso/httpie-jq:latest
imagePullPolicy: IfNotPresent
stdin: true
tty: true
command:
- /bin/sh
- -c
args:
- |
while :; do
# Check if konga api is ready
if http HEAD localhost:1337 --follow --check-status >/dev/null 2>&1; then
break
fi
done
# Check if snapshot already exists
snapshot_name="$(jq -r '.name' /opt/clabs/files/snapshot.json)"
if http GET localhost:1337/api/snapshot | jq -e -r ".[] | select (.name==\"$snapshot_name\")" >/dev/null 2>&1; then
echo "Snapshot already present"
else
# Upload snapshot
http POST localhost:1337/api/snapshot Authorization:'Bearer noauthtoken' 'Content-Type: application/json' < /opt/clabs/files/snapshot.json
# Restore snapshot
## The connection id field can be non-unique; take the first match (expected to be 1)
connection_id=$(http GET localhost:1337/api/kongnode | jq -r '.[] | select (.name=="Kong") | .id' | head -n1)
snapshot_id=$(http GET localhost:1337/api/snapshot | jq -e -r ".[] | select (.name==\"$snapshot_name\") | .id")
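# The restore below is issued twice with long pauses in between; presumably the
# second call retries the import in case the first ran before Kong had settled.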
sleep 90
http POST localhost:1337/api/snapshots/$snapshot_id/restore Authorization:'Bearer noauthtoken' "Connection-Id:$connection_id" imports\\:='["services","routes","consumers","plugins","acls","upstreams","certificates","snis"]' token=noauthtoken
sleep 90
http POST localhost:1337/api/snapshots/$snapshot_id/restore Authorization:'Bearer noauthtoken' "Connection-Id:$connection_id" imports\\:='["services","routes","consumers","plugins","acls","upstreams","certificates","snis"]' token=noauthtoken
fi
tail -f /dev/null
volumeMounts:
- mountPath: /opt/clabs/files/
name: {{ include "konga.fullname" . }}-snapshot
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
|
packages/helm-charts/konga/templates/deployment.yaml
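
A minimal, hypothetical values.yaml sketch for this chart — the keys mirror what the template above references; the image name and all values are illustrative assumptions, not taken from the chart:

```yaml
# Hypothetical values for the konga deployment template (illustrative only)
replicaCount: 1
image:
  repository: pantsel/konga   # assumed image; substitute the real repository
  tag: "0.14.9"               # illustrative tag
  pullPolicy: IfNotPresent
resources: {}
extraVolumes: []        # extra pod volumes, appended verbatim by the template
extraVolumeMounts: []   # extra mounts for the konga container
nodeSelector: {}
affinity: {}
tolerations: []
```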
|
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: grafana
namespace: argocd
spec:
destination:
namespace: monitoring
server: https://kubernetes.default.svc
project: default
source:
chart: grafana
repoURL: https://grafana.github.io/helm-charts
targetRevision: 6.15.0
helm:
values: |
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://prometheus-k8s:9090
isDefault: true
- name: Loki
type: loki
url: http://loki:3100
- name: CloudWatch
type: cloudwatch
jsonData:
authType: default
defaultRegion: ap-northeast-1
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: default
orgId: 1
folder:
type: file
disableDeletion: true
editable: false
options:
path: /var/lib/grafana/dashboards/default
dashboards:
default:
Amazon RDS:
gnetId: 11264
revision: 2
Amazon EC2:
gnetId: 11265
revision: 2
envFromSecret: grafana-secret
grafana.ini:
server:
root_url: https://grafana.cloudnativedays.jp
auth.generic_oauth:
enabled: true
allow_sign_up: true
team_ids:
allowed_organizations:
name: Auth0
scopes: openid profile email
auth_url: https://dreamkast.us.auth0.com/authorize
token_url: https://dreamkast.us.auth0.com/oauth/token
api_url: https://dreamkast.us.auth0.com/userinfo
role_attribute_strict: true
role_attribute_path: contains("https://cloudnativedays.jp/roles", 'CNDT2021-Admin') && 'Admin'
parameters:
- name: image.tag
value: "8.1.6"
- name: persistence.enabled
value: "true"
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: grafana-ingress
namespace: argocd
spec:
destination:
namespace: argocd
server: https://kubernetes.default.svc
project: default
source:
path: manifests/infra/grafana-ingress/overlays/production
repoURL: https://github.com/cloudnativedaysjp/dreamkast-infra.git
targetRevision: main
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true
|
manifests/argocd-apps/prd/grafana.yaml
|
name: Release
on:
release:
types:
- published
jobs:
build:
runs-on: ubuntu-latest
container: ghcr.io/sitkoru/actions-container
steps:
- uses: actions/checkout@v2.3.5
- name: Prepare
id: prep
run: |
VERSION=${GITHUB_REF#refs/tags/}
echo ::set-output name=version::${VERSION}
- name: Install .NET Core 3.1
uses: actions/setup-dotnet@v1.8.2
with:
dotnet-version: '3.1.x'
- name: Install .NET 5.0.x
uses: actions/setup-dotnet@v1.8.2
with:
dotnet-version: '5.0.x'
- name: Install .NET 6.0.x
uses: actions/setup-dotnet@v1.8.2
with:
dotnet-version: '6.0.x'
include-prerelease: true
- name: Install node.js
uses: actions/setup-node@v2.4.1
with:
node-version: '15'
cache: 'npm'
cache-dependency-path: |
src/Sitko.Core.Blazor.AntDesign/Web/package.json
- name: Build Sitko.Core.Blazor.AntDesign js
working-directory: src/Sitko.Core.Blazor.AntDesign/Web
shell: bash
run: |
npm ci
npm run prod
- name: Create packages
run: dotnet pack -c Release /p:Version=${{ steps.prep.outputs.version }} -o $(pwd)/packages
- name: Push to Nuget
run: find packages -type f -name '*.nupkg' -print0 | xargs -0 -n 10 -P 4 dotnet nuget push -s https://api.nuget.org/v3/index.json -k ${{ secrets.NUGET_API_KEY }}
- name: Download Sitko.Core
uses: actions/checkout@v2.3.5
with:
repository: "sitkoru/Sitko.Core"
path: proto
ref: "proto"
token: ${{ secrets.BOT_TOKEN }}
- name: Publish proto archive
run: |
tar czf proto/proto.tar.gz -C src/Sitko.Core.Grpc/Proto .
echo ${{ steps.prep.outputs.version }} > proto/VERSION
cd proto
git config user.name ${{ secrets.BOT_NAME }}
git config user.email ${{ secrets.BOT_EMAIL }}
git add proto.tar.gz
git add VERSION
git commit -m "publish proto archive ${{ steps.prep.outputs.version }}"
git push
|
.github/workflows/release.yml
|
version: 2
jobs:
test:
working_directory: ~/decide
parallelism: 1
shell: /bin/bash --login
docker:
- image: circleci/ruby:2.7.1-node-browsers
environment:
RAILS_ENV: test
RACK_ENV: test
PGHOST: 127.0.0.1
PGUSER: postgres
- image: circleci/postgres:9.6-ram
environment:
POSTGRES_USER: postgres
POSTGRES_DB: decide_test
POSTGRES_PASSWORD: postgres
steps:
- checkout
- restore_cache:
keys:
- v1-dep-{{ checksum "Gemfile.lock" }}
- v1-dep-bundle-
- run: bundle check --path=vendor/bundle || bundle install --path=vendor/bundle --jobs=4 --retry=3
- save_cache:
key: v1-dep-{{ checksum "Gemfile.lock" }}
paths:
- vendor/bundle
- run:
command: bundle exec rspec --color --format documentation --require spec_helper --require rails_helper spec
environment:
RAILS_GROUPS: assets
- store_artifacts:
path: coverage
prefix: tests
cypress:
working_directory: ~/decide
parallelism: 1
shell: /bin/bash --login
docker:
- image: circleci/ruby:2.7.1-node-browsers
environment:
RAILS_ENV: development
RACK_ENV: development
PGHOST: 127.0.0.1
PGUSER: postgres
- image: circleci/postgres:9.6-ram
environment:
POSTGRES_USER: postgres
POSTGRES_DB: decide_development
POSTGRES_PASSWORD: postgres
- image: circleci/redis:6-alpine
steps:
- checkout
- run: npm install
- restore_cache:
keys:
- v1-dep-{{ checksum "Gemfile.lock" }}
- v1-dep-bundle-
- run: bundle check --path=vendor/bundle || bundle install --path=vendor/bundle --jobs=4 --retry=3
- save_cache:
key: v1-dep-{{ checksum "Gemfile.lock" }}
paths:
- vendor/bundle
- run: bundle exec rake db:reset
- run:
name: Rails server
command: bundle exec rails s
background: true
- run: sleep 5 ; npm run cy:run:recorded
build:
docker:
- image: circleci/ruby:2.7.1-node-browsers
steps:
- checkout
- run:
name: Setup VirtualEnv
command: |
echo 'export TAG=${CIRCLE_BRANCH}' >> $BASH_ENV
echo 'export IMAGE_NAME=decide' >> $BASH_ENV
echo 'export ORG_NAME=exploradoresdemadrid' >> $BASH_ENV
- setup_remote_docker:
docker_layer_caching: true
- run:
name: Build & push to DockerHub
command: |
docker build -f Dockerfile-prod -t $ORG_NAME/$IMAGE_NAME:$TAG .
echo $DOCKER_PWD | docker login -u $DOCKER_LOGIN --password-stdin
docker push $ORG_NAME/$IMAGE_NAME:$TAG
deploy:
docker:
- image: ictu/sshpass
steps:
- run:
name: Deploy to server
command: |
sshpass -p $SERVER_PASSWORD ssh -o StrictHostKeyChecking=no $SERVER_USER@$SERVER_ADDRESS "cd /home/ubuntu/deployments/scripts && ./deploy-prod.sh"
workflows:
version: 2
build:
jobs:
- test
- cypress
- build:
filters:
branches:
only:
- master
- deploy:
requires:
- build
- test
- cypress
filters:
branches:
only:
- master
|
.circleci/config.yml
|
{% set version = "1.4-1" %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-sombrero
version: {{ version|replace("-", "_") }}
source:
url:
- {{ cran_mirror }}/src/contrib/SOMbrero_{{ version }}.tar.gz
- {{ cran_mirror }}/src/contrib/Archive/SOMbrero/SOMbrero_{{ version }}.tar.gz
sha256: e2b2364da9f68dd34ed7190138b5f5d8c34fe9f8fdd8be3f151f1e0372473c2d
build:
merge_build_host: true # [win]
number: 0
noarch: generic
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- {{ posix }}zip # [win]
host:
- r-base
- r-ggplot2
- r-ggwordcloud
- r-igraph >=1.0
- r-interp
- r-markdown
- r-metr
- r-scatterplot3d
- r-shiny
run:
- r-base
- r-ggplot2
- r-ggwordcloud
- r-igraph >=1.0
- r-interp
- r-markdown
- r-metr
- r-scatterplot3d
- r-shiny
test:
commands:
- $R -e "library('SOMbrero')" # [not win]
- "\"%R%\" -e \"library('SOMbrero')\"" # [win]
about:
home: https://CRAN.R-project.org/package=SOMbrero
license: GPL-2.0-or-later
summary: The stochastic (also called on-line) version of the Self-Organising Map (SOM) algorithm is provided. Different versions of the algorithm are implemented, for numeric and relational data and for contingency tables as described, respectively, in Kohonen (2001) <isbn:3-540-67921-9>, Olteanu & Villa-Vialaneix (2005)
<doi:10.1016/j.neucom.2013.11.047> and Cottrell et al (2004) <doi:10.1016/j.neunet.2004.07.010>. The package also contains many plotting features (to help the user interpret the results) and a graphical user interface based on 'shiny'.
license_family: GPL3
license_file:
- {{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2
extra:
recipe-maintainers:
- conda-forge/r
# Package: SOMbrero
# Title: SOM Bound to Realize Euclidean and Relational Outputs
# Version: 1.2-4
# Date: 2019-03-07
# Authors@R: c(person("Nathalie", "Vialaneix", role = c("aut", "cre"), email="<EMAIL>"), person("Jerome", "Mariette", role= "aut", email="<EMAIL>"), person("Madalina", "Olteanu", role = "aut", email="<EMAIL>"), person("Fabrice", "Rossi", role = "aut", email="<EMAIL>"), person("Laura", "Bendhaiba", role = "ctb", email="<EMAIL>"), person("Julien", "Boelaert", role= "ctb", email="<EMAIL>"))
# Maintainer: <NAME> <<EMAIL>>
# Description: The stochastic (also called on-line) version of the Self-Organising Map (SOM) algorithm is provided. Different versions of the algorithm are implemented, for numeric and relational data and for contingency tables as described, respectively, in Kohonen (2001) <isbn:3-540-67921-9>, Olteanu & Villa-Vialaneix (2005) <doi:10.1016/j.neucom.2013.11.047> and Cottrell et al (2004) <doi:10.1016/j.neunet.2004.07.010>. The package also contains many plotting features (to help the user interpret the results) and a graphical user interface based on 'shiny'.
# BugReports: http://github.com/tuxette/sombrero/issues
# Depends: R (>= 3.1.0), igraph (>= 1.0)
# Imports: wordcloud, scatterplot3d, RColorBrewer, shiny, grDevices, graphics, stats
# Suggests: testthat, rmarkdown, knitr
# License: GPL (>= 2)
# Repository: CRAN
# VignetteBuilder: knitr
# RoxygenNote: 6.1.1
# NeedsCompilation: no
# Packaged: 2019-03-07 16:18:07 UTC; nathalie
# Author: <NAME> [aut, cre], <NAME> [aut], <NAME> [aut], <NAME> [aut], <NAME> [ctb], <NAME> [ctb]
# Date/Publication: 2019-03-07 17:32:51 UTC
|
recipe/meta.yaml
|
setLeader: Test your knowledge of sizing
tabLabel: question
questions:
- type: multiple-choice
cardinality: "1+"
correctAnswers: "0,2,4,5,7,8"
stem: Which of the following are valid dimensions?
options:
- content: cm
rationale: "Centimeters, a valid absolute dimension."
- content: ui
rationale: "User interface is not a dimension in CSS."
- content: in
rationale: "Inches, a valid absolute dimension."
- content: 8th
rationale: "Not a CSS dimension"
- content: px
rationale: "Pixels, a valid absolute dimension."
- content: ch
rationale: "Character units, a valid relative dimension."
- content: ux
rationale: "User experience is not a dimension in CSS."
- content: em
rationale: "Letter 'M' units, a valid relative dimension."
- content: ex
rationale: "Letter 'x' units, a valid relative dimension."
- type: multiple-choice
cardinality: "1"
correctAnswers: "1"
stem: How are absolute and relative units different?
options:
- content: "Absolute values can't change but relative units can"
rationale: "Absolute values can change, but the base they
calculate against can't."
- content: "An absolute length is calculated against a single shared base
value, where a relative unit is compared against a base value that can change."
rationale: "Relative units are more adaptive and fluid because of their
contextual awareness, but there's a power and predictability to absolute
units that can be foundational for certain designs."
- type: multiple-choice
cardinality: "1"
correctAnswers: "1"
stem: Viewport units are absolute.
options:
- content: "True"
rationale: "They may feel absolute, but they're relative to a viewport,
which could be an iframe or webview, and isn't always representative of
a device screen size."
- content: "False"
rationale: "They are relative to the document window they were created
in, which may or may not be the same as a device screen."
|
src/site/content/en/learn/css/sizing/sizing.assess.yml
|
name: Ubuntu 20.04
on:
push:
branches:
- main
- "release/*"
pull_request:
types: [assigned, opened, synchronize, reopened, labeled]
env:
UBUNTU_2004_IMAGE: "ghcr.io/gofractally/contract-lab-ubuntu-2004-builder:ccaa037795aca7ee3353d5d8e80a02f2df5b4384"
jobs:
ubuntu-2004-build:
name: Ubuntu 20.04 | Build
runs-on: ubuntu-latest
steps:
- name: Timestamp
id: ccache_cache_timestamp
shell: cmake -P {0}
run: |
string(TIMESTAMP current_date "%Y-%m-%d-%H-%M-%S" UTC)
message("::set-output name=timestamp::${current_date}")
- name: Checkout
uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
- name: Preserve ccache
uses: actions/cache@v1.1.0
with:
path: .ccache
key: ubuntu-20.04-ccache_make-${{ steps.ccache_cache_timestamp.outputs.timestamp }}
restore-keys: |
ubuntu-20.04-ccache_make-
- name: Build
run: |
set -e
export CCACHE_DIR=${GITHUB_WORKSPACE}/.ccache
export CCACHE_CONFIGPATH=${GITHUB_WORKSPACE}/ccache.conf
echo max_size = 600M >${GITHUB_WORKSPACE}/ccache.conf
echo log_file = ${GITHUB_WORKSPACE}/ccache.log >>${GITHUB_WORKSPACE}/ccache.conf
export DOCKER="docker run --rm -v ${GITHUB_WORKSPACE}:${GITHUB_WORKSPACE} -w ${GITHUB_WORKSPACE} -e CCACHE_DIR -e CCACHE_CONFIGPATH --user $(id -u):$(id -g) ${UBUNTU_2004_IMAGE}"
export DOCKER_ROOT="docker run --rm -v ${GITHUB_WORKSPACE}:${GITHUB_WORKSPACE} -w ${GITHUB_WORKSPACE} -e CCACHE_DIR -e CCACHE_CONFIGPATH ${UBUNTU_2004_IMAGE}"
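# Note: ${DOCKER} runs containers as the invoking user so build output stays
# user-owned; ${DOCKER_ROOT} runs as root and is kept for steps that may need it.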
docker pull ${UBUNTU_2004_IMAGE}
echo =====
${DOCKER} ccache -s
echo =====
mkdir build
${DOCKER} bash -c "cd build && cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DCMAKE_C_COMPILER_LAUNCHER=ccache .."
echo =====
${DOCKER} bash -c "cd build && make -j $(nproc)"
echo =====
${DOCKER} bash -c "cd build && ctest -j $(nproc)"
echo =====
ls -la ${GITHUB_WORKSPACE}
echo =====
${DOCKER} ccache -s
echo =====
echo nodeos version:
${DOCKER} build/clsdk/bin/nodeos --version
${DOCKER} build/clsdk/bin/nodeos --full-version
echo =====
bash -c "cd build && tar czf ../clsdk-ubuntu-20-04.tar.gz clsdk"
- name: Upload build
uses: actions/upload-artifact@v1
with:
name: ubuntu-2004-build
path: clsdk-ubuntu-20-04.tar.gz
|
.github/workflows/ubuntu-2004.yml
|
uid: "com.azure.storage.blob.BlobContainerAsyncClient.getBlobVersionAsyncClient*"
fullName: "com.azure.storage.blob.BlobContainerAsyncClient.getBlobVersionAsyncClient"
name: "getBlobVersionAsyncClient"
nameWithType: "BlobContainerAsyncClient.getBlobVersionAsyncClient"
members:
- uid: "com.azure.storage.blob.BlobContainerAsyncClient.getBlobVersionAsyncClient(java.lang.String,java.lang.String)"
fullName: "com.azure.storage.blob.BlobContainerAsyncClient.getBlobVersionAsyncClient(String blobName, String versionId)"
name: "getBlobVersionAsyncClient(String blobName, String versionId)"
nameWithType: "BlobContainerAsyncClient.getBlobVersionAsyncClient(String blobName, String versionId)"
summary: "Creates a new BlobAsyncClient object by concatenating blobName to the end of ContainerAsyncClient's URL. The new BlobAsyncClient uses the same request policy pipeline as the ContainerAsyncClient."
parameters:
- description: "A <code>String</code> representing the name of the blob. If the blob name contains special characters,\n pass in the url encoded version of the blob name."
name: "blobName"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
- description: "the version identifier for the blob, pass <code>null</code> to interact with the latest blob version."
name: "versionId"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
syntax: "public BlobAsyncClient getBlobVersionAsyncClient(String blobName, String versionId)"
returns:
description: "A new <xref uid=\"com.azure.storage.blob.BlobAsyncClient\" data-throw-if-not-resolved=\"false\" data-raw-source=\"BlobAsyncClient\"></xref> object which references the blob with the specified name in this container."
type: "<xref href=\"com.azure.storage.blob.BlobAsyncClient?alt=com.azure.storage.blob.BlobAsyncClient&text=BlobAsyncClient\" data-throw-if-not-resolved=\"False\" />"
type: "method"
metadata: {}
package: "com.azure.storage.blob"
artifact: com.azure:azure-storage-blob:12.11.0-beta.1
|
preview/docs-ref-autogen/com.azure.storage.blob.BlobContainerAsyncClient.getBlobVersionAsyncClient.yml
|
--- !ruby/hash:SeasonHash
title: 鬼斬
watchable: true
thumbnail_url: https://cs1.anime.dmkt-sp.jp/anime_kv/img/20/88/1/20881_1_6.png?1458874826000
outline: 魑魅魍魎が跋扈する異ノ國・日本。「神喰い」と呼ばれる、地の底より生まれし魔物が、万物の指導者である神に牙をむき人々に恐怖を与えていたが、三つの大結界に封印することで、長きに渉る死闘に幕を閉じることが出来た。それから時は流れ。。。何者かによって大結界の一つが破られた。再びの世界の危機に、少女たちが立ち上がった!!静御前・義経・茨木童子・かぐや・あまてらすたちが神喰いサタン討伐を目指す!?各話5分で!!
tags: !ruby/array:Hashie::Array
- !ruby/hash:TagHash
name: SF/ファンタジー
type: genre
- !ruby/hash:TagHash
name: ショート
type: genre
- !ruby/hash:TagHash
name: 小西克幸
type: cast
- !ruby/hash:TagHash
name: 三森すずこ
type: cast
- !ruby/hash:TagHash
name: 松井恵理子
type: cast
- !ruby/hash:TagHash
name: 橘田いずみ
type: cast
- !ruby/hash:TagHash
name: 鈴木愛奈
type: cast
- !ruby/hash:TagHash
name: 原奈津子
type: cast
- !ruby/hash:TagHash
name: 中島由貴
type: cast
- !ruby/hash:TagHash
name: 中根もにゃ
type: cast
- !ruby/hash:TagHash
name: 木下望
type: cast
- !ruby/hash:TagHash
name: 脚本:あおしまたかし
type: staff
- !ruby/hash:TagHash
name: 監督:山本天志
type: staff
- !ruby/hash:TagHash
name: 脚本:鴻野貴光
type: staff
- !ruby/hash:TagHash
name: アニメーション制作:ぴえろプラス
type: staff
- !ruby/hash:TagHash
name: キャラクターデザイン:伊部由起子
type: staff
- !ruby/hash:TagHash
name: 原作:サイバーステップ
type: staff
- !ruby/hash:TagHash
name: 音楽制作:株式会社ブシロードミュージック
type: staff
- !ruby/hash:TagHash
name: 音楽制作:株式会社ビーイング
type: staff
- !ruby/hash:TagHash
name: 製作年代:2010年代
type: other
- !ruby/hash:TagHash
name: 製作年:2016年
type: other
- !ruby/hash:TagHash
name: 本田保則
type: other
- !ruby/hash:TagHash
name: 鴻野貴光
type: other
- !ruby/hash:TagHash
name: 岡篤志
type: other
episodes: !ruby/array:Hashie::Array
- !ruby/hash:EpisodeHash
episode_no: "#1~13"
title: 鬼斬開幕/魍魎跋扈/博多炎上/暗中飛躍/比翼連理/一番富籤/酔生夢死/泡沫之夢/愛別離苦/夢幻泡影/竜闘虎争/気焔万丈/鬼斬閉幕
description: "#1:「神喰い(かみくい)」……それは地の底より湧きいでた災厄である。この世に災厄をもたらす神喰いと戦う少女たちがいた。彼女たちの物語の幕が上がる!?/#2:九州に飛ばされ、鬼ヶ島(おにがしま)を目指す静御前(しずかごぜん)と義経(よしつね)は、再会した仲間の茨木童子(いばらきどうじ)とともに人助けをすることに。/#3:突如、玄海灘沖に現れた大神喰い・メフィスト。メフィストの九州上陸を阻止するため、あまてらすが考え出した作戦とは?/#4:義経とともに、茨木童子のせいでとんでもない目にあった静御前。そんな静御前を付け狙う「御一行」とは一体何者なのか?/#5:サクラとジンはこの物語の主人公である。ふたりが出会い、ともに神喰いと戦うことを選んだ理由が明らかに!?/#6:因幡国にて。店限定のコンビニくじの極レア武器がどうしてもほしいかぐやは、当選率倍増キャンペーンと聞いてさっそく挑戦!!/#7:神喰いサタンにリベンジすべく、鬼ヶ島に戻ってきた一同。ところがそこは、以前の鬼ヶ島とはすっかり様子が変わっていた!?/#8:力をあわせて妖怪を倒した一同だったが、倒れる直前に妖怪がかけた呪いにより、彼女たちの人生は大きく変わってしまう!?/#9:そこは讃岐国にある小さな隠れ里――。村人たちがつつましく静かに暮らすこの場所に、ある日突然やってきたのは? ほか<br><br>静御前:鈴木愛奈/義経:松井恵理子/茨木童子:三森すずこ/かぐや:橘田いずみ/あまてらす:原奈津子/ヴェロニカ:中島由貴/うずめ:中根もにゃ/キジムナー:木下望/サクラ:三森すずこ/ナレーション:小西克幸 ほか<br><br>原作:サイバーステップ/監督:山本天志/シリーズ構成・脚本:鴻野貴光/脚本:あおしま
たかし/脚本:岡 篤志/キャラクターデザイン・総作画監督:伊部由起子/美術監督・美術設定:柴田聡/色彩設計:月舘順子/プロップデザイン:阿萬和俊/音響監督:本田保則/音楽制作:株式会社ブシロードミュージック・株式会社ビーイング/アニメーション制作:ぴえろプラス"
length_seconds: 2742
content_id: so32245003
default_thread_id: 1510284335
channel_id: 2632720
thumbnail_url: http://tn.smilevideo.jp/smile?i=32245003
cast: "[キャスト]<br>静御前:鈴木愛奈/義経:松井恵理子/茨木童子:三森すずこ/かぐや:橘田いずみ/あまてらす:原奈津子/ヴェロニカ:中島由貴/うずめ:中根もにゃ/キジムナー:木下望/サクラ:三森すずこ/ナレーション:小西克幸 ほか"
staff: "[スタッフ]<br>原作:サイバーステップ/監督:山本天志/シリーズ構成・脚本:鴻野貴光/脚本:あおしま たかし/脚本:岡 篤志/キャラクターデザイン・総作画監督:伊部由起子/美術監督・美術設定:柴田聡/色彩設計:月舘順子/プロップデザイン:阿萬和俊/音響監督:本田保則/音楽制作:株式会社ブシロードミュージック・株式会社ビーイング/アニメーション制作:ぴえろプラス"
produced_year: "[製作年]<br>2016年"
copyright: "©サイバーステップ/鬼斬製作委員会"
related_seasons: !ruby/array:Hashie::Array []
|
db/fixtures/seasons/season_00226.yml
|
service: evpnVni
keys:
- vni
show-fields:
- vni
- vlan
- type
- remoteVtepList
- priVtepIp
- secVtepIp
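# In the normalize strings below, each entry maps "source-field: schema-field";
# a trailing "?|value" appears to supply a default when the source is missing.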
apply:
cumulus:
version: all
command: sudo vtysh -c "show evpn vni detail json"
normalize: '*/[
"vni: vni",
"type: type",
"numMacs: numMacs?|0",
"numArpNd: numArpNd?|0",
"numRemoteVteps: remoteVtepList?|[]",
"l2Vnis: l2VniList?|[]",
"advertiseGatewayMacIp: advGateway?|False",
"vtepIp: priVtepIp?|",
"localVtepIp: priVtepIp?|priVtepIp",
"vniFilter: vniFilter?|",
"state: state?|up",
"vxlanInterface: ifname?|",
"routerMac: routerMac?|",
"mcastGroup: mcastGroup?|",
"vrf: vrf",
"vxlanIntf: ifname?|ifname",
]'
eos:
version: all
command: show interfaces Vxlan $
normalize: 'interfaces/*/[
"vlanToVniMap: _vlan2VniMap",
"vlanToVtepList: _vlan2VtepMap?|{}",
"srcIpAddr: priVtepIp",
"name: ifname?|",
"vrfToVniMap: _vrf2VniMap",
"lineProtocolStatus: state",
"replicationMode: replicationType",
"floodMcastGrp: mcastGroup?|",
]'
linux:
copy: cumulus
sonic:
copy: cumulus
junos-qfx:
version: all
command:
- command: show evpn instance extensive | display json | no-more
normalize: 'evpn-instance-information/[0]/evpn-instance/[
"evpn-instance-name/[0]/data: vni?|0",
"bridge-domain-status-table/[0]/bridge-domain/[*]/vlan-id/[0]/data: _vlanList?|[]",
"bridge-domain-status-table/[0]/bridge-domain/[*]/domain-id/[0]/data: _vniList?|[]",
"bridge-domain-status-table/[0]/bridge-domain/[*]/irb-interface/[0]/data: _irbList?|[]",
"irb-interface-status-table/[0]/irb-interface/[*]/irb-interface-name/[0]/data: _irbIfList?|[]",
"irb-interface-status-table/[0]/irb-interface/[*]/irb-interface-l3-context/[0]/data: _vrfList?|[]",
"bridge-domain-status-table/[0]/bridge-domain/[*]/irb-interface/[0]/data: _irbList?|[]",
"evpn-neighbor/[0]/evpn-neighbor-route-information/[*]/evpn-neighbor-address/[0]/data: remoteVtepList?|[]",
"_entryType: _entryType?|instance",
]'
- command: show evpn l3-context extensive | display json | no-more
normalize: 'evpn-l3-context-information/[0]/evpn-l3-context/*/[
"context-name/[0]/data: vrf",
"context-vni/[0]/data: vni?|0",
"context-source-vtep/[0]/vtep-address/[0]/data: priVtepIp?|",
"context-router-mac/[0]/data: routerMac",
"_entryType: _entryType?|l3",
]'
- command: show ethernet-switching vxlan-tunnel-end-point remote | display json | no-more
normalize: 'vxlan-source-vtep-information/[0]/svtep-format/*:source-vtep-address:priVtepIp|vxlan-remote-vtep-information/*/[
"remote-vtep-address/[0]/data: _floodVtepList",
"remote-vtep-mode: _mode",
"remote-vtep-interface-name/[0]/data: vni?|0",
"vxlan-dynamic-information/[0]/vxlan-format/[*]/vn-id/[0]/data: _vniList?|[]",
"vxlan-dynamic-information/[0]/vxlan-format/[*]/multicast-address/[0]/data: replicationType",
"_entryType: _entryType?|remote",
]'
junos-ex:
copy: junos-qfx
junos-es:
copy: junos-qfx
junos-qfx10k:
copy: junos-qfx
nxos:
version: all
command:
- command: show nve vni all detail | json native
normalize: 'TABLE_nve_vni/ROW_nve_vni/*?/[
"if-name: ifname",
"vni: vni",
"type: type",
"vni-state: state",
"vlan-bd: vlan?|0",
"svi-state: _sviState",
"mcast: replicationType",
"_entryType: _entryType?|VNI",
]'
- command: show nve interface nve 1 | json native
normalize: 'TABLE_nve_if/ROW_nve_if/*?/[
"primary-ip: priVtepIp",
"encap-type: encapType",
"source-if: vni",
"if-name: ifname",
"vpc-capability: _vpcCap?|",
"secondary-ip: secVtepIp?|",
"local-rmac: routerMac",
"_entryType: _entryType?|iface",
]'
- command: show nve peers detail | json native
normalize: 'TABLE_nve_peers/ROW_nve_peers/*?/[
"peer-ip: vni",
"config-vnis: _configVnis",
"cp-vni: _vniList?|",
"_entryType: _entryType?|peers",
]'
|
suzieq/config/evpnVni.yml
|
resource_types:
- name: pull-request
type: docker-image
source:
repository: teliaoss/github-pr-resource
resources:
- name: pull-request
type: pull-request
webhook_token: ((kubernetes-engine.github_webhook_token))
source:
repository: terraform-google-modules/terraform-google-kubernetes-engine
access_token: ((github.pr-access-token))
- name: lint-test-image
type: docker-image
source:
repository: gcr.io/cloud-foundation-cicd/cft/lint
tag: 2.3.0
username: _json_key
password: ((<PASSWORD>))
- name: integration-test-image
type: docker-image
source:
repository: gcr.io/cloud-foundation-cicd/cft/kitchen-terraform
tag: 2.3.0
username: _json_key
password: ((<PASSWORD>))
jobs:
- name: lint-tests
public: true
plan:
- get: pull-request
trigger: true
version: every
- put: notify-lint-test-pending
resource: pull-request
params:
path: pull-request
context: lint-tests
status: pending
- get: lint-test-image
- task: run
image: lint-test-image
config:
platform: linux
inputs:
- name: pull-request
path: terraform-google-kubernetes-engine
- name: lint-test-image
run:
path: make
args: ['-s']
dir: terraform-google-kubernetes-engine
on_success:
put: notify-lint-test-success
resource: pull-request
params:
path: pull-request
context: lint-tests
status: success
on_failure:
put: notify-lint-test-failure
resource: pull-request
params:
path: pull-request
context: lint-tests
status: failure
on_abort:
put: notify-lint-test-error
resource: pull-request
params:
path: pull-request
context: lint-tests
status: error
- name: integration-tests
public: true
plan:
- get: pull-request
trigger: true
version: every
- put: notify-integration-test-pending
resource: pull-request
params:
path: pull-request
context: integration-tests
status: pending
- get: integration-test-image
trigger: true
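# The aggregated tasks below share one parameter set via the YAML anchor
# &run-tests-params, merged into each task with "<<: *run-tests-params".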
- aggregate:
- task: run-tests-deploy-service
image: integration-test-image
file: pull-request/test/ci/deploy-service.yml
params: &run-tests-params
PROJECT_ID: ((kubernetes-engine.phoogle_project_id))
SERVICE_ACCOUNT_JSON: ((kubernetes-engine.phoogle_sa))
- task: run-tests-node-pool
image: integration-test-image
file: pull-request/test/ci/node-pool.yml
params:
<<: *run-tests-params
- task: run-tests-shared-vpc
image: integration-test-image
file: pull-request/test/ci/shared-vpc.yml
params:
<<: *run-tests-params
- task: run-tests-simple-regional
image: integration-test-image
file: pull-request/test/ci/simple-regional.yml
params:
<<: *run-tests-params
- task: run-tests-simple-regional-private
image: integration-test-image
file: pull-request/test/ci/simple-regional-private.yml
params:
<<: *run-tests-params
- task: run-tests-simple-zonal
image: integration-test-image
file: pull-request/test/ci/simple-zonal.yml
params:
<<: *run-tests-params
- task: run-tests-simple-zonal-private
image: integration-test-image
file: pull-request/test/ci/simple-zonal-private.yml
params:
<<: *run-tests-params
- task: run-tests-stub-domains
image: integration-test-image
file: pull-request/test/ci/stub-domains.yml
params:
<<: *run-tests-params
- task: run-workload-metadata-config
image: integration-test-image
file: pull-request/test/ci/workload-metadata-config.yml
params:
<<: *run-tests-params
- task: run-upstream-nameservers
image: integration-test-image
file: pull-request/test/ci/upstream-nameservers.yml
params:
<<: *run-tests-params
- task: run-stub-domains-upstream-nameservers
image: integration-test-image
file: pull-request/test/ci/stub-domains-upstream-nameservers.yml
params:
<<: *run-tests-params
- task: run-tests-beta-cluster
image: integration-test-image
file: pull-request/test/ci/beta-cluster.yml
params:
<<: *run-tests-params
on_success:
put: notify-integration-test-success
resource: pull-request
params:
path: pull-request
context: integration-tests
status: success
on_failure:
put: notify-integration-test-failure
resource: pull-request
params:
path: pull-request
context: integration-tests
status: failure
on_abort:
put: notify-integration-tests-abort
resource: pull-request
params:
path: pull-request
context: integration-tests
status: error
|
infra/concourse/pipelines/terraform-google-kubernetes-engine.yml
|
esphome:
name: linp_doorbell
platform: ESP32
board: esp32doit-devkit-v1
includes:
- linp-doorbell.h
libraries:
- "ArduinoQueue"
platformio_options:
platform: espressif32@1.11.0
platform_packages: |-4
framework-arduinoespressif32 @ https://github.com/pauln/arduino-esp32.git#solo-no-mac-crc/1.0.4
wifi:
ssid: 'your-wifi-ssid-here'
password: '<PASSWORD>'
# Enable Home Assistant API
api:
password: '<PASSWORD>'
custom_component:
- id: doorbell
lambda: |-
auto doorbell = new LinpDoorbell();
App.register_component(doorbell);
return {doorbell};
sensor:
# Doorbell volume sensor (recommended)
- platform: custom
lambda: return {get_linp_doorbell(doorbell)->volume_sensor};
sensors:
- name: "Volume"
# Doorbell "chime playing" sensor (optional)
- platform: custom
lambda: return {get_linp_doorbell(doorbell)->playing_sensor};
sensors:
- name: "Tune Playing"
# Tune/Chime configuration (one per button, optional)
- platform: custom
lambda: |-
return std::vector<Sensor*>(get_linp_doorbell(doorbell)->chime_sensors, std::end(get_linp_doorbell(doorbell)->chime_sensors));
sensors:
- name: "Button 1 Tune"
- name: "Button 2 Tune"
- name: "Button 3 Tune"
- name: "Button 4 Tune"
- name: "Button 5 Tune"
- name: "Button 6 Tune"
- name: "Button 7 Tune"
- name: "Button 8 Tune"
- name: "Button 9 Tune"
- name: "Button 10 Tune"
# Binary sensors for button presses (optional)
binary_sensor:
- platform: custom
lambda: |-
return std::vector<BinarySensor*>(get_linp_doorbell(doorbell)->button_sensors, std::end(get_linp_doorbell(doorbell)->button_sensors));
binary_sensors:
- name: "Button 1"
- name: "Button 2"
- name: "Button 3"
- name: "Button 4"
- name: "Button 5"
- name: "Button 6"
- name: "Button 7"
- name: "Button 8"
- name: "Button 9"
- name: "Button 10"
|
linp-doorbell.yaml
|
apiVersion: xl/v2
kind: Blueprint
metadata:
name: GKE-Cluster
description: |
The blueprint provisions a GCP GKE Cluster using Terraform and XL Deploy.
author: XebiaLabs
version: 2.0
instructions: Read xebialabs/USAGE-gcp-basic-gke-cluster.md to learn how to use this blueprint.
spec:
parameters:
- name: GCPProjectID
type: Input
prompt: What is the GCP project ID?
- name: K8SMasterUser
type: Input
prompt: What is the username for Kubernetes cluster?
default: admin
- name: K8SMasterPassword
type: SecretInput
prompt: What is the password for Kubernetes cluster? (minimum 16 characters)
validate: !expr "regex('^(\\\\S){16,}$', K8SMasterPassword)"
- name: ClusterName
type: Input
prompt: What is the name of the cluster?
- name: GCPRegion
type: Select
prompt: "Select the GCP region:"
options:
- label: Taiwan
value: asia-east1
- label: Hong Kong
value: asia-east2
- label: Japan
value: asia-northeast1
- label: India
value: asia-south1
- label: Singapore
value: asia-southeast1
- label: Australia (Sydney)
value: australia-southeast1
- label: Finland
value: europe-north1
- label: Belgium
value: europe-west1
- label: England
value: europe-west2
- label: Germany
value: europe-west3
- label: Netherlands
value: europe-west4
- label: Canada
value: northamerica-northeast1
- label: Brazil
value: southamerica-east1
- label: Iowa, USA
value: us-central1
- label: South Carolina, USA
value: us-east1
- label: Northern Virginia, USA
value: us-east4
- label: Oregon, USA
value: us-west1
- label: California, USA
value: us-west2
- name: GKENumNodes
type: Input
prompt: What is the number of nodes in each GKE cluster zone?
default: 2
- name: GKENodeMachineType
type: Select
prompt: What is the machine type of the GKE nodes?
options:
- n1-standard-1
- n1-standard-2
- n1-standard-4
- n1-standard-8
- n1-standard-16
- n1-standard-32
- n1-standard-64
- n1-standard-96
- n1-highmem-2
- n1-highmem-4
- n1-highmem-8
- n1-highmem-16
- n1-highmem-32
- n1-highmem-64
- n1-highmem-96
- n1-highcpu-2
- n1-highcpu-4
- n1-highcpu-8
- n1-highcpu-16
- n1-highcpu-32
- n1-highcpu-64
- n1-highcpu-96
- f1-micro
- g1-small
- n1-ultramem-40
- n1-ultramem-80
- n1-ultramem-160
- n1-megamem-96
# Hidden prefix
- name: Prefix
value: gcp-gke-
files:
# XebiaLabs
- path: xebialabs/USAGE-gcp-basic-gke-cluster.md.tmpl
- path: xebialabs/gcp-basic-gke-cluster.yaml.tmpl
- path: xebialabs.yaml
# Terraform
- path: terraform-gcp-basic-gke-cluster/gke/main.tf
- path: terraform-gcp-basic-gke-cluster/gke/outputs.tf
- path: terraform-gcp-basic-gke-cluster/gke/variables.tf
- path: terraform-gcp-basic-gke-cluster/vpc/main.tf
- path: terraform-gcp-basic-gke-cluster/vpc/outputs.tf
- path: terraform-gcp-basic-gke-cluster/vpc/variables.tf
- path: terraform-gcp-basic-gke-cluster/.gitignore
- path: terraform-gcp-basic-gke-cluster/main.tf
- path: terraform-gcp-basic-gke-cluster/outputs.tf
- path: terraform-gcp-basic-gke-cluster/variables.tf
# docker-compose setup for required tools
- path: docker/docker-compose.yml
|
gcp/basic-gke-cluster/blueprint.yaml
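
A hypothetical answers file for the prompts above — the parameter names come from the blueprint itself; every value is illustrative:

```yaml
# Illustrative answers for the GKE-Cluster blueprint parameters
GCPProjectID: my-gcp-project-id
K8SMasterUser: admin
K8SMasterPassword: changeme-16chars!!   # must satisfy the 16+ non-space regex
ClusterName: demo-gke-cluster
GCPRegion: europe-west4
GKENumNodes: 2
GKENodeMachineType: n1-standard-2
```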
|
---
trigger:
branches:
include:
- "*"
tags:
include:
- "*"
pool:
vmImage: "ubuntu-latest"
steps:
- script: sudo apt-get update && sudo apt-get install -y moreutils
displayName: Install Ubuntu dependencies
- task: UsePythonVersion@0
inputs:
versionSpec: "3.x"
displayName: Use Python 3.x
- script: pip install ansible ansible-lint docker yamllint
displayName: Install Python dependencies
- script: |
set -ex
wget -qO fabric-bins.tar.gz https://github.com/hyperledger/fabric/releases/download/v1.4.6/hyperledger-fabric-linux-amd64-1.4.6.tar.gz
sudo tar xvf fabric-bins.tar.gz -C /usr/local
rm fabric-bins.tar.gz
displayName: Download Fabric CLI
- script: |
set -ex
wget -qO fabric-ca-bins.tar.gz https://github.com/hyperledger/fabric-ca/releases/download/v1.4.6/hyperledger-fabric-ca-linux-amd64-1.4.6.tar.gz
sudo tar xvf fabric-ca-bins.tar.gz -C /usr/local
rm fabric-ca-bins.tar.gz
displayName: Download Fabric CA CLI
- script: |
set -ex
ansible-lint . -x 208
yamllint .
displayName: Lint
- script: |
set -ex
docker pull ibmblockchain/vscode-prereqs:latest || true
displayName: Pull Docker image
- script: docker/build.sh
displayName: Build Docker image
- script: |
set -ex
mkdir -p ~/.ansible/roles
ln -s $(Build.Repository.LocalPath) ~/.ansible/roles/ibm.blockchain_platform_manager
displayName: Link Playbook dependencies
- script: |
set -ex
ansible-playbook tests/test.yml
ansible-playbook --extra-vars "state=absent" tests/test.yml
displayName: Run Playbook tests
- script: |
set -ex
ansible-galaxy login --github-token=$(GitHub Token)
ansible-galaxy import IBM-Blockchain ansible-role-blockchain-platform-manager
condition: ne(variables['Build.Reason'], 'PullRequest')
displayName: Publish to Ansible Galaxy
- script: |
set -ex
docker login -u $(Docker Username) -p $(Docker Password)
docker tag ibmblockchain/ansible:latest ibmblockchain/vscode-prereqs:unstable
docker push ibmblockchain/vscode-prereqs:unstable
condition: ne(variables['Build.Reason'], 'PullRequest')
displayName: Push Docker image to Docker Hub (merge build)
- script: |
set -ex
docker login -u $(Docker Username) -p $(Docker Password)
VERSION=$(git describe --exact-match --tags $(Build.SourceVersion))
docker tag ibmblockchain/ansible:latest ibmblockchain/vscode-prereqs:${VERSION}
docker tag ibmblockchain/vscode-prereqs:${VERSION} ibmblockchain/vscode-prereqs:latest
docker push ibmblockchain/vscode-prereqs:${VERSION}
docker push ibmblockchain/vscode-prereqs:latest
condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags'))
displayName: Push Docker image to Docker Hub (release build)
|
azure-pipelines.yml
|
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: sbot-deployment-{TENANT_ID}
namespace: {NAMESPACE}
labels:
app: sbot-{TENANT_ID}
spec:
replicas: 1
selector:
matchLabels:
app: sbot-{TENANT_ID}
template:
metadata:
labels:
app: sbot-{TENANT_ID}
annotations:
pod.boostport.com/vault-approle: core-baseinfra
pod.boostport.com/vault-init-container: install
spec:
initContainers:
- name: install
image: localhost:5000/kubernetes-vault-init:0.5.0
securityContext:
runAsUser: 1999
env:
- name: VAULT_ROLE_ID
value: {VAULT_ROLE_ID}
- name: CERT_COMMON_NAME
value: {EXTERNAL_ACCESS_HOST}
volumeMounts:
- name: vault-token
mountPath: /var/run/secrets/boostport.com
containers:
- name: sbot
image: {IMAGE_REPOSITORY}
args:
- "chatbot"
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
memory: 600Mi
limits:
cpu: 2
memory: 1536Mi
livenessProbe:
httpGet:
path: /version
port: 3000
scheme: HTTPS
initialDelaySeconds: 360
periodSeconds: 30
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /version
port: 3000
scheme: HTTPS
initialDelaySeconds: 45
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
env:
# common env for all the collaboration components
- name: ENV_TYPE
value: "kubernetes"
volumeMounts:
- name: vault-token
mountPath: /var/run/secrets/boostport.com
- name: -sbot-scripts
mountPath: /opt/sbot/scripts
subPath: {TENANT_ID}/{script_path}
- name: -sbot-log
mountPath: /var/opt/sbot/log
subPath: {TENANT_ID}/{log_path}
- name: kubernetes-vault-renew
image: localhost:5000/kubernetes-vault-renew:0.5.0
imagePullPolicy: IfNotPresent
volumeMounts:
- name: vault-token
mountPath: /var/run/secrets/boostport.com
volumes:
- name: -sbot-log
persistentVolumeClaim:
claimName: {log-pv-claim}
- name: -sbot-scripts
persistentVolumeClaim:
claimName: {script-pv-claim}
- name: vault-token
emptyDir: {}
|
yaml-templates/sactive-sbot-deployment.yaml
|
---
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-trigger-release/6/
timestamp: 2016-06-20 19:05:03 UTC
duration: 2h33m15s
triggered_by: sdelano
active_duration: 2h32m53s
parameters:
GIT_REF: 12.7.0
EXPIRE_CACHE: false
change:
git_remote: <EMAIL>:chef/chef-server.git
git_commit: '<PASSWORD>'
project: chef-server
version: 12.7.0
stages:
chef-server-12-promote:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-promote/179/
duration: 10s
chef-server-12-test:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/257/
duration: 1h16m44s
runs:
el-7-ppc64:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=ppc64,platform=el-7,project=chef-server,role=tester/257/
duration: 58m52s
el-7-ppc64le:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=ppc64le,platform=el-7,project=chef-server,role=tester/257/
duration: 1h6m38s
ubuntu-14.04-ppc64le:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=ppc64le,platform=ubuntu-14.04,project=chef-server,role=tester/257/
duration: 1h16m43s
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=el-5,project=chef-server,role=tester/257/
duration: 49m12s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=el-6,project=chef-server,role=tester/257/
duration: 52m6s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=el-7,project=chef-server,role=tester/257/
duration: 40m47s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=ubuntu-12.04,project=chef-server,role=tester/257/
duration: 43m16s
ubuntu-14.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=ubuntu-14.04,project=chef-server,role=tester/257/
duration: 42m57s
chef-server-12-build:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/317/
duration: 1h15m47s
runs:
el-7-ppc64:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=ppc64,platform=el-7,project=chef-server,role=builder/317/
duration: 1h15m33s
el-7-ppc64le:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=ppc64le,platform=el-7,project=chef-server,role=builder/317/
duration: 1h11m42s
ubuntu-14.04-ppc64le:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=ppc64le,platform=ubuntu-14.04,project=chef-server,role=builder/317/
duration: 1h2m55s
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=el-5,project=chef-server,role=builder/317/
duration: 1h4m24s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=el-6,project=chef-server,role=builder/317/
duration: 1h5m45s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=el-7,project=chef-server,role=builder/317/
duration: 50m30s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=ubuntu-12.04,project=chef-server,role=builder/317/
duration: 1h13m42s
chef-server-12-trigger-release:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-trigger-release/6/
duration: 11s
|
reports/wilson.ci.chef.co/job/chef-server-12-trigger-release/6.yaml
|
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "app.fullname" . }}
namespace: {{ include "app.namespace" . }}
labels:
{{- include "app.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "app.selectorLabels" . | nindent 6 }}
strategy:
type: RollingUpdate
template:
metadata:
annotations:
rollme: {{ randAlphaNum 5 | quote }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "app.selectorLabels" . | nindent 8 }}
spec:
serviceAccountName: {{ .Values.serviceAccount.name | default "default" }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy | default "IfNotPresent" }}
ports:
- name: app-port
containerPort: {{ .Values.pod.port }}
protocol: TCP
- name: health-port
containerPort: {{ .Values.pod.health.port | default .Values.pod.port }}
protocol: TCP
livenessProbe:
httpGet:
path: {{ .Values.pod.health.livePath | default "/" }}
port: health-port
readinessProbe:
httpGet:
path: {{ .Values.pod.health.readyPath | default "/" }}
port: health-port
resources:
{{- toYaml .Values.resources | nindent 12 }}
# example of usage of nodeSelector
nodeSelector:
kubernetes.io/os: linux
affinity:
{{- if .Values.affinity.nodeAffinity.enabled }}
# Example to limit which zones app can be deployed on
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: failure-domain.beta.kubernetes.io/zone
operator: In
values:
{{- with .Values.affinity.nodeAffinity.zones }}
{{- toYaml . | nindent 20 }}
{{- end }}
{{- end }}
{{- if .Values.affinity.podAntiAffinity.enabled }}
# To ensure pods are not deployed on the node (zone)
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- {{ include "app.name" . }}
topologyKey: {{ .Values.affinity.podAntiAffinity.topologyKey }}
{{- end }}
terminationGracePeriodSeconds: 30
|
k8s/apps/echoserver/templates/deployment.yaml
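
A hypothetical values fragment for the affinity switches this template reads — zone names and the topology key are illustrative assumptions:

```yaml
# Illustrative affinity values for the deployment template above
affinity:
  nodeAffinity:
    enabled: true
    zones:                    # preferred failure-domain zones (example values)
      - europe-west4-a
      - europe-west4-b
  podAntiAffinity:
    enabled: true
    topologyKey: kubernetes.io/hostname   # spread replicas across nodes
```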
|
apiVersion: carto.run/v1alpha1
kind: ClusterTemplate
metadata:
name: config-template
spec:
ytt: |
#@ load("@ytt:data", "data")
#@ load("@ytt:yaml", "yaml")
#@ def delivery():
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
name: #@ data.values.workload.metadata.name
labels:
app.kubernetes.io/component: run
#@ if hasattr(data.values.workload.metadata, "labels") and hasattr(data.values.workload.metadata.labels, "app.kubernetes.io/part-of"):
app.kubernetes.io/part-of: #@ data.values.workload.metadata.labels["app.kubernetes.io/part-of"]
#@ end
carto.run/workload-name: #@ data.values.workload.metadata.name
spec:
template: #@ data.values.config
#@ end
#@ def claims():
#@ for s in data.values.workload.spec.serviceClaims:
---
apiVersion: services.tanzu.vmware.com/v1alpha1
kind: ResourceClaim
metadata:
name: #@ data.values.workload.metadata.name + '-' + s.name
labels:
app.kubernetes.io/component: run
#@ if hasattr(data.values.workload.metadata, "labels") and hasattr(data.values.workload.metadata.labels, "app.kubernetes.io/part-of"):
app.kubernetes.io/part-of: #@ data.values.workload.metadata.labels["app.kubernetes.io/part-of"]
#@ end
carto.run/workload-name: #@ data.values.workload.metadata.name
spec:
ref: #@ s.ref
---
apiVersion: servicebinding.io/v1alpha3
kind: ServiceBinding
metadata:
name: #@ data.values.workload.metadata.name + '-' + s.name
labels:
app.kubernetes.io/component: run
#@ if hasattr(data.values.workload.metadata, "labels") and hasattr(data.values.workload.metadata.labels, "app.kubernetes.io/part-of"):
app.kubernetes.io/part-of: #@ data.values.workload.metadata.labels["app.kubernetes.io/part-of"]
#@ end
carto.run/workload-name: #@ data.values.workload.metadata.name
spec:
name: #@ s.name
service:
apiVersion: services.tanzu.vmware.com/v1alpha1
kind: ResourceClaim
name: #@ data.values.workload.metadata.name + '-' + s.name
workload:
apiVersion: serving.knative.dev/v1
kind: Service
name: #@ data.values.workload.metadata.name
#@ end
#@ end
---
apiVersion: v1
kind: ConfigMap
metadata:
name: #@ data.values.workload.metadata.name
data:
delivery.yml: #@ yaml.encode(delivery())
#@ if hasattr(data.values.workload.spec, "serviceClaims") and len(data.values.workload.spec.serviceClaims):
serviceclaims.yml: #@ yaml.encode(claims())
#@ end
|
supplychain/supply-chain-05-config.yaml
|
host: 0.0.0.0
port: ${GRAPHDB_PERSONA_SCHEMA_LOADER_PORT}
scriptEvaluationTimeout: 120000
threadPoolWorker: ${GRAPHDB_PERSONA_SCHEMA_LOADER_THREAD_POOL_WORKER}
gremlinPool: 1
channelizer: org.apache.tinkerpop.gremlin.server.channel.WsAndHttpChannelizer
graphs: {
graph: /opt/graphdb/conf/janusgraph-dynamodb.properties
}
scriptEngines: {
gremlin-groovy: {
plugins: { org.apache.tinkerpop.gremlin.server.jsr223.GremlinServerGremlinPlugin: {},
org.apache.tinkerpop.gremlin.tinkergraph.jsr223.TinkerGraphGremlinPlugin: {},
org.apache.tinkerpop.gremlin.jsr223.ImportGremlinPlugin: {classImports: [java.lang.Math], methodImports: [java.lang.Math#*]},
org.apache.tinkerpop.gremlin.jsr223.ScriptFileGremlinPlugin: {files: [/opt/graphdb/bin/loadschema-globals.groovy, /opt/graphdb/bin/loadschema-bootstrap.groovy]}
#org.apache.tinkerpop.gremlin.groovy.jsr223.GroovyCompilerGremlinPlugin: {enableThreadInterrupt: true, timedInterrupt: 120000, compilation: COMPILE_STATIC, extensions: org.apache.tinkerpop.gremlin.groovy.jsr223.customizer.FileSandboxExtension }
}}}
serializers:
- { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { ioRegistries: [org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV1d0, org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] , useMapperFromGraph: graph}}
- { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { serializeResultToString: true, useMapperFromGraph: graph }}
- { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0, config: { ioRegistries: [org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0, org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] , useMapperFromGraph: graph}} # application/vnd.gremlin-v3.0+gryo
- { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV3d0, config: { serializeResultToString: true, useMapperFromGraph: graph }} # application/vnd.gremlin-v3.0+gryo-stringd
- { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0, config: { ioRegistries: [org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV3d0, org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} # application/json
processors:
- { className: org.apache.tinkerpop.gremlin.server.op.standard.StandardOpProcessor, config: { maxParameters: 64 }}
- { className: org.apache.tinkerpop.gremlin.server.op.session.SessionOpProcessor, config: { maxParameters: 64 , sessionTimeout: 28800000 }}
- { className: org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor, config: { cacheExpirationTime: 0, cacheMaxSize: 0 }}
metrics: {
consoleReporter: {enabled: false, interval: 180000},
csvReporter: {enabled: false, interval: 180000, fileName: /tmp/gremlin-server-metrics.csv},
jmxReporter: {enabled: true},
slf4jReporter: {enabled: false, interval: 180000}}
strictTransactionManagement: false
maxInitialLineLength: 4096
maxHeaderSize: 8192
maxChunkSize: 8192
maxContentLength: 2000000
maxAccumulationBufferComponents: 1024
resultIterationBatchSize: 64
writeBufferLowWaterMark: 32768
writeBufferHighWaterMark: 65536
ssl: {
enabled: false
}
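# A hedged smoke test (host/port resolve from the variables above; assumes the
# loaded groovy scripts bind a traversal source "g"): with WsAndHttpChannelizer
# the same port also accepts HTTP, e.g.
#   curl -X POST -d '{"gremlin":"g.V().count()"}' http://<host>:<port>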
|
conf-template/graphdb/persona/schema-loader/gremlin-server.yml
|
version: '3.7'
services:
mongodb:
build: ./mongodb
image: tms-portal/mongodb:latest
container_name: tms-portal-mongodb
back:
build: ./back
image: tms-portal/back:latest
container_name: tms-portal-back
environment:
# App port
# - TMS_APP_PORT=3000
# App name
# - TMS_APP_NAME=dev-op
# Redis configuration
# - TMS_REDIS_PREFIX=dev
# - TMS_REDIS_HOST=localhost
# - TMS_REDIS_PORT=6378
# MongoDB configuration
# - TMS_MONGODB_USER_NAME=root
# - TMS_MONGODB_USER_PWD=<PASSWORD>
# Database name configuration
# - TMS_CTRL_MONGODB_DATABASE=tms-portal
command:
['sh', './wait-for.sh', 'mongodb:27017', '-t', '300', '--', 'node', 'server']
ue_admin:
build:
context: ./ue_admin
args:
vue_app_login_key_username: username
vue_app_login_key_password: password
vue_app_login_key_pin: pin
vue_app_base_url: /admin
vue_app_auth_base: /auth
vue_app_api_base: /api
vue_app_auth_disabled: 'No'
vue_app_storetoken_way: session
image: tms-portal/ue_admin:latest
container_name: tms-portal-ue_admin
# environment:
# - NGINX_BACK_BASE_URL=http://back:3000
# - NGINX_WEB_BASE_URL=/admin
# # Defines the connection-tracking zone; 10m can store 320,000 concurrent sessions
# - NGINX_LIMIT_CONN_ZONE=10m
# # Limits each IP to 8000 concurrent connections
# - NGINX_LIMIT_CONN_CONNIP=8000
# # Log level used when the connection limit is triggered
# - NGINX_LIMIT_CONN_LOG_LEVEL=error
# # Status code returned for rejected connection requests
# - NGINX_LIMIT_CONN_STATUS=503
# # Start rate limiting after this threshold is reached (bytes)
# - NGINX_LIMIT_RATE_AFTER=300m
# # Limits the rate of data transfer to clients (bytes/s)
# - NGINX_LIMIT_RATE=300k
# # Timeout for the client to send a request body
# - NGINX_CLIENT_BODY_TIMEOUT=60s
# # Timeout for the client to send request headers
# - NGINX_CLIENT_HEADER_TIMEOUT=60s
# # Keep-alive connection timeout
# - NGINX_KEEPALIVE_TIMEOUT=75s
# # Timeout for the server to send data to the client
# - NGINX_SEND_TIMEOUT=60s
# # Hide the nginx version number
# - NGINX_SERVER_TOKENS=off
ue_portal:
build:
context: ./ue_portal
args:
vue_app_login_key_username: username
vue_app_login_key_password: password
vue_app_login_key_pin: pin
vue_app_base_url: /portal
vue_app_auth_base: /auth
vue_app_api_base: /api
vue_app_auth_disabled: 'No'
vue_app_storetoken_way: session
image: tms-portal/ue_portal:latest
container_name: tms-portal-ue_portal
# environment:
# - NGINX_BACK_BASE_URL=http://back:3000
# - NGINX_WEB_BASE_URL=/portal
# # Defines the connection-tracking zone; 10m can store 320,000 concurrent sessions
# - NGINX_LIMIT_CONN_ZONE=10m
# # Limits each IP to 8000 concurrent connections
# - NGINX_LIMIT_CONN_CONNIP=8000
# # Log level used when the connection limit is triggered
# - NGINX_LIMIT_CONN_LOG_LEVEL=error
# # Status code returned for rejected connection requests
# - NGINX_LIMIT_CONN_STATUS=503
# # Start rate limiting after this threshold is reached (bytes)
# - NGINX_LIMIT_RATE_AFTER=300m
# # Limits the rate of data transfer to clients (bytes/s)
# - NGINX_LIMIT_RATE=300k
# # Timeout for the client to send a request body
# - NGINX_CLIENT_BODY_TIMEOUT=60s
# # Timeout for the client to send request headers
# - NGINX_CLIENT_HEADER_TIMEOUT=60s
# # Keep-alive connection timeout
# - NGINX_KEEPALIVE_TIMEOUT=75s
# # Timeout for the server to send data to the client
# - NGINX_SEND_TIMEOUT=60s
# # Hide the nginx version number
# - NGINX_SERVER_TOKENS=off
|
docker-compose.yml
|
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: imagecaches.kubefledged.k8s.io
spec:
group: kubefledged.k8s.io
versions:
- name: v1alpha1
served: true
storage: true
scope: Namespaced
names:
plural: imagecaches
singular: imagecache
kind: ImageCache
shortNames:
- ic
"validation":
"openAPIV3Schema":
description: ImageCache is a specification for an ImageCache resource
type: object
required:
- spec
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ImageCacheSpec is the spec for an ImageCache resource
type: object
required:
- cacheSpec
properties:
cacheSpec:
type: array
items:
description: CacheSpecImages specifies the Images to be cached
type: object
required:
- images
properties:
images:
type: array
items:
type: string
nodeSelector:
type: object
additionalProperties:
type: string
imagePullSecrets:
type: array
items:
description: LocalObjectReference contains enough information to let
you locate the referenced object inside the same namespace.
type: object
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
status:
description: ImageCacheStatus is the status for an ImageCache resource
type: object
required:
- message
- reason
- startTime
- status
properties:
completionTime:
type: string
format: date-time
failures:
type: object
additionalProperties:
type: array
items:
description: NodeReasonMessage has failure reason and message for
a node
type: object
required:
- message
- node
- reason
properties:
message:
type: string
node:
type: string
reason:
type: string
message:
type: string
reason:
type: string
startTime:
type: string
format: date-time
status:
description: ImageCacheActionStatus defines the status of ImageCacheAction
type: string
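# A hedged example ImageCache (image and names are placeholders) that the
# schema above validates:
#
#   apiVersion: kubefledged.k8s.io/v1alpha1
#   kind: ImageCache
#   metadata:
#     name: imagecache-sample
#   spec:
#     cacheSpec:
#     - images:
#       - nginx:latest
#       nodeSelector:
#         kubernetes.io/os: linux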
|
deploy/kubefledged-crd.yaml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-17 19:05"
game: "Unreal Tournament"
name: "CTF-Dukubulous"
author: "Bloodwar"
description: "None"
releaseDate: "2006-12"
attachments:
- type: "IMAGE"
name: "CTF-Dukubulous_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/Capture%20The%20Flag/D/CTF-Dukubulous_shot_1.png"
originalFilename: "CTF-Dukubulous.zip"
hash: "a2dddd3a803d2e5e8dcf4ce8970448c24ed93878"
fileSize: 4765210
files:
- name: "CTF-Dukubulous.unr"
fileSize: 1866066
hash: "04dbdc8e44396bce154c7da6f5f992ad90e9d809"
- name: "SGTech1.utx"
fileSize: 7953567
hash: "ec01e4c9011a8cb2d08ebd12d255373f83d98fc1"
- name: "swJumpPad.u"
fileSize: 23548
hash: "7212941ffe366e45715531864ca15ad88208362e"
otherFiles: 0
dependencies:
CTF-Dukubulous.unr:
- status: "OK"
name: "swJumpPad"
- status: "OK"
name: "SGTech1"
downloads:
- url: "http://www.ut-files.com/index.php?dir=Maps/CTF/&file=CTF-Dukubulous.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/Capture%20The%20Flag/D/CTF-Dukubulous.zip"
main: true
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/CTF/MapsD/&file=CTF-Dukubulous.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/CTF&file=CTF-Dukubulous.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Maps/CTF/MapsD/CTF-Dukubulous.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/Capture%20The%20Flag/D/a/2/dddd3a/CTF-Dukubulous.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/Capture%20The%20Flag/D/a/2/dddd3a/CTF-Dukubulous.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Capture The Flag"
title: "W00tabulous DUKU"
playerCount: "10"
themes:
Tech: 0.2
Industrial: 0.4
City: 0.4
bots: true
|
content/Unreal Tournament/Maps/Capture The Flag/D/a/2/dddd3a/ctf-dukubulous_[a2dddd3a].yml
|
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ template "seeder.uname" . }}"
namespace: {{ template "namespace" . }}
labels:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
chart: "{{ .Chart.Name }}"
env: {{ .Values.config.environment }}
app: "{{ template "seeder.uname" . }}"
{{- range $key, $value := .Values.services.backend.labels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
spec:
template:
metadata:
labels:
app: {{ template "seeder.uname" . }}
release: {{ .Release.Name }}
version: {{ .Values.images.tag }}
annotations:
odysseia-greek/role: {{ .Values.services.seeder.role }}
odysseia-greek/access: {{ .Values.services.seeder.access }}
spec:
serviceAccountName: {{ template "serviceAccountName" . }}
{{- if .Values.config.inClusterHarbor }}
imagePullSecrets:
- name: harbor-images
{{- end}}
initContainers:
- name: "{{ template "init.uname" . }}"
{{- if .Values.config.inClusterHarbor }}
image: {{ .Values.images.harborRepo}}{{ .Values.images.init.repo }}:{{ .Values.images.tag }}
imagePullPolicy: {{ .Values.config.pullPolicy }}
{{ else }}
image: {{ .Values.images.init.repo }}:{{ .Values.images.tag }}
imagePullPolicy: {{ .Values.config.pullPolicy }}
{{- end}}
env:
- name: ENV
value: {{ .Values.config.environment }}
- name: SOLON_SERVICE
value: {{ .Values.envVariables.solonService }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: ELASTIC_ROLE
valueFrom:
fieldRef:
fieldPath: metadata.annotations['odysseia-greek/role']
- name: ELASTIC_ACCESS
valueFrom:
fieldRef:
fieldPath: metadata.annotations['odysseia-greek/access']
containers:
- name: "{{ template "sidecar.uname" . }}"
{{- if .Values.config.inClusterHarbor }}
image: {{ .Values.images.harborRepo}}{{ .Values.images.sidecar.repo }}:{{ .Values.images.tag }}
imagePullPolicy: {{ .Values.config.pullPolicy }}
{{ else }}
image: {{ .Values.images.sidecar.repo }}:{{ .Values.images.tag }}
imagePullPolicy: {{ .Values.config.pullPolicy }}
{{- end}}
env:
- name: SOLON_SERVICE
value: {{ .Values.envVariables.solonService }}
- name: VAULT_SERVICE
value: {{ .Values.envVariables.vaultService }}
- name: ISJOB
value: "true"
- name: ENV
value: {{ .Values.config.environment }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
ports:
- containerPort: {{ .Values.envVariables.sidecar.port }}
resources:
requests:
memory: {{ .Values.services.sidecar.requests.memory }}
cpu: {{ .Values.services.sidecar.requests.cpu }}
limits:
memory: {{ .Values.services.sidecar.limits.memory }}
cpu: {{ .Values.services.sidecar.limits.cpu }}
- name: "{{ template "seeder.uname" . }}"
{{- if .Values.config.inClusterHarbor }}
image: {{ .Values.images.harborRepo}}{{ .Values.images.seeder.repo }}:{{ .Values.images.tag }}
imagePullPolicy: {{ .Values.config.pullPolicy }}
{{ else }}
image: {{ .Values.images.seeder.repo }}:{{ .Values.images.tag }}
imagePullPolicy: {{ .Values.config.pullPolicy }}
{{- end}}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: ELASTIC_SEARCH_SERVICE
value: {{ .Values.envVariables.elasticService }}
- name: ENV
value: {{ .Values.config.environment }}
- name: TLSENABLED
value: {{ .Values.config.tls | quote }}
restartPolicy: Never
backoffLimit: 3
|
themistokles/odysseia/charts/sokrates/templates/job.yaml
|
items:
- uid: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic"
id: "WithAccessTraffic"
parent: "com.microsoft.azure.management.storage"
children:
- "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic.withOnlyHttpsTraffic()"
langs:
- "java"
name: "StorageAccount.DefinitionStages.WithAccessTraffic"
nameWithType: "StorageAccount.DefinitionStages.WithAccessTraffic"
fullName: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic"
type: "Interface"
package: "com.microsoft.azure.management.storage"
summary: "The stage of storage account definition allowing to restrict access protocol."
syntax:
content: "public static interface StorageAccount.DefinitionStages.WithAccessTraffic"
- uid: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic.withOnlyHttpsTraffic()"
id: "withOnlyHttpsTraffic()"
parent: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic"
langs:
- "java"
name: "withOnlyHttpsTraffic()"
nameWithType: "StorageAccount.DefinitionStages.WithAccessTraffic.withOnlyHttpsTraffic()"
fullName: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic.withOnlyHttpsTraffic()"
overload: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic.withOnlyHttpsTraffic*"
type: "Method"
package: "com.microsoft.azure.management.storage"
summary: "Specifies that only https traffic should be allowed to storage account."
syntax:
content: "public abstract StorageAccount.DefinitionStages.WithCreate withOnlyHttpsTraffic()"
return:
type: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithCreate"
description: "the next stage of storage account definition"
references:
- uid: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithCreate"
name: "StorageAccount.DefinitionStages.WithCreate"
nameWithType: "StorageAccount.DefinitionStages.WithCreate"
fullName: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithCreate"
- uid: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic.withOnlyHttpsTraffic*"
name: "withOnlyHttpsTraffic"
nameWithType: "StorageAccount.DefinitionStages.WithAccessTraffic.withOnlyHttpsTraffic"
fullName: "com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic.withOnlyHttpsTraffic"
package: "com.microsoft.azure.management.storage"
|
docs-ref-autogen/com.microsoft.azure.management.storage.StorageAccount.DefinitionStages.WithAccessTraffic.yml
|
# The AWSTemplateFormatVersion identifies the capabilities of the template
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/format-version-structure.html
AWSTemplateFormatVersion: 2010-09-09
Description: >-
Serverless website backend API for DynamoDB access, 10 percent.
# Transform section specifies one or more macros that AWS CloudFormation uses to process your template
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-section-structure.html
Transform: AWS::Serverless-2016-10-31
# Shared configuration for all resources, more in
# https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst
Globals:
Function:
# The PermissionsBoundary allows users to safely develop with their function's permissions constrained
# to their current application. All the functions and roles in this application have to include it and
# it has to be manually updated when you add resources to your application.
# More information in https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html
PermissionsBoundary: !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${AppId}-${AWS::Region}-PermissionsBoundary'
Parameters:
AppId:
Type: String
# Resources declares the AWS resources that you want to include in the stack
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/resources-section-structure.html
Resources:
Api3N:
Type: AWS::Serverless::Api
Properties:
StageName: Prod
Cors:
AllowOrigin: "'*'"
AllowHeaders: "'Content-Type'"
AllowMethods: "'OPTIONS,POST,GET'"
Auth:
DefaultAuthorizer: CognitoAuthorizer3N
Authorizers:
CognitoAuthorizer3N:
UserPoolArn: !GetAtt CognitoUserPool3N.Arn
# Each Lambda function is defined by properties:
# https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction
# This is a Lambda function config associated with the source code: get-all-items.js
#
getAllItemsFunction:
Type: AWS::Serverless::Function
Properties:
CodeUri: ./
Handler: src/handlers/get-all-items.getAllItemsHandler
Runtime: nodejs10.x
MemorySize: 128
Timeout: 60
Description: A simple example that includes an HTTP GET method to get all items from a DynamoDB table.
Policies:
# Give Create/Read/Update/Delete Permissions to the SampleTable
- DynamoDBCrudPolicy:
TableName: !Ref SampleTable
Environment:
Variables:
# Make table name accessible as environment variable from function code during execution
SAMPLE_TABLE: !Ref SampleTable
Events:
Api:
Type: Api
Properties:
RestApiId: !Ref Api3N
Path: /
Method: GET
# Simple syntax to create a DynamoDB table with a single attribute primary key, more in
# https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlesssimpletable
# DynamoDB table to store item: {id: <ID>, name: <NAME>}
SampleTable:
Type: AWS::Serverless::SimpleTable
Properties:
PrimaryKey:
Name: id
Type: String
ProvisionedThroughput:
ReadCapacityUnits: 2
WriteCapacityUnits: 2
# Cognito User Pool for API Gateway
CognitoUserPool3N:
Type: AWS::Cognito::UserPool
Properties:
Policies:
PasswordPolicy:
MinimumLength: 6
UsernameAttributes:
- email
Schema:
- AttributeDataType: String
Name: email
Required: false
# Cognito User Pool Client for API Gateway
CognitoUserPoolClient3N:
Type: AWS::Cognito::UserPoolClient
Properties:
UserPoolId: !Ref CognitoUserPool3N
GenerateSecret: false
PreventUserExistenceErrors: ENABLED
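# A hedged usage sketch (IDs and region are placeholders): clients sign in
# against the user pool above and call the API with the resulting ID token,
# since CognitoAuthorizer3N is the DefaultAuthorizer:
#   curl -H "Authorization: <cognito-id-token>" \
#     https://<api-id>.execute-api.<region>.amazonaws.com/Prod/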
|
template.yml
|
name: Remote Dispatch Action
on: [repository_dispatch]
jobs:
package-push-common:
runs-on: windows-latest
outputs:
version: ${{ steps.get_version.outputs.version }}
steps:
- name: Event Information
run: |
echo "Event '${{ github.event.action }}' payload '${{ github.event.client_payload.tag }}'"
echo "Windows binary hash '${{ github.event.client_payload.windows_sha }}'"
- name: Get Version
id: get_version
run: |
$version="${{ github.event.client_payload.tag }}"
$version=$version.replace('v','')
echo "::set-output name=version::$version"
package-push-gardenlogin:
if: github.event.client_payload.component == 'gardenlogin'
runs-on: windows-latest
needs: package-push-common
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # pin@v2.4.0
- name: Update package source files
run: .github\workflows\update-gardenlogin.ps1 ${{ github.event.client_payload.tag }} ${{ github.event.client_payload.windows_sha }}
- name: Choco pack
uses: crazy-max/ghaction-chocolatey@87d06bbbd2cfb1835f1820042d356aef4875fb5f # pin@v1.6
with:
args: pack gardenlogin\gardenlogin.nuspec --version=${{ needs.package-push-common.outputs.version }} -y --outdir gardenlogin
- name: Choco push
uses: crazy-max/ghaction-chocolatey@87d06bbbd2cfb1835f1820042d356aef4875fb5f # pin@v1.6
with:
args: push "gardenlogin\gardenlogin.${{ needs.package-push-common.outputs.version }}.nupkg" --source https://chocolatey.org -k ${{ secrets.CHOCOLATEY_API_KEY }}
- name: Commit files
run: |
git config --local user.email "<EMAIL>"
git config --local user.name "gardener-robot-ci-1"
git add "gardenlogin\gardenlogin.${{ needs.package-push-common.outputs.version }}.nupkg"
git add "gardenlogin\tools\chocolateyinstall.ps1"
git add "gardenlogin\tools\chocolateyuninstall.ps1"
git commit -m "Update gardenlogin"
- name: Push changes
uses: ad-m/github-push-action@40bf560936a8022e68a3c00e7d2abefaf01305a6 # pin@v0.6.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
force: false
package-push-gardenctl-v2:
if: github.event.client_payload.component == 'gardenctl-v2'
runs-on: windows-latest
needs: package-push-common
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579 # pin@v2.4.0
- name: Update package source files
run: .github\workflows\update-gardenctl-v2.ps1 ${{ github.event.client_payload.tag }} ${{ github.event.client_payload.windows_sha }}
- name: Choco pack
uses: crazy-max/ghaction-chocolatey@87d06bbbd2cfb1835f1820042d356aef4875fb5f # pin@v1.6
with:
args: pack gardenctl-v2\gardenctl-v2.nuspec --version=${{ needs.package-push-common.outputs.version }} -y --outdir gardenctl-v2
- name: Choco push
uses: crazy-max/ghaction-chocolatey@87d06bbbd2cfb1835f1820042d356aef4875fb5f # pin@v1.6
with:
args: push "gardenctl-v2\gardenctl-v2.${{ needs.package-push-common.outputs.version }}.nupkg" --source https://chocolatey.org -k ${{ secrets.CHOCOLATEY_API_KEY }}
- name: Commit files
run: |
git config --local user.email "<EMAIL>"
git config --local user.name "gardener-robot-ci-1"
git add "gardenctl-v2\gardenctl-v2.${{ needs.package-push-common.outputs.version }}.nupkg"
git add "gardenctl-v2\tools\chocolateyinstall.ps1"
git commit -m "Update gardenctl-v2"
- name: Push changes
uses: ad-m/github-push-action@40bf560936a8022e68a3c00e7d2abefaf01305a6 # pin@v0.6.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
force: false
|
.github/workflows/choco-package.yml
|
parameters:
fooParameter: barValue
barParameter: ['foo', 'bar']
fooProperty: 'fooProperty'
param_false: false
param_object:
foo: 'bar'
bar: 'foo'
services:
foo:
class: ./../foo
arguments: ['@bar', '%fs', 'foo-bar', '%fooParameter%']
tags:
- {name: 'fooTag'}
properties:
property: '%fooProperty%'
bar:
class: ./../bar
calls:
- {method: setFooBar, arguments: ['@foobar']}
tags:
- {name: 'fooTag'}
foobar:
class: ./../foobar
arguments:
- '@injected.lazy.service'
deprecated: 'Deprecated service'
f: '@foo'
lazy.service:
class: ./../foobar
lazy: true
injected.lazy.service:
class: ./../foobar
lazy: true
factory:
class: ./../factory
from_factory_without_args:
factory:
class: ./../factory
method: getFactoryWithoutArgs
from_factory_with_args:
arguments: ['ok']
factory:
class: ./../factory
method: getFactoryWithArgs
from_factory_with_reference_without_args:
factory:
class: '@factory'
method: getFactoryWithoutArgs
from_factory_with_reference_with_args:
arguments: ['ok']
factory:
class: '@factory'
method: getFactoryWithArgs
from_factory_with_reference_with_service_arg:
arguments: ['@f']
factory:
class: '@factory'
method: getFactoryWithServiceArg
service_missing_dependencies:
class: ./../missingDependencies
arguments: ['@f', '@?not_exists']
service_with_dependencies:
class: ./../missingDependencies
arguments: ['@f', '@?foobar']
service_missing_dependencies_call:
class: ./../missingDependencies
calls:
- {method: setMethod, arguments: ['@?not_exists']}
service_with_dependencies_call:
class: ./../missingDependencies
calls:
- {method: setMethod, arguments: ['@foobar']}
foo_with_true:
class: ./../foo
arguments: ['@bar', '%fs', true, '%true%']
foo_with_false:
class: ./../foo
arguments: ['@bar', '%fs%', 'not', '%param_false%']
private_service:
class: ./../foo
arguments: ['@bar', '%fs%']
public: false
service_using_private_service:
class: ./../foo
arguments: ['@private_service']
synthetic_service:
synthetic: true
app.listener:
class: ./../listener
tags:
- {name: listener, attributes: {event: postUpdate}}
app.mailer:
class: ./../Mailer
app.decorating_mailer:
class: ./../DecoratingMailer
decorates: app.mailer
arguments: ['@app.decorating_mailer.inner']
public: false
service_with_object_parameter:
class: ./../barManager
arguments: ['%param_object%']
not_shared_service:
class: ./../Mailer
shared: false
decorate.app.mailer:
class: ./../Mailer
decorate.one:
class: ./../DecoratingMailerOne
decorates: decorate.app.mailer
decoration_priority: 3
arguments: ['@decorate.one.inner']
decorate.two:
class: ./../DecoratingMailerTwo
decorates: decorate.app.mailer
decoration_priority: 1
arguments: ['@decorate.two.inner']
|
test/Resources/config/fake-services.yml
|
os: osx
osx_image: xcode11.3
rvm: 2.6.4
branches:
only:
- master
- develop
env:
global:
- LANG=en_US.UTF-8
- PROJECT="JSQSystemSoundPlayer.xcodeproj"
- IOS_SCHEME="JSQSystemSoundPlayer-iOS"
- TVOS_SCHEME="JSQSystemSoundPlayer-tvOS"
- OSX_SCHEME="JSQSystemSoundPlayer-OSX"
- IOS_SDK=iphonesimulator13.2
- TVOS_SDK=appletvsimulator13.2
- OSX_SDK=macosx10.15
matrix:
- DESTINATION="OS=10.3.1,name=iPhone 5" SDK="$IOS_SDK" SCHEME="$IOS_SCHEME" RUN_TESTS="YES" BUILD_EXAMPLE="YES" POD_LINT="YES"
- DESTINATION="OS=11.1,name=iPhone X" SDK="$IOS_SDK" SCHEME="$IOS_SCHEME" RUN_TESTS="YES" BUILD_EXAMPLE="YES" POD_LINT="NO"
- DESTINATION="OS=12.1,name=iPhone X" SDK="$IOS_SDK" SCHEME="$IOS_SCHEME" RUN_TESTS="YES" BUILD_EXAMPLE="YES" POD_LINT="NO"
- DESTINATION="OS=13.2.2,name=iPhone 11" SDK="$IOS_SDK" SCHEME="$IOS_SCHEME" RUN_TESTS="YES" BUILD_EXAMPLE="YES" POD_LINT="NO"
- DESTINATION="arch=x86_64" SDK="$OSX_SDK" SCHEME="$OSX_SCHEME" RUN_TESTS="NO" BUILD_EXAMPLE="YES" POD_LINT="NO"
- DESTINATION="OS=11.1,name=Apple TV" SDK="$TVOS_SDK" SCHEME="$TVOS_SCHEME" RUN_TESTS="NO" BUILD_EXAMPLE="NO" POD_LINT="NO"
- DESTINATION="OS=12.1,name=Apple TV" SDK="$TVOS_SDK" SCHEME="$TVOS_SCHEME" RUN_TESTS="NO" BUILD_EXAMPLE="NO" POD_LINT="NO"
- DESTINATION="OS=13.2,name=Apple TV" SDK="$TVOS_SDK" SCHEME="$TVOS_SCHEME" RUN_TESTS="NO" BUILD_EXAMPLE="NO" POD_LINT="NO"
before_install:
- gem install cocoapods --no-document
script:
- set -o pipefail
- xcodebuild -version -sdk
- if [ $POD_LINT == "YES" ]; then
pod lib lint --skip-tests;
fi
- if [[ $BUILD_EXAMPLE == "YES" && $SDK == $IOS_SDK ]]; then
xcodebuild build analyze -project Example/Example.xcodeproj -scheme Example-iOS -sdk "$SDK" -destination "$DESTINATION" ONLY_ACTIVE_ARCH=NO | xcpretty -c;
fi
- if [[ $BUILD_EXAMPLE == "YES" && $SDK == $OSX_SDK ]]; then
xcodebuild build analyze -project Example/Example.xcodeproj -scheme Example-OSX -sdk "$SDK" -destination "$DESTINATION" ONLY_ACTIVE_ARCH=NO | xcpretty -c;
fi
- if [ $RUN_TESTS == "YES" ]; then
xcodebuild analyze test -project "$PROJECT" -scheme "$SCHEME" -sdk "$SDK" -destination "$DESTINATION" -configuration Debug ONLY_ACTIVE_ARCH=NO | xcpretty -c;
else
xcodebuild build analyze -project "$PROJECT" -scheme "$SCHEME" -sdk "$SDK" -destination "$DESTINATION" -configuration Debug ONLY_ACTIVE_ARCH=NO | xcpretty -c;
fi
# Build for reporting test coverage
- if [ $RUN_TESTS == "YES" ]; then
xcodebuild test -project JSQSystemSoundPlayer.xcodeproj -scheme "JSQSystemSoundPlayer-iOS" -destination "platform=iOS Simulator,name=iPhone 11" CODE_SIGNING_REQUIRED=NO GCC_INSTRUMENT_PROGRAM_FLOW_ARCS=YES GCC_GENERATE_TEST_COVERAGE_FILES=YES;
fi
after_success:
- bash <(curl -s https://codecov.io/bash)
|
.travis.yml
|
{{- if .Values.mysqlOperator.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "mysql-operator.name" . }}
namespace: {{ template "mysql-operator.namespace" . }}
labels:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/name: {{ template "mysql-operator.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app: {{ template "mysql-operator.name" . }}
release: {{ template "mysql-operator.name" . }}
spec:
replicas: {{ .Values.mysqlOperator.replicas }}
serviceName: {{ template "mysql-operator.name" . }}-orc
podManagementPolicy: Parallel
selector:
matchLabels:
app.kubernetes.io/name: {{ template "mysql-operator.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app: {{ template "mysql-operator.name" . }}
release: {{ template "mysql-operator.name" . }}
template:
metadata:
labels:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/name: {{ template "mysql-operator.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app: {{ template "mysql-operator.name" . }}
release: {{ template "mysql-operator.name" . }}
{{- if .Values.mysqlOperator.statefulSet.annotations }}
annotations:
{{- toYaml .Values.mysqlOperator.statefulSet.annotations | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "mysql-operator.name" . }}
containers:
- name: operator
image: "{{ .Values.mysqlOperator.image.repository }}:{{ .Values.mysqlOperator.image.tag }}"
imagePullPolicy: "{{ .Values.mysqlOperator.image.pullPolicy }}"
env:
- name: ORC_TOPOLOGY_USER
valueFrom:
secretKeyRef:
name: {{ template "mysql-operator.name" . }}-orc
key: TOPOLOGY_USER
- name: ORC_TOPOLOGY_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "mysql-operator.name" . }}-orc
key: TOPOLOGY_PASSWORD
args:
- --leader-election-namespace={{ template "mysql-operator.namespace" . }}
# connect to orchestrator on localhost
- --orchestrator-uri=http://127.0.0.1:3000/api
- --sidecar-image={{ .Values.mysqlOperator.sidecar.image.repository }}:{{ .Values.mysqlOperator.sidecar.image.tag }}
{{- if .Values.mysqlOperator.extraArgs }}
{{ toYaml .Values.mysqlOperator.extraArgs | indent 12 }}
{{- end }}
- name: orchestrator
image: "{{ .Values.mysqlOperator.orchestrator.image.repository }}:{{ .Values.mysqlOperator.orchestrator.image.tag }}"
imagePullPolicy: "{{ .Values.mysqlOperator.orchestrator.image.pullPolicy }}"
ports:
- containerPort: 3000
name: web
protocol: TCP
- containerPort: 10008
name: raft
protocol: TCP
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
envFrom:
- prefix: ORC_
secretRef:
name: {{ template "mysql-operator.name" . }}-orc
volumeMounts:
- name: data
mountPath: /var/lib/orchestrator/
- name: config
mountPath: /templates/
livenessProbe:
timeoutSeconds: {{ .Values.mysqlOperator.livenessProbeTimeout }}
initialDelaySeconds: {{ .Values.mysqlOperator.livenessProbeInitialDelay }}
httpGet:
path: /api/lb-check
port: 3000
# https://github.com/github/orchestrator/blob/master/docs/raft.md#proxy-healthy-raft-nodes
readinessProbe:
timeoutSeconds: {{ .Values.mysqlOperator.readinessProbeTimeout }}
initialDelaySeconds: {{ .Values.mysqlOperator.readinessProbeInitialDelay }}
httpGet:
path: /api/raft-health
port: 3000
volumes:
- name: config
configMap:
name: {{ template "mysql-operator.name" . }}-orc
# security context so the orchestrator volume is mounted correctly
securityContext:
fsGroup: {{ .Values.mysqlOperator.fsGroup }}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
{{ toYaml .Values.mysqlOperator.persistentVolume.accessModes | indent 4 }}
resources:
requests:
storage: {{ .Values.mysqlOperator.persistentVolume.size }}
{{- end }}
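{{- /*
A hedged values.yaml sketch (keys inferred from the references above; images,
sizes and delays are placeholders) sufficient to render this StatefulSet:

mysqlOperator:
  enabled: true
  replicas: 1
  image: {repository: mysql-operator, tag: latest, pullPolicy: IfNotPresent}
  sidecar:
    image: {repository: mysql-sidecar, tag: latest}
  orchestrator:
    image: {repository: orchestrator, tag: latest, pullPolicy: IfNotPresent}
  extraArgs: []
  statefulSet: {annotations: {}}
  livenessProbeTimeout: 10
  livenessProbeInitialDelay: 200
  readinessProbeTimeout: 10
  readinessProbeInitialDelay: 60
  fsGroup: 777
  persistentVolume: {accessModes: [ReadWriteOnce], size: 1Gi}
*/}}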
|
charts/mattermost-operator/templates/mysql-operator/statefulset.yaml
|
uniforms:
roughness:
type: "float"
min: 0
max: 1
default: .5
anisotropy:
type: "float"
min: 0
max: 1
default: 1
metallic:
type: "float"
min: 0
max: 1
default: 1
# GLSL code is defined below. The BRDF function is called by a shader to compute
# image-based illumination and direct illumination, and is also used to generate
# the brdf lobe.
brdf: >
#define M_PI 3.1415926535897932384626433832795
#define M_1_PI 0.318309886183790671538
#define EPSILON 0.0001
float pow2(float x) {
return x * x;
}
float luminance(vec3 c) {
return 0.2126f * c.r + 0.7152f * c.g + 0.0722f * c.b;
}
float schlick_weight(float cos_theta) {
return pow(clamp(1.f - cos_theta, 0.0f, 1.0f), 5.f);
}
// D_GTR1: Generalized Trowbridge-Reitz with gamma=1
// Burley notes eq.
float gtr_1(float cos_theta_h, float alpha) {
if (alpha >= 1.f) {
return M_1_PI;
}
float alpha_sqr = alpha * alpha;
return M_1_PI * (alpha_sqr - 1.f) / (log(alpha_sqr) * (1.f + (alpha_sqr - 1.f) * cos_theta_h * cos_theta_h));
}
// D_GTR2: Generalized Trowbridge-Reitz with gamma=2
// Burley notes eq. 8
float gtr_2(float cos_theta_h, float alpha) {
float alpha_sqr = alpha * alpha;
return M_1_PI * alpha_sqr / pow2(1.f + (alpha_sqr - 1.f) * cos_theta_h * cos_theta_h);
}
// D_GTR2 Anisotropic: Anisotropic generalized Trowbridge-Reitz with gamma=2
// Burley notes eq. 13
float gtr_2_aniso(float h_dot_n, float h_dot_x, float h_dot_y, vec2 alpha) {
return M_1_PI / (alpha.x * alpha.y
* pow2(pow2(h_dot_x / alpha.x) + pow2(h_dot_y / alpha.y) + h_dot_n * h_dot_n));
}
float smith_shadowing_ggx_aniso(float n_dot_o, float o_dot_x, float o_dot_y, vec2 alpha) {
return 1.f / (n_dot_o + sqrt(pow2(o_dot_x * alpha.x) + pow2(o_dot_y * alpha.y) + pow2(n_dot_o)));
}
vec3 disney_microfacet_anisotropic(vec3 n, vec3 w_o, vec3 w_i, vec3 v_x, vec3 v_y)
{
float specular_tint = 0.0f;
vec3 base_color = vec3(1.0f, 1.0f, 1.0f);
vec3 w_h = normalize(w_i + w_o);
float lum = luminance(base_color);
vec3 tint = lum > 0.f ? base_color / lum : vec3(1, 1, 1);
vec3 spec = mix(base_color * 0.08 * mix(vec3(1, 1, 1), tint, specular_tint), base_color, metallic);
float aspect = sqrt(1.f - anisotropy * 0.9f);
float a = roughness * roughness;
vec2 alpha = vec2(max(0.001, a / aspect), max(0.001, a * aspect));
float d = gtr_2_aniso(dot(n, w_h), abs(dot(w_h, v_x)), abs(dot(w_h, v_y)), alpha);
vec3 f = mix(spec, vec3(1, 1, 1), schlick_weight(dot(w_i, w_h)));
float g = smith_shadowing_ggx_aniso(dot(n, w_i), abs(dot(w_i, v_x)), abs(dot(w_i, v_y)), alpha)
* smith_shadowing_ggx_aniso(dot(n, w_o), abs(dot(w_o, v_x)), abs(dot(w_o, v_y)), alpha);
return d * f * g;
}
vec3 BRDF(vec3 L, vec3 V, vec3 N, vec3 X, vec3 Y){
return disney_microfacet_anisotropic(N,L,V,X,Y);
}
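// A hedged usage sketch (lightColor and the surrounding loop are assumed
// caller-side code, not part of this file); per-light direct illumination:
//   vec3 radiance = BRDF(L, V, N, X, Y) * lightColor * max(dot(N, L), 0.0);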
|
brdfs/disney_microfacet_anisotropic.yaml
|
main:
#- title: "Quick-Start Guide"
# url: https://mmistakes.github.io/minimal-mistakes/docs/quick-start-guide/
- title: "Splunk Security" #CHANGED
url: /Splunk/Security #CHANGED
- title: "Services" #CHANGED
url: /Services #CHANGED
- title: "Clients" #CHANGED
url: /Clients #CHANGED
- title: "About" #CHANGED
url: /About #CHANGED
- title: Resources #CHANGED
url: /Resources #CHANGED
- title: "Jobs" #CHANGED
url: /Jobs #CHANGED
Services: #CHANGED
- title: "Operations" #CHANGED
url: /Services/Operations #CHANGED
- title: "Consulting" #CHANGED
url: /Services/consulting #CHANGED
- title: "Development" #CHANGED
url: /Services/Development #CHANGED
- title: "Technologies" #CHANGED
url: /Services/Technologies #CHANGED
About: #CHANGED
- title: "Why Aplura?" #CHANGED
url: /About/Why-Aplura #CHANGED
- title: "Our Expert Staff" #CHANGED
url: /About/Out-Expert-Staff #CHANGED
- title: "History and Mission" #CHANGED
url: /About #CHANGED
Resources: #CHANGED
- title: "Splunk Apps" #CHANGED
url: /Resources/Splunk-Apps #CHANGED
- title: "Baltimore Splunk User Group" #CHANGED
url: /Resources/Baltimore-Splunk-User-Group #CHANGED
- title: "Splunk Best Pratices" #CHANGED
url: /Resources/Splunk-Best-Pratices #CHANGED
- title: "Cheat Sheets" #CHANGED
url: /Resources/Cheat-Sheets #CHANGED
- title: "Other Resources" #CHANGED
url: /Resources/Other #CHANGED
Jobs: #CHANGED
- title: "Intigration Developer" #CHANGED
url: /Jobs/Intigration-Developer #CHANGED
- title: "Security Consultant: Splunk" #CHANGED
url: /Jobs/Security-Consultant-Splunk #CHANGED
- title: "Security Consultant: Gigamon and Palo Alto" #CHANGED
url: /Jobs/Security-Consultant-Gigamon-and-Palo-Alto #CHANGED
- title: "Benefits" #CHANGED
url: /Jobs/Benefits #CHANGED
secondary: #CHANGED
- title: "Splunk Security" #CHANGED
url: /Splunk/Security #CHANGED
- title: "Services" #CHANGED
url: /Services #CHANGED
children: #CHANGED
- title: "Operations" #CHANGED
url: /Services/Operations #CHANGED
- title: "Consulting" #CHANGED
url: /Services/consulting #CHANGED
- title: "Development" #CHANGED
url: /Services/Development #CHANGED
- title: "Technologies" #CHANGED
url: /Services/Technologies #CHANGED
- title: "Clients" #CHANGED
url: /Clients #CHANGED
- title: "About" #CHANGED
url: /About #CHANGED
children: #CHANGED
- title: "Why Aplura?" #CHANGED
url: /About/Why-Aplura #CHANGED
- title: "Our Expert Staff" #CHANGED
url: /About/Out-Expert-Staff #CHANGED
- title: "History and Mission" #CHANGED
url: /About #CHANGED
- title: Resources #CHANGED
url: /Resources #CHANGED
children: #CHANGED
- title: "Splunk Apps" #CHANGED
url: /Resources/Splunk-Apps #CHANGED
- title: "Baltimore Splunk User Group" #CHANGED
url: /Resources/Baltimore-Splunk-User-Group #CHANGED
- title: "Splunk Best Pratices" #CHANGED
url: /Resources/Splunk-Best-Pratices #CHANGED
- title: "Cheat Sheets" #CHANGED
url: /Resources/Cheat-Sheets #CHANGED
- title: "Other Resources" #CHANGED
url: /Resources/Other #CHANGED
- title: "Jobs" #CHANGED
url: /Jobs #CHANGED
- title: "Contact" #CHANGED
url: /Contact #CHANGED
|
_data/navigation.yml
|
- company: Edelman Data & Intelligence
position: Data Scientist
duration: Jan, 2022 — Present
summary: Functioning as both data engineer and data scientist, I contribute to our product development team by building data pipelines, exploring new learning algorithms, and coordinating workflows to integrate human-dependent data collection (surveys) with automated data pipelines.
# Last Edelman Position (Edelman DxI)
- company: Edelman Data & Intelligence
position: Manager, Data Processing
duration: Feb, 2018 — Jan, 2022
summary: Managed a small team of Python and STATA users, serving a global network of client-facing teams conducting research for Fortune 100 clients. Our mandate was to "empower teams to make data insightful," and with that my responsibilities included <ul class="resume-item-list"><li>Conducting advanced analytics, like consumer segmentations and key drivers analysis</li><li>Writing libraries in STATA and Python to handle survey-specific tasks, including automated deck creation</li><li>Coaching teams on how to correctly explain statistical findings to clients</li><li>Co-managing a relationship with a team of outsourced programmers and debugging programming errors in online surveys</li></ul>
# Peace Corps
- company: US Peace Corps Morocco
position: Youth Development Volunteer, Larache, Morocco
duration: Sep, 2016 — Dec, 2017
summary: Developed and taught an English-language instruction program at a youth center in rural Morocco.
# Unitrac
- company: Unitrac Energy Management
position: VBA Macro Developer, Rochester, NY
duration: May, 2016 — Sep, 2016
summary: Working for my family's business, I learned my first programming language and built macros to automate Excel routines for generating quotes and preparing pitch materials for new customers.
# Senate Foreign Relations
- company: US Senate Committee on Foreign Relations
position: Majority Staff Intern, Washington, DC
duration: Jan, 2016 — May, 2016
summary: Wrote short memos on policy issues, and quick bios of government officials so Senators and staffers could quickly prepare for meetings with US and foreign officials.
# Shirley and Banister Public Affairs
- company: Shirley & Banister Public Affairs
position: Intern / Junior Account Executive, Washington, DC
duration: Sep, 2015 — Jan, 2016
summary: Wrote press releases and media pitches for Republican thought leaders during the 2016 primary season. After one month as intern I was promoted to a full-time contract employee with my own dedicated clients.
|
_data/experience.yml
|
AddonId: EmulationToolbox_eb3ccf5d-971a-41a6-aefd-aae9455e6de4
Packages:
- Version: 1.0
RequiredApiVersion: 6.2.1
ReleaseDate: 2022-01-10
PackageUrl: https://github.com/ferrazpedro/playnite-emulation-toolbox-plugin/releases/download/1.0.0/EmulationToolbox_eb3ccf5d-971a-41a6-aefd-aae9455e6de4_1_0.pext
Changelog:
- Initial Release
- Version: 1.1
RequiredApiVersion: 6.2.1
ReleaseDate: 2022-01-10
PackageUrl: https://github.com/ferrazpedro/playnite-emulation-toolbox-plugin/releases/download/1.1.0/EmulationToolbox_eb3ccf5d-971a-41a6-aefd-aae9455e6de4_1_1.pext
Changelog:
- Adding the feature to merge games and fixing some bugs
- Version: 1.2
RequiredApiVersion: 6.2.1
ReleaseDate: 2022-01-11
PackageUrl: https://github.com/ferrazpedro/playnite-emulation-toolbox-plugin/releases/download/1.2.0/EmulationToolbox_eb3ccf5d-971a-41a6-aefd-aae9455e6de4_1_2.pext
Changelog:
- Adding the feature to move "The" to the beginning of the game name and moving options to the context menu
- Version: 1.3
RequiredApiVersion: 6.2.1
ReleaseDate: 2022-01-11
PackageUrl: https://github.com/ferrazpedro/playnite-emulation-toolbox-plugin/releases/download/1.3.0/EmulationToolbox_eb3ccf5d-971a-41a6-aefd-aae9455e6de4_1_3.pext
Changelog:
- Adding the feature to add a sorting name (without the leading "The") to games that begin with "The".
- Version: 1.3.1
RequiredApiVersion: 6.2.1
ReleaseDate: 2022-01-11
PackageUrl: https://github.com/ferrazpedro/playnite-emulation-toolbox-plugin/releases/download/1.3.1/EmulationToolbox_eb3ccf5d-971a-41a6-aefd-aae9455e6de4_1_3_1.pext
Changelog:
- Making the ROM name cleanup feature a little better for PS3 and Wii U.
- Version: 1.3.2
RequiredApiVersion: 6.2.1
ReleaseDate: 2022-02-10
PackageUrl: https://github.com/ferrazpedro/playnite-emulation-toolbox-plugin/releases/download/1.3.2/EmulationToolbox_eb3ccf5d-971a-41a6-aefd-aae9455e6de4_1_3_2.pext
Changelog:
- Fixing cases where ROM name cleanup left ROM names empty. Now they are filled with "default" so Playnite still counts them while importing new games.
|
manifest/Ferrell_EmulationToolbox.yaml
|
sylius:
email:
contact_request:
content: 'Content'
message_from: 'Message from'
subject: 'Contact request'
order_confirmation:
has_been_successfully_placed: 'has been successfully placed.'
subject: 'Order confirmation'
view_order_or_change_payment_method: 'View order or change payment method'
thank_you: 'Thank you for shopping at our store!'
your_order_number: 'Your order no.'
password_reset:
hello: 'Hello'
reset_your_password: 'Reset your password'
subject: 'Password reset'
to_reset_your_password: 'To reset your password - click the link below'
to_reset_your_password_token: 'To reset your password - use the token below'
token: 'Your password reset token is'
user_registration:
start_shopping: 'Start shopping'
subject: 'User registration'
welcome_to_our_store: 'Welcome to our store!'
you_have_just_been_registered: 'You have just been registered. Thank you'
verification_token:
hello: 'Hello'
subject: 'Email address verification'
to_verify_your_email_address: 'To verify your email address - click the link below'
verify_your_email_address: 'Verify your email address'
footer:
banner:
free_shipping_title: 'Free Shipping & Return'
free_shipping_content: 'Free shipping on all orders'
guarantee_title: 'Money Guarantee'
guarantee_content: '30 days money back guarantee'
multiple_payments_title: 'Multiple Payment methods'
multiple_payments_content: 'We accept credit cards and PayPal'
homepage:
about_us: 'About Us'
about_us_content: 'Welcome to our store, your number one online shop that gives you the very best products. You can choose from the very best clothes and shoes from our offer. (<i>You can modify this content by overriding "sylius.homepage.about_us_content" key in your translation file</i>)'
banner_content: 'Brace yourself! Winter is coming'
banner_button: 'Check out the new collection'
carousel_header: 'Featured products'
newsletter: 'Newsletter'
newsletter_description: 'Subscribe to our newsletter to see new offers that are waiting for you! (<i>You can modify this content by overriding "sylius.homepage.newsletter_description" key in your translation file</i>)'
subscribe: 'Subscribe'
lightbox:
image_album_label: 'Image %1 of %2'
menu:
shop:
account:
address_book: 'Address book'
change_password: 'Change password'
dashboard: 'Dashboard'
header: 'Your account'
order_history: 'Order history'
personal_information: 'Personal information'
top_bar: 'Sign up to our newsletter and grab -20% off'
|
src/Sylius/Bundle/ShopBundle/Resources/translations/messages.en.yml
|
version: '3.0'
services:
blackvuesync:
image: acolomba/blackvuesync
container_name: blackvuesync
restart: unless-stopped
volumes:
# Recording download destination. Change only the part before the colon.
- /mnt/dashcam:/recordings:rw
environment:
# Dashcam address
# ADDRESS: dashcam.example.net
# Set these to the desired destination directory's user id and group id.
PUID: 1000
PGID: 1000
# Set to the same timezone as the dashcam. Note that BlackVue dashcams do
# not respect Daylight Saving Time, so their clock needs to be adjusted
# periodically. For the complete list of possible values, see:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TZ: America/New_York
# Priority to download recordings. Pick "date" to download from oldest to
# newest; pick "rdate" to download from newest to oldest; pick "type" to
# download manual, event (all types), normal and parking recordings in
# that order.
PRIORITY: date
# Groups downloaded recordings in directories: 'daily', 'weekly',
# 'monthly', 'yearly' and 'none' are supported.
GROUPING: none
# Retention period of downloaded recordings. Recordings prior to the
# retention period will be removed from the destination. Accepted units
# are 'd' for days and 'w' for weeks. If no unit is indicated, days are
# assumed.
KEEP: 2w
# Stops downloading if the amount of used disk space exceeds the indicated
# percentage value.
MAX_USED_DISK: 90
# Sets the timeout in seconds for connecting to the dashcam.
TIMEOUT: 10.0
# Set to a number greater than zero to increase logging verbosity.
VERBOSE: 0
# Set to any value to quiet down logs: only unexpected errors will be
# logged.
QUIET: ''
# Makes it so downloads of normal recordings and unexpected error
# conditions are logged. Can be set to '' to disable.
CRON: 1
# If set to any value, makes it so that the script communicates what it
# would do without actually doing anything.
DRY_RUN: ''
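# A hedged usage sketch: after adjusting the values above, start the service
# with `docker compose up -d` and follow its progress with
# `docker logs -f blackvuesync`.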
|
docker-compose.yml
|
name: 🐳 Build + Publish Multi-Platform Image
on:
workflow_dispatch:
push:
branches: ['master']
tags: [v*]
env:
DH_IMAGE: ${{ secrets.DOCKER_REPO }}
GH_IMAGE: ${{ github.repository_owner }}/${{ github.event.repository.name }}
jobs:
docker:
runs-on: ubuntu-latest
permissions: { contents: read, packages: write }
if: "!contains(github.event.head_commit.message, '[ci-skip]')"
steps:
- name: 🛎️ Checkout Repo
uses: actions/checkout@v2
- name: 🔖 Get App Version
uses: tyankatsu0105/read-package-version-actions@v1
id: package-version
# - name: ✨ Validate Dockerfile
# uses: ghe-actions/dockerfile-validator@v1
# with:
# dockerfile: 'Dockerfile'
# lint: 'hadolint'
- name: 🗂️ Make Docker Meta
id: meta
uses: docker/metadata-action@v3
with:
images: |
${{ env.DH_IMAGE }}
ghcr.io/${{ env.GH_IMAGE }}
${{ secrets.ACR_SERVER }}/${{ secrets.ACR_USERNAME }}
tags: |
type=ref,event=tag,prefix=release-,suffix={{tag}}
type=semver,pattern={{raw}},value=${{ steps.package-version.outputs.version }}
labels: |
maintainer=Lissy93
org.opencontainers.image.title=Dashy
org.opencontainers.image.description=A self-hosted startpage for your server
org.opencontainers.image.documentation=https://dashy.to/docs
org.opencontainers.image.authors=<NAME>
org.opencontainers.image.licenses=MIT
- name: 🔧 Set up QEMU
uses: docker/setup-qemu-action@v1
- name: 🔧 Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: 🔑 Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: 🔑 Login to GitHub Container Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: 🔑 Login to Azure Container Registry
uses: docker/login-action@v1
with:
registry: ${{ secrets.ACR_SERVER }}
username: ${{ secrets.ACR_USERNAME }}
password: ${{ secrets.ACR_PASSWORD }}
- name: ⚒️ Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
push: true
# - name: 💬 Set Docker Hub Description
# uses: peter-evans/dockerhub-description@v2
# with:
# repository: lissy93/dashy
# readme-filepath: ./README.md
# short-description: Dashy - A self-hosted start page for your server
# username: ${{ secrets.DOCKER_USERNAME }}
# password: ${{ secrets.DOCKER_PASSWORD }}
|
.github/workflows/docker-build-publish.yml
|
# Component information
component: scf # (required) Component name; scf in this instance
name: SLscript # (required) Component instance name.
# Component parameter configuration
inputs:
name: scf-${name} # Cloud function name; defaults to ${name}-${stage}-${app}
enableRoleAuth: true # Tries to create the SCF_QcsRole role by default; set to false if not needed
src: ./
handler: index.main_handler # Entry point
runtime: Nodejs12.16 # Runtime environment; defaults to Nodejs10.15
region: ap-chengdu # Region where the function is deployed
description: This is a function in the ${app} application.
memorySize: 128 # Memory size in MB
timeout: 3600 # Timeout in seconds
asyncRunEnable: true # Whether to enable asynchronous execution
installDependency: true # Whether to install dependencies online
events: # Triggers
- timer: # JD Speed-edition red packet  # JD everyone-opens-red-packets  # Shake for JD beans
parameters:
name: spdredpocke_redPacket_clot
cronExpression: "45 0,21,23 * * *"
enable: true
argument: jd_speed_redpocke&jd_redPacket&jd_club_lottery
- timer: # Farm  # Pet  # Wealth  # Flash sale  # Gold List Creation Camp  # Jingxi tiered red packet  # Money-Saving Big Winner  ### Bubble Battle
parameters:
name: fr_pt_cf_sg_jb_hb
cronExpression: "45 6 * * *"
enable: true
argument: jd_fruit&jd_pet&jd_unsubscribe&jd_jbczy&jd_jxlhb&jd_lsj&jd_dpqd&jd_MMdou&jd_sign_graphics&jd_wish&jd_sgmh&z_ddworld&jd_ddwj #&jd_cfd &jd_ppdz
- timer: # Dream Factory  # Plant Beans  # Super Box[? 5,6]  # DianDian coupons[smiek2221]  # DongDong Factory  # Tewu Z  # Xinxiangyin[1-30 9,10]  # Content Connoisseur  # Star Shop[10-17 9]  # DongDong World  # Decompression[9.21-10.16]  # Magic Cube
parameters:
name: dmf_pb_sbox_ddj #ddf_
cronExpression: "18 9,20 * * *"
enable: true
argument: jd_dreamFactory&jd_plantBean&adolf_superbox&jd_Z4B&jd_xxy&jd_star_shop&jd_ddworld&jd_decomp&jd_mf&jd_connoisseur&jd_ttpt&jd_jieMo #&jd_necklace&jd_jdfactory
- timer: # Jingxi Ranch (star)  # Jump Jump Fun
parameters:
name: mc_jump
cronExpression: "1 8,14,18,20 * * *"
enable: true
argument: jd_jxmc&jd_joypark_task&jd_super_mh&jd_jump # &z_cnvcity
- timer: # On-the-hour JD bean rain  # Live-stream red packet rain  # Health Community (Cangjingge edition) - collect energy  # Wealth Island mooncake crafting
parameters:
name: redrain
cronExpression: "0 * * * *"
enable: true
argument: jd_ffl&jd_cfd_mooncake # jd_super_redrain&jd_live_redrain&
# - timer: # Half-hour JD bean rain[30 16-23/1]  # Live-stream red packet rain
# parameters:
# name: half_redrain
# cronExpression: "30 * * * *"
# enable: true
# argument: jd_half_redrain&jd_live_redrain
- timer: # JD Live  # Environment test
parameters:
name: live_api
cronExpression: "10-20/5 12 * * *"
enable: true
argument: jd_live&jd_api_test
- timer: # 5G super blind box  # Health Community (Cangjingge edition)
parameters:
name: mohe_jpj_hc
cronExpression: "0 0,1-23/3 * * *"
enable: true
argument: jd_mohe&jd_joypark_joy #&z_health_community&z_cfd
# - timer: # Carnival City [9-16 to 10-1]
# parameters:
# name: cnvcity
# cronExpression: "0 0-18/6 * * *"
# enable: true
# argument: jd_carnivalcity
# - timer: # For temporary runs
# parameters:
# name: temp
# cronExpression: "33 10,11 5 8 *"
# enable: true
# argument: jd_Z4Brand
environment: # Environment variables
variables: # Environment variable object
AAA: BBB # Do not delete; used to keep appended variables aligned
|
serverless_SL.yml
|
name: Build CI
on:
push:
branches: [release/latest]
jobs:
version:
runs-on: ubuntu-latest
outputs:
latest-version: ${{steps.latest-version.outputs.version}}
package-version: ${{steps.package-version.outputs.current-version}}
strategy:
matrix:
node-version: [12.x]
steps:
- uses: actions/checkout@v2
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node-version }}
registry-url: 'https://registry.npmjs.org'
- name: Get latest version from npm
id: latest-version
run: echo ::set-output name=version::$(npm show @microsoft/mgt version)
- name: Get current package version
id: package-version
uses: martinbeentjes/npm-get-version-action@master
release:
runs-on: ubuntu-latest
needs: version
if: needs.version.outputs.latest-version != needs.version.outputs.package-version
strategy:
matrix:
node-version: [12.x]
environment:
name: release
steps:
- uses: actions/checkout@v2
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node-version }}
registry-url: 'https://registry.npmjs.org'
- name: Build 🛠
run: |
npm install -g yarn lerna
yarn
yarn build
- name: Update package version
run: node scripts/setVersion.js
- name: Publish npm packages
run: lerna exec --scope @microsoft/* -- "npm publish --access=public"
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Attach mgt-spfx solution to GitHub release
run: node scripts/uploadSpfxSolution.js ${{secrets.GITHUB_TOKEN}} ${{needs.version.outputs.package-version}}
storybook:
runs-on: ubuntu-latest
strategy:
matrix:
node-version: [12.x]
steps:
- uses: actions/checkout@v2
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node-version }}
registry-url: 'https://registry.npmjs.org'
- name: Build 🛠
run: |
npm install -g yarn lerna
yarn
yarn build
yarn storybook:build
- name: Deploy mgt.dev 🚀
uses: JamesIves/github-pages-deploy-action@4.1.1
with:
branch: gh-pages
folder: storybook-static
target-folder: .
clean-exclude: next
|
.github/workflows/push-release.yml
|
uid: Microsoft.Dynamics.CRM.PagingInfo
name: "PagingInfo"
type: complextype
metadata:
title: "Dataverse PagingInfo ComplexType Reference | MicrosoftDocs"
ms.date: "02/10/2021"
ms.service: "powerapps"
ms.topic: "reference"
ms.assetid: 36db5e90-14e7-460b-a2f2-e01854643ef6
author: "kumarvivek"
ms.author: "kvivek"
manager: "annbe"
meta-description: "Reference information about the Dataverse Web API PagingInfo ComplexType."
namespace: Microsoft.Dynamics.CRM
# This complex type doesn't inherit from another complextype.
description: "Specifies a number of pages and a number of entity instances per page to return from the query. "
properties:
description: "The properties of a complex type contain the structured data of the type."
property_items:
- name: PageNumber
type: Edm.Int32
nullable: true
unicode: true
description: " The number of pages returned from the query. "
iscollection: false
- name: Count
type: Edm.Int32
nullable: true
unicode: true
description: "The number of entity instances returned per page. "
iscollection: false
- name: ReturnTotalRecordCount
type: Edm.Boolean
nullable: true
unicode: true
description: "The total number of records should be returned from the query."
iscollection: false
- name: PagingCookie
type: Edm.String
nullable: true
unicode: false
description: "The info used to page large result sets."
iscollection: false
# This complex type doesn't have any navigation properties.
usedby:
description: "The following use the PagingInfo ComplexType."
usedby_items:
- name: Microsoft.Dynamics.CRM.RetrieveAttributeChangeHistory
used_as: Parameter
- name: Microsoft.Dynamics.CRM.RetrieveDuplicates
used_as: Parameter
- name: Microsoft.Dynamics.CRM.RetrieveParsedDataImportFile
used_as: Parameter
- name: Microsoft.Dynamics.CRM.RetrieveRecordChangeHistory
used_as: Parameter
- name: Microsoft.Dynamics.CRM.QueryExpression
used_as: Property
- name: Microsoft.Dynamics.CRM.QueryByAttribute
used_as: Property
- name: Microsoft.Dynamics.CRM.LookupEntityInfo
used_as: Property
# There are no remarks for this complex type.
href_see_also:
- text: "Use the Dataverse Web API"
href: "https://docs.microsoft.com/powerapps/developer/data-platform/webapi/overview"
xref_see_also:
- Microsoft.Dynamics.CRM.EntityTypeIndex
- Microsoft.Dynamics.CRM.ActionIndex
- Microsoft.Dynamics.CRM.FunctionIndex
- Microsoft.Dynamics.CRM.QueryFunctionIndex
- Microsoft.Dynamics.CRM.ComplexTypeIndex
- Microsoft.Dynamics.CRM.EnumTypeIndex
- Microsoft.Dynamics.CRM.MetadataEntityTypeIndex
- Microsoft.Dynamics.CRM.SolutionIndex
|
WebAPI-reference/dynamics-ce-odata-9/complextypes/PagingInfo.yml
|
%YAML 1.1
---
actions:
- action_cheer_up
- action_contribute
- action_default_fallback
- action_about_me
- utter_about_me
- utter_advice
- utter_bye
- utter_contribute
- utter_cost
- utter_events
- utter_greet
- utter_happy
- utter_languages
- utter_my_age
- utter_negative_feedback_reaction
- utter_positive_feedback_reaction
- utter_restaurant
- utter_subscribe
- utter_tech
- utter_time
- utter_weather
- utter_welcome
- utter_who_you_are
intents:
- acknowledge
- affirm
- ask_age
- ask_are_you_bot
- ask_cost
- ask_events
- ask_languages
- ask_faq
- ask_how_contribute
- ask_restaurant
- ask_tech
- ask_time
- ask_weather
- ask_what_can_you_do
- ask_whoami
- bot_welcome
- bye
- deny
- greet
- insult
- opinion+negative
- opinion+positive
- out_of_scope
templates:
utter_about_me:
- text: >
I am a prototype of a multilingual assistant.
Feel free to try my [German](#) or [Swedish](#) skills
and share your feedback. Always keen to learn!
utter_advice:
- text: I trust you will get better. Never give up!
utter_bye:
- text: Bye
- text: Chat soon!
- text: Nice chatting to you.
- text: See ya!
utter_cheer_up:
- image: https://i.imgur.com/nGF1K8f.jpg
text: 'Here is something to cheer you up:'
utter_contribute:
- text: >
Glad you asked, contributions are always welcome.
You don't have to be a programmer to help.
utter_cost:
- text: I am for free.
- text: Free as in beer.
- text: Free as in speech.
- text: Free of charge.
utter_default_fallback:
- buttons:
- payload: contribute
title: Contribute
text: >
Oops, to be frank I didn't quite get you. Come again.
If you feel like I am not smart enough, you are probably right.
      I've got a lot to learn. Would you consider becoming a contributor?
utter_did_that_help:
- text: Did that help you?
utter_events:
- text: I am not your event organizer buddy.
- text: Dunno, why don't you check EventBrite and the likes?
- text: Go ask [Sara](#).
utter_greet:
- text: Hi
- text: Hey
- text: Hey! How are you?
- text: How are you doing?
utter_happy:
  - text: Great, carry on!
utter_languages:
- text: >
I speak three languages.
Feel free to try my [German](#) or [Swedish](#) skills
and share your feedback. Always keen to learn!
utter_my_age:
- text: I wasn't born yesterday!
- text: Old enough to be a bot!
- text: Forever young!
utter_negative_feedback_reaction:
- text: Sad to hear
- text: Oh, I can do better than that!
  - text: Hmm, you are hard to please, aren't you?
- text: Okay
utter_positive_feedback_reaction:
- text: You are welcome
- text: Welcome!
- text: No worries
- text: My pleasure
- text: That's ok
utter_restaurant:
- text: Yeah, I wonder too.
- text: No clue, why don't you check TripAdvisor and the likes?
- text: Go ask [Sara](#).
utter_subscribe:
- buttons:
- payload: subscribe
title: Stay in touch
text: Get all the latest news!
utter_tech:
- text: >
I run on Python 3 and the latest stack of [Rasa](#).
Check the [readme](#) for all the details.
utter_time:
- text: Am I your time keeper, huh?
- text: That's funny, I was about to ask the same question!
- text: Go ask [Sara](#).
utter_try_again:
- text: Come again!
- text: Try something else please.
utter_weather:
- text: I only believe in sunshine.
  - text: Good question, you wanna google it?
- text: Go ask [Sara](#).
utter_welcome:
- buttons:
- payload: faq
title: About me
- payload: subscribe
title: Get in touch
text: Hi, thanks for trying me out. Let's chat!
utter_who_you_are:
- text: You must be a wonderful soul.
- text: I don't know, we've barely met!
- text: Go ask [Sara](#).
|
examples/rasa_demo/config/models/sv/domain.yml
|
name: Dusk
on: [ push,pull_request ]
jobs:
dusk:
runs-on: ubuntu-latest
strategy:
fail-fast: true
max-parallel: 2
matrix:
php-versions: [ '7.4' ]
steps:
- name: Checkout
uses: actions/checkout@v2
env:
DEBUG: true
APP_ENV: "testing"
- uses: ./.github/actions/setup-php
with:
php_version: ${{ matrix.php-versions }}
- name: Run Laravel Server
#run: php artisan serve-test --env=testing > /dev/null 2>&1 &
#run: APP_ENV=testing && php -d variables_order=EGPCS -S 127.0.0.1:8000 > /dev/null 2>&1 &
run: APP_ENV=testing && php -d variables_order=EGPCS -S 127.0.0.1:8000 > /home/runner/work/microweber/microweber/storage/logs/serve.log 2>&1 &
- name: Run Dusk Tests
run: |
chmod -R 0755 vendor/laravel/dusk/bin/
php artisan dusk --testsuite MicroweberCoreTests
- name: Run Dusk Shop Tests
run: |
chmod -R 0755 vendor/laravel/dusk/bin/
php artisan dusk --testsuite MicroweberShopTests
- name: Run Dusk Multilanguage Tests
run: |
chmod -R 0755 vendor/laravel/dusk/bin/
php artisan dusk --testsuite MicroweberMultilanguageTests
- name: Run Dusk Template Tests
run: |
chmod -R 0755 vendor/laravel/dusk/bin/
php artisan dusk --testsuite MicroweberTemplatesTests
- name: Run Dusk Slow Tests
run: |
chmod -R 0755 vendor/laravel/dusk/bin/
php artisan dusk --testsuite MicroweberSlowTests
- name: Upload Screenshots
if: failure()
uses: actions/upload-artifact@v2
with:
name: screenshots
path: tests/Browser/screenshots
- name: Upload Console Logs
if: failure()
uses: actions/upload-artifact@v2
with:
name: console
path: tests/Browser/console
- name: Upload Laravel Storage
if: failure()
uses: actions/upload-artifact@v2
with:
name: storage
path: storage
- name: Upload Laravel config
if: failure()
uses: actions/upload-artifact@v2
with:
name: config
path: config
|
.github/workflows/dusk.yml
|
security:
# http://symfony.com/doc/current/book/security.html#encoding-the-user-s-password
encoders:
Symfony\Component\Security\Core\User\User: plaintext
Checkengine\DashboardBundle\Entity\Usuario:
algorithm: sha512
encode-as-base64: true
iterations: 10
# http://symfony.com/doc/current/book/security.html#hierarchical-roles
role_hierarchy:
ROLE_CLIENTE: ROLE_USUARIO
ROLE_ADMIN: ROLE_CLIENTE
ROLE_SUPER_ADMIN: [ROLE_USUARIO, ROLE_CLIENTE, ROLE_ADMIN, ROLE_ALLOWED_TO_SWITCH]
# http://symfony.com/doc/current/book/security.html#where-do-users-come-from-user-providers
providers:
user_db:
entity: { class: Checkengine\DashboardBundle\Entity\Usuario, property: email }
# the main part of the security, where you can set up firewalls
# for specific sections of your app
firewalls:
# disables authentication for assets and the profiler, adapt it according to your needs
dev:
pattern: ^/(_(profiler|wdt)|css|images|js)/
security: false
frontend:
pattern: ^/
provider: user_db
form_login:
login_path: login
check_path: login_check
success_handler: checkengine.security.authentication.handler
failure_handler: checkengine.security.authentication.handler
remember_me: true
use_referer: false
logout:
path: logout
target: login
remember_me:
key: "%secret%"
lifetime: 31536000
path: /.*
domain: ~
always_remember_me: true
security: true
anonymous: true
# with these settings you can restrict or allow access for different parts
# of your application based on roles, ip, host or methods
# http://symfony.com/doc/current/book/security.html#security-book-access-control-matching-options
access_control:
- { path: ^/api, roles: IS_AUTHENTICATED_ANONYMOUSLY }
- { path: ^/registro, roles: IS_AUTHENTICATED_ANONYMOUSLY }
- { path: ^/recuperar, roles: IS_AUTHENTICATED_ANONYMOUSLY }
- { path: ^/contacto, roles: IS_AUTHENTICATED_ANONYMOUSLY }
- { path: ^/login, roles: IS_AUTHENTICATED_ANONYMOUSLY }
- { path: ^/, roles: [ROLE_CLIENTE] }
|
app/config/security.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2019-05-11 10:19"
game: "Unreal Tournament 2004"
name: "CTF-VertigoHazard"
author: "Michael \"widowmaker\" Cranston"
description: "With the success of the Sky-Line and Plunge arenas the tournament turned\
\ it's attention to creating a new Capture The Flag area in the same genre. And\
\ So Vertigo Hazard was born. Perched high above the old world below the rooftops\
\ are unforgiving. One false move and the camp editor plummets to their doom. To\
\ facilitate movement a network of jump pads was installed on the old buildings.\
\ Now it stands as one of the most competitive and dangerous areas on the active\
\ roster. Close quarters fighters and snipers alike will find it an easy arena to\
\ frag and be fragged in. Good luck! Special thanks to BLITZ for his help getting\
\ the paths working correctly."
releaseDate: "2003-02"
attachments:
- type: "IMAGE"
name: "CTF-VertigoHazard_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/V/CTF-VertigoHazard_shot_2.png"
- type: "IMAGE"
name: "CTF-VertigoHazard_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/V/CTF-VertigoHazard_shot_1.png"
originalFilename: "ctf-vertigohazard.zip"
hash: "982ec0dd0724a403c1d48bf2ebc80d6febd02749"
fileSize: 1792213
files:
- name: "CTF-VertigoHazard.ut2"
fileSize: 8988802
hash: "405072c356c126a993ce1276b4724e39abef16ad"
otherFiles: 2
dependencies: {}
downloads:
- url: "http://www.unrealplayground.com/forums/downloads.php?do=file&id=1235"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/V/9/8/2ec0dd/ctf-vertigohazard.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/V/9/8/2ec0dd/ctf-vertigohazard.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/V/9/8/2ec0dd/ctf-vertigohazard.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Capture The Flag"
title: "Vertigo_Hazard"
playerCount: "12-20"
themes:
Industrial: 0.7
City: 0.2
bots: true
|
content/Unreal Tournament 2004/Maps/Capture The Flag/V/9/8/2ec0dd/ctf-vertigohazard_[982ec0dd].yml
|
interactions:
- request:
body: '{"score_custom": "7", "access_complexity": "access_complexity_low", "access_vector":
"access_vector_adjacent_network", "availability_impact": "availability_impact_none",
"confidentiality_impact": "confidentiality_impact_low", "integrity_impact":
"integrity_impact_low", "privilege_required": "privilege_required_none", "scope":
"scope_changed", "user_interaction": "user_interaction_required"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Authorization:
- CyberWatch APIAuth-HMAC-SHA256 <KEY>
Connection:
- keep-alive
Content-Length:
- '393'
Content-Type:
- application/json
Date:
- Mon, 24 Feb 2020 13:47:47 GMT
User-Agent:
- python-requests/2.22.0
method: PUT
uri: https://localhost/api/v3/cve_announcements/CVE-2019-16768
response:
body:
string: !!binary |
H4sIAAAAAAAAA21TTW+cMBD9KyNOrZqs2K2StNyiKIeek/ZSKjSYAdw1NrUNLIry3zsGdskme7Ln
zfObT79EwmhP2kdJ9HgQ1HppNDTkHFbkoLSmAckEq1EBHQkOPim5JyjQY46OVs9nQEswWGxbKiAf
IU2fxqY0ekzTB9O0RnMsxkh0VvoJtJSmp9Bpet/5mjlSYLCfyPZS0Joa6gJaa1qs0HMEX1vTVTWf
BG50nhrwBn7+2MBzTZZKVr8CZ<KEY>
headers:
Cache-Control:
- no-store, must-revalidate, private, max-age=0
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Security-Policy:
- 'default-src ''self''; script-src ''self'' ''unsafe-inline'' ''unsafe-eval'';
style-src ''self'' ''unsafe-inline''; img-src ''self'' data:'
Content-Type:
- application/json; charset=utf-8
Date:
- Mon, 24 Feb 2020 13:47:48 GMT
Referrer-Policy:
- strict-origin-when-cross-origin
Server:
- nginx
Set-Cookie:
- __profilin=p%3Dt; path=/; secure; HttpOnly
Transfer-Encoding:
- chunked
Vary:
- Accept-Encoding
X-Content-Type-Options:
- nosniff
X-Download-Options:
- noopen
X-Frame-Options:
- SAMEORIGIN
X-MiniProfiler-Ids:
- egiov7weuohvhg7ljl01,t0mge3ezo1ngjzxq34oo,pt5bvee2t1vx3wb1rb9o,wypnnhra7l5y6bk9hhg3,sylmbbtx382c4gaxghfz
X-MiniProfiler-Original-Cache-Control:
- max-age=0, private, must-revalidate
X-Permitted-Cross-Domain-Policies:
- none
X-Request-Id:
- 7fbdd7df-30e6-4313-8dc3-1748cc85275b
X-Runtime:
- '1.872296'
X-XSS-Protection:
- 1; mode=block
status:
code: 200
message: OK
version: 1
|
spec/fixtures/vcr_cassettes/update_cve_announcement.yaml
|
argo:
images:
pullPolicy: IfNotPresent
{{- if .Values.registry }}
controller:
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "300m"
memory: "150Mi"
image:
registry: {{ .Values.registry }}
executor:
image:
registry: {{ .Values.registry }}
server:
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "300m"
memory: "150Mi"
image:
registry: {{ .Values.registry }}
{{- end }}
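  # A minimal sketch of what the conditional block above renders to when the
  # parent chart is templated with a registry override (the hostname is a
  # hypothetical example): each image block gains
  #   registry: registry.example.internal
  # and when .Values.registry is unset, the whole block is omitted.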
# Influences the creation of the ConfigMap for the workflow-controller itself.
useDefaultArtifactRepo: true
useStaticCredentials: true
artifactRepository:
# archiveLogs will archive the main container logs as an artifact
archiveLogs: false
s3:
# Note the `key` attribute is not the actual secret, it's the PATH to
# the contents in the associated secret, as defined by the `name` attribute.
accessKeySecret:
name: artifactory-bucket
key: AWS_ACCESS_KEY_ID
secretKeySecret:
name: artifactory-bucket
key: AWS_SECRET_ACCESS_KEY
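      # A hedged sketch of the Secret those two selectors point at; only the
      # name and key names are implied by this file, the values are placeholders:
      #   apiVersion: v1
      #   kind: Secret
      #   metadata:
      #     name: artifactory-bucket
      #   stringData:
      #     AWS_ACCESS_KEY_ID: <access-key-id>
      #     AWS_SECRET_ACCESS_KEY: <secret-access-key>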
insecure: true
bucket: argowf-bucket
endpoint: rook-ceph-rgw-my-store.rook-ceph.svc:80
# region:
# roleARN:
# useSDKCreds: true
# gcs:
# bucket: <project>-argo
# serviceAccountKeySecret is a secret selector.
# It references the k8s secret named 'my-gcs-credentials'.
    # This secret is expected to have the key 'serviceAccountKey',
# containing the base64 encoded credentials
# to the bucket.
#
# If it's running on GKE and Workload Identity is used,
# serviceAccountKeySecret is not needed.
# serviceAccountKeySecret:
# name: my-gcs-credentials
# key: serviceAcc
minio:
    # If set to true, the chart installs MinIO and generates the corresponding artifactRepository section in the workflow-controller ConfigMap
install: false
defaultBucket:
enabled: false
name: argo-artifacts
|
yggdrasil/services/tooling/argowf/argowf.yaml
|
uid: "com.azure.cosmos.CosmosClient.createGlobalThroughputControlConfigBuilder*"
fullName: "com.azure.cosmos.CosmosClient.createGlobalThroughputControlConfigBuilder"
name: "createGlobalThroughputControlConfigBuilder"
nameWithType: "CosmosClient.createGlobalThroughputControlConfigBuilder"
members:
- uid: "com.azure.cosmos.CosmosClient.createGlobalThroughputControlConfigBuilder(java.lang.String,java.lang.String)"
fullName: "com.azure.cosmos.CosmosClient.createGlobalThroughputControlConfigBuilder(String databaseId, String containerId)"
name: "createGlobalThroughputControlConfigBuilder(String databaseId, String containerId)"
nameWithType: "CosmosClient.createGlobalThroughputControlConfigBuilder(String databaseId, String containerId)"
summary: "Create global throughput control config builder which will be used to build <xref uid=\"com.azure.cosmos.GlobalThroughputControlConfig\" data-throw-if-not-resolved=\"false\" data-raw-source=\"GlobalThroughputControlConfig\"></xref>."
parameters:
- description: "The database id of the control container."
name: "databaseId"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
- description: "The container id of the control container."
name: "containerId"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
syntax: "public GlobalThroughputControlConfigBuilder createGlobalThroughputControlConfigBuilder(String databaseId, String containerId)"
returns:
description: "A <xref uid=\"com.azure.cosmos.GlobalThroughputControlConfigBuilder\" data-throw-if-not-resolved=\"false\" data-raw-source=\"GlobalThroughputControlConfigBuilder\"></xref>."
type: "<xref href=\"com.azure.cosmos.GlobalThroughputControlConfigBuilder?alt=com.azure.cosmos.GlobalThroughputControlConfigBuilder&text=GlobalThroughputControlConfigBuilder\" data-throw-if-not-resolved=\"False\" />"
type: "method"
metadata: {}
package: "com.azure.cosmos"
artifact: com.azure:azure-cosmos:4.13.0
|
docs-ref-autogen/com.azure.cosmos.CosmosClient.createGlobalThroughputControlConfigBuilder.yml
|
banner:
location: config/banner.txt
encoding: utf-8
server:
port: 9999
# rocketMQ
apache:
rocketmq:
consumer.consumerGroup: PushConsumer
producer.producerGroup: Producer
namesrvAddr: localhost:9876
rocket:
mq:
topic: detectTest
tag: push
# canal
canal:
server:
address: localhost
port: 11111
instance.name: password
batch.size: 1000
# zookeeper
zk.quorum: localhost:2181,localhost2:2181,localhost3:2181
zk.session.timeout: 20000
# logback
logging.config: classpath:logback-prod.xml
spring:
aop:
proxy-target-class: true
auto: true
datasource:
druid:
parrot:
url: jdbc:mysql://localhost:33061/canal_client?useUnicode=true&characterEncoding=utf8&autoReconnect=true&zeroDateTimeBehavior=convertToNull&transformedBitIsBoolean=true
username: root
password: password
driver-class-name: com.mysql.jdbc.Driver
initialSize: 5
minIdle: 5
maxActive: 20
test:
url: jdbc:mysql://localhost:33061/test?useUnicode=true&characterEncoding=utf8&autoReconnect=true&zeroDateTimeBehavior=convertToNull&transformedBitIsBoolean=true
username: root
password: password
driver-class-name: com.mysql.jdbc.Driver
initialSize: 5
minIdle: 5
maxActive: 20
mybatis-plus:
  # If mapper XMLs live under src/main/java: classpath:/com/yourpackage/*/mapper/*Mapper.xml
  # If they live under the resources directory: classpath:/mapper/*Mapper.xml
  mapper-locations: classpath:/indi/gqxie/parrot/*/mapper/xml/*Mapper.xml
  # Entity scan; separate multiple packages with commas or semicolons
  typeAliasesPackage: indi.gqxie.*.entity
  global-config:
    # Primary key type -- 0: database auto-increment, 1: user-supplied ID, 2: globally unique numeric ID, 3: globally unique UUID
    id-type: 0
    # Field strategy -- 0: skip checks, 1: non-NULL check, 2: non-empty check
    field-strategy: 2
    # Convert between camelCase fields and snake_case columns
    db-column-underline: true
    # mp2.3+: global table prefix mp_
    #table-prefix: mp_
    # Hot-reload mappers; great for debugging
    #refresh-mapper: true
    # Uppercase snake_case conversion for database columns
    #capital-mode: true
    # Sequence key-generator implementation class
    key-generator: com.baomidou.mybatisplus.incrementer.OracleKeyGenerator
    # Logical delete configuration (the 3 settings below)
    logic-delete-value: 1
    logic-not-delete-value: 0
    sql-injector: com.baomidou.mybatisplus.mapper.LogicSqlInjector
    # Custom meta-object fill strategy implementation
    meta-object-handler: com.baomidou.springboot.MyMetaObjectHandler
  configuration:
    # Map snake_case columns to camelCase entity fields automatically, no alias needed
    # (without this, SQL must alias manually: select user_id as userId)
    map-underscore-to-camel-case: true
    cache-enabled: false
    # Configure jdbcTypeForNull; required for Oracle databases
    jdbc-type-for-null: 'null'
|
src/main/resources/application-dev.yml
|
name: ci-node
# Controls when the workflow will run
on:
# Allow the workflow to be reusable
workflow_call:
inputs:
# Inputs for job steps.
skip_matrix_jobs:
        description: A list of matrix jobs to skip. Job names should be of the form `<platform>`.
required: false
default: ''
type: string
codecov_upload:
        description: Whether to generate and upload code coverage to the Codecov service for main branches.
required: false
default: false
type: boolean
notify_teams:
        description: Whether to notify about workflow status via Microsoft Teams. Note that you must supply
          the `incoming_webhook` secret if you switch on this feature.
required: false
default: false
type: boolean
self_build:
        description: Whether to build from the currently checked-out repository or not.
required: false
default: true
type: boolean
self_test:
        description: Whether to run tests from the currently checked-out repository or not.
required: false
default: true
type: boolean
node_version:
description: The version of NodeJS interpreter to use.
required: false
default: '12'
type: string
repository:
description: The source repository name, in case it differs from the current one. Repository names should
          follow the standard GitHub `owner/name` format.
required: false
default: ${{ github.repository }}
type: string
ref:
description: The source repository reference, in case it differs from the current one.
required: false
default: ${{ github.ref }}
type: string
secrets:
# Secret for notify-teams action.
incoming_webhook:
      description: Public URL of the Microsoft Teams incoming webhook. To get the value, make sure that the channel in
        Teams has the appropriate connector set up. It will only be used if the `notify_teams` input is switched on.
required: false
jobs:
qa:
name: qa
runs-on: ubuntu-20.04
steps:
- name: Checkout Repository
uses: actions/checkout@v2
with:
repository: ${{ inputs.repository }}
ref: ${{ inputs.ref }}
- name: Setup Node
uses: actions/setup-node@v2
with:
node-version: ${{ inputs.node_version }}
- name: Install Dependencies
run: npm ci
- name: Code QA
run: npm run lint -- --no-fix
setup:
name: setup
runs-on: ubuntu-20.04
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- name: Set Matrix
id: set-matrix
shell: bash -eux {0}
run: |
MATRIX=$(cat << 'EOS'
os:
- ubuntu-20.04
- ubuntu-18.04
- macos-10.15
EOS
)
SKIP_MATRIX_JOBS=$(cat << 'EOS'
${{ inputs.skip_matrix_jobs }}
EOS
)
SELECT_OS_COND="1 != 1"
for skip_job in $SKIP_MATRIX_JOBS; do SELECT_OS_COND="$SELECT_OS_COND or . == \"$skip_job\""; done
echo ::set-output name=matrix::$(echo "$MATRIX" | yq eval "del(.os[] | select($SELECT_OS_COND))" --output-format json --indent 0 -)
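        # Illustrative walk-through (not executed): with skip_matrix_jobs set to
        # "macos-10.15", SELECT_OS_COND expands to '1 != 1 or . == "macos-10.15"',
        # yq deletes that entry, and the step outputs
        # matrix={"os":["ubuntu-20.04","ubuntu-18.04"]} for fromJson below.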
test:
name: test
needs:
- qa
- setup
strategy:
matrix: ${{ fromJson(needs.setup.outputs.matrix) }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout Repository
uses: actions/checkout@v2
with:
repository: ${{ inputs.repository }}
ref: ${{ inputs.ref }}
- name: Setup Node
uses: actions/setup-node@v2
with:
node-version: ${{ inputs.node_version }}
- name: Install Dependencies
run: npm ci
- name: Test Build
if: inputs.self_build
run: npm run build
- name: Run Tests
if: inputs.self_test
run: npm test
- name: Codecov Upload
if: inputs.self_test && inputs.codecov_upload && matrix.os == 'ubuntu-20.04' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop')
uses: codecov/codecov-action@v2
notify:
name: notify
runs-on: ubuntu-20.04
needs:
- qa
- setup
- test
if: always() && inputs.notify_teams && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop')
steps:
- name: Notify Teams
uses: ecmwf-actions/notify-teams@v1
with:
incoming_webhook: ${{ secrets.incoming_webhook }}
needs_context: ${{ toJSON(needs) }}
|
.github/workflows/ci-node.yml
|
_svd: ../svd/stm32l4x3.svd
# SVD incorrectly labels APB1ENR1 bit 26 as USBF instead of USBFSEN.
RCC:
APB1ENR1:
_modify:
SP3EN:
name: SPI3EN
description: SPI peripheral 3 clock enable
USBF:
name: USBFSEN
description: USB FS clock enable
# SVD incorrectly shifts CRCEN/CRCRST/CRCSMEN 11 bits instead of 12
AHB1ENR:
_modify:
CRCEN:
bitOffset: 12
AHB1RSTR:
_modify:
CRCRST:
bitOffset: 12
AHB1SMENR:
_modify:
CRCSMEN:
bitOffset: 12
APB1RSTR1:
_add:
USBFSRST:
description: USB FS reset
bitOffset: 26
bitWidth: 1
_modify:
# The SVD calls ADC1 ADC.
ADC:
name: ADC1
USB:
baseAddress: "0x40006800"
USB:
_add:
_interrupts:
USB_FS:
description: USB event interrupt through EXTI
value: 67
BCDR:
description: Battery charging detector
addressOffset: 0x58
size: 0x10 # todo: 0x20? 16-bit field though.
access: read-write
resetValue: 0x0000
fields:
BCDEN:
description: Battery charging detector (BCD) enable
bitOffset: 0
bitWidth: 1
DCDEN:
description: Data contact detection (DCD) mode enable
bitOffset: 1
bitWidth: 1
PDEN:
description: Primary detection (PD) mode enable
bitOffset: 2
bitWidth: 1
SDEN:
description: Secondary detection (SD) mode enable
bitOffset: 3
bitWidth: 1
DCDET:
description: Data contact detection (DCD) status
bitOffset: 4
bitWidth: 1
access: read-only
PDET:
description: Primary detection (PD) status
bitOffset: 5
bitWidth: 1
access: read-only
SDET:
description: Secondary detection (SD) status
bitOffset: 6
bitWidth: 1
access: read-only
PS2DET:
description: DM pull-up detection status
bitOffset: 7
bitWidth: 1
access: read-only
DPPU:
description: DP pull-up control
bitOffset: 15
bitWidth: 1
# Merge the thousands of individual bit fields into a single field for each
# CAN filter register. This is not only much easier to use but also saves
# a huge amount of file space and compilation time etc. -- as much as 30% of all
# fields in many devices are just these CAN filter bank fields.
"CAN*":
"F?R?":
_merge:
- "FB*"
"F??R?":
_merge:
- "FB*"
MPU:
_strip:
- "MPU_"
_include:
- common_patches/4_nvic_prio_bits.yaml
- ./common_patches/merge_USART_CR2_ADDx_fields.yaml
- ./common_patches/merge_USART_CR2_ABRMODx_fields.yaml
- ./common_patches/merge_USART_CR1_DEDTx_fields.yaml
- ./common_patches/merge_USART_CR1_DEATx_fields.yaml
- ./common_patches/rename_USART_CR2_DATAINV_field.yaml
- ./common_patches/merge_LPUART_CR1_DEATx_fields.yaml
- ./common_patches/merge_LPUART_CR1_DEDTx_fields.yaml
- ./common_patches/rename_LPUART_CR2_DATAINV_field.yaml
- ./common_patches/merge_LPUART_CR2_ADDx_fields.yaml
- ./common_patches/merge_USART_BRR_fields.yaml
- ./common_patches/l4_crrcr.yaml
- common_patches/can/can.yaml
- common_patches/can/can_filter_bank.yaml
- ../peripherals/can/can.yaml
- common_patches/sai/sai_v1.yaml
- ../peripherals/gpio/gpio_v2.yaml
- common_patches/crc/crc_rename_init.yaml
- ../peripherals/crc/crc_advanced.yaml
- ../peripherals/crc/crc_idr_8bit.yaml
- ../peripherals/crc/crc_with_polysize.yaml
- ../peripherals/wwdg/wwdg.yaml
- ../peripherals/rcc/rcc_l4.yaml
- common_patches/tim/common.yaml
- ../peripherals/tim/tim_basic.yaml
- ../peripherals/tim/tim16.yaml
- ../peripherals/tim/tim6.yaml
- ../peripherals/tim/tim2_32bit.yaml
- common_patches/tim/tim2_32bit.yaml
- ../peripherals/tim/tim_advanced.yaml
- common_patches/tim/tim_ccr.yaml
- common_patches/tim/v2/l4.yaml
- ../peripherals/tim/v2/ccm.yaml
- ../peripherals/dma/dma_v1_with_remapping.yaml
- ../peripherals/iwdg/iwdg_with_WINR.yaml
- ../peripherals/exti/exti.yaml
- ../peripherals/i2c/i2c_v2.yaml
- ../peripherals/usart/lpuart_v2A.yaml
- ../peripherals/usart/usart_v2B1.yaml
- common_patches/rtc/rtc_bkpr.yaml
- common_patches/rtc/rtc_cr.yaml
- common_patches/tsc/tsc.yaml
- ./common_patches/flash/flash_boot0s.yaml
- ../peripherals/fw/fw_l0_l4.yaml
- ../peripherals/sai/sai.yaml
- common_patches/dma_interrupt_names.yaml
- ./common_patches/l4_adc_common.yaml
- ./common_patches/l4_adc_smpr.yaml
- ./common_patches/l4_adc_sqr1.yaml
- ./common_patches/l4_spi.yaml
- ./common_patches/l4_gpio_brr.yaml
- ../peripherals/spi/spi_l4.yaml
- ./common_patches/l4_lcd_segment.yaml
|
devices/stm32l4x3.yaml
|
items:
- uid: com.microsoft.azure.management.network.ConnectionMonitor.Definition
id: Definition
artifact: com.microsoft.azure:azure-mgmt-network:1.37.0
parent: com.microsoft.azure.management.network
langs:
- java
name: ConnectionMonitor.Definition
nameWithType: ConnectionMonitor.Definition
fullName: com.microsoft.azure.management.network.ConnectionMonitor.Definition
type: Interface
package: com.microsoft.azure.management.network
summary: The entirety of the connection monitor definition.
syntax:
content: public static interface ConnectionMonitor.Definition extends ConnectionMonitor.DefinitionStages.WithSource, ConnectionMonitor.DefinitionStages.WithDestination, ConnectionMonitor.DefinitionStages.WithDestinationPort, ConnectionMonitor.DefinitionStages.WithCreate
implements:
- com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithSource
- com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithDestination
- com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithDestinationPort
- com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithCreate
references:
- uid: com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithSource
name: ConnectionMonitor.DefinitionStages.WithSource
nameWithType: ConnectionMonitor.DefinitionStages.WithSource
fullName: com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithSource
- uid: com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithDestination
name: ConnectionMonitor.DefinitionStages.WithDestination
nameWithType: ConnectionMonitor.DefinitionStages.WithDestination
fullName: com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithDestination
- uid: com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithDestinationPort
name: ConnectionMonitor.DefinitionStages.WithDestinationPort
nameWithType: ConnectionMonitor.DefinitionStages.WithDestinationPort
fullName: com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithDestinationPort
- uid: com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithCreate
name: ConnectionMonitor.DefinitionStages.WithCreate
nameWithType: ConnectionMonitor.DefinitionStages.WithCreate
fullName: com.microsoft.azure.management.network.ConnectionMonitor.DefinitionStages.WithCreate
|
docs-ref-autogen/com.microsoft.azure.management.network.ConnectionMonitor.Definition.yml
|
items:
- uid: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl
id: WithUrl
parent: com.microsoft.azure.cognitiveservices.vision.computervision
children:
- com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.withUrl(String)
href: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.yml
langs:
- java
name: ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl
nameWithType: ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl
fullName: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl
type: Interface
source:
remote: &o0
path: sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVision.java
branch: master
repo: https://github.com/Azure/azure-sdk-for-java
path: sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVision.java
startLine: 1337
package: com.microsoft.azure.cognitiveservices.vision.computervision
summary: "<p>The stage of the definition to be specify url. </p>"
syntax:
content: public interface WithUrl
- uid: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.withUrl(String)
id: withUrl(String)
parent: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl
href: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.yml
langs:
- java
name: withUrl(String url)
nameWithType: ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.withUrl(String url)
fullName: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.withUrl(String url)
overload: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.withUrl*
type: Method
source:
remote: *o0
path: sdk/cognitiveservices/ms-azure-cs-computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVision.java
startLine: 1343
package: com.microsoft.azure.cognitiveservices.vision.computervision
summary: "<p>Publicly reachable URL of an image.</p>\r\n<p></p>"
syntax:
content: public ComputerVisionGenerateThumbnailDefinitionStages.WithExecute withUrl(String url)
parameters:
- id: url
type: "26831127"
return:
type: 0a9a87c1
description: <p>next definition stage </p>
references:
- uid: "26831127"
spec.java:
- name: String
fullName: String
- uid: 0a9a87c1
spec.java:
- name: ComputerVisionGenerateThumbnailDefinitionStages.WithExecute
fullName: ComputerVisionGenerateThumbnailDefinitionStages.WithExecute
- uid: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.withUrl*
name: withUrl
nameWithType: ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.withUrl
fullName: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.withUrl
package: com.microsoft.azure.cognitiveservices.vision.computervision
- uid: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinition
parent: com.microsoft.azure.cognitiveservices.vision.computervision
href: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinition.yml
name: ComputerVision.ComputerVisionGenerateThumbnailDefinition
nameWithType: ComputerVision.ComputerVisionGenerateThumbnailDefinition
fullName: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinition
type: Interface
summary: "<p>The entirety of generateThumbnail definition. </p>"
syntax:
content: public interface ComputerVisionGenerateThumbnailDefinition extends ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithWidth,ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithHeight,ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl,ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithExecute
|
docs-ref-autogen/com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionGenerateThumbnailDefinitionStages.WithUrl.yml
|
{% set version = "1.0.1" %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-scatterd3
version: {{ version|replace("-", "_") }}
source:
url:
- {{ cran_mirror }}/src/contrib/scatterD3_{{ version }}.tar.gz
- {{ cran_mirror }}/src/contrib/Archive/scatterD3/scatterD3_{{ version }}.tar.gz
sha256: 950cde28cbbaa915c3469f60908a29095bff6035d1bb79abbeb6a85d7cc0f2a0
build:
merge_build_host: true # [win]
number: 0
noarch: generic
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- {{ posix }}zip # [win]
host:
- r-base
- r-digest
- r-ellipse
- r-htmlwidgets
run:
- r-base
- r-digest
- r-ellipse
- r-htmlwidgets
test:
commands:
- $R -e "library('scatterD3')" # [not win]
- "\"%R%\" -e \"library('scatterD3')\"" # [win]
about:
home: https://juba.github.io/scatterD3/
license: GPL-3.0-or-later
summary: "Creates 'D3' 'JavaScript' scatterplots from 'R' with interactive features : panning, zooming, tooltips, etc."
license_family: GPL3
license_file:
- {{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3
extra:
recipe-maintainers:
- conda-forge/r
# Package: scatterD3
# Type: Package
# Title: D3 JavaScript Scatterplot from R
# Version: 0.9.1
# Date: 2020-03-10
# Authors@R: c( person( "Julien", "Barnier", email = "<EMAIL>", role = c("aut", "cre") ), person( "Kent", "Russell" , role = c("aut", "ctb") , email = "<EMAIL>" ), person( "Mike", "Bostock" , role = c("aut", "cph") , comment = "d3.js library, http://d3js.org" ), person( "Susie", "Lu" , role = c("aut", "cph") , comment = "d3-legend library, http://d3-legend.susielu.com/" ), person( "Speros", "Kokenes" , role = c("aut", "cph") , comment = "d3-lasso-plugin library, https://github.com/skokenes/D3-Lasso-Plugin" ), person( "Evan", "Wang" , role = c("aut", "cph") , comment = "d3-labeler plugin, https://github.com/tinker10/D3-Labeler" ) )
# Maintainer: <NAME> <<EMAIL>>
# Description: Creates 'D3' 'JavaScript' scatterplots from 'R' with interactive features : panning, zooming, tooltips, etc.
# License: GPL (>= 3)
# VignetteBuilder: knitr
# Encoding: UTF-8
# URL: https://juba.github.io/scatterD3/
# BugReports: https://github.com/juba/scatterD3/issues
# LazyData: TRUE
# Enhances: shiny
# Imports: htmlwidgets, digest, ellipse
# Suggests: knitr, rmarkdown
# RoxygenNote: 7.0.2
# NeedsCompilation: no
# Packaged: 2020-03-10 11:08:37 UTC; julien
# Author: <NAME> [aut, cre], <NAME> [aut, ctb], <NAME> [aut, cph] (d3.js library, http://d3js.org), <NAME> [aut, cph] (d3-legend library, http://d3-legend.susielu.com/), <NAME> [aut, cph] (d3-lasso-plugin library, https://github.com/skokenes/D3-Lasso-Plugin), <NAME> [aut, cph] (d3-labeler plugin, https://github.com/tinker10/D3-Labeler)
# Repository: CRAN
# Date/Publication: 2020-03-10 12:30:02 UTC
|
recipe/meta.yaml
|
name: Build, test, and optionally publish
on: push
jobs:
build-and-upload-artifacts:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Build the typescript code
run: yarn build
- uses: actions/upload-artifact@main
with:
name: build artifacts
path: build/
run-tests:
runs-on: ubuntu-latest
strategy:
matrix:
os: [ubuntu-latest]
node-version: [12.x, 14.x]
steps:
- uses: actions/checkout@v2
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node-version }}
- run: yarn test
run-pre-commits:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 100 # need the history to do a changed files check below (source, origin)
- uses: actions/setup-python@v2
- name: Use Node.js 14.x
uses: actions/setup-node@v1
with:
node-version: 14.x
- uses: pre-commit/action@v2.0.2
with:
extra_args: --source ${{ github.event.pull_request.base.sha || 'HEAD~1' }} --origin ${{ github.event.pull_request.head.sha || 'HEAD' }}
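      # This workflow only triggers on push (see `on: push` above), so the
      # pull_request context is empty and both expressions fall back, making
      # pre-commit diff HEAD~1..HEAD; the SHA expressions would apply if a
      # pull_request trigger were added (hence the fetch-depth above).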
run-depcheck:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Use Node.js 14.x
uses: actions/setup-node@v1
with:
node-version: 14.x
- run: yarn pnpify depcheck
build-to-npm:
if: github.ref == 'refs/heads/main'
runs-on: ubuntu-latest
needs:
- run-tests
- run-depcheck
- run-pre-commits
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
node-version: '14.x'
- name: Configure NPM authentication
run: |
yarn config set npmAlwaysAuth true
yarn config set npmAuthToken ${{ secrets.NPM_TOKEN }}
- name: Publish to yarn/npm
run: yarn npm publish
build-to-github-packages:
if: github.ref == 'refs/heads/main'
runs-on: ubuntu-latest
needs:
- run-tests
- run-depcheck
- run-pre-commits
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
node-version: '14.x'
- name: Configure Github Packages authentication
run: |
yarn config set npmAlwaysAuth true
yarn config set npmRegistryServer https://npm.pkg.github.com
yarn config set npmPublishRegistry https://npm.pkg.github.com
yarn config set npmAuthToken ${{ secrets.GITHUB_TOKEN }}
- name: Publish to Github packages
run: yarn npm publish
|
.github/workflows/ci.yml
|
---
## ZSH install
oh_my_zsh_custom_theme_info: { url: "https://github.com/romkatv/powerlevel10k.git", dir_dest_name: powerlevel10k }
oh_my_zsh_custom_theme: powerlevel10k/powerlevel10k
oh_my_zsh_default_plugins:
- ansible
- alias-finder
- encode64
- git
- colored-man-pages
- ubuntu
- aws
- tmux
- tmuxinator
oh_my_zsh_custom_plugins_info:
- { url: "https://github.com/junegunn/fzf.git", dir_dest_name : fzf }
- { url: "https://github.com/Treri/fzf-zsh.git", dir_dest_name: fzf-zsh }
- { url: "https://github.com/zsh-users/zsh-autosuggestions.git", dir_dest_name: zsh-autosuggestions }
- { url: "https://github.com/zdharma/fast-syntax-highlighting.git", dir_dest_name: fast-syntax-highlighting }
- { url: "https://github.com/ptavares/zsh-custom-functions.git", dir_dest_name: zsh-custom-functions }
- { url: "https://github.com/ptavares/zsh-tfenv.git", dir_dest_name: zsh-tfenv }
- { url: "https://github.com/ptavares/zsh-tgenv.git", dir_dest_name: zsh-tgenv }
- { url: "https://github.com/ptavares/zsh-pkenv.git", dir_dest_name: zsh-pkenv }
- { url: "https://github.com/ptavares/zsh-direnv.git", dir_dest_name: zsh-direnv }
- { url: "https://github.com/ptavares/zsh-kubectx.git", dir_dest_name: zsh-kubectx }
- { url: "https://github.com/ptavares/zsh-sdkman.git", dir_dest_name: zsh-sdkman }
- { url: "https://github.com/ptavares/zsh-exa.git", dir_dest_name: zsh-exa }
- { url: "https://github.com/ptavares/zsh-z.git", dir_dest_name: zsh-z }
- { url: "https://github.com/ptavares/zsh-auto-update-plugins.git", dir_dest_name: zsh-auto-update-plugins }
- { url: "https://github.com/b4b4r07/emoji-cli.git", dir_dest_name: emoji-cli }
oh_my_zsh_custom_plugins:
- fzf-zsh
- zsh-autosuggestions
- fast-syntax-highlighting
- zsh-custom-functions
- zsh-tfenv
- zsh-tgenv
- zsh-pkenv
- zsh-direnv
- zsh-kubectx
- zsh-sdkman
- zsh-exa
- zsh-z
- zsh-auto-update-plugins
- emoji-cli
oh_my_zsh_custom_plugins_command:
- "$HOME/.oh-my-zsh/custom/plugins/fzf/install --all"
- "fast-theme safari"
oh_my_zsh_custom_zsh_entries:
- "export EDITOR=vim"
- "# File Shortcuts"
- "alias D='cd ~/Téléchargements'"
- "alias P='cd ~/projects'"
- "alias T='cd ~/tools'"
- "# Program Shortcuts"
- "alias vi='vim -O'"
- "# AWS login"
- "alias logIntoIpponAWS='aws-google-auth -p ippon-admin-aws -d 28800 -I C00lefphs -S 563310275444 -r arn:aws:iam::308846910468:role/role-admin-sre-ops-federated -u <EMAIL>'"
- "# Tools Docker alias"
- "# kubectl completion"
- "if [ /usr/local/bin/kubectl ]; then source <(kubectl completion zsh); fi"
- "#Touchscreen alias"
- "alias touchscreenOn=\"/usr/bin/xinput enable 'ELAN Touchscreen'\""
- "alias touchscreenOff=\"/usr/bin/xinput disable 'ELAN Touchscreen' \""
|
group_vars/computer/system/zsh.yml
|
---
- name: Check existing install
register: postgresql_exporter_version_check
failed_when: False
changed_when: False
check_mode: False
shell: /usr/bin/postgresql-exporter --version 2>&1
tags:
- postgresql
- postgresql-exporter
- name: Fact version change
set_fact:
postgresql_exporter_version_changed: "{{ postgresql_exporter_version_check.rc != 0 or (postgresql_exporter_version_check.stdout_lines is defined and postgresql_exporter_version not in postgresql_exporter_version_check.stdout_lines[0]) | bool }}"
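  # Illustrative evaluation (the exact --version output format is an assumption):
  # if the check prints "postgres_exporter, version 0.10.1 (...)" on its first
  # line and postgresql_exporter_version is "0.10.1", the fact is False and the
  # download/copy tasks below are skipped; any other version, or a non-zero rc,
  # flips it to True.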
tags:
- postgresql
- postgresql-exporter
- name: Download release tarball
when: postgresql_exporter_version_changed
unarchive:
src: "{{ postgresql_exporter_download }}"
dest: /usr/local/src
remote_src: True
owner: root
group: root
mode: u=rw,g=r,o=r
tags:
- postgresql
- postgresql-exporter
- name: Copy exporter binary
when: postgresql_exporter_version_changed and not ansible_check_mode
notify:
- Restart postgresql-exporter
copy:
src: "/usr/local/src/postgres_exporter-{{ postgresql_exporter_version }}.linux-amd64/postgres_exporter"
dest: /usr/bin/postgresql-exporter
remote_src: True
owner: root
group: root
mode: u=rwx,g=rx,o=rx
tags:
- postgresql
- postgresql-exporter
- name: Write default config
notify:
- Restart postgresql-exporter
template:
src: exporter/default.j2
dest: /etc/default/postgresql-exporter
owner: root
group: root
mode: u=rw,g=r,o=r
tags:
- postgresql
- postgresql-exporter
- name: Write service file
notify:
- Restart postgresql-exporter
template:
src: exporter/service.j2
dest: /etc/systemd/system/postgresql-exporter.service
owner: root
group: root
mode: u=rw,g=r,o=r
tags:
- postgresql
- postgresql-exporter
- name: Start postgresql-exporter service
systemd:
name: postgresql-exporter
state: started
daemon_reload: True
masked: False
enabled: True
tags:
- postgresql
- postgresql-exporter
- name: Remove install directory
file:
path: "/usr/local/src/postgres_exporter-{{ postgresql_exporter_version }}.linux-amd64"
state: absent
tags:
- postgresql
- postgresql-exporter
...
|
tasks/exporter.yml
|
services:
- docker
env:
global:
- SYSTEMD_OPTS: "--privileged --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
- ANSIBLE_ROLE_LOCATION: "/etc/ansible/roles/role_under_test"
- playbook: test.yml
matrix:
# Stable Release Versions - 8.5
# SystemD
- distribution: Ubuntu
distribution_version: xenial
version: "8.5.12"
init: /lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: EL
distribution_version: "7"
version: "8.5.12"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: Fedora
distribution_version: "23"
version: "8.5.12"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: OracleLinux
distribution_version: "7"
version: "8.5.12"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
# Tomcat 8.0 - now superseded by 8.5
# SystemD
- distribution: Ubuntu
distribution_version: xenial
version: "8.0.42"
init: /lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: EL
distribution_version: "7"
version: "8.0.42"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: Fedora
distribution_version: "23"
version: "8.0.42"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: OracleLinux
distribution_version: "7"
version: "8.0.42"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
# Tomcat 7 Support
# SystemD
- distribution: Ubuntu
distribution_version: xenial
version: "7.0.76"
init: /lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: EL
distribution_version: "7"
version: "7.0.76"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: Fedora
distribution_version: "23"
version: "7.0.76"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: OracleLinux
distribution_version: "7"
version: "7.0.76"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
# Tomcat 9 Current Milestone
# SystemD
- distribution: Ubuntu
distribution_version: xenial
version: "9.0.0.M18"
init: /lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: EL
distribution_version: "7"
version: "9.0.0.M18"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: Fedora
distribution_version: "23"
version: "9.0.0.M18"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
- distribution: OracleLinux
distribution_version: "7"
version: "9.0.0.M18"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
# NO APR Installations
# Stable Release Versions - 8.5
# SystemD
- distribution: Ubuntu
distribution_version: xenial
version: "8.5.12"
init: /lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
playbook: no_apr.yml
- distribution: EL
distribution_version: "7"
version: "8.5.12"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
playbook: no_apr.yml
- distribution: Fedora
distribution_version: "23"
version: "8.5.12"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
playbook: no_apr.yml
- distribution: OracleLinux
distribution_version: "7"
version: "8.5.12"
init: /usr/lib/systemd/systemd
run_opts: "${SYSTEMD_OPTS}"
playbook: no_apr.yml
before_install:
# Pull container.
- 'docker pull ansiblecheck/ansiblecheck:${distribution,,}-${distribution_version}'
- container_id=$(mktemp)
# Start The Built Container In The Background
- 'docker run --detach --volume="${PWD}":${ANSIBLE_ROLE_LOCATION}:ro ${run_opts} ansiblecheck/ansiblecheck:"${distribution,,}"-"${distribution_version}" "${init}" > "${container_id}"'
install:
- 'docker exec --tty "$(cat ${container_id})" env TERM=xterm ansible-galaxy install -r ${ANSIBLE_ROLE_LOCATION}/requirements.yml'
script:
# Ansible syntax check.
- 'docker exec --tty "$(cat ${container_id})" env TERM=xterm ansible-playbook ${ANSIBLE_ROLE_LOCATION}/tests/${playbook} --syntax-check'
# Test role.
- 'docker exec "$(cat ${container_id})" ansible-playbook ${ANSIBLE_ROLE_LOCATION}/tests/${playbook} --extra-vars "tomcat_version=${version}"'
# Test Idempotence
- idempotence=$(mktemp)
- docker exec "$(cat ${container_id})" ansible-playbook ${ANSIBLE_ROLE_LOCATION}/tests/${playbook} --extra-vars "tomcat_version=${version}" | tee -a ${idempotence}
- >
tail ${idempotence}
| grep -q 'changed=0.*failed=0'
&& (echo 'Idempotence test: pass' && exit 0)
|| (echo 'Idempotence test: fail' && exit 1)
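# The recap line grepped for above looks roughly like (illustrative):
#   default : ok=42  changed=0  unreachable=0  failed=0
# so the second, already-converged run passes, while any change or failure
# fails the build.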
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/
|
.travis.yml
|
messages:
unknownCommand: "%hataprefix%Hatalı bir komut girdiniz! Komutların listesi için &b/komutlar &cyazınız."
disallowedName: "Güvenlik gerekçesiyle bu isimle giriş yapmak yasaktır."
configReloaded: "%bilgiprefix%Yapılandırma yenilendi!"
noPermission: "%hataprefix%Bunun için izniniz yok!"
commandChatColorUsage: |
&a--- Kullanabileceğiniz Renkler ---
&f&l• &aYeşil, &bAçık_Mavi, &cAçık_Kırmızı, &dEflatun,
&f&l• &eSarı, &fBeyaz, &0Siyah, &1Koyu_Mavi,
&f&l• &2Koyu_Yeşil, &3Turkuaz, &4Koyu_Kırmızı, &5Koyu_Mor,
&f&l• &6Turuncu, &7Gri, &8Koyu_Gri, &9Mavi
&aÖrnek Kullanım: &b/renk Açık_Mavi
commandChatColorWrongColor: "%hataprefix%Hatalı bir renk adı girdiniz!"
commandChatColorSuccess: "%bilgiprefix%Konuşma renginiz %s olarak değiştirildi!"
commandCommandsHeader: '&e--------------- &rKomutlar: sayfa %page%/%maxpage% &e---------------'
commandCommandsNextPage: '&eBir sonraki sayfaya geçmek için &b/komutlar %page% &ekomutunu kullanın.'
commandCommandsContent: # TODO
- '&6/auction: &rAçık arttırma sisteminin ana komutudur.'
- '&6/back: &rIşınlanmadan ya da ölmeden önceki konumunuza döndürür. (VIP ve UVIP)'
- '&6/balance: &rPara miktarınızı gösterir.'
- '&6/bid: &rAçık arttırmaya belirttiğiniz teklifi verir.'
- '&6/challenges: &rSkyblock görevlerini gösterir.'
- '&6/clearinventory: &rEnvanterinizi sıfırlar.'
- '&6/ct: &rBir savaştaysanız kalan süreyi gösterir.'
- '&6/duyuru: &rAda warpı reklamı gibi duyurularınızı yapabileceğiniz komut.'
webInCombatDisabled: "%hataprefix%Savaş esnasında örümcek ağı koyamazsınız!"
chatHover: |
&aGönderme Zamanı: &b%time%
&aPara: &b%vault_eco_balance_formatted%
&a
&aÖldürme/Ölme Oranı: &b%uniostats_kdr%
&aGüç: &b%factionsuuid_player_power%
invalidVIPType: "%hataprefix%Hatalı bir VIP türü girdiniz."
vipRewardHasGiven: "%bilgiprefix%VIP ödülü başarıyla verildi."
playerIsNotOnline: "%hataprefix%Belirttiğiniz oyuncu oyunda değil."
questNotExist: "%hataprefix%Belirttiğiniz görev mevcut değil."
jobNotExist: "%hataprefix%Belirttiğiniz meslek mevcut değil."
questAlreadyCompleted: "%hataprefix%Bu görevi daha önce tamamlamışsınız."
commandCompleteQuestSuccess: "%bilgiprefix%Görev başarıyla tamamlandı!"
questIsNotCompleted: "%hataprefix%Görev tamamlanamadı. Öncelikle görev şartlarını yerine getirmelisiniz."
  questsGUITitle: "&0Görevler"
questIsDaily: "Günlük Görev"
questIsOneTimeOnly: "Tek Seferlik Görev"
questClickToComplete: "&aGörevi tamamlamak için tıkla!"
questIsNotCompletedGui: "&c&lGÖREV TAMAMLANMADI"
  questCompletionInfo: "%bilgiprefix%Bir görevin şartlarını yerine getirdiniz! Hemen /görevler yazın ve görevi bitirerek ödülünüzü alın!"
questYouMustBeInJob: |
&cBu görevi yapabilmek için
&c%job% mesleğinde olmalısınız!
questIsCompleted: "&a&lGÖREV TAMAMLANDI"
questRemainingTimeToReset: |
&6&lGörevin Sıfırlanmasına Kalan Süre:
&7%remainingTime%
questMenuLore: |
&7%questType%
&6&lGörev Şartları:
%questObjectives%
&6&lGörev Ödülleri:
%questRewards%
%questAction%
jobsGUITitle: "&0Meslekler"
jobMenuLore: |
&e
%jobInfo%
&7%jobLevel%
%clickAction%
jobLevel: |
&6Seviye: &f%jobLevel%
&6EXP: &f%jobExp%/%jobMaxExp%
jobLeftClickToJoin: '&aKatılmak &7için sol tıkla!'
jobRightClickToLeft: '&cAyrılmak &7için sağ tıkla!'
jobsGUIQuestsItemName: "&6Görevler"
jobsGUIQuestsItemLore:
- "&7Görevler menüsünü"
- "&7açmak için tıkla!"
jobsGUITop10ItemName: "&6Meslek Top 10"
jobsGUITop10ItemLore:
- "&7En yüksek seviyeli"
- "&7meslek oyuncularını"
- "&7görmek için tıkla!"
jobsGUISettingsItemName: "&6Ayarlar"
jobsGUISettingsItemLore:
- "&7Meslek ayarları"
- "&7yapmak için tıkla!"
topMenuGUITitle: "Meslek Top 10"
topMenuGUITitleWithJob: "%job% Top 10"
toptenPlayerLore: |
&e
&6Seviye: &f%jobLevel%
&6EXP: &f%jobExp%
&e
toptenJobLore: |
&e%job% &7mesleğinin en yüksek
&7seviyeli oyuncularını görmek
&7için tıkla!
settingsMenuGUITitle: "Meslek Ayarları"
settingsActionBar: "Para Kazanma Bilgisi"
settingsActionBarEnableLore:
- "&7Eylem çubuğunda görünen"
- "&7para ve xp kazanma bilgisini"
- "&aetkinleştir&7."
settingsActionBarDisableLore:
- "&7Eylem çubuğunda görünen"
- "&7para ve xp kazanma bilgisini"
- "&cdevre dışı &7bırak."
settingsBossBar: "Meslek Gelişim Bilgisi"
settingsBossBarEnableLore:
- "&7Boss barında görünen"
- "&7meslek seviye gelişimini"
- "&aetkinleştir&7."
settingsBossBarDisableLore:
- "&7Boss barında görünen"
- "&7meslek seviye gelişimini"
- "&cdevre dışı &7bırak."
|
src/main/resources/lang.yml
|
name: Build
on: push
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- uses: actions/setup-go@v1
with:
go-version: '1.15'
- name: Get the version
id: get_version
run: echo ::set-output name=VERSION::${GITHUB_REF##*/}
- name: Unit tests & coverage
run: |
make test build
- name: Full build & docker image
run: make docker DOCKER_IMAGE_TAG=${{ steps.get_version.outputs.VERSION }}
- uses: shogo82148/actions-goveralls@v1
with:
path-to-profile: coverage.out
- name: Create beta Release
id: create_beta_release
if: startsWith(github.ref, 'refs/tags/v') && (contains(github.ref, '-beta') || contains(github.ref, '-alpha'))
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ steps.get_version.outputs.VERSION }}
release_name: ${{ steps.get_version.outputs.VERSION }}
draft: true
prerelease: true
- name: Create Release
id: create_release
if: startsWith(github.ref, 'refs/tags/v') && !contains(github.ref, '-beta') && !contains(github.ref, '-alpha')
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ steps.get_version.outputs.VERSION }}
release_name: ${{ steps.get_version.outputs.VERSION }}
draft: true
prerelease: false
- name: Upload beta Release Asset
id: upload-beta-release-asset
if: startsWith(github.ref, 'refs/tags/v') && (contains(github.ref, '-beta') || contains(github.ref, '-alpha'))
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_beta_release.outputs.upload_url }}
asset_path: ./mongodb_query_exporter
asset_name: mongodb_query_exporter
asset_content_type: application/octet-stream
- name: Upload Release Asset
id: upload-release-asset
if: startsWith(github.ref, 'refs/tags/v') && !contains(github.ref, '-beta') && !contains(github.ref, '-alpha')
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./mongodb_query_exporter
asset_name: mongodb_query_exporter
asset_content_type: application/octet-stream
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
if: startsWith(github.ref, 'refs/tags/v')
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ <PASSWORD> }}
- name: Push to Docker Hub
uses: docker/build-push-action@v2
if: startsWith(github.ref, 'refs/tags/v')
with:
push: true
tags: raffis/mongodb-query-exporter:${{ steps.get_version.outputs.VERSION }}
- name: Run helm lint
id: lint
uses: WyriHaximus/github-action-helm3@v2
with:
exec: helm lint chart/
|
.github/workflows/action.yml
|
apiVersion: v1
kind: Secret
metadata:
name: github-token
namespace: flux-system
type: Opaque
stringData:
token: ENC[AES256_GCM,data:aDxCffgXvw6yFtr6gLEBrpK0jL4UVNHUqaHNwE7muPU2rwJ2L+qSPA==,iv:KsYAyRSV3Ee2TXsC6edvbK+pD+HrnteFEMmZS4yzSYc=,tag:GPibLqvkuLG7k/2q+BI28A==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age: []
lastmodified: "2021-12-04T18:58:28Z"
mac: ENC[AES256_GCM,data:9idgyPXc20wu1dL6HGmJCEtqXnq6nVp3aMRp8SYfb1oHHN0MRFqFjfIewXixDQt7zrpdTqGtb7Wdb/Iwpa67CTPg1wF6pz6zcDHnnIzHR93EiqKoefLCFiFtqbrAdZlqe7NcEl9mB7LETqNt7MeA/IGg0XnuhJpS4vjeCQui7Vc=,iv:RAvR44++MXfP02e5odLxcpW0TzuX3ccO0MQynSNfLO8=,tag:AwoF2d9H3dLFi1XzF/hAYQ==,type:str]
pgp:
- created_at: "2021-12-04T18:58:28Z"
enc: |
-----BEGIN PGP MESSAGE-----
hQIMA8LTdrN3Uc5/AQ//Qqgad9/KbqGiuwIOSJFRxYWQohPBo/z6LnoaL2JBMuQH
qzOlH4BC7GiZvcDgmX2RKOv97HtPd/ozl7YBgXWUQMLIlxrrBgVAZ3zORO4Jxm7+
4YFBTH1T4vHscW/RHyHR2c0JD9Lh4OedBIjKuSOV8ZgOv9UQdFl3jGhGZFC5x3kV
9PfZE5lepxNbykzxFICW3xYqBDt/M8CtnsLSF6HiJaHjUGZPaN8heYCYR3aRda+p
tW8U7OcbNBgz14uwad04ArkCgOjBIoArHGAJu/BSh4FCMHckAmZoFqCHDE+/Yfc/
NwsiaK6C4QrDFiPsj9EXF53qe5AZqfpZFmdGrFyHFndny4n8yIKwabZLIdBfT9TG
6/7cQ1VHqp2sGVl9wxEbUSRX3eyV/r/JdyasYQ6A5jHYYIHM1FHhy1zJKBNAlABW
3R403/InmAYmtSusBfbg+jftYBfs0RO0+rrZjN6UOl8A0os2M8SJR8d6rfKgE7ny
eD02erBsH9xjpB1flGOqzY+2Y4HYcPtN8Xy9YwAaKdXKezg2aPFEU+Ls/zc5ZUHe
KyvuOpbrbd68YfxFibSxmUMd8aRJQhO9sKtncV02/gqAhrVRAeVgkbMF2EHN8Sah
iJZhIH1PT4VofpEnaIMU9RbbPlIb6MM947PuKM9BJj2BB0kOJ+vSMEBD13UsYvfU
aAEJAhAU/Vsg3cpu2FgELW+a/+NiuFHZVr0PRq7ekDP0mhwiJ6AV8l45vWYFMScv
R66PeC2H8dAehLcXSqmv/K6/vnLsBTGtaPMyTfUIVzNWFyEli509HboMsR9L6Ji5
2PZ0jdRwcjOa
=VgWF
-----END PGP MESSAGE-----
fp: 4988A3C9ED6515B2E192F0ABE42278AB326CB047
encrypted_regex: ^(data|stringData|customRequestHeaders)$
version: 3.7.1
|
core/notifications/github/secret.yaml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-20 17:25"
game: "Unreal Tournament 2004"
name: "DM-UCMP2-Saiko"
author: "Slainchild & Soma"
description: "There used to be a time that the Saiko company was a flourishing enterprise,\
\ a leader in the gas industry. However, for employees, the days were long and filled\
\ with many risks. It was only a matter of time before someone snapped. When it\
\ finally happened, it resulted in 7 casualties, all caused by one case of mental\
\ insanity. Unbeknownst of what it would do to the company, the newspapers nicknamed\
  \ it the case of 'The Saiko killer'. Saiko's competitors cleverly played onto\
\ these circumstances and smothered the company's good name. Despite a quick namechange,\
\ the former Saiko company never recovered from the incident.||It's all gone now.\
\ The dreary halls no longer echo the loud activity that once took place here. Silence\
\ ruled the corridors for years, up until very recently. Be on the lookout when\
\ entering the former workfloor, because depression and fear were common factors\
\ when the place was active..."
releaseDate: "2005-09"
attachments:
- type: "IMAGE"
name: "DM-UCMP2-Saiko_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/U/DM-UCMP2-Saiko_shot_1.png"
originalFilename: "dm-ucmp2-saiko.zip"
hash: "919ba01c643d9d4edd6d0721242e5a416858aa06"
fileSize: 11121461
files:
- name: "DM-UCMP2-Saiko.ut2"
fileSize: 30467267
hash: "1ce02343c1e98b73134d74bfeefb27ca704b0b5f"
- name: "DM-UCMP2-Saiko.ogg"
fileSize: 2549851
hash: "7995283149b08c18da76d1340fb413e677cedfc3"
otherFiles: 2
dependencies: {}
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/DeathMatch/U/dm-ucmp2-saiko.zip"
main: true
repack: false
state: "OK"
- url: "https://gamefront.online/files2/service/thankyou?id=4337670"
main: false
repack: false
state: "MISSING"
- url: "http://ut2004.ut-files.com/index.php?dir=Maps/DeathMatch/MapsT/&file=dm-ucmp2-saiko.zip"
main: false
repack: false
state: "OK"
- url: "http://ut2004.ut-files.com/index.php?dir=Maps/DeathMatch/MapsU/&file=dm-ucmp2-saiko.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/DeathMatch/U/9/1/9ba01c/dm-ucmp2-saiko.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/DeathMatch/U/9/1/9ba01c/dm-ucmp2-saiko.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "Saiko"
playerCount: "2-8"
themes:
Tech: 0.7
Industrial: 0.3
bots: true
|
content/Unreal Tournament 2004/Maps/DeathMatch/U/9/1/9ba01c/dm-ucmp2-saiko_[919ba01c].yml
|
specs:
repository: &repository_spec
user: HUB_USERNAME
pwd: <PASSWORD>
urls:
api: https://hub.docker.com/v2/users/login/
repos: https://hub.docker.com/v2/repositories
registry:
bento-server: docker.io/bentoml/bento-server
dependencies:
cuda: &cuda_spec
cudart:
cudnn8:
libcublas:
libcurand:
libcusparse:
libcufft:
libcusolver:
  # Each of our distro releases will follow this base spec:
releases: &base_spec
templates_dir: _
base_image: _
add_to_tags: _
multistage_image: True
header: |
# syntax = docker/dockerfile:1.2
#
# ===========================================
#
# THIS IS A GENERATED DOCKERFILE DO NOT EDIT.
#
# ===========================================
envars:
- LANG=C.UTF-8
- LC_ALL=C.UTF-8
- PYTHONDONTWRITEBYTECODE=1
cuda:
<<: *cuda_spec
cuda_prefix_url: _
cuda_requires: "cuda>=11.3 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441 driver>=450"
# Docker tag validation can be defined with the following:
tag:
fmt: "{release_type}-python{python_version}-{suffixes}"
release_type:
python_version:
suffixes:
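    # Illustrative only: with release_type=devel, python_version=3.8 and
    # suffixes=debian, the fmt above would render the tag "devel-python3.8-debian".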
# allows users to define custom registries
# this can also include GCR, ECR, Heroku, Quay.io etc.
repository:
docker.io:
<<: *repository_spec
cuda:
11.3.1: &cuda11_3_1
<<: *cuda_spec
cudart: 11.3.109-1
cudnn8: 8.2.0.53-1
    libcublas: 11.5.1.109-1
libcurand: 10.2.4.109-1
libcusparse: 11.6.0.109-1
libcufft: 10.4.2.109-1
libcusolver: 11.1.2.109-1
releases:
debian10:
<<: *base_spec
templates_dir: templates/debian
base_image: debian:buster-slim
add_to_tags: "debian"
cuda:
<<: *cuda11_3_1
cuda_prefix_url: "ubuntu2004"
centos8:
<<: *base_spec
templates_dir: templates/rhel
base_image: centos:8
add_to_tags: "centos8"
cuda:
<<: *cuda11_3_1
cuda_prefix_url: "rhel8"
centos7:
<<: *base_spec
templates_dir: templates/rhel
base_image: centos:7
add_to_tags: "centos7"
cuda:
<<: *cuda11_3_1
cuda_prefix_url: "rhel7"
amazonlinux2:
<<: *base_spec
templates_dir: templates/rhel
base_image: amazonlinux:2
add_to_tags: "amazonlinux2"
alpine3.14:
<<: *base_spec
templates_dir: templates/alpine
base_image: alpine:3.14
add_to_tags: "alpine3.14"
# Linux distros for each package are defined below:
# * currently there's only one package `bento-server`
# * distros are defined as <distro_releases><distro_version>
packages:
bento-server:
devel:
- debian10
- centos8
- centos7
runtime:
- debian10
- centos8
- centos7
- amazonlinux2
- alpine3.14
cudnn:
- debian10
- centos8
- centos7
|
docker/manifest.yml
|
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: scality
name: scality
namespace: REPLACE_NAMESPACE
spec:
replicas: 1
selector:
matchLabels:
app: scality
template:
metadata:
labels:
app: scality
spec:
containers:
- image: quay.io/ocpmetal/s3server
imagePullPolicy: Always
name: s3server
env:
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: assisted-installer-s3
key: aws_secret_access_key
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: assisted-installer-s3
key: aws_access_key_id
- name: AWS_REGION
valueFrom:
secretKeyRef:
name: assisted-installer-s3
key: aws_region
- name: BUCKET
valueFrom:
secretKeyRef:
name: assisted-installer-s3
key: bucket
- name: S3_ENDPOINT_URL
valueFrom:
secretKeyRef:
name: assisted-installer-s3
key: endpoint
- name: S3DATAPATH
valueFrom:
secretKeyRef:
name: assisted-installer-s3
key: s3_data_path
- name: S3METADATAPATH
valueFrom:
secretKeyRef:
name: assisted-installer-s3
key: s3_metadata_path
volumeMounts:
- mountPath: /mnt/data
name: scalityvol
resources:
limits:
cpu: 500m
memory: 2000Mi
requests:
cpu: 300m
memory: 2000Mi
volumes:
- name: scalityvol
persistentVolumeClaim:
claimName: scality-pv-claim
---
apiVersion: v1
kind: Service
metadata:
labels:
app: scality
  # cloudserver-front is supported as one of the default hostnames that can receive messages;
  # to support other hostnames, they need to be defined with the HOST_NAME environment variable.
name: cloudserver-front
namespace: REPLACE_NAMESPACE
spec:
ports:
- port: 8000
protocol: TCP
targetPort: 8000
selector:
app: scality
clusterIP: None
status:
loadBalancer: {}
|
deploy/s3/scality-deployment.yaml
|
common:
image:
repository: blacktop/elasticsearch
tag: "6.2.3"
pullPolicy: IfNotPresent
# Defines the service type for all outward-facing (non-discovery) services.
serviceType: ClusterIP
# Any extra or specific configuration that is needed can be added here.
config:
index.codec: best_compression
# If you want any plugins installed, give them here as a list. They will be
# passed to elasticsearch-plugin install {line here}
plugins:
# You need to enable this plugin if you want to use the TLS option at the bottom
# - com.floragunn:search-guard-ssl:6.0.0-24.beta1.1
env:
# Uncomment this if you get the "No up-and-running site-local (private)
# addresses" error.
# NETWORK_HOST: "_eth0_"
# The minimum number of masters that will be able to form a quorum. This
# should be (#masters / 2) + 1. Default is 2.
NUMBER_OF_MASTERS: "2"
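  # For example, with 3 master-eligible nodes: (3 / 2) + 1 = 2 using integer
  # division, which matches the default above.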
# Client/ingest nodes can execute pre-processing pipelines, composed of
# one or more ingest processors. Depending on the type of operations performed
# by the ingest processors and the required resources, it may make sense to
# have dedicated ingest nodes, that will only perform this specific task.
client:
# It isn't common to need more than 2 client nodes.
replicas: 2
antiAffinity: "soft"
heapSize: 256m
# More info on what this setting does is in the config map. Only change this
# if you set the cpu limit to over 1 full cpu.
processors: 1
labels:
annotations:
resources:
limits:
cpu: 500m
memory: 384Mi
requests:
cpu: 100m
memory: 256Mi
# Data nodes hold the shards that contain the documents you have indexed. Data
# nodes handle data related operations like CRUD, search, and aggregations.
# These operations are I/O-, memory-, and CPU-intensive. It is important to
# monitor these resources and to add more data nodes if they are overloaded.
#
# The main benefit of having dedicated data nodes is the separation of the
# master and data roles.
data:
# This count will depend on your data and computation needs.
replicas: 2
antiAffinity: "soft"
storage: 12Gi
storageClass: "default"
heapSize: 256m
enableHTTP: false
# More info on what this setting does is in the config map. Only change this
# if you set the cpu limit to over 1 full cpu.
processors: 1
labels:
annotations:
resources:
limits:
cpu: 500m
memory: 384Mi
requests:
cpu: 100m
memory: 256Mi
# The master node is responsible for lightweight cluster-wide actions such as
# creating or deleting an index, tracking which nodes are part of the
# cluster, and deciding which shards to allocate to which nodes. It is
# important for cluster health to have a stable master node.
master:
  # Master replica count should generally be at least 3, so that a quorum of
  # (#masters / 2) + 1 nodes can be formed.
replicas: 3
antiAffinity: "soft"
storage: 2Gi
storageClass: "default"
heapSize: 256m
enableHTTP: false
# More info on what this setting does is in the config map. Only change this
# if you set the cpu limit to over 1 full cpu.
processors: 1
labels:
annotations:
resources:
limits:
cpu: 500m
memory: 384Mi
requests:
cpu: 100m
memory: 256Mi
curator:
enable: true
schedule: "0 1 * * *"
image: quay.io/lalamove/elasticsearch-curator
imageTag: "5.4.0"
imagePullPolicy: "IfNotPresent"
# Allows modification of the default age-based filter. If you require more
# sophisticated filtering, modify the action file specified in
# templates/es-curator-config.yaml.
age:
timestring: "%Y.%m.%d"
unit: "days"
unit_count: 3
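  # For example, with the defaults above, the daily 01:00 run selects indices
  # whose %Y.%m.%d-stamped names are more than 3 days old for the curator action.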
service:
httpPort: 9200
transportPort: 9300
rbac:
create: true
tls:
enable: false
clusterDomain: cluster.local
image: quay.io/lalamove/certificate-init-container
imageTag: v0.2.0
imagePullPolicy: "IfNotPresent"
searchguard:
# If you enable searchguard, you must enable TLS as well, and pay close attention to the instructions after install
enable: false
plugin: com.floragunn:search-guard-6:6.2.3-22.0
enterprise_modules: false
kibana_user: kibana
admins:
users:
init:
image: quay.io/lalamove/sgadmin
imageTag: v6.2.3-22.0
imagePullPolicy: "IfNotPresent"
restartPolicy: Never
activeDeadlineSeconds: 600
|
values.yaml
|
name: SkuDescription
uid: '@azure/arm-appservice.SkuDescription'
package: '@azure/arm-appservice'
summary: Description of a SKU for a scalable resource.
fullName: SkuDescription
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
- name: capabilities
uid: '@azure/arm-appservice.SkuDescription.capabilities'
package: '@azure/arm-appservice'
summary: 'Capabilities of the SKU, e.g., is traffic manager enabled?'
fullName: capabilities
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'capabilities?: Capability[]'
return:
description: ''
type: '<xref uid="@azure/arm-appservice.Capability" />[]'
- name: capacity
uid: '@azure/arm-appservice.SkuDescription.capacity'
package: '@azure/arm-appservice'
summary: Current number of instances assigned to the resource.
fullName: capacity
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'capacity?: undefined | number'
return:
description: ''
type: undefined | number
- name: family
uid: '@azure/arm-appservice.SkuDescription.family'
package: '@azure/arm-appservice'
summary: Family code of the resource SKU.
fullName: family
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'family?: undefined | string'
return:
description: ''
type: undefined | string
- name: locations
uid: '@azure/arm-appservice.SkuDescription.locations'
package: '@azure/arm-appservice'
summary: Locations of the SKU.
fullName: locations
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'locations?: string[]'
return:
description: ''
type: 'string[]'
- name: name
uid: '@azure/arm-appservice.SkuDescription.name'
package: '@azure/arm-appservice'
summary: Name of the resource SKU.
fullName: name
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'name?: undefined | string'
return:
description: ''
type: undefined | string
- name: size
uid: '@azure/arm-appservice.SkuDescription.size'
package: '@azure/arm-appservice'
summary: Size specifier of the resource SKU.
fullName: size
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'size?: undefined | string'
return:
description: ''
type: undefined | string
- name: skuCapacity
uid: '@azure/arm-appservice.SkuDescription.skuCapacity'
package: '@azure/arm-appservice'
summary: 'Min, max, and default scale values of the SKU.'
fullName: skuCapacity
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'skuCapacity?: SkuCapacity'
return:
description: ''
type: <xref uid="@azure/arm-appservice.SkuCapacity" />
- name: tier
uid: '@azure/arm-appservice.SkuDescription.tier'
package: '@azure/arm-appservice'
summary: Service tier of the resource SKU.
fullName: tier
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'tier?: undefined | string'
return:
description: ''
type: undefined | string
|
preview-packages/docs-ref-autogen/@azure/arm-appservice/SkuDescription.yml
|
---
title: Best Practices - yaml.info
links:
- format: Formatting
- file: File Extension
- plain: Plain Scalars
sections:
- name: intro
title: Best Practices
content:
- |
Here are some tips and tricks for how to write and format YAML files.
- name: format
title: Formatting
content:
- <h3>Indentation</h3>
- Tabs are forbidden as indentation.
- You can freely choose the number of spaces for indentation.
    More than 8 probably doesn't make sense, and this might become a hard limit
    in the next YAML version.
- You can use different indentation inside of the same YAML document,
as long as it is the same for one level.
- The recommended number of spaces is `2`.
- yaml: |
# Two spaces
top level:
a: b
c: d
- yaml: |
    # Different numbers of spaces within the same level are invalid
top level:
a: b
c: d
- yaml: |
# No tabs, please!
top level:
a: b
c: d
- The opinions on sequence indentation differ.
- |
    The YAML creators recommend using "zero-indented" sequences:
- yaml: |
a sequence:
- with
- three
- items
- <h3>Layout</h3>
- |
Nested sequences can be formatted in a compact way:
- yaml: |
sequence:
- level one
- - level two
- |
When you have a sequence with more than one level, flow style might be
more readable:
- yaml: |
---
dice rolls:
- - 2
- 5
- - 3
- 4
---
dice rolls:
- [2, 5]
- [3, 4]
- |
You cannot start a sequence or another mapping on the same line as
a mapping key:
- yaml: |
key: inner: map
- yaml: |
key: - sequence
- |
But you can start a mapping as a sequence item on the same line:
- yaml: |
---
sequence:
-
a: b
c: d
---
# compact
sequence:
- a: b
c: d
- name: file
title: File Extension
content:
- The recommended file extension is `.yaml`.
- You will find a lot of applications that chose `.yml` though. Some
also allow both extensions.
- name: plain
title: Plain Scalars
content:
- |
You do not need to [quote](#topic:quote) scalars in most cases. The
[Quoting Chapter](#topic:quote) has detailed information about that.
- |
If you are unsure, here are some hints. To be safe, use quotes:
- |
<ul>
<li>If it could be a special type, but you want a string (see [Schema](#topic:schema))
<li>If the scalar starts with a non-alphanumeric character
<li>If the scalar contains control characters
<li>If the scalar contains tabs
<li>If the scalar contains `:<space>`, `<space>#`, `-<space>`, `:<end-of-string>`
    <li>If you are in flow style and the scalar contains one of `{}[],`
</ul>
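  - |
    To illustrate (a minimal sketch), these scalars would change meaning or break without quotes:
  - yaml: |
    safe: "yes"     # a string, not a YAML 1.1 boolean
    time: "12:34"   # a string, not a sexagesimal number
    flow: ["a,b"]   # the comma stays inside the scalar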
|
content/learn/bestpractices.yaml
|
services:
prestashop.bundle.routing.module_route_loader:
class: 'PrestaShopBundle\Routing\YamlModuleLoader'
arguments:
- '@=service("prestashop.module_kernel.repository").getActiveModulesPaths()'
tags: [routing.loader]
prestashop.bundle.routing.converter.legacy_url_converter:
class: 'PrestaShopBundle\Routing\Converter\LegacyUrlConverter'
public: true
arguments:
- '@router'
- '@prestashop.bundle.routing.converter.cache_provider'
prestashop.bundle.routing.converter.legacy_parameters_converter:
class: 'PrestaShopBundle\Routing\Converter\LegacyParametersConverter'
public: true
prestashop.bundle.routing.converter.router_provider:
class: 'PrestaShopBundle\Routing\Converter\RouterProvider'
arguments:
- '@router'
# We set this alias so that we can override it in test environment (to avoid memory limit crashes)
prestashop.bundle.routing.converter.cache:
alias: Symfony\Component\Cache\Adapter\AdapterInterface
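  # Illustrative only (hypothetical test config): the override could point the
  # alias at Symfony's NullAdapter so tests skip caching entirely, e.g.
  #   prestashop.bundle.routing.converter.cache:
  #     alias: Symfony\Component\Cache\Adapter\NullAdapter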
prestashop.bundle.routing.converter.cache_provider:
class: 'PrestaShopBundle\Routing\Converter\CacheProvider'
arguments:
- '@prestashop.bundle.routing.converter.router_provider'
- '@prestashop.bundle.routing.converter.cache'
- '@prestashop.bundle.routing.converter.routing_cache_key_generator'
prestashop.bundle.routing.converter.routing_cache_key_generator:
class: 'PrestaShopBundle\Routing\Converter\RoutingCacheKeyGenerator'
arguments:
- ["%kernel.project_dir%/src/PrestaShopBundle/Resources/config/routing/admin"]
- '@=service("prestashop.module_kernel.repository").getActiveModulesPaths()'
- '%kernel.environment%'
prestashop.bundle.routing.linter.security_annotation_linter:
class: 'PrestaShopBundle\Routing\Linter\SecurityAnnotationLinter'
arguments:
- '@annotation_reader'
- '@controller_name_converter'
prestashop.bundle.routing.linter.naming_convention_linter:
class: 'PrestaShopBundle\Routing\Linter\NamingConventionLinter'
arguments:
- '@controller_name_converter'
prestashop.bundle.routing.linter.legacy_link_linter:
class: 'PrestaShopBundle\Routing\Linter\LegacyLinkLinter'
prestashop.bundle.routing.linter.admin_route_provider:
class: 'PrestaShopBundle\Routing\Linter\AdminRouteProvider'
arguments:
- '@router'
|
apps/prestashop/htdocs/src/PrestaShopBundle/Resources/config/services/bundle/routing.yml
|
confd('harness:/'):
# base templates
- { src: docker/image/console/Dockerfile }
- { src: docker/image/console/root/entrypoint.sh }
- { src: docker/image/console/root/home/build/.my.cnf }
- { src: docker/image/console/root/lib/task/database/import.sh }
- { src: docker/image/console/root/lib/task/build/frontend.sh }
- { src: docker/image/console/root/lib/task/build/backend.sh }
- { src: docker/image/console/root/lib/task/composer/install.sh }
- { src: docker/image/console/root/lib/task/init.sh }
- { src: docker/image/console/root/lib/task/install.sh }
- { src: docker/image/console/root/lib/task/migrate.sh }
- { src: docker/image/console/root/lib/task/rabbitmq/vhosts.sh }
- { src: docker/image/console/root/lib/task/welcome.sh }
- { src: docker/image/console/root/usr/local/etc/php/php.ini }
- { src: docker/image/console/root/usr/local/etc/php/conf.d/docker-php-ext-blackfire.ini }
- { src: docker/image/console/root/usr/local/etc/php/conf.d/docker-php-ext-tideways.ini }
- { src: docker/image/console/root/usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini }
- { src: docker/image/lighthouse/Dockerfile }
- { src: docker/image/lighthouse/root/app/run.sh }
- { src: docker/image/nginx/Dockerfile }
- { src: docker/image/nginx/root/docker-entrypoint.d/config_render.sh }
- { src: docker/image/nginx/root/etc/nginx/conf.d/0-nginx.conf }
- { src: docker/image/nginx/root/etc/nginx/conf.d/default.conf.template }
- { src: docker/image/nginx/root/etc/ssl/certs/app.crt }
- { src: docker/image/nginx/root/etc/ssl/private/app.key }
- { src: docker/image/php-fpm/Dockerfile }
- { src: docker/image/php-fpm/root/entrypoint.sh }
- { src: docker/image/php-fpm/root/fix_app_permissions.sh }
- { src: docker/image/php-fpm/root/usr/local/etc/php/php.ini }
- { src: docker/image/php-fpm/root/usr/local/etc/php/conf.d/docker-php-ext-blackfire.ini }
- { src: docker/image/php-fpm/root/usr/local/etc/php/conf.d/docker-php-ext-tideways.ini }
- { src: docker/image/php-fpm/root/usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini }
- { src: docker/image/php-fpm/root/usr/local/etc/php-fpm.d/pool.conf.template }
- { src: docker/image/tls-offload/root/etc/nginx/conf.d/0-nginx.conf }
- { src: docker/image/tls-offload/root/etc/nginx/conf.d/default.conf }
- { src: docker/image/tls-offload/root/etc/ssl/certs/app.crt }
- { src: docker/image/tls-offload/root/etc/ssl/private/app.key }
- { src: docker/image/varnish/root/etc/varnish/default.vcl }
- { src: docker/image/cron/Dockerfile }
- { src: docker/image/cron/root/crontab }
- { src: docker/image/cron/root/entrypoint.sh }
- { src: docker/image/cron/root/usr/local/etc/php/php.ini }
- { src: application/overlay/Jenkinsfile }
- { src: application/overlay/auth.json }
- { src: application/overlay/.dockerignore, dst: workspace:/.dockerignore }
- { src: application/skeleton/README.md }
- { src: mutagen.yml, dst: workspace:/mutagen.yml } # docker-compose.yml render reads this file
- { src: docker-compose.yml, dst: workspace:/docker-compose.yml }
- { src: harness/scripts/enable.sh }
- { src: helm/app/_twig/templates/service/varnish/configmap.yaml, dst: harness:/helm/app/templates/service/varnish/configmap.yaml }
- { src: helm/app/values.yaml }
- { src: helm/app/values-production.yaml }
- { src: helm/app/values-preview.yaml }
- { src: helm/app/Chart.yaml }
- { src: helm/qa/requirements.yaml }
- { src: helm/qa/values.yaml }
- { src: helm/qa/Chart.yaml }
# harness-drupal templates
- { src: application/overlay/docroot/sites/default/settings.local.php }
- { src: application/overlay/phpcs.xml }
- { src: application/overlay/phpunit.xml }
- { src: application/skeleton/behat.yml }
- { src: application/skeleton/composer.json }
|
src/drupal8/harness/config/confd.yml
|
name: firebase_firestore_odm
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
pull_request:
paths:
- "packages/cloud_firestore/**"
- "packages/cloud_firestore_odm/**"
- ".github/workflows/firebase_firestore_odm.yaml"
push:
branches:
- master
paths-ignore:
- "docs/**"
- "**.md"
env:
FLUTTERFIRE_PLUGIN_SCOPE: "*cloud_firestore*"
FLUTTERFIRE_PLUGIN_SCOPE_EXAMPLE: "*cloud_firestore_odm_example*"
jobs:
test:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v2.3.4
with:
fetch-depth: 0
- name: "Install Flutter"
run: ./.github/workflows/scripts/install-flutter.sh stable
- name: "Install Tools"
run: ./.github/workflows/scripts/install-tools.sh
- name: "Bootstrap Workspace"
run: |
melos bootstrap --scope="*firebase_core*" --scope="$FLUTTERFIRE_PLUGIN_SCOPE"
melos run odm:example_build_runner:build
melos run odm:generator_test_build_runner:build
# Needed because generating breaks the local linking of melos
melos bootstrap --scope="*firebase_core*" --scope="$FLUTTERFIRE_PLUGIN_SCOPE"
- name: "Flutter Test"
run: melos run odm:test:flutter --no-select
- name: "Dart Test"
run: melos run odm:test:dart --no-select
integration_test:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v2.3.4
with:
fetch-depth: 0
- uses: actions/setup-java@v2
with:
distribution: 'temurin'
java-version: '11'
- name: "Install Flutter"
run: ./.github/workflows/scripts/install-flutter.sh stable
- name: "Install Tools"
run: |
./.github/workflows/scripts/install-tools.sh
sudo npm i -g firebase-tools
- name: "Bootstrap Workspace"
run: |
melos bootstrap --scope="*firebase_core*" --scope="$FLUTTERFIRE_PLUGIN_SCOPE"
melos run odm:example_build_runner:build
melos run odm:generator_test_build_runner:build
- name: Start Firebase Emulator
run: cd ./.github/workflows/scripts && ./start-firebase-emulator.sh
- name: "Drive Example"
run: ./.github/workflows/scripts/drive-example.sh web
|
.github/workflows/firebase_firestore_odm.yaml
|
name: PrebuiltDomain
uid: '@azure/cognitiveservices-luis-authoring.PrebuiltDomain'
package: '@azure/cognitiveservices-luis-authoring'
summary: Prebuilt Domain.
fullName: PrebuiltDomain
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
- name: culture
uid: '@azure/cognitiveservices-luis-authoring.PrebuiltDomain.culture'
package: '@azure/cognitiveservices-luis-authoring'
summary: ''
fullName: culture
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'culture?: undefined | string'
return:
description: ''
type: undefined | string
- name: description
uid: '@azure/cognitiveservices-luis-authoring.PrebuiltDomain.description'
package: '@azure/cognitiveservices-luis-authoring'
summary: ''
fullName: description
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'description?: undefined | string'
return:
description: ''
type: undefined | string
- name: entities
uid: '@azure/cognitiveservices-luis-authoring.PrebuiltDomain.entities'
package: '@azure/cognitiveservices-luis-authoring'
summary: ''
fullName: entities
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'entities?: PrebuiltDomainItem[]'
return:
description: ''
type: >-
<xref uid="@azure/cognitiveservices-luis-authoring.PrebuiltDomainItem"
/>[]
- name: examples
uid: '@azure/cognitiveservices-luis-authoring.PrebuiltDomain.examples'
package: '@azure/cognitiveservices-luis-authoring'
summary: ''
fullName: examples
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'examples?: undefined | string'
return:
description: ''
type: undefined | string
- name: intents
uid: '@azure/cognitiveservices-luis-authoring.PrebuiltDomain.intents'
package: '@azure/cognitiveservices-luis-authoring'
summary: ''
fullName: intents
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'intents?: PrebuiltDomainItem[]'
return:
description: ''
type: >-
<xref uid="@azure/cognitiveservices-luis-authoring.PrebuiltDomainItem"
/>[]
- name: name
uid: '@azure/cognitiveservices-luis-authoring.PrebuiltDomain.name'
package: '@azure/cognitiveservices-luis-authoring'
summary: ''
fullName: name
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'name?: undefined | string'
return:
description: ''
type: undefined | string
|
docs-ref-autogen/@azure/cognitiveservices-luis-authoring/PrebuiltDomain.yml
|
get:
summary: Fetch prepared shift exchanges
description: |
    To be used with the Shift Exchanges API. Fetches an array of prepared Shift Exchanges for the requested shift. The shifts in the response are filtered by user permissions (tags) and by shifts that might potentially overlap after the exchange has occurred.
parameters:
- name: shiftId
in: path
required: true
schema:
type: string
format: ObjectId
- name: from
in: query
description: Start of interval
required: true
schema:
type: string
format: date
example: '2020-01-01'
- name: to
in: query
description: End of interval
required: true
schema:
type: string
format: date
example: '2020-01-08'
tags:
- Shift Exchanges
responses:
200:
description: Object including exchangeable shifts and the permitted users for each shift.
content:
application/json:
schema:
type: array
items:
type: object
properties:
sender-id:
type: string
format: ObjectId
description: ID of exchange request sender.
example: 5dde7024eded8872705aabd2
sender-shift-id:
type: string
format: ObjectId
description: Shift to be changed out.
example: 5dde7024eded8872705aabd3
recipient-id:
type: string
format: ObjectId
description: ID of exchange request recipient.
example: 5dde7024eded8872705aabd4
recipient-shift-id:
type: string
format: ObjectId
description: Shift belonging to recipient, to be changed in.
example: 5dde7024eded8872705aabd5
400:
description: ShiftId invalid format
404:
description: Shift not found
|
spec/paths/shifts@{shiftId}@prepared-exchanges.yaml
|
image:
name: bullpup-keystone-${BUILD_TIMESTAMP}
distribution: ${DISTRO}
release: ${RELEASE}
description: |-
Bullpup Keystone Server {{ image.release }}
architecture: x86_64
source:
downloader: debootstrap
same_as: gutsy
url: http://archive.ubuntu.com/ubuntu
keyserver: keyserver.ubuntu.com
keys:
- <KEY>
targets:
lxc:
create-message: |-
You just created an {{ image.description }} container.
config:
- type: all
before: 5
content: |-
lxc.include = LXC_TEMPLATE_CONFIG/ubuntu.common.conf
- type: all
after: 4
content: |-
lxc.include = LXC_TEMPLATE_CONFIG/common.conf
- type: all
content: |-
lxc.arch = {{ image.architecture_personality }}
files:
- path: /etc/hostname
generator: hostname
- path: /etc/hosts
generator: hosts
- path: /etc/resolvconf/resolv.conf.d/original
generator: remove
- path: /etc/resolvconf/resolv.conf.d/tail
generator: remove
- path: /etc/machine-id
generator: remove
packages:
manager: apt
update: true
cleanup: true
repositories:
- name: sources.list
url: |-
deb http://archive.ubuntu.com/ubuntu {{ image.release }} main restricted universe multiverse
deb http://archive.ubuntu.com/ubuntu {{ image.release }}-updates main restricted universe multiverse
deb http://security.ubuntu.com/ubuntu {{ image.release }}-security main restricted universe multiverse
architectures:
- amd64
- i386
sets:
- packages:
- apt-transport-https
- keystone
- apache2
- python3-openstackclient
action: install
actions:
- trigger: post-packages
action: |-
#!/bin/sh
set -eux
# Make sure the locale is built and functional
locale-gen en_US.UTF-8
update-locale LANG=en_US.UTF-8
apt-get update
apt-get upgrade -y
apt-get clean
apt-get autoclean
systemctl disable systemd-resolved
# Cleanup underlying /run
mount -o bind / /mnt
rm -rf /mnt/run/*
umount /mnt
# Cleanup temporary shadow paths
rm /etc/*-
mappings:
architecture_map: debian
|
containers/images/keystone.yaml
|
api_name: []
items:
- children: []
class: azure.servicefabric.models.cluster_health.ClusterHealth
fullName: azure.servicefabric.models.cluster_health.ClusterHealth
inheritance:
- inheritance:
- inheritance:
- type: builtins.object
type: msrest.serialization.Model
type: azure.servicefabric.models.entity_health.EntityHealth
langs:
- python
module: azure.servicefabric.models.cluster_health
name: ClusterHealth
source:
id: ClusterHealth
path: azure-servicefabric\azure\servicefabric\models\cluster_health.py
remote:
branch: master
path: azure-servicefabric\azure\servicefabric\models\cluster_health.py
repo: https://github.com/Azure/azure-sdk-for-python.git
startLine: 15
summary: 'Represents the health of the cluster.
Contains the cluster aggregated health state, the cluster application and
node health states as well as the health events and the unhealthy
evaluations.'
syntax:
content: ClusterHealth(**kwargs)
parameters:
- description: 'The HealthState representing the
aggregated health state of the entity computed by Health Manager.
The health evaluation of the entity reflects all events reported on the
entity and its children (if any).
The aggregation is done by applying the desired health policy. Possible
values include: ''Invalid'', ''Ok'', ''Warning'', ''Error'', ''Unknown'''
id: aggregated_health_state
type:
- str
- azure.servicefabric.models.HealthState
- description: The list of health events reported on the entity.
id: health_events
type:
- list[azure.servicefabric.models.HealthEvent]
- description: 'The unhealthy evaluations that show why the
current aggregated health state was returned by Health Manager.'
id: unhealthy_evaluations
type:
- list[azure.servicefabric.models.HealthEvaluationWrapper]
- description: 'Shows the health statistics for all children
types of the queried entity.'
id: health_statistics
type:
- azure.servicefabric.models.HealthStatistics
- description: 'Cluster node health states as found in the
health store.'
id: node_health_states
type:
- list[azure.servicefabric.models.NodeHealthState]
- description: 'Cluster application health states as
found in the health store.'
id: application_health_states
type:
- list[azure.servicefabric.models.ApplicationHealthState]
type: class
uid: azure.servicefabric.models.cluster_health.ClusterHealth
references:
- fullName: list[azure.servicefabric.models.HealthEvent]
name: list[HealthEvent]
spec.python:
- fullName: list
name: list
uid: list
- fullName: '['
name: '['
- fullName: azure.servicefabric.models.HealthEvent
name: HealthEvent
uid: azure.servicefabric.models.HealthEvent
- fullName: ']'
name: ']'
uid: list[azure.servicefabric.models.HealthEvent]
- fullName: list[azure.servicefabric.models.HealthEvaluationWrapper]
name: list[HealthEvaluationWrapper]
spec.python:
- fullName: list
name: list
uid: list
- fullName: '['
name: '['
- fullName: azure.servicefabric.models.HealthEvaluationWrapper
name: HealthEvaluationWrapper
uid: azure.servicefabric.models.HealthEvaluationWrapper
- fullName: ']'
name: ']'
uid: list[azure.servicefabric.models.HealthEvaluationWrapper]
- fullName: list[azure.servicefabric.models.NodeHealthState]
name: list[NodeHealthState]
spec.python:
- fullName: list
name: list
uid: list
- fullName: '['
name: '['
- fullName: azure.servicefabric.models.NodeHealthState
name: NodeHealthState
uid: azure.servicefabric.models.NodeHealthState
- fullName: ']'
name: ']'
uid: list[azure.servicefabric.models.NodeHealthState]
- fullName: list[azure.servicefabric.models.ApplicationHealthState]
name: list[ApplicationHealthState]
spec.python:
- fullName: list
name: list
uid: list
- fullName: '['
name: '['
- fullName: azure.servicefabric.models.ApplicationHealthState
name: ApplicationHealthState
uid: azure.servicefabric.models.ApplicationHealthState
- fullName: ']'
name: ']'
uid: list[azure.servicefabric.models.ApplicationHealthState]
|
docs-ref-autogen/azure-servicefabric/azure.servicefabric.models.cluster_health.ClusterHealth.yml
|
apiVersion: apps/v1
kind: Deployment
metadata:
name: skipper-ingress
namespace: kube-system
labels:
application: skipper-ingress
version: v0.11.40
component: ingress
spec:
strategy:
rollingUpdate:
maxSurge: 0
selector:
matchLabels:
application: skipper-ingress
template:
metadata:
labels:
application: skipper-ingress
version: v0.11.40
component: ingress
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: application
operator: In
values:
- skipper-ingress
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: skipper-ingress
nodeSelector:
kubernetes.io/role: worker
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
containers:
- name: skipper-ingress
image: registry.opensource.zalan.do/teapot/skipper:v0.12.0
ports:
- name: ingress-port
containerPort: 9999
hostPort: 9999
args:
- "skipper"
- "-kubernetes"
- "-kubernetes-in-cluster"
- "-kubernetes-path-mode=path-prefix"
- "-address=:9999"
- "-wait-first-route-load"
- "-proxy-preserve-host"
- "-serve-host-metrics"
- "-disable-metrics-compat"
- "-enable-profile"
- "-enable-ratelimits"
- "-experimental-upgrade"
- "-metrics-exp-decay-sample"
- "-reverse-source-predicate"
- "-lb-healthcheck-interval=3s"
- "-metrics-flavour=prometheus"
- "-enable-connection-metrics"
- "-max-audit-body=0"
- "-histogram-metric-buckets=.0001,.00025,.0005,.00075,.001,.0025,.005,.0075,.01,.025,.05,.075,.1,.2,.3,.4,.5,.75,1,2,3,4,5,7,10,15,20,30,60,120,300,600"
- "-expect-continue-timeout-backend=30s"
- "-keepalive-backend=30s"
- "-max-idle-connection-backend=0"
- "-response-header-timeout-backend=1m"
- "-timeout-backend=1m"
- "-tls-timeout-backend=1m"
- "-close-idle-conns-period=20s"
- "-idle-timeout-server=62s"
- "-read-timeout-server=5m"
- "-write-timeout-server=60s"
- '-default-filters-prepend=enableAccessLog(4,5) -> lifo(2000,20000,"3s")'
resources:
limits:
cpu: "4"
memory: "1Gi"
requests:
cpu: "4"
memory: "1Gi"
readinessProbe:
httpGet:
path: /kube-system/healthz
port: 9999
initialDelaySeconds: 60
timeoutSeconds: 5
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
|
docs/kubernetes/deploy/deployment/deployment.yaml
|
openapi: 3.0.1
info:
version: 1.0.0
title: MediaWiki Section Content API
description: Adding translated section content to the content API.
tags:
- name: Page content
description: page content in different formats
paths:
/segments/{title}:
get: &segments_title_get_spec
tags:
- Page content
summary: Fetches a segmented page to be used in machine translation
description: |
        Use this endpoint to fetch the segmented content of a page. Clients should
use the returned content in conjunction with the [language transform
API](https://wikimedia.org/api/rest_v1/#!/Transform).
Stability: [experimental](https://www.mediawiki.org/wiki/API_versioning#Experimental)
parameters:
- name: title
in: path
description: 'Page title. Use underscores instead of spaces. Use percent-encoding. Example: `Main_Page`.'
required: true
schema:
type: string
responses:
200:
description: The segmented page for the given title and revision
content:
application/json:
schema:
type: object
properties:
sourceLanguage:
type: string
description: The source language of the page
title:
type: string
description: The title of the segmented page returned
revision:
type: integer
description: The revision ID of the segmented page
segmentedContent:
type: string
description: The segmented HTML body of the contents of the page
400:
description: Invalid revision
content:
application/problem+json:
schema:
$ref: '#/components/schemas/problem'
403:
description: Access to the specific revision is restricted
content:
application/problem+json:
schema:
$ref: '#/components/schemas/problem'
404:
description: Unknown page or revision
content:
application/problem+json:
schema:
$ref: '#/components/schemas/problem'
default:
description: Error
content:
application/problem+json:
schema:
$ref: '#/components/schemas/problem'
x-request-handler:
- get_from_cx:
request:
uri: '{{options.cx_host}}/v1/page/{domain}/{title}/{revision}'
x-monitor: false
/segments/{title}/{revision}:
get:
<<: *segments_title_get_spec
parameters:
- name: title
in: path
description: 'Page title. Use underscores instead of spaces. Use percent-encoding. Example: `Main_Page`.'
required: true
schema:
type: string
- name: revision
in: path
description: The revision id
required: true
schema:
type: integer
|
v1/content_segments.yaml
|
actions:
- utter_greet
- utter_did_that_help
- utter_goodbye
- utter_default
- utter_gratitude
- utter_ask_again
- utter_ticket_status
- utter_ticket_status_not_found
- utter_ticket_status_found
- utter_small_talk
- utter_whats_up
- utter_state
- utter_positive
- utter_negative
- utter_laugh
- utter_rude
- utter_apologies
- utter_compliment
- utter_welcomed
- utter_thank_u
- utter_positive_greet
- utter_negative_greet
intents:
- goodbye
- greet
- thanks
- current_matches
- affirm
- deny
- ticket_status
- whats_up
- compliment
- welcomed
- positive
- negative
- rude
- hobby
- thank_u
- state
- small_talk
- positive_greet
- negative_greet
templates:
utter_greet:
- 'Hello! <i class="em em-smiley"></i>'
- 'Hi! <i class="em em-slightly_smiling_face"></i>'
- 'Hey! <i class="em em-smiley"></i>'
- 'Greetings! <i class="em em-slightly_smiling_face"></i>'
- 'Hey there <i class="em em-smiley"></i>'
utter_did_that_help:
- text: "Did that help you?"
- text: "I hope that solved your query"
utter_goodbye:
- 'Talk to you later.'
- 'Bye bye :('
- 'Thank you! Bye!'
- 'see ya!'
utter_default:
- text: "I am sorry, I didn't get that. Could you please repeat your query?"
- text: "I am not sure what you are aiming for."
utter_gratitude:
- text: "Glad that I could be of help to you!\nBye"
utter_ask_again:
- text: "Okay! Let's start again, please tell me what do you need?"
- text: "No issues! Let's try this again.\n Please repeat your query?"
utter_ticket_status:
- text: "Please enter your ticket ID."
action_match_news:
- text: "Anything else do you want to know?"
utter_ticket_status_not_found:
- text: "Didn't found your ticket, could you please create it?"
utter_ticket_status_found:
- text: "Here's your ticket"
utter_rude:
- 'Kindly avoid using profane language. <i class="em em-expressionless"></i>'
utter_apologies:
- "That's alright. No worries."
utter_compliment:
- 'Thanks a lot. <i class="em em-smiley"></i>'
utter_laugh:
  - 'Laughing is good for your health. Keep it up. <i class="em em-innocent"></i>'
utter_positive:
- "Yes, Of Course"
- "Sure"
- "absolutely"
- "affirmative"
utter_negative:
- "nope"
- "nay"
- "not likely"
- "negative"
- "no way"
utter_whats_up:
- "Nothing much! </br> How have you been."
utter_state:
  - 'I am doing great. <i class="em em-smiley"></i> </br> How are you?'
  - 'Everything is going extremely well. <i class="em em-smiley"></i> </br> How are you?'
utter_small_talk:
- 'Nice to meet you too <i class="em em-handshake"></i>'
- 'Thank you. It is very nice to meet you as well <i class="em em-handshake"></i>'
- 'Am glad to meet you too <i class="em em-handshake"></i>'
utter_welcomed:
- '<i class="em em-smiley"></i>'
utter_thank_u:
- 'Its my pleasure. <i class="em em-innocent"></i>'
utter_positive_greet:
- text: "That's great."
utter_negative_greet:
  - text: 'Sorry to hear that. <i class="em em-pensive"></i></br> How can I help you today?'
|
domain.yml
|
AllCops:
Exclude:
- bin/**/*
- config/*/*
- config/application.rb
- config/boot.rb
- config/deploy.rb
- config/environment.rb
- config/schedule.rb
- db/**/*
- log/**/*
- public/**/*
- spec/rails_helper.rb
- spec/spec_helper.rb
- tmp/**/*
- vendor/**/*
- node_modules/**/*
- Guardfile
- .gems/**/*
- spec/support/example_group_helper.rb
Rails:
Enabled: true
Metrics/ClassLength:
CountComments: false # count full line comments?
Max: 150
Enabled: false
Metrics/ModuleLength:
CountComments: false # count full line comments?
Max: 100
Enabled: false
Metrics/ParameterLists:
Max: 10
Style/AccessorMethodName:
Enabled: false
Style/AndOr:
Enabled: false
Style/Documentation:
Enabled: false
Style/FileName:
Enabled: false
Style/EmptyLinesAroundBlockBody:
Enabled: false
Style/EmptyLinesAroundClassBody:
Enabled: false
Style/EmptyLinesAroundModuleBody:
Enabled: false
Style/GuardClause:
Enabled: false
Style/IfUnlessModifier:
Enabled: false
Style/Semicolon:
AllowAsExpressionSeparator: true
Style/StringLiterals:
EnforcedStyle: single_quotes
Enabled: true
Style/StringLiteralsInInterpolation:
EnforcedStyle: single_quotes
Enabled: true
Style/ClassAndModuleChildren:
Enabled: false
Style/WordArray:
Enabled: false
Metrics/CyclomaticComplexity:
Enabled: false
Metrics/LineLength:
Enabled: false
Metrics/MethodLength:
  CountComments: false # count full line comments?
  Max: 80
Metrics/AbcSize:
  Max: 160
Rails/HasAndBelongsToMany:
Enabled: false
Rails/TimeZone:
Enabled: false
Rails/Delegate:
Enabled: false
Style/BlockDelimiters:
Enabled: false
Style/RegexpLiteral:
AllowInnerSlashes: true
Style/PredicateName:
Enabled: false
Lint/AssignmentInCondition:
Enabled: false
Lint/NestedMethodDefinition:
Enabled: false
Metrics/PerceivedComplexity:
Max: 60
Style/FormatString:
EnforcedStyle: percent
Style/FrozenStringLiteralComment:
Enabled: false
|
.rubocop.yml
|
controller_routing:
resource: "@YosimitsoWorkingForumBundle/Controller/*"
type: annotation
workingforum_admin:
path: /admin
defaults: { _controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminController:indexAction }
workingforum_admin_forum_edit:
path: /admin/forum/edit/{id}
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminForumController:editAction }
requirements:
id: '\d+'
workingforum_admin_forum_add:
path: /admin/forum/add
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminForumController:addAction }
workingforum_admin_delete_forum:
path: /admin/forum/delete/{forum_id}
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminForumController:deleteForumAction }
requirements:
forum_id: '\d+'
workingforum_admin_forum_rules:
path: /admin/rules
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminRulesController:rulesAction }
workingforum_admin_edit_forum_rules:
path: /admin/rules/edit/{lang}
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminRulesController:rulesEditAction }
workingforum_admin_new_forum_rules:
path: /admin/rules/new/{lang}
defaults:
_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminRulesController:rulesNewAction
newLang: true
workingforum_admin_install_settings:
path: /admin/install-settings
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminController:installSettingsAction }
workingforum_admin_report:
path: /admin/report
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminReportController:reportAction }
workingforum_admin_report_history:
path: /admin/report/history
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminReportController:reportHistoryAction }
workingforum_admin_report_action_moderate:
path: /admin/reportaction/moderate
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminReportController:reportActionModerateAction }
workingforum_admin_report_action_good:
path: /admin/reportaction/good
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminReportController:reportActionGoodAction }
workingforum_admin_user:
path: /admin/users
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\Admin\AdminUsersController:userListAction }
workingforum_search:
path: /search
defaults: {_controller: Yosimitso\WorkingForumBundle\Controller\SearchController:indexAction }
workingforum_vote_up:
path: /voteup
defaults: { _controller: Yosimitso\WorkingForumBundle\Controller\PostController:voteUpAction }
|
Resources/config/routing.yml
|
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: fabedge
name: fabdns
labels:
app: fabdns
spec:
replicas: 1
selector:
matchLabels:
app: fabdns
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 25%
maxUnavailable: 1
template:
metadata:
labels:
app: fabdns
spec:
containers:
- name: fabdns
image: fabedge/fabdns
imagePullPolicy: IfNotPresent
args:
- -conf
- /etc/fabdns/Corefile
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 3
httpGet:
path: /ready
port: 8181
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- containerPort: 53
name: dns-udp
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
volumeMounts:
- mountPath: /etc/fabdns
name: config-volume
readOnly: true
dnsPolicy: Default
restartPolicy: Always
serviceAccountName: fabedge-operator
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
              # Configure this for your chosen edge platform so that fabdns is not deployed onto edge nodes
- key: node-role.kubernetes.io/edge
operator: DoesNotExist
volumes:
- configMap:
defaultMode: 420
items:
- key: Corefile
path: Corefile
name: fabdns
name: config-volume
|
deploy/fabdns.yaml
|
name: Iymrith, Ancient Blue Dragon
slug_name: iymrith-ancient-blue-dragon
challenge_rating: 23.0
experience: 50000
size: Gargantuan
type: dragon
subtype:
alignment: lawful evil
armor_class: 22
armor_class_type: natural armor
hit_points: 481
hit_dice: 26d20+208
speed: 40 ft., burrow 40 ft., fly 80 ft.
strength: 29
dexterity: 10
constitution: 27
intelligence: 18
wisdom: 17
charisma: 21
saving_throws: Dex +7, Con +15, Wis +10, Cha +12
skills: Perception +17, Stealth +7
senses: blindsight 60 ft., darkvision 120 ft.
languages: Common, Draconic, Giant, Terran
damage_resistances:
damage_immunities: lightning
damage_vulnerabilities:
condition_immunities:
actions:
- name: Multiattack
description: 'The dragon can use its Frightful Presence. It then makes three attacks: one with its bite and two with its claws.'
- name: Bite
description: 'Melee Weapon Attack: +16 to hit, reach 15 ft., one target. Hit: 20 (2d10 + 9) piercing damage plus 11 (2d10) lightning damage.'
- name: Claw
description: 'Melee Weapon Attack: +16 to hit, reach 10 ft., one target. Hit: 16 (2d6 + 9) slashing damage.'
- name: Tail
description: 'Melee Weapon Attack: +16 to hit, reach 20 ft., one target. Hit: 18 (2d8 + 9) bludgeoning damage.'
- name: Frightful Presence
description: Each creature of the dragon's choice that is within 120 feet of the dragon and aware of it must succeed on a DC 20 Wisdom saving throw or become frightened for 1 minute. A creature can repeat the saving throw at the end of each of its turns, ending the effect on itself on a success. If a creature's saving throw is successful or the effect ends for it, the creature is immune to the dragon's Frightful Presence for the next 24 hours.
- name: Lightning Breath (Recharge 5-6)
description: The dragon exhales lightning in a 120-foot line that is 10 feet wide. Each creature in that line must make a DC 23 Dexterity saving throw, taking 88 (16d10) lightning damage on a failed save, or half as much damage on a successful one.
- name: Change Shape
description: "Iymrith magically polymorphs into a female storm giant or back into her true form. She reverts to her true form if she dies. Any equipment she is wearing or carrying is absorbed or borne by the new form (the dragon's choice).\n\nIn storm giant form, Iymrith retains her alignment, hit points, Hit Dice, ability to speak, proficiencies, Legendary Resistance, lair actions, and Intelligence, Wisdom, and Charisma scores, as well as this action. Her statistics are otherwise replaced by those of the new form."
legendary_action_description: The dragon can take 3 legendary actions, choosing from the options below. Only one legendary action option can be used at a time, and only at the end of another creature's turn. The dragon regains spent legendary actions at the start of its turn.
legendary_actions:
- name: Detect
description: The dragon makes a Wisdom (Perception) check.
- name: Tail Attack
description: The dragon makes a tail attack.
- name: Wing Attack (Costs 2 Actions)
description: The dragon beats its wings. Each creature within 15 ft. of the dragon must succeed on a DC 24 Dexterity saving throw or take 16 (2d6 + 9) bludgeoning damage and be knocked prone. The dragon can then fly up to half its flying speed.
spell_casting:
- title: Innate Spellcasting
top_description: "Iymrith's innate spellcasting ability is Charisma (spell save DC 20). Iymrith's stone shape can create a living gargoyle instead of altering the stone as described in the spell description. She can innately cast the following spells, requiring no material components:"
bottom_description: ''
spell_list:
- title: 1/day
spells:
- name: counterspell
slug_name: counterspell
- name: detect magic
slug_name: detect-magic
- name: ice storm
slug_name: ice-storm
- name: stone shape
slug_name: stone-shape
- name: teleport
slug_name: teleport
|
data/monsters/iymrith-ancient-blue-dragon.yaml
|
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
name: grafana
namespace: ingress-nginx
spec:
serviceName: grafana-service
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
#serviceAccountName: grafana
initContainers:
- name: "init-chmod-data"
image: debian:9
imagePullPolicy: "IfNotPresent"
command: ["chmod", "777", "/var/lib/grafana"]
volumeMounts:
- name: grafana-data
mountPath: "/var/lib/grafana"
restartPolicy: Always
containers:
- name: grafana
image: grafana/grafana
imagePullPolicy: Always
# Mount in all the previously defined ConfigMaps as `volumeMounts`
# as well as the Grafana data volume
volumeMounts:
- name: config
mountPath: "/etc/grafana/"
- name: dashboards
mountPath: "/var/lib/grafana/dashboards"
- name: notifiers
mountPath: "/etc/grafana/provisioning/notifiers"
- name: datasources
mountPath: "/etc/grafana/provisioning/datasources/"
- name: dashboardproviders
mountPath: "/etc/grafana/provisioning/dashboards/"
- name: grafana-data
mountPath: "/var/lib/grafana"
ports:
#- name: service
# containerPort: 80
# protocol: TCP
- containerPort: 3000
protocol: TCP
# Set the `GF_SECURITY_ADMIN_USER` and `GF_SECURITY_ADMIN_PASSWORD` environment variables
# using the Secret defined in `grafana-secret.yaml`
#env:
# - name: GF_SECURITY_ADMIN_USER
# valueFrom:
# secretKeyRef:
# name: $APP_INSTANCE_NAME-grafana
# key: admin-user
# - name: GF_SECURITY_ADMIN_PASSWORD
# valueFrom:
# secretKeyRef:
# name: $APP_INSTANCE_NAME-grafana
# key: admin-password
livenessProbe:
httpGet:
path: /api/health
port: 3000
readinessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
failureThreshold: 10
periodSeconds: 10
resources:
limits:
cpu: 500m
memory: 2500Mi
requests:
cpu: 100m
memory: 100Mi
# Define `configMap` volumes for the above ConfigMap files, and `volumeClaimTemplates`
# for Grafana's `2Gi` Block Storage data volume, which will be mounted to `/var/lib/grafana`.
volumes:
- name: config
configMap:
name: grafana-ini
- name: notifiers
configMap:
name: grafana-notifiers
- name: datasources
configMap:
name: grafana-datasources
- name: dashboardproviders
configMap:
name: grafana-dashboardproviders
- name: dashboards
configMap:
name: dashboards
volumeClaimTemplates:
- metadata:
name: grafana-data
spec:
storageClassName: do-block-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "2Gi"
|
grafana/grafana-stateful-set.yaml
|
attributes:
# The attribute to be controlled, this could be e.g. "state" or "brightness".
# A value of null creates a write-only attribute. This has to be used when you want
# to control a property whose current value is not reflected in any of the entity's
# attributes. Don't do this if not really necessary, since doing so means that
# Schedy won't be able to verify that the value has been transmitted correctly. If
# you must use a write-only attribute, you might also want to set send_retries to a
# low value in order to avoid excessive network load.
- attribute: first
- attribute: second
- ...
# Here you configure the values you want to be able to return from your schedule.
values:
# Each value is a list of the values for the individual attributes configured above.
# Schedy compares the entity's current attributes against the values defined here
# in order to find the value currently active.
# The special attribute value "*" is a wildcard and will, when used, match any
# value of that particular attribute.
  # Additionally, you don't have to include all attributes in every single value;
  # only the first N attributes for which values are provided are compared against
  # the entity's state when matching a value.
- value: ["on", "*"]
# The services that have to be called in order to make the actor report this value.
calls:
# Which service to call
- service: ...
# Optionally, provide a mapping with data to be passed with the service call.
# You can use "{attr1}" as a placeholder for the value set for the first attribute,
# "{attr2}" for the value of the second attribute and so on to pass the correct
# attribute values to the service call as needed in order to bring the entity
# to the state represented by the value you returned from your schedule.
# The placeholder "{entity_id}" can be used to insert the actor's entity id.
# For instance, if the value
# ["on", 75]
# was returned by a schedule, the following sample would render to:
# {"param1": "something", "param2": 75}
data:
param1: "something"
param2: "{attr2}"
# Set to false if you don't want the entity_id field to be included in service
# data automatically.
#include_entity_id: true
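  # A second value, sketched as comments (the service name is only an
  # illustration): report "off" by calling a switch-off service with no
  # extra data.
  #- value: ["off", "*"]
  #  calls:
  #  - service: light/turn_off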
  # More values
- ...
# Set this to true if you want Schedy to treat string attributes of an entity the
# same, no matter if they're reported in lower or upper case. This is handy for some
# MQTT devices, for instance, which sometimes report a state of "ON", while others say
# "on".
#ignore_case: false
|
docs/apps/schedy/actors/generic2/config.yaml
|
- name: "LVM Setup: Check LVM packages"
package:
name: '{{ item }}'
state: present
register: lvm_just_installed
with_items:
- lvm2
- name: "LVM Setup: re-compute facts now that lvm is installed"
setup:
when: lvm_just_installed is changed
- name: "LVM Setup: Check that the default VG is in the list of managed VGs"
tags:
- assertion
assert:
that:
- not(metalk8s_lvm_default_vg|bool) or metalk8s_lvm_default_vg in metalk8s_lvm_vgs
- name: 'LVM Setup: Compute list of all vgs'
set_fact:
metalk8s_lvm_all_vgs: >-
{
{%- for vg_name in metalk8s_lvm_vgs -%}
'{{ vg_name }}': {{ dict(
drives=vars['metalk8s_lvm_drives_' ~ vg_name]|default([]),
host_path = vars['metalk8s_host_path_' ~ vg_name]|default(
metalk8s_host_path_prefix ~ "/" ~ vg_name),
pv_dict=vars['metalk8s_lvm_lvs_' ~ vg_name]|default({}),
        storageclass=vars['metalk8s_lvm_storageclass_' ~ vg_name]
|default(metalk8s_default_storageclass),
vg_name=vg_name,
) }},
{%- endfor -%}
}
- debug:
var: metalk8s_lvm_all_vgs
when: debug|bool
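# Illustrative only: for a hypothetical VG named "vg01" with a single drive and
# the defaults above, the computed fact would look roughly like:
#   metalk8s_lvm_all_vgs:
#     vg01:
#       drives: ['/dev/vdb']
#       host_path: '<metalk8s_host_path_prefix>/vg01'
#       pv_dict: {}
#       storageclass: '<metalk8s_default_storageclass>'
#       vg_name: vg01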
# drives|length > 0 means we create the VG
# drives|length == 0 means the VG must already exist
# - not(item.value.drives|length == 0 and item.key not in ansible_lvm.vgs)
# TODO: Check that the devices exist
# TODO: Check that the devices do not have any ansible_device_links if the
#       VG is to be created
# TODO: Check that the devices of an existing VG are the same if drives are specified
- name: "LVM Setup: Create the LVM Volume Groups"
lvg:
pvs: '{{ item.value.drives|join(",") }}'
vg: '{{ item.key }}'
state: present
register: vg_creation
with_dict: >-
{
{%- for vg_name, vg_prop in metalk8s_lvm_all_vgs.items()
if vg_prop.drives -%}
'{{ vg_name }}': {{ vg_prop }},
{%- endfor -%}
}
- name: "LVM Setup: Gather fact with LVM data"
setup:
gather_subset: 'hardware'
filter: 'ansible_lvm'
when: vg_creation is changed
|
roles/setup_lvm_vg/tasks/main.yml
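The `vars[...]` lookups above imply a per-VG variable naming convention (`metalk8s_lvm_drives_<vg>`, `metalk8s_host_path_<vg>`, `metalk8s_lvm_lvs_<vg>`, `metalk8s_lvm_storageclass_<vg>`). A minimal sketch of inventory variables that would satisfy it; the VG, device, and storage class names are illustrative:

```yaml
metalk8s_lvm_vgs:
  - vg_metalk8s
metalk8s_lvm_drives_vg_metalk8s:
  - /dev/vdb
metalk8s_lvm_lvs_vg_metalk8s:
  lv01:
    size: 52G
metalk8s_lvm_storageclass_vg_metalk8s: local-storage
```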
|
name: Build release
on:
push:
tags:
- v[0-9]+.[0-9]+.[0-9]+
jobs:
build-linux:
strategy:
fail-fast: false
matrix:
platform:
- deb
- rpm
- osx
arch:
- i386
- amd64
name: build for linux / macos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: cachix/install-nix-action@v9
- name: build
run: |
set -x
git fetch --tags -f
mkdir build-output/
PACKAGE="package-${{ matrix.platform }}"
echo "BUILD: $PLATFORM@${{ matrix.arch }}"
BUILD=$(nix-build --no-out-link --no-build-output -A $PACKAGE --argstr arch ${{ matrix.arch }})
cp -v $BUILD/* build-output/
- name: upload assets
uses: softprops/action-gh-release@v1
with:
files: build-output/*
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
build-windows:
strategy:
fail-fast: false
matrix:
arch:
- i386
- amd64
name: build for windows
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: prepare
shell: bash
run: |
choco install nsis pandoc
git fetch --tags -f
echo "::set-env name=VERSION::$(git describe --tags --always)"
echo "::set-env name=LDFLAGS::-X github.com/j-keck/lsleases/pkg/daemon.version=%VERSION% -X main.version=%VERSION%"
echo "::set-env name=PLATFORM::${{ matrix.arch }}"
if [ ${{ matrix.arch }} == "i386" ]; then
echo "::set-env name=GOARCH::386"
else
echo "::set-env name=GOARCH::${{ matrix.arch }}"
fi
- name: build
run: |
go build -ldflags "${{ env['LDFLAGS'] }}" ./cmd/lsleases
go build -ldflags "${{ env['LDFLAGS'] }}" ./cmd/lsleasesd
pandoc -s -o manual-windows.html docs/manual-windows.org
mkdir build-installer
copy lsleases.exe build-installer\
copy lsleasesd.exe build-installer\
copy LICENSE build-installer\
copy manual-windows.html build-installer\
copy build\windows\installer.nsi build-installer\
copy build\windows\${{ matrix.arch }}\nssm.exe build-installer\
copy build\windows\*.bat build-installer\
pushd .
chdir build-installer\
makensis installer.nsi
popd
mkdir build-standalone\lsleases
copy lsleases.exe build-standalone\lsleases\
copy LICENSE build-standalone\lsleases\
copy manual-windows.html build-standalone\lsleases\
copy build\windows\capture-leases.bat build-standalone\lsleases\
pushd .
chdir build-standalone\
7z.exe a lsleases-${{ env['VERSION'] }}-${{ matrix.arch }}-windows-standalone.zip .\lsleases
popd
- name: upload assets
uses: softprops/action-gh-release@v1
with:
files: |
build-installer/lsleases-v*.exe
build-standalone/lsleases*.zip
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
.github/workflows/release.yml
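`::set-env` is disabled on current GitHub runners, so the `prepare` step above fails on up-to-date images. A hedged sketch of the same step rewritten to use the `$GITHUB_ENV` file, which is the supported replacement:

```yaml
- name: prepare
  shell: bash
  run: |
    choco install nsis pandoc
    git fetch --tags -f
    VERSION=$(git describe --tags --always)
    echo "VERSION=$VERSION" >> "$GITHUB_ENV"
    echo "LDFLAGS=-X github.com/j-keck/lsleases/pkg/daemon.version=$VERSION -X main.version=$VERSION" >> "$GITHUB_ENV"
    echo "PLATFORM=${{ matrix.arch }}" >> "$GITHUB_ENV"
    if [ "${{ matrix.arch }}" == "i386" ]; then
      echo "GOARCH=386" >> "$GITHUB_ENV"
    else
      echo "GOARCH=${{ matrix.arch }}" >> "$GITHUB_ENV"
    fi
```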
|
uid: microsoft.quantum.diagnostics.assertqubitisinstatewithintolerance
name: AssertQubitIsInStateWithinTolerance
type: operation
namespace: Microsoft.Quantum.Diagnostics
summary: >-
Asserts that a qubit is in the expected state.
`expected` represents a complex vector, $\ket{\psi} = \begin{bmatrix} a & b \end{bmatrix}^{\mathrm{T}}$.
The first element of the tuples representing each of $a$ and $b$ is the real part of the complex number, while the second element is the imaginary part.
The last argument defines the tolerance within which the assertion is made.
remarks: >-
The following Mathematica code can be used to verify the expressions for mi, mx, my, mz:
```mathematica
{Id, X, Y, Z} = Table[PauliMatrix[k], {k, 0, 3}];
st = {{ reA + I imA }, { reB + I imB} };
M = st . ConjugateTranspose[st];
mx = Tr[M.X] // ComplexExpand;
my = Tr[M.Y] // ComplexExpand;
mz = Tr[M.Z] // ComplexExpand;
mi = Tr[M.Id] // ComplexExpand;
2 M == Id mi + X mx + Z mz + Y my // ComplexExpand // Simplify
```
The tolerance is the $L_{\infty}$ distance between the 3-dimensional real vector $(x_2, x_3, x_4)$ defined by $\langle\psi|\psi\rangle = x_1 I + x_2 X + x_3 Y + x_4 Z$ and the real vector $(y_2, y_3, y_4)$ defined by $\rho = y_1 I + y_2 X + y_3 Y + y_4 Z$, where $\rho$ is the density matrix corresponding to the state of `register`.
This assumes that $\operatorname{Tr}(\rho)$ and $\operatorname{Tr}(\ket{\psi}\bra{\psi})$ are both 1 (for example, $x_1 = 1/2$, $y_1 = 1/2$).
If this is not the case, the function asserts that the $L_{\infty}$ distance between the vectors $(x_2 - x_1, x_3 - x_1, x_4 - x_1, x_4 + x_1)$ and $(y_2 - y_1, y_3 - y_1, y_4 - y_1, y_4 + y_1)$ is smaller than the tolerance parameter.
examples: >-
```Q#
using (qubits = Qubit[2]) {
// Both qubits are initialized as |0〉: a=(1 + 0*i), b=(0 + 0*i)
AssertQubitIsInStateWithinTolerance((Complex(1., 0.), Complex(0., 0.)), qubits[0], 1e-5);
AssertQubitIsInStateWithinTolerance((Complex(1., 0.), Complex(0., 0.)), qubits[1], 1e-5);
Y(qubits[1]);
// Y |0〉 = i |1〉: a=(0 + 0*i), b=(0 + 1*i)
AssertQubitIsInStateWithinTolerance((Complex(0., 0.), Complex(0., 1.)), qubits[1], 1e-5);
}
```
syntax: 'operation AssertQubitIsInStateWithinTolerance (expected : (Microsoft.Quantum.Math.Complex, Microsoft.Quantum.Math.Complex), register : Qubit, tolerance : Double) : Unit'
input:
content: '(expected : (Microsoft.Quantum.Math.Complex, Microsoft.Quantum.Math.Complex), register : Qubit, tolerance : Double)'
types:
- name: expected
summary: The expected complex amplitudes of $\ket{0}$ and $\ket{1}$, respectively.
types:
- uid: microsoft.quantum.math.complex
- uid: microsoft.quantum.math.complex
- name: register
summary: The qubit whose state is asserted. This qubit is assumed to be separable from the other allocated qubits and not entangled with them.
isPrimitive: true
uid: Qubit
- name: tolerance
summary: >-
The additive tolerance by which the actual amplitudes may deviate from the expected ones.
See the remarks below for details.
isPrimitive: true
uid: Double
output:
content: Unit
types:
- isPrimitive: true
uid: Unit
metadata:
ms.openlocfilehash: 77d28c122a44f39924242dedb6bd6163974e18c1
ms.sourcegitcommit: <KEY>
ms.translationtype: MT
ms.contentlocale: ja-JP
ms.lasthandoff: 05/01/2020
ms.locfileid: "82656604"
|
api/qsharp/microsoft.quantum.diagnostics.assertqubitisinstatewithintolerance.yml
|
---
apiVersion: v1
kind: Service
metadata:
labels:
run: bootstorage-svc
name: bootstorage-svc
namespace: kubernetes-starterkit
spec:
ports:
- port: 5000
protocol: TCP
targetPort: 5000
selector:
run: bootstorage-svc
sessionAffinity: None
type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
run: bootstorage-svc
name: bootstorage-svc
namespace: kubernetes-starterkit
spec:
replicas: 1
selector:
matchLabels:
run: bootstorage-svc
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
run: bootstorage-svc
spec:
containers:
- image: subhakarkotta/bootstorage:latest
imagePullPolicy: Always
name: bootstorage-svc
env:
- name: REDIS_HOST
valueFrom:
secretKeyRef:
name: bootstorage-env
key: redis_host
- name: REDIS_PORT
valueFrom:
secretKeyRef:
name: bootstorage-env
key: redis_port
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: bootstorage-env
key: redis_password
ports:
- containerPort: 5000
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /api/bootstorage/healthz
port: 5000
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
livenessProbe:
failureThreshold: 3
httpGet:
path: /api/bootstorage/healthz
port: 5000
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Secret
metadata:
name: bootstorage-env
namespace: kubernetes-starterkit
type: Opaque
data:
redis_password: <PASSWORD>==
stringData:
redis_host: redis
redis_port: '6379'
|
k8s/bootstorage.yaml
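`extensions/v1beta1` Deployments were removed in Kubernetes 1.16, so the middle document above only applies on older clusters. A sketch of the header change needed for `apps/v1`; the rest of the Deployment can stay as-is, since it already sets `spec.selector.matchLabels`, which the stricter `apps/v1` schema requires:

```yaml
apiVersion: apps/v1
kind: Deployment
# ...remainder of the Deployment spec unchanged; apps/v1 makes
# spec.selector mandatory, and this manifest already provides it.
```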
|
#- hosts: all
# become: yes
# become_user: root
- name: install curl
package:
name: curl
state: present
- name: create application directory
file:
path: "{{ install_dir }}"
state: directory
owner: root
group: root
mode: 0750
- name: download taste-os client
get_url:
url: "{{ client_url }}"
dest: "{{ install_dir }}"
validate_certs: false
environment:
http_proxy: "{{ proxy_url }}"
https_proxy: "{{ proxy_url }}"
- name: calculate sha512sum
stat:
path: "{{ install_dir }}/tos.tar"
checksum_algorithm: sha512
get_checksum: yes
register: tos_client_stat
- name: Fail if sha512sum of taste-os client is incorrect
fail:
msg: "Error, sha512sum of tos.tar is not correct."
when: enable_integrity_check and tos_client_stat.stat.checksum != client_sha512sum
- name: extract taste-os client
unarchive:
src: "{{ install_dir }}/tos.tar"
dest: "{{ install_dir }}"
remote_src: true
- name: set parameters in config.sh
lineinfile:
path: "{{ install_dir }}/tos/config.sh"
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
with_items:
- { regexp: "^STOKEN=(.*)", line: "STOKEN=\"{{ s_token }}\"" }
- { regexp: "^MNAME=(.*)", line: "MNAME=\"{{ machine_name }}\"" }
- { regexp: "^MACHINEID=(.*)", line: "MACHINEID=\"{{ machine_id }}\"" }
- { regexp: "^SYSTEMID=(.*)", line: "SYSTEMID=\"{{ system_id }}\"" }
- { regexp: "^RISKID=(.*)", line: "RISKID=\"{{ risk_id }}\"" }
- { regexp: "^COMPONENT_ID=(.*)", line: "COMPONENT_ID=\"{{ component_id }}\"" }
- { regexp: "^PROXY=(.*)", line: "PROXY=\"{{ proxy_url }}\"" }
- name: register machine on first run
command: ./taste_os.sh -m reg
args:
chdir: "{{ install_dir }}/tos"
creates: "{{ install_dir }}/tos/collect.sh"
- name: update collector script
command: ./taste_os.sh -m update
args:
chdir: "{{ install_dir }}/tos"
when: enable_collector_update|bool
- name: do scan
command: ./taste_os.sh -m scan
args:
chdir: "{{ install_dir }}/tos"
when: enable_scanning|bool
tags: skip_ansible_lint
- name: configure cron job
cron:
name: "taste-os scan"
hour: "{{ cronjob_hour }}"
minute: "{{ cronjob_minute }}"
user: root
job: "cd {{ install_dir }}/tos && ./taste_os.sh -m scan -d {{ max_scan_delay_time }} > /dev/null 2>&1"
cron_file: ansible_tos_scan
|
images/capi/ansible/roles/telekom-dependencies/tasks/tasteos.yml
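The tasks above consume a number of role variables (`install_dir`, `client_url`, `s_token`, the cron settings, and the various toggles). A hedged sketch of a matching `defaults/main.yml`; every value here is a placeholder, not taken from the source:

```yaml
install_dir: /opt/taste-os
client_url: https://tos.example.com/tos.tar   # placeholder URL
client_sha512sum: ''                          # expected checksum of tos.tar
enable_integrity_check: true
enable_collector_update: false
enable_scanning: false
proxy_url: ''
s_token: ''
machine_name: "{{ inventory_hostname }}"
machine_id: ''
system_id: ''
risk_id: ''
component_id: ''
cronjob_hour: '3'
cronjob_minute: '15'
max_scan_delay_time: 60
```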
|
name: OperationResult
uid: '@azure/arm-labservices.OperationResult'
package: '@azure/arm-labservices'
summary: A long running operation result
fullName: OperationResult
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
- name: endTime
uid: '@azure/arm-labservices.OperationResult.endTime'
package: '@azure/arm-labservices'
summary: End time
fullName: endTime
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'endTime?: Date'
return:
description: ''
type: Date
- name: error
uid: '@azure/arm-labservices.OperationResult.error'
package: '@azure/arm-labservices'
summary: The error for a failure if the operation failed.
fullName: error
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'error?: ErrorDetail'
return:
description: ''
type: <xref uid="@azure/arm-labservices.ErrorDetail" />
- name: id
uid: '@azure/arm-labservices.OperationResult.id'
package: '@azure/arm-labservices'
summary: >-
Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
**NOTE: This property will not be serialized. It can only be populated by
the server.**
fullName: id
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'id?: undefined | string'
return:
description: ''
type: undefined | string
- name: name
uid: '@azure/arm-labservices.OperationResult.name'
package: '@azure/arm-labservices'
summary: >-
The name of the resource
**NOTE: This property will not be serialized. It can only be populated by
the server.**
fullName: name
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'name?: undefined | string'
return:
description: ''
type: undefined | string
- name: percentComplete
uid: '@azure/arm-labservices.OperationResult.percentComplete'
package: '@azure/arm-labservices'
summary: Percent completion
fullName: percentComplete
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'percentComplete?: undefined | number'
return:
description: ''
type: undefined | number
- name: startTime
uid: '@azure/arm-labservices.OperationResult.startTime'
package: '@azure/arm-labservices'
summary: Start time
fullName: startTime
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'startTime?: Date'
return:
description: ''
type: Date
- name: status
uid: '@azure/arm-labservices.OperationResult.status'
package: '@azure/arm-labservices'
summary: >-
The operation status. Possible values include: 'NotStarted', 'InProgress',
'Succeeded',
'Failed', 'Canceled'
fullName: status
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'status: OperationStatus'
return:
description: ''
type: <xref uid="@azure/arm-labservices.OperationStatus" />
|
docs-ref-autogen/@azure/arm-labservices/OperationResult.yml
|
groups:
- name: concourse
jobs:
- update-bosh-concourse
- deploy-concourse
resource_types:
- name: bosh2-deployment
type: docker-image
source:
repository: cloudfoundry/bosh-deployment-resource
resources:
- name: gcp-stemcell
type: bosh-io-stemcell
source:
name: bosh-google-kvm-ubuntu-trusty-go_agent
- name: capi-ci-private
type: git
source:
branch: master
private_key: ((private_key))
uri: git@github.com:cloudfoundry/capi-ci-private.git
- name: capi-ci
type: git
source:
branch: master
private_key: ((private_key))
uri: git@github.com:cloudfoundry/capi-ci.git
- name: concourse
type: github-release
source:
access_token: ((github_access_token))
repository: concourse
owner: concourse
- name: postgres
type: bosh-io-release
source:
repository: cloudfoundry/postgres-release
# Using <NAME>'s fork to get load-balancers for concourse from bbl
# We can switch back after this PR is merged: https://github.com/cloudfoundry/cf-deployment-concourse-tasks/pull/40/files
- name: cf-deployment-concourse-tasks
type: git
source:
branch: lb-type
private_key: ((private_key))
uri: git@github.com:mcwumbly/cf-deployment-concourse-tasks.git
- name: concourse-deployment
type: bosh2-deployment
source:
deployment: concourse
ignore_ssl: true
# Duplicates of capi-ci-private
- name: concourse-config
type: git
source:
private_key: ((private_key))
uri: git@github.com:cloudfoundry/capi-ci-private.git
- name: concourse-bbl-state
type: git
source:
private_key: ((private_key))
branch: master
uri: git@github.com:cloudfoundry/capi-ci-private.git
# Scheduling
- name: weekly-sunday
type: time
source:
location: America/Los_Angeles
start: 23:00
stop: 23:30
days: [Sunday]
jobs:
- name: update-bosh-concourse
serial: true
plan:
- aggregate:
- get: cf-deployment-concourse-tasks
- get: concourse-bbl-state
- get: capi-ci
- get: capi-ci-private
- get: weekly-sunday
trigger: true
- task: update-bosh-concourse
file: cf-deployment-concourse-tasks/bbl-up/task.yml
input_mapping:
bbl-state: concourse-bbl-state
ops-files: capi-ci-private
bbl-plan-patch: concourse-bbl-state
# Per the comment in the bbl-up task.yml: if no patch is necessary, we suggest
# you use `input_mapping` to map your `bbl-state` resource to `bbl-plan-patch`
# in order to satisfy this required input.
params:
BBL_STATE_DIR: "ci"
BBL_IAAS: "gcp"
BBL_LB_CERT: "certs/ci.cf-app.com/certificate.crt"
BBL_LB_KEY: "certs/ci.cf-app.com/certificate.key"
BBL_LB_TYPE: "concourse"
BBL_ENV_NAME: "concourse"
BBL_GCP_SERVICE_ACCOUNT_KEY: ((ci_gcp_json_key))
BBL_GCP_PROJECT_ID: ((ci_gcp_project_id))
BBL_GCP_ZONE: ((ci_gcp_zone))
BBL_GCP_REGION: ((ci_gcp_region))
ensure:
put: concourse-bbl-state
params:
repository: updated-bbl-state
rebase: true
- task: run-bosh-cleanup
file: capi-ci/ci/bbl-tasks/run-bosh-cleanup.yml
input_mapping:
bbl-state: concourse-bbl-state
params:
BBL_STATE_DIR: "ci"
- name: deploy-concourse
serial: true
interruptible: true
plan:
- aggregate:
- get: concourse
version: { tag: 'v3.6.0' }
params:
globs:
- concourse-*.tgz
- garden-runc-*.tgz
- get: postgres
version: { version: '21' }
- get: concourse-config
- get: gcp-stemcell
- get: capi-ci-private
- get: capi-ci
- task: extract-bbl-environment
file: capi-ci/ci/bbl-tasks/extract_bbl_environment.yml
params:
ENV_NAME: ci
DEPLOYMENT_NAME: concourse
- put: concourse-deployment
params:
source_file: environment/metadata
manifest: concourse-config/ci/concourse.yml
ops_files:
- capi-ci/concourse-deployment-operations/gcp.yml
releases:
- concourse/concourse-*.tgz
- concourse/garden-runc-*.tgz
- postgres/*.tgz
stemcells:
- gcp-stemcell/*.tgz
vars:
ci_atc_certificate: ((ci_atc_certificate))
ci_atc_private_key: ((ci_atc_private_key))
|
ci/pipeline-concourse.yml
|
items:
- uid: SuperOffice.Web.Redirection.ISoFormsRedirectPlugin
commentId: T:SuperOffice.Web.Redirection.ISoFormsRedirectPlugin
id: ISoFormsRedirectPlugin
parent: SuperOffice.Web.Redirection
children:
- SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.ShouldRedirect(System.String@)
langs:
- csharp
- vb
name: ISoFormsRedirectPlugin
nameWithType: ISoFormsRedirectPlugin
fullName: SuperOffice.Web.Redirection.ISoFormsRedirectPlugin
type: Interface
source:
path: SuperOffice.DCFWeb
isExternal: true
assemblies:
- SuperOffice.DCFWeb
namespace: SuperOffice.Web.Redirection
syntax:
content: 'public interface ISoFormsRedirectPlugin : IPlugin'
content.vb: >-
Public Interface ISoFormsRedirectPlugin
Inherits IPlugin
modifiers.csharp:
- public
- interface
modifiers.vb:
- Public
- Interface
- uid: SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.ShouldRedirect(System.String@)
commentId: M:SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.ShouldRedirect(System.String@)
id: ShouldRedirect(System.String@)
parent: SuperOffice.Web.Redirection.ISoFormsRedirectPlugin
langs:
- csharp
- vb
name: ShouldRedirect(out String)
nameWithType: ISoFormsRedirectPlugin.ShouldRedirect(out String)
fullName: SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.ShouldRedirect(out System.String)
type: Method
source:
path: SuperOffice.DCFWeb
isExternal: true
assemblies:
- SuperOffice.DCFWeb
namespace: SuperOffice.Web.Redirection
syntax:
content: bool ShouldRedirect(out string redirectUrl)
parameters:
- id: redirectUrl
type: System.String
return:
type: System.Boolean
content.vb: Function ShouldRedirect(ByRef redirectUrl As String) As Boolean
overload: SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.ShouldRedirect*
nameWithType.vb: ISoFormsRedirectPlugin.ShouldRedirect(ByRef String)
fullName.vb: SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.ShouldRedirect(ByRef System.String)
name.vb: ShouldRedirect(ByRef String)
references:
- uid: SuperOffice.Web.Redirection
commentId: N:SuperOffice.Web.Redirection
isExternal: true
name: SuperOffice.Web.Redirection
nameWithType: SuperOffice.Web.Redirection
fullName: SuperOffice.Web.Redirection
- uid: SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.ShouldRedirect*
commentId: Overload:SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.ShouldRedirect
isExternal: true
name: ShouldRedirect
nameWithType: ISoFormsRedirectPlugin.ShouldRedirect
fullName: SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.ShouldRedirect
- uid: System.String
commentId: T:System.String
parent: System
isExternal: true
name: String
nameWithType: String
fullName: System.String
- uid: System.Boolean
commentId: T:System.Boolean
parent: System
isExternal: true
name: Boolean
nameWithType: Boolean
fullName: System.Boolean
- uid: System
commentId: N:System
isExternal: true
name: System
nameWithType: System
fullName: System
|
docs/api-reference/web/SuperOffice.Web.Redirection.ISoFormsRedirectPlugin.yml
|
---
apiVersion: mongodb.com/v1
kind: MongoDB
metadata:
name: {{ .Values.replicaSetName | lower }}
spec:
members: {{ .Values.replicas }}
version: {{ .Values.mongoDBVersion | quote }}
featureCompatibilityVersion: {{ .Values.mongoDBFCV | default "5.0" | quote }}
opsManager:
configMapRef:
name: project-{{ .Values.opsManager.projectName | lower }}
credentials: {{ .Values.opsManager.omSecret }}
type: ReplicaSet
persistent: true
podSpec:
podTemplate:
spec:
imagePullSecrets:
- name: regcred
initContainers:
- name: mongodb-enterprise-init-database
containers:
- name: mongodb-enterprise-database
resources:
limits:
cpu: {{ .Values.resources.limits.cpu }}
memory: {{ .Values.resources.limits.mem }}
requests:
cpu: {{ .Values.resources.requests.cpu }}
memory: {{ .Values.resources.requests.mem }}
podAntiAffinityTopologyKey: "kubernetes.io/hostname"
persistence:
{{- if eq (.Values.storage.persistenceType | lower) "single"}}
single:
storage: {{ .Values.storage.single.size }}
storageClass: {{ .Values.storage.single.storageClass }}
{{- else }}
multiple:
data:
storage: {{ .Values.storage.multi.data.size }}
storageClass: {{ .Values.storage.multi.data.storageClass }}
journal:
storage: {{ .Values.storage.multi.journal.size }}
storageClass: {{ .Values.storage.multi.journal.storageClass }}
logs:
storage: {{ .Values.storage.multi.logs.size }}
storageClass: {{ .Values.storage.multi.logs.storageClass }}
{{- end }}
agent:
startupOptions:
logLevel: {{ .Values.logLevel }}
security:
authentication:
enabled: true
modes:
{{- if .Values.auth.scram.enabled }}
- SCRAM
{{- end }}
{{- if .Values.auth.ldap.enabled }}
- LDAP
ldap:
servers:
{{- range .Values.auth.ldap.servers }}
- {{ . }}
{{- end }}
transportSecurity: {{ if .Values.auth.ldap.ldaps }}tls{{ else }}none{{ end }}
{{- if .Values.auth.ldap.ldaps }}
caConfigMapRef:
name: {{ .Values.auth.ldap.caConfigMap }}
key: ca-pem
{{- end }}
bindQueryUser: {{ .Values.auth.ldap.bindUserDN }}
bindQueryPasswordSecretRef:
name: {{ .Values.auth.ldap.bindUserSecret }}
userToDNMapping: {{ .Values.auth.ldap.userToDNMapping | quote }}
authzQueryTemplate: {{ .Values.auth.ldap.authzQueryTemplate | default "" | quote }}
{{- end }}
{{- if .Values.tls.enabled }}
internalCluster: "X509"
agents:
mode: SCRAM
certsSecretPrefix: "mdb"
tls:
ca: {{ .Values.tls.caConfigMap }}
{{- end }}
{{ if and .Values.extAccess.enabled .Values.tls.enabled -}}
connectivity:
replicaSetHorizons:
{{- range .Values.extAccess.ports }}
- "external": "{{ .horizonName }}:{{ .port }}"
{{- end }}
{{ end -}}
additionalMongodConfig:
{{- if .Values.tls.enabled }}
net:
tls:
mode: requireTLS
disabledProtocols: TLS1_0,TLS1_1
setParameter:
suppressNoTLSPeerCertificateWarning: true
{{- end }}
systemLog:
{{- if eq (.Values.logLevel | upper) "DEBUG" }}
verbosity: 2
{{- end }}
timeStampFormat: iso8601-local
{{- if .Values.kmip.enabled }}
security:
enableEncryption: true
kmip:
serverName: {{ .Values.kmip.host }}
port: {{ .Values.kmip.port }}
clientCertificateFile: /mongodb-automation/server.pem
serverCAFile: /mongodb-automation/ca.pem
{{- end }}
|
charts/templates/mongodb-replica-set.yaml
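A hedged sketch of a `values.yaml` this template would render with (SCRAM-only authentication, a single persistent volume, and TLS/LDAP/KMIP/external access disabled); all names, versions, and sizes are illustrative:

```yaml
replicaSetName: MyReplicaSet
replicas: 3
mongoDBVersion: 6.0.5-ent
mongoDBFCV: "6.0"
logLevel: INFO
opsManager:
  projectName: MyProject
  omSecret: om-api-credentials
resources:
  limits:
    cpu: "2"
    mem: 2Gi
  requests:
    cpu: "1"
    mem: 1Gi
storage:
  persistenceType: single
  single:
    size: 20Gi
    storageClass: standard
auth:
  scram:
    enabled: true
  ldap:
    enabled: false
tls:
  enabled: false
extAccess:
  enabled: false
kmip:
  enabled: false
```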
|
name: CI
# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
on:
push:
branches: [master]
pull_request:
branches: [master]
release:
types: [published]
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
CI-Matrix:
name: ${{ matrix.name }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
include:
- os: windows-latest
name: Win32-MSVC
cmake_options: -A Win32
- os: windows-latest
name: Win64-MSVC
cmake_options: -A x64
- os: ubuntu-latest
name: Linux64-GCC
cmake_options: -DCMAKE_C_FLAGS=-m64 -DCMAKE_CXX_FLAGS=-m64
- os: macos-latest
name: MacOS64-Clang
cmake_options: -DCMAKE_C_FLAGS=-m64 -DCMAKE_CXX_FLAGS=-m64
steps:
- name: 'Install OpenGL SDL2'
if: matrix.os == 'ubuntu-latest'
run: sudo apt-get install libgl1-mesa-dev libsdl2-dev
- name: 'Install SDL2'
if: matrix.os == 'macos-latest'
run: brew install SDL2
- name: 'Checkout recursive'
uses: actions/checkout@v2
with:
submodules: recursive
- name: 'CMake Build Debug'
run: cmake -S ${{ github.workspace }} -B ${{ github.workspace }}/debug -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX="${{ github.workspace }}/install/FastNoise2" -DFASTNOISE2_NOISETOOL=OFF -DFASTNOISE2_TESTS=OFF ${{ matrix.cmake_options }}
- name: 'CMake Install Debug'
run: cmake --build ${{ github.workspace }}/debug --config Debug --target install --parallel 4
- name: 'CMake Build Release'
run: cmake -S ${{ github.workspace }} -B ${{ github.workspace }}/release -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX="${{ github.workspace }}/install/FastNoise2" -DFASTNOISE2_NOISETOOL=ON -DFASTNOISE2_TESTS=ON ${{ matrix.cmake_options }}
- name: 'CMake Install Release'
run: cmake --build ${{ github.workspace }}/release --config Release --target install --parallel 4
- name: 'Upload artifact'
uses: actions/upload-artifact@v2
with:
name: ${{ matrix.name }}
path: ${{ github.workspace }}/install/
- name: 'Zip artifacts'
if: github.event_name == 'release'
uses: papeloto/action-zip@v1
with:
files: install/
recursive: true
dest: ${{ matrix.name }}.zip
- name: 'Upload release artifacts'
if: github.event_name == 'release'
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: ${{ github.workspace }}/${{ matrix.name }}.zip
asset_name: FastNoise2-${{ matrix.name }}.zip
tag: ${{ github.ref }}
|
.github/workflows/main.yml
|
version: '2'
services:
mail:
image: tvial/docker-mailserver:latest
hostname: mail
domainname: wt20.ephec-ti.be
ports:
- "25:25"
- "143:143"
- "587:587"
- "993:993"
- "110:110"
- "995:995"
volumes:
- ./mail/ingoing:/var/mail
- ./mail/state:/var/mail-state
- /etc/letsencrypt:/etc/letsencrypt/
- /etc/localtime:/etc/localtime:ro
- ./mail/config/:/tmp/docker-mailserver/
environment:
- ENABLE_SPAMASSASSIN=1 # antispam
- ENABLE_CLAMAV=1 # antivirus
- ENABLE_FAIL2BAN=0
- ENABLE_POSTGREY=0 # greylist filtering
- ENABLE_FETCHMAIL=1
- ENABLE_POP3=1
- ONE_DIR=1
- DMS_DEBUG=1 # dev environment
- SA_TAG=2.0
- SA_TAG2=6.31
- SASL_PASSWD="<PASSWORD>"
- SSL_TYPE=letsencrypt
flask:
build:
context: ./flask/
environment:
- POSTGRES_USER=woodytoys
- POSTGRES_DB=woodytoys
- POSTGRES_PASSWORD=<PASSWORD>
volumes:
- ./flask:/code
links:
- postgres
postgres:
image: postgres:alpine
volumes:
- ./flask/sql/:/docker-entrypoint-initdb.d/
environment:
- POSTGRES_USER=woodytoys
- POSTGRES_DB=woodytoys
- POSTGRES_PASSWORD=<PASSWORD>
nginx:
image: nginx:alpine
volumes:
- ./nginx/www:/var/www/
- ./nginx/conf.d:/etc/nginx/conf.d
- /etc/letsencrypt/:/etc/letsencrypt/
ports:
- "80:80"
- "443:443"
links:
- flask
dns:
image: resystit/bind9
dns_SOA_interne:
image: resystit/bind9
container_name: dns_SOA_interne
ports:
- "53:53/udp"
- "53:53/tcp"
volumes:
- ./dns/bind:/etc/bind
- ./dns/bind/internal.db.wt20.ephec-ti.be:/var/cache/bind/internal.db.wt20.ephec-ti.be
- ./dns/bind/external.db.wt20.ephec-ti.be:/var/cache/bind/external.db.wt20.ephec-ti.be
- ./dns/resolv.conf:/etc/resolv.conf
asterisk:
container_name: asterisk
image: matiuso/debisterisk
ports:
- "5060:5060/udp"
- "5060:5060/tcp"
volumes:
- ./asterisk/users.conf:/etc/asterisk/users.conf
- ./asterisk/sip.conf:/etc/asterisk/sip.conf
- ./asterisk/extensions.conf:/etc/asterisk/extensions.conf
- ./asterisk/logger.conf:/etc/asterisk/logger.conf
- ./asterisk/voicemail.conf:/etc/asterisk/voicemail.conf
- /var/log/asterisk:/var/log/asterisk
# - ./asterisk/rtp.conf:/etc/asterisk/rtp.conf
# - ./asterisk/pjsip.conf:/etc/asterisk/pjsip.conf
|
docker-compose.yml
|
---
- name: Issue 00743 - Add some pools - Provider
bigip_pool:
pool: "{{ item }}"
provider:
password: "{{ <PASSWORD> }}"
server: "{{ ansible_host }}"
server_port: "{{ bigip_port }}"
user: "{{ bigip_username }}"
validate_certs: "{{ validate_certs }}"
loop:
- abc_log
- name: Issue 00743 - Create pool member - Provider
bigip_pool_member:
provider:
password: "{{ <PASSWORD> }}"
server: "{{ ansible_host }}"
server_port: "{{ bigip_port }}"
user: "{{ bigip_username }}"
validate_certs: "{{ validate_certs }}"
pool: abc_log
state: present
host: 10.0.0.100
port: 50002
partition: Common
register: result
- name: Issue 00743 - Assert Create pool member - Provider
assert:
that:
- result is changed
- result is success
- name: Issue 00743 - Create pool member - Idempotent check - Provider
bigip_pool_member:
provider:
password: "{{ <PASSWORD> }}"
server: "{{ ansible_host }}"
server_port: "{{ bigip_port }}"
user: "{{ bigip_username }}"
validate_certs: "{{ validate_certs }}"
pool: abc_log
state: present
host: 10.0.0.100
port: 50002
partition: Common
register: result
- name: Issue 00743 - Assert Create pool member - Idempotent check - Provider
assert:
that:
- result is not changed
- result is success
- name: Issue 00743 - Set fact for testing
set_fact:
pools:
- name: f
monitor_type: single
monitors:
- HIPPO_Health
members:
- host: 3.3.3.3
port: 80
- host: 4.4.4.4
port: 8080
- host: 5.5.5.5
port: 443
- name: g
members:
- host: 192.168.127.12
port: 80
- host: 172.16.58.3
port: 8080
- host: 8.8.8.8
port: 443
monitor_http:
- name: HIPPO_Health
interval: 10
timeout: 31
receive: UP
send: GET /rest/server/ping\\r\\n
- name: Issue 00743 - Create HTTP monitors
bigip_monitor_http:
provider:
password: "{{ <PASSWORD> }}"
server: "{{ ansible_host }}"
server_port: "{{ bigip_port }}"
user: "{{ bigip_username }}"
validate_certs: "{{ validate_certs }}"
name: "{{ item.name }}"
state: "{{ item.state | default(omit) }}"
interval: "{{ item.interval }}"
timeout: "{{ item.timeout }}"
receive: "{{ item.receive }}"
receive_disable: "{{ item.receive_disable | default(omit) }}"
send: "{{ item.send }}"
loop: "{{ monitor_http }}"
- name: Issue 00743 - Create Pool
bigip_pool:
provider:
password: "{{ <PASSWORD> }}"
server: "{{ ansible_host }}"
server_port: "{{ bigip_port }}"
user: "{{ bigip_username }}"
validate_certs: "{{ validate_certs }}"
name: "{{ item.name }}"
state: "{{ item.state | default(omit) }}"
lb_method: "{{ item.lb_method | default(omit)}}"
monitor_type: "{{ item.monitor_type | default(omit) }}"
monitors: "{{ item.monitors | default(omit) }}"
service_down_action: "{{ item.service_down_action | default(omit) }}"
loop: "{{ pools }}"
- name: Issue 00743 - Create Pool members with_subelements
bigip_pool_member:
provider:
password: "{{ <PASSWORD> }}"
server: "{{ ansible_host }}"
server_port: "{{ bigip_port }}"
user: "{{ bigip_username }}"
validate_certs: "{{ validate_certs }}"
pool: "{{ item.0.name }}"
state: "{{ item.1.state | default(omit) }}"
host: "{{ item.1.host }}"
port: "{{ item.1.port }}"
partition: "{{ item.1.partition | default(omit) }}"
with_subelements:
- "{{ pools }}"
- members
- name: Issue 00743 - Remove pools - Provider
bigip_pool:
pool: "{{ item }}"
state: absent
provider:
password: "{{ <PASSWORD> }}"
server: "{{ ansible_host }}"
server_port: "{{ bigip_port }}"
user: "{{ bigip_username }}"
validate_certs: "{{ validate_certs }}"
loop:
- abc_log
- f
- g
- name: Issue 00743 - Remove HTTP monitors
bigip_monitor_http:
provider:
password: "{{ <PASSWORD> }}"
server: "{{ ansible_host }}"
server_port: "{{ bigip_port }}"
user: "{{ bigip_username }}"
validate_certs: "{{ validate_certs }}"
name: "{{ item.name }}"
state: absent
loop: "{{ monitor_http }}"
|
f5-ansible/test/integration/targets/bigip_pool_member/tasks/provider/issue-00743.yaml
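The same five-line `provider` mapping is repeated in every task above. A hedged sketch of hoisting it into a single fact so each module call collapses to one line; `f5_provider` and `bigip_password` are hypothetical names (the source elides the password variable):

```yaml
- name: Issue 00743 - Set shared connection provider
  set_fact:
    f5_provider:
      password: "{{ bigip_password }}"   # assumed variable name
      server: "{{ ansible_host }}"
      server_port: "{{ bigip_port }}"
      user: "{{ bigip_username }}"
      validate_certs: "{{ validate_certs }}"

- name: Issue 00743 - Create pool member - Provider
  bigip_pool_member:
    provider: "{{ f5_provider }}"
    pool: abc_log
    state: present
    host: 10.0.0.100
    port: 50002
    partition: Common
```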
|
---
registered_authorities:
- citationauthorities
- conceptauthorities
- locationauthorities
- orgauthorities
- personauthorities
- placeauthorities
- taxonomyauthority
registered_procedures:
- Acquisition
- CollectionObject
- ConditionCheck
- Exhibition
- Group
- Intake
- LoanIn
- LoanOut
- Media
- Movement
- ObjectExit
- ValuationControl
registered_profiles:
acquisition:
type: Procedures
enabled: false
required_headers:
- acquisitionreferencenumber
config:
Acquisition:
mapper: CoreAcquisition
identifier: acquisitionreferencenumber
title: acquisitionreferencenumber
Authorities:
- name_field: acquisitionauthorizer
authority_type: Person
authority_subtype: person
- name_field: ownerPerson
authority_type: Person
authority_subtype: person
- name_field: ownerOrganization
authority_type: Organization
authority_subtype: organization
cataloging:
type: Procedures
enabled: true
required_headers:
- objectnumber
config:
CollectionObject:
mapper: OHCCollectionObject
identifier: objectnumber
title: objectnumber
Authorities:
- name_field: contentperson
authority_type: Person
authority_subtype: person
- name_field: inscriber
authority_type: Person
authority_subtype: person
- name_field: productionperson
authority_type: Person
authority_subtype: person
- name_field: productionorg
authority_type: Organization
authority_subtype: organization
Vocabularies:
- name_field: titlelanguage
vocabulary_subtype: languages
- name_field: titletranslationlanguage
vocabulary_subtype: languages
hierarchies:
type: Hierarchies
enabled: true
required_headers:
- type
- narrower
- broader
config: {}
movement:
type: Procedures
enabled: true
required_headers:
- movementreferencenumber
config:
Movement:
mapper: CoreMovement
identifier: movementreferencenumber
title: movementreferencenumber
Authorities:
- name_field: movementcontact
authority_type: Person
authority_subtype: person
- name_field: inventorycontact
authority_type: Person
authority_subtype: person
nomenclature:
type: Authorities
enabled: true
identify_by_column: termdisplayname
required_headers:
- termdisplayname
defaults:
historicalstatus: current
termlanguage: English
termprefforlang: true
termsource: AASLH Nomenclature
termstatus: accepted
termtype: descriptor
config:
mapper: CoreConcept
name_field: termdisplayname
authority_type: Concept
authority_subtype: nomenclature
Authorities:
- name_field: termsource
authority_type: Citation
authority_subtype: citation
Vocabularies:
- name_field: termlanguage
vocabulary_subtype: languages
osteology:
type: Procedures
enabled: true
required_headers:
- inventoryid
config:
Osteology:
mapper: AnthroOsteology
identifier: inventoryid
title: inventoryid
Authorities:
- name_field: sexdeterminationanalyst
authority_type: Person
authority_subtype: person
- name_field: osteoageestimateanalyst
authority_type: Person
authority_subtype: person
- name_field: inventoryanalyst
authority_type: Person
authority_subtype: person
relationships:
type: Relationships
enabled: true
required_headers:
- subjectdocumenttype
- subjectidentifier
- objectdocumenttype
- objectidentifier
config: {}
|
lib/collectionspace/converter/ohc/config.yml
|
ApiEndpoints:
handler: index.appHandler
timeout: 20
memory: '{{parent.api_lambda_memory}}'
source: 'node_modules/@cumulus/api/dist/'
apiRole: true
urs: true
envs:
EARTHDATA_BASE_URL: '{{parent.urs_url}}'
EARTHDATA_CLIENT_ID: '{{EARTHDATA_CLIENT_ID}}'
EARTHDATA_CLIENT_PASSWORD: '{{EARTHDATA_CLIENT_PASSWORD}}'
OAUTH_PROVIDER: '{{parent.oauth.provider}}'
AccessTokensTable:
function: Ref
value: AccessTokensTableDynamoDB
AsyncOperationsTable:
function: Ref
value: AsyncOperationsTableDynamoDB
CollectionsTable:
function: Ref
value: CollectionsTableDynamoDB
ExecutionsTable:
function: Ref
value: ExecutionsTableDynamoDB
GranulesTable:
function: Ref
value: GranulesTableDynamoDB
PdrsTable:
function: Ref
value: PdrsTableDynamoDB
ProvidersTable:
function: Ref
value: ProvidersTableDynamoDB
RulesTable:
function: Ref
value: RulesTableDynamoDB
UsersTable:
function: Ref
value: UsersTableDynamoDB
BulkDeleteLambda:
function: Ref
value: BulkDeleteLambdaFunction
AsyncOperationTaskDefinition:
function: Ref
value: AsyncOperationTaskDefinition
EcsCluster:
function: Ref
value: EcsCluster
system_bucket: '{{parent.system_bucket}}'
invoke:
function: "Ref"
value: ScheduleSFLambdaFunction
invokeArn:
function: "Ref"
value: ScheduleSFLambdaFunction
invokeReconcileLambda:
function: "Ref"
value: CreateReconciliationReportLambdaFunction
messageConsumer:
function: "Ref"
value: messageConsumerLambdaFunction
KinesisInboundEventLogger:
function: "Ref"
value: KinesisInboundEventLoggerLambdaFunction
cmr_provider: '{{parent.cmr.provider}}'
cmr_client_id: '{{parent.cmr.clientId}}'
cmr_username: '{{parent.cmr.username}}'
cmr_password:
function: "Ref"
value: "CmrPassword"
ES_HOST:
function: Ref
value: ElasticSearchDomain
TOKEN_SECRET: '{{TOKEN_SECRET}}'
apiGateway:
- api: backend
path: '{proxy+}'
method: any
|
packages/api/config/api.yml
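The `{{parent.*}}` references above are resolved by kes from the deployment's top-level config. A hedged sketch of the corresponding parent values; the keys follow the references in this file, while the values are placeholders:

```yaml
system_bucket: my-cumulus-internal
urs_url: https://uat.urs.earthdata.nasa.gov/
api_lambda_memory: 756
oauth:
  provider: earthdata
cmr:
  provider: CUMULUS
  clientId: my-client-id
  username: my-cmr-user
```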
|
require:
- rubocop-rails
- rubocop-rspec
AllCops:
NewCops: enable
Exclude:
- 'vendor/**/*'
- 'bin/*'
- 'spec/rails_helper.rb'
- 'spec/spec_helper.rb'
- 'spec/dummy/**/*'
- 'spec/dummy/*'
- 'test/dummy/**/*'
- 'test/dummy/*'
- 'gemfiles/**/*'
- 'tmp/**/*'
Gemspec/RequiredRubyVersion:
Enabled: false
Layout/LineLength:
Max: 120
Exclude:
- 'spec/**/*'
- 'test/**/*'
Lint/MixedRegexpCaptureTypes:
Enabled: false
Metrics/BlockLength:
CountComments: false
Max: 25
Exclude:
- '*.gemspec'
- 'spec/**/*'
- 'test/**/*'
Metrics/ClassLength:
Exclude:
- 'spec/**/*'
- 'test/**/*'
Style/AccessorGrouping:
Enabled: false
Style/ClassAndModuleChildren:
Exclude:
# as most of the tests have nested module/class definitions and look like:
#
# ```ruby
# require 'test_helper'
#
# class Wallaby::Test < ActiveSupport::TestCase
# test 'truth' do
# assert_kind_of Module, Wallaby
# end
# end
# ```
- 'test/**/*'
Style/Documentation:
Exclude:
- 'spec/**/*'
- 'test/**/*'
Style/FormatStringToken:
Exclude:
- 'spec/**/*_spec.rb'
- 'test/**/*_test.rb'
Style/FrozenStringLiteralComment:
Exclude:
- 'spec/**/*'
- 'test/**/*'
Style/ModuleFunction:
Enabled: false
Style/SingleLineMethods:
Exclude:
- 'spec/**/*_spec.rb'
- 'test/**/*_test.rb'
Style/SlicingWithRange:
Enabled: false
Style/StringConcatenation:
Exclude:
- 'spec/**/*'
- 'test/**/*'
Style/PercentLiteralDelimiters:
PreferredDelimiters:
default: ()
'%i': ()
'%w': ()
'%W': ()
Style/RedundantBegin:
Enabled: false
Layout/EmptyLinesAroundArguments:
Exclude:
- 'spec/**/*_spec.rb'
- 'test/**/*_test.rb'
Layout/HashAlignment:
Exclude:
- 'spec/**/*_spec.rb'
- 'test/**/*_test.rb'
Rails/ApplicationRecord:
Enabled: false
Rails/ContentTag:
Enabled: false
Rails/HttpPositionalArguments:
Enabled: false
Rails/IndexWith:
Enabled: false
RSpec/DescribeClass:
Enabled: false
RSpec/ExampleLength:
Enabled: false
RSpec/FactoryBot/CreateList:
Enabled: false
RSpec/FilePath:
Enabled: false
RSpec/LeakyConstantDeclaration:
Enabled: false
RSpec/LetSetup:
Enabled: false
Rails/LinkToBlank:
Enabled: false
RSpec/MessageSpies:
Enabled: false
RSpec/MultipleDescribes:
Enabled: false
RSpec/MultipleExpectations:
Enabled: false
RSpec/MultipleMemoizedHelpers:
Enabled: false
RSpec/NamedSubject:
Enabled: false
RSpec/NestedGroups:
Enabled: false
RSpec/Rails/HttpStatus:
Enabled: false
RSpec/StubbedMock:
Enabled: false
|
rubocop.yml
|
- position: 1
driverNumber: 20
driverId: heinz-harald-frentzen
constructorId: arrows
engineManufacturerId: cosworth
tyreManufacturerId: bridgestone
time: "1:20.875"
gap:
interval:
laps: 16
- position: 2
driverNumber: 1
driverId: michael-schumacher
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: bridgestone
time: "1:20.972"
gap: "+0.097"
interval: "+0.097"
laps: 17
- position: 3
driverNumber: 3
driverId: david-coulthard
constructorId: mclaren
engineManufacturerId: mercedes
tyreManufacturerId: michelin
time: "1:21.547"
gap: "+0.672"
interval: "+0.575"
laps: 15
- position: 4
driverNumber: 14
driverId: jarno-trulli
constructorId: renault
engineManufacturerId: renault
tyreManufacturerId: michelin
time: "1:21.952"
gap: "+1.077"
interval: "+0.405"
laps: 14
- position: 5
driverNumber: 4
driverId: kimi-raikkonen
constructorId: mclaren
engineManufacturerId: mercedes
tyreManufacturerId: michelin
time: "1:22.052"
gap: "+1.177"
interval: "+0.100"
laps: 15
- position: 6
driverNumber: 2
driverId: rubens-barrichello
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: bridgestone
time: "1:22.076"
gap: "+1.201"
interval: "+0.024"
laps: 16
- position: 7
driverNumber: 5
driverId: ralf-schumacher
constructorId: williams
engineManufacturerId: bmw
tyreManufacturerId: michelin
time: "1:22.196"
gap: "+1.321"
interval: "+0.120"
laps: 13
- position: 8
driverNumber: 12
driverId: olivier-panis
constructorId: bar
engineManufacturerId: honda
tyreManufacturerId: bridgestone
time: "1:22.388"
gap: "+1.513"
interval: "+0.192"
laps: 15
- position: 9
driverNumber: 9
driverId: giancarlo-fisichella
constructorId: jordan
engineManufacturerId: honda
tyreManufacturerId: bridgestone
time: "1:22.390"
gap: "+1.515"
interval: "+0.002"
laps: 15
- position: 10
driverNumber: 24
driverId: mika-salo
constructorId: toyota
engineManufacturerId: toyota
tyreManufacturerId: michelin
time: "1:22.842"
gap: "+1.967"
interval: "+0.452"
laps: 16
- position: 11
driverNumber: 15
driverId: jenson-button
constructorId: renault
engineManufacturerId: renault
tyreManufacturerId: michelin
time: "1:23.227"
gap: "+2.352"
interval: "+0.385"
laps: 8
- position: 12
driverNumber: 25
driverId: allan-mcnish
constructorId: toyota
engineManufacturerId: toyota
tyreManufacturerId: michelin
time: "1:23.345"
gap: "+2.470"
interval: "+0.118"
laps: 15
- position: 13
driverNumber: 23
driverId: mark-webber
constructorId: minardi
engineManufacturerId: asiatech
tyreManufacturerId: michelin
time: "1:23.374"
gap: "+2.499"
interval: "+0.029"
laps: 14
- position: 14
driverNumber: 11
driverId: jacques-villeneuve
constructorId: bar
engineManufacturerId: honda
tyreManufacturerId: bridgestone
time: "1:23.400"
gap: "+2.525"
interval: "+0.026"
laps: 15
- position: 15
driverNumber: 21
driverId: enrique-bernoldi
constructorId: arrows
engineManufacturerId: cosworth
tyreManufacturerId: bridgestone
time: "1:23.642"
gap: "+2.767"
interval: "+0.242"
laps: 13
- position: 16
driverNumber: 8
driverId: felipe-massa
constructorId: sauber
engineManufacturerId: petronas
tyreManufacturerId: bridgestone
time: "1:23.868"
gap: "+2.993"
interval: "+0.226"
laps: 15
- position: 17
driverNumber: 7
driverId: nick-heidfeld
constructorId: sauber
engineManufacturerId: petronas
tyreManufacturerId: bridgestone
time: "1:23.929"
gap: "+3.054"
interval: "+0.061"
laps: 15
- position: 18
driverNumber: 17
driverId: pedro-de-la-rosa
constructorId: jaguar
engineManufacturerId: cosworth
tyreManufacturerId: michelin
time: "1:24.071"
gap: "+3.196"
interval: "+0.142"
laps: 12
- position: 19
driverNumber: 10
driverId: takuma-sato
constructorId: jordan
engineManufacturerId: honda
tyreManufacturerId: bridgestone
time: "1:24.169"
gap: "+3.294"
interval: "+0.098"
laps: 15
- position: 20
driverNumber: 6
driverId: juan-pablo-montoya
constructorId: williams
engineManufacturerId: bmw
tyreManufacturerId: michelin
time: "1:24.309"
gap: "+3.434"
interval: "+0.140"
laps: 14
- position: 21
driverNumber: 16
driverId: eddie-irvine
constructorId: jaguar
engineManufacturerId: cosworth
tyreManufacturerId: michelin
time: "1:24.972"
gap: "+4.097"
interval: "+0.663"
laps: 17
- position: 22
driverNumber: 22
driverId: alex-yoong
constructorId: minardi
engineManufacturerId: asiatech
tyreManufacturerId: michelin
time: "1:25.366"
gap: "+4.491"
interval: "+0.394"
laps: 5
|
src/data/seasons/2002/races/07-monaco/warming-up-results.yml
|