| code | repo_path |
|---|---|
version: '2'
services:
registry:
image: goharbor/registry-photon:v2.6.2-v1.7.5
container_name: registry
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- registry_storage:/storage:z
- ./config/registry/config.yml:/etc/registry/config.yml:z
- ../common/root.crt:/etc/registry/root.crt:z
networks:
- harbor
dns_search: .
registryctl:
image: goharbor/harbor-registryctl:v1.7.5
container_name: registryctl
env_file:
- ./config/registryctl/env
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- registry_storage:/storage:z
- ./config/registry/config.yml:/etc/registry/config.yml:z
- ./config/registryctl/config.yml:/etc/registryctl/config.yml:z
networks:
- harbor
dns_search: .
postgresql:
image: goharbor/harbor-db:v1.7.5
container_name: harbor-db
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- SETGID
- SETUID
networks:
- harbor
dns_search: .
env_file:
- ./config/db/env
adminserver:
image: goharbor/harbor-adminserver:v1.7.5
container_name: harbor-adminserver
env_file:
- ./config/adminserver/env
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- ../common/secretkey:/etc/adminserver/key:z
depends_on:
- postgresql
networks:
- harbor
dns_search: .
core:
image: goharbor/harbor-core:v1.7.5
container_name: harbor-core
env_file:
- ./config/core/env
restart: always
cap_drop:
- ALL
cap_add:
- SETGID
- SETUID
volumes:
- ./config/core/app.conf:/etc/core/app.conf:z
- ./config/core/private_key.pem:/etc/core/private_key.pem:z
- ../common/secretkey:/etc/core/key:z
networks:
- harbor
- harbor-chartmuseum
dns_search: .
depends_on:
- adminserver
- registry
portal:
image: goharbor/harbor-portal:v1.7.5
container_name: harbor-portal
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
- NET_BIND_SERVICE
networks:
- harbor
dns_search: .
depends_on:
- core
jobservice:
image: goharbor/harbor-jobservice:v1.7.5
container_name: harbor-jobservice
env_file:
- ./config/jobservice/env
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
volumes:
- ./config/jobservice/config.yml:/etc/jobservice/config.yml:z
networks:
- harbor
dns_search: .
depends_on:
- redis
- core
- adminserver
redis:
image: goharbor/redis-photon:v1.7.5
container_name: redis
restart: always
volumes:
- redis_data:/var/lib/redis/:z
networks:
- harbor
- harbor-chartmuseum
dns_search: .
proxy:
image: goharbor/nginx-photon:v1.7.5
container_name: nginx
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
- NET_BIND_SERVICE
volumes:
- ./config/nginx/nginx.conf:/etc/nginx/nginx.conf:z
networks:
- harbor
dns_search: .
ports:
- 80:80
- 443:443
- 4443:4443
depends_on:
- postgresql
- registry
- core
- portal
chartmuseum:
container_name: chartmuseum
image: goharbor/chartmuseum-photon:v0.8.1-v1.7.5
restart: always
cap_drop:
- ALL
cap_add:
- CHOWN
- DAC_OVERRIDE
- SETGID
- SETUID
networks:
- harbor-chartmuseum
dns_search: .
depends_on:
- redis
env_file:
./config/chartserver/env
networks:
harbor:
external: false
harbor-chartmuseum:
external: false
volumes:
registry_storage:
redis_data:
|
harbor/tests/compose/harbor-1.7.0/docker-compose.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-20 02:13"
variationOf: "e181ce53882c01b3cc427878ffd9e1225fef8315"
game: "Unreal Tournament"
name: "JB-IndusRageXL"
author: "TheSpoonDog + Sexmachine"
description: "None"
releaseDate: "2002-09"
attachments:
- type: "IMAGE"
name: "JB-IndusRageXL_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/Jailbreak/I/JB-IndusRageXL_shot_1.png"
originalFilename: "jb-indusragexl.zip"
hash: "58ff0ed7fc11cb7b51cb15b0e4e4647eddb3d684"
fileSize: 1346146
files:
- name: "JailBreak.u"
fileSize: 1483009
hash: "ce891867bb5d95830a45674ec8cde6c765f01a2f"
- name: "JBArena.u"
fileSize: 93157
hash: "289937bfef80b1bd875b6c53e96060119c04369b"
- name: "JB-IndusRageXL.unr"
fileSize: 2906578
hash: "b365b736e15e50aa4c949ea0191e566951048310"
- name: "JBSpecials.u"
fileSize: 324160
hash: "ac3ed7c3c3d3fdc9fe8304204f008c495f34c075"
otherFiles: 1
dependencies:
JB-IndusRageXL.unr:
- status: "OK"
name: "JBSpecials"
- status: "OK"
name: "JailBreak"
- status: "OK"
name: "JBArena"
JBArena.u:
- status: "OK"
name: "JailBreak"
JailBreak.u:
- status: "MISSING"
name: "JBAudio"
JBSpecials.u:
- status: "OK"
name: "JailBreak"
downloads:
- url: "http://medor.no-ip.org/index.php?dir=Maps/Jailbreak&file=jb-indusragexl.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/Jailbreak/I/5/8/ff0ed7/jb-indusragexl.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/Jailbreak/I/5/8/ff0ed7/jb-indusragexl.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/Jailbreak/I/5/8/ff0ed7/jb-indusragexl.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Jailbreak"
title: "Industrial Rage!XL"
playerCount: "6-12"
themes: {}
bots: true
|
content/Unreal Tournament/Maps/Jailbreak/I/5/8/ff0ed7/jb-indusragexl_[58ff0ed7].yml
|
---
apiVersion: v1
kind: Namespace
metadata:
name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: scylla-manager
spec:
selector:
matchLabels:
app: scylla-manager
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
app: scylla-manager
spec:
volumes:
- name: scylla-manager-config-volume
configMap:
name: manager-config
containers:
- name: scylla-manager
image: manager:latest
imagePullPolicy: IfNotPresent
command:
- /usr/bin/scylla-manager
args:
- --config-file=/mnt/etc/scylla-manager/scylla-manager.yaml
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
limits:
cpu: 500m
memory: 500Mi
requests:
cpu: 500m
memory: 500Mi
volumeMounts:
- mountPath: /mnt/etc/scylla-manager
name: scylla-manager-config-volume
---
apiVersion: v1
kind: Service
metadata:
name: manager-service
labels:
app: scylla-manager
spec:
ports:
- port: 80
targetPort: 5080
protocol: TCP
selector:
app: scylla-manager
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
control-plane: scylla-manager-controller
name: controller
namespace: scylla-manager
spec:
selector:
matchLabels:
control-plane: scylla-manager-controller
serviceName: controller-service
template:
metadata:
labels:
control-plane: scylla-manager-controller
spec:
containers:
- args:
- manager-controller
- --log-level=debug
command:
- /scylla-operator
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: operator:latest
imagePullPolicy: IfNotPresent
name: scylla-manager-controller
resources:
limits:
cpu: 100m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
terminationGracePeriodSeconds: 10
volumes:
- name: scylla-manager-config-volume
configMap:
name: manager-config
|
config/manager/manager/manager.yaml
|
namespace: kubeflow
resources:
- katib-controller-deployment.yaml
- katib-controller-rbac.yaml
- katib-controller-secret.yaml
- katib-controller-service.yaml
- katib-db-deployment.yaml
- katib-db-pvc.yaml
- katib-db-secret.yaml
- katib-db-service.yaml
- katib-manager-deployment.yaml
- katib-manager-rest-deployment.yaml
- katib-manager-rest-service.yaml
- katib-manager-service.yaml
- katib-ui-deployment.yaml
- katib-ui-rbac.yaml
- katib-ui-service.yaml
- metrics-collector-rbac.yaml
- metrics-collector-template-configmap.yaml
- suggestion-bayesianoptimization-deployment.yaml
- suggestion-bayesianoptimization-service.yaml
- suggestion-grid-deployment.yaml
- suggestion-grid-service.yaml
- suggestion-hyperband-deployment.yaml
- suggestion-hyperband-service.yaml
- suggestion-nasrl-deployment.yaml
- suggestion-nasrl-service.yaml
- suggestion-random-deployment.yaml
- suggestion-random-service.yaml
- trial-template.yaml
configMapGenerator:
- name: katib-parameters
env: params.env
generatorOptions:
disableNameSuffixHash: true
images:
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/katib-controller
newTag: v0.6.0-rc.0
- name: mysql
newTag: 8.0.3
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/katib-manager
newTag: v0.6.0-rc.0
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/katib-manager-rest
newTag: v0.6.0-rc.0
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/katib-ui
newTag: v0.6.0-rc.0
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/metrics-collector
newTag: v0.6.0-rc.0
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/suggestion-hyperband
newTag: v0.6.0-rc.0
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/suggestion-bayesianoptimization
newTag: v0.6.0-rc.0
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/suggestion-grid
newTag: v0.6.0-rc.0
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/suggestion-random
newTag: v0.6.0-rc.0
- name: gcr.io/kubeflow-images-public/katib/v1alpha2/suggestion-nasrl
newTag: v0.6.0-rc.0
vars:
- name: clusterDomain
objref:
kind: ConfigMap
name: katib-parameters
apiVersion: v1
fieldref:
fieldpath: data.clusterDomain
- name: namespace
objref:
kind: Service
name: katib-ui
apiVersion: v1
fieldref:
fieldpath: metadata.namespace
configurations:
- params.yaml
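# --- Hypothetical illustration (not part of this kustomization) ---
# kustomize only substitutes these vars in the field paths registered via the
# `configurations:` file (params.yaml here). A field allowed there could then
# reference the vars defined above, for example (illustrative value only):
#   value: "katib-ui.$(namespace).svc.$(clusterDomain)"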
|
katib-v1alpha2/katib-operators/base/kustomization.yaml
|
---
!ruby/sym archives:
-
- !ruby/sym core_id
- !ruby/sym integer
- !ruby/sym index:
true
-
- !ruby/sym name
- !ruby/sym string
-
- !ruby/sym path
- !ruby/sym string
-
- !ruby/sym xmlns
- !ruby/sym string
- !ruby/sym default:
'http://rs.tdwg.org/dwc/text/'
-
- !ruby/sym xmlns__xs
- !ruby/sym string
- !ruby/sym default:
'http://www.w3.org/2001/XMLSchema'
-
- !ruby/sym xmlns__xsi
- !ruby/sym string
- !ruby/sym default:
'http://www.w3.org/2001/XMLSchema-instance'
-
- !ruby/sym xsi__schema_location
- !ruby/sym string
- !ruby/sym default:
'http://rs.tdwg.org/dwc/text/ http://rs.tdwg.org/dwc/text/tdwg_dwc_text.xsd'
!ruby/sym attributes:
-
- !ruby/sym entity_id
- !ruby/sym integer
- !ruby/sym index:
true
-
- !ruby/sym type
- !ruby/sym string
- !ruby/sym default:
string
-
- !ruby/sym name
- !ruby/sym string
-
!ruby/sym index: true
!ruby/sym null: false
-
- !ruby/sym term
- !ruby/sym string
-
- !ruby/sym default
- !ruby/sym string
-
- !ruby/sym index
- !ruby/sym integer
-
- !ruby/sym max_content_length
- !ruby/sym integer
-
- !ruby/sym unambiguous
- !ruby/sym boolean
- !ruby/sym default:
true
!ruby/sym entities:
-
- !ruby/sym archive_id
- !ruby/sym integer
- !ruby/sym index:
true
-
- !ruby/sym core_id
- !ruby/sym integer
- !ruby/sym index:
true
-
- !ruby/sym name
- !ruby/sym string
- !ruby/sym null:
false
-
- !ruby/sym term
- !ruby/sym string
-
- !ruby/sym is_core
- !ruby/sym boolean
-
- !ruby/sym key_column
- !ruby/sym integer
-
- !ruby/sym fields_enclosed_by
- !ruby/sym string
- !ruby/sym default:
'"'
-
- !ruby/sym fields_terminated_by
- !ruby/sym string
- !ruby/sym default:
','
-
- !ruby/sym lines_terminated_by
- !ruby/sym string
- !ruby/sym default:
'\r\n'
!ruby/sym content_files:
-
- !ruby/sym entity_id
- !ruby/sym integer
- !ruby/sym index:
true
-
- !ruby/sym name
- !ruby/sym string
- !ruby/sym null:
false
-
- !ruby/sym path
- !ruby/sym string
-
- !ruby/sym is_loaded
- !ruby/sym boolean
- !ruby/sym default:
false
|
lib/dwcr/metaschema/metaschema_tables.yml
|
title: Azure’da Linux sanal makineleri
summary: Azure’da Linux sanal makinelerini oluşturma ve yönetme belgeleri.
metadata:
title: Azure’da Linux sanal makineleri
description: Azure’da Linux sanal makinelerini oluşturma ve yönetme belgeleri.
services: virtual-machines-linux
ms.service: virtual-machines-linux
ms.topic: landing-page
author: cynthn
ms.author: cynthn
ms.date: 09/19/2019
ms.openlocfilehash: 12c3447bc7e46e45d05711e1b0c7a59f8d8d0218
ms.sourcegitcommit: 253d4c7ab41e4eb11cd9995190cd5536fcec5a3c
ms.translationtype: HT
ms.contentlocale: tr-TR
ms.lasthandoff: 03/25/2020
ms.locfileid: "77115815"
landingContent:
- title: En yeni özellikler
linkLists:
- linkListType: whats-new
links:
- text: Spot VM'ler
url: spot-vms.md
- text: Ayrılmış konaklar
url: dedicated-hosts.md
- text: Yakınlık yerleştirme grupları
url: proximity-placement-groups.md
- title: başlarken
linkLists:
- linkListType: quickstart
links:
- text: Azure CLI
url: https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-quick-create-cli
- text: Azure portalı
url: https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-quick-create-portal
- text: Azure PowerShell
url: https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-quick-create-powershell
- title: Adım adım kılavuzlar
linkLists:
- linkListType: tutorial
links:
- text: Linux VM’lerini oluşturma ve yönetme
url: /azure/virtual-machines/linux/tutorial-manage-vm
- text: VM Diskleri
url: /azure/virtual-machines/linux/tutorial-manage-disks
- text: VM yapılandırmasını otomatikleştirme
url: /azure/virtual-machines/linux/tutorial-automate-vm-deployment
- text: Özel VM görüntüleri oluşturma
url: /azure/virtual-machines/linux/tutorial-custom-images
- text: Sanal ağlar ile VM’leri yönetme
url: /azure/virtual-machines/linux/tutorial-virtual-network
- title: Kendi hızınızda ilerleyebileceğiniz eğitim
linkLists:
- linkListType: learn
links:
- text: " Azure'da bir Linux sanal makinesi oluşturma"
url: https://docs.microsoft.com/learn/modules/create-linux-virtual-machine-in-azure/
|
articles/virtual-machines/linux/index.yml
|
album: ''
artist: <NAME> God
composer: ''
excerpt: 'no'
genre: Rock
has_bleed: 'no'
instrumental: 'no'
mix_filename: LambOfGod_Requiem_MIX.wav
origin: Mogg
producer: ''
raw_dir: LambOfGod_Requiem_RAW
stem_dir: LambOfGod_Requiem_STEMS
stems:
S01:
component: ''
filename: LambOfGod_Requiem_STEM_01.wav
instrument: drum set
raw:
R01:
filename: LambOfGod_Requiem_RAW_01_01.wav
instrument: drum set
R02:
filename: LambOfGod_Requiem_RAW_01_02.wav
instrument: drum set
S02:
component: ''
filename: LambOfGod_Requiem_STEM_02.wav
instrument: distorted electric guitar
raw:
R01:
filename: LambOfGod_Requiem_RAW_02_01.wav
instrument: distorted electric guitar
R02:
filename: LambOfGod_Requiem_RAW_02_02.wav
instrument: distorted electric guitar
S03:
component: bass
filename: LambOfGod_Requiem_STEM_03.wav
instrument: electric bass
raw:
R01:
filename: LambOfGod_Requiem_RAW_03_01.wav
instrument: electric bass
R02:
filename: LambOfGod_Requiem_RAW_03_02.wav
instrument: electric bass
S04:
component: melody
filename: LambOfGod_Requiem_STEM_04.wav
instrument: distorted electric guitar
raw:
R01:
filename: LambOfGod_Requiem_RAW_04_01.wav
instrument: distorted electric guitar
R02:
filename: LambOfGod_Requiem_RAW_04_02.wav
instrument: distorted electric guitar
S05:
component: ''
filename: LambOfGod_Requiem_STEM_05.wav
instrument: distorted electric guitar
raw:
R01:
filename: LambOfGod_Requiem_RAW_05_01.wav
instrument: distorted electric guitar
R02:
filename: LambOfGod_Requiem_RAW_05_02.wav
instrument: distorted electric guitar
S06:
component: melody
filename: LambOfGod_Requiem_STEM_06.wav
instrument: male singer
raw:
R01:
filename: LambOfGod_Requiem_RAW_06_01.wav
instrument: male singer
R02:
filename: LambOfGod_Requiem_RAW_06_02.wav
instrument: male singer
S07:
component: ''
filename: LambOfGod_Requiem_STEM_07.wav
instrument: fx/processed sound
raw:
R01:
filename: LambOfGod_Requiem_RAW_07_01.wav
instrument: fx/processed sound
R02:
filename: LambOfGod_Requiem_RAW_07_02.wav
instrument: fx/processed sound
S08:
component: melody
filename: LambOfGod_Requiem_STEM_08.wav
instrument: male screamer
raw:
R01:
filename: LambOfGod_Requiem_RAW_08_01.wav
instrument: male screamer
R02:
filename: LambOfGod_Requiem_RAW_08_02.wav
instrument: male screamer
title: Requiem
version: 1.2
website: ''
|
medleydb/data/Metadata/LambOfGod_Requiem_METADATA.yaml
|
parameters:
python_version: ''
architecture: ''
prefix: ''
gpu: false
num_build_jobs: 1
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: ${{ parameters.python_version }}
architecture: ${{ parameters.architecture }}
- script: |
${{ parameters.prefix }} python -m pip install -U pip setuptools
${{ parameters.prefix }} python -m pip install -U -r requirements.txt
displayName: "Install dependencies"
- script: |
${{ parameters.prefix }} python setup.py build_ext --inplace -j ${{ parameters.num_build_jobs }}
${{ parameters.prefix }} python setup.py sdist --formats=gztar
displayName: "Compile and build sdist"
- task: DeleteFiles@1
inputs:
contents: "spacy"
displayName: "Delete source directory"
- script: |
${{ parameters.prefix }} python -m pip freeze --exclude torch --exclude cupy-cuda110 > installed.txt
${{ parameters.prefix }} python -m pip uninstall -y -r installed.txt
displayName: "Uninstall all packages"
- bash: |
${{ parameters.prefix }} SDIST=$(python -c "import os;print(os.listdir('./dist')[-1])" 2>&1)
${{ parameters.prefix }} python -m pip install dist/$SDIST
displayName: "Install from sdist"
- script: |
${{ parameters.prefix }} python -m pip install -U -r requirements.txt
displayName: "Install test requirements"
- script: |
${{ parameters.prefix }} python -m pip install -U cupy-cuda110
${{ parameters.prefix }} python -m pip install "torch==1.7.1+cu110" -f https://download.pytorch.org/whl/torch_stable.html
displayName: "Install GPU requirements"
condition: eq(${{ parameters.gpu }}, true)
- script: |
${{ parameters.prefix }} python -m pytest --pyargs spacy
displayName: "Run CPU tests"
condition: eq(${{ parameters.gpu }}, false)
- script: |
${{ parameters.prefix }} python -m pytest --pyargs spacy -p spacy.tests.enable_gpu
displayName: "Run GPU tests"
condition: eq(${{ parameters.gpu }}, true)
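# --- Hypothetical usage sketch (not part of this template) ---
# A pipeline job could consume this step template roughly as follows; the job
# name and parameter values below are illustrative assumptions, not taken from
# the repository:
#   jobs:
#     - job: Test
#       pool:
#         vmImage: ubuntu-latest
#       steps:
#         - template: .github/azure-steps.yml
#           parameters:
#             python_version: "3.9"
#             architecture: "x64"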
|
.github/azure-steps.yml
|
fluidDefs:
air:
label: air
weight: 0
value: 0
water:
label: water
weight: 1 #weight is per litre
value: 1
nutrition:
water: 1
saltWater:
label: salt water
description: must be purified before it can be used
weight: 1 #weight is per litre
value: 0.5
juice:
label: fruit juice
weight: 1.1
value: 2
nutrition:
water: 0.8
food: 0.2
ink:
label: ink
description: a black fluid used to write words on paper. Can't be used directly, but recharges pens and quills. Must be kept in a sealed container.
weight: 1
value: 50 #per litre
fillPriority: smallest #will look for a small container as opposed to a large one, if not set defaults to largest
# very useful for small-volume items that you only want vials of as opposed to litres of
sealed: True
bleach:
label: bleach
description: a clear, watery fluid that is NOT WATER. useful for removing colours, inks and stains from clothing or paper
weight: 1.2 #kg/L, g/L doesn't matter. praise the metric system
value: 5
glue:
label: glue
description: a sticky bonding agent for all your stick-things-to-other-things needs. must be kept in a sealed container.
weight: 1.2 #kg/L, g/L doesn't matter. praise the metric system
value: 8
sealed: True
fillPriority: smallest
#
# wineRed:
# label: red wine
# description: a rich red alcoholic beverage, it's very fancy and some leeani may like it a lot.
# weight: 0.9
# value: 20
# nutrition:
# water: -0.1 #dehydrating
# alcohol: 10%
# morale: 10 #out of 100
#
sodaLemonade:
label: lemonade
description: a fizzy drink made from lemons and lots of sugar that leaves a tingle on the tongue
weight: 1
value: 5
nutrition:
water: 1
morale: 5
sodaCola:
label: cola
description: a fizzy drink made from...who knows...that leaves a tingle on the tongue
weight: 1
value: 5
nutrition:
water: 1
morale: 7
|
data/defs/fluids.yaml
|
---
documentation:
description: |
Recipe pre-processes ERA-Interim and ERA-5 data
for use in the GlobWat hydrological model.
GlobWat is a Global Hydrological Model provided by FAO
http://www.fao.org/nr/water/aquamaps/
By default, PET is calculated by the De Bruin method (De Bruin et al. 2016).
However, users can choose the Langbein method (Langbein et al. 1949) by
setting langbein_pet to True in the recipe.
More information about the De Bruin method can be found at:
https://doi.org/10.1175/JHM-D-15-0006.1 page 1376, equation 6.
More information about the Langbein method can be found at:
https://doi.org/10.3133/cir52 page 8, figure 1.
An example of using Langbein method can be found at:
https://doi.org/10.1080/02626667.2017.1332416 page 1472, equation 7.
A target_grid_file has been generated from one of the models' sample files;
see the recipe documentation.
authors:
- abdollahi_banafsheh
- alidoost_sarah
maintainer:
- abdollahi_banafsheh
projects:
- ewatercycle
references:
- acknow_project
- debruin16ams
- hoogeveen15hess
- langbein1949usgs
preprocessors:
area_selection:
extract_region:
start_longitude: 40
end_longitude: 65
start_latitude: 25
end_latitude: 40
diagnostics:
diagnostic_daily_GlobWat:
description: daily precipitation of ERA5 & ERA-Interim
additional_datasets:
- {dataset: ERA-Interim, project: OBS6, tier: 3, type: reanaly, version: 1}
- {dataset: ERA5, project: OBS6, type: reanaly, version: '1', tier: 3}
variables:
pr: &daily_var
mip: day
start_year: 1986
end_year: 2016
# comment out the preprocessor to process data on the global scale
preprocessor: area_selection
tas: *daily_var
# comment out psl, rsds and rsdt if langbein_pet is True
psl: *daily_var
rsds: *daily_var
rsdt:
<<: *daily_var
mip: CFday
scripts:
script:
script: hydrology/globwat.py
target_grid_file: 'globwat/globwat_target_grid.nc'
evaporation_method: debruin # options: debruin or langbein
regrid_scheme: area_weighted
diagnostic_monthly_GlobWat:
description: monthly precipitation and potential evaporation of ERA5 & ERA-Interim
additional_datasets:
- {dataset: ERA-Interim, project: OBS6, tier: 3, type: reanaly, version: 1}
- {dataset: ERA5, project: native6, type: reanaly, version: '1', tier: 3}
variables:
pr: &var_monthly
mip: Amon
start_year: 1986
end_year: 2016
# comment out the preprocessor to process data on the global scale
preprocessor: area_selection
tas: *var_monthly
# comment out psl, rsds and rsdt if langbein_pet is True
psl: *var_monthly
rsds: *var_monthly
rsdt: *var_monthly
scripts:
script:
script: hydrology/globwat.py
target_grid_file: 'globwat/globwat_target_grid.nc'
evaporation_method: debruin # options: debruin or langbein
regrid_scheme: area_weighted
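# Hypothetical variant (not part of the original recipe): per the documentation
# block above and the "options" comment on evaporation_method, switching to the
# Langbein method would presumably amount to setting, e.g.:
#   evaporation_method: langbein
# and commenting out the psl, rsds and rsdt variables as noted above.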
|
esmvaltool/recipes/hydrology/recipe_globwat.yml
|
defaults:
request_headers:
content-type: application/x-yaml
X-Auth-Token: $ENVIRON['TEST_AUTH_TOKEN']
response_headers:
content-type: application/x-yaml
verbose: true
tests:
- name: purge
desc: Begin testing from known state.
DELETE: /api/v1.0/revisions
status: 204
response_headers: null
- name: encrypt_generic_document_for_secret_substitution
desc: |
Create documents using a generic document type (armada/Generic/v1) as the
substitution source with storagePolicy=encrypted.
PUT: /api/v1.0/buckets/secret/documents
status: 200
data: |-
---
schema: deckhand/LayeringPolicy/v1
metadata:
schema: metadata/Control/v1
name: layering-policy
data:
layerOrder:
- site
---
# Generic document as substitution source.
schema: armada/Generic/v1
metadata:
name: example-armada-cert
schema: metadata/Document/v1
layeringDefinition:
abstract: false
layer: site
storagePolicy: encrypted
data: ARMADA CERTIFICATE DATA
---
schema: armada/Chart/v1
metadata:
schema: metadata/Document/v1
name: armada-chart-01
# We don't need to encrypt the destination document.
storagePolicy: cleartext
layeringDefinition:
abstract: false
layer: site
substitutions:
- dest:
path: .chart.values.tls.certificate
src:
schema: armada/Generic/v1
name: example-armada-cert
path: .
data: {}
...
- name: verify_multiple_revision_documents_returns_secret_ref
desc: Verify that secret ref was created for example-armada-cert among multiple created documents.
GET: /api/v1.0/revisions/$RESPONSE['$.[0].status.revision']/documents
status: 200
query_parameters:
metadata.name: example-armada-cert
cleartext-secrets: 'true'
response_multidoc_jsonpaths:
$.`len`: 1
# NOTE(felipemonteiro): jsonpath-rw-ext uses a 1 character separator (rather than allowing a string)
# leading to this nastiness:
$.[0].data.`split(:, 0, 1)` + "://" + $.[0].data.`split(/, 2, 3)`: $ENVIRON['TEST_BARBICAN_URL']
- name: verify_generic_secret_created_in_barbican
desc: Validate that the generic secret gets stored with secret_type opaque.
GET: $ENVIRON['TEST_BARBICAN_URL']/v1/secrets/$RESPONSE['$.[0].data.`split(/, 5, -1)`']
status: 200
request_headers:
content-type: application/json
response_headers:
content-type: /^application\/json$|^application\/json;\ charset=UTF-8$/
response_json_paths:
$.status: ACTIVE
$.name: example-armada-cert
# Default type for documents with generic schema.
$.secret_type: opaque
- name: verify_secret_payload_in_destination_document
desc: Verify secret payload is injected in destination document as well as example-armada-cert.
GET: /api/v1.0/revisions/$HISTORY['encrypt_generic_document_for_secret_substitution'].$RESPONSE['$.[0].status.revision']/rendered-documents
status: 200
query_parameters:
cleartext-secrets: true
metadata.name:
- armada-chart-01
- example-armada-cert
sort: metadata.name
response_multidoc_jsonpaths:
$.`len`: 2
$.[0].metadata.name: armada-chart-01
$.[0].data:
chart:
values:
tls:
certificate: ARMADA CERTIFICATE DATA
$.[1].metadata.name: example-armada-cert
$.[1].data: ARMADA CERTIFICATE DATA
|
deckhand/tests/integration/gabbits/document-substitution-secret-generic.yaml
|
name: Build binary package
on:
push:
tags:
- "v*.*.*"
jobs:
test:
name: Run tests
runs-on: windows-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.8'
- name: Install test dependencies
uses: BSFishy/pip-action@v1
with:
packages: |
black
isort
- name: Check code with black
run: black --check .
- name: Check code with isort
run: isort --profile black . --check-only
build:
name: Build installer
runs-on: windows-latest
needs:
- test
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.8'
- name: Install poetry
uses: BSFishy/pip-action@v1
with:
packages: |
poetry
- name: Install project dependencies
run: |
poetry install
- name: Read version name
id: version
run: echo "::set-output name=version::${{ github.ref_name }}" | sed 's/::v/::/'
- name: Build pyinstaller package
run: |
poetry run pyinstaller -D -w -i win64_logo.ico -n spicetify-beat-saber-backend --version-file win64_version_info.txt start.py
- name: Add WiX Toolset to PATH
shell: bash
run: echo "C:\Program Files (x86)\WiX Toolset v3.11\bin" >> $GITHUB_PATH
- name: Build MSI installer
shell: cmd
run: |
candle win64_installer.wxs -dVERSION=${{ steps.version.outputs.version }}
light win64_installer.wixobj -ext WixUIExtension -out beatsaber-backend-${{ steps.version.outputs.version }}.msi
- name: Upload workflow artifact
uses: actions/upload-artifact@v2
with:
name: beatsaber-backend-${{ steps.version.outputs.version }}
path: |
dist/*
*.msi
*.wixpdb
- name: Publish GitHub release
uses: softprops/action-gh-release@v1
with:
files: |
*.msi
|
.github/workflows/build.yml
|
version: "2"
services:
mongo:
image: "mongo:3.2"
hostname: mongo
expose:
- "27017"
ports:
- "28017:27017"
dns: #doesn't seem to work anymore
- 172.20.0.5
volumes:
- ./contrib/scripts/docker-compose/resolv.conf:/etc/resolv.conf
- ./contrib/scripts/meteor/delete_collections.sh:/tmp/delete_collections.sh
networks:
static-network:
ipv4_address: 172.20.0.3
bind:
image: "sameersbn/bind:9.11.3-20180713"
hostname: ns
expose:
- "53"
ports:
- "53:53/udp"
- "10000:10000"
dns:
- 172.20.0.5
- 8.8.8.8
environment:
- ROOT_PASSWORD=<PASSWORD>
command: /contrib/scripts/docker-compose/update_bind.sh
#stdin_open: true
#tty: true
volumes:
- "./contrib/:/contrib"
- ./contrib/scripts/docker-compose/resolv.conf:/etc/resolv.conf
networks:
static-network:
ipv4_address: 172.20.0.5
mail:
image: "esminis/mail-server-postfix-vm-pop3d"
hostname: mail
expose:
- "25"
- "465"
- "995"
ports:
- "8443:8443"
- "25:25"
- "110:110"
- "465:465"
- "995:995"
dns:
- 172.20.0.5
- 8.8.8.8
dns_search: ci-doichain.org
depends_on:
- bind
volumes:
- "./contrib/:/contrib"
- ./contrib/scripts/docker-compose/resolv.conf:/etc/resolv.conf
command: bash -c 'cp /contrib/scripts/sasl/sasldb2 /etc/ && cp -r /contrib/scripts/tequila/* /opt/tequila/domains/ && chown -R tequila:tequila /opt/tequila/domains && mkdir -p /var/spool/virtual/ci-doichain.org && chown tequila:tequila /var/spool/virtual/ci-doichain.org && service postfix reload && sleep infinity'
networks:
static-network:
ipv4_address: 172.20.0.4
alice:
image: "doichain/node-only:0.16.3.2"
hostname: alice
ports:
- "18543:18332"
expose:
- "18332"
environment:
- REGTEST=true
- RPC_ALLOW_IP=::/0
- RPC_PASSWORD=<PASSWORD>
- DAPP_URL=http://172.20.0.9:3000/
# - DAPP_URL=http://172.20.0.8:3000/ #use this when tests run inside docker
dns:
- 172.20.0.5
- 8.8.8.8
dns_search: ci-doichain.org
volumes:
- ./contrib/scripts/docker-compose/resolv.conf:/etc/resolv.conf
stdin_open: true
tty: true
networks:
static-network:
ipv4_address: 172.20.0.6
bob:
image: "doichain/node-only:0.16.3.2"
hostname: bob
ports:
- "18544:18332"
expose:
- "18332"
environment:
- REGTEST=true
- RPC_ALLOW_IP=::/0
- RPC_PASSWORD=<PASSWORD>
- DAPP_URL=http://172.20.0.9:4000/
# - DAPP_URL=http://172.20.0.8:4000 #use this when tests run inside docker
dns:
- 172.20.0.5
- 8.8.8.8
dns_search: ci-doichain.org
volumes:
- ./contrib/scripts/docker-compose/resolv.conf:/etc/resolv.conf
stdin_open: true
tty: true
networks:
static-network:
ipv4_address: 172.20.0.7
# regtest:
# image: "doichain/dapp-only"
# hostname: regtest
# #build: .
# volumes:
# - ./contrib/scripts/docker-compose/resolv.conf:/etc/resolv.conf
# - "/var/run/docker.sock:/var/run/docker.sock"
# - ".:/home/doichain/dapp" #TODO you can enable this during development but not during testing in CI (permission denided)
# dns:
# - 172.20.0.5
# - 8.8.8.8
# dns_search: ci-doichain.org
# #&& export MONGO_URL=mongodb://mongo:27017/alice && meteor npm cache verify && meteor npm install && meteor npm run test-d-compose-alice-mocha
# command: bash -c "./contrib/scripts/meteor/start_dapp.sh && sleep infinity"
# ports:
# - "3000:3000"
# - "4000:4000"
# expose:
# - "3000"
# - "4000"
# stdin_open: true
# tty: true
# privileged: true
# depends_on:
# - mongo
# - alice
# - bob
# - bind
# - mail
# networks:
# static-network:
# ipv4_address: 172.20.0.8
dockerhost:
image: qoomon/docker-host
cap_add: [ 'NET_ADMIN', 'NET_RAW' ]
# volumes:
# - ./contrib/scripts/docker-compose/resolv.conf:/etc/resolv.conf
restart: on-failure
expose:
- "3000"
- "4000"
networks:
static-network:
ipv4_address: 172.20.0.9
networks:
static-network:
driver: bridge
ipam:
config:
- subnet: 172.20.0.0/16
gateway: 172.20.0.1
|
docker-compose-test-regtest.yml
|
--- !<SKIN>
contentType: "SKIN"
firstIndex: "2018-12-26 14:28"
game: "Unreal Tournament 2004"
name: "BloodFalcon"
author: "Unknown"
description: "None"
releaseDate: "2003-12"
attachments:
- type: "IMAGE"
name: "BloodFalcon_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Skins/B/BloodFalcon_shot_1.png"
- type: "IMAGE"
name: "BloodFalcon_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Skins/B/BloodFalcon_shot_2.png"
- type: "IMAGE"
name: "BloodFalcon_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Skins/B/BloodFalcon_shot_3.png"
originalFilename: "captainfalcon_bloodfalcon_v16_ut2k3.zip"
hash: "d0189235947ce84cc33f3254deafc5727cb6dee0"
fileSize: 1477566
files:
- name: "CaptainFalcon.utx"
fileSize: 1224100
hash: "94c832845bd7b8e5e8a88352b4c800e76b9ad541"
- name: "BloodFalcon.upl"
fileSize: 358
hash: "d24f3b65d96e504359494ca4b70761d0b3bb6263"
- name: "BloodFalcon.utx"
fileSize: 1224092
hash: "d6e96a83f3e2e605c3d0e0677fa7e298164010c3"
- name: "CaptainFalcon.upl"
fileSize: 373
hash: "10c387fc278944cd5ee00e703c74534c2ace7f27"
otherFiles: 2
dependencies: {}
downloads:
- url: "https://gamefront.online/files2/service/thankyou?id=2005738"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Skins/B/captainfalcon_bloodfalcon_v16_ut2k3.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Skins/B/d/0/189235/captainfalcon_bloodfalcon_v16_ut2k3.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Skins/B/d/0/189235/captainfalcon_bloodfalcon_v16_ut2k3.zip"
main: false
repack: false
state: "OK"
deleted: false
skins:
- "BloodFalcon"
- "CaptainFalcon"
faces: []
model: "Unknown"
teamSkins: false
|
content/Unreal Tournament 2004/Skins/B/d/0/189235/bloodfalcon_[d0189235].yml
|
apiVersion: pipecd.dev/v1beta1
kind: Piped
spec:
projectID: test-project
pipedID: test-piped
pipedKeyFile: etc/piped/key
apiAddress: your-pipecd.domain
webAddress: https://your-pipecd.domain
syncInterval: 1m
git:
username: username
email: <EMAIL>
sshKeyFile: /etc/piped-secret/ssh-key
repositories:
- repoId: repo1
remote: <EMAIL>:org/repo1.git
branch: master
- repoId: repo2
remote: <EMAIL>:org/repo2.git
branch: master
chartRepositories:
- name: fantastic-charts
address: https://fantastic-charts.storage.googleapis.com
- name: private-charts
address: https://private-charts.com
username: basic-username
password: <PASSWORD>
cloudProviders:
- name: kubernetes-default
type: KUBERNETES
config:
appStateInformer:
includeResources:
- apiVersion: pipecd.dev/v1beta1
- apiVersion: networking.gke.io/v1beta1
kind: ManagedCertificate
excludeResources:
- apiVersion: v1
kind: Endpoints
- name: terraform
type: TERRAFORM
config:
vars:
- "project=gcp-project"
- "region=us-centra1"
- name: cloudrun
type: CLOUDRUN
config:
project: gcp-project-id
region: cloud-run-region
credentialsFile: /etc/piped-secret/gcp-service-account.json
- name: lambda
type: LAMBDA
config:
region: us-east-1
analysisProviders:
- name: prometheus-dev
type: PROMETHEUS
config:
address: https://your-prometheus.dev
- name: datadog-dev
type: DATADOG
config:
address: https://your-datadog.dev
apiKeyFile: /etc/piped-secret/datadog-api-key
applicationKeyFile: /etc/piped-secret/datadog-application-key
- name: stackdriver-dev
type: STACKDRIVER
config:
serviceAccountFile: /etc/piped-secret/gcp-service-account.json
imageProviders:
- name: my-dockerhub
type: DOCKER_HUB
config:
username: foo
passwordFile: /etc/piped-secret/dockerhub-pass
- name: my-gcr
type: GCR
config:
serviceAccountFile: /etc/piped-secret/gcr-service-account.json
- name: my-ecr
type: ECR
config:
region: us-west-2
registryId: default
credentialsFile: /etc/piped-secret/aws-credentials
profile: user1
notifications:
routes:
- name: dev-slack
envs:
- dev
receiver: dev-slack-channel
- name: prod-slack
events:
- DEPLOYMENT_STARTED
- DEPLOYMENT_COMPLETED
envs:
- dev
receiver: prod-slack-channel
- name: all-events-to-ci
receiver: ci-webhook
receivers:
- name: dev-slack-channel
slack:
hookURL: https://slack.com/dev
- name: prod-slack-channel
slack:
hookURL: https://slack.com/prod
- name: ci-webhook
webhook:
url: https://pipecd.dev/dev-hook
sealedSecretManagement:
type: SEALING_KEY
config:
privateKeyFile: /etc/piped-secret/sealing-private-key
publicKeyFile: /etc/piped-secret/sealing-public-key
# type: GCP_KMS
# config:
# keyName: key-name
# decryptServiceAccountFile: /etc/piped-secret/decrypt-service-account.json
# encryptServiceAccountFile: /etc/piped-secret/encrypt-service-account.json
imageWatcher:
checkInterval: 10m
gitRepos:
- repoId: foo
commitMessage: Update image
includes:
- imagewatcher-dev.yaml
- imagewatcher-stg.yaml
|
pkg/config/testdata/piped/piped-config.yaml
|
title: Documentation de la famille de produits Visual Studio
summary: Explorez la documentation de la famille de produits Visual Studio.
brand: visual-studio
metadata:
title: Documentation de la famille de produits Visual Studio
description: Explorez la documentation de la famille de produits Visual Studio.
ms.topic: hub-page
ms.custom: contperfq1
ms.date: 05/13/2020
searchScope: []
titleSuffix: ''
ms.author: ghogen
author: ghogen
ms.manager: jillfra
monikerRange: '>=vs-2017'
hide_bc: true
ms.openlocfilehash: 27efe1b7a8dd936745a0aac97b50dcbb5e8c78ab
ms.sourcegitcommit: 13cf7569f62c746708a6ced1187d8173eda7397c
ms.translationtype: HT
ms.contentlocale: fr-FR
ms.lasthandoff: 09/25/2020
ms.locfileid: "91352149"
productDirectory:
items:
- title: Visual Studio
imageSrc: ./media/vs-ide-2019.svg
links:
- url: ./windows/index.yml?view=vs-2019&preserve-view=true
text: Visual Studio 2019 (version actuelle)
- url: ./windows/index.yml?view=vs-2017&preserve-view=true
text: Visual Studio 2017
- url: /previous-versions/visualstudio/
text: Versions antérieures
- url: /visualstudio/releases/2019/release-notes
text: Notes de publication
- url: https://visualstudio.microsoft.com/
text: Page du produit
- title: Visual Studio pour Mac
imageSrc: ./media/vs-mac-2019.svg
links:
- url: /visualstudio/mac
text: Visual Studio 2019 pour Mac (version actuelle)
- url: /visualstudio/mac/?view=vsmac-2017
text: Visual Studio 2017 pour Mac
- url: /visualstudio/releasenotes/vs2019-mac-relnotes
text: Notes de publication
- url: https://visualstudio.microsoft.com/vs/mac/
text: Page du produit
- title: Visual Studio Code
imageSrc: ./media/vs-code-logo.svg
links:
- url: https://code.visualstudio.com/docs
text: Documentation
- url: https://code.visualstudio.com/updates
text: Notes de publication
- url: https://code.visualstudio.com
text: Page du produit
- title: Visual Studio Codespaces
imageSrc: ./media/vs-ide-2019.svg
links:
- url: /visualstudio/codespaces/
text: Documentation
- url: https://code.visualstudio.com/docs/remote/codespaces
text: Utilisation avec Visual Studio Code
- url: https://visualstudio.microsoft.com/services/visual-studio-codespaces/
text: Page du produit
- title: Visual Studio App Center
imageSrc: ./media/vs-mobile-center.svg
links:
- url: /appcenter
text: Documentation
- url: https://visualstudio.microsoft.com/app-center/
text: Portail
- title: Abonnements Visual Studio
imageSrc: ./media/vs-subscription-temp.svg
links:
- url: /visualstudio/subscriptions
text: Documentation
- url: https://visualstudio.microsoft.com/subscriptions
text: Informations sur visualstudio.com
- url: https://my.visualstudio.com
text: Portail des abonnés
- url: https://manage.visualstudio.com
text: Portail administrateur
|
docs/index.yml
|
uid: "com.microsoft.azure.management.mediaservices.v2018_07_01.StreamingLocators.listPathsAsync*"
fullName: "com.microsoft.azure.management.mediaservices.v2018_07_01.StreamingLocators.listPathsAsync"
name: "listPathsAsync"
nameWithType: "StreamingLocators.listPathsAsync"
members:
- uid: "com.microsoft.azure.management.mediaservices.v2018_07_01.StreamingLocators.listPathsAsync(java.lang.String,java.lang.String,java.lang.String)"
fullName: "com.microsoft.azure.management.mediaservices.v2018_07_01.StreamingLocators.listPathsAsync(String resourceGroupName, String accountName, String streamingLocatorName)"
name: "listPathsAsync(String resourceGroupName, String accountName, String streamingLocatorName)"
nameWithType: "StreamingLocators.listPathsAsync(String resourceGroupName, String accountName, String streamingLocatorName)"
summary: "List Paths. List Paths supported by this Streaming Locator."
parameters:
- description: "The name of the resource group within the Azure subscription."
name: "resourceGroupName"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
- description: "The Media Services account name."
name: "accountName"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
- description: "The Streaming Locator name."
name: "streamingLocatorName"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
syntax: "public abstract Observable<ListPathsResponse> listPathsAsync(String resourceGroupName, String accountName, String streamingLocatorName)"
returns:
description: "the observable for the request"
type: "<xref href=\"rx.Observable?alt=rx.Observable&text=Observable\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.microsoft.azure.management.mediaservices.v2018_07_01.ListPathsResponse?alt=com.microsoft.azure.management.mediaservices.v2018_07_01.ListPathsResponse&text=ListPathsResponse\" data-throw-if-not-resolved=\"False\" />>"
type: "method"
metadata: {}
package: "com.microsoft.azure.management.mediaservices.v2018_07_01"
artifact: com.microsoft.azure.mediaservices.v2018_07_01:azure-mgmt-media:1.0.0-beta-5
|
docs-ref-autogen/com.microsoft.azure.management.mediaservices.v2018_07_01.StreamingLocators.listPathsAsync.yml
|
language: php
sudo: false
cache:
directories:
- $HOME/.composer/cache/files
php:
- 7.1
- 7.2
- 7.3
env:
global:
- TEST_COMMAND="composer test"
- PULI_VERSION=1.0.0-beta10
branches:
except:
- /^analysis-.*$/
matrix:
fast_finish: true
include:
- name: PHPSpec code coverage
php: 7.1
# Disable code coverage until https://github.com/leanphp/phpspec-code-coverage/pull/38 is released
# DEPENDENCIES="leanphp/phpspec-code-coverage:^4.2" TEST_COMMAND="composer test-ci"
env: COMPOSER_FLAGS="--prefer-stable --prefer-lowest" COVERAGE=true TEST_COMMAND="composer test" PULI_VERSION=1.0.0-beta9
- name: PHPUnit tests
php: 7.3
env: TEST_COMMAND="./vendor/bin/phpunit" DEPENDENCIES="phpunit/phpunit:^7.5 nyholm/psr7:^1.0 kriswallsmith/buzz:^1.0 php-http/curl-client:^1.0 php-http/message"
- name: PHPUnit test with nothing installed
php: 7.3
env: TEST_COMMAND="./vendor/bin/phpunit --group=NothingInstalled" DEPENDENCIES="phpunit/phpunit:^7.5"
- name: Test Install
php: 7.3
install:
- |
# install_test is a helper to create folded reports (From Symfony)
install_test () {
local title="$1 \"$2\" ..."
local fold=$(date +%s%N)
echo -e "travis_fold:start:$fold"
echo -e "\\e[1;34m$title\\e[0m"
echo "./tests/install.sh \"$1\" \"$2\" \"$3\""
./tests/install.sh "$1" "$2" "$3" 2>&1
local ok=$?
(exit $ok) &&
echo -e "\\e[32mOK\\e[0m $title\\n\\ntravis_fold:end:$fold" ||
echo -e "\\e[41mKO\\e[0m $title\\n"
(exit $ok)
}
export -f install_test
script:
# Test that we find Guzzle
- install_test will-find "Http\Discovery\HttpClientDiscovery::find();" "php-http/guzzle6-adapter"
# Test that we find a client with Symfony and Guzzle PSR-7
- install_test will-find "Http\Discovery\HttpClientDiscovery::find();" "symfony/http-client:5.* php-http/httplug php-http/message-factory guzzlehttp/psr7:1.* http-interop/http-factory-guzzle"
# We should fail if we don't have php-http/message-factory or PSR-17
- install_test cant-find "Http\Discovery\HttpClientDiscovery::find();" "symfony/http-client:5.* php-http/httplug php-http/message-factory guzzlehttp/psr7:1.*"
- install_test cant-find "Http\Discovery\HttpClientDiscovery::find();" "symfony/http-client:5.* php-http/httplug guzzlehttp/psr7:1.* http-interop/http-factory-guzzle"
# We should be able to find a client when Symfony is only partly installed and we have guzzle adapter installed
- install_test will-find "Http\Discovery\HttpClientDiscovery::find();" "symfony/http-client:5.* php-http/guzzle6-adapter php-http/httplug php-http/message-factory guzzlehttp/psr7:1.*"
before_install:
- if [[ $COVERAGE != true ]]; then phpenv config-rm xdebug.ini || true; fi
- if ! [ -z "$DEPENDENCIES" ]; then composer require --no-update ${DEPENDENCIES}; fi;
install:
- wget https://github.com/puli/cli/releases/download/1.0.0-beta10/puli.phar && chmod +x puli.phar
- composer require puli/composer-plugin:${PULI_VERSION} --no-update
- composer update ${COMPOSER_FLAGS} --prefer-dist --no-interaction
script:
- $TEST_COMMAND
after_success:
- if [[ $COVERAGE = true ]]; then wget https://scrutinizer-ci.com/ocular.phar; fi
- if [[ $COVERAGE = true ]]; then php ocular.phar code-coverage:upload --format=php-clover build/coverage.xml; fi
|
.travis.yml
|
root: index
subtrees:
- caption: Contents
entries:
- file: Introduction.md
title: Introduction
- file: core_features.md
- file: training/index.rst
- file: examples/index.md
title: Example Notebooks
subtrees:
- entries:
- file: examples/getting-started-movielens/index.md
title: Getting Started with MovieLens
entries:
- file: examples/getting-started-movielens/01-Download-Convert.ipynb
title: Download and Convert
- file: examples/getting-started-movielens/02-ETL-with-NVTabular.ipynb
title: ETL with NVTabular
- file: examples/getting-started-movielens/03-Training-with-HugeCTR.ipynb
title: Train with HugeCTR
- file: examples/getting-started-movielens/03-Training-with-TF.ipynb
title: Train with TensorFlow
- file: examples/getting-started-movielens/03-Training-with-PyTorch.ipynb
title: Train with PyTorch
- file: examples/getting-started-movielens/04-Triton-Inference-with-HugeCTR.ipynb
title: Serve a HugeCTR Model
- file: examples/getting-started-movielens/04-Triton-Inference-with-TF.ipynb
title: Serve a TensorFlow Model
- file: examples/advanced-ops-outbrain/index.md
title: Advanced Ops with Outbrain
entries:
- file: examples/advanced-ops-outbrain/01-Download-Convert.ipynb
- file: examples/advanced-ops-outbrain/02-ETL-with-NVTabular.ipynb
- file: examples/advanced-ops-outbrain/03-Training-with-TF.ipynb
- file: examples/scaling-criteo/index.md
entries:
- file: examples/scaling-criteo/01-Download-Convert.ipynb
title: Download and Convert
- file: examples/scaling-criteo/02-ETL-with-NVTabular.ipynb
title: ETL with NVTabular
- file: examples/scaling-criteo/03-Training-with-HugeCTR.ipynb
title: Train with HugeCTR
- file: examples/scaling-criteo/03-Training-with-TF.ipynb
title: Train with TensorFlow
- file: examples/scaling-criteo/03-Training-with-FastAI.ipynb
title: Train with FastAI
- file: examples/scaling-criteo/04-Triton-Inference-with-HugeCTR.ipynb
title: Serve a HugeCTR Model
- file: examples/scaling-criteo/04-Triton-Inference-with-TF.ipynb
title: Serve a TensorFlow Model
- file: examples/tabular-data-rossmann/index.md
title: Applying Techniques to Rossmann Stores Data
entries:
- file: examples/tabular-data-rossmann/01-Download-Convert.ipynb
- file: examples/tabular-data-rossmann/02-ETL-with-NVTabular.ipynb
- file: examples/tabular-data-rossmann/03-Training-with-FastAI.ipynb
- file: examples/tabular-data-rossmann/03-Training-with-TF.ipynb
- file: examples/tabular-data-rossmann/03-Training-with-PyTorch.ipynb
- file: examples/multi-gpu-movielens/index.md
entries:
- file: examples/multi-gpu-movielens/01-03-MultiGPU-Download-Convert-ETL-with-NVTabular-Training-with-TensorFlow.ipynb
- file: examples/multi-gpu-toy-example/multi-gpu_dask.ipynb
- file: examples/winning-solution-recsys2020-twitter/01-02-04-Download-Convert-ETL-with-NVTabular-Training-with-XGBoost.ipynb
title: Winning Solution of the RecSys2020 Competition
- file: api
title: API Documentation
- file: resources/index
title: Additional Resources
|
docs/source/toc.yaml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2019-05-11 15:16"
game: "Unreal Tournament 2004"
name: "AS-U4V-SmallQuest-FINAL10"
author: "|U4V|$hinji"
description: "None"
releaseDate: "2005-06"
attachments:
- type: "IMAGE"
name: "AS-U4V-SmallQuest-FINAL10_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Assault/U/AS-U4V-SmallQuest-FINAL10_shot_1.png"
- type: "IMAGE"
name: "AS-U4V-SmallQuest-FINAL10_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Assault/U/AS-U4V-SmallQuest-FINAL10_shot_2.png"
originalFilename: "as-u4v-smallquest-final10.zip"
hash: "1eb2b70e3b55696183517660c7ab8b1019ba552e"
fileSize: 9980435
files:
- name: "BattleBike_Tex.utx"
fileSize: 14333964
hash: "e5654de96f7bc027220dc5d3d936361cbef2a0b6"
- name: "BattleBike_Snds.uax"
fileSize: 689914
hash: "aa14be9936f28294e6cebb9682d5bbaf9642bb70"
- name: "TFAMap.u"
fileSize: 21814
hash: "4769d810763cd19e6218101f9409ae9bb5076f5b"
- name: "2K5_Stargate_SM.usx"
fileSize: 2419520
hash: "32c06a2ed84331ed7e78c486ca139e65dbe21108"
- name: "2k5_ShinjiLogo.usx"
fileSize: 2320772
hash: "b73ecd2168cc529270aaa897e3cb66a1a7351afc"
- name: "AS-U4V-SmallQuest-FINAL10.ut2"
fileSize: 21289343
hash: "0132a320de75eb68f9ac86ac7287d73c9facaa71"
- name: "BattleBikes.u"
fileSize: 283363
hash: "54c5e543097e5554ba39f2a6f1358f99d39415ad"
- name: "BattleBikes_ST.usx"
fileSize: 1161468
hash: "406dcb3a7aef68eae00f3ce7b73efe937e92fbdc"
- name: "BattleBikes_Anim.ukx"
fileSize: 3973744
hash: "9265a8d2e0a61e500eeedc9739d921f50c4a90f3"
otherFiles: 1
dependencies:
BattleBikes.u:
- status: "OK"
name: "BattleBikes_ST"
- status: "OK"
name: "BattleBikes_Anim"
- status: "OK"
name: "BattleBike_Snds"
- status: "OK"
name: "BattleBike_Tex"
BattleBikes_Anim.ukx:
- status: "OK"
name: "BattleBike_Tex"
BattleBikes_ST.usx:
- status: "OK"
name: "BattleBike_Tex"
AS-U4V-SmallQuest-FINAL10.ut2:
- status: "OK"
name: "BattleBikes"
- status: "OK"
name: "TFAMap"
- status: "OK"
name: "2k5_ShinjiLogo"
- status: "OK"
name: "2K5_Stargate_SM"
downloads:
- url: "http://www.unrealplayground.com/forums/downloads.php?do=file&id=5317"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/Assault/U/as-u4v-smallquest-final10.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/Assault/U/1/e/b2b70e/as-u4v-smallquest-final10.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/Assault/U/1/e/b2b70e/as-u4v-smallquest-final10.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Assault"
title: "Smallquest"
playerCount: "Unknown"
themes:
Tech: 0.6
Industrial: 0.1
Ancient: 0.2
bots: false
|
content/Unreal Tournament 2004/Maps/Assault/U/1/e/b2b70e/as-u4v-smallquest-final10_[1eb2b70e].yml
|
version: 2
general:
branches:
ignore:
- gh-pages
jobs:
"coverage-gcc":
docker:
- image: johnmcfarlane/cnl_ci:gcc-10
steps:
- checkout
- run:
name: Set up system
command: /root/project/.circleci/setup.sh lcov parallel
- restore_cache:
key: cache
- run:
name: Run coverage
command: bash -c /root/project/scripts/coverage.sh
- store_artifacts:
path: coverage-report
- save_cache:
key: cache
paths:
- ~/.ccache
- ~/.conan/data
"clang-static-analyzer":
docker:
- image: johnmcfarlane/cnl_ci:clang-10
steps:
- checkout
- run:
name: Set up system
command: /root/project/.circleci/setup.sh
- restore_cache:
key: cache
- run:
name: Run analyzer
command: /root/project/scripts/clang-static-analyzer.sh
- store_artifacts:
path: scan-build
- save_cache:
key: cache
paths:
- ~/.ccache
- ~/.conan/data
"cppcheck-gcc":
docker:
- image: johnmcfarlane/cnl_ci:gcc-10
steps:
- checkout
- run:
name: Set up system
command: /root/project/.circleci/setup.sh cppcheck
- restore_cache:
key: cache
- run:
name: Generate cppcheck error list
command: cppcheck --errorlist > cppcheck-errors.xml
- store_artifacts:
path: cppcheck-errors.xml
- run:
name: Run cppcheck
command: bash -c /root/project/scripts/cppcheck.sh
- save_cache:
key: cache
paths:
- ~/.ccache
- ~/.conan/data
"sanitize-gcc":
docker:
- image: johnmcfarlane/cnl_ci:gcc-10
steps:
- checkout
- run:
name: Set up system
command: /root/project/.circleci/setup.sh parallel
- restore_cache:
key: cache
- run:
name: Run sanitizer
command: bash -c /root/project/scripts/sanitize.sh
- save_cache:
key: cache
paths:
- ~/.ccache
- ~/.conan/data
"valgrind-gcc":
docker:
- image: johnmcfarlane/cnl_ci:gcc-10
steps:
- checkout
- run:
name: Set up system
command: /root/project/.circleci/setup.sh parallel valgrind
- restore_cache:
key: cache
- run:
name: Run Valgrind Memcheck
command: bash -c /root/project/scripts/valgrind.sh
- save_cache:
key: cache
paths:
- ~/.ccache
- ~/.conan/data
"clang-tidy":
docker:
- image: johnmcfarlane/cnl_ci:clang-10
steps:
- checkout
- run:
name: Set up system
command: /root/project/.circleci/setup.sh clang-tidy
- restore_cache:
key: cache
- run:
name: Run Clang-Tidy
command: /root/project/scripts/clang-tidy.sh
- save_cache:
key: cache
paths:
- ~/.ccache
- ~/.conan/data
"sanitize-clang":
docker:
- image: johnmcfarlane/cnl_ci:clang-10
steps:
- checkout
- run:
name: Set up system
command: /root/project/.circleci/setup.sh parallel
- restore_cache:
key: cache
- run:
name: Run sanitizer
command: /root/project/scripts/sanitize.sh
- save_cache:
key: cache
paths:
- ~/.ccache
- ~/.conan/data
"shellcheck":
docker:
- image: johnmcfarlane/cnl_ci:base-20.04
steps:
- checkout
- run:
name: Install ShellCheck linter
command: |
apt update --yes --quiet
apt upgrade --yes --quiet
apt install --yes --quiet shellcheck
- run:
name: Run ShellCheck
command: /root/project/scripts/shellcheck.sh
workflows:
version: 2
test:
jobs:
- "coverage-gcc"
- "clang-static-analyzer"
- "cppcheck-gcc"
- "sanitize-gcc"
- "valgrind-gcc"
- "clang-tidy"
- "sanitize-clang"
- "shellcheck"
|
.circleci/config.yml
|
markdown: kramdown
highlighter: rouge
kramdown:
auto_ids: true
# Permalinks
permalink: pretty
# Server
source: "site"
destination: ./_gh_pages
host: 0.0.0.0
port: 9001
baseurl: ""
# boosted mod
url: http://boosted.orange.com
# end boosted mod
encoding: UTF-8
exclude:
- docs/4.1/assets/scss/
- .git/
- .github/
- assets/scss/
- build/
- js/
- dist/
- assets/
- node_modules/
- nuget/
- scss/
- vendor/
- CODE_OF_CONDUCT.md
- composer.json
- Gemfile
- Gemfile.lock
- package.js
- package.json
- package-lock.json
- README.md
- sache.json
- twbsconfig.yml
plugins:
- jekyll-redirect-from
- jekyll-sitemap
- jekyll-toc
# boosted mod
grid:
xs:
breakpoint: 0
width: 302
gutter: 10
desc: Extra small devices
subdesc: Portrait phone (<480px) (target 320 screen resolutions)
sm:
breakpoint: 480
width: 458
gutter: 10
desc: Small devices
subdesc: Landscape phones (≥480px - <768px) (target 480 screen resolutions)
md:
breakpoint: 768
width: 724
gutter: 20
desc: Medium devices
subdesc: Tablet (≥768px - <960px) (target 768 screen resolutions)
lg:
breakpoint: 980
width: 960
gutter: 20
desc: Large devices
subdesc: Desktop (≥960px - <1120px) (target 1024 screen resolutions)
xl:
breakpoint: 1220
width: 1180
gutter: 20
desc: Extra large devices
subdesc: Desktop (≥1220px - <1380px) (target 1280 screen resolutions)
xxl:
breakpoint: 1380
width: 1300
gutter: 20
desc: Extra extra large devices
subdesc: Desktop (≥1380px) (target 1440 screen resolutions)
# end boosted mod
# Social
title: Boosted
description: Based on Bootstrap, the most popular library.
twitter: getbootstrap
authors: "<NAME>, <NAME>, and Bootstrap contributors"
social_image_path: /docs/4.1/assets/brand/bootstrap-social.png
social_logo_path: /docs/4.1/assets/brand/bootstrap-social-logo.png
# Custom variables
current_version: 4.1.3
current_ruby_version: 4.1.3
docs_version: 4.1
repo: "https://github.com/Orange-OpenSource/Orange-Boosted-Bootstrap"
slack: "https://bootstrap-slack.herokuapp.com"
blog: "https://blog.getbootstrap.com"
expo: "https://expo.getbootstrap.com"
jobs: "https://jobs.getbootstrap.com"
themes: "https://themes.getbootstrap.com"
download:
source: "https://github.com/Orange-OpenSource/Orange-Boosted-Bootstrap/archive/v4.1.3.zip"
dist: "https://github.com/Orange-OpenSource/Orange-Boosted-Bootstrap/releases/download/v4.1.3/boosted-4.1.3-dist.zip"
cdn:
# See https://www.srihash.org for info on how to generate the hashes
css: "https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css"
css_hash: "sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO"
js: "https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
js_hash: "sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
jquery: "https://code.jquery.com/jquery-3.3.1.slim.min.js"
jquery_hash: "sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo"
popper: "https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js"
popper_hash: "sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49"
tablesorter: "https://cdnjs.cloudflare.com/ajax/libs/jquery.tablesorter/2.29.4/js/jquery.tablesorter.min.js"
tablesorter_hash: "sha256-eMa5IMxi2DRTBbnBSoK/sTKxDkVo8uqM7jMgKFfMoiU="
toc:
min_level: 2
max_level: 4
|
bundle/base/web/netuno/scripts/sbadmin4/plugins/boosted/_config.yml
|
include: package:flutter_lints/flutter.yaml
linter:
rules:
avoid_bool_literals_in_conditional_expressions: true
avoid_catches_without_on_clauses: true
avoid_redundant_argument_values: true
avoid_returning_this: true
avoid_setters_without_getters: true
avoid_slow_async_io: true
avoid_types_on_closure_parameters: true
avoid_unused_constructor_parameters: true
avoid_void_async: true
cancel_subscriptions: true
cascade_invocations: true
directives_ordering: true
flutter_style_todos: true
join_return_with_assignment: true
missing_whitespace_between_adjacent_strings: true
no_leading_underscores_for_local_identifiers: true
omit_local_variable_types: true
one_member_abstracts: true
only_throw_errors: true
parameter_assignments: true
prefer_asserts_in_initializer_lists: true
prefer_asserts_with_message: true
prefer_constructors_over_static_methods: true
prefer_expression_function_bodies: true
prefer_final_in_for_each: true
prefer_final_locals: true
prefer_foreach: true
prefer_if_elements_to_conditional_expressions: true
prefer_interpolation_to_compose_strings: true
prefer_int_literals: true
prefer_null_aware_method_calls: true
prefer_relative_imports: true
prefer_single_quotes: true
sized_box_shrink_expand: true
sort_constructors_first: true
sort_pub_dependencies: true
sort_unnamed_constructors_first: true
type_annotate_public_apis: true
unnecessary_await_in_return: true
unnecessary_constructor_name: true
unnecessary_lambdas: true
unnecessary_late: true
unnecessary_parenthesis: true
use_if_null_to_convert_nulls_to_bools: true
analyzer:
exclude:
- "**/*.freezed.dart"
- "**/*.g.dart"
plugins:
- dart_code_metrics
dart_code_metrics:
anti-patterns:
- long-method
- long-parameter-list
metrics:
cyclomatic-complexity: 20
maximum-nesting-level: 5
number-of-parameters: 4
source-lines-of-code: 50
metrics-exclude:
- test/**
rules:
- always-remove-listener
- avoid-returning-widgets
- avoid-unnecessary-setstate
- avoid-wrapping-in-padding
- newline-before-return
- no-boolean-literal-compare
- no-empty-block
- no-equal-then-else
- prefer-conditional-expressions
- prefer-const-border-radius
- prefer-extracting-callbacks
- prefer-single-widget-per-file:
ignore-private-widgets: true
- prefer-trailing-comma
|
analysis_options.yaml
|
code: BFR
name: BondFixedRate
label: Bond - Fixed Rate
description: >
A fixed rate bond is a bond that pays the same level of interest over its entire term. An investor
who wants to earn a guaranteed interest rate for a specified term could purchase a fixed rate bond
in the form of a Treasury, corporate bond, municipal bond, or certificate of deposit (CD).
metadata:
kind: Debt Security
examples:
- Chicago, Midway Airport, Series 2001A
fields:
- name: Name
label: Name
description: Name of bond
type: varchar
example: City of Chicago, Midway Airport, Series 2001A
- name: BondType
label: Bond Type
description: >
Type of bond.
C - Corporate
M - Municipal
G - Government / Sovereign
type: fixedchar
size: 1
options:
- C
- M
- G
example: C
required: true
- name: ISIN
label: ISIN/CUSIP (optional)
description: >
International Securities Identification Number or Committee on Uniform Securities
Identification Procedures.
type: varchar
example: US0004026250
- name: Collateral
label: Collateral
    description: An asset that secures the bond. If null, then the bond is unsecured.
type: varchar
varSize: small
- name: ParValue
label: Par Value
description: Par value of the bond. The value that will be paid at maturity.
type: CurrencyValue
required: true
- name: InterestRate
label: Interest Rate
description: The fixed interest rate of the bond.
type: Rate
- name: InterestPaymentInitialDate
    label: Interest Payment Initial Date
description: Unix epoch date time (in seconds) for the first interest payment.
type: TimestampSeconds
only_valid_when:
field_name: InterestRate
required_when:
field_name: InterestRate
- name: InterestPaymentDateDeltas
label: Interest Payment Date Deltas
description: >
      Number of seconds from the previous interest payment until the next payment, expressed
      as a delta in seconds from the previous payment.
type: Seconds[]
listSize: small
only_valid_when:
field_name: InterestRate
required_when:
field_name: InterestRate
- name: LatePaymentPenaltyRate
label: Late Payment Penalty Rate
description: The rate of the penalty per the penalty period.
type: Rate
- name: LatePaymentWindow
label: Late Payment Window
description: >
The amount of time after a payment is due before the late payment penalty is applied.
type: TimestampSeconds
only_valid_when:
field_name: LatePaymentPenaltyRate
- name: LatePaymentPenaltyPeriod
label: Late Payment Penalty Period
description: The period at which the late payment penalty accrues.
type: TimestampSeconds
only_valid_when:
field_name: LatePaymentPenaltyRate
- name: MaturityDate
label: Maturity Date
    description: The date on which the bond matures and the par value is paid.
type: Timestamp
required: true
- name: AgeRestriction
label: Age Restriction
type: AgeRestriction
description: Age restriction is used to specify required ages for asset ownership.
- name: TransfersPermitted
label: Transfers Permitted
description: >
Set to true if transfers are permitted between two parties, otherwise set to false to prevent
peer-to-peer transfers.
type: bool
|
src/assets/develop/security/BFR - Bond Fixed Rate.yaml
|
language: php
dist: trusty
sudo: false
notifications:
email: false
branches:
only:
- master
matrix:
include:
- php: 7.1
env:
- LARAVEL_VERSION=5.5.*
- SYMFONY_VERSION=^3.0
- PHPUNIT_VERSION=^6.5
- php: 7.1
env:
- LARAVEL_VERSION=5.6.*
- SYMFONY_VERSION=^4.0
- PHPUNIT_VERSION=^7.0
- php: 7.1
env:
- LARAVEL_VERSION=5.7.*
- SYMFONY_VERSION=^4.1
- PHPUNIT_VERSION=^7.0
- php: 7.1
env:
- LARAVEL_VERSION=5.8.*
- SYMFONY_VERSION=^4.1
- PHPUNIT_VERSION=^7.5
- php: 7.2
env:
- LARAVEL_VERSION=5.5.*
- SYMFONY_VERSION=^3.0
- PHPUNIT_VERSION=^6.5
- php: 7.2
env:
- LARAVEL_VERSION=5.6.*
- SYMFONY_VERSION=^4.0
- PHPUNIT_VERSION=^7.0
- php: 7.2
env:
- LARAVEL_VERSION=5.7.*
- SYMFONY_VERSION=^4.1
- PHPUNIT_VERSION=^7.0
- php: 7.2
env:
- LARAVEL_VERSION=5.8.*
- SYMFONY_VERSION=^4.1
- PHPUNIT_VERSION=^7.5
install:
- composer require "laravel/framework:${LARAVEL_VERSION}" --no-update -n
- composer require "symfony/lts:${SYMFONY_VERSION}" --no-update -n
- composer require "phpunit/phpunit:${PHPUNIT_VERSION}" --dev --no-update -n
- travis_retry composer install --no-suggest --prefer-dist -n -o
before_script:
- if [ "$TRAVIS_PHP_VERSION" == "7.1" ]; then curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter; fi
- if [ "$TRAVIS_PHP_VERSION" == "7.1" ]; then chmod +x ./cc-test-reporter; fi
- if [ "$TRAVIS_PHP_VERSION" == "7.1" ]; then ./cc-test-reporter before-build; fi
script:
- if [ "$TRAVIS_PHP_VERSION" == "7.1" ]; then vendor/bin/phpunit --coverage-clover build/logs/clover.xml; fi
- if [ "$TRAVIS_PHP_VERSION" != "7.1" ]; then vendor/bin/phpunit; fi
after_script:
- if [ "$TRAVIS_PHP_VERSION" == "7.1" ]; then ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT; fi
|
.travis.yml
|
version: '2'
services:
mysqldb:
build:
context: ./kiwi-database
dockerfile: mysql.dockerfile
networks:
- service-network
environment:
- MYSQL_ROOT_PASSWORD=<PASSWORD>
ports:
- "3306"
mongodb:
image: mongo
restart: always
networks:
- content-network
- user-network
environment:
MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: <PASSWORD>
entry:
build:
context: ./kiwi-entry
dockerfile: entry.dockerfile
ports:
- "8000:8000"
networks:
- user-network
- service-network
- content-network
environment:
- MONGO_HOST=mongodb
- MONGO_PORT=27017
- MONGO_USER=root
- MONGO_DB=content
- MONGO_COLLECTION=items
- MONGO_PWD=<PASSWORD>
- USER_SERVICE=http://user:80
- SWITCHER_SERVICE=http://engine:8000
- CONTENT_SERVICE=http://content:5000/items
- UNVOTED_THRESHOLD=50
- SELF=http://entry:8000/content
content:
build:
context: ./kiwi-content-crawler
dockerfile: content.dockerfile
ports:
- "5000"
networks:
- content-network
restart: always
environment:
- MONGO_HOST=mongodb
- MONGO_PORT=27017
- MONGO_USER=root
- MONGO_DB=content
- MONGO_COLLECTION=items
- MONGO_PWD=<PASSWORD>
- IMGUR_CLIENT_ID=<your-key-here>
user:
build:
context: ./kiwi-user-manager
dockerfile: user.dockerfile
ports:
- "80"
networks:
- user-network
environment:
- KIWI_USER_MANAGER_DB_HOST=mongodb
- KIWI_USER_MANAGER_DB_DATABASE=users
- KIWI_USER_MANAGER_DB_USER=root
- KIWI_USER_MANAGER_DB_PASSWORD=<PASSWORD>
engine:
build:
context: ./kiwi-engine-selector
dockerfile: engine.dockerfile
ports:
- "8000"
networks:
- service-network
restart: always
environment:
- RECOMMENDERS=[
'svd=http://svd:8000',
'content=http://content-rec:8000',
'knn=http://knn:8000',
'random=http://random:8000'
]
content-rec:
build:
context: ./kiwi-content
dockerfile: service.dockerfile
ports:
- "8000"
networks:
- service-network
restart: always
environment:
- MSQL_USER=content
- MSQL_PWD=<PASSWORD>
- MSQL_HOST=mysqldb
- MSQL_PORT=3306
- MSQL_DATABASE=content_recommender
- MIN_RATING=0
- MAX_RATING=1
- POS_CUTOFF=0
volumes:
- './kiwi-content/kiwi:/var/www/kiwi'
random:
build:
context: ./kiwi-random
dockerfile: service.dockerfile
ports:
- "8000"
networks:
- service-network
restart: always
environment:
- MIN_RATING=0
- MAX_RATING=1
- MSQL_USER=random
- MSQL_PWD=<PASSWORD>
- MSQL_HOST=mysqldb
- MSQL_PORT=3306
- MSQL_DATABASE=random_recommender
volumes:
- './kiwi-random/kiwi:/var/www/kiwi'
knn:
build:
context: ./kiwi-surprise
dockerfile: surprise.dockerfile
ports:
- "8000"
networks:
- service-network
restart: always
environment:
- MIN_RATING=0
- MAX_RATING=1
- MSQL_USER=knn
- MSQL_PWD=<PASSWORD>
- MSQL_HOST=mysqldb
- MSQL_PORT=3306
- MSQL_DATABASE=knn_recommender
- RETRAIN_ON_REQUEST=['/feedback']
- ALGO_PATH=algorithms/knn_item_msd_zscore.py
volumes:
- './kiwi-surprise/kiwi:/kiwi'
svd:
build:
context: ./kiwi-surprise
dockerfile: surprise.dockerfile
ports:
- "8000"
networks:
- service-network
restart: always
environment:
- MIN_RATING=0
- MAX_RATING=1
- MSQL_USER=svd
- MSQL_PWD=<PASSWORD>
- MSQL_HOST=mysqldb
- MSQL_PORT=3306
- MSQL_DATABASE=svd_recommender
- RETRAIN_ON_REQUEST=['/feedback']
- ALGO_PATH=algorithms/svd.py
volumes:
- './kiwi-surprise/kiwi:/kiwi'
# requires a prior build step of the ionic framework
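  # e.g. (assumed commands; adjust to the frontend's actual tooling):
  #   cd kiwi-mobile-frontend && npm install && ionic build --prod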
webapp:
build:
context: ./kiwi-mobile-frontend
dockerfile: webapp.dockerfile
networks:
- webapp-network
ports:
- "80:80"
networks:
service-network:
driver: bridge
content-network:
driver: bridge
user-network:
driver: bridge
webapp-network:
driver: bridge
|
docker-compose-deploy.yml
|
stages:
- Platform Tests
- Analysis
variables:
GIT_SUBMODULE_STRATEGY: recursive
CTEST_OUTPUT_ON_FAILURE: '1'
# Platform Tests
.platform_unix_template: &platform_unix_template
stage: Platform Tests
tags:
- docker-linux
script:
- mkdir build && cd build
- cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBUILD_EXAMPLES=ON -DBUILD_TESTS=ON
- ninja
- ctest
.platform_windows_template: &platform_windows_template
stage: Platform Tests
image: stabletec/build-foe:windows
tags:
- docker-windows
- amd64
script:
- mkdir build; cd build
- cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBUILD_EXAMPLES=ON -DBUILD_TESTS=ON
- ninja
- ninja test
.platform_macos_template: &platform_macos_template
stage: Platform Tests
tags:
- macos
- amd64
dependencies: []
script:
- mkdir build && cd build
- cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DBUILD_EXAMPLES=ON -DBUILD_TESTS=ON
- ninja
- ctest
Fedora/GCC:
image: stabletec/build-foe:fedora
variables:
CC: gcc
CXX: g++
<<: *platform_unix_template
Fedora/Clang:
image: stabletec/build-foe:fedora
variables:
CC: clang
CXX: clang++
<<: *platform_unix_template
Ubuntu/GCC:
image: stabletec/build-foe:ubuntu
variables:
CC: gcc
CXX: g++
<<: *platform_unix_template
Ubuntu/Clang:
image: stabletec/build-foe:ubuntu
variables:
CC: clang
CXX: clang++
<<: *platform_unix_template
Windows/MSVC:
<<: *platform_windows_template
Windows/Clang-cl:
variables:
CC: clang-cl
CXX: clang-cl
<<: *platform_windows_template
macOS/Clang (amd64):
variables:
CC: clang
CXX: clang++
<<: *platform_macos_template
# Analysis
Code Coverage:
stage: Analysis
image: stabletec/build-foe:fedora
tags:
- docker-linux
dependencies: []
variables:
CC: clang
CXX: clang++
script:
- mkdir build && cd build
- cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DCODE_COVERAGE=ON -DBUILD_TESTS=ON
- ninja
- ninja ccov-VkSerializationTests
- ninja ccov-report-VkSerializationTests
artifacts:
name: CodeCoverage
paths:
- build/ccov
expire_in: 1 day
|
.gitlab-ci.yml
|
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: br0testbot
namespace: bots
spec:
interval: 5m
chart:
spec:
# renovate: registryUrl=https://br0-space.github.io/helm-charts
chart: br0bot
sourceRef:
kind: HelmRepository
name: br0-charts
namespace: flux-system
version: '0.2.18'
values:
image:
repository: ghcr.io/br0-space/bot
tag: main
imagePullPolicy: Always
replicaCount: 1
ingress:
enabled: true
className: "nginx"
hosts:
- host: br0testbot.br0.space
paths:
- path: /
pathType: ImplementationSpecific
tls:
- secretName: br0testbot-space-tls
hosts:
- br0testbot.br0.space
annotations:
external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}"
external-dns/is-public: "true"
resources:
limits:
cpu: 300m
memory: 128Mi
requests:
cpu: 300m
memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 2
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector:
kubernetes.io/arch: amd64
bot:
buzzwords:
buzzwordsMatcher:
- trigger: "huan"
reply: "Und wieder wurde einer zum Huan ernannt, *Huan* ist jetzt auf *%d*"
- trigger: "tzn"
reply: "Schon wieder ist etwas am Arsch: *Tzn* ist jetzt auf *%d*"
- trigger: "(bier|saufen|besoffen)"
reply: "Scho wieder am saufem, Bier ist jetzt auf *%d*"
- trigger: "420"
reply: "Nice! *420* ist jetzt auf *%d*"
- trigger: "(weggesynct|weggesynced)"
reply: "`rm -rf /` Weggesynct ist jetzt auf *%d*"
- trigger: "sync"
reply: "Der Sync ist stark in diesem! Sync ist damit auf *%d*"
- trigger: "obegfoin"
reply: "Immer ist irgendwas kaputt! Obegfoin ist jetzt auf *%d*"
- trigger: "lel"
reply: "Und schon wieder wurde extrem laut gelacht! Lel ist jetzt auf *%d*"
- trigger: "behindert"
reply: "Und schon wieder war einer behindert! Behindert ist jetzt auf *%d*"
- trigger: "kartoffel"
reply: "Kartoffel? 🤜🤛👊 Pommes! Schon *%d* Kilo zugenommen"
- trigger: "vapoctl"
reply: "🥦 schon wieder raucht sich einer dicht, schon *%d* mal"
telegram:
apiKey: ${SECRET_BR0TESTBOT_TOKEN}
webhookurl: br0testbot.br0.space
postgresql:
image:
debug: true
postgresqlUsername: br0testbot
# This just needs to be set. It will use a second entry in existingSecret for postgresql-postgres-password
postgresqlPassword: <PASSWORD>
install: true
postgresqlDatabase: br0testbot
persistence:
storageClass: 'nfs-client'
enabled: true
size: 15G
metrics:
enabled: true
# resources: {}
service:
type: ClusterIP
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '9187'
serviceMonitor:
enabled: true
additionalLabels: {}
|
cluster/apps/bots/br0testbot/helm-release.yaml
|
image: $CI_REGISTRY/signageos/docker-node:12.16.1-alpine-build
services:
- docker:dind
stages:
- prepare
- test
- build
- publish
- release
cache:
untracked: true
key: "$CI_PROJECT_ID"
paths:
- node_modules/
before_script:
- export VERSION=`cat ./VERSION`
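  # A reading of the next line (descriptive only): on non-tag builds TAG is the branch name;
  # on tag builds the prerelease channel (e.g. "beta" in 1.2.3-beta.4) is extracted from
  # VERSION, falling back to "latest" for stable versions.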
- export TAG=$([ "$CI_COMMIT_TAG" == "" ] && echo $CI_COMMIT_REF_NAME || echo $(echo ${VERSION}-latest | sed -E 's/^[0-9].[0-9].[0-9]-([a-zA-Z0-9]+)(\.[0-9]+)?(-[a-zA-Z0-9]+)?$/\1/p' | tail -n1))
- npx --userconfig ./.npmrc @signageos/lib version-upgrade $VERSION
- mkdir -p dist
- npm install
prepare:
image: $CI_REGISTRY/signageos/docker:master
stage: prepare
before_script: []
script:
- ci-prepare
artifacts:
when: on_success
paths:
- ./VERSION
test:general:
stage: test
except:
- tags
script:
- npm test
test:lint:
stage: test
except:
- tags
script:
- npm run lint
test:changelog:
image: $CI_REGISTRY/signageos/docker:master
stage: test
except:
- tags
allow_failure: true
script:
- ci-test-changelog
test:check:
stage: test
except:
- tags
allow_failure: true
script:
- npm run check
build:
stage: build
dependencies:
- prepare
script:
- npm run prepare --production
- npm run escheck
artifacts:
when: on_success
paths:
- dist/
publish:npm:
stage: publish
dependencies:
- prepare
- build
script:
- echo 'registry=${NPM_REGISTRY_URL}' > .npmrc
- echo 'always-auth=true' >> .npmrc
- echo '//${NPM_REGISTRY_HOST}/:_authToken="${<PASSWORD>}"' >> .npmrc
- npm publish --ignore-scripts
publish:npm-public:
stage: publish
only:
- tags
- master
dependencies:
- prepare
- build
script:
- echo 'registry=${NPM_REGISTRY_URL}' > .npmrc
- echo 'always-auth=true' >> .npmrc
- echo '//${NPM_REGISTRY_HOST}/:_authToken="${NPM_AUTH_TOKEN}"' >> .npmrc
- export NPM_REGISTRY_URL=https://registry.npmjs.org/
- export NPM_REGISTRY_HOST=registry.npmjs.org
- export NPM_AUTH_TOKEN=$PUBLIC_NPM_AUTH_TOKEN
- tools/prepare-npm-public.js
- npm publish --ignore-scripts
release:tag:
image: $CI_REGISTRY/signageos/docker:master
stage: release
only:
- master
when: manual
allow_failure: false
before_script: []
script:
- ci-release-tag
release:notes:
image: $CI_REGISTRY/signageos/docker:master
stage: release
only:
- tags
script:
- ci-release-notes
|
.gitlab-ci.yml
|
homepage: https://github.com/snoyberg/mono-traversable#readme
changelog-type: markdown
hash: 0ada7712bd8291c958cbd0db1ccc8374936f260cfa41422d7a5d979d1c7ea757
test-bench-deps: {}
maintainer: <EMAIL>
synopsis: DEPRECATED Functionality merged into the conduit package itself
changelog: ! "# 1.3.0\n\n* Deprecated; functionality moved into conduit package itself\n\n#
1.2.0\n\n* Switch over to `MonadUnliftIO`\n\n# 1.1.2\n\n* Add `chunksOfE` and `chunksOfExactlyE`
combinators\n\n# 1.1.1\n\n* Add `asum` combinator\n\n# 1.1.0\n\n* Don't generalize
I/O functions to `IOData`, instead specialize to\n `ByteString`. See:\n http://www.snoyman.com/blog/2016/12/beware-of-readfile#real-world-failures\n\n#
1.0.8.3\n\n* Fix version bounds for chunked-data/mono-traversable combos\n\n# 1.0.8.2\n\n*
Fix foldl1 not being \"a strict left fold\" as advertised.\n [#115](https://github.com/snoyberg/mono-traversable/pull/115)\n\n#
1.0.8.1\n\n* Break on single elements and defer monoid concatenation until yield\n
\ [#111](https://github.com/snoyberg/mono-traversable/pull/111)\n\n# 1.0.8\n\n*
Add lower bound on conduit 1.2.8 (make it easier to follow [the\n reskin](http://www.snoyman.com/blog/2016/09/proposed-conduit-reskin).\n\n#
1.0.7\n\n* Add `sourceFileBS` and `sinkFileBS`\n\n# 1.0.6\n\n* Add `peekForeverE`
combinator\n\n# 1.0.5\n\n* Add head, headDef and lastDef combinators\n\n# 1.0.4\n\n*
Move into mono-traversable repo, support mono-traversable 1.0\n\n# 1.0.3.1\n\n*
Support for QuickCheck 2.8.2\n\n# 1.0.3\n\n* sourceRandomWith [#19](https://github.com/fpco/conduit-combinators/pull/19)\n\n#
1.0.2\n\n* Make mapAccumWhile & mapAccumS strict in accumulator state [#18](https://github.com/fpco/conduit-combinators/pull/18)\n\n#
1.0.1\n\n* mapAccumWhile, mapAccumWhileM, mapAccumS\n\n# 1.0.0\n\n* Drop system-filepath/system-fileio\n\n#
0.3.1\n\n* `peekForever`\n\n# 0.3.0\n\nStream fusion enabled, drop compatibility
with older conduit\n"
basic-deps:
base: ! '>=4.9 && <5'
all-versions:
- 0.1.0.0
- 0.2.0.0
- 0.2.0.1
- 0.2.1
- 0.2.2
- 0.2.3
- 0.2.3.1
- 0.2.4
- 0.2.4.1
- 0.2.5
- 0.2.5.1
- 0.2.5.2
- 0.2.6
- 0.2.6.1
- 0.2.7
- 0.2.8
- 0.2.8.2
- 0.2.8.3
- 0.3.0
- 0.3.0.1
- 0.3.0.2
- 0.3.0.3
- 0.3.0.4
- 0.3.0.5
- 0.3.0.6
- 0.3.1
- 1.0.0
- 1.0.1
- 1.0.1.1
- 1.0.2
- 1.0.3
- 1.0.3.1
- 1.0.4
- 1.0.5
- 1.0.6
- 1.0.7
- 1.0.8
- 1.0.8.1
- 1.0.8.2
- 1.0.8.3
- 1.1.0
- 1.1.1
- 1.1.2
- 1.3.0
author: <NAME>
latest: 1.3.0
description-type: markdown
description: ! 'conduit-combinators
===================
Commonly used conduit functions, for both chunked and unchunked data. For more
information about conduit in general, and how this package in particular fits
into the ecosystem, see [the conduit
homepage](https://github.com/snoyberg/conduit#readme).
'
license-name: MIT
|
packages/co/conduit-combinators.yaml
|
trigger:
- master
pool:
vmImage: 'vs2017-win2016'
variables:
status: 'fail'
steps:
- checkout: self
persistCredentials: "true"
- powershell: (new-object System.Net.WebClient).DownloadFile('http://www.nespowerpak.com/nesasm/NESASM3.zip','NESASM3.zip')
displayName: 'Download NESASM3 (NES assembler) for Windows'
- task: ExtractFiles@1
displayName: 'Extract NESASM3'
inputs:
archiveFilePatterns: NESASM3.zip
destinationFolder: NESASM3
- task: Bash@3
displayName: 'Add current date and time to NES game (source file)'
inputs:
targetType: 'inline'
script: |
current_date=`date +"%^b %d, %Y"`
current_time=`date +"%0l:%M %p"`
sed -i "s/.db \"... .., 20..\"/.db \"$current_date\"/" game/test.asm
sed -i "s/.db \"..:.. .M\"/.db \"$current_time\"/" game/test.asm
cat game/test.asm
- task: CmdLine@2
displayName: 'Assemble source file to create NES ROM'
inputs:
script: 'cd D:\a\1\s\game && D:\a\1\s\NESASM3\NESASM3.exe D:\a\1\s\game\test.asm'
- task: Bash@3
displayName: 'Check assembly status'
inputs:
targetType: 'inline'
script: |
result=$(cd D:/a/1/s/game && D:/a/1/s/NESASM3/NESASM3.exe D:/a/1/s/game/test.asm | grep error)
echo $result
echo "##vso[task.setvariable variable=status]$result"
- task: Bash@3
displayName: 'Fail build only if assembly fails'
inputs:
targetType: 'inline'
script: exit 1
condition: contains(variables['status'], 'error')
- powershell: (new-object System.Net.WebClient).DownloadFile('https://downloads.sourceforge.net/project/fceultra/Binaries/2.2.3/fceux-2.2.3-win32.zip?r=https%3A%2F%2Fsourceforge.net%2Fprojects%2Ffceultra%2Ffiles%2FBinaries%2F2.2.3%2Ffceux-2.2.3-win32.zip%2Fdownload&ts=1559878465','FCEUX.zip')
displayName: 'Download FCEUX (NES emulator) for Windows'
- task: ExtractFiles@1
displayName: 'Extract FCEUX'
inputs:
archiveFilePatterns: FCEUX.zip
destinationFolder: build/fceux
cleanDestinationFolder: false
- powershell: Start-Process D:\a\1\s\build\fceux\fceux.exe "-lua D:\a\1\s\build\screenshot.lua D:\a\1\s\game\test.nes"
displayName: 'Open NES ROM in FCEUX and run Lua script to take screenshot'
- task: Bash@3
displayName: 'Wait 10 seconds then confirm screenshot was created'
inputs:
targetType: 'inline'
script: sleep 10 && cd D:/a/1/s/build && ls && cat screenshot.png
- task: Bash@3
displayName: 'Configure git'
inputs:
targetType: 'inline'
script: git config user.email "<EMAIL>" && git config user.name "NES Build Pipeline"
- task: Bash@3
displayName: 'Remove NESASM3 and FCEUX from build folder'
inputs:
targetType: 'inline'
script: 'rm NESASM3.zip FCEUX.zip build/fceux/7z.dll build/fceux/auxlib.lua build/fceux/fceux.chm build/fceux/fceux.exe build/fceux/lua5.1.dll build/fceux/lua51.dll build/fceux/taseditor.chm && rm -rf build/fceux/luaScripts/ build/fceux/palettes/ build/fceux/tools/ NESASM3 && ls && cd build && ls'
- task: Bash@3
displayName: 'Update caption in README.md with date and time of update'
inputs:
targetType: 'inline'
script: |
current_date=`date +"%A, %B %d, %Y at %T"`
sed -i "6c<figcaption>Screenshot updated $current_date UTC+00:00.</figcaption>" readme.md
cat readme.md
- task: Bash@3
displayName: 'Commit changes and push ROM, ROM screenshot, and README back to GitHub repo (master branch)'
inputs:
targetType: 'inline'
script: cd D:/a/1/s/ && ls && git add --all && git commit -m "Adds latest ROM, screenshot, and README from build pipeline. [skip ci]" && git push origin HEAD:master
continueOnError: true
|
azure-pipelines.yml
|
AWSTemplateFormatVersion: "2010-09-09"
Description: "Enables IRSA for the aws-node daemonset (qs-1r15gqig4)"
Metadata:
QSLint:
Exclusions: [W9002, W9003, W9004, W9006]
Parameters:
ClusterName:
Type: String
OIDCProvider:
Type: String
Resources:
RBACRole:
Type: "AWSQS::Kubernetes::Resource"
DeletionPolicy: Retain
Properties:
ClusterName: !Ref ClusterName
Namespace: kube-system
Manifest: |
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: aws-node-enable-irsa
rules:
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- patch
- get
- apiGroups: [""]
resources:
- serviceaccounts
verbs:
- patch
- get
ServiceAccount:
Type: "AWSQS::Kubernetes::Resource"
DeletionPolicy: Retain
Properties:
ClusterName: !Ref ClusterName
Namespace: kube-system
Manifest: !Sub |
apiVersion: v1
kind: ServiceAccount
metadata:
name: aws-node-enable-irsa
annotations:
eks.amazonaws.com/role-arn: ${AWSNodeIAMRole.Arn}
ServiceClusterRoleBinding:
Type: "AWSQS::Kubernetes::Resource"
DependsOn: [ServiceAccount, RBACRole]
DeletionPolicy: Retain
Properties:
ClusterName: !Ref ClusterName
Namespace: kube-system
Manifest: |
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: aws-node-enable-irsa
subjects:
- kind: ServiceAccount
name: aws-node-enable-irsa
namespace: kube-system
roleRef:
kind: ClusterRole
name: aws-node-enable-irsa
apiGroup: rbac.authorization.k8s.io
AWSNodeIAMRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument: !Sub |
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:${AWS::Partition}:iam::${AWS::AccountId}:oidc-provider/${OIDCProvider}"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${OIDCProvider}:sub": "system:serviceaccount:kube-system:aws-node"
}
}
}
]
}
Path: "/"
ManagedPolicyArns:
- !Sub 'arn:${AWS::Partition}:iam::aws:policy/AmazonEKS_CNI_Policy'
JobResource:
Type: "AWSQS::Kubernetes::Resource"
DeletionPolicy: Retain
DependsOn: ServiceClusterRoleBinding
Properties:
ClusterName: !Ref ClusterName
Namespace: kube-system
Manifest: !Sub |
apiVersion: batch/v1
kind: Job
metadata:
name: aws-node-enable-irsa
spec:
template:
spec:
serviceAccountName: aws-node-enable-irsa
nodeSelector:
kubernetes.io/os: linux
containers:
- name: aws-node-enable-irsa
image: amazonlinux:2
command: ["/bin/bash","-c"]
args:
- >
sleep 10;
if [ `uname -m` = 'x86_64' ]; then ARCH="amd64"; else ARCH="arm64"; fi;
curl --retry 5 -o kubectl https://amazon-eks.s3-us-west-2.amazonaws.com/1.21.2/2021-07-05/bin/linux/$ARCH/kubectl;
chmod +x ./kubectl;
./kubectl -n kube-system patch sa aws-node -p '{"metadata": {"annotations": {"eks.amazonaws.com/role-arn": "${AWSNodeIAMRole.Arn}" }}}';
./kubectl -n kube-system patch daemonset aws-node -p '{"spec": {"template": {"metadata": {"annotations": {"irsa": "enabled"}}}}}';
RETRIES=0 ;
while true ; do
DESIRED=$(./kubectl get daemonset aws-node -n kube-system -o jsonpath={.status.desiredNumberScheduled}|| exit 1);
UPDATED=$(./kubectl get daemonset aws-node -n kube-system -o jsonpath={.status.updatedNumberScheduled}|| exit 1);
READY=$(./kubectl get daemonset aws-node -n kube-system -o jsonpath={.status.numberReady}|| exit 1);
if [[ "$DESIRED" == "$UPDATED" && "$DESIRED" == "$READY" ]]; then break ; fi ;
echo "Desired: $DESIRED Updated: $UPDATED Ready: $READY" ;
((RETRIES=RETRIES+1));
if [ $RETRIES -gt 40 ]; then echo "Timed out waiting for pods to become ready"; exit 1; fi;
sleep 15;
done
restartPolicy: OnFailure
backoffLimit: 4
|
templates/aws-node-daemonset-IRSA.template.yaml
|
items:
- uid: >-
azure-arm-recoveryservices-siterecovery.RecoveryPlanAutomationRunbookActionDetails
name: RecoveryPlanAutomationRunbookActionDetails
fullName: RecoveryPlanAutomationRunbookActionDetails
children:
- >-
azure-arm-recoveryservices-siterecovery.RecoveryPlanAutomationRunbookActionDetails.fabricLocation
- >-
azure-arm-recoveryservices-siterecovery.RecoveryPlanAutomationRunbookActionDetails.runbookId
- >-
azure-arm-recoveryservices-siterecovery.RecoveryPlanAutomationRunbookActionDetails.timeout
langs:
- typeScript
type: interface
summary: ''
extends:
name: azure-arm-recoveryservices-siterecovery.RecoveryPlanActionDetails
package: azure-arm-recoveryservices-siterecovery
- uid: >-
azure-arm-recoveryservices-siterecovery.RecoveryPlanAutomationRunbookActionDetails.fabricLocation
name: fabricLocation
fullName: fabricLocation
children: []
langs:
- typeScript
type: property
summary: ''
syntax:
content: 'fabricLocation: string'
return:
type:
- string
package: azure-arm-recoveryservices-siterecovery
- uid: >-
azure-arm-recoveryservices-siterecovery.RecoveryPlanAutomationRunbookActionDetails.runbookId
name: runbookId
fullName: runbookId
children: []
langs:
- typeScript
type: property
summary: ''
optional: true
syntax:
content: 'runbookId?: string'
return:
type:
- string
package: azure-arm-recoveryservices-siterecovery
- uid: >-
azure-arm-recoveryservices-siterecovery.RecoveryPlanAutomationRunbookActionDetails.timeout
name: timeout
fullName: timeout
children: []
langs:
- typeScript
type: property
summary: ''
optional: true
syntax:
content: 'timeout?: string'
return:
type:
- string
package: azure-arm-recoveryservices-siterecovery
references:
- uid: azure-arm-recoveryservices-siterecovery.RecoveryPlanActionDetails
name: RecoveryPlanActionDetails
spec.typeScript:
- name: RecoveryPlanActionDetails
fullName: RecoveryPlanActionDetails
uid: azure-arm-recoveryservices-siterecovery.RecoveryPlanActionDetails
|
docs-ref-autogen/azure-arm-recoveryservices-siterecovery/RecoveryPlanAutomationRunbookActionDetails.yml
|
Nogrod\eBaySDK\Trading\DiscountPriceInfoType:
virtual_properties:
ns_prop:
exp: '"urn:ebay:apis:eBLBaseComponents"'
serialized_name: xmlns
xml_attribute: true
properties:
originalRetailPrice:
expose: true
access_type: public_method
serialized_name: OriginalRetailPrice
xml_element:
cdata: false
namespace: 'urn:ebay:apis:eBLBaseComponents'
accessor:
getter: getOriginalRetailPrice
setter: setOriginalRetailPrice
type: Nogrod\eBaySDK\Trading\AmountType
minimumAdvertisedPrice:
expose: true
access_type: public_method
serialized_name: MinimumAdvertisedPrice
xml_element:
cdata: false
namespace: 'urn:ebay:apis:eBLBaseComponents'
accessor:
getter: getMinimumAdvertisedPrice
setter: setMinimumAdvertisedPrice
type: Nogrod\eBaySDK\Trading\AmountType
minimumAdvertisedPriceExposure:
expose: true
access_type: public_method
serialized_name: MinimumAdvertisedPriceExposure
xml_element:
cdata: false
namespace: 'urn:ebay:apis:eBLBaseComponents'
accessor:
getter: getMinimumAdvertisedPriceExposure
setter: setMinimumAdvertisedPriceExposure
type: string
pricingTreatment:
expose: true
access_type: public_method
serialized_name: PricingTreatment
xml_element:
cdata: false
namespace: 'urn:ebay:apis:eBLBaseComponents'
accessor:
getter: getPricingTreatment
setter: setPricingTreatment
type: string
soldOneBay:
expose: true
access_type: public_method
serialized_name: SoldOneBay
xml_element:
cdata: false
namespace: 'urn:ebay:apis:eBLBaseComponents'
accessor:
getter: getSoldOneBay
setter: setSoldOneBay
type: bool
soldOffeBay:
expose: true
access_type: public_method
serialized_name: SoldOffeBay
xml_element:
cdata: false
namespace: 'urn:ebay:apis:eBLBaseComponents'
accessor:
getter: getSoldOffeBay
setter: setSoldOffeBay
type: bool
madeForOutletComparisonPrice:
expose: true
access_type: public_method
serialized_name: MadeForOutletComparisonPrice
xml_element:
cdata: false
namespace: 'urn:ebay:apis:eBLBaseComponents'
accessor:
getter: getMadeForOutletComparisonPrice
setter: setMadeForOutletComparisonPrice
type: Nogrod\eBaySDK\Trading\AmountType
|
metadata/Trading/DiscountPriceInfoType.yml
|
uid: "com.azure.resourcemanager.keyvault.fluent.VaultsClient.listBySubscription*"
fullName: "com.azure.resourcemanager.keyvault.fluent.VaultsClient.listBySubscription"
name: "listBySubscription"
nameWithType: "VaultsClient.listBySubscription"
members:
- uid: "com.azure.resourcemanager.keyvault.fluent.VaultsClient.listBySubscription()"
fullName: "com.azure.resourcemanager.keyvault.fluent.VaultsClient.listBySubscription()"
name: "listBySubscription()"
nameWithType: "VaultsClient.listBySubscription()"
summary: "The List operation gets information about the vaults associated with the subscription."
syntax: "public abstract PagedIterable<VaultInner> listBySubscription()"
returns:
description: "list of vaults."
type: "<xref href=\"com.azure.core.http.rest.PagedIterable?alt=com.azure.core.http.rest.PagedIterable&text=PagedIterable\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.azure.resourcemanager.keyvault.fluent.models.VaultInner?alt=com.azure.resourcemanager.keyvault.fluent.models.VaultInner&text=VaultInner\" data-throw-if-not-resolved=\"False\" />>"
- uid: "com.azure.resourcemanager.keyvault.fluent.VaultsClient.listBySubscription(java.lang.Integer,com.azure.core.util.Context)"
fullName: "com.azure.resourcemanager.keyvault.fluent.VaultsClient.listBySubscription(Integer top, Context context)"
name: "listBySubscription(Integer top, Context context)"
nameWithType: "VaultsClient.listBySubscription(Integer top, Context context)"
summary: "The List operation gets information about the vaults associated with the subscription."
parameters:
- description: "Maximum number of results to return."
name: "top"
type: "<xref href=\"java.lang.Integer?alt=java.lang.Integer&text=Integer\" data-throw-if-not-resolved=\"False\" />"
- description: "The context to associate with this operation."
name: "context"
type: "<xref href=\"com.azure.core.util.Context?alt=com.azure.core.util.Context&text=Context\" data-throw-if-not-resolved=\"False\" />"
syntax: "public abstract PagedIterable<VaultInner> listBySubscription(Integer top, Context context)"
returns:
description: "list of vaults."
type: "<xref href=\"com.azure.core.http.rest.PagedIterable?alt=com.azure.core.http.rest.PagedIterable&text=PagedIterable\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.azure.resourcemanager.keyvault.fluent.models.VaultInner?alt=com.azure.resourcemanager.keyvault.fluent.models.VaultInner&text=VaultInner\" data-throw-if-not-resolved=\"False\" />>"
type: "method"
metadata: {}
package: "com.azure.resourcemanager.keyvault.fluent"
artifact: com.azure.resourcemanager:azure-resourcemanager-keyvault:2.2.0
|
docs-ref-autogen/com.azure.resourcemanager.keyvault.fluent.VaultsClient.listBySubscription.yml
|
---
upcoming_changes:
-
location: Migration.uploadUrlTemplate
description: "`uploadUrlTemplate` será removido. Use `uploadUrl` em vez disso."
reason: "'uploadUrlTemplate' está sendo removido porque não é uma URL padrão e adiciona um passo extra do usuário."
date: '2019-04-01T00:00:00+00:00'
criticality: breaking
owner: tambling
-
location: Organization.members
description: "`members` serão removidos. Use `Organization.membersWithRole` em vez disso."
reason: O campo de 'members' está obsoleto e será removido em breve.
date: '2019-04-01T00:00:00+00:00'
criticality: breaking
owner: xuorig
-
location: ContributionsCollection.firstIssueContribution.ignoreTimeRange
description: "`ignoreTimeRange` will be removed. Use a `ContributionsCollection` starting sufficiently far back"
reason: ignore_time_range será removido
date: '2019-07-01T00:00:00+00:00'
criticality: breaking
owner: seejohnrun
-
location: ContributionsCollection.firstPullRequestContribution.ignoreTimeRange
description: "`ignoreTimeRange` will be removed. Use a `ContributionsCollection` starting sufficiently far back"
reason: ignore_time_range será removido
date: '2019-07-01T00:00:00+00:00'
criticality: breaking
owner: seejohnrun
-
location: ContributionsCollection.firstRepositoryContribution.ignoreTimeRange
description: "`ignoreTimeRange` will be removed. Use a `ContributionsCollection` starting sufficiently far back"
reason: ignore_time_range será removido
date: '2019-07-01T00:00:00+00:00'
criticality: breaking
owner: seejohnrun
-
location: ContributionsCollection.joinedGitHubContribution.ignoreTimeRange
description: "`ignoreTimeRange` will be removed. Use a `ContributionsCollection` starting sufficiently far back"
reason: ignore_time_range será removido
date: '2019-07-01T00:00:00+00:00'
criticality: breaking
owner: seejohnrun
-
location: ReviewDismissedEvent.message
description: "`message` will be removed. Use `dismissalMessage` instead."
    reason: "`message` is being removed because it is not nullable, whereas the underlying field is optional."
date: '2019-07-01T00:00:00+00:00'
criticality: breaking
owner: nickvanw
-
location: ReviewDismissedEvent.messageHtml
description: "`messageHtml` will be removed. Use `dismissalMessageHTML` instead."
    reason: "`messageHtml` is being removed because it is not nullable, whereas the underlying field is optional."
date: '2019-07-01T00:00:00+00:00'
criticality: breaking
owner: nickvanw
-
location: ContributionOrder.field
description: "'field' será removido. Apenas um campo de pedido é suportado."
reason: "`field` será removido."
date: '2019-10-01T00:00:00+00:00'
criticality: breaking
owner: dinahshi
-
location: Issue.timeline
description: "`timeline` será removido. Use Issue.timelineItems em vez disso."
reason: "`timeline` será removido"
date: '2019-10-01T00:00:00+00:00'
criticality: breaking
owner: mikesea
-
location: Organization.pinnedRepositories
description: "`pinnedRepositories` será removido. Use ProfileOwner.pinnedItems em vez disso."
reason: pinnedRepositories será removido
date: '2019-10-01T00:00:00+00:00'
criticality: breaking
owner: cheshire137
-
location: PullRequest.timeline
description: "`timeline` será removido. Use PullRequest.timelineItems em vez disso."
reason: "`timeline` será removido"
date: '2019-10-01T00:00:00+00:00'
criticality: breaking
owner: mikesea
-
location: RepositoryOwner.pinnedRepositories
description: "`pinnedRepositories` será removido. Use ProfileOwner.pinnedItems em vez disso."
reason: pinnedRepositories será removido
date: '2019-10-01T00:00:00+00:00'
criticality: breaking
owner: cheshire137
-
location: User.pinnedRepositories
description: "`pinnedRepositories` será removido. Use ProfileOwner.pinnedItems em vez disso."
reason: pinnedRepositories será removido
date: '2019-10-01T00:00:00+00:00'
criticality: breaking
owner: cheshire137
|
translations/pt-BR/data/graphql/2.18/graphql_upcoming_changes.public-enterprise.yml
|
---
http_interactions:
- request:
method: post
uri: https://aps.fidor.de/beneficiaries
body:
encoding: UTF-8
string: '{"account_id":"91318832","unique_name":"jj_doey","contact":{"name":"<NAME>","address_line_1":"Contact Address 1","address_line_2":"Contact Address
2","city":"Contact City","country":"Contact Country"},"bank":{"name":"Bank
Name","address_line_1":"Bank Address 1","address_line_2":"Bank Address 2","city":"Bank
City","country":"Bank Country"},"routing_type":"UAE_DOMESTIC","routing_info":{"account_number":"AE070331234567890123456","swift_code":"ARABAEADSHJ"}}'
headers:
Authorization:
- Bearer <PASSWORD>
Accept:
- application/vnd.fidor.de; version=1,text/json
Content-Type:
- application/json
response:
status:
code: 201
message:
headers:
x-frame-options:
- SAMEORIGIN
x-xss-protection:
- 1; mode=block
x-content-type-options:
- nosniff
x-fidor-confirmation-path:
- "/fidor_api/confirmable/actions/0de1b9c4-d821-41e2-a753-cf63bab56d66"
content-type:
- application/json; charset=utf-8
x-ratelimit-limit:
- '60'
x-ratelimit-remaining:
- '59'
x-ratelimit-reset:
- '119'
cache-control:
- no-store, must-revalidate, private, max-age=0
x-request-id:
- 43bbf8ef-50ee-4177-a6bd-5f79d5298266
x-runtime:
- '0.190157'
set-cookie:
- __profilin=p%3Dt; path=/, __profilin=p%3Dt; path=/, __profilin=p%3Dt; path=/
x-miniprofiler-ids:
- '["78j9poesmtgqdritpvw7","e0geagr997lhneqvy9jo","io7mgmtts1q8xexel0f1","hgs7uxullw051c6v3phk","by3rv24d7ooy4ogvoz6z","k6kopttd2oxkz6ckqylu","fengurtqi9pdwbzt2fr9","m4wodja7mafrlizeypmx","c4id4svfm3hjtnlog1v4","4e5er1y1zmalmtlo705a"]'
connection:
- close
server:
- thin
body:
encoding: UTF-8
string: '{"id":"9e6155e9-9b6f-464f-a14f-4db44e2ea1e2","account_id":"91318832","unique_name":"jj_doey","contact":{"name":"<NAME>","address_line_1":"Contact Address 1","address_line_2":"Contact Address
2","city":"Contact City","country":"Contact Country"},"bank":{"name":"Bank
Name","address_line_1":"Bank Address 1","address_line_2":"Bank Address 2","city":"Bank
City","country":"Bank Country"},"routing_type":"UAE_DOMESTIC","routing_info":{"account_number":"AE070331234567890123456","swift_code":"ARABAEADSHJ"},"verified":false,"created_at":"2016-09-08T10:13:02Z","updated_at":"2016-09-08T10:13:02Z"}'
http_version:
recorded_at: Thu, 08 Sep 2016 10:13:02 GMT
recorded_with: VCR 2.9.3
|
spec/fixtures/vcr_cassettes/beneficiary/uae_domestic/save_success.yml
|
jobs:
deploy:
executor: gcp-gke/default
parameters:
cluster-name:
description: "Cluster name"
type: string
environment:
default: "staging"
description: Environment to deploy to
type: string
steps:
- checkout
- add_ssh_keys
- gcp-gke/install
- gcp-gke/update-kubeconfig-with-credentials:
cluster: <<parameters.cluster-name>>
perform-login: true
- helm/install-helm-client:
version: v3.2.4
- run:
name: Install helm-helmfile
command: |
wget https://github.com/roboll/helmfile/releases/download/v0.138.7/helmfile_linux_amd64
mv helmfile_linux_amd64 helmfile
chmod +x helmfile
sudo wget https://github.com/mozilla/sops/releases/download/v3.6.1/sops-v3.6.1.linux -O /usr/local/bin/sops
sudo chmod +x /usr/local/bin/sops
SKIP_SOPS_INSTALL=true helm plugin install https://github.com/jkroepke/helm-secrets
curl -L https://github.com/databus23/helm-diff/releases/download/v3.1.3/helm-diff-linux.tgz | tar -C /home/circleci/.local/share/helm/plugins -xzv
export GOOGLE_APPLICATION_CREDENTIALS="/home/circleci/gcloud-service-key.json"
./helmfile --environment <<parameters.environment>> apply
test:
executor: node/default
parameters:
environment:
default: "staging"
description: Environment to test on
type: string
steps:
- checkout
- node/install-packages
- run: npm run test:<<parameters.environment>>
version: 2.1
orbs:
helm: circleci/helm@1.2.0
gcp-gke: circleci/gcp-gke@1.2.0
node: circleci/node@4.2.0
workflows:
deployment-staging:
jobs:
- deploy:
name: staging-deployment
cluster-name: ipfs-cluster
environment: staging
filters: &filters-staging
branches:
only: main
tags:
ignore: /.*/
- test:
requires:
- staging-deployment
filters:
<<: *filters-staging
deployment-production:
jobs:
- deploy:
name: prod-deployment
cluster-name: ipfs-cluster
environment: prod
type: approval
filters: &filters-production
branches:
only: main
tags:
ignore: /.*/
- test:
requires:
- prod-deployment
filters:
<<: *filters-production
|
.circleci/config.yml
|
version: "3.7"
# https://docs.docker.com/compose/compose-file/compose-file-v3/
services:
nginx:
image: nginx:alpine
# https://hub.docker.com/_/nginx
volumes:
- website:/var/www
- ./config/website_nginx.conf:/etc/nginx/templates/nginx.conf.template
# UNCOMMENT IF USING NGINX FOR SSL TERMINATION
# - ./config/yourdomain.com.crt:/etc/nginx/certs/yourdomain.com.crt
# - ./config/yourdomain.com.key:/etc/nginx/certs/yourdomain.com.key
ports:
# CHANGE TO "80:80" IF USING NGINX FOR SSL TERMINATION
- "9080:80"
# UNCOMMENT IF USING NGINX FOR SSL TERMINATION
# - "443:443"
environment:
- NGINX_ENVSUBST_OUTPUT_DIR=/etc/nginx
# https://nginx.org/en/docs/ngx_core_module.html#worker_processes
- NGINX_WORKER_PROCESSES=auto
# https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile
- NGINX_WORKER_RLIMIT_NOFILE=65535
# https://nginx.org/en/docs/ngx_core_module.html#worker_connections
- NGINX_WORKER_CONNECTIONS=65535
# https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size
- NGINX_CLIENT_MAX_BODY_SIZE=10m
# https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout
- NGINX_CLIENT_BODY_TIMEOUT=30s
- NGINX_HOST=yourdomain.test www.yourdomain.test
- GIT_REPO_NAME=GIT_REPO_NAME
# UNCOMMENT IF USING NGINX FOR SSL
# - NGINX_SSL_CERT=yourdomain.test.crt
# - NGINX_SSL_KEY=yourdomain.test.key
githugo:
image: zeigren/githugo:latest
# https://hub.docker.com/r/zeigren/githugo
volumes:
- website:/git
# UNCOMMENT IF USING A REPO WITH SSH
# - ./config/ssh_private:/root/.ssh/id_rsa
# - ./config/ssh_public:/root/.ssh/id_rsa.pub
# - ./config/remote_ssh:/root/.ssh/known_hosts
environment:
- REPO=LINK_TO_GIT_REPO
- FOLDER=GIT_REPO_NAME
deploy:
restart_policy:
# UNCOMMENT DELAY AND COMMENT CONDITION IF YOU WANT IT TO PERIODICALLY REBUILD
condition: none
# delay: 4h
volumes:
website:
|
docker-compose.yml
|
version: "2"
services:
app:
image: eugenmayer/rundeck:2.11.3
labels:
traefik.enable: true
traefik.port: 4440
traefik.frontend.rule: ${TRAEFIK_FRONTEND_RULE}
traefik.acme: ${TRAEFIK_FRONTEND_HTTPS_ENABLE}
io.rancher.container.create_agent: 'true'
io.rancher.container.agent.role: 'environment'
io.rancher.container.pull_image: always
depends_on:
- db
environment:
DB_TYPE: postgresql
DB_HOST: db
DB_PORT: 5432
DB_NAME: rundeckdb
DB_USER: rundeck
DB_PASSWORD: ${DB_PASSWORD}
EXTERNAL_SERVER_URL: ${EXTERNAL_SERVER_URL}
RUNDECK_ADMIN_PASSWORD: ${RUNDECK_ADMIN_PASSWORD}
RDECK_JVM_SETTINGS: "${RDECK_JVM_SETTINGS}"
RUNDECK_STORAGE_PROVIDER: ${RUNDECK_STORAGE_PROVIDER}
RUNDECK_PROJECT_STORAGE_TYPE: ${RUNDECK_PROJECT_STORAGE_TYPE}
RUNDECK_WITH_SSL: ${RUNDECK_WITH_SSL}
# this will ensure RD will not try to connect from the outer URL, keep it intern and use RD_URL
# see https://rundeck.github.io/rundeck-cli/configuration/
RD_BYPASS_URL: ${EXTERNAL_SERVER_URL}
{{- if eq .Values.RUNDECK_WITH_SSL "true" }}
RD_URL: http://localhost:4443
SERVER_URL: http://localhost:4443
{{- else}}
RD_URL: http://localhost:4440
SERVER_URL: http://localhost:4440
{{- end }}
RD_USER: admin
RD_PASSWORD: ${RUNDECK_ADMIN_PASSWORD}
volumes:
- rundeck_plugins:/opt/rundeck-plugins
- rundeck_config:/etc/rundeck
- rundeck_logs:/var/log/rundeck
- rundeck_logs_jobs:/var/lib/rundeck/logs
- rundeck_storage:/var/lib/rundeck/var/storage
- rundeck_tmp:/var/rundeck
db:
image: postgres:9.6
volumes:
- rundeck_db:/var/lib/postgresql
environment:
POSTGRES_USER: ${DB_USER}
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_DB: rundeckdb
labels:
io.rancher.container.pull_image: always
volumes:
rundeck_config:
driver: local
rundeck_tmp:
driver: local
rundeck_storage:
driver: local
rundeck_logs:
driver: local
rundeck_logs_jobs:
driver: local
rundeck_plugins:
driver: local
rundeck_db:
driver: local
|
templates/rundeck/16/docker-compose.yml
|
---
AWSTemplateFormatVersion: '2010-09-09'
Description: The Wort Pipeline
Parameters:
GitHubToken:
NoEcho: 'true'
Type: String
    Description: Secret. An OAuth token with access to the repo; it might look something like
      9b189a1654643522561f7b3ebd44a1531a4287af. Go to https://github.com/settings/tokens
GitHubUser:
Type: String
Description: GitHub UserName
Default: krwenholz
Repo:
Type: String
    Description: GitHub Repo to pull from. Only the name, not the URL.
Default: wort
Branch:
Type: String
    Description: Branch to use from Repo. Only the name, not the URL.
Default: master
Resources:
PipelineRole:
Type: AWS::IAM::Role
Properties:
Path: "/"
ManagedPolicyArns:
# TODO: create policy resources separately and then attach them here
- arn:aws:iam::aws:policy/AdministratorAccess
AssumeRolePolicyDocument:
Statement:
- Effect: Allow
Principal:
Service:
- codepipeline.amazonaws.com
Action: sts:AssumeRole
ProductionRole:
Type: AWS::IAM::Role
Properties:
Path: "/"
ManagedPolicyArns:
# TODO: create policy resources separately and then attach them here
- arn:aws:iam::aws:policy/AdministratorAccess
AssumeRolePolicyDocument:
Statement:
- Effect: Allow
Principal:
Service:
- cloudformation.amazonaws.com
Action: sts:AssumeRole
ArtifactStoreBucket:
Type: AWS::S3::Bucket
Properties:
VersioningConfiguration:
Status: Enabled
AccessControl: BucketOwnerFullControl
AppPipeline:
DependsOn:
- PipelineRole
Type: AWS::CodePipeline::Pipeline
Properties:
RoleArn: !GetAtt PipelineRole.Arn
ArtifactStore:
Type: S3
Location: !Ref ArtifactStoreBucket
Stages:
- Name: Source
Actions:
- InputArtifacts: []
Name: Source
ActionTypeId:
Category: Source
Owner: ThirdParty
Version: '1'
Provider: GitHub
OutputArtifacts:
- Name: SourceOutput
Configuration:
Owner: !Ref GitHubUser
Repo: !Ref Repo
Branch: !Ref Branch
OAuthToken: !Ref GitHubToken
RunOrder: 1
- Name: Build
Actions:
- Name: Build
ActionTypeId:
Category: Build
Owner: AWS
Provider: CodeBuild
Version: 1
InputArtifacts:
- Name: SourceOutput
OutputArtifacts:
- Name: BuildOutput
Configuration:
ProjectName: !Ref BuildProject
RunOrder: 1
- Name: Production
Actions:
- Name: PromoteEnvironment
ActionTypeId:
Category: Deploy
Owner: AWS
Version: '1'
Provider: CloudFormation
InputArtifacts:
- Name: BuildOutput
- Name: SourceOutput
OutputArtifacts: []
Configuration:
ActionMode: REPLACE_ON_FAILURE
RoleArn: !GetAtt ProductionRole.Arn
StackName: !Sub ${AWS::StackName}ProductionStack
TemplatePath: BuildOutput::stack.yaml
Capabilities: CAPABILITY_IAM
ParameterOverrides: |
{
"LambdaSourceBucket": {"Fn::GetArtifactAtt": ["BuildOutput", "BucketName"]},
"LambdaJarKey": {"Fn::GetArtifactAtt": ["BuildOutput", "ObjectKey"]}
}
RunOrder: 1
BuildProject:
Type: AWS::CodeBuild::Project
Properties:
TimeoutInMinutes: 10
Artifacts:
Type: CODEPIPELINE
Environment:
ComputeType: BUILD_GENERAL1_SMALL
Image: clojure:latest
Type: LINUX_CONTAINER
Name: !Sub ${AWS::StackName}BuildProject
ServiceRole: !Ref BuildRole
Source:
Type: CODEPIPELINE
BuildRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Version: "2012-10-17"
Statement:
Effect: Allow
Principal:
Service: codebuild.amazonaws.com
Action: sts:AssumeRole
ManagedPolicyArns:
- arn:aws:iam::aws:policy/AdministratorAccess
Outputs:
CodePipelineURL:
Value:
Fn::Join:
- ''
- - https://console.aws.amazon.com/codepipeline/home?region=
- Ref: AWS::Region
- "#/view/"
- Ref: AppPipeline
|
pipeline-definition.yaml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2019-05-10 22:18"
game: "Unreal Tournament"
name: "DM-Ghetto"
author: "Graham \"Erlkoenig\" Justice"
description: "Welcome to the Ghetto. Got what is takes to survive?"
releaseDate: "1999-11"
attachments:
- type: "IMAGE"
name: "DM-Ghetto_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/G/DM-Ghetto_shot_1.png"
- type: "IMAGE"
name: "DM-Ghetto_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/G/DM-Ghetto_shot_2.png"
originalFilename: "dm-ghetto.zip"
hash: "64714cb770b014b6b87f5bdfa3d69dd5a4b21e0a"
fileSize: 7626942
files:
- name: "Citytex.utx"
fileSize: 939520
hash: "46811cff13ac3a8aa535019e8ebaa657d332e99d"
- name: "richrig.utx"
fileSize: 2234223
hash: "e2115772791315c91cdf6ab99e687d2f94e5685d"
- name: "SGTech1.utx"
fileSize: 7953567
hash: "ec01e4c9011a8cb2d08ebd12d255373f83d98fc1"
- name: "DM-Ghetto.unr"
fileSize: 2419210
hash: "29581c2ed05797445aa1b2b139782369050a3950"
otherFiles: 2
dependencies:
DM-Ghetto.unr:
- status: "OK"
name: "richrig"
- status: "OK"
name: "Citytex"
- status: "OK"
name: "SGTech1"
downloads:
- url: "http://www.unrealplayground.com/forums/downloads.php?do=file&id=4012"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/DeathMatch/G/6/4/714cb7/dm-ghetto.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/DeathMatch/G/6/4/714cb7/dm-ghetto.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/DeathMatch/G/6/4/714cb7/dm-ghetto.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DM-Ghetto"
playerCount: "6-12"
themes:
Tech: 0.4
Skaarj Crypt: 0.1
Industrial: 0.3
Skaarj Tech: 0.3
bots: true
|
content/Unreal Tournament/Maps/DeathMatch/G/6/4/714cb7/dm-ghetto_[64714cb7].yml
|
items:
- uid: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._fcfdb29bfda41c2f73ceb92cfeca1469
id: _fcfdb29bfda41c2f73ceb92cfeca1469
parent: com.microsoft.azure.management.mediaservices.v2018__03__30__preview
children:
- com.microsoft.azure.management.mediaservices.v2018__03__30__preview._fcfdb29bfda41c2f73ceb92cfeca1469.withPriority(Priority)
href: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._fcfdb29bfda41c2f73ceb92cfeca1469.yml
langs:
- java
name: Job.DefinitionStages.WithPriority
nameWithType: Job.DefinitionStages.WithPriority
fullName: com.microsoft.azure.management.mediaservices.v2018_03_30_preview.Job.DefinitionStages.WithPriority
type: Interface
source:
remote: &o0
path: mediaservices/v2018_03_30_preview/src/main/java/com/microsoft/azure/management/mediaservices/v2018_03_30_preview/Job.java
branch: mgmt-generated
repo: https://github.com/Azure/azure-sdk-for-java
path: mediaservices/v2018_03_30_preview/src/main/java/com/microsoft/azure/management/mediaservices/v2018_03_30_preview/Job.java
startLine: 135
package: com.microsoft.azure.management.mediaservices.v2018__03__30__preview
summary: "<p>The stage of the job definition allowing to specify Priority. </p>"
syntax:
content: public interface WithPriority
- uid: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._fcfdb29bfda41c2f73ceb92cfeca1469.withPriority(Priority)
id: withPriority(Priority)
parent: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._fcfdb29bfda41c2f73ceb92cfeca1469
href: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._fcfdb29bfda41c2f73ceb92cfeca1469.yml
langs:
- java
name: withPriority(Priority priority)
nameWithType: Job.DefinitionStages.WithPriority.withPriority(Priority priority)
fullName: com.microsoft.azure.management.mediaservices.v2018_03_30_preview.Job.DefinitionStages.WithPriority.withPriority(Priority priority)
overload: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._fcfdb29bfda41c2f73ceb92cfeca1469.withPriority*
type: Method
source:
remote: *o0
path: mediaservices/v2018_03_30_preview/src/main/java/com/microsoft/azure/management/mediaservices/v2018_03_30_preview/Job.java
startLine: 139
package: com.microsoft.azure.management.mediaservices.v2018__03__30__preview
summary: "<p>Specifies priority. </p>"
syntax:
content: public WithCreate withPriority(Priority priority)
parameters:
- id: priority
type: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._priority
return:
type: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._a7c2dbee9c3351c75f6b2e4cf2c277e1
references:
- uid: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._fcfdb29bfda41c2f73ceb92cfeca1469.withPriority*
name: withPriority
nameWithType: Job.DefinitionStages.WithPriority.withPriority
fullName: com.microsoft.azure.management.mediaservices.v2018_03_30_preview.Job.DefinitionStages.WithPriority.withPriority
package: com.microsoft.azure.management.mediaservices.v2018__03__30__preview
- uid: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._a7c2dbee9c3351c75f6b2e4cf2c277e1
parent: com.microsoft.azure.management.mediaservices.v2018__03__30__preview
href: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._a7c2dbee9c3351c75f6b2e4cf2c277e1.yml
name: Job.DefinitionStages.WithCreate
nameWithType: Job.DefinitionStages.WithCreate
fullName: com.microsoft.azure.management.mediaservices.v2018_03_30_preview.Job.DefinitionStages.WithCreate
type: Interface
summary: <p>The stage of the definition which contains all the minimum required inputs for the resource to be created (via <xref uid="" data-throw-if-not-resolved="false">WithCreate#create()</xref>), but also allows for any other optional settings to be specified. </p>
syntax:
content: public interface WithCreate extends Job.DefinitionStages.WithDescription,Job.DefinitionStages.WithPriority
- uid: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._priority
parent: com.microsoft.azure.management.mediaservices.v2018__03__30__preview
href: com.microsoft.azure.management.mediaservices.v2018__03__30__preview._priority.yml
name: Priority
nameWithType: Priority
fullName: com.microsoft.azure.management.mediaservices.v2018_03_30_preview.Priority
type: Enum
summary: <p>Defines values for Priority. </p>
syntax:
content: public enum Priority
|
docs-ref-autogen/com.microsoft.azure.management.mediaservices.v2018__03__30__preview._fcfdb29bfda41c2f73ceb92cfeca1469.yml
|
---
# Defines deployment design and assigns role to server groups
- hosts:
- iscsigws
- iscsi-gws # for backward compatibility only!
gather_facts: false
any_errors_fatal: true
become: true
tags: always
vars:
delegate_facts_host: True
pre_tasks:
# If we can't get python2 installed before any module is used we will fail
# so just try what we can to get it installed
- import_tasks: raw_install_python.yml
- name: gather facts
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when:
- not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- name: gather and delegate facts
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
delegate_facts: True
with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}"
run_once: true
when: delegate_facts_host | bool
tasks:
- import_role:
name: ceph-prerun
- import_role:
name: ceph-defaults
- import_role:
name: ceph-facts
- import_role:
name: ceph-validate
- import_role:
name: ceph-infra
- import_role:
name: ceph-common
- hosts:
- iscsigws
- iscsi-gws # for backward compatibility only!
gather_facts: false
become: True
any_errors_fatal: true
pre_tasks:
- name: set ceph iscsi gateway install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_iscsi_gw:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-iscsi-gw
post_tasks:
- name: set ceph iscsi gw install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_iscsi_gw:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
|
iscsi.yml
|
kind: DaemonSet
image:
repository: registry.gitlab.com/arm-research/smarter/edge-workloads/plugin-nvme-temp
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
updateStrategy:
type: RollingUpdate
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations:
node.kubernetes.io/bootstrap-checkpoint: "true"
securityContext:
privileged: true
#service:
# type: ClusterIP
# port: 80
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. Default limits and requests are set below; adjust them as
  # necessary for your environment.
limits:
cpu: 100m
memory: 16Mi
requests:
cpu: 10m
memory: 16Mi
autoscaling:
enabled: false
# minReplicas: 1
# maxReplicas: 100
# targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector:
resource.nvme: "true"
tolerations: []
affinity: {}
volumeMounts:
- name: dev-dir
mountPath: /dev
volumes:
- name: dev-dir
hostPath:
path: /dev
env:
- name: SAGE_DEBUG
value: "enabled"
- name: NVME_DEVICE
value: "/dev/nvme0"
- name: POLL_PERIOD
value: "60"
- name: WAGGLE_PLUGIN_HOST
value: wes-rabbitmq
- name: WAGGLE_PLUGIN_PORT
value: "5672"
- name: WAGGLE_PLUGIN_NAME
value: "plugin-nvme-temp"
- name: WAGGLE_PLUGIN_VERSION
value: "0.1.6"
- name: MQTT_BROKER_HOST
value: "fluent-bit"
- name: RABBITMQ_EXCHANGE
value: to-beehive
- name: WAGGLE_HOST_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: RABBITMQ_USERNAME
valueFrom:
secretKeyRef:
key: username
name: wes-rabbitmq-service-account-secret
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: wes-rabbitmq-service-account-secret
envFrom:
- configMapRef:
name: waggle-config
|
helm/values.yaml
|
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: "{{ template "pd.tidb.fullname" . }}"
labels:
app: "{{ template "pd.tidb.fullname" . }}"
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
spec:
serviceName: "{{ template "pd.tidb.fullname" . }}"
replicas: {{ default 3 .Values.pd.replicaCount }}
updateStrategy:
type: RollingUpdate
template:
metadata:
name: {{ template "tidb.fullname" . }}
labels:
heritage: "{{ .Release.Service }}"
release: "{{ .Release.Name }}"
app: {{ template "tidb.fullname" . }}
component: "{{ template "pd.tidb.fullname" . }}"
spec:
containers:
- name: "{{ template "pd.tidb.fullname" . }}"
image: "{{ .Values.pd.image }}"
imagePullPolicy: "{{ .Values.pd.imagePullPolicy }}"
env:
- name: INITIAL_CLUSTER_SIZE
          value: {{ default 3 .Values.pd.replicaCount | quote }}
- name: SET_NAME
value: "{{ template "pd.tidb.fullname" . }}"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- "/bin/sh"
- "-ec"
- |
HOSTNAME=$(hostname)
PEERS=""
for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:{{ .Values.pd.service.PeerPort }}"
done
/pd-server --name=${HOSTNAME} \
--client-urls=http://0.0.0.0:{{ .Values.pd.service.ClientPort }} \
--peer-urls=http://0.0.0.0:{{ .Values.pd.service.PeerPort }} \
--advertise-client-urls=http://$(MY_POD_IP):{{ .Values.pd.service.ClientPort }} \
--advertise-peer-urls=http://${HOSTNAME}.${SET_NAME}:{{ .Values.pd.service.PeerPort }} \
--initial-cluster ${PEERS}
ports:
- containerPort: {{ .Values.pd.service.ClientPort }}
name: pd-server
protocol: TCP
- containerPort: {{ .Values.pd.service.PeerPort }}
name: peer
protocol: TCP
resources:
{{ toYaml .Values.pd.resources | indent 10 }}
|
tidb/templates/pd-statefulset.yaml
|
- position: 1
driverNumber: 36
driverId: stefan-johansson
constructorId: onyx
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "1:18.623"
gap:
interval:
laps:
- position: 2
driverNumber: 30
driverId: philippe-alliot
constructorId: lola
engineManufacturerId: lamborghini
tyreManufacturerId: goodyear
time: "1:19.164"
gap: "+0.541"
interval: "+0.541"
laps:
- position: 3
driverNumber: 31
driverId: roberto-moreno
constructorId: coloni
engineManufacturerId: ford
tyreManufacturerId: pirelli
time: "1:19.780"
gap: "+1.157"
interval: "+0.616"
laps:
- position: 4
driverNumber: 29
driverId: michele-alboreto
constructorId: lola
engineManufacturerId: lamborghini
tyreManufacturerId: goodyear
time: "1:19.869"
gap: "+1.246"
interval: "+0.089"
laps:
- position: 5
driverNumber: 37
driverId: jj-lehto
constructorId: onyx
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "1:20.880"
gap: "+2.257"
interval: "+1.011"
laps:
- position: 6
driverNumber: 18
driverId: piercarlo-ghinzani
constructorId: osella
engineManufacturerId: ford
tyreManufacturerId: pirelli
time: "1:21.021"
gap: "+2.398"
interval: "+0.141"
laps:
- position: 7
driverNumber: 33
driverId: oscar-larrauri
constructorId: eurobrun
engineManufacturerId: judd
tyreManufacturerId: pirelli
time: "1:21.326"
gap: "+2.703"
interval: "+0.305"
laps:
- position: 8
driverNumber: 40
driverId: gabriele-tarquini
constructorId: ags
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "1:21.881"
gap: "+3.258"
interval: "+0.555"
laps:
- position: 9
driverNumber: 35
driverId: aguri-suzuki
constructorId: zakspeed
engineManufacturerId: yamaha
tyreManufacturerId: pirelli
time: "1:24.116"
gap: "+5.493"
interval: "+2.235"
laps:
- position: 10
driverNumber: 34
driverId: bernd-schneider
constructorId: zakspeed
engineManufacturerId: yamaha
tyreManufacturerId: pirelli
time: "1:24.732"
gap: "+6.109"
interval: "+0.616"
laps:
- position: 11
driverNumber: 32
driverId: enrico-bertaggia
constructorId: coloni
engineManufacturerId: ford
tyreManufacturerId: pirelli
time: "1:28.526"
gap: "+9.903"
interval: "+3.794"
laps:
- position: EX
driverNumber: 41
driverId: yannick-dalmas
constructorId: ags
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "1:19.320"
gap: "+0.697"
interval:
laps:
- position: EX
driverNumber: 17
driverId: nicola-larini
constructorId: osella
engineManufacturerId: ford
tyreManufacturerId: pirelli
time: "1:22.688"
gap: "+4.065"
interval:
laps:
|
src/data/seasons/1989/races/13-portugal/pre-qualifying-results.yml
|
api_name: []
items:
- children:
- azure.mgmt.datafactory.models.PolybaseSettingsRejectType.percentage
- azure.mgmt.datafactory.models.PolybaseSettingsRejectType.value
class: azure.mgmt.datafactory.models.PolybaseSettingsRejectType
fullName: azure.mgmt.datafactory.models.PolybaseSettingsRejectType
inheritance:
- inheritance:
- type: builtins.object
type: enum.Enum
langs:
- python
module: azure.mgmt.datafactory.models
name: PolybaseSettingsRejectType
source:
id: PolybaseSettingsRejectType
path: azure-mgmt-datafactory\azure\mgmt\datafactory\models\data_factory_management_client_enums.py
remote:
branch: master
path: azure-mgmt-datafactory\azure\mgmt\datafactory\models\data_factory_management_client_enums.py
repo: https://github.com/Azure/azure-sdk-for-python.git
startLine: 354
summary: 'An enumeration.
'
syntax: {}
type: class
uid: azure.mgmt.datafactory.models.PolybaseSettingsRejectType
- fullName: azure.mgmt.datafactory.models.PolybaseSettingsRejectType.percentage
id: percentage
langs:
- python
module: azure.mgmt.datafactory.models
name: percentage
parent: azure.mgmt.datafactory.models.PolybaseSettingsRejectType
syntax:
content: percentage = 'percentage'
return:
type:
- azure.mgmt.datafactory.models.PolybaseSettingsRejectType
type: attribute
uid: azure.mgmt.datafactory.models.PolybaseSettingsRejectType.percentage
- fullName: azure.mgmt.datafactory.models.PolybaseSettingsRejectType.value
id: value
langs:
- python
module: azure.mgmt.datafactory.models
name: value
parent: azure.mgmt.datafactory.models.PolybaseSettingsRejectType
syntax:
content: value = 'value'
return:
type:
- azure.mgmt.datafactory.models.PolybaseSettingsRejectType
type: attribute
uid: azure.mgmt.datafactory.models.PolybaseSettingsRejectType.value
references:
- fullName: azure.mgmt.datafactory.models.PolybaseSettingsRejectType.percentage
isExternal: false
name: percentage
parent: azure.mgmt.datafactory.models.PolybaseSettingsRejectType
uid: azure.mgmt.datafactory.models.PolybaseSettingsRejectType.percentage
- fullName: azure.mgmt.datafactory.models.PolybaseSettingsRejectType.value
isExternal: false
name: value
parent: azure.mgmt.datafactory.models.PolybaseSettingsRejectType
uid: azure.mgmt.datafactory.models.PolybaseSettingsRejectType.value
|
docs-ref-autogen/azure-mgmt-datafactory/azure.mgmt.datafactory.models.PolybaseSettingsRejectType.yml
|
Urbem\CoreBundle\Entity\Compras\Mapa:
type: entity
table: compras.mapa
repositoryClass: Urbem\CoreBundle\Repository\Patrimonio\Compras\MapaRepository
id:
exercicio:
type: string
generator:
strategy: NONE
codMapa:
type: integer
generator:
strategy: NONE
column: cod_mapa
fields:
codObjeto:
type: integer
nullable: false
column: cod_objeto
timestamp:
type: datetimemicrosecondpk
nullable: false
codTipoLicitacao:
type: integer
nullable: false
column: cod_tipo_licitacao
manyToOne:
fkComprasObjeto:
targetEntity: Urbem\CoreBundle\Entity\Compras\Objeto
inversedBy: fkComprasMapas
joinColumns:
cod_objeto:
referencedColumnName: cod_objeto
fkComprasTipoLicitacao:
targetEntity: Urbem\CoreBundle\Entity\Compras\TipoLicitacao
inversedBy: fkComprasMapas
joinColumns:
cod_tipo_licitacao:
referencedColumnName: cod_tipo_licitacao
oneToMany:
fkComprasCompraDiretas:
cascade:
- persist
- remove
orphanRemoval: true
targetEntity: Urbem\CoreBundle\Entity\Compras\CompraDireta
mappedBy: fkComprasMapa
fkComprasMapaCotacoes:
cascade:
- persist
- remove
orphanRemoval: true
targetEntity: Urbem\CoreBundle\Entity\Compras\MapaCotacao
mappedBy: fkComprasMapa
fkComprasMapaSolicitacoes:
cascade:
- persist
- remove
orphanRemoval: true
targetEntity: Urbem\CoreBundle\Entity\Compras\MapaSolicitacao
mappedBy: fkComprasMapa
fkLicitacaoLicitacoes:
cascade:
- persist
- remove
orphanRemoval: true
targetEntity: Urbem\CoreBundle\Entity\Licitacao\Licitacao
mappedBy: fkComprasMapa
oneToOne:
fkComprasMapaModalidade:
cascade:
- persist
- remove
targetEntity: Urbem\CoreBundle\Entity\Compras\MapaModalidade
mappedBy: fkComprasMapa
joinColumns:
exercicio:
referencedColumnName: exercicio
cod_mapa:
referencedColumnName: cod_mapa
fkTceamConfiguracaoArquivoLicitacao:
cascade:
- persist
- remove
targetEntity: Urbem\CoreBundle\Entity\Tceam\ConfiguracaoArquivoLicitacao
mappedBy: fkComprasMapa
joinColumns:
exercicio:
referencedColumnName: exercicio
cod_mapa:
referencedColumnName: cod_mapa
|
src/Urbem/CoreBundle/Resources/config/doctrine/Compras.Mapa.orm.yml
|
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: awscli-eks-cluster-create
namespace: tekton-pipelines
spec:
description: |
Create an EKS cluster.
    This Task can be used to create an EKS cluster for given VPC subnets, security groups and a service role in an AWS account, and to write a kubeconfig to a desired
    location that can be used by other tasks (in a context with kubectl) to make requests to the cluster.
params:
- name: cluster-name
      description: The name of the EKS cluster you want to spin up.
- name: kubernetes-version
default: "1.21"
description: The EKS version to install.
- name: region
default: "us-west-2"
description: The region where the cluster is in.
- name: endpoint
default: ""
description: "aws eks enpoint to create clusters against"
- name: subnets
description: list of subnet ids of the VPC for eks cluster.
default: subnet-08e09533840c2e713,subnet-0387141ab3c555cf7,subnet-049c59ad6d2a3c8f3,subnet-0b407d4568173c8d9,subnet-0cc45316116d910e4,subnet-0344ce6cdfef574c1
- name: securitygroups
description: list of securitygroup ids as a string of the associated VPC
default: sg-03e8016a74d88416c
- name: servicerole
description: servicerole arn to be used for eks cluster
default: arn:aws:iam::197575167141:role/eks-1-16-hakuna-04-12-202-AWSServiceRoleForAmazonE-21BHME9G7CUD
workspaces:
- name: config
description: |
A workspace into which a kubeconfig file called `kubeconfig` will be written that will contain the information required to access the cluster. The `kubeconfig` will expect to use [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator/) to authenticate, so in order for it to be used it must be run in a container which contains both `kubectl` and `aws-iam-authenticator`.
steps:
- name: write-kubeconfig
image: alpine/k8s:1.22.6
script: |
echo "Approving KCM requests"
kubectl certificate approve $(kubectl get csr | grep "Pending" | awk '{print $1}') 2>/dev/null || true
ENDPOINT_FLAG=""
if [ -n "$(params.endpoint)" ]; then
ENDPOINT_FLAG="--endpoint $(params.endpoint)"
fi
CREATED_CLUSTER=$(aws eks $ENDPOINT_FLAG list-clusters --region $(params.region) --query 'clusters[?@==`'$(params.cluster-name)'`]' --output text )
echo "CREATED_CLUSTER=$CREATED_CLUSTER"
if [ "$CREATED_CLUSTER" == "" ]; then
aws eks create-cluster --name $(params.cluster-name) --region $(params.region) --kubernetes-version $(params.kubernetes-version) --role-arn $(params.servicerole) --resources-vpc-config subnetIds=$(params.subnets),securityGroupIds=$(params.securitygroups) $ENDPOINT_FLAG
fi
aws eks $ENDPOINT_FLAG --region $(params.region) wait cluster-active --name $(params.cluster-name)
aws eks $ENDPOINT_FLAG update-kubeconfig --name $(params.cluster-name) --region $(params.region)
cp /root/.kube/config $(workspaces.config.path)/kubeconfig
|
tests/tasks/setup/eks/awscli-cp.yaml
|
service: site
frameworkVersion: ">=1.2.0 <2.0.0"
custom:
s3Bucket: ${env:FAS_SITE_S3BUCKET}
apiEndpointHost: ${env:FAS_API_ENDPOINT_HOST}
servicesStage: api
provider:
name: aws
region: eu-west-1
resources:
Resources:
WebAppS3Bucket:
Type: AWS::S3::Bucket
Properties:
BucketName: ${self:custom.s3Bucket}
AccessControl: PublicRead
WebAppS3BucketPolicy:
Type: 'AWS::S3::BucketPolicy'
Properties:
Bucket:
Ref: WebAppS3Bucket
PolicyDocument:
Statement:
- Action:
- 's3:GetObject'
Effect: 'Allow'
Principal:
CanonicalUser: !GetAtt CfOriginAccessIdentity.S3CanonicalUserId
Resource: arn:aws:s3:::${self:custom.s3Bucket}/*
CfOriginAccessIdentity:
Type: "AWS::CloudFront::CloudFrontOriginAccessIdentity"
Properties:
CloudFrontOriginAccessIdentityConfig:
Comment: 'Access S3 bucket content only through CloudFront'
WebAppCloudFrontDistribution:
Type: AWS::CloudFront::Distribution
Properties:
DistributionConfig:
Enabled: 'true'
DefaultRootObject: index.html
CustomErrorResponses:
- ErrorCode: 404
ResponseCode: 200
ResponsePagePath: /index.html
Origins:
- Id: WebApp
DomainName: ${self:custom.s3Bucket}.s3.amazonaws.com
S3OriginConfig:
OriginAccessIdentity:
!Join
- ''
- - 'origin-access-identity/cloudfront/'
- Ref: CfOriginAccessIdentity
- Id: WebApi
DomainName: ${self:custom.apiEndpointHost}
CustomOriginConfig:
HTTPPort: 80
HTTPSPort: 443
OriginProtocolPolicy: https-only
DefaultCacheBehavior:
AllowedMethods:
- GET
- HEAD
ForwardedValues:
QueryString: 'true'
Cookies:
Forward: none
TargetOriginId: WebApp
ViewerProtocolPolicy: redirect-to-https
CacheBehaviors:
- AllowedMethods:
- DELETE
- GET
- HEAD
- OPTIONS
- PATCH
- POST
- PUT
TargetOriginId: WebApi
PathPattern: /${self:custom.servicesStage}/*
DefaultTTL: 0
MinTTL: 0
MaxTTL: 0
ForwardedValues:
QueryString: 'true'
Headers:
- Accept
- Referer
- Authorization
- Content-Type
ViewerProtocolPolicy: https-only
|
src/site/serverless.yml
|
interactions:
- request:
body: '{"createTransactionModel": {"companyCode": "DEFAULT", "type": "SalesOrder",
"lines": [{"quantity": 3, "amount": "15.00", "taxCode": "PC040156", "taxIncluded":
false, "itemCode": "123", "description": "Test product"}, {"quantity": 1, "amount":
"10.000", "taxCode": "FR020100", "taxIncluded": false, "itemCode": "Shipping",
"description": null}], "code": "8206d21f-1ec8-4f04-85fd-211f29e20757", "date":
"2021-01-25", "customerCode": 0, "addresses": {"shipFrom": {"line1": "T\u0119czowa
7", "line2": "", "city": "WROC\u0141AW", "region": "", "country": "PL", "postalCode":
"53-601"}, "shipTo": {"line1": "O\u0142awska 10", "line2": "", "city": "WROC\u0141AW",
"region": "", "country": "PL", "postalCode": "53-105"}}, "commit": false, "currencyCode":
"USD", "email": ""}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Authorization:
- Basic Og==
Connection:
- keep-alive
Content-Length:
- '767'
User-Agent:
- python-requests/2.25.1
method: POST
uri: https://rest.avatax.com/api/v2/transactions/createoradjust
response:
body:
string: !!binary |
H4sIAAAAAAAEA+1XTW/jOAz9K4XPTWE7H21zy6ZToECQFE26A8xiDorNJJpxbK8kd5od9L/Poyyl
dtPsdL962iAHmyIpko96or8HMg2G4WmQFCkFw+AiDgdpHK06ESUXnd4q7HUu+qu0E0fRKr6kODzv
nwesvS1FvruBbdyLL8/7p0EqDDuIwzjqhFEn7kOtFLst5ebqlSVthKk0DBa0LQsl1A76Zleyj7nI
SM9USgqypTDJZlxHxztXSlGe7Jzkfn4FIT0mG5Gv6Q4bjV9TSCptii2pey3WtKh3YbvcSLO719T0
X2v+SnlaKCcP7cb1QkNEj4jdTAuEDAVFSZEnMiMUZSUyTadBViTCyCJ/dq8IyRqZrycHS2WlkIUm
m/iz0xVxvo0INVenJKWbfk1hRDbaFlVuAEn/DIha0QcbIhDeS66kTmq1Z9lCPAbD/hnDaK3wLpYZ
oGh4gmwssqTKUGNkWGuL9AsqyxjfkUBAqMS0MCMrhZatwNdGQR4QNuoRDKM2aB9WK0qMfKDXWqWJ
Liw5E9oKmdVl3xapXElKDywXUXfYvRx247Mo6g7OB/EnxOO1gbmy3XvRjS5jpC0eDx2E4dD+2TCT
OaFdf/vuD4xRItcCQRc5O0JQrDKttku07TCIYPP2rktJJ0qW7Ay2C9LmpFRFWiUGblKHmIfX4lb3
3iuiMSnjAmr3p9Q3hrZ7aI2q0KESItedUdx1eXqvEfoIef1eCXtOgmGXa69oFdWlx1Psn1xbHxQR
LlFbNu1xd9WN1dzACpudtdd0cd2Ow14Y9Qe1JxZyeoNBfAS2Wu8mT7IqbbReSgY9cwzCCbBzVTsE
1p4XENQwuJ3Au6J1jZN9ewWIL5WS2kfPFlYwFVsmt9vZZDRl0mIGpJHWcp1T6s+71XQENZ4uvG0q
baP5BT6/ljDzIneA+pLa5lCWcMOzuBvi9yYEnOtZZcqKmw5IzaslS7kswawWuRzmRuSpUOkJE269
MqrMplBgU2/i4D5EloNzu3k/cOGlrmxziKpcmtnqF6El3xO3pDy132MB61JPi/xWaL3YqGrPuVJf
E+rsGHjPt65MbApv3NkMpOta+Gmv11X0yzWJOtP2Esx8b++9QXaYdSbFUmauPkhnTlkGonj6fBoA
RM7CbFRRrTdX+z7F0sa3EdJNCm1ucl2BdxK6ViTXG3C9DedBmH27zTtAfdzp2ObEQs1IHpXw6fSt
DBbznm++N9+JpOYbWZa4PhEb063v+ghlaFOVvSb+HlXFZ91DprJ1Nm1kvaKr/fVdGIdRyMMCKzqi
6p1f9KzgCDX+T1To4RZR+bLyCNIE2FbRMcd/ylM2AE9IfFYd3QFYL30nmmqTzT+kqTotT2ovWMou
vi9JXR8nKVCfSFNFWv9s6FriLsRVtJvQA/FE+EmW/OXB5MBTyiwT3/RXcRLxqWShG1j4sYt1SBOQ
Mp4+3s3Gk9FHxtjf73guQboic2j3u50oZO8vBgKc9jtrw1dlHPajEKMJ5mRpMIHUm2RFvt6//ikH
/yShBSV/FN/ECX+F/Qv5DEI7qLqBwg04fzkfwKWr7ZY/4nhCflGefT3tldSacPaDzNtGJgTWmjOC
IU8ZQOhwkILqcargxkD9oHNkoPHHHIqNww/95mcRK/EF3CIv9xXVvihqIe75hfdgD3Y9OtpZEu9P
n59+ABdpKdKODwAA
headers:
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json; charset=utf-8
Date:
- Mon, 25 Jan 2021 13:39:32 GMT
Location:
- /api/v2/companies/242975/transactions/0
Server:
- Kestrel
ServerDuration:
- '00:00:00.0173888'
Transfer-Encoding:
- chunked
Vary:
- Accept-Encoding
status:
code: 201
message: Created
version: 1
|
saleor/plugins/avatax/tests/cassettes/test_calculate_checkout_total_uses_default_calculation[True-21.99-27.74-5.0-False].yaml
|
---
maximum-69-number.c:
topics:
- greedy
- math
difficulty: easy
first-missing-positive.c:
topics:
- array
- hash table
- negative painting
difficulty: hard
climbing-stairs.c:
topics:
- math
- dynamic programming
- memoization
difficulty: easy
first-unique-character-in-a-string.c:
topics:
- string
- hash table
- queue
- counting
difficulty: easy
reverse-linked-list.c:
topics:
- linked list
- recursion
- important
difficulty: easy
reverse-integer.c:
topics:
- math
difficulty: easy
remove-element.c:
topics:
- array
- two pointers
difficulty: easy
max-consecutive-ones.c:
topics:
- array
difficulty: easy
squares-of-a-sorted-array.c:
topics:
- array
- sorting
- two pointers
difficulty: easy
valid-mountain-array.c:
topics:
- array
difficulty: easy
number-of-islands.c:
topics:
- array
- dfs
- bfs
- union find
- matrix
difficulty: medium
uniqe-paths.c:
topics:
- math
- dynamic programming
- combinatorics
difficulty: medium
unique-paths-II.c:
topics:
- array
- dynamic programming
- matrix
difficulty: medium
longest-substring-without-repeating-characters.c:
topics:
- string
- hash table
- sliding window
difficulty: medium
convert-binary-number-in-a-linked-list-to-integer.c:
topics:
- linked list
- math
difficulty: easy
roman-to-integer.c:
topics:
- math
- string
- hash table
difficulty: easy
add-two-numbers.c:
topics:
- linked list
- math
- hash table
difficulty: medium
palindrome-number.c:
topics:
- math
difficulty: easy
longest-common-prefix.c:
topics:
- string
difficulty: easy
cells-with-odd-values-in-a-matrix.c:
topics:
- array
- math
- simulation
difficulty: easy
rotate-image.c:
topics:
- array
- math
- matrix
difficulty: medium
move-zeroes.c:
topics:
- array
- two pointers
difficulty: easy
find-numbers-with-even-number-of-digits.c:
topics:
- array
difficulty: easy
remove-duplicates-from-sorted-array.c:
topics:
- array
- two pointers
difficulty: easy
number-of-good-pairs.cpp:
topics:
- array
- hash table
- math
- counting
difficulty: easy
valid-parentheses.cpp:
topics:
- stack
- string
difficulty: easy
kids-with-the-greatest-number-of-candies.cpp:
topics:
- array
difficulty: easy
number-of-1-bits.cpp:
topics:
- bit manipulation
difficulty: easy
two-sum.cpp:
topics:
- array
- hash table
- important
difficulty: easy
find-all-duplicates-in-an-array.cpp:
topics:
- array
- hash table
- negative painting
difficulty: easy
shuffle-the-array.cpp:
topics:
- array
difficulty: easy
check-if-n-and-its-double-exist.cpp:
topics:
- array
- hash table
- two pointers
- binary search
- sorting
difficulty: easy
running-sum-of-1d-array.cpp:
topics:
- array
- prefix sum
difficulty: easy
powers-of-two.cpp:
topics:
- math
- bit manipulation
- recursion
difficulty: easy
island-perimeter.cpp:
topics:
- array
- bfs
- dfs
- matrix
difficulty: easy
invert-binary-tree.cpp:
topics:
- tree
- binary tree
- bfs
- dfs
- important
difficulty: easy
longest-substring-without-repeating-characters.ts:
topics:
- string
- hash table
- sliding window
difficulty: medium
|
leetcode/index.yaml
|
kind: List
apiVersion: v1
items:
- apiVersion: v1
kind: ImageStream
metadata:
name: branchprotector
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/branchprotector:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: boskoscli
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-staging-boskos/boskosctl:v20210203-17d4b61
- apiVersion: v1
kind: ImageStream
metadata:
name: cherrypicker
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/cherrypicker:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: clonerefs
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/clonerefs:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: crier
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/crier:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: deck
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/deck:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: entrypoint
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/entrypoint:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: gcsupload
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/gcsupload:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: hook
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/hook:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: horologium
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/horologium:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: initupload
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/initupload:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: jenkins-operator
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/jenkins-operator:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: needs-rebase
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/needs-rebase:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: refresh
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/refresh:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: sidecar
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/sidecar:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: sinker
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/sinker:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: tide
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/tide:v20210206-3c06bdc6cf
- apiVersion: v1
kind: ImageStream
metadata:
name: tot
namespace: ci
spec:
lookupPolicy:
local: true
tags:
- name: latest
from:
kind: DockerImage
name: gcr.io/k8s-prow/tot:v20210206-3c06bdc6cf
|
core-services/prow/03_deployment/adapter_imagestreams.yaml
|
name: ProcessedEvent
uid: azure-devops-extension-api.ProcessedEvent
package: azure-devops-extension-api
summary: ''
fullName: ProcessedEvent
isPreview: false
isDeprecated: false
type: interface
properties:
- name: actors
uid: azure-devops-extension-api.ProcessedEvent.actors
package: azure-devops-extension-api
summary: All of the users that were associated with this event and their role.
fullName: actors
isPreview: false
isDeprecated: false
syntax:
content: 'actors: EventActor[]'
return:
type: '<xref uid="azure-devops-extension-api.EventActor" />[]'
description: ''
- name: allowedChannels
uid: azure-devops-extension-api.ProcessedEvent.allowedChannels
package: azure-devops-extension-api
summary: ''
fullName: allowedChannels
isPreview: false
isDeprecated: false
syntax:
content: 'allowedChannels: string'
return:
type: string
description: ''
- name: artifactUri
uid: azure-devops-extension-api.ProcessedEvent.artifactUri
package: azure-devops-extension-api
summary: ''
fullName: artifactUri
isPreview: false
isDeprecated: false
syntax:
content: 'artifactUri: string'
return:
type: string
description: ''
- name: deliveryIdentities
uid: azure-devops-extension-api.ProcessedEvent.deliveryIdentities
package: azure-devops-extension-api
summary: ''
fullName: deliveryIdentities
isPreview: false
isDeprecated: false
syntax:
content: 'deliveryIdentities: ProcessingIdentities'
return:
type: <xref uid="azure-devops-extension-api.ProcessingIdentities" />
description: ''
- name: evaluations
uid: azure-devops-extension-api.ProcessedEvent.evaluations
package: azure-devops-extension-api
summary: Evaluations for each user
fullName: evaluations
isPreview: false
isDeprecated: false
syntax:
content: 'evaluations: [key: string]: SubscriptionEvaluation'
return:
type: >-
[key: string]\: <xref
uid="azure-devops-extension-api.SubscriptionEvaluation" />
description: ''
- name: eventId
uid: azure-devops-extension-api.ProcessedEvent.eventId
package: azure-devops-extension-api
summary: ''
fullName: eventId
isPreview: false
isDeprecated: false
syntax:
content: 'eventId: number'
return:
type: number
description: ''
- name: exclusions
uid: azure-devops-extension-api.ProcessedEvent.exclusions
package: azure-devops-extension-api
summary: >-
Which members were excluded from evaluation (only applies to ActorMatcher
subscriptions)
fullName: exclusions
isPreview: false
isDeprecated: false
syntax:
content: 'exclusions: EventActor[]'
return:
type: '<xref uid="azure-devops-extension-api.EventActor" />[]'
description: ''
- name: inclusions
uid: azure-devops-extension-api.ProcessedEvent.inclusions
package: azure-devops-extension-api
summary: >-
Which members were included for evaluation (only applies to ActorMatcher
subscriptions)
fullName: inclusions
isPreview: false
isDeprecated: false
syntax:
content: 'inclusions: EventActor[]'
return:
type: '<xref uid="azure-devops-extension-api.EventActor" />[]'
description: ''
- name: notifications
uid: azure-devops-extension-api.ProcessedEvent.notifications
package: azure-devops-extension-api
summary: ''
fullName: notifications
isPreview: false
isDeprecated: false
syntax:
content: 'notifications: GeneratedNotification[]'
return:
type: '<xref uid="azure-devops-extension-api.GeneratedNotification" />[]'
description: ''
|
docs-ref-autogen/azure-devops-extension-api/ProcessedEvent.yml
|
title: Résumé
name: <NAME>
jobtitle: Data Analyst
contact:
- icon: fa-envelope
text: <EMAIL>
- icon: fa-phone-square
text: (480) 306-3520
- icon: fa-globe
text: your-website.com
link: https://www.linkedin.com/in/zhixin-wang-642405149/
- icon: fa-github
text: your-github
link: https://github.com/detarblasigt
education:
- university: Arizona State University
duration: Aug 2019 - May 2020
location: Tempe, AZ
major: Master of Science in Business Analytics
- university: Arizona State University
duration: Aug 2016 - May 2019
location: Tempe, AZ
major: Bachelor of Science in Computer Information Systems, 3.8 GPA
skills:
- title: Data Mining
items: Python-numpy, pandas, sklearn
- title: Machine Learning
items: Decision Tree, Logistic Regression, SVM, Random Forest, KNN
- title: Web & Visualization
items: C#, HTML, CSS, JavaScript, Tableau
experience:
- title: Software Engineer Assistant
duration: May 2019 - Jul 2019
company: CISDI Chongqing Information Technology Co., Ltd.
  description: Participated in the optimization of the converter intelligent steelmaking system to improve its functionality; Achieved real-time video image acquisition using C# based on the Pylon Camera software package tools and SDK; Connected the Oracle database to a Windows application and generated SQL scripts to retrieve and store data; Optimized the user interface and used the DataGridView control to edit tabular data and store it in the Oracle database; Created a control chart display interface and a program to generate reports using a C# Windows application
projects:
- name: Credit Card Fraud Detection - Kaggle
description:
contribution: Individual project
- name: Database Architecture Project
  description: Designed, developed and implemented a relational database to provide enhanced business value for the organization; Mastered the entity-relationship framework to create entity relationship diagrams (ERDs) for business rules; Generated standard SQL and complex scripts to construct databases and retrieve data for operational needs; Standardized database application security, integrity, complexity and specialization of labor
contribution: Team developer
- name: Feedback Forum Web Development Project
  description: Built customized websites for Feedback Forum and transferred data between multiple Web forms; Applied skills in HTML, CSS, JavaScript and jQuery to develop and test the website; Utilized the Bootstrap framework for user interface design to enhance functionality; Demonstrated knowledge of project management techniques to manage an information technology project
contribution: Team developer
languages:
- name: English
proficiency: Work proficiency
- name: Mandarin Chinese
proficiency: Native
|
_data/resume.yml
|
- name: Installing Prerequisites for Kubernetes
apt:
name: apt-transport-https
state: present
- name: Configure module for CRIO
copy:
src: crio.conf
dest: /etc/modules-load.d/crio.conf
mode: 0644
- name: "sysctl"
shell: "sysctl --system"
- name: Put kubernetes-cri Conf file
copy:
src: 99-kubernetes-cri.conf
dest: /etc/sysctl.d/99-kubernetes-cri.conf
mode: 0644
- name: Creates directory
file:
path: /etc/cni/net.d
state: directory
- name: Put 10-crio.conf template
template:
src: 10-crio.conf.j2
dest: /etc/cni/net.d/10-crio.conf
    mode: "0777"
- name: Setting OS Variable for CRIO
shell: cat /etc/os-release | grep VERSION_ID | sed 's/"//g' | cut -d'=' -f2
register: os
changed_when: False
- set_fact:
OS: "xUbuntu_{{ os.stdout }}"
- name: Getting Version for CRIO
shell: echo {{ kube_version }} | cut -d'.' -f1,2
register: version
changed_when: False
- set_fact:
VERSION: "{{ version.stdout }}"
- name: Get CRIO apt-key
apt_key:
url: https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{{ OS }}/Release.key
state: present
- name: Get CRIO apt-key
apt_key:
url: https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:{{ VERSION }}/{{ OS }}/Release.key
state: present
- name: Add CRIO stable repo
apt_repository:
repo: deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{{ OS }}/ /
state: present
filename: cri-o-stable
- name: Add CRIO stable repo
apt_repository:
repo: deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/{{ VERSION }}/{{ OS }}/ /
state: present
filename: cri-o-{{ VERSION }}
- name: Install Required Packages
apt:
name: "{{ packages }}"
update_cache: yes
state: present
vars:
packages:
- cri-o
- cri-tools
- cri-o-runc
- runc
- name: Configuring cgroup driver
copy:
src: 02-cgroup-manager.conf
dest: /etc/crio/crio.conf.d/02-cgroup-manager.conf
mode: 0644
- name: "Put storage template"
template:
src: storage.conf.j2
dest: /etc/containers/storage.conf
mode: 0777
- name: Restart CRIO
systemd:
state: restarted
enabled: yes
daemon_reload: yes
name: crio
|
roles/container-runtime/tasks/crio.yml
|
name: build
on:
push:
branches:
- main
- release-v*
pull_request:
workflow_dispatch:
env:
GOX_OUTPUT: "release/{{.Arch}}/{{.OS}}/{{.Dir}}"
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
gh_ci_key: ${{ secrets.GH_CI_KEY }}
jobs:
mac-os-build:
name: Build Mac OS binaries
runs-on: macos-10.15
steps:
- name: Git Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: '~1.17.1'
- name: Install Ziti CI
uses: netfoundry/ziti-ci@v1
- name: Build and Test
run: |
pip install --upgrade pip
pip install awscli
go get github.com/mitchellh/gox
$(go env GOPATH)/bin/ziti-ci generate-build-info common/version/info_generated.go version
CGO_ENABLED=true $(go env GOPATH)/bin/gox -os=darwin -arch=amd64 -output=$GOX_OUTPUT ./...
aws s3 sync --no-progress release/ s3://ziti-cmd-build-tmp/${{ github.run_id }}
windows-build:
name: Build Windows binaries
runs-on: windows-2019
steps:
- name: Git Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: '~1.17.1'
- name: Install Ziti CI
uses: netfoundry/ziti-ci@v1
- name: Build and Test
shell: bash
run: |
choco install python --version 3.6.8
python -m pip install --upgrade pip
pip install awscli
go get github.com/mitchellh/gox
$(go env GOPATH)/bin/ziti-ci generate-build-info common/version/info_generated.go version
CGO_ENABLED=true $(go env GOPATH)/bin/gox -os=windows -arch=amd64 -output=$GOX_OUTPUT ./...
aws s3 sync --no-progress release/ s3://ziti-cmd-build-tmp/${{ github.run_id }}
tag-and-publish:
name: Build Linux Binaries, Tag and Publish
runs-on: ubuntu-18.04
needs: [ mac-os-build, windows-build ]
steps:
- name: Git Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: '~1.17.1'
- name: Install Python
uses: actions/setup-python@v2
with:
python-version: '3.7'
- name: Install Ziti CI
uses: netfoundry/ziti-ci@v1
- name: Build and Test
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
sudo apt-get -yq install gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf
python -m pip install --upgrade pip
pip install awscli
$(go env GOPATH)/bin/ziti-ci configure-git
$(go env GOPATH)/bin/ziti-ci generate-build-info common/version/info_generated.go version
go get github.com/mitchellh/gox
CGO_ENABLED=true $(go env GOPATH)/bin/gox -os=linux -arch=amd64 -output=$GOX_OUTPUT ./...
          CC=arm-linux-gnueabihf-gcc CGO_ENABLED=true $(go env GOPATH)/bin/gox -cgo -os=linux -arch=arm -output=$GOX_OUTPUT ./...
aws s3 sync --no-progress s3://ziti-cmd-build-tmp/${{ github.run_id }} release/
aws s3 rm --recursive s3://ziti-cmd-build-tmp/${{ github.run_id }}
- name: Publish
if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release-v')
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
$(go env GOPATH)/bin/ziti-ci tag -v -f version
$(go env GOPATH)/bin/ziti-ci publish-to-github
- name: Trigger Smoketest
env:
JFROG_API_KEY: ${{ secrets.JFROG_API_KEY }}
jenkins_user: ${{ secrets.JENKINS_USER }}
jenkins_user_token: ${{ secrets.JENKINS_USER_TOKEN }}
jenkins_job_token: ${{ secrets.JENKINS_JOB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
$(go env GOPATH)/bin/ziti-ci publish-to-artifactory
$(go env GOPATH)/bin/ziti-ci trigger-jenkins-smoke-build
|
.github/workflows/main.yml
|
version: 2
jobs:
build:
docker:
- image: circleci/golang:1.16
steps:
- checkout
- restore_cache:
key: dependency-cache-{{ checksum "go.sum" }}
- run:
name: Download dependencies
command: go mod download
- save_cache:
key: dependency-cache-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
- run:
name: Linting
command: |
go get -u golang.org/x/lint/golint
golint -set_exit_status
- setup_remote_docker
- run:
name: Run tests without license
command: |
# Spin up environment
ACCEPT_EULA=yes docker-compose -f docker-compose.only-engine.yml up -d
CONTAINER_ID=$(docker ps -aqf "name=qix-engine_1")
TEST_HOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.Gateway}}{{end}}' "$CONTAINER_ID")
# Start a container for test execution
TEST_CONTAINER=$(docker run -e TEST_HOST=$TEST_HOST -e CGO_ENABLED=0 -d golang:1.15-alpine tail -f /dev/null)
# Copy dependencies and code into container
docker cp /go/pkg $TEST_CONTAINER:/go/pkg
docker cp . $TEST_CONTAINER:/core-using-licenses
            # Sleep for 10s to make sure the engine is fully up.
sleep 10s
# Execute tests
docker exec $TEST_CONTAINER sh -c 'cd /core-using-licenses && go test -timeout 30s ./test/no_license_test.go ./test/utils_test.go -count=1'
# Bring down the docker-compose and test container
docker-compose -f docker-compose.only-engine.yml down
docker kill $TEST_CONTAINER
- run:
name: Run tests with license
command: |
# Spin up environment
ACCEPT_EULA=yes docker-compose -f docker-compose.engine-and-license-service.yml up -d
CONTAINER_ID=$(docker ps -aqf "name=qix-engine_1")
TEST_HOST=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.Gateway}}{{end}}' "$CONTAINER_ID")
# Start a container for test execution
TEST_CONTAINER=$(docker run -e TEST_HOST=$TEST_HOST -e CGO_ENABLED=0 -d golang:1.15-alpine tail -f /dev/null)
# Copy dependencies and code into container
docker cp /go/pkg $TEST_CONTAINER:/go/pkg
docker cp . $TEST_CONTAINER:/core-using-licenses
# Sleep for 10s to make sure the license service has contacted the backend.
sleep 10s
# Execute tests
docker exec $TEST_CONTAINER sh -c 'cd /core-using-licenses && go test -timeout 30s ./test/with_license_test.go ./test/utils_test.go -count=1'
# Bring down the docker-compose
docker-compose -f docker-compose.engine-and-license-service.yml down
docker kill $TEST_CONTAINER
|
.circleci/config.yml
|
baseURL: "https://ahmedoumhella.github.io/portfolio"
languageCode: "en-us"
title: "<NAME>"
theme: "./.."
relativeURLs: true
googleanalytics: UA-xxxxxx-x
disqusShortname:
Paginate: 3
markup:
goldmark:
renderer:
unsafe: true
taxonomies:
tag: "tags"
category: "categories"
enableEmoji: true
params:
version: 3
customCSS: false
favicon: "/images/img.jpg"
description: "personal portfolio"
# Navbar Menus
customMenus:
# home
disable_home: false
profile_image: "/images/img.jpg"
name: "<NAME>"
namecolor: ffc107
profession: "Fullstack developer JAVA JEE/ANGULAR"
github: "https://github.com/ahmedoumhella"
linkedin: "https://www.linkedin.com/in/ahmed-oumhella-a676619b/"
twitter: "https://twitter.com/Aoumhella1"
instagram: "https://www.instagram.com/oumhellaahmed/"
facebook: "https://web.facebook.com/ahmed.jhjkllhhgjahmed"
# About
disable_about: false
descripe_l1_person: "Hi, I'm <NAME>, Young engineer, aged 24, study at the National School of Applied Sciences of Agadir (ENSA), specializing in Computer Engineering.
with more than 1 years of experience in analysis and programming of web projects targeting technologies such as: JAVA / JEE, Angular, Spring ... adopting Agile work methodologies"
descripe_l2_person: "you can take a look on my [GitHub Account](https://github.com/ahmedoumhella)"
skills:
- name: Backend(JAVA/JEE, JPA/HIBERNATE ,SPRING)
percentage: 90
- name: Frontend(Angular, JavaScript, Bootstrap, Html5, Css3)
percentage: 85
- name: SGBD(MySQL, Oracle, H2, MongoDB)
percentage: 80
- name: Web-Services(Rest, Soap)
percentage: 80
- name: Devops(GIT, Docker, SonarQube, Jenkins)
percentage: 70
- name: Analyse/Conception(UML, Merise)
percentage: 70
# Experience
disable_experience: false
experience:
- company: "Oxyliom Solutions"
id: "oxyliom-developer"
role: "Software developer"
startdate: Aug 2020
enddate: present
featuredpoints:
- point: Participation in the design and implementation of new features.
- company: "Oxyliom Solutions"
id: "oxyliom-training"
role: "traineeship"
startdate: Feb 2020
enddate: Aug 2020
featuredpoints:
- point: Implementation of strong authentication solutions applied on CIAM systems.
# Education
disable_education: false
education:
- degree: "Engineer"
branch: "Computer Engineering"
university: "National School of Applied Sciences(Ibn Zohr university)"
startedyear: "2015"
graduation: "2020"
- degree: "baccalaureate"
branch: "Mathematical sciences-A"
university: "Hassan II"
startedyear: "2015"
graduation: "2015"
# Projects
disable_projects: true
projects:
- name: "ENSAINO"
img: /images/project.jpg
githublink: "https://github.com/"
featuredlink: "/"
description: "Design and production of a web and mobile application for the management of reservations."
# Achievements
disable_achievements: false
achievements:
    - title: Hackathon winner
      description: 2nd prize at the screenDay hackathon organized at ENSA AGADIR.
img: /images/achiv1.jpg
url: https://facebook.com/hackandpitch
- title: Competition winner
description: 1st prize in the local robotics competition at ENSA AGADIR.
img: /images/achiv2.jpg
url: https://facebook.com/crrtensa
# Contact
disable_contact: false
contact_content: My inbox is always open. Whether you have a question or just want to say hi, I'll try my best to get back to you!
email: "<EMAIL>"
# Footer
socialiconfooter: true
recentposts: true
# Blog
showdescription: false
tweet: false
|
website/config.yaml
|
version: "3"
services:
cert_gen:
container_name: "cert-gen"
image: paulczar/omgwtfssl
volumes:
- certs:/certs
labels:
- "traefik.enable=false"
traefik:
build: traefik/
container_name: "traefik"
networks:
- default
- inside
ports:
- 80:80
- 443:443
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- certs:/certs
- logs:/logs
labels:
- "traefik.frontend.rule=PathPrefixStrip:/api"
- "traefik.frontend.auth.basic.usersFile=/etc/traefik/.htpasswd"
- "traefik.port=8081"
templates:
    build: templates/
container_name: "templates"
networks:
- inside
labels:
- "traefik.enable=false"
public-files:
image: aikain/simplehttpserver:0.1
container_name: "public-files"
volumes:
- files:/var/www/
- logs:/var/www/logs
networks:
- inside
labels:
- "traefik.sec.frontend.rule=PathPrefixStrip:/files/logs"
- "traefik.sec.port=80"
- "traefik.sec.frontend.auth.basic.users=admin:$$apr1$$JWufnu2u$$jK16K8EczmfIBDk5p3xw6/"
- "traefik.nosec.frontend.rule=PathPrefixStrip:/files"
- "traefik.nosec.port=80"
portainer:
image: portainer/portainer
container_name: "portainer-app"
networks:
- inside
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /opt/portainer/data:/data
command: --host=unix:///var/run/docker.sock --logo "https://i.postimg.cc/h4w2WjZT/rcbanner.png" --templates "http://templates/templates.yml"
labels:
- "traefik.frontend.rule=PathPrefixStrip:/portainer"
- "traefik.port=9000"
- "traefik.passHostHeader=true"
- "traefik.docker.network=redcloud_inside"
- "traefik.backend.loadbalancer.swarm=true"
- "traefik.backend.loadbalancer.method=drr"
# https://github.com/containous/traefik/issues/563#issuecomment-421360934
- "traefik.frontend.redirect.regex=^(.*)/portainer$$"
- "traefik.frontend.redirect.replacement=$$1/portainer/"
- "traefik.frontend.rule=PathPrefix:/portainer;ReplacePathRegex: ^/portainer/(.*) /$$1"
volumes:
certs:
logs:
files:
networks:
inside:
external: false
|
docker-compose.yml
|
resource: system:filter
service: apigwFilter
# Default sensitivity for actions
defaultActionSeverity: notice
# default severity for errors
defaultErrorSeverity: error
import:
- github.com/cortezaproject/corteza-server/system/types
props:
- name: filter
type: "*types.ApigwFilter"
fields: [ ID, ref ]
- name: search
type: "*types.ApigwFilterFilter"
fields: []
actions:
- action: search
log: "searched for filter"
severity: info
- action: lookup
log: "looked-up for a {{filter}}"
severity: info
- action: create
log: "created {{filter}}"
- action: update
log: "updated {{filter}}"
- action: delete
log: "deleted {{filter}}"
- action: undelete
log: "undeleted {{filter}}"
errors:
- error: notFound
message: "filter not found"
severity: warning
- error: invalidID
message: "invalid ID"
severity: warning
- error: invalidRoute
message: "invalid route"
severity: warning
- error: notAllowedToCreate
message: "not allowed to create a filter"
log: "failed to create a route; insufficient permissions"
- error: notAllowedToRead
message: "not allowed to read this filter"
log: "failed to read {{filter}}; insufficient permissions"
- error: notAllowedToUpdate
message: "not allowed to update this filter"
log: "failed to update {{filter}}; insufficient permissions"
- error: notAllowedToDelete
message: "not allowed to delete this filter"
log: "failed to delete {{filter}}; insufficient permissions"
- error: notAllowedToUndelete
message: "not allowed to undelete this filter"
log: "failed to undelete {{filter}}; insufficient permissions"
- error: asyncRouteTooManyProcessers
message: "processer already exists for this async route"
log: "failed to add {{filter}}; too many processers, async route"
- error: asyncRouteTooManyAfterFilters
message: "no after filters are allowd for this async route"
log: "failed to add {{filter}}; too many afterfilters, async route"
|
system/service/apigw_filter_actions.yaml
|
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: contrailmonitors.contrail.juniper.net
spec:
group: contrail.juniper.net
names:
kind: Contrailmonitor
listKind: ContrailmonitorList
plural: contrailmonitors
singular: contrailmonitor
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: Contrailmonitor is the Schema for the contrailmonitors API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ContrailmonitorSpec defines the desired state of Contrailmonitor
properties:
serviceConfiguration:
description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
Important: Run "operator-sdk generate k8s" to regenerate code after
modifying this file Add custom validation using kubebuilder tags:
https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html'
properties:
cassandraInstance:
type: string
commandInstance:
type: string
configInstance:
type: string
controlInstance:
type: string
keystoneInstance:
type: string
memcachedInstance:
type: string
postgresInstance:
type: string
provisionmanagerInstance:
type: string
rabbitmqInstance:
type: string
webuiInstance:
type: string
zookeeperInstance:
type: string
type: object
required:
- serviceConfiguration
type: object
status:
description: ContrailmonitorStatus defines the observed state of Contrailmonitor
properties:
active:
description: 'INSERT ADDITIONAL STATUS FIELD - define observed state
of cluster Important: Run "operator-sdk generate k8s" to regenerate
code after modifying this file Add custom validation using kubebuilder
tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html'
type: boolean
name:
type: string
required:
- name
type: object
type: object
served: true
storage: true
subresources:
status: {}
|
deploy/crds/contrail.juniper.net_contrailmonitors_crd.yaml
|
- name: Create directory with mode setting
file:
path: /tmp/postgres
owner: postgres
state: directory
mode: "1755"
tags: [ install, update ]
- name: copy required files to /tmp/postgres
copy:
src: "../../development/postgres/{{ item }}"
dest: /tmp/postgres
owner: postgres
mode: "0644"
loop:
- cQube.sql
- config.sql
- infrastructure_master.csv
- udise_config.csv
- truncate_tables.sql
tags: [ install, update ]
- name: Run queries from SQL script
become: true
become_user: postgres
postgresql_query:
db: "{{ db_name }}"
path_to_script: /tmp/postgres/cQube.sql
tags: [ install, update ]
- name: checking for infra table
shell: psql "host=localhost dbname={{ db_name }} user={{ db_user }} password={{ <PASSWORD> }}" -c "select case when (select count(*) from infrastructure_master)=0 then 0 else 1 end;"
register: output_infra
tags: [ install, update ]
- name: Copying table data from csv file
shell: psql "host=localhost dbname={{ db_name }} user={{ db_user }} password={{ <PASSWORD> }}" -c "copy infrastructure_master(infrastructure_name,infrastructure_category,score,status) from '/tmp/postgres/infrastructure_master.csv' delimiter ',' csv header;"
when: output_infra.stdout_lines[2].strip() == "0"
tags: [ install, update ]
- name: checking for udise table
shell: psql "host=localhost dbname={{ db_name }} user={{ db_user }} password={{ <PASSWORD> }}" -c "select case when (select count(*) from udise_config)=0 then 0 else 1 end;"
register: output_udise
tags: [ install, update ]
- name: Copying table data from csv file
shell: psql "host=localhost dbname={{ db_name }} user={{ db_user }} password={{ db_password }}" -c "copy udise_config(id,description,column_name,type,indice_id,status,score,trans_columns,metric_config,direction) from '/tmp/postgres/udise_config.csv' delimiter '|' csv header;"
when: output_udise.stdout_lines[2].strip() == "0"
tags: [ install, update ]
- name: checking for nifi_template_info table
shell: psql "host=localhost dbname={{ db_name }} user={{ db_user }} password={{ <PASSWORD> }}" -c "select case when (select count(*) from nifi_template_info)=0 then 0 else 1 end;"
register: output_nifi
tags: [ install, update ]
- name: Copying table data from csv file
shell: psql "host=localhost dbname={{ db_name }} user={{ db_user }} password={{ <PASSWORD> }}" -c "copy nifi_template_info(template,status) from '{{ base_dir }}/cqube/datasource.csv' delimiter ',' csv header;"
when: output_nifi.stdout_lines[2].strip() == "0"
tags: [ install, update ]
- name: Run queries from SQL script
become: true
become_user: postgres
postgresql_query:
db: "{{ db_name }}"
path_to_script: /tmp/postgres/config.sql
tags: [ install, update ]
- name: Truncating previous tables
shell: psql "host=localhost dbname={{ db_name }} user={{ db_user }} password={{ <PASSWORD> }}" -a -q -f "/tmp/postgres/truncate_tables.sql"
when: datasource_status == "unmatched"
tags: [ install, update ]
- name: Clearing s3 output bucket data
shell: aws s3 rm s3://{{ s3_output_bucket }} --recursive
when: datasource_status == "unmatched"
tags: [ install, update ]
- name: Stopping postgresql
service:
name: postgresql
state: stopped
tags: [ install, update ]
- name: Starting postgresql
service:
name: postgresql
state: started
tags: [ install, update ]
|
ansible/installation_scripts/roles/createdb/tasks/initiate_infra.yml
|
- image_path: /images/showcase/IMG_0225.jpg
caption: "Sandy Hook, New Jersey"
- image_path: /images/showcase/IMG_0363.jpg
- image_path: /images/showcase/IMG_0686.jpg
- image_path: /images/showcase/IMG_4865.jpg
- image_path: /images/showcase/IMG_2512.jpg
- image_path: /images/showcase/IMG_0996.jpg
- image_path: /images/showcase/IMG_1163.jpg
- image_path: /images/showcase/IMG_9259.jpg
- image_path: /images/showcase/IMG_2662.jpg
- image_path: /images/showcase/IMG_2068.jpg
- image_path: /images/showcase/IMG_4857.jpg
- image_path: /images/showcase/IMG_2536.jpg
- image_path: /images/showcase/IMG_2128.jpg
- image_path: /images/showcase/IMG_2190.jpg
- image_path: /images/showcase/IMG_2624.jpg
- image_path: /images/showcase/IMG_2658.jpg
- image_path: /images/showcase/IMG_3091.jpg
- image_path: /images/showcase/IMG_3502.jpg
- image_path: /images/showcase/IMG_0510.jpg
- image_path: /images/showcase/IMG_3574.jpg
- image_path: /images/showcase/IMG_3678.jpg
- image_path: /images/showcase/IMG_4507.jpg
- image_path: /images/showcase/IMG_4805.jpg
- image_path: /images/showcase/IMG_4868.jpg
- image_path: /images/showcase/IMG_3488.jpg
- image_path: /images/showcase/IMG_5441.jpg
- image_path: /images/showcase/IMG_5461.jpg
- image_path: /images/showcase/IMG_0546.jpg
- image_path: /images/showcase/IMG_5662.jpg
- image_path: /images/showcase/IMG_5817.jpg
- image_path: /images/showcase/IMG_6018.jpg
- image_path: /images/showcase/IMG_0584.jpg
- image_path: /images/showcase/IMG_6102.jpg
- image_path: /images/showcase/IMG_9089.jpg
- image_path: /images/showcase/IMG_3947.jpg
- image_path: /images/showcase/IMG_9223.jpg
- image_path: /images/showcase/IMG_6310.jpg
- image_path: /images/showcase/IMG_6170.jpg
- image_path: /images/showcase/IMG_6204.jpg
- image_path: /images/showcase/IMG_6253.jpg
- image_path: /images/showcase/IMG_0793.jpg
- image_path: /images/showcase/IMG_6384.jpg
- image_path: /images/showcase/IMG_6444.jpg
- image_path: /images/showcase/IMG_6459.jpg
- image_path: /images/showcase/IMG_6924.jpg
- image_path: /images/showcase/IMG_7000.jpg
- image_path: /images/showcase/IMG_4076.jpg
- image_path: /images/showcase/IMG_8710.jpg
- image_path: /images/showcase/IMG_7252.jpg
- image_path: /images/showcase/IMG_7273.jpg
- image_path: /images/showcase/IMG_7276.jpg
- image_path: /images/showcase/IMG_0812.jpg
- image_path: /images/showcase/IMG_7333.jpg
- image_path: /images/showcase/IMG_3401.jpg
- image_path: /images/showcase/IMG_7495.jpg
- image_path: /images/showcase/IMG_8994.jpg
- image_path: /images/showcase/IMG_9226.jpg
- image_path: /images/showcase/IMG_7505.jpg
- image_path: /images/showcase/IMG_7657.jpg
- image_path: /images/showcase/IMG_7914.jpg
- image_path: /images/showcase/IMG_7991.jpg
- image_path: /images/showcase/IMG_8048.jpg
- image_path: /images/showcase/IMG_8562.jpg
- image_path: /images/showcase/IMG_2720.jpg
- image_path: /images/showcase/IMG_0952.jpg
- image_path: /images/showcase/IMG_2795.jpg
- image_path: /images/showcase/IMG_9366.jpg
- image_path: /images/showcase/IMG_6177.jpg
- image_path: /images/showcase/IMG_9305.jpg
|
_data/photography.yml
|
---
name: Push
on:
push:
branches:
- master
tags-ignore:
- '**'
jobs:
lint:
runs-on: ubuntu-latest
steps:
# Install golang
- uses: actions/setup-go@v2
with:
go-version: '^1.13.1'
# Setup gopath
- name: Setting up GOPATH
run: |
echo "GOPATH=${GITHUB_WORKSPACE}/go" >> $GITHUB_ENV
# Checkout to the latest commit
# On specific directory/path
- uses: actions/checkout@v2
with:
path: go/src/github.com/${{github.repository}}
#TODO: Add Dockerfile linting
# Running go-lint
- name: Checking Go-Lint
      run: |
sudo apt-get update && sudo apt-get install golint
cd go/src/github.com/${{github.repository}}
make gotasks
push:
runs-on: ubuntu-latest
steps:
# Install golang
- uses: actions/setup-go@v2
with:
go-version: '^1.13.1'
# Setup gopath
- name: Setting up GOPATH
run: |
echo "GOPATH=${GITHUB_WORKSPACE}/go" >> $GITHUB_ENV
# Checkout to the latest commit
# On specific directory/path
- uses: actions/checkout@v2
with:
path: go/src/github.com/${{github.repository}}
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
version: latest
- name: Build Docker Image
env:
DOCKER_REPO: litmuschaos
DOCKER_IMAGE: go-runner
DOCKER_TAG: ci
run: |
cd go/src/github.com/${{github.repository}}
make experiment-build
- name: Push Docker Image
env:
DOCKER_REPO: litmuschaos
DOCKER_IMAGE: go-runner
DOCKER_TAG: ci
DNAME: ${{ secrets.DNAME }}
DPASS: ${{ secrets.DPASS }}
run: |
cd go/src/github.com/${{github.repository}}
make push
trivy:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: setup trivy
run: |
wget https://github.com/aquasecurity/trivy/releases/download/v0.11.0/trivy_0.11.0_Linux-64bit.tar.gz
tar zxvf trivy_0.11.0_Linux-64bit.tar.gz
make trivy-check
|
.github/workflows/push.yml
|
title: L'iniziativa - PA digitale 2026
name: L'iniziativa - PA digitale 2026
hero:
title: "Soluzioni standard"
body: Per le misure con una platea ampia di beneficiari (oltre 1.000 PA), è prevista una modalità di accesso per soluzioni standard. Un percorso semplificato e guidato che va dalla richiesta dei finanziamenti all'erogazione dei fondi.
verticalTimeline:
- index: 0
title: 'Richiesta dei fondi'
icon: '/assets/gears.svg'
headerParagraph: "Soluzioni a misura di PA per semplificare la richiesta di finanziamenti e ridurre gli oneri amministrativi per tutta la PA."
iconl: '/assets/icons-come-funziona/box.svg'
iconr: '/assets/icons-come-funziona/multi-box.svg'
littleTitlel: 'SOLUZIONI STANDARD'
bodyParagraphl: "Ogni PA, in base a tipologia e dimensione, potrà accedere alle misure attraverso <strong>soluzioni standard</strong>, ciascuna con un valore economico predefinito. <strong>Non sarà necessario scrivere e presentare progetti</strong> per ricevere finanziamenti."
littleTitler: 'REGISTRAZIONE UNICA'
bodyParagraphr: 'Per semplificare l’accesso ai fondi del PNRR le amministrazioni potranno accedere - con <strong>un’unica registrazione</strong> - a più misure, richiedendo, per esempio, sia finanziamenti per la migrazione al cloud che per il miglioramento dei siti web.'
- index: 1
title: 'Realizzazione delle iniziative'
icon: '/assets/shuttle.svg'
headerParagraph: 'Un portale dedicato e un team di supporto sul territorio. L’importanza di accompagnare le amministrazioni con competenze e strumenti.'
iconl: '/assets/icons-come-funziona/computer.svg'
iconr: '/assets/icons-come-funziona/italia-pin.svg'
littleTitlel: 'UN PORTALE DEDICATO'
bodyParagraphl: "“PA digitale 2026” <strong>accompagnerà gli Enti con risorse e informazioni</strong> lungo tutto il percorso di attuazione delle misure previste dal PNRR: dalla prima fase informativa, che precede l’avvio degli avvisi, al momento dell’accesso ai fondi e fino all’implementazione stessa delle iniziative. I <strong>fornitori saranno scelti dalla PA</strong> anche avvalendosi di fornitori certificati attraverso strumenti Consip"
littleTitler: 'UN TEAM SUL TERRITORIO'
bodyParagraphr: 'Per sostenere la transizione digitale dei singoli Enti, nasce un team dedicato: il <strong>Transformation Office</strong>. Questa struttura, che sarà anche dislocata sul territorio con referenti locali, è parte del Dipartimento per la trasformazione digitale, e farà da ponte con amministrazioni locali e fornitori IT della PA, con <strong>assistenza informativa e tecnica.</strong>'
- index: 2
title: 'Erogazione dei fondi'
icon: '/assets/paper-plane.svg'
headerParagraph: "Processi per l'erogazione dei fondi più semplici, per mettere al centro l'importanza dei risultati da raggiungere."
iconl: '/assets/icons-come-funziona/online.svg'
iconr: '/assets/icons-come-funziona/document-check.svg'
littleTitlel: '100% ONLINE'
bodyParagraphl: "Attraverso “PA digitale 2026” le amministrazioni potranno <strong>accedere ad un’area riservata</strong>, per seguire la gestione amministrativa delle singole iniziative finanziate attraverso l’azione del Dipartimento per la trasformazione digitale. Con l’avvio degli avvisi avranno infatti la possibilità non solo di fare <strong>richiesta per i fondi</strong>, ma anche di <strong>produrre i dati relativi all’avanzamento delle iniziative, ricevere comunicazioni dedicate</strong> e <strong>inviare documentazioni ufficiali per l’erogazione delle risorse."
littleTitler: 'EROGAZIONI PER OBIETTIVI'
bodyParagraphr: 'Per semplificare l’erogazione delle risorse, i contributi saranno riconosciuti alle amministrazioni sulla base del <strong>raggiungimento di specifici obiettivi predefiniti</strong>. Il processo di rendicontazione sarà quindi alleggerito, e <strong>non sarà necessario rendicontare le singole spese effettuate per ottenere i fondi.</strong>'
heroMode:
category: "MODALITÀ DI ACCESSO 2"
title: "Modalità alternativa per accedere alle risorse per la transizione digitale"
  body: <strong>Presentazione progetti</strong><br/>La modalità di accesso prevista per le misure con una platea ristretta di beneficiari (fino a 1.000 PA).
link: /iniziativa/presentazione-progetti
btnLabel: SCOPRI DI PIÙ
btnAria: Scopri di più su presentazione progetti
|
contents/iniziativa/soluzione-standard.yml
|
uid: azure.mgmt.labservices.models.environment_setting_fragment.EnvironmentSettingFragment
name: EnvironmentSettingFragment
fullName: azure.mgmt.labservices.models.environment_setting_fragment.EnvironmentSettingFragment
module: azure.mgmt.labservices.models.environment_setting_fragment
inheritances:
- azure.mgmt.labservices.models.resource.Resource
summary: 'Represents settings of an environment, from which environment instances
would be created.
Variables are only populated by the server, and will be ignored when
sending a request.'
constructor:
syntax: EnvironmentSettingFragment(**kwargs)
parameters:
- name: location
description: The location of the resource.
isRequired: true
types:
- <xref:str>
- name: tags
description: The tags of the resource.
isRequired: true
types:
- <xref:dict>[<xref:str>, <xref:str>]
- name: configuration_state
description: 'Describes the user''s progress in configuring
their environment setting. Possible values include: ''NotApplicable'',
''Completed'''
isRequired: true
types:
- <xref:str>
- <xref:azure.mgmt.labservices.models.ConfigurationState>
- name: description
description: Describes the environment and its resource settings
isRequired: true
types:
- <xref:str>
- name: title
description: 'Brief title describing the environment and its resource
settings'
isRequired: true
types:
- <xref:str>
- name: resource_settings
description: The resource specific settings
isRequired: true
types:
- <xref:azure.mgmt.labservices.models.ResourceSettingsFragment>
- name: provisioning_state
description: The provisioning status of the resource.
isRequired: true
types:
- <xref:str>
- name: unique_identifier
description: 'The unique immutable identifier of a resource
(Guid).'
isRequired: true
types:
- <xref:str>
variables:
- description: The identifier of the resource.
name: id
types:
- <xref:str>
- description: The name of the resource.
name: name
types:
- <xref:str>
- description: The type of the resource.
name: type
types:
- <xref:str>
|
docs-ref-autogen/azure-mgmt-labservices/azure.mgmt.labservices.models.environment_setting_fragment.EnvironmentSettingFragment.yml
|
uid: microsoft.quantum.preparation.statepreparationpositivecoefficients
name: StatePreparationPositiveCoefficients
type: function
namespace: microsoft.quantum.preparation
summary: |-
Returns an operation that prepares the given quantum state.
The returned operation $U$ prepares an arbitrary quantum
state $\ket{\psi}$ with positive coefficients $\alpha_j\ge 0$ from
the $n$-qubit computational basis state $\ket{0...0}$.
The action of U on a newly-allocated register is given by
$$
\begin{align}
U \ket{0\cdots 0} = \ket{\psi} = \frac{\sum_{j=0}^{2^n-1}\alpha_j \ket{j}}{\sqrt{\sum_{j=0}^{2^n-1}|\alpha_j|^2}}.
\end{align}
$$
remarks: |-
Negative input coefficients $\alpha_j < 0$ will be treated as though
positive with value $|\alpha_j|$. `coefficients` will be padded with
elements $\alpha_j = 0.0$ if fewer than $2^n$ are specified.
examples: |-
The following snippet prepares the quantum state $\ket{\psi}=\sqrt{1/8}\ket{0}+\sqrt{7/8}\ket{2}$
in the qubit register `qubitsLE`.
```qsharp
let amplitudes = [Sqrt(0.125), 0.0, Sqrt(0.875), 0.0];
let op = StatePreparationPositiveCoefficients(amplitudes);
using (qubits = Qubit[2]) {
let qubitsLE = LittleEndian(qubits);
op(qubitsLE);
}
```
syntax: 'function StatePreparationPositiveCoefficients (coefficients : Double[]) :
(Microsoft.Quantum.Arithmetic.LittleEndian => Unit is Adj + Ctl)'
input:
content: '(coefficients : Double[])'
types:
- name: coefficients
summary: |-
Array of up to $2^n$ coefficients $\alpha_j$. The $j$th coefficient
indexes the number state $\ket{j}$ encoded in little-endian format.
isArray: true
isPrimitive: true
uid: Double
output:
content: (Microsoft.Quantum.Arithmetic.LittleEndian => Unit is Adj + Ctl)
types:
- summary: A state-preparation unitary operation $U$.
isOperation: true
input:
types:
- uid: microsoft.quantum.arithmetic.littleendian
output:
types:
- isPrimitive: true
uid: Unit
functors:
- Adjoint
- Controlled
|
api/qsharp/microsoft.quantum.preparation.statepreparationpositivecoefficients.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2019-05-11 09:51"
game: "Unreal Tournament 2004"
name: "CTF-RisingSun"
author: "Monasta"
description: "this is the first map ive ever relased for Any game. Its a Medium sized\
\ map with a fairly simple layout and uses the same style as orbital2. have fun\
\ :)"
releaseDate: "2003-06"
attachments:
- type: "IMAGE"
name: "ctf-risingsun_shot_bfb6bc08_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/R/ctf-risingsun_shot_bfb6bc08_3.png"
- type: "IMAGE"
name: "ctf-risingsun_shot_bfb6bc08_4.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/R/ctf-risingsun_shot_bfb6bc08_4.png"
- type: "IMAGE"
name: "ctf-risingsun_shot_bfb6bc08_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/R/ctf-risingsun_shot_bfb6bc08_2.png"
- type: "IMAGE"
name: "ctf-risingsun_shot_bfb6bc08_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/R/ctf-risingsun_shot_bfb6bc08_1.png"
originalFilename: "ctf-risingsunrev2.zip"
hash: "bfb6bc087b8869d4fd4f87bf7c9a5af8726af0f1"
fileSize: 3528428
files:
- name: "CTF-RisingSun.ut2"
fileSize: 11954503
hash: "9676a806ee4f86e234e19eb97a9e1fb5b996d6c3"
otherFiles: 2
dependencies: {}
downloads:
- url: "http://www.unrealplayground.com/forums/downloads.php?do=file&id=1717"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/R/ctf-risingsunrev2.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/R/b/f/b6bc08/ctf-risingsunrev2.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/R/b/f/b6bc08/ctf-risingsunrev2.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Capture The Flag"
title: "Rising Sun"
playerCount: "8-16"
themes:
Tech: 1.0
bots: true
|
content/Unreal Tournament 2004/Maps/Capture The Flag/R/b/f/b6bc08/ctf-risingsun_[bfb6bc08].yml
|
- position: 1
driverNumber: 6
driverId: nico-rosberg
constructorId: mercedes
engineManufacturerId: mercedes
tyreManufacturerId: pirelli
time: "1:44.152"
gap:
interval:
laps: 34
- position: 2
driverNumber: 7
driverId: kimi-raikkonen
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: pirelli
time: "1:44.427"
gap: "+0.275"
interval: "+0.275"
laps: 34
- position: 3
driverNumber: 33
driverId: max-verstappen
constructorId: red-bull
engineManufacturerId: tag-heuer
tyreManufacturerId: pirelli
time: "1:44.532"
gap: "+0.380"
interval: "+0.105"
laps: 29
- position: 4
driverNumber: 3
driverId: daniel-ricciardo
constructorId: red-bull
engineManufacturerId: tag-heuer
tyreManufacturerId: pirelli
time: "1:44.557"
gap: "+0.405"
interval: "+0.025"
laps: 26
- position: 5
driverNumber: 5
driverId: sebastian-vettel
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: pirelli
time: "1:45.161"
gap: "+1.009"
interval: "+0.604"
laps: 33
- position: 6
driverNumber: 27
driverId: nico-hulkenberg
constructorId: force-india
engineManufacturerId: mercedes
tyreManufacturerId: pirelli
time: "1:45.182"
gap: "+1.030"
interval: "+0.021"
laps: 35
- position: 7
driverNumber: 44
driverId: lewis-hamilton
constructorId: mercedes
engineManufacturerId: mercedes
tyreManufacturerId: pirelli
time: "1:45.275"
gap: "+1.123"
interval: "+0.093"
laps: 10
- position: 8
driverNumber: 55
driverId: carlos-sainz-jr
constructorId: toro-rosso
engineManufacturerId: ferrari
tyreManufacturerId: pirelli
time: "1:45.507"
gap: "+1.355"
interval: "+0.232"
laps: 33
- position: 9
driverNumber: 14
driverId: fernando-alonso
constructorId: mclaren
engineManufacturerId: honda
tyreManufacturerId: pirelli
time: "1:45.779"
gap: "+1.627"
interval: "+0.272"
laps: 30
- position: 10
driverNumber: 26
driverId: daniil-kvyat
constructorId: toro-rosso
engineManufacturerId: ferrari
tyreManufacturerId: pirelli
time: "1:46.029"
gap: "+1.877"
interval: "+0.250"
laps: 35
- position: 11
driverNumber: 11
driverId: sergio-perez
constructorId: force-india
engineManufacturerId: mercedes
tyreManufacturerId: pirelli
time: "1:46.063"
gap: "+1.911"
interval: "+0.034"
laps: 26
- position: 12
driverNumber: 22
driverId: jenson-button
constructorId: mclaren
engineManufacturerId: honda
tyreManufacturerId: pirelli
time: "1:46.574"
gap: "+2.422"
interval: "+0.511"
laps: 30
- position: 13
driverNumber: 21
driverId: esteban-gutierrez
constructorId: haas
engineManufacturerId: ferrari
tyreManufacturerId: pirelli
time: "1:46.727"
gap: "+2.575"
interval: "+0.153"
laps: 36
- position: 14
driverNumber: 19
driverId: felipe-massa
constructorId: williams
engineManufacturerId: mercedes
tyreManufacturerId: pirelli
time: "1:46.856"
gap: "+2.704"
interval: "+0.129"
laps: 30
- position: 15
driverNumber: 77
driverId: valtteri-bottas
constructorId: williams
engineManufacturerId: mercedes
tyreManufacturerId: pirelli
time: "1:46.960"
gap: "+2.808"
interval: "+0.104"
laps: 30
- position: 16
driverNumber: 20
driverId: kevin-magnussen
constructorId: renault
engineManufacturerId: renault
tyreManufacturerId: pirelli
time: "1:47.161"
gap: "+3.009"
interval: "+0.201"
laps: 30
- position: 17
driverNumber: 30
driverId: jolyon-palmer
constructorId: renault
engineManufacturerId: renault
tyreManufacturerId: pirelli
time: "1:47.166"
gap: "+3.014"
interval: "+0.005"
laps: 34
- position: 18
driverNumber: 12
driverId: felipe-nasr
constructorId: sauber
engineManufacturerId: ferrari
tyreManufacturerId: pirelli
time: "1:47.531"
gap: "+3.379"
interval: "+0.365"
laps: 29
- position: 19
driverNumber: 8
driverId: romain-grosjean
constructorId: haas
engineManufacturerId: ferrari
tyreManufacturerId: pirelli
time: "1:48.391"
gap: "+4.239"
interval: "+0.860"
laps: 12
- position: 20
driverNumber: 9
driverId: marcus-ericsson
constructorId: sauber
engineManufacturerId: ferrari
tyreManufacturerId: pirelli
time: "1:48.487"
gap: "+4.335"
interval: "+0.096"
laps: 32
- position: 21
driverNumber: 94
driverId: pascal-wehrlein
constructorId: manor
engineManufacturerId: mercedes
tyreManufacturerId: pirelli
time: "1:48.505"
gap: "+4.353"
interval: "+0.018"
laps: 33
- position: 22
driverNumber: 31
driverId: esteban-ocon
constructorId: manor
engineManufacturerId: mercedes
tyreManufacturerId: pirelli
time: "1:48.823"
gap: "+4.671"
interval: "+0.318"
laps: 29
|
src/data/seasons/2016/races/15-singapore/free-practice-2-results.yml
|
uid: southcentralus.api.cognitive.microsoft.com.customvision.v1.2.training.cognitiveservices.createtag.createtag
name: Create Tag
service: Cognitive Services
groupName: Create Tag
apiVersion: "1.2"
summary: Create a tag for the project
consumes: []
produces:
- application/json
- text/json
- application/xml
- text/xml
paths:
- content: POST https://southcentralus.api.cognitive.microsoft.com/customvision/v1.2/Training/projects/{projectId}/tags?name={name}
- content: POST https://southcentralus.api.cognitive.microsoft.com/customvision/v1.2/Training/projects/{projectId}/tags?name={name}&description={description}
isOptional: true
uriParameters:
- name: projectId
in: path
isRequired: true
description: The project id
types:
- uid: string
format: uuid
- name: name
in: query
isRequired: true
description: The tag name
types:
- uid: string
- name: description
in: query
description: Optional description for the tag
types:
- uid: string
responses:
- name: 200 OK
description: OK
types:
- uid: Tag
requestHeader:
- name: Training-Key
in: header
isRequired: true
types:
- uid: string
definitions:
- name: Tag
description: Represents a Tag
kind: object
properties:
- name: Id
isReadyOnly: true
description: Gets the Tag ID
types:
- uid: string
- name: Name
description: Gets or sets the name of the tag
types:
- uid: string
- name: Description
description: Gets or sets the description of the tag
types:
- uid: string
- name: ImageCount
isReadyOnly: true
description: Gets the number of images with this tag
types:
- uid: integer
examples:
- name: Successful CreateTag request
request:
uri: POST https://southcentralus.api.cognitive.microsoft.com/customvision/v1.2/Training/projects/bc3f7dad-5544-468c-8573-3ef04d55463e/tags?name=Tag1&description=Description of Tag1
headers:
- name: Training-Key
value: '{API Key}'
responses:
- statusCode: "200"
body: >-
{
"Id": "9e27bc1b-7ae7-4e3b-a4e5-36153479dc01",
"Name": "Tag1",
"Description": "Description of Tag1",
"ImageCount": 0
}
security: []
|
docs-ref-autogen/cognitiveservices/customvision/CreateTag/CreateTag.yml
|
de:
devise:
confirmations:
confirmed: "Vielen Dank für Deine Registrierung. Bitte melde dich jetzt an."
confirmed_and_signed_in: "Vielen Dank für Deine Registrierung. Du bist jetzt angemeldet."
send_instructions: "Du erhältst in wenigen Minuten eine E-Mail, mit der Du Deine Registrierung bestätigen kannst."
send_paranoid_instructions: "Falls Deine E-Mail-Adresse in unserer Datenbank existiert erhältst Du in wenigen Minuten eine E-Mail mit der Du Deine Registrierung bestätigen kannst."
failure:
already_authenticated: "Du bist bereits angemeldet."
inactive: "Dein Account ist nicht aktiv."
invalid: "Ungültige Anmeldedaten."
invalid_token: "Der Anmelde-Token ist ungültig."
locked: "Dein Account ist gesperrt."
not_found_in_database: "E-Mail-Adresse oder Passwort ungültig."
timeout: "Deine Sitzung ist abgelaufen, bitte melde Dich erneut an."
unauthenticated: "Du musst Dich anmelden oder registrieren, bevor Du fortfahren kannst."
unconfirmed: "Du musst Deinen Account bestätigen, bevor Du fortfahren kannst."
passwords:
no_token: "Du kannst diese Seite nur von dem Link aus einer E-Mail zum Passwort-Zurücksetzen aufrufen. Wenn du einen solchen Link aufgerufen hast stelle bitte sicher, dass du die vollständige Adresse aufrufst."
send_instructions: "Du erhältst in wenigen Minuten eine E-Mail mit der Anleitung, wie Du Dein Passwort zurücksetzen kannst."
send_paranoid_instructions: "Falls Deine E-Mail-Adresse in unserer Datenbank existiert erhältst Du in wenigen Minuten eine E-Mail mit der Anleitung, wie Du Dein Passwort zurücksetzen kannst."
updated: "Dein Passwort wurde geändert. Du bist jetzt angemeldet."
updated_not_active: "Dein Passwort wurde geändert."
registrations:
destroyed: "Dein Account wurde gelöscht."
signed_up: "Du hast dich erfolgreich registriert."
signed_up_but_inactive: "Du hast dich erfolgreich registriert. Wir konnten Dich noch nicht anmelden, da Dein Account inaktiv ist."
signed_up_but_locked: "Du hast dich erfolgreich registriert. Wir konnten Dich noch nicht anmelden, da Dein Account gesperrt ist."
signed_up_but_unconfirmed: "Du hast Dich erfolgreich registriert. Wir konnten Dich noch nicht anmelden, da Dein Account noch nicht bestätigt ist. Du erhältst in Kürze eine E-Mail mit der Anleitung, wie Du Deinen Account freischalten kannst."
      update_needs_confirmation: "Deine Daten wurden aktualisiert, aber Du musst Deine neue E-Mail-Adresse bestätigen. Du erhältst in wenigen Minuten eine E-Mail, mit der Du die Änderung Deiner E-Mail-Adresse abschließen kannst."
updated: "Deine Daten wurden aktualisiert."
sessions:
signed_in: "Erfolgreich angemeldet."
signed_out: "Erfolgreich abgemeldet."
unlocks:
send_instructions: "Du erhältst in wenigen Minuten eine E-Mail mit der Anleitung, wie Du Deinen Account entsperren kannst."
send_paranoid_instructions: "Falls Deine E-Mail-Adresse in unserer Datenbank existiert erhältst Du in wenigen Minuten eine E-Mail mit der Anleitung, wie Du Deinen Account entsperren kannst."
unlocked: "Dein Account wurde entsperrt. Du bist jetzt angemeldet."
|
config/locales/devise.de.yml
|
---
- name: Include os-specific setup
tags: [ setup ]
include: "setup_{{ ansible_os_family | lower }}.yml"
# sudo swapoff -a
- name: swapoff
tags: [ setup ]
become: true
command: swapoff -a
# sudo sed -i '/ swap / s/^/#/' /etc/fstab
- name: Disable SWAP in fstab
tags: [ setup ]
become: true
replace:
path: /etc/fstab
regexp: '^([^#].*\sswap\s.*)$'
replace: '# \1'
# sudo ulimit -l unlimited
# sudo echo "elasticsearch soft memlock unlimited" >> /etc/security/limits.conf
# sudo echo "elasticsearch hard memlock unlimited" >> /etc/security/limits.conf
- name: Add or modify memlock for soft and hard
tags: [ setup ]
become: true
pam_limits:
domain: elasticsearch
limit_type: "{{ item }}"
limit_item: memlock
value: unlimited
loop: [ soft, hard]
# sudo ulimit -n 65536
# sudo echo "elasticsearch - nofile 65535" >> /etc/security/limits.conf
- name: Add or modify nofile
tags: [ setup ]
become: true
pam_limits:
domain: elasticsearch
limit_type: '-'
limit_item: nofile
value: '65535'
# sudo sysctl -w vm.max_map_count=262144
# sudo echo "vm.max_map_count=262144" >> /etc/sysctl.conf
- name: sysctl vm.max_map_count
tags: [ setup ]
become: true
sysctl:
name: vm.max_map_count
value: '262144'
state: present
# sudo echo "LimitMEMLOCK=infinity" >> /usr/lib/systemd/system/elasticsearch.service
- name: Add or modify LimitMEMLOCK
tags: [ setup ]
become: true
lineinfile:
path: /usr/lib/systemd/system/elasticsearch.service
regexp: '^LimitMEMLOCK='
line: LimitMEMLOCK=infinity
insertafter: '^\[Service\]'
# sudo echo "RestartSec=5s" >> /usr/lib/systemd/system/elasticsearch.service
- name: Add or modify RestartSec
tags: [ setup ]
become: true
lineinfile:
path: /usr/lib/systemd/system/elasticsearch.service
regexp: '^RestartSec='
line: RestartSec=5s
insertafter: '^\[Service\]'
# sudo echo "Restart=on-failure" >> /usr/lib/systemd/system/elasticsearch.service
- name: Add or modify Restart
tags: [ setup ]
become: true
lineinfile:
path: /usr/lib/systemd/system/elasticsearch.service
regexp: '^Restart='
line: Restart=on-failure
insertafter: '^\[Service\]'
# sudo systemctl daemon-reload
# sudo systemctl enable elasticsearch
- name: Enable elasticsearch service
tags: [ setup ]
become: true
systemd:
name: elasticsearch
enabled: yes
daemon_reload: yes
|
elasticsearch/tasks/setup.yml
|
---
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
- name: Converge
hosts: all
pre_tasks:
- name: Create puppet hieradata directory
file:
path: /etc/puppet/hieradata
state: directory
tasks:
- name: Create ansible_managed.json
include_role:
name: tripleo_hieradata
tasks_from: ansible_hieradata.yml
- name: Check file exists
when:
- not ansible_check_mode|bool
block:
- name: Stat file
stat:
path: /etc/puppet/hieradata/ansible_managed.json
become: true
register: _managed_file
- name: Assert file exists
assert:
that:
- _managed_file.stat.exists
- name: Check file contents
when:
- not ansible_check_mode|bool
block:
- name: Get contents
slurp:
src: /etc/puppet/hieradata/ansible_managed.json
become: true
register: _managed_file
- name: Set contents fact
set_fact:
_data: "{{ _managed_file['content'] | b64decode }}"
- name: Assert file contents
assert:
that:
- _data == {}
- name: Configure data
include_role:
name: tripleo_hieradata
tasks_from: ansible_hieradata.yml
vars:
hieradata_ansible_data:
my_var: foo
- name: Check file contents
when:
- not ansible_check_mode|bool
block:
- name: Get contents
slurp:
src: /etc/puppet/hieradata/ansible_managed.json
become: true
register: _managed_file
- name: Set contents fact
set_fact:
_data: "{{ _managed_file['content'] | b64decode }}"
- name: Set expected
set_fact:
expected:
my_var: foo
- name: Assert file contents
assert:
that:
- _data == expected
|
tripleo_ansible/roles/tripleo_hieradata/molecule/ansible_hieradata/converge.yml
|
groups:
- name: tf-alarm
rules:
- alert: system_defined_pending_cassandra_compaction_tasks
expr: pending_compaction_tasks > 300
labels:
severity: page
annotations:
summary: Pending compaction tasks in cassandra crossed the configured threshold.
- alert: system_defined_conf_in_correct
expr: system_defined_conf_incorrect > 0.5
labels:
severity: page
annotations:
summary: ContrailConfig missing or incorrect. Configuration pushed to Ifmap as ContrailConfig is missing/incorrect.
- alert: system_defined_node_status
expr: system_defined_node_status > 0.5
labels:
severity: page
annotations:
summary: Node Failure. NodeStatus UVE not present.
- alert: system_defined_partial_sysinfo
expr: system_defined_parital_sysinfo > 0.5
labels:
severity: page
annotations:
summary: System Info Incomplete.
- alert: system_defined_package_version_mismatch
expr: system_defined_package_version_mismatch > 0.5
labels:
severity: page
annotations:
summary: There is a mismatch between installed and running package version.
- alert: system_defined_core_files
expr: system_defined_core_files > 0.5
labels:
severity: page
annotations:
summary: A core file has been generated on the node.
- alert: system_defined_process_connectivity
expr: system_defined_process_connectivity > 0.5
labels:
severity: page
annotations:
summary: Process(es) reporting as non-functional.
- alert: system_defined_process_connectivity_2
expr: process_status > 0.5
labels:
severity: page
annotations:
summary: Process(es) reporting as non-functional.
- alert: system_defined_process_status
expr: system_defined_process_status > 0.5
labels:
severity: page
annotations:
summary: Process Failure.
      - alert: system_defined_process_status_2
expr: process_info > 0.5
labels:
severity: page
annotations:
summary: Process Failure.
- alert: system_defined_disk_usage_critical
expr: disk_usage > 90
labels:
severity: page
annotations:
summary: Disk usage crosses critical threshold limit.
- alert: system_defined_disk_usage_high
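        # PromQL evaluates this left to right: (70 < disk_usage) keeps series above 70, then "< 90" keeps
        # those below 90, so the alert fires only while usage is between the two thresholds.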
expr: 70 < disk_usage < 90
labels:
severity: page
annotations:
summary: Disk usage crosses high threshold limit.
- alert: system_defined_address_mismatch_control
expr: system_defined_address_mismatch_control > 0.5
labels:
severity: page
annotations:
summary: Control Node IP Address mismatch.
- alert: system_defined_bgp_connectivity
expr: num_bgp_peer - num_up_bgp_peer > 0.5
labels:
severity: page
annotations:
summary: BGP peer mismatch. Not enough BGP peers are up.
- alert: system_defined_xmpp_connectivity
expr: num_xmpp_peer - num_up_xmpp_peer > 0.5
labels:
severity: page
annotations:
summary: XMPP peer mismatch.
- alert: system_defined_address_mismatch_compute
expr: system_defined_address_mismatch_compute > 0.5
labels:
severity: page
annotations:
summary: Compute Node IP Address mismatch.
- alert: system_defined_vrouter_limit_exceeded
expr: system_defined_vrouter_limit_exceeded > 0.5
labels:
severity: page
annotations:
summary: Agent resource usage exceeded configured watermark for resource.
- alert: system_defined_vrouter_table_limit_exceeded
expr: system_defined_vrouter_table_limit_exceeded > 0.5
labels:
severity: page
annotations:
summary: Agent resource usage exceeded table size for resource in vrouter.
- alert: system_defined_vrouter_interface
expr: down_interface_count > 0.5
labels:
severity: page
annotations:
summary: Vrouter interface(s) down.
- alert: number of Functional vRouters is less than 2
expr: count(process_status{node_type="vrouter", module_id="contrail_vrouter_agent"} == 0) < 2
labels:
severity: page
annotations:
summary: number of Functional vRouters is less than 2. managed service by svc-monitor cannot be started.
- alert: loadbalancer pool status is down
expr: loadbalancer_pool_status < 0.5
labels:
severity: email
annotations:
summary: loadbalancer pool status is down. some service is not served.
|
tf-alarm.yml
|
Description: Security Group for ElastiCache Redis
Parameters:
VPCId:
Type: String
# Source Security Group Id Parameters
# App1-phpSecurityGroup:
# Type: AWS::EC2::SecurityGroup::Id
#
# App2-phpSecurityGroup:
# Type: AWS::EC2::SecurityGroup::Id
#
# App3-javaSecurityGroup:
# Type: AWS::EC2::SecurityGroup::Id
#
# App4-phpSecurityGroup:
# Type: AWS::EC2::SecurityGroup::Id
#
# App5-phpSecurityGroup:
# Type: AWS::EC2::SecurityGroup::Id
#
# App6-phpSecurityGroup:
# Type: AWS::EC2::SecurityGroup::Id
#
# App7-phpSecurityGroup:
# Type: AWS::EC2::SecurityGroup::Id
# Tagging Parameters
Environment:
Type: String
AllowedValues:
- Production
- UAT
- Test
BusinessUnit:
Type: String
AllowedValues:
- Charitable
- Corporate
- Gaming
- Lotteries
- Wagering
ConstraintDescription: Must be Charitable | Corporate | Gaming | Lotteries | Wagering
Department:
Type: String
MinLength: '3'
MaxLength: '255'
Default: BrandName
Application:
Type: String
MinLength: '3'
MaxLength: '255'
Default: SomePlatform
Component:
Type: String
MinLength: '3'
MaxLength: '255'
Default: ElastiCacheRedis
Resources:
# Create Security Group
SecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: !Sub ${Component} ${Application} Security Group
VpcId: !Ref VPCId
Tags:
- Key: Name
Value: !Sub ${Environment}-${Component}
- Key: Environment
Value: !Ref Environment
- Key: BusinessUnit
Value: !Ref BusinessUnit
- Key: Application
Value: !Ref Application
- Key: Component
Value: !Ref Component
# Add rules to Security Group
# App1-phpIngressRule:
# Type: AWS::EC2::SecurityGroupIngress
# Properties:
# IpProtocol: tcp
# FromPort: '6379'
# ToPort: '6379'
# SourceSecurityGroupId: !Ref App1-phpSecurityGroup
# GroupId: !Ref SecurityGroup
#
# App2-phpIngressRule:
# Type: AWS::EC2::SecurityGroupIngress
# Properties:
# IpProtocol: tcp
# FromPort: '6379'
# ToPort: '6379'
# SourceSecurityGroupId: !Ref App2-phpSecurityGroup
# GroupId: !Ref SecurityGroup
#
# App3-javaIngressRule:
# Type: AWS::EC2::SecurityGroupIngress
# Properties:
# IpProtocol: tcp
# FromPort: '6379'
# ToPort: '6379'
# SourceSecurityGroupId: !Ref App3-javaSecurityGroup
# GroupId: !Ref SecurityGroup
#
# App4-phpIngressRule:
# Type: AWS::EC2::SecurityGroupIngress
# Properties:
# IpProtocol: tcp
# FromPort: '6379'
# ToPort: '6379'
# SourceSecurityGroupId: !Ref App4-phpSecurityGroup
# GroupId: !Ref SecurityGroup
#
# PosIntergationIngressRule:
# Type: AWS::EC2::SecurityGroupIngress
# Properties:
# IpProtocol: tcp
# FromPort: '6379'
# ToPort: '6379'
# SourceSecurityGroupId: !Ref App5-phpSecurityGroup
# GroupId: !Ref SecurityGroup
#
# App6-phpIngressRule:
# Type: AWS::EC2::SecurityGroupIngress
# Properties:
# IpProtocol: tcp
# FromPort: '6379'
# ToPort: '6379'
# SourceSecurityGroupId: !Ref App6-phpSecurityGroup
# GroupId: !Ref SecurityGroup
#
# App7-phpIngressRule:
# Type: AWS::EC2::SecurityGroupIngress
# Properties:
# IpProtocol: tcp
# FromPort: '6379'
# ToPort: '6379'
# SourceSecurityGroupId: !Ref App7-phpSecurityGroup
# GroupId: !Ref SecurityGroup
Outputs:
SecurityGroup:
Description: The ElastiCache Redis Security Group
Value: !Ref SecurityGroup
Export:
Name: !Sub "${AWS::StackName}-SecurityGroup"
|
#4-Master-Securitygroup/EC2-SecurityGroup-ElastiCacheRedis.yaml
|
title: Azure Dev Spaces belgeleri
summary: Azure Dev Spaces, kodunuzu daha büyük bir uygulama bağlamında kolayca çalıştırmanıza ve hata ayıklamanıza olanak tanıyan bir uzantıdır. Kodunuzu uçtan uca test edebilir, kümede çalışan koddaki kesme noktalarına isabet edebilir ve bir geliştirme kümesini ekip üyeleri arasında müdahale olmadan paylaşabilirsiniz.
metadata:
title: Azure Dev Spaces belgeleri
description: Azure Dev Spaces, kodunuzu daha büyük bir uygulama bağlamında kolayca çalıştırmanıza ve hata ayıklamanıza olanak tanıyan bir uzantıdır. Kodunuzu uçtan uca test edebilir, kümede çalışan koddaki kesme noktalarına isabet edebilir ve bir geliştirme kümesini ekip üyeleri arasında müdahale olmadan paylaşabilirsiniz.
services: azure-dev-spaces
ms.service: azure-dev-spaces
ms.topic: landing-page
author: zr-msft
ms.author: zarhoads
manager: gwallace
ms.date: 09/30/2019
ms.openlocfilehash: 752482e33573de4e466b7812616d03f04d2cf395
ms.sourcegitcommit: 253d4c7ab41e4eb11cd9995190cd5536fcec5a3c
ms.translationtype: HT
ms.contentlocale: tr-TR
ms.lasthandoff: 03/25/2020
ms.locfileid: "78196221"
landingContent:
- title: Azure Dev Spaces Hakkında
linkLists:
- linkListType: overview
links:
- text: Azure Dev Spaces nedir?
url: about.md
- linkListType: concept
links:
- text: Azure Dev Spaces nasıl çalışır?
url: how-dev-spaces-works.md
- linkListType: how-to-guide
links:
- text: Azure Dev Spaces'i yükleme
url: how-to/install-dev-spaces.md
- title: Mikro hizmetleri geliştirme ve test etme
linkLists:
- linkListType: quickstart
links:
- text: Mikro hizmeti çalıştırma ve hatalarını ayıklama
url: quickstart-netcore.md
- text: Bir dizi mikro hizmette takımla işbirliği yapma
url: quickstart-team-development.md
- linkListType: how-to-guide
links:
- text: Daha iyi bir uçtan uca test için Dev Spaces'i CI/CD işlem hattınızla tümleştirme
url: how-to/setup-cicd.md
- title: Gelişmiş senaryolar
linkLists:
- linkListType: how-to-guide
links:
- text: Özel NuGet akışı kullanma
url: how-to/use-custom-nuget-feed.md
- text: Özel Helm deposu kullanma
url: how-to/use-private-helm-repo.md
- text: Windows kapsayıcıları ile etkileşim kurma
url: how-to/run-dev-spaces-windows-containers.md
- text: Özel giriş ve HTTPS için traefik kullanma
url: how-to/ingress-https-traefik.md
- text: Özel giriş ve HTTPS için NGINX kullanma
url: how-to/ingress-https-nginx.md
- title: Önizleme özellikleri
linkLists:
- linkListType: how-to-guide
links:
- text: Azure Dev Spaces ile GitHub Eylemlerini Kullanma
url: how-to/github-actions.md
- text: Geliştirme makinenizi AKS kümesine bağlama
url: how-to/connect.md
- text: Azure Dev Spaces kümenizi Helm 3 kullanacak şekilde yapılandırma
url: how-to/helm-3.md
|
articles/dev-spaces/index.yml
|
require:
- rubocop-performance
- rubocop-rails
- rubocop-rake
- rubocop-rspec
AllCops:
ExtraDetails: true
Exclude:
- "db/**/*"
- "config/**/*"
- "script/**/*"
- "bin/{rails,rake}"
- "generated/**/*.rb"
- "node_modules/**/*"
- "tmp/**/*"
- "vendor/**/*"
- ".git/**/*"
Layout/EmptyLines: { Enabled: false }
Layout/HashAlignment: { EnforcedHashRocketStyle: key, EnforcedColonStyle: key }
Layout/LineLength: { Enabled: false }
Layout/MultilineMethodCallIndentation: { Enabled: true, EnforcedStyle: indented }
Layout/ParameterAlignment: { Enabled: true, EnforcedStyle: with_first_parameter }
Layout/SpaceInsideHashLiteralBraces: { Enabled: true, EnforcedStyle: compact }
Layout/TrailingEmptyLines: { Enabled: true }
Lint/UnusedMethodArgument: { Enabled: true, AutoCorrect: false }
Lint/UselessAssignment: { Enabled: false }
Metrics: { Enabled: false }
RSpec/ContextWording: { Enabled: true, Prefixes: ["with", "without", "when", "and", "for", "unless", "if"] }
RSpec/DescribeClass: { Enabled: true, Exclude: ["lib/**/*", "spec/tasks/**/*"] }
RSpec/ExampleLength: { Enabled: false }
RSpec/FilePath: { Enabled: false }
RSpec/MultipleMemoizedHelpers: { Enabled: false }
RSpec/NamedSubject: { Enabled: false }
RSpec/NestedGroups: { Enabled: false }
Style/ClassAndModuleChildren: { Enabled: false }
Style/Documentation: { Enabled: false }
Style/DocumentationMethod: { Enabled: false }
Style/EmptyMethod: { Enabled: true, EnforcedStyle: expanded }
Style/FrozenStringLiteralComment: { Enabled: true, EnforcedStyle: always_true, AutoCorrect: true }
Style/GlobalVars: { AllowedVariables: "$stats" }
Style/HashEachMethods: { Enabled: true }
Style/HashTransformKeys: { Enabled: true }
Style/HashTransformValues: { Enabled: true }
Style/IfUnlessModifier: { Enabled: false }
Style/RescueModifier: { Enabled: false }
Style/StringLiterals: { Enabled: true, EnforcedStyle: double_quotes }
Style/TrailingCommaInArguments: { EnforcedStyleForMultiline: comma }
Style/TrailingCommaInArrayLiteral: { EnforcedStyleForMultiline: comma }
Style/TrailingCommaInHashLiteral: { EnforcedStyleForMultiline: comma }
|
.rubocop.yml
|
---
- name: sync project code from local dir
synchronize:
src: '{{ local_project_dir }}'
dest: '{{ project_dir }}'
recursive: yes
delete: yes
verify_host: yes
rsync_opts:
- "--exclude=*.pyc"
- "--exclude=__pycache__"
- "--exclude=ansible"
- "--exclude=docs"
- "--exclude=node_modules"
- "--exclude=tests"
- "--exclude=*.sqlite"
- "--exclude=celerybeat.pid"
- "--exclude=celerybeat-schedule"
tags:
- flask
- flask.import_articles
- name: create uwsgi.ini file
template:
src: uwsgi.ini.j2
dest: '{{ project_dir }}/uwsgi.ini'
- name: clear .pyc files
command: find . -name '*.pyc' -delete
args:
chdir: '{{ project_dir }}'
changed_when: False
- name: clear __pycache__ folders
command: find . -name __pycache__ -type d -exec rm -fr {} +
args:
chdir: '{{ project_dir }}'
changed_when: False
- name: make sure static files dir exists
file:
path: '{{ FLASK_STATIC_FOLDER }}'
owner: '{{ nginx_user }}'
group: '{{ nginx_user }}'
state: directory
tags:
- flask.static_files
- flask.import_articles
- name: copy static files
synchronize:
src: '{{ local_project_dir }}/{{ static_dir_name }}/'
dest: '{{ FLASK_STATIC_FOLDER }}'
recursive: yes
delete: yes
rsync_opts:
- "--exclude=*.stats"
tags: flask.static_files
- name: copy articles static files
synchronize:
src: '{{ local_project_dir }}/{{ articles_dir_name }}/'
dest: '{{ FLASK_STATIC_FOLDER }}/{{ articles_dir_name }}'
recursive: yes
delete: yes
rsync_opts:
- "--exclude=*.md"
tags:
- flask.static_files
- flask.import_articles
- name: set ownership of static files to {{ nginx_user }}
file:
path: '{{ FLASK_STATIC_FOLDER }}'
owner: '{{ nginx_user }}'
group: '{{ nginx_user }}'
state: directory
recurse: yes
tags:
- flask.static_files
- flask.import_articles
- name: install pip-tools
pip: virtualenv={{ project_virtualenv_dir }} name=pip-tools
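# pip-sync (from pip-tools) installs and removes packages so the virtualenv exactly matches the compiled requirements file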
- name: install packages from requirements.txt
command: "{{ project_virtualenv_dir }}/bin/pip-sync {{ requirements_path }}"
args:
chdir: '{{ project_dir }}'
- name: run database migrations
when: run_migrations is defined and run_migrations
command: "{{ venv_exec }} {{ python3 }} manage.py --env=prod --no-warn db upgrade"
args:
chdir: '{{ project_dir }}'
tags: flask.run_migrations
- name: run database fixtures [dev]
when: run_fixtures is defined and run_fixtures and app_env == 'dev'
command: "{{ venv_exec }} {{ python3 }} manage.py --env=prod --no-warn db fixtures fixtures.json --reset"
args:
chdir: '{{ project_dir }}'
tags: flask.run_fixtures
- name: run database fixtures [prod]
when: run_fixtures is defined and run_fixtures and app_env == 'prod'
command: "{{ venv_exec }} {{ python3 }} manage.py --env=prod --no-warn db fixtures fixtures.json"
args:
chdir: '{{ project_dir }}'
tags: flask.run_fixtures
- name: import articles [dev]
when: import_articles is defined and import_articles and app_env == 'dev'
command: "{{ venv_exec }} {{ python3 }} manage.py --env=prod --no-warn blog import-articles --reset"
args:
chdir: '{{ project_dir }}'
tags: flask.import_articles
- name: import articles [prod]
when: import_articles is defined and import_articles and app_env == 'prod'
command: "{{ venv_exec }} {{ python3 }} manage.py --env=prod --no-warn blog import-articles"
args:
chdir: '{{ project_dir }}'
tags: flask.import_articles
|
v2/ansible/roles/flask/tasks/deploy_app.yaml
|
name: Build - $(Build.BuildId)
trigger:
branches:
include:
- master
stages:
- stage: Build
displayName: Build with esy
jobs:
- job: Linux
pool:
vmImage: ubuntu-latest
variables:
CACHE_FOLDER: $(Pipeline.Workspace)/cache
steps:
- template: .ci/build-platform.yml
- template: .ci/utils/prepare-cache.yml
- job: macOS
pool:
vmImage: macOS-latest
variables:
CACHE_FOLDER: $(Pipeline.Workspace)/cache
steps:
- template: .ci/build-platform.yml
- template: .ci/utils/prepare-cache.yml
# - job: Windows
# pool:
# vmImage: windows-latest
# variables:
# ESY__PREFIX: $(Pipeline.Workspace)\.esy
# steps:
# - template: .ci/build-platform.yml
# parameters:
# esyLockPath: $(Build.SourcesDirectory)\esy.lock\index.json
# installFolderPath: \3_
- stage: Test_versions
dependsOn: []
displayName: Test building with multiple OCaml versions
jobs:
- job:
variables:
CACHE_FOLDER: $(Pipeline.Workspace)/cache
strategy:
matrix:
4_08_1:
OCAML_VERSION: 4.08.1
4_09_0:
OCAML_VERSION: 4.09.0
steps:
- template: .ci/utils/use-node.yml
- template: .ci/utils/use-esy.yml
- template: .ci/utils/cache.yml
parameters:
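          # Including the OCaml version in the cache key gives each matrix entry its own esy cache.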
cache_key: esy.lock/index.json | "$(OCAML_VERSION)"
- bash: ./test-version.sh $(OCAML_VERSION)
displayName: Test building with OCaml version $(OCAML_VERSION)
- template: .ci/utils/prepare-cache.yml
- stage: Build_NPM
displayName: Build NPM packages
dependsOn: [build, Test_versions]
jobs:
- job:
pool:
vmImage: ubuntu-latest
steps:
- template: .ci/create-npm-package.yml
parameters:
package_name: $(package_name)
- stage: Build_opam
displayName: Build with opam
dependsOn: []
jobs:
- job: Linux_opam
pool:
vmImage: ubuntu-latest
variables:
CACHE_FOLDER: $(Pipeline.Workspace)/cache
OPAM_VERSION: 2.0.6
OPAM_PKG: opam-$(OPAM_VERSION)-x86_64-linux
OCAML_VERSION: 4.09.0
steps:
- template: .ci/build-opam.yml
|
azure-pipelines.yml
|
form:
label: Media
tabs:
- name: tabMain
label: Background Media
fields:
- name: image
fieldType: link
targetWorkspace: dam
appName: assets
label: Image, Poster
i18n: true
identifierToPathConverter:
class: info.magnolia.dam.app.assets.field.translator.AssetCompositeIdKeyTranslator
contentPreviewDefinition:
contentPreviewClass: info.magnolia.dam.app.ui.field.DamFilePreviewComponent
validators:
- name: image
class: ch.openinteractive.main.validators.MimeTypeValidatorDefinition
pattern: image/*
errorMessage: Choose an image
- name: video
fieldType: link
targetWorkspace: dam
appName: assets
label: Video
i18n: true
identifierToPathConverter:
class: info.magnolia.dam.app.assets.field.translator.AssetCompositeIdKeyTranslator
validators:
- name: video
class: ch.openinteractive.main.validators.MimeTypeValidatorDefinition
pattern: video/*
errorMessage: Choose a video
- name: isAutoplay
fieldType: checkbox
label: Auto play
buttonLabel: Play the video automatically or show controls
defaultValue: false
- name: hasCaption
fieldType: checkbox
label: Show caption
buttonLabel: Show the caption, if any, defined in the Asset Manager
defaultValue: false
- name: isInstantly
fieldType: checkbox
label: Instantly
buttonLabel: The media loads as soon as possible (no Intersection Observer)
defaultValue: false
- name: isCover
fieldType: checkbox
label: Media as cover
buttonLabel: The media covers the whole container (like a background)
defaultValue: true
- name: maxWidth
fieldType: text
label: Max. width
description: Maximum rendition width to use for responsive pictures
- name: ratio
fieldType: composite
transformerClass: info.magnolia.ui.form.field.transformer.composite.DelegatingCompositeFieldTransformer
label: Fixed ratio
description: For instance, 16 / 9 for videos or 4 / 3 for images.
fields:
- name: width
fieldType: text
label: Width unit
- name: height
fieldType: text
label: Height unit
- name: position
fieldType: optionGroup
layout: horizontal
sortOptions: false
label: Position
description: If cropped, this is the part of the media which will be shown.
options:
- name: center
selected: true
label: Center
- name: top
label: Top
- name: right
label: Right
- name: bottom
label: Bottom
- name: left
label: Left
- name: tabText
label: Foreground Text
fields:
- name: title
fieldType: richText
label: Title
i18n: true
height: 120
configJsFile: /.resources/main/dialogs/include/config/title.js
- name: body
fieldType: richText
label: Paragraph
i18n: true
height: 180
configJsFile: /.resources/main/dialogs/include/config/simple.js
# call to action (include)
- !include:/main/dialogs/include/tabSectionLayoutOverride.yaml
actions: !include:/main/dialogs/include/commonActions.yaml
|
magnolia/light-modules/main/dialogs/components/lazyMedia.yaml
|
name: main - aarch64
on:
workflow_dispatch:
# push:
# branches:
# - main
# pull_request:
# branches:
# - main
jobs:
build:
name: Build and Push image
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[maven-release-plugin]')"
steps:
- name: Checkout project
uses: actions/checkout@v2
      - name: Get the version
        run: |
          echo "TAG_NAME=$(cat projectFile | grep -Po '(?<=Version>).*(?=</Version>)')" >> $GITHUB_ENV
- name: Cache local Maven repository
uses: actions/cache@v2
with:
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: ${{ runner.os }}-maven-
- name: Set up JDK 11
uses: actions/setup-java@v2
with:
java-version: '11'
distribution: 'temurin'
- name: Setup Maven settings.xml
uses: whelk-io/maven-settings-xml-action@v18
with:
servers: |
[
{
"id": "qiot-central",
"username": "${env.ARTIFACTORY_USERNAME_REF}",
"password": "${env.<PASSWORD>}"
},
{
"id": "qiot-snapshots",
"username": "${env.ARTIFACTORY_USERNAME_REF}",
"password": "${<PASSWORD>}"
}
]
- name: Build the Docker image
run: |
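          # Register QEMU binfmt handlers so the aarch64 native image can be built on the x86_64 runner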
docker run --rm --privileged multiarch/qemu-user-static:register --reset
docker build -t quay.io/qiotproject/all-registration:${{ env.TAG_NAME }}-aarch64 -f src/main/docker/Dockerfile.native.multiarch .
- name: Push To Quay
id: push-to-quay
uses: redhat-actions/push-to-registry@v2
with:
registry: quay.io/qiotproject
image: edge-machinery
tag: ${{ env.TAG_NAME }}-aarch64
username: ${{ secrets.QUAY_ALL_USERNAME }}
password: ${{ secrets.<PASSWORD> }}
|
.github/workflows/main-aarch64.yml
|
server:
# Config the port to listen request
port: 8080
spring:
application:
name: Seed-Server
  # Data source configuration. The default configuration uses MySQL and
  # Druid as the connection pool. See https://github.com/alibaba/druid/tree/master/druid-spring-boot-starter/
  # for Druid configuration.
datasource:
driver-class-name: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://localhost:3306/seed?serverTimezone=GMT%2B8&allowMultiQueries=true
username: root
password: <PASSWORD>
type: com.alibaba.druid.pool.DruidDataSource
druid:
initial-size: 1
min-idle: 1
max-wait: 10000
time-between-eviction-runs-millis: 60000
min-evictable-idle-time-millis: 300000
test-while-idle: true
test-on-borrow: true
test-on-return: false
pool-prepared-statements: true
max-pool-prepared-statement-per-connection-size: 20
default-auto-commit: true
validation-query: select 1
# Change the host, username and password as your Email if you need to send email.
mail:
host: smtp.163.com
username: <EMAIL>
password: <PASSWORD>
# Redis in server, change into your own configuration.
redis:
database: 0
host: localhost
port: 6379
password: <PASSWORD>
jedis:
pool:
max-active: 8
max-idle: 9
max-wait: -1ms
min-idle: 0
# MyBatis configuration; the mapper files are saved in the data module.
# See https://mybatis.org/spring-boot-starter/mybatis-spring-boot-autoconfigure/ for details.
mybatis:
config-location: classpath:mybatis/mybatis-config.xml
mapper-locations: classpath:mybatis/mapper/*.xml, classpath:mybatis/mapper/base/*.xml
type-aliases-package: com.seed.data.model
type-handlers-package: com.seed.data.dao.handler
# Log configuration, as default, logback will be used.
logging:
config: classpath:logback.xml
# Spring Boot Actuator, used to monitor and manage your application when you push it to production.
# See https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html for details.
management:
endpoint:
env:
enabled: true
health:
show-details: always
health:
db:
enabled: true
rabbit:
enabled: true
redis:
enabled: true
# Add custom values below
com:
seed:
    # The administrator username and password, used to log in to the import App info page,
# Change them for production.
admin:
username: seed_admin
password: <PASSWORD>
intercept:
enable: false
baidu:
api:
client-id: xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
client-secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Mail
mail:
from:
address: test<<EMAIL>>
|
seed/seed-portal/src/main/resources/application-dev.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-16 17:34"
game: "Unreal"
name: "DmWesternS"
author: "<NAME>"
description: "None"
releaseDate: "1999-09"
attachments:
- type: "IMAGE"
name: "DmWesternS_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal/Maps/DeathMatch/D/DmWesternS_shot_3.png"
- type: "IMAGE"
name: "DmWesternS_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal/Maps/DeathMatch/D/DmWesternS_shot_1.png"
- type: "IMAGE"
name: "DmWesternS_shot_4.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal/Maps/DeathMatch/D/DmWesternS_shot_4.png"
- type: "IMAGE"
name: "DmWesternS_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal/Maps/DeathMatch/D/DmWesternS_shot_2.png"
originalFilename: "dmwesternsun.zip"
hash: "02e5c1018904ce3704b3a730c10eded0a3cfd5c4"
fileSize: 1440315
files:
- name: "DmWesternS.unr"
fileSize: 3751703
hash: "cd0df61fa6e9d1d0306731d28430c7954da017e0"
otherFiles: 5
dependencies: {}
downloads:
- url: "https://gamefront.online/files2/service/thankyou?id=5541187"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal/Maps/DeathMatch/D/dmwesternsun.zip"
main: true
repack: false
state: "OK"
- url: "http://unreal.ut-files.com/index.php?dir=Maps/DeathMatch/MapsW/&file=dmwesternsun.zip"
main: false
repack: false
state: "OK"
- url: "http://unrealtexture.com/Unreal/Downloads/Maps/DeathMatch/MapsW/dmwesternsun.zip"
main: false
repack: false
state: "OK"
- url: "https://www.newbiesplayground.net/download/maps/dm_maps/dmwesternsun.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal/Maps/DeathMatch/W/0/2/e5c101/dmwesternsun.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal/Maps/DeathMatch/W/0/2/e5c101/dmwesternsun.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DmWesternSun"
playerCount: "8-12"
themes:
Skaarj Tech: 1.0
bots: true
|
content/Unreal/Maps/DeathMatch/W/0/2/e5c101/dmwesterns_[02e5c101].yml
|
server:
  # Use the official Vault image
image:
repository: "hashicorp/vault"
tag: "1.7.3"
# These Resource Limits are in line with node requirements in the
# Vault Reference Architecture for a Small Cluster
resources:
requests:
memory: 256Mi
cpu: 250m
limits:
memory: 256Mi
cpu: 250m
# For HA configuration and because we need to manually init the vault,
# we need to define custom readiness/liveness Probe settings
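  # sealedcode/uninitcode=204 make the health check report "ready" while Vault is still sealed or
  # uninitialized, so the pods stay reachable for the manual init/unseal.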
readinessProbe:
enabled: true
path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204"
livenessProbe:
enabled: true
path: "/v1/sys/health?standbyok=true"
initialDelaySeconds: 60
# extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
# used to include variables required for auto-unseal.
extraEnvironmentVars:
VAULT_CACERT: /vault/userconfig/tls-ca/ca-chain.pem
extraSecretEnvironmentVars:
- envName: AWS_ACCESS_KEY_ID
secretName: eks-creds
secretKey: AWS_ACCESS_KEY_ID
- envName: AWS_SECRET_ACCESS_KEY
secretName: eks-creds
secretKey: AWS_SECRET_ACCESS_KEY
# extraVolumes is a list of extra volumes to mount. These will be exposed
# to Vault in the path `/vault/userconfig/<name>/`.
extraVolumes:
- type: secret
name: tls-server
- type: secret
name: tls-ca
- type: secret
name: eks-creds
# This configures the Vault Statefulset to create a PVC for audit logs.
# See https://www.vaultproject.io/docs/audit/index.html to know more
auditStorage:
enabled: true
standalone:
enabled: false
# Run Vault in "HA" mode.
ha:
enabled: true
replicas: 1
raft:
enabled: true
setNodeId: true
config: |
ui = true
listener "tcp" {
address = "[::]:8200"
cluster_address = "[::]:8201"
tls_cert_file = "/vault/userconfig/tls-server/tls.crt"
tls_key_file = "/vault/userconfig/tls-server/tls.key"
tls_ca_cert_file = "/vault/userconfig/tls-ca/ca.crt"
}
seal "awskms" {
region = "us-east-2"
kms_key_id = "a1fd68ba-bacb-45b4-9a1d-7f747ae8a7a7"
}
storage "raft" {
path = "/vault/data"
/*
retry_join {
leader_api_addr = "https://vault-0.vault-internal:8200"
leader_ca_cert_file = "/vault/userconfig/tls-ca/ca.crt"
leader_client_cert_file = "/vault/userconfig/tls-server/tls.crt"
leader_client_key_file = "/vault/userconfig/tls-server/tls.key"
}
retry_join {
leader_api_addr = "https://vault-1.vault-internal:8200"
leader_ca_cert_file = "/vault/userconfig/tls-ca/ca.crt"
leader_client_cert_file = "/vault/userconfig/tls-server/tls.crt"
leader_client_key_file = "/vault/userconfig/tls-server/tls.key"
}
retry_join {
leader_api_addr = "https://vault-2.vault-internal:8200"
leader_ca_cert_file = "/vault/userconfig/tls-ca/ca.crt"
leader_client_cert_file = "/vault/userconfig/tls-server/tls.crt"
leader_client_key_file = "/vault/userconfig/tls-server/tls.key"
}
autopilot {
cleanup_dead_servers = "true"
last_contact_threshold = "200ms"
last_contact_failure_threshold = "10m"
max_trailing_logs = 250000
min_quorum = 5
server_stabilization_time = "10s"
}
*/
}
service_registration "kubernetes" {}
|
vault/service.yaml
|
api_name: []
items:
- children:
- azure.communication.chat.ChatClient
- azure.communication.chat.ChatThreadClient
- azure.communication.chat.ChatMessage
- azure.communication.chat.ChatMessagePriority
- azure.communication.chat.ReadReceipt
- azure.communication.chat.SendChatMessageResult
- azure.communication.chat.ChatThread
- azure.communication.chat.ChatThreadInfo
- azure.communication.chat.CommunicationUserCredential
- azure.communication.chat.ChatThreadMember
- azure.communication.chat.CommunicationUser
- azure.communication.chat.aio
fullName: azure.communication.chat
kind: import
langs:
- python
module: azure.communication.chat
name: chat
type: package
uid: azure.communication.chat
references:
- fullName: azure.communication.chat.ChatClient
isExternal: false
name: ChatClient
parent: azure.communication.chat
uid: azure.communication.chat.ChatClient
- fullName: azure.communication.chat.ChatThreadClient
isExternal: false
name: ChatThreadClient
parent: azure.communication.chat
uid: azure.communication.chat.ChatThreadClient
- fullName: azure.communication.chat.ChatMessage
isExternal: false
name: ChatMessage
parent: azure.communication.chat
uid: azure.communication.chat.ChatMessage
- fullName: azure.communication.chat.ChatMessagePriority
isExternal: false
name: ChatMessagePriority
parent: azure.communication.chat
uid: azure.communication.chat.ChatMessagePriority
- fullName: azure.communication.chat.ReadReceipt
isExternal: false
name: ReadReceipt
parent: azure.communication.chat
uid: azure.communication.chat.ReadReceipt
- fullName: azure.communication.chat.SendChatMessageResult
isExternal: false
name: SendChatMessageResult
parent: azure.communication.chat
uid: azure.communication.chat.SendChatMessageResult
- fullName: azure.communication.chat.ChatThread
isExternal: false
name: ChatThread
parent: azure.communication.chat
uid: azure.communication.chat.ChatThread
- fullName: azure.communication.chat.ChatThreadInfo
isExternal: false
name: ChatThreadInfo
parent: azure.communication.chat
uid: azure.communication.chat.ChatThreadInfo
- fullName: azure.communication.chat.CommunicationUserCredential
isExternal: false
name: CommunicationUserCredential
parent: azure.communication.chat
uid: azure.communication.chat.CommunicationUserCredential
- fullName: azure.communication.chat.ChatThreadMember
isExternal: false
name: ChatThreadMember
parent: azure.communication.chat
uid: azure.communication.chat.ChatThreadMember
- fullName: azure.communication.chat.CommunicationUser
isExternal: false
name: CommunicationUser
parent: azure.communication.chat
uid: azure.communication.chat.CommunicationUser
- fullName: azure.communication.chat.aio
isExternal: false
name: aio
parent: azure.communication.chat
uid: azure.communication.chat.aio
|
preview/docs-ref-autogen/azure-communication-chat/azure.communication.chat.yml
|
title: Rede definida pelo software (SDN)
summary: Saiba mais sobre os recursos, a tecnologia e a implantação da Rede definida pelo software (SDN).
metadata:
title: Rede definida pelo software (SDN)
description: Saiba mais sobre os recursos, a tecnologia e a implantação da Rede definida pelo software (SDN).
ms.service: help
ms.topic: landing-page
author: AnirbanPaul
ms.author: anpaul
ms.date: 06/12/2020
ms.openlocfilehash: 4314579da8e247f6b013e68faa3bb2d193ff2cea
ms.sourcegitcommit: 56ac7cf3f4bbcc5175f140d2df5f37cc42ba76d1
ms.translationtype: HT
ms.contentlocale: pt-BR
ms.lasthandoff: 06/23/2020
ms.locfileid: "85217616"
landingContent:
- title: Sobre a rede definida pelo software
linkLists:
- linkListType: overview
links:
- text: Visão geral da rede definida pelo software (SDN)
url: '\windows-server\networking\sdn\software-defined-networking'
- text: Segurança para SDN
url: '\windows-server\networking\sdn\security\sdn-security-top'
- linkListType: whats-new
links:
- text: Novidades na SDN para o Windows Server 2019
url: '\windows-server\networking\sdn\sdn-whats-new'
- linkListType: concept
links:
- text: Principais componentes da arquitetura da SDN
url: '\windows-server\networking\sdn\technologies\Software-Defined-Networking-Technologies'
- linkListType: video
links:
- text: Mecânica da SDN da Microsoft
url: https://youtu.be/f501zUUcXD0
- title: Introdução
linkLists:
- linkListType: get-started
links:
- text: Planejar uma infraestrutura SDN
url: '\windows-server\networking\sdn\plan\Plan-a-Software-Defined-Network-Infrastructure'
- text: Implantar uma infraestrutura SDN
url: '\windows-server\networking\sdn\deploy\Deploy-a-Software-Defined-Network-Infrastructure'
- text: Gerenciar uma infraestrutura SDN
url: '\windows-server\networking\sdn\manage\manage-sdn'
- linkListType: overview
links:
- text: Solucionar problemas e diagnosticar uma infraestrutura SDN
url: '\windows-server\networking\sdn\troubleshoot\troubleshoot-windows-server-software-defined-networking-stack'
- title: Recursos de aprendizagem sobre SDN
linkLists:
- linkListType: learn
links:
- text: Solucionar problemas de definição de configurações de largura de banda de VPN de Gateway RAS da SDN no VMM
url: https://blogs.technet.microsoft.com/networking/2017/03/06/troubleshoot-configuring-sdn-ras-gateway-vpn-bandwidth-settings-in-virtual-machine-manager/
- text: Folha de dados da rede definida pelo software da Microsoft
url: https://download.microsoft.com/download/1/5/A/15AC87E7-E99C-4E98-B6AF-9346790AD2DF/Software_Defined_Networking_is_built_in_with_Windows_Server_2016_solution_brief_EN_US.pdf
|
WindowsServerDocs/networking/sdn/index.yml
|
entries:
- title: Sidebar
product: Ios
version: 1.0
folders:
- title:
output: pdf
type: frontmatter
folderitems:
- title:
url: /titlepage.html
output: pdf
type: frontmatter
- title:
url: /tocpage.html
output: pdf
type: frontmatter
- title: Product Overview
output: web, pdf
folderitems:
- title: AgoraChat Overview
url: /ios_product_overview.html
output: web
- title: Pricing
url: /ios_pricing.html
output: web, pdf
- title: Release Note
url: /ios_release_note.html
output: web, pdf
- title: Quickstart Guide
output: web, pdf
folderitems:
- title: Run the Sample Project
url: /ios_run_the_sample_project.html
output: web, pdf
- title: Basic Features
output: web, pdf
folderitems:
- title: User
url: /ios_user.html
output: web, pdf
- title: Relationship
url: /ios_relationship.html
output: web, pdf
- title: Message
url: /ios_message.html
output: web, pdf
- title: Group
url: /ios_group.html
output: web, pdf
- title: Chatroom
url: /ios_chatroom.html
output: web, pdf
- title: IMKit
url: /ios_imkit.html
output: web, pdf
- title: CallKit
url: /ios_callkit.html
output: web, pdf
- title: Multi-Device
url: /ios_multi_device.html
output: web, pdf
- title: Push notification
url: /ios_push_notification.html
output: web, pdf
- title: Advanced Features
output: web, pdf
folderitems:
- title: Webhook
url: /ios_webhook.html
output: web, pdf
- title: Multi-device Sync
url: /ios_multi_device.html
output: web, pdf
- title: Message Roaming
url: /ios_message_roaming.html
output: web, pdf
- title: Message Withdraw
url: /ios_message_withdraw.html
output: web, pdf
- title: Sensitive Words
url: /ios_sensitive_words.html
output: web, pdf
- title: Best Practices
output: web, pdf
folderitems:
- title: UIkit
url: /ios_uikit.html
output: web, pdf
- title: Reference Docs
output: web, pdf
folderitems:
- title: API Reference
url: /ios_api_reference.html
output: web, pdf
- title: Error Code
url: /ios_error_code.html
output: web, pdf
|
_data/sidebars/ios_sidebar.yml
|
name: CI
on:
push:
branches:
- master
pull_request:
schedule:
# run CI every day even if no PRs/merges occur
- cron: '0 6 * * *'
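    # i.e. once a day at 06:00 UTC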
jobs:
MacOS:
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- uses: actions/cache@v1
name: Cache LLVM 4 artifacts
with:
path: ccache_llvm40
key: ${{ runner.os }}-llvm4
restore-keys: |
${{ runner.os }}-llvm4
- uses: actions/cache@v1
name: Cache LLVM 5 artifacts
with:
path: ccache_llvm50
key: ${{ runner.os }}-llvm5
restore-keys: |
${{ runner.os }}-llvm5
- uses: actions/cache@v1
name: Cache LLVM 6 artifacts
with:
path: ccache_llvm60
key: ${{ runner.os }}-llvm6
restore-keys: |
${{ runner.os }}-llvm6
- name: Run Tests
run: |
./scripts/travis.sh macos-latest initialize
./scripts/travis.sh macos-latest build
Docker_Linux:
runs-on: ubuntu-latest
strategy:
matrix:
llvm: ["800", "900", "1000"]
ubuntu: ["20.04", "19.10", "18.04"]
steps:
- uses: actions/checkout@v2
- name: Build LLVM ${{ matrix.llvm }} on ${{ matrix.ubuntu }}
run: |
docker build . -t docker.pkg.github.com/lifting-bits/remill/llvm${{ matrix.llvm }}-ubuntu${{ matrix.ubuntu }}-amd64:latest -f Dockerfile --build-arg UBUNTU_VERSION=${{ matrix.ubuntu }} --build-arg ARCH=amd64 --build-arg LLVM_VERSION=${{ matrix.llvm }}
- name: Test Remill with LLVM ${{ matrix.llvm }} on ${{ matrix.ubuntu }}
run: |
docker run --rm docker.pkg.github.com/lifting-bits/remill/llvm${{ matrix.llvm }}-ubuntu${{ matrix.ubuntu }}-amd64:latest --arch amd64 --ir_out /dev/stdout --bytes c704ba01000000
docker run --rm docker.pkg.github.com/lifting-bits/remill/llvm${{ matrix.llvm }}-ubuntu${{ matrix.ubuntu }}-amd64:latest --arch aarch64 --ir_out /dev/stdout --address 0x400544 --bytes FD7BBFA90000009000601891FD030091B7FFFF97E0031F2AFD7BC1A8C0035FD6
- name: Push Image for LLVM ${{ matrix.llvm }} on ${{ matrix.ubuntu }}
if: github.event_name == 'push' && github.ref == 'refs/heads/master'
run: |
docker login docker.pkg.github.com -u "${GH_USER}" -p "${GH_TOKEN}"
for i in 1 2 3; do docker push docker.pkg.github.com/lifting-bits/remill/llvm${{ matrix.llvm }}-ubuntu${{ matrix.ubuntu }}-amd64:latest && break || sleep 10; done
docker login -u "${DOCKER_HUB_USER}" -p "${DOCKER_HUB_TOKEN}"
docker tag docker.pkg.github.com/lifting-bits/remill/llvm${{ matrix.llvm }}-ubuntu${{ matrix.ubuntu }}-amd64:latest trailofbits/remill:llvm${{ matrix.llvm }}-ubuntu${{ matrix.ubuntu }}-amd64
docker push trailofbits/remill:llvm${{ matrix.llvm }}-ubuntu${{ matrix.ubuntu }}-amd64
env:
GH_USER: ${{ github.actor }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCKER_HUB_USER: ${{ secrets.DOCKER_HUB_USER }}
DOCKER_HUB_TOKEN: ${{ secrets.DOCKER_HUB_TOKEN }}
windows:
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Run Tests
continue-on-error: true # for now
run: |
scripts/travis.bat
|
.github/workflows/ci.yml
|
name: Infracost Actions
description: See cloud cost estimates for Terraform in pull requests.
author: infracost
branding:
icon: trending-up
color: yellow
inputs:
api-key:
description: Your Infracost API key. It can be retrieved by running `infracost configure get api_key`. If you don't have one, download Infracost (https://www.infracost.io/docs/#quick-start) and run `infracost register` to get a free API key.
required: true
path:
description: The path that will be passed to `infracost breakdown`. This may be a path to a Terraform plan JSON or a Terraform project. Project support requires Terraform to have been installed with hashicorp/setup-terraform@v1.
required: true
behavior:
description: The behavior to use when posting cost estimate comments. Must be one of 'update' | 'delete-and-new' | 'hide-and-new' | 'new'.
required: false
default: update
runs:
using: "composite"
steps:
# Install the Infracost CLI, see https://github.com/infracost/actions/tree/master/setup
# for other inputs such as version, and pricing-api-endpoint (for self-hosted users).
- name: Setup Infracost
uses: infracost/actions/setup@v1
with:
api-key: ${{ inputs.api-key }}
# Generate Infracost JSON output, the following docs might be useful:
# Multi-project/workspaces: https://www.infracost.io/docs/multi_project/config_file
# Combine Infracost JSON files: https://www.infracost.io/docs/multi_project/report
- name: Generate Infracost JSON
shell: bash
run: infracost breakdown --path ${{ inputs.path }} --format json --out-file /tmp/infracost.json
# Env vars can be set using the usual GitHub Actions syntax
# env:
# MY_ENV: ${{ secrets.MY_ENV }}
# See https://github.com/infracost/actions/tree/master/comment
# for other inputs such as target-type.
- name: Post Infracost comment
uses: infracost/actions/comment@v1
with:
path: /tmp/infracost.json
# Choose the commenting behavior, 'update' is a good default:
behavior: ${{ inputs.behavior }}
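# Illustrative usage from a caller workflow (the "uses:" reference and all values below are
# assumptions, not part of this action definition):
#   - uses: infracost/actions@v1
#     with:
#       api-key: ${{ secrets.INFRACOST_API_KEY }}
#       path: path/to/terraform
#       behavior: update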
|
action.yml
|
clone_folder: c:\trik-runtime
skip_commits:
files:
- 'scripts/*'
- '.travis.yml'
- 'docker/*'
- '**/*.html'
- '**/*.txt'
- '**/*.md'
- '.mergify.yml'
- '.github/*'
message: /\[NOCI\]/
environment:
global:
PROJECT_FILE: trikRuntime
APPVEYOR_SAVE_CACHE_ON_ERROR: true
APPVEYOR_CACHE_ENTRY_ZIP_ARGS: -t7z -m0=lzma -mx=3 -md=32m
BUILD_DIR: '%APPVEYOR_BUILD_FOLDER%\.build'
matrix:
- MINGW: C:\Qt\Tools\mingw730_32
QTDIR: C:\Qt\5.12\mingw73_32
PYTHON_PATH: C:\Python38
CONFIGURATION: debug
- MINGW: C:\Qt\Tools\mingw730_32
QTDIR: C:\Qt\5.12\mingw73_32
PYTHON_PATH: C:\Python38
CONFIGURATION: release
- MINGW: C:\Qt\Tools\mingw730_64
QTDIR: C:\Qt\5.12\mingw73_64
PYTHON_PATH: C:\Python38-x64
CONFIGURATION: release
- MINGW: C:\Qt\Tools\mingw810_32
QTDIR: C:\Qt\5.15\mingw81_32
PYTHON_PATH: C:\Python38
CONFIGURATION: release
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
#cache:
# - '%BUILD_DIR% -> **\*.pr?, appveyor.yml'
init:
# - ps: iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
- if not exist %MINGW% ( dir /w c:\qt\tools\ && exit 1 )
- if not exist %QTDIR% ( dir /w c:\qt\* && exit 1 )
- git config --global core.symlinks true
- git config --global core.autocrlf true
- if "x%appveyor_pull_request_number%" NEQ "x" set APPVEYOR_CACHE_SKIP_SAVE=true
- if "x%appveyor_repo_branch%" NEQ "xmaster" set APPVEYOR_CACHE_SKIP_SAVE=true
before_build:
- git submodule update --init --recursive
- set PATH=%PYTHON_PATH%;%QTDIR%\bin;%MINGW%\bin;C:\msys64\usr\bin;%PATH%
- if not exist %BUILD_DIR% md %BUILD_DIR%
- du -sh %BUILD_DIR%
- path
- python -V
- g++ --version
# deal with 2020 summer package upgrade problem, when keys changed
- curl -O http://repo.msys2.org/msys/x86_64/msys2-keyring-r21.b39fb11-1-any.pkg.tar.xz
- curl -O http://repo.msys2.org/msys/x86_64/msys2-keyring-r21.b39fb11-1-any.pkg.tar.xz.sig
- pacman --noconfirm -U msys2-keyring-r21.b39fb11-1-any.pkg.tar.xz || bash -lc 'pacman --noconfirm -U --config <(echo) msys2-keyring-r21.b39fb11-1-any.pkg.tar.xz'
# Deal with December 2020 MSYS2 update: update core files to migrate to zstd compression
- curl -O http://repo.msys2.org/msys/x86_64/zstd-1.4.7-1-x86_64.pkg.tar.xz # the latest in xz
- curl -O http://repo.msys2.org/msys/x86_64/pacman-5.2.2-5-x86_64.pkg.tar.xz # the latest in xz
- pacman --verbose --noconfirm --disable-download-timeout -U zstd-1.4.7-1-x86_64.pkg.tar.xz pacman-5.2.2-5-x86_64.pkg.tar.xz # Upgrade now (Dec 2020) needs zstd decompressor
- cmd /c "pacman --verbose --noconfirm --disable-download-timeout -Syuu" #Full update of core files
- cmd /c "pacman --verbose --noconfirm --disable-download-timeout -Sy" #Update core files if databases list was changed
- pacman --verbose --noconfirm --sync rsync make
build_script:
- cd %BUILD_DIR%
- qmake %APPVEYOR_BUILD_FOLDER%\%PROJECT_FILE%.pro -spec win32-g++ CONFIG+=%CONFIGURATION% CONFIG+=tests
# - ps: $blockRdp = $true;iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
- make -j2 qmake_all
- make -j2 all
test_script:
- cd %BUILD_DIR%
- cd bin\x86-%CONFIGURATION%
- set TRIK_PYTHONPATH=%PYTHON_PATH%\Lib
- .\trikKernelTests
- .\trikCommunicatorTests
- .\trikCameraPhotoTests
- .\trikJsRunnerTests
- .\trikPyRunnerTests
#Uncomment if needed
#artifacts:
# - path: .build\bin\x86-$(configuration)
# name: trikRuntime-win32-$(configuration)
|
appveyor.yml
|
name: E2E CI
on: push
jobs:
Install:
strategy:
matrix:
node-version: [14.x, 15.x]
os: ['ubuntu-latest', 'windows-latest', 'macos-latest']
app: ['angular', 'react', 'vue', 'svelte']
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@master
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node-version }}
- name: Cache central NPM modules
uses: actions/cache@v1
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ github.ref }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-${{ github.ref }}-${{ hashFiles('**/package-lock.json') }}
- name: Cache Cypress binary
uses: actions/cache@v1
with:
path: ~/.cache/Cypress
key: cypress-${{ runner.os }}-cypress-${{ github.ref }}-${{ hashFiles('**/package.json') }}
restore-keys: |
cypress-${{ runner.os }}-cypress-${{ github.ref }}-${{ hashFiles('**/package.json') }}
# Cache local node_modules to pass to testing jobs
- name: Cache local node_modules
uses: actions/cache@v1
with:
path: node_modules
key: ${{ runner.os }}-node-modules-${{ github.ref }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-modules-${{ github.ref }}-
- name: Install root dependencies
env:
CI: 1
run: npm ci
Build:
needs: Install
strategy:
      matrix:
        # Node version for these jobs; the "Use Node.js" step below reads matrix.node-version
        node-version: [14.x]
        os: ['ubuntu-latest', 'windows-latest', 'macos-latest']
        app: ['angular', 'react', 'vue', 'svelte']
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@master
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node-version }}
# Restore just local node_modules and the Cypress binary archives.
- name: Cache Cypress binary
uses: actions/cache@v1
with:
path: ~/.cache/Cypress
key: cypress-${{ runner.os }}-cypress-${{ github.ref }}-${{ hashFiles('**/package.json') }}
restore-keys: |
cypress-${{ runner.os }}-cypress-${{ github.ref }}-
- name: Cache local node_modules
uses: actions/cache@v1
with:
path: node_modules
key: ${{ runner.os }}-node-modules-${{ github.ref }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-modules-
- name: Cache ${{matrix.app}} node_modules and builds
uses: actions/cache@v1
with:
path: ${{matrix.app}}-app/{node_modules,build,dist,public}
key: ${{matrix.app}}-${{ runner.os }}-node-modules-${{ github.ref }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{matrix.app}}-${{ runner.os }}-node-modules-
# check the restored Cypress binary
- name: Check binary
run: |
npx cypress cache path
npx cypress cache list
- name: Build ${{matrix.app}}
env:
CI: 1
run: |
cd ${{matrix.app}}-app
npm ci
npm run build -- --prod
E2E:
needs: Build
strategy:
      matrix:
        # Node version for these jobs; the "Use Node.js" step below reads matrix.node-version
        node-version: [14.x]
        os: ['ubuntu-latest', 'windows-latest', 'macos-latest']
        app: ['angular', 'react', 'vue', 'svelte']
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@master
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node-version }}
# Restore just local node_modules and the Cypress binary archives.
- name: Cache Cypress binary
uses: actions/cache@v1
with:
path: ~/.cache/Cypress
key: cypress-${{ runner.os }}-cypress-${{ github.ref }}-${{ hashFiles('**/package.json') }}
restore-keys: |
cypress-${{ runner.os }}-cypress-${{ github.ref }}-
- name: Cache local node_modules
uses: actions/cache@v1
with:
path: node_modules
key: ${{ runner.os }}-node-modules-${{ github.ref }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-modules-
- name: Cache ${{matrix.app}} node_modules and builds
uses: actions/cache@v1
with:
path: ${{matrix.app}}-app/{node_modules,build,dist,public}
key: ${{matrix.app}}-${{ runner.os }}-node-modules-${{ github.ref }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{matrix.app}}-${{ runner.os }}-node-modules-
# check the restored Cypress binary
- name: Check binary
run: |
npx cypress cache path
npx cypress cache list
- name: e2e ${{matrix.app}}
env:
CI: 1
run: |
npm run test:${{matrix.app}}
|
.github/workflows/e2e.yml
|
parameters:
- name: testCommand
type: string
- name: displayName
type: string
- name: jobId
type: string
- name: jobAttempt
type: string
- name: featureFlagState
type: string
jobs:
- job: ${{ parameters.jobId }}
displayName: 'RSpec - ${{ parameters.displayName }}'
condition: and(succeeded(), eq(variables['deployOnly'], false))
pool:
vmImage: 'Ubuntu-16.04'
variables:
- name: system.debug
value: $(debug)
steps:
- script: |
echo '##vso[task.setvariable variable=COMPOSE_FILE]docker-compose.yml:docker-compose.azure.yml'
displayName: 'Configure environment'
- template: cancel-build-if-not-latest.yml
parameters:
sourceBranchName: $(Build.SourceBranchName)
- script: |
docker pull $(dockerHubUsername)/$(imageName):$(Build.BuildNumber)
make az_setup
displayName: 'Load Docker image & setup container'
env:
DOCKER_BUILDKIT: $(dockerBuildkitState)
COMPOSE_DOCKER_CLI_BUILD: $(dockerBuildkitState)
dockerHubUsername: $(dockerHubUsername)
dockerHubImageName: $(imageName)
dockerHubImageTag: $(Build.BuildNumber)
railsSecretKeyBase: $(railsSecretKeyBase)
RAILS_ENV: test
GOVUK_NOTIFY_API_KEY: $(govukNotifyAPIKey)
AUTHORISED_HOSTS: $(authorisedHosts)
FIND_BASE_URL: $(findBaseUrl)
GOVUK_NOTIFY_CALLBACK_API_KEY: $(govukNotifyCallbackAPIKey)
SANDBOX: $(sandbox)
- template: cancel-build-if-not-latest.yml
parameters:
sourceBranchName: $(Build.SourceBranchName)
- template: run-rspec-test.yml
parameters:
testCommand: ${{ format('ci.{0}', parameters.testCommand) }}
testResultsFile: ${{ format('testArtifacts/rspec-{0}-results.xml', parameters.testCommand) }}
featureFlagState: ${{ parameters.featureFlagState }}
- task: PublishTestResults@2
condition: succeededOrFailed()
displayName: 'Publish test results'
inputs:
testResultsFiles: 'testArtifacts/**/rspec-*.xml'
testRunTitle: ${{ parameters.testCommand }}
- task: PublishPipelineArtifact@1
condition: succeededOrFailed()
displayName: 'Publish Test Artifacts'
inputs:
path: '$(System.DefaultWorkingDirectory)/testArtifacts/'
artifactName: ${{ format('rspec-{0}-{1}', parameters.jobId, parameters.jobAttempt) }}
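# Illustrative usage from a calling pipeline (the template path and all parameter values are
# assumptions, not part of this template):
#   jobs:
#     - template: templates/rspec-job.yml
#       parameters:
#         testCommand: unit_tests
#         displayName: Unit tests
#         jobId: rspec_unit
#         jobAttempt: '1'
#         featureFlagState: 'off'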
|
azure/pipelines/templates/rspec-job.yml
|
{% set version = "1.9-15" %}
{% set posix = 'm2-' if win else '' %}
{% set native = 'm2w64-' if win else '' %}
package:
name: r-ptw
version: {{ version|replace("-", "_") }}
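  # e.g. "1.9-15" becomes "1.9_15", since conda package versions may not contain "-"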
source:
url:
- {{ cran_mirror }}/src/contrib/ptw_{{ version }}.tar.gz
- {{ cran_mirror }}/src/contrib/Archive/ptw/ptw_{{ version }}.tar.gz
sha256: 22fa003f280bc000f46bca88d69bf332b29bc68435115ba8044533b70bfb7b46
build:
merge_build_host: True # [win]
number: 1
rpaths:
- lib/R/lib/
- lib/
requirements:
build:
- {{ compiler('c') }} # [not win]
- {{ compiler('m2w64_c') }} # [win]
- {{ posix }}filesystem # [win]
- {{ posix }}make
- {{ posix }}sed # [win]
- {{ posix }}coreutils # [win]
- {{ posix }}zip # [win]
host:
- r-base
- r-nloptr
run:
- r-base
- {{ native }}gcc-libs # [win]
- r-nloptr
test:
commands:
- $R -e "library('ptw')" # [not win]
- "\"%R%\" -e \"library('ptw')\"" # [win]
about:
home: https://CRAN.R-project.org/package=ptw
license: GPL (>= 2)
summary: 'Parametric Time Warping aligns patterns, i.e. it aims to put corresponding features
at the same locations. The algorithm searches for an optimal polynomial describing
the warping. It is possible to align one sample to a reference, several samples
to the same reference, or several samples to several references. One can choose
between calculating individual warpings, or one global warping for a set of samples
and one reference. Two optimization criteria are implemented: RMS (Root Mean Square
error) and WCC (Weighted Cross Correlation). Both warping of peak profiles and of
peak lists are supported.'
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2'
extra:
recipe-maintainers:
- conda-forge/r
# Package: ptw
# Type: Package
# Title: Parametric Time Warping
# Version: 1.9-13
# Authors@R: c(person("Jan", "Gerretzen", role = "ctb"), person("Paul", "Eilers", role = "aut"), person("Hans", "Wouters", role = "ctb"), person("Tom", "Bloemberg", role = "aut", email = "<EMAIL>"), person("Ron", "Wehrens", role = c("aut", "cre"), email = "<EMAIL>"))
# Description: Parametric Time Warping aligns patterns, i.e. it aims to put corresponding features at the same locations. The algorithm searches for an optimal polynomial describing the warping. It is possible to align one sample to a reference, several samples to the same reference, or several samples to several references. One can choose between calculating individual warpings, or one global warping for a set of samples and one reference. Two optimization criteria are implemented: RMS (Root Mean Square error) and WCC (Weighted Cross Correlation). Both warping of peak profiles and of peak lists are supported.
# License: GPL (>= 2)
# Imports: nloptr, graphics, grDevices, stats
# NeedsCompilation: yes
# Packaged: 2018-05-25 08:13:45 UTC; ron
# Author: <NAME> [ctb], <NAME> [aut], <NAME> [ctb], <NAME> [aut], <NAME> [aut, cre]
# Maintainer: <NAME> <<EMAIL>>
# Repository: CRAN
# Date/Publication: 2018-05-25 22:17:57 UTC
|
recipe/meta.yaml
|
name: main
on:
push:
branches:
- develop
- feature/*
- main
tags:
- 'v*'
pull_request:
branches:
- main
release:
types:
- created
env:
push_to_dockerhub: true
jobs:
getversion:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: 'Get Previous tag'
id: latesttag
uses: WyriHaximus/github-action-get-previous-tag@v1
- name: Split version tag
uses: xom9ikk/split@v1
id: version
with:
string: ${{ steps.latesttag.outputs.tag }}
separator: v
limit: -1
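        # Illustrative: a latest tag of "v9.4.0" split on "v" yields steps.version.outputs._1 == "9.4.0"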
- name: 'conditions check'
run: echo ${{ steps.latesttag.outputs.tag }}
outputs:
version: ${{ steps.version.outputs._1 }}
test:
needs: getversion
runs-on: ubuntu-latest
strategy:
matrix:
release: [ community, developer, enterprise ]
version: [ "${{ needs.getversion.outputs.version }}" ]
include:
- release: lts
version:
steps:
- name: Setup BATS
uses: mig4/setup-bats@v1
- uses: actions/checkout@v2
- name: test test-${{ matrix.version != '' && format('{0}-{1}',matrix.version, matrix.release) || format('{0}', matrix.release) }}
run: make test VERSION=${{ matrix.version != '' && format('{0}-{1}',matrix.version, matrix.release) || format('{0}', matrix.release) }}
publish-images:
needs: [ test, getversion ]
runs-on: ubuntu-latest
strategy:
matrix:
release: [ community, developer, enterprise ]
version: [ "${{ needs.getversion.outputs.version }}" ]
include:
- release: lts
version:
if: startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v2
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build
uses: docker/build-push-action@v2
with:
push: ${{ env.push_to_dockerhub }}
tags: ${{ matrix.version != '' && format('mslipets/my-bloody-sonar:{0}-{1}', matrix.version, matrix.release) || '' }}, mslipets/my-bloody-sonar:${{ matrix.release }}
build-args: FROM_TAG=${{ matrix.version != '' && format('{0}-{1}', matrix.version, matrix.release) || matrix.release }}
- name: Image digest
run: echo ${{ steps.docker_build.outputs.digest }}
      # Disabled due to https://github.com/docker/hub-feedback/issues/1927
      # until a resolution to https://github.com/docker/roadmap/issues/115 is implemented.
# - name: Update repo description
# uses: christian-korneck/update-container-description-action@v1
# env:
# DOCKER_USER: ${{ secrets.DOCKERHUB_USERNAME }}
# DOCKER_PASS: ${{ secrets.DOCKERHUB_TOKEN }}
# with:
# destination_container_repo: mslipets/my-bloody-sonar
# provider: dockerhub
# short_description: 'Self Configured SonarQube Docker image based on Official SonarSource Images'
# readme_file: 'README.md'
|
.github/workflows/main.yml
|
serviceName: gwneud cais am gyfrif gwasanaethau asiant, a chreu un, os nad ydych yn y DU
serviceHeaderName: Gwneud cais am gyfrif gwasanaethau asiant, a chreu un, os nad ydych yn y DU
serviceDescription: |
Rydych yn defnyddio’r gwasanaeth hwn i wneud cais i CThEM am gymeradwyaeth i greu cyfrif gwasanaethau asiant os yw’ch busnes asiant wedi’i leoli y tu allan i’r DU. Gallwch hefyd ei ddefnyddio i orffen creu’r cyfrif ar ôl i CThEM gymeradwyo’ch cais.
serviceDomain: www.tax.service.gov.uk
serviceUrl: /agent-services/apply-from-outside-uk
contactFrontendServiceId: AOSS
complianceStatus: partial
accessibilityProblems:
- Mae’r priodoledd ‘aria-expanded’ wedi’i ddefnyddio ar elfennau blwch gwirio sy’n dangos amodoldeb, ac ni chaniateir hynny. Hefyd, nid yw defnyddwyr darllenwyr sgrin bob amser yn cael eu hysbysu pan fydd elfennau blwch gwirio sy’n dangos amodoldeb yn cael eu hehangu neu eu cwympo.
- Mae’r elfennau ‘Golygu’ lluosog yn cynnwys yr un gwerth priodoledd ID.
- Mae elfennau ateb ‘Prawf cyfeiriad masnachu’ yn cynnwys yr un gwerth priodoledd ID.
- Nid yw botwm ‘Cadarnhau atebion ac anfon cais’ wedi’i amlygu gan ddangosydd ffocws gweladwy pan fydd mewn ffocws. Mae hyn yn golygu y gall defnyddwyr bysellfwrdd yn unig ei chael hi'n anodd penderfynu ble mae eu ffocws ar y dudalen.
- Nid yw’r ‘gwiriwch yr arweiniad (yn agor mewn ffenestr neu dab newydd)’ yn ddisgrifiadol o’i gyrchfan na’i bwrpas wrth lywio allan o’r cyd-destun.
milestones:
- description: |
Mae’r priodoledd ‘aria-expanded’ wedi’i ddefnyddio ar elfennau blwch gwirio sy’n dangos amodoldeb, ac ni chaniateir hynny. Hefyd, nid yw defnyddwyr darllenwyr sgrin bob amser yn cael eu hysbysu pan fydd elfennau blwch gwirio sy’n dangos amodoldeb yn cael eu hehangu neu eu cwympo. Nid yw hyn yn bodloni maen prawf llwyddiant 1.3.1 (Info and Relationships) a 4.1.2 (Name, Role, Value) Canllawiau Hygyrchedd Cynnwys y We.
date: 2022-03-31
- description: |
Mae’r elfennau ‘Golygu’ lluosog yn cynnwys yr un gwerth priodoledd ID. Nid yw hyn yn bodloni maen prawf llwyddiant 4.1.1 (Parsing) Canllawiau Hygyrchedd Cynnwys y We, fersiwn 2.1.
date: 2022-03-31
- description: |
Mae elfennau ateb ‘Prawf cyfeiriad masnachu’ yn cynnwys yr un gwerth priodoledd ID. Nid yw hyn yn bodloni maen prawf llwyddiant 4.1.1 (Parsing) Canllawiau Hygyrchedd Cynnwys y We, fersiwn 2.1.
date: 2022-03-31
- description: |
Nid yw botwm ‘Cadarnhau atebion ac anfon cais’ wedi’i amlygu gan ddangosydd ffocws gweladwy pan fydd mewn ffocws. Mae hyn yn golygu y gall defnyddwyr bysellfwrdd yn unig ei chael hi'n anodd penderfynu ble mae eu ffocws ar y dudalen. Nid yw hyn yn bodloni maen prawf llwyddiant 2.4.7 (Focus Visible) Canllawiau Hygyrchedd Cynnwys y We, fersiwn 2.1.
date: 2022-03-31
- description: |
Nid yw’r ‘gwiriwch yr arweiniad (yn agor mewn ffenestr neu dab newydd)’ yn ddisgrifiadol o’i gyrchfan na’i bwrpas wrth lywio allan o’r cyd-destun. Nid yw hyn yn bodloni maen prawf llwyddiant 2.4.9 (Link Purpose) Canllawiau Hygyrchedd Cynnwys y We, fersiwn 2.1.
date: 2022-03-31
serviceLastTestedDate: 2021-10-07
statementVisibility: public
statementCreatedDate: 2019-09-23
statementLastUpdatedDate: 2021-12-13
|
conf/services/agent-overseas.cy.yml
|
parameters:
# parameter_name: value
services:
# service_name:
# class: AppBundle\Directory\ClassName
# arguments: ["@another_service_name", "plain_value", "%parameter_name%"]
lib.twig.grid_extension:
class: LibBundle\Twig\GridExtension
public: false
tags:
- { name: twig.extension }
lib.twig.format_extension:
class: LibBundle\Twig\FormatExtension
public: false
tags:
- { name: twig.extension }
lib.form.form_type_extension:
class: LibBundle\Form\Extension\FormTypeExtension
tags:
- { name: form.type_extension, extended_type: Symfony\Component\Form\Extension\Core\Type\FormType }
lib.form.text_type_extension:
class: LibBundle\Form\Extension\TextTypeExtension
tags:
- { name: form.type_extension, extended_type: Symfony\Component\Form\Extension\Core\Type\TextType }
lib.form.integer_type_extension:
class: LibBundle\Form\Extension\IntegerTypeExtension
tags:
- { name: form.type_extension, extended_type: Symfony\Component\Form\Extension\Core\Type\IntegerType }
lib.form.number_type_extension:
class: LibBundle\Form\Extension\NumberTypeExtension
tags:
- { name: form.type_extension, extended_type: Symfony\Component\Form\Extension\Core\Type\NumberType }
lib.form.date_type_extension:
class: LibBundle\Form\Extension\DateTypeExtension
tags:
- { name: form.type_extension, extended_type: Symfony\Component\Form\Extension\Core\Type\DateType }
lib.form.time_type_extension:
class: LibBundle\Form\Extension\TimeTypeExtension
tags:
- { name: form.type_extension, extended_type: Symfony\Component\Form\Extension\Core\Type\TimeType }
lib.form.date_time_type_extension:
class: LibBundle\Form\Extension\DateTimeTypeExtension
tags:
- { name: form.type_extension, extended_type: Symfony\Component\Form\Extension\Core\Type\DateTimeType }
lib.form.birthday_type_extension:
class: LibBundle\Form\Extension\BirthdayTypeExtension
tags:
- { name: form.type_extension, extended_type: Symfony\Component\Form\Extension\Core\Type\BirthdayType }
lib.form.entity_type_extension:
class: LibBundle\Form\Extension\EntityTypeExtension
tags:
- { name: form.type_extension, extended_type: Symfony\Bridge\Doctrine\Form\Type\EntityType }
lib.form.entity_hidden_type:
class: LibBundle\Form\Type\EntityHiddenType
arguments: ["@doctrine.orm.entity_manager"]
tags:
- { name: form.type, alias: entity_hidden }
lib.form.entity_text_type:
class: LibBundle\Form\Type\EntityTextType
arguments: ["@doctrine.orm.entity_manager"]
tags:
- { name: form.type, alias: entity_text }
lib.grid.datagrid:
class: LibBundle\Grid\DataGridService
autowire: true
lib.excel.xml_reader:
class: LibBundle\Excel\PhpExcelXmlReader
autowire: true
lib.excel.object_parser:
class: LibBundle\Excel\PhpExcelObjectParser
autowire: true
|
src/LibBundle/Resources/config/services.yml
|
- position: 1
driverNumber: 6
driverId: juan-pablo-montoya
constructorId: williams
engineManufacturerId: bmw
tyreManufacturerId: michelin
time: "1:52.072"
gap:
interval:
laps: 7
- position: 2
driverNumber: 5
driverId: ralf-schumacher
constructorId: williams
engineManufacturerId: bmw
tyreManufacturerId: michelin
time: "1:52.959"
gap: "+0.887"
interval: "+0.887"
laps: 10
- position: 3
driverNumber: 1
driverId: michael-schumacher
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: bridgestone
time: "1:54.685"
gap: "+2.613"
interval: "+1.726"
laps: 8
- position: 4
driverNumber: 22
driverId: heinz-harald-frentzen
constructorId: prost
engineManufacturerId: acer
tyreManufacturerId: michelin
time: "1:55.233"
gap: "+3.161"
interval: "+0.548"
laps: 10
- position: 5
driverNumber: 2
driverId: rubens-barrichello
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: bridgestone
time: "1:56.116"
gap: "+4.044"
interval: "+0.883"
laps: 7
- position: 6
driverNumber: 10
driverId: jacques-villeneuve
constructorId: bar
engineManufacturerId: honda
tyreManufacturerId: bridgestone
time: "1:57.038"
gap: "+4.966"
interval: "+0.922"
laps: 11
- position: 7
driverNumber: 3
driverId: mika-hakkinen
constructorId: mclaren
engineManufacturerId: mercedes
tyreManufacturerId: bridgestone
time: "1:57.043"
gap: "+4.971"
interval: "+0.005"
laps: 12
- position: 8
driverNumber: 7
driverId: giancarlo-fisichella
constructorId: benetton
engineManufacturerId: renault
tyreManufacturerId: michelin
time: "1:57.668"
gap: "+5.596"
interval: "+0.625"
laps: 7
- position: 9
driverNumber: 4
driverId: david-coulthard
constructorId: mclaren
engineManufacturerId: mercedes
tyreManufacturerId: bridgestone
time: "1:58.008"
gap: "+5.936"
interval: "+0.340"
laps: 10
- position: 10
driverNumber: 19
driverId: pedro-de-la-rosa
constructorId: jaguar
engineManufacturerId: cosworth
tyreManufacturerId: michelin
time: "1:58.519"
gap: "+6.447"
interval: "+0.511"
laps: 11
- position: 11
driverNumber: 9
driverId: olivier-panis
constructorId: bar
engineManufacturerId: honda
tyreManufacturerId: bridgestone
time: "1:58.838"
gap: "+6.766"
interval: "+0.319"
laps: 8
- position: 12
driverNumber: 17
driverId: kimi-raikkonen
constructorId: sauber
engineManufacturerId: petronas
tyreManufacturerId: bridgestone
time: "1:59.050"
gap: "+6.978"
interval: "+0.212"
laps: 8
- position: 13
driverNumber: 12
driverId: jean-alesi
constructorId: jordan
engineManufacturerId: honda
tyreManufacturerId: bridgestone
time: "1:59.128"
gap: "+7.056"
interval: "+0.078"
laps: 10
- position: 14
driverNumber: 16
driverId: nick-heidfeld
constructorId: sauber
engineManufacturerId: petronas
tyreManufacturerId: bridgestone
time: "1:59.302"
gap: "+7.230"
interval: "+0.174"
laps: 9
- position: 15
driverNumber: 8
driverId: jenson-button
constructorId: benetton
engineManufacturerId: renault
tyreManufacturerId: michelin
time: "1:59.587"
gap: "+7.515"
interval: "+0.285"
laps: 7
- position: 16
driverNumber: 11
driverId: jarno-trulli
constructorId: jordan
engineManufacturerId: honda
tyreManufacturerId: bridgestone
time: "1:59.647"
gap: "+7.575"
interval: "+0.060"
laps: 10
- position: 17
driverNumber: 18
driverId: eddie-irvine
constructorId: jaguar
engineManufacturerId: cosworth
tyreManufacturerId: michelin
time: "1:59.689"
gap: "+7.617"
interval: "+0.042"
laps: 9
- position: 18
driverNumber: 23
driverId: luciano-burti
constructorId: prost
engineManufacturerId: acer
tyreManufacturerId: michelin
time: "1:59.900"
gap: "+7.828"
interval: "+0.211"
laps: 7
- position: 19
driverNumber: 14
driverId: jos-verstappen
constructorId: arrows
engineManufacturerId: asiatech
tyreManufacturerId: bridgestone
time: "2:02.039"
gap: "+9.967"
interval: "+2.139"
laps: 10
- position: 20
driverNumber: 21
driverId: fernando-alonso
constructorId: minardi
engineManufacturerId: european
tyreManufacturerId: michelin
time: "2:02.594"
gap: "+10.522"
interval: "+0.555"
laps: 8
- position: 21
driverNumber: 15
driverId: enrique-bernoldi
constructorId: arrows
engineManufacturerId: asiatech
tyreManufacturerId: bridgestone
time: "2:03.048"
gap: "+10.976"
interval: "+0.454"
laps: 10
- position: 22
driverNumber: 20
driverId: tarso-marques
constructorId: minardi
engineManufacturerId: european
tyreManufacturerId: michelin
time: "2:04.204"
gap: "+12.132"
interval: "+1.156"
laps: 10
|
src/data/seasons/2001/races/14-belgium/qualifying-results.yml
|
fof-drafts:
admin:
permissions:
start: Buat draft
schedule: Penjadwalan draft
settings:
title: FoF Drafts
# enable_scheduled_drafts: Enable Scheduled Drafts
enable_scheduled_drafts: Aktifkan Draft Terjadwal
# schedule_on_one_server: Use 'onOneServer()' directive for the task scheduler (requires Redis/Memcache)
schedule_on_one_server: Gunakan 'onOneServer()' untuk penjadwalan tugas (membutuhkan Redis / Memcache)
# schedule_log_output: Append scheduler output to log storage
schedule_log_output: Tambahkan output penjadwalan ke penyimpanan log
console:
# scheduled_drafts_disabled: Scheduled drafts are currently disabled in settings.
scheduled_drafts_disabled: Draft terjadwal saat ini dinonaktifkan dalam pengaturan.
ref:
schedule_draft: Penjadwalan draft
forum:
composer:
# title: Save Draft
title: Simpan Draft
# saving: Saving...
saving: Menyimpan...
# saved: Saved!
saved: Berhasil Disimpan!
# exit_alert: Discard changes to draft?
exit_alert: Buang perubahan?
# discard_empty_draft_alert: Discard empty draft?
discard_empty_draft_alert: Buang draft kosong?
dropdown:
# empty_text: You haven't saved any drafts
empty_text: Anda tidak memiliki draft
# title: Drafts
title: Draft
delete_all_button: Hapus semua draf
delete_button: Hapus draf
schedule_button: => fof-drafts.ref.schedule_draft
scheduled_icon_tooltip: Dijadwalkan untuk {datetime}
# String passed to dayjs.format() to generate the tooltip date-time
scheduled_icon_tooltip_formatter: L [at] LT
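      # Illustrative: with dayjs's English locale, "L [at] LT" renders as e.g. "12/13/2021 at 2:30 PM"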
tooltip: => fof-drafts.forum.dropdown.title
# alert: Are you sure you want to delete your draft?
alert: Anda yakin ingin menghapus draft Anda?
# delete_all_alert: Are you sure you want to delete all your drafts?
delete_all_alert: Apakah Anda yakin ingin menghapus semua draf Anda?
schedule_draft_modal:
title: => fof-drafts.ref.schedule_draft
text: Jadwalkan draft Anda untuk diposting secara otomatis di lain waktu!
scheduled_text: Saat ini dijadwalkan untuk memposting pada {datetime}.
scheduled_error: "Tidak dapat menjadwalkan draf: {error}."
unschedule_button: Tidak dijadwalkan
reschedule_button: Menjadwal ulang
schedule_button: Jadwalkan
schedule_time_preview: Draf ini akan diterbitkan secara otomatis pada <b>{datetime}</b>.
# String passed to dayjs.format() to generate the preview
schedule_time_preview_formatter: LLLL
schedule_time_preview_invalid: Tanggal dan / atau waktu tidak valid
user:
settings:
# drafts_heading: Drafts
drafts_heading: Draft
# draft_autosave_enable: Enable Draft Autosave?
draft_autosave_enable: Aktifkan Penyimpanan Otomatis?
# draft_autosave_interval_label: Autosave Interval (seconds)
draft_autosave_interval_label: Interval Penyimpanan Otomatis (detik)
# draft_autosave_interval_button: Update Interval
draft_autosave_interval_button: Update Interval
# draft_autosave_interval_invalid: The interval must be an integer greater than 4.
draft_autosave_interval_invalid: Interval harus bilangan bulat lebih besar dari 4.
|
locale/fof-drafts.yml
|