code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# Define Jekyll collections
collections:
themes:
permalink: "/:collection/:path:output_ext"
output: true
theme_collection_1:
permalink: "/:collection/:path:output_ext"
output: true
theme_collection_2:
permalink: "/:collection/:path:output_ext"
output: true
theme_collection_3:
permalink: "/:collection/:path:output_ext"
output: true
theme_collection_4:
permalink: "/:collection/:path:output_ext"
output: true
theme_collection_5:
permalink: "/:collection/:path:output_ext"
output: true
just_the_docs:
collections:
themes:
name: Themes
nav_exclude: false
nav_fold: true
search_exclude: false
theme_collection_1:
name: Theme Default
nav_exclude: false
nav_fold: true
search_exclude: false
theme_collection_2:
name: Theme Layout
nav_exclude: false
nav_fold: true
search_exclude: false
theme_collection_3:
name: Theme Framework
nav_exclude: false
nav_fold: true
search_exclude: false
theme_collection_4:
name: Theme UI
nav_exclude: false
nav_fold: true
search_exclude: false
theme_collection_5:
name: Theme Components
nav_exclude: false
nav_fold: true
search_exclude: false
defaults:
-
scope:
path: "" # an empty string here means all files in the project
type: "theme"
values:
layout: "default"
# callouts_opacity: 0.15
# callouts_level: loud
callouts:
admonition:
title: Admonition
color: grey-lt
attention:
title: Attention
color: purple
caution:
title: Caution
color: yellow
custom:
title: Custom
color: pink
opacity: 0.3
danger:
title: Danger
color: red
error:
title: Error
color: red
highlight:
color: yellow
opacity: 0.5
hint:
title: Hint
color: grey-dk
important:
title: Important
color: green
note:
title: Note
color: blue
tip:
title: Tip
color: grey-dk
warning:
title: Warning
color: red
|
_config_themes.yml
|
Collections:
-
testcase:
name: test_querynode_pod_kill
chaos: chaos_querynode_pod_kill.yaml
expectation:
cluster_1_node:
search: fail
query: fail
cluster_n_nodes:
search: degrade
query: degrade
-
testcase:
name: test_querycoord_pod_kill
chaos: chaos_querycoord_pod_kill.yaml
expectation:
cluster_1_node:
search: fail
query: fail
cluster_n_nodes:
search: degrade
query: degrade
-
testcase:
name: test_datanode_pod_kill
chaos: chaos_datanode_pod_kill.yaml
expectation:
cluster_1_node:
insert: succ
flush: fail
cluster_n_nodes:
insert: degrade
-
testcase:
name: test_datacoord_pod_kill
chaos: chaos_datacoord_pod_kill.yaml
expectation:
cluster_1_node:
insert: succ
flush: fail
cluster_n_nodes:
insert: degrade
-
testcase:
name: test_indexnode_pod_kill
chaos: chaos_indexnode_pod_kill.yaml
expectation:
cluster_1_node:
index: fail
cluster_n_nodes:
index: degrade
-
testcase:
name: test_indexcoord_pod_kill
chaos: chaos_indexcoord_pod_kill.yaml
expectation:
cluster_1_node:
index: fail
cluster_n_nodes:
index: degrade
-
testcase:
name: test_proxy_pod_kill
chaos: chaos_proxy_pod_kill.yaml
expectation:
cluster_1_node:
create: fail
insert: fail
flush: fail
index: fail
search: fail
query: fail
cluster_n_nodes:
insert: fail
-
testcase:
name: test_rootcoord_pod_kill
chaos: chaos_rootcoord_pod_kill.yaml
expectation:
cluster_1_node:
create: fail
insert: fail
flush: fail
index: fail
search: fail
query: fail
cluster_n_nodes:
insert: degrade
-
testcase:
name: test_etcd_pod_kill
chaos: chaos_etcd_pod_kill.yaml
expectation:
cluster_1_node:
create: fail
insert: fail
flush: fail
index: fail
search: fail
query: fail
-
testcase:
name: test_minio_pod_kill
chaos: chaos_minio_pod_kill.yaml
expectation:
cluster_1_node:
create: fail
insert: fail
flush: fail
index: fail
search: fail
query: fail
-
testcase:
name: test_pulsar_pod_kill
chaos: chaos_pulsar_pod_kill.yaml
expectation:
cluster_1_node:
create: fail
insert: fail
flush: fail
index: fail
search: fail
query: fail
-
testcase:
name: test_standalone_pod_kill
chaos: chaos_standalone_pod_kill.yaml
expectation:
cluster_1_node:
create: fail
insert: fail
flush: fail
index: fail
search: fail
query: fail
-
testcase:
name: test_allstandalone_pod_kill
chaos: chaos_allstandalone_pod_kill.yaml
expectation:
cluster_1_node:
create: fail
insert: fail
flush: fail
index: fail
search: fail
query: fail
-
testcase:
name: test_allcluster_pod_kill
chaos: chaos_allcluster_pod_kill.yaml
expectation:
cluster_1_node:
create: fail
insert: fail
flush: fail
index: fail
search: fail
query: fail
|
tests/python_client/chaos/chaos_objects/pod_kill/testcases.yaml
|
language: android
sudo: required
jdk: oraclejdk8
services:
- docker
before_cache:
- rm -f ${TRAVIS_BUILD_DIR}/gradle/caches/modules-2/modules-2.lock # Avoid to repack it due locks
- rm -f ${TRAVIS_BUILD_DIR}/gradle/caches/3.3/classAnalysis/classAnalysis.lock
- rm -f ${TRAVIS_BUILD_DIR}/gradle/caches/3.3/jarSnapshots/jarSnapshots.lock
cache:
directories:
- ${TRAVIS_BUILD_DIR}/gradle/caches/
- ${TRAVIS_BUILD_DIR}/gradle/wrapper/dists/
android:
components:
- tools
- build-tools-27.0.3
- platform-tools
- tools
env:
global:
- API=21 # Android API level 21 by default
- TAG=default # Default system image, alternatively use google_apis
- ABI=armeabi-v7a # ARM ABI v7a by default
- QEMU_AUDIO_DRV=none # Disable emulator audio to avoid warning
- GRADLE_USER_HOME="${TRAVIS_BUILD_DIR}/gradle" # Change location for Gradle Wrapper and cache
- ANDROID_HOME=/usr/local/android-sdk # Depends on the cookbooks version used in the VM
- TOOLS=${ANDROID_HOME}/tools # PATH order matters, exists more than one emulator script
- PATH=${ANDROID_HOME}:${ANDROID_HOME}/emulator:${TOOLS}:${TOOLS}/bin:${ANDROID_HOME}/platform-tools:${PATH}
- ADB_INSTALL_TIMEOUT=20 # minutes (2 minutes by default)
before_install:
- export EMULATOR="system-images;android-${API};${TAG};${ABI}" # Used to install/create emulator
- echo 'count=0' > /home/travis/.android/repositories.cfg # Avoid warning
- docker build -t rpicheck/test-sshd ./ssh/src/test/resources/
- docker run -d -p 127.0.0.1:2222:22 rpicheck/test-sshd
install:
# List and delete unnecessary components to free space
- sdkmanager --list || true
# Update sdk tools to latest version and install/update components
- echo yes | sdkmanager "tools" >/dev/null
- echo yes | sdkmanager "platforms;android-27" >/dev/null # Latest platform required by SDK tools
- echo yes | sdkmanager "platforms;android-${API}" >/dev/null # Android platform required by emulator
- echo yes | sdkmanager "extras;android;m2repository" >/dev/null
- echo yes | sdkmanager "extras;google;m2repository" >/dev/null
- echo yes | sdkmanager "emulator" >/dev/null # latest emulator
- echo yes | sdkmanager "$EMULATOR" >/dev/null # Install emulator system image
# Check components status
- sdkmanager --list || true
- echo $PATH
before_script:
# Create and start emulator
- echo no | avdmanager create avd -n acib -k "$EMULATOR" -f --abi "$ABI" --tag "$TAG"
- emulator -avd acib -no-window -camera-back none -camera-front none -engine classic &
# Wait for emulator fully-booted and disable animations
- android-wait-for-emulator
- adb shell input keyevent 82 &
script:
# Run all device checks
- cd ${TRAVIS_BUILD_DIR}/${DIR} && ./gradlew build connectedCheck -PdisablePreDex
after_script:
# Show tests and lint results
- cat ${TRAVIS_BUILD_DIR}/${DIR}/*/build/outputs/androidTest-results/connected/*
- cat ${TRAVIS_BUILD_DIR}/${DIR}/*/build/reports/lint-results.xml
|
.travis.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2019-05-11 22:18"
game: "Unreal Tournament 2004"
name: "DOM-MoonLight2k4"
author: "Antti ''Chaos_Snake'' Kupiainen"
description: "Moon Light is a weapon arsenal at coldest mountains in Siberia. After\
\ Moon Light was converted from UT99 to UT2k4, here were set lot of tournaments.\
\ First time in Moon Light history it was made to DOM matches... special isn't it?"
releaseDate: "2008-03"
attachments:
- type: "IMAGE"
name: "DOM-MoonLight2k4_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Domination/M/DOM-MoonLight2k4_shot_2.png"
- type: "IMAGE"
name: "DOM-MoonLight2k4_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Domination/M/DOM-MoonLight2k4_shot_1.png"
originalFilename: "DOM-MoonLight2k4.zip"
hash: "98ad5494b0d1ddac4ce439c6e6514c8cdd68a7c4"
fileSize: 8298047
files:
- name: "DOM-MoonLight2k4.ut2"
fileSize: 6521481
hash: "55b6f4ee318ec56c0a8353b1b4a7dab5de27b0bc"
- name: "Botmca9.ogg"
fileSize: 5699542
hash: "2e9af6a3e2b47697a99c92f4a0ef1cf8f51ac65b"
otherFiles: 1
dependencies:
DOM-MoonLight2k4.ut2:
- status: "MISSING"
name: "ChaosExtras"
- status: "MISSING"
name: "ChaosGames"
downloads:
- url: "http://www.unrealplayground.com/forums/downloads.php?do=file&id=6865"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/Domination/M/DOM-MoonLight2k4.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/Domination/M/9/8/ad5494/DOM-MoonLight2k4.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/Domination/M/9/8/ad5494/DOM-MoonLight2k4.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Domination"
title: "Moon Light"
playerCount: "2-4"
themes: {}
bots: true
|
content/Unreal Tournament 2004/Maps/Domination/M/9/8/ad5494/dom-moonlight2k4_[98ad5494].yml
|
transformer:
input_dim: -1 # `int`, for pre-extracted features: 39 for mfcc, 40 for fmllr, 80 for fbank, 160 for mel, irrelevant if on-the-fly extraction is used
hidden_size: 768 # Size of the encoder layers and the pooler layer.
num_hidden_layers: 3 # Number of hidden layers in the Transformer encoder.
num_attention_heads: 12 # Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size: 3072 # The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act: gelu # The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: 0.3 # The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: 0.3 # The dropout ratio for the attention probabilities.
initializer_range: 0.02 # The stddev of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps: 1.e-12 # The epsilon used by LayerNorm.
share_layer: False # Share layer weights
pre_layer_norm: False # To apply the pre layer normalization technique introduced in: https://arxiv.org/abs/2002.04745
task:
loss: L1 # L1 or MSE
sequence_length: 1500 # The maximum input sequence length for the transformer model (0 for no restriction)
position_encoding_size: 768 # this should be identical to `hidden_size`
mask_proportion: 0.0 # mask this percentage of all spectrogram frames in each sequence at random during MAM training
mask_consecutive_min: 7 # mask this amount of consecutive frames
mask_consecutive_max: 7 # mask this amount of consecutive frames
mask_allow_overlap: True # allow overlap masking
mask_bucket_ratio: 1.5 # only used when overlap is not allowed. sample a mask from each bucket in size of [sampled mask_consecutive * mask_bucket_ratio]
mask_frequency: 0.2 # mask maximum this percentage of frequency bands, set to 0 for no frequency mask
noise_proportion: 0.0 # for this percentage of the time, Gaussian noise will be applied on all frames during MAM training, set to 0 for no noise
audio:
target_level: -25 # pretrained utterances are first scaled to the same decibel level
win_ms: 25
hop_ms: 10
n_freq: 201
n_mels: 80
n_mfcc: 13
input:
feat_type: mel # feat_type can be: wav, complx, linear, mel, mfcc, phase
channel: 0
log: True
delta: 0
cmvn: True
target:
feat_type: mel # feat_type can be: wav, complx, linear, mel, mfcc, phase
channel: 1
log: True
delta: 0
cmvn: True
|
pretrain/tera/exp_model_config/logMelBase-F.yaml
|
uid: "com.azure.storage.file.share.models.ShareStorageException"
fullName: "com.azure.storage.file.share.models.ShareStorageException"
name: "ShareStorageException"
nameWithType: "ShareStorageException"
summary: "A `StorageException` is thrown whenever Azure Storage successfully returns an error code that is not 200-level. Users can inspect the status code and error code to determine the cause of the error response. The exception message may also contain more detailed information depending on the type of error. The user may also inspect the raw HTTP response or call toString to get the full payload of the error response if present. Note that even some expected \"errors\" will be thrown as a `StorageException`. For example, some users may perform a getProperties request on an entity to determine whether it exists or not. If it does not exists, an exception will be thrown even though this may be considered an expected indication of absence in this case.\n\n**Sample Code**\n\nFor more samples, please see the [sample file][]\n\n\n[sample file]: https://github.com/Azure/azure-storage-java/blob/master/src/test/java/com/microsoft/azure/storage/Samples.java"
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
- "<xref href=\"java.lang.Throwable\" data-throw-if-not-resolved=\"False\" />"
- "<xref href=\"java.lang.Exception\" data-throw-if-not-resolved=\"False\" />"
- "<xref href=\"java.lang.RuntimeException\" data-throw-if-not-resolved=\"False\" />"
- "<xref href=\"com.azure.core.exception.AzureException\" data-throw-if-not-resolved=\"False\" />"
- "<xref href=\"com.azure.core.exception.HttpResponseException\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "com.azure.core.exception.HttpResponseException.getResponse()"
- "com.azure.core.exception.HttpResponseException.getValue()"
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
- "java.lang.Throwable.addSuppressed(java.lang.Throwable)"
- "java.lang.Throwable.fillInStackTrace()"
- "java.lang.Throwable.getCause()"
- "java.lang.Throwable.getLocalizedMessage()"
- "java.lang.Throwable.getMessage()"
- "java.lang.Throwable.getStackTrace()"
- "java.lang.Throwable.getSuppressed()"
- "java.lang.Throwable.initCause(java.lang.Throwable)"
- "java.lang.Throwable.printStackTrace()"
- "java.lang.Throwable.printStackTrace(java.io.PrintStream)"
- "java.lang.Throwable.printStackTrace(java.io.PrintWriter)"
- "java.lang.Throwable.setStackTrace(java.lang.StackTraceElement[])"
- "java.lang.Throwable.toString()"
syntax: "public final class ShareStorageException extends HttpResponseException"
constructors:
- "com.azure.storage.file.share.models.ShareStorageException.ShareStorageException(java.lang.String,com.azure.core.http.HttpResponse,java.lang.Object)"
methods:
- "com.azure.storage.file.share.models.ShareStorageException.getErrorCode()"
- "com.azure.storage.file.share.models.ShareStorageException.getServiceMessage()"
- "com.azure.storage.file.share.models.ShareStorageException.getStatusCode()"
type: "class"
metadata: {}
package: "com.azure.storage.file.share.models"
artifact: com.azure:azure-storage-file-share:12.9.0-beta.1
|
preview/docs-ref-autogen/com.azure.storage.file.share.models.ShareStorageException.yml
|
title: |-
SonarQube MSBuild Scanner
summary: |
Step for running SonarQube MSBuild Scanner CLI.
description: |
Step for running SonarQube MSBuild Scanner CLI.
website: https://github.com/tbrushwyler/bitrise-step-sonarqube-msbuild-scanner
source_code_url: https://github.com/tbrushwyler/bitrise-step-sonarqube-msbuild-scanner
support_url: https://github.com/tbrushwyler/bitrise-step-sonarqube-msbuild-scanner/issues
host_os_tags:
- osx-10.10
- ubuntu-16.04
# If this step should be available only for certain project types
# just uncomment this `project_type_tags` section and include all the
# project types supported by the step. If the step can be used for all
# project types then you can just remove this section.
# If no `project_type_tags` specified (or specified as an empty array)
# that means the step can be used for any project type.
# You can find more information about project type tags in the Step Development Guideline:
# https://github.com/bitrise-io/bitrise/blob/master/_docs/step-development-guideline.md
#
project_type_tags:
- xamarin
# Type tags are used for categorizing steps, for easier step discovery in Step Libraries.
# You can find more information about type tags in the Step Development Guideline:
# https://github.com/bitrise-io/bitrise/blob/master/_docs/step-development-guideline.md
type_tags:
- utility
is_requires_admin_user: true
is_always_run: false
is_skippable: false
run_if: ""
deps:
brew:
- name: wget
- name: unzip
apt_get:
- name: wget
- name: unzip
toolkit:
bash:
entry_file: step.sh
inputs:
- scanner_version: 4.5.0.1761
opts:
title: Scanner CLI version
description: |-
Scanner CLI version to be used. Step will fail if invalid or non-existent is specified.
is_required: true
- sonarqube_project_key:
opts:
title: Key of the analyzed project in SonarQube.
description: |-
Specifies the key of the analyzed project in SonarQube.
is_required: true
- sonarqube_project_name:
opts:
title: Name of the analyzed project in SonarQube.
description: |-
Specifies the name of the analyzed project in SonarQube. Adding this argument will overwrite the project name in SonarQube if it already exists.
is_required: false
- sonarqube_project_version:
opts:
title: Version of your project in SonarQube.
description: |-
Specifies the version of your project.
is_required: false
- scanner_begin_properties:
opts:
title: Scanner parameters for the `begin` step
description: |-
See [analysis parameters](https://docs.sonarqube.org/display/SONAR/Analysis+Parameters) documentation
for list of possible options.
Example:
```
sonar.sources=.
sonar.java.binaries=build/classes/kotlin/main
sonar.login=TOKEN_HERE
#environment variables are evaluated
sonar.projectKey=$BITRISE_APP_TITLE
sonar.host.url=https://sonar.example.test
sonar.analysis.mode=preview
sonar.report.export.path=sonar-report.json
```
is_required: false
is_expand: true
is_sensitive: true
- scanner_build_commands:
opts:
title: Build commands.
description: |-
Commands to run between "begin" and "end" steps.
You may want to build your project, execute unit tests, and generate code coverage data.
Example:
```
msbuild $BITRISE_PROJECT_PATH /t:Rebuild
```
- scanner_end_properties:
opts:
title: Scanner parameters for the `end` step
description: |-
See [documentation](https://docs.sonarqube.org/display/SCAN/Analyzing+with+SonarQube+Scanner+for+MSBuild)
for list of possible options.
Example:
```
sonar.login=TOKEN_HERE
sonar.password=<PASSWORD>
```
is_required: false
is_expand: true
is_sensitive: true
- is_debug: "false"
opts:
title: Print all executed shell commands to a build log?
description: |-
Whether a trace of shell commands should be printed to the build log.
Options:
* "true"
* "false" (default)
is_expand: false
value_options: ["true", "false"]
|
step.yml
|
title: Documentação das Instâncias de Contêiner do Azure
summary: Execute contêineres do Docker sob demanda em um ambiente gerenciado e sem servidor do Azure. As Instâncias de Contêiner do Azure são uma solução para qualquer cenário que possa operar em contêineres isolados, sem orquestração. Execute aplicativos orientados a eventos, implante rapidamente de seus pipelines de desenvolvimento de contêiner, execute o processamento de dados e crie trabalhos.
metadata:
title: Documentação das Instâncias de Contêiner do Azure – contêineres sem servidor, sob demanda
description: As ACI (Instâncias de Contêiner do Azure) permitem que você execute contêineres do Docker em um ambiente de nuvem gerenciado e sem servidor, sem precisar configurar VMs, clusters ou orquestradores.
services:
ms.service: container-instances
ms.topic: landing-page
ms.collection: collection
author: dlepow
ms.author: danlep
ms.date: 09/25/2019
ms.openlocfilehash: 8dd403d9c1b2996c70c8d387e5305d2c8749bce8
ms.sourcegitcommit: 829d951d5c90442a38012daaf77e86046018e5b9
ms.translationtype: HT
ms.contentlocale: pt-BR
ms.lasthandoff: 10/09/2020
ms.locfileid: "90705190"
landingContent:
- title: Sobre as Instâncias de Contêiner do Azure
linkLists:
- linkListType: overview
links:
- text: O que são as Instâncias de Contêiner do Azure?
url: container-instances-overview.md
- text: Grupos de contêineres
url: container-instances-container-groups.md
- text: Perguntas frequentes
url: container-instances-faq.md
- title: Introdução
linkLists:
- linkListType: quickstart
links:
- text: Implantar uma instância de contêiner – CLI
url: container-instances-quickstart.md
- text: Implantar uma instância de contêiner – portal
url: container-instances-quickstart-portal.md
- text: Implantar instância de contêiner – Modelo do Resource Manager
url: container-instances-quickstart-template.md
- text: Implantar uma instância de contêiner – CLI do Docker
url: quickstart-docker-cli.md
- linkListType: learn
links:
- text: Executar contêineres do Docker com Instâncias de Contêiner do Azure
url: /learn/modules/run-docker-with-azure-container-instances/
- title: Implantar grupos com vários contêineres
linkLists:
- linkListType: tutorial
links:
- text: Implantar um grupo de contêineres – YAML
url: container-instances-multi-container-yaml.md
- text: Implantar grupo de contêineres – Modelo do Resource Manager
url: container-instances-multi-container-group.md
- text: Implantar um grupo de contêineres – Docker Compose
url: tutorial-docker-compose.md
- title: Cenários de implantação
linkLists:
- linkListType: how-to-guide
links:
- text: Conectar-se a uma rede virtual
url: container-instances-vnet.md
- text: Configurar política de reinicialização
url: container-instances-restart-policy.md
- text: Definir variáveis de ambiente
url: container-instances-environment-variables.md
- title: Gerenciar aplicativos de contêiner
linkLists:
- linkListType: how-to-guide
links:
- text: Parar e iniciar contêineres
url: container-instances-stop-start.md
- text: Atualizar contêineres em execução
url: container-instances-update.md
- text: Compartilhar dados entre contêineres
url: container-instances-volume-emptydir.md
- title: Integrar ao Azure
linkLists:
- linkListType: how-to-guide
links:
- text: Implantar imagem com origem no Registro de Contêiner do Azure
url: container-instances-using-azure-container-registry.md
- text: Usar identidade gerenciada
url: container-instances-managed-identity.md
- text: Montar um compartilhamento de arquivos do Azure
url: container-instances-volume-azure-files.md
|
articles/container-instances/index.yml
|
items:
- uid: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku
id: WithSku
artifact: com.microsoft.azure.storagecache.v2020_03_01:azure-mgmt-storagecache:1.0.0-beta
parent: com.microsoft.azure.management.storagecache.v2020_03_01
children:
- com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku.withSku(com.microsoft.azure.management.storagecache.v2020_03_01.CacheSku)
langs:
- java
name: Cache.UpdateStages.WithSku
nameWithType: Cache.UpdateStages.WithSku
fullName: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku
type: Interface
package: com.microsoft.azure.management.storagecache.v2020_03_01
summary: The stage of the cache update allowing to specify Sku.
syntax:
content: public static interface Cache.UpdateStages.WithSku
- uid: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku.withSku(com.microsoft.azure.management.storagecache.v2020_03_01.CacheSku)
id: withSku(com.microsoft.azure.management.storagecache.v2020_03_01.CacheSku)
artifact: com.microsoft.azure.storagecache.v2020_03_01:azure-mgmt-storagecache:1.0.0-beta
parent: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku
langs:
- java
name: withSku(CacheSku sku)
nameWithType: Cache.UpdateStages.WithSku.withSku(CacheSku sku)
fullName: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku.withSku(CacheSku sku)
overload: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku.withSku*
type: Method
package: com.microsoft.azure.management.storagecache.v2020_03_01
summary: Specifies sku.
syntax:
content: public abstract Cache.Update withSku(CacheSku sku)
parameters:
- id: sku
type: com.microsoft.azure.management.storagecache.v2020_03_01.CacheSku
description: SKU for the Cache
return:
type: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.Update
description: the next update stage
references:
- uid: com.microsoft.azure.management.storagecache.v2020_03_01.CacheSku
name: CacheSku
nameWithType: CacheSku
fullName: com.microsoft.azure.management.storagecache.v2020_03_01.CacheSku
- uid: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.Update
name: Cache.Update
nameWithType: Cache.Update
fullName: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.Update
- uid: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku.withSku*
name: withSku
nameWithType: Cache.UpdateStages.WithSku.withSku
fullName: com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku.withSku
package: com.microsoft.azure.management.storagecache.v2020_03_01
|
docs-ref-autogen/com.microsoft.azure.management.storagecache.v2020_03_01.Cache.UpdateStages.WithSku.yml
|
%YAML 1.2
---
$id: "http://devicetree.org/meta-schemas/keywords.yaml#"
$schema: "http://json-schema.org/draft/2019-09/schema#"
description:
Keywords must be a subset of known json-schema keywords
definitions:
sub-schemas:
allOf:
- $ref: "#"
propertyNames:
# The subset of keywords allowed for sub-schema
enum:
- $ref
- additionalItems
- additionalProperties
- allOf
- anyOf
- const
- contains
- default
- dependencies
- dependentRequired
- dependentSchemas
- deprecated
- description
- else
- enum
- exclusiveMaximum
- exclusiveMinimum
- items
- if
- minItems
- minimum
- maxItems
- maximum
- multipleOf
- not
- oneOf
- pattern
- patternProperties
- properties
- required
- then
- type
- typeSize
- unevaluatedProperties
- uniqueItems
scalar-prop-list:
propertyNames:
description:
Scalar and array keywords cannot be mixed
not:
enum:
- const
- enum
- exclusiveMaximum
- exclusiveMinimum
- minimum
- maximum
- multipleOf
- pattern
# Array keywords should not be mixed with scalar keywords
dependentSchemas:
contains:
$ref: "#/definitions/scalar-prop-list"
items:
$ref: "#/definitions/scalar-prop-list"
maxItems:
$ref: "#/definitions/scalar-prop-list"
minItems:
$ref: "#/definitions/scalar-prop-list"
properties:
additionalItems:
type: boolean
additionalProperties:
oneOf:
- type: object
allOf:
- $ref: "#/definitions/sub-schemas"
- type: boolean
allOf:
items:
$ref: "#/definitions/sub-schemas"
anyOf:
items:
$ref: "#/definitions/sub-schemas"
contains:
$ref: "#/definitions/sub-schemas"
else:
$ref: "#/definitions/sub-schemas"
if:
$ref: "#/definitions/sub-schemas"
items:
oneOf:
- type: object
allOf:
- $ref: "#/definitions/sub-schemas"
- type: array
items:
$ref: "#/definitions/sub-schemas"
minItems:
minimum: 1
not:
$ref: "#/definitions/sub-schemas"
oneOf:
allOf:
- items:
$ref: "#/definitions/sub-schemas"
- description: Use 'enum' rather than 'oneOf' + 'const' entries
not:
items:
propertyNames:
const: const
required:
- const
patternProperties:
additionalProperties:
$ref: "#/definitions/sub-schemas"
properties:
if:
# Filter out overlapping json-schema and DT property names
not:
propertyNames:
const: type
then:
not:
$ref: "#/definitions/sub-schemas"
description: A json-schema keyword was found instead of a DT property name.
propertyNames:
description: Expected a valid DT property name
pattern: "^[#$a-zA-Z][a-zA-Z0-9,+\\-._@]{0,63}$"
additionalProperties:
$ref: "#/definitions/sub-schemas"
required:
type: array
items:
pattern: '^([a-zA-Z#][a-zA-Z0-9,+\-._@]{0,63}|\$nodename)$'
select:
$ref: "#/definitions/sub-schemas"
then:
$ref: "#/definitions/sub-schemas"
unevaluatedProperties:
oneOf:
- type: object
allOf:
- $ref: "#/definitions/sub-schemas"
- type: boolean
uniqueItems:
type: boolean
|
dtschema/meta-schemas/keywords.yaml
|
#-----------------------------------------------------------------------------------------------
# PROFILE LOCAL
#-----------------------------------------------------------------------------------------------
spring:
profiles: local
# ----------------------------------------
# ACTUATOR PROPERTIES
# ----------------------------------------
# ENDPOINTS WEB CONFIGURATION
management.endpoints.web.exposure.include: '*' # Endpoint IDs that should be included or '*' for all.
management.endpoints.web.exposure.exclude: # Endpoint IDs that should be excluded or '*' for all.
management.endpoints.web.base-path: /management # Base path for Web endpoints. Relative to server.servlet.context-path or management.server.servlet.context-path if management.server.port is configured.
management.endpoints.web.path-mapping.health: healthcheck
# HEALTH ENDPOINT
management.endpoint.health.cache.time-to-live: 0ms # Maximum time that a response can be cached.
management.endpoint.health.enabled: true # Whether to enable the health endpoint.
management.endpoint.health.roles: # Roles used to determine whether or not a user is authorized to be shown details. When empty, all authenticated users are authorized.
management.endpoint.health.show-details: always # When to show full health details.
# INFO CONTRIBUTORS (InfoContributorProperties)
management.info.build.enabled: true # Whether to enable build info.
management.info.defaults.enabled: true # Whether to enable default info contributors.
management.info.env.enabled: true # Whether to enable environment info.
management.info.git.enabled: true # Whether to enable git info.
management.info.git.mode: full # Mode to use to expose git information.
#------------------------------
# KAFKA
#------------------------------
spring.cloud.stream:
kafka.binder:
autoCreateTopics: true # true - only for develop and test environments
autoAddPartitions: true
healthTimeout: 10
requiredAcks: 1
minPartitionCount: 1
replicationFactor: 1
brokers: localhost
defaultBrokerPort: 9092
bindings:
request_channel_1:
destination: request-topic-1
content-type: application/json
binder: kafka
reply_channel_1:
destination: reply-topic-1
content-type: application/json
binder: kafka
request_channel_2:
destination: request-topic-2
content-type: application/json
binder: kafka
reply_channel_2:
destination: reply-topic-2
content-type: application/json
binder: kafka
spring.cloud.sleuth:
json:
enabled: false
# LOGGING
logging.config: classpath:log4j2-local.xml # Location of the logging configuration file. For instance, `classpath:logback.xml` for Logback.
logging.file: log4j2-local.xml # Log file name (for instance, `myapp.log`). Names can be an exact location or relative to the current directory.
logging.level: # Log levels severity mapping. For instance, `logging.level.org.springframework: DEBUG`.
org.springframework: INFO
io.swagger.models.*: ERROR
com.sample.*: DEBUG
|
processor-service/src/main/resources/application.yml
|
{{- if .Values.tempest.enabled }}
apiVersion: v1
kind: Pod
metadata:
name: keystone-tempest
annotations:
"helm.sh/hook": test-success
chart-version: {{.Chart.Version}}
configmap-etc-hash: {{ include (print $.Template.BasePath "/configmap-etc.yaml") . | sha256sum }}
configmap-bin-hash: {{ include (print $.Template.BasePath "/configmap-bin.yaml") . | sha256sum }}
labels:
app: {{ template "fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
system: openstack
component: keystone
type: test
spec:
{{- if .Values.rbac.enabled }}
serviceAccountName: keystone
{{- end }}
restartPolicy: Never
containers:
- name: keystone-tempest
image: {{ default "hub.global.cloud.sap" .Values.global.imageRegistry }}/{{ .Values.tempest.image }}:{{ .Values.tempest.imageTag }}
imagePullPolicy: {{ default "IfNotPresent" .Values.tempest.imagePullPolicy | quote }}
command:
- /usr/local/bin/kubernetes-entrypoint
env:
- name: COMMAND
value: "/scripts/keystone-tempest.sh"
- name: NAMESPACE
value: {{ .Release.Namespace }}
- name: DEPENDENCY_SERVICE
value: keystone
- name: OS_IDENTITY_API_VERSION
value: "3"
- name: OS_AUTH_URL
{{- if .Values.global.clusterDomain }}
value: http://keystone.{{.Release.Namespace}}.svc.{{.Values.global.clusterDomain}}:5000/v3
{{- else }}
value: http://keystone.{{.Release.Namespace}}.svc.kubernetes.{{.Values.global.region}}.{{.Values.global.tld}}:5000/v3
{{- end }}
- name: OS_REGION_NAME
value: {{ .Values.global.region }}
- name: OS_USERNAME
value: "admin"
- name: OS_PASSWORD
value: {{ required "A valid .Values.tempest.adminPassword required!" .Values.tempest.adminPassword | quote }}
- name: OS_USER_DOMAIN_NAME
value: "tempest"
- name: OS_DOMAIN_NAME
value: "tempest"
- name: OS_INTERFACE
value: "internal"
volumeMounts:
- name: etc-keystone
mountPath: /etc/keystone
- name: keystone-etc
mountPath: /etc/keystone/rally_deployment_config.json
subPath: rally_deployment_config.json
readOnly: true
- name: keystone-bin
mountPath: /scripts
readOnly: true
- name: etc-tempest
mountPath: /etc/tempest
- name: keystone-etc
mountPath: /etc/tempest/tempest.conf
subPath: tempest.conf
readOnly: true
- name: keystone-etc
mountPath: /etc/tempest/accounts.yaml
subPath: accounts.yaml
readOnly: true
- name: keystone-etc
mountPath: /etc/tempest/tempest-skip-list.yaml
subPath: tempest-skip-list.yaml
readOnly: true
volumes:
- name: etc-tempest
emptyDir: {}
- name: etc-keystone
emptyDir: {}
- name: keystone-etc
configMap:
name: keystone-etc
defaultMode: 0444
- name: keystone-bin
configMap:
name: keystone-bin
defaultMode: 0555
{{- end }}
|
openstack/keystone/templates/tests/keystone-tempest.yaml
|
name: Button
description: Use buttons to move through a transaction, aim to use only one button per page.
body: |
Button text should be short and describe the action the button performs.
This component is also [extended for use in govspeak](https://components.publishing.service.gov.uk/component-guide/govspeak/button).
These instances of buttons are added by Content Designers, ideally this duplication would not exist but we currently don't have shared markup
via our components within the generated [govspeak](https://github.com/alphagov/govspeak).
(This is a challenge to the reader)
accessibility_criteria: |
The button must:
- accept focus
- be focusable with a keyboard
- indicate when it has focus
- activate when focused and space is pressed
- activate when focused and enter is pressed
- have a role of button
- have an accessible label
govuk_frontend_components:
- button
examples:
default:
data:
text: "Submit"
link_button:
data:
text: "I'm really a link sssh"
href: "#"
link_button_target_blank:
data:
text: "I'm really a link sssh"
href: "http://www.gov.uk"
target: "_blank"
with_type:
description: Buttons default to having a type of submit, but in some cases it may be desirable to have a different type.
data:
text: "Button type button"
type: "button"
start_now_button:
data:
text: "Start now"
href: "#"
start: true
rel: "external"
secondary_button:
data:
text: "Secondary button"
secondary: true
secondary_quiet_button:
data:
text: "Secondary quiet button"
secondary_quiet: true
secondary_solid_button:
data:
text: "Secondary solid button"
secondary_solid: true
destructive_button:
data:
text: "Destructive button"
destructive: true
start_now_button_with_info_text:
data:
text: "Start now"
href: "#"
start: true
info_text: "Sometimes you want to explain where a user is going to."
start_now_button_with_info_text_and_margin_bottom:
data:
text: "Start now"
href: "#"
start: true
info_text: "Sometimes you want to explain where a user is going to and have a margin bottom"
margin_bottom: true
with_margin_bottom:
description: "Sometimes it's useful to break up a page, for example if a button is at the bottom of a page."
data:
text: "Submit"
margin_bottom: true
extreme_text:
data:
text: "I'm a button with lots of text to test how the component scales at extremes."
href: "#"
extreme_text_start_now_button:
data:
text: "I'm a start now button with lots of text to test how the component scales at extremes."
start: true
href: "#"
with_data_attributes:
data:
text: "Track this!"
margin_bottom: true
data_attributes: {
"module": "cross-domain-tracking",
"tracking-code": "GA-123ABC",
"tracking-name": "transactionTracker"
}
with_title_attribute:
data:
text: "Click me"
margin_bottom: true
title: "A button to click"
inline_layout:
description: Buttons will display adjacent to each other until mobile view, when they will appear on top of each other.
embed: |
<button class="gem-c-button govuk-button gem-c-button--inline">First button</button>
<%= component %>
data:
text: "Second button"
inline_layout: true
with_name_and_value_set:
description: |
By default, the button has no value or name set so it will not pass information when the form is submitted. This allows a name and value to be added so a button can add information to the form submission.
Please note that Internet Explorer 6 and 7 have **breaking bugs** when submitting a form with multiple buttons - this can [change what value is submitted by the button](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/button#Notes). Make sure to check your user needs and browser usage.
data:
text: "This is the button text"
value: "this_is_the_value"
name: "this_is_the_name"
with_js_classes:
description: Use `js-` prefixed classes only as interaction hooks – to query and operate on elements via JavaScript
data:
text: "Button"
classes: "js-selector-1 js-selector-2"
with_aria_label:
data:
text: "Button"
aria_label: "Button with custom label"
|
app/views/govuk_publishing_components/components/docs/button.yml
|
name: Vul Test
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Clone Repository
uses: actions/checkout@v2
with:
path: DongTai-agent-python
- name: Clone Vul Repository
uses: actions/checkout@v2
with:
repository: jinghao1/DockerVulspace
ref: main
path: DockerVulspace
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: "3.9"
- name: Prepare Agent code
run: |
cp -r ${{ github.workspace }}/DongTai-agent-python ${{ github.workspace }}/DockerVulspace/DongTai-agent-python
- name: Prepare Agent Config
shell: python
run: |
import json
example_filename = '${{ github.workspace }}/DockerVulspace/DongTai-agent-python/dongtai_agent_python/config-example.json'
filename = '${{ github.workspace }}/DockerVulspace/DongTai-agent-python/dongtai_agent_python/config.json'
with open(example_filename) as f:
data = json.load(f)
data['iast']['server']['token'] = '0f0025dff8311467f6da5b5109a469f1831aa782'
data['iast']['server']['url'] = 'https://iast-test.huoxian.cn/openapi'
with open(filename, 'w') as f:
json.dump(data, f)
- name: Run Vul Container
env:
PROJECT_VERSION: 'v1.0.${{ github.run_id }}'
run: |
cd ${{ github.workspace }}/DockerVulspace
docker-compose up -d
- name: Run Vul test
run: |
curl --fail --retry-delay 10 --retry 30 --retry-connrefused http://127.0.0.1:8003/api/django/demo/get_open?name=Data
cd ${{ github.workspace }}/DockerVulspace
docker-compose logs djangoweb flaskweb
bash ${{ github.workspace }}/DongTai-agent-python/dongtai_agent_python/tests/vul-test.sh \
django http://127.0.0.1:8003/api/django ${{ github.run_id }}
bash ${{ github.workspace }}/DongTai-agent-python/dongtai_agent_python/tests/vul-test.sh \
flask http://127.0.0.1:8003/api/flask ${{ github.run_id }}
docker-compose logs djangoweb flaskweb
|
.github/workflows/vul-test.yml
|
name: panos_ngfw_device_setting_9_1
label: 9.1 PAN_OS NGFW Device - Setting
description: |-
reference device setting configuration snippets
type: panos
labels:
collection:
- PAN-OS
variables:
- name: API_KEY_LIFETIME
description: lifetime for the api key in minutes
default: 525600
type_hint: text
help_text: sets the expiration period for generated API keys
snippets:
- name: ironskillet_device_setting_ctd
  xpath: /config/devices/entry[@<EMAIL>']/deviceconfig/setting
element: |-
<ctd>
<strip-x-fwd-for>yes</strip-x-fwd-for>
<x-forwarded-for>yes</x-forwarded-for>
<tcp-bypass-exceed-queue>no</tcp-bypass-exceed-queue>
<udp-bypass-exceed-queue>no</udp-bypass-exceed-queue>
</ctd>
- name: ironskillet_device_setting_management
xpath: /config/devices/entry[@<EMAIL>']/deviceconfig/setting
element: |-
<management>
<enable-log-high-dp-load>yes</enable-log-high-dp-load>
<max-rows-in-csv-export>1048576</max-rows-in-csv-export>
<api>
<key>
<lifetime>{{ API_KEY_LIFETIME }}</lifetime>
</key>
</api>
<admin-lockout>
<failed-attempts>5</failed-attempts>
<lockout-time>30</lockout-time>
</admin-lockout>
<idle-timeout>10</idle-timeout>
<auto-acquire-commit-lock>yes</auto-acquire-commit-lock>
</management>
- name: ironskillet_device_setting_wildfire
xpath: /config/devices/entry[@<EMAIL>']/deviceconfig/setting
element: |-
<wildfire>
<file-size-limit>
<entry name="pe">
<size-limit>16</size-limit>
</entry>
<entry name="apk">
<size-limit>30</size-limit>
</entry>
<entry name="pdf">
<size-limit>3072</size-limit>
</entry>
<entry name="ms-office">
<size-limit>16384</size-limit>
</entry>
<entry name="jar">
<size-limit>5</size-limit>
</entry>
<entry name="flash">
<size-limit>5</size-limit>
</entry>
<entry name="MacOSX">
<size-limit>10</size-limit>
</entry>
<entry name="archive">
<size-limit>50</size-limit>
</entry>
<entry name="linux">
<size-limit>50</size-limit>
</entry>
<entry name="script">
<size-limit>2000</size-limit>
</entry>
</file-size-limit>
<report-benign-file>yes</report-benign-file>
<report-grayware-file>yes</report-grayware-file>
</wildfire>
- name: ironskillet_device_setting_config
  xpath: /config/devices/entry[@<EMAIL>']/deviceconfig/setting
element: |-
<config>
<rematch>yes</rematch>
</config>
- name: ironskillet_device_setting_application
  xpath: /config/devices/entry[@<EMAIL>']/deviceconfig/setting
element: |-
<application>
<notify-user>yes</notify-user>
<bypass-exceed-queue>no</bypass-exceed-queue>
</application>
- name: ironskillet_device_setting_logging
  xpath: /config/devices/entry[@<EMAIL>']/deviceconfig/setting
element: |-
<logging>
<log-suppression>no</log-suppression>
</logging>
- name: ironskillet_device_setting_tcp
  xpath: /config/devices/entry[@<EMAIL>']/deviceconfig/setting
element: |-
<tcp>
<urgent-data>clear</urgent-data>
<drop-zero-flag>yes</drop-zero-flag>
<bypass-exceed-oo-queue>no</bypass-exceed-oo-queue>
<check-timestamp-option>yes</check-timestamp-option>
<strip-mptcp-option>yes</strip-mptcp-option>
</tcp>
|
panos_v9.1/ngfw/panos_ngfw_device_setting_9_1.skillet.yaml
|
$schema: "http://json-schema.org/draft-06/schema#"
title: networkservices v1 UrlRedirect export schema
description: A gcloud export/import command YAML validation schema.
type: object
additionalProperties: false
properties:
COMMENT:
type: object
description: User specified info ignored by gcloud import.
additionalProperties: false
properties:
template-id:
type: string
region:
type: string
description:
type: string
date:
type: string
version:
type: string
UNKNOWN:
type: array
description: Unknown API fields that cannot be imported.
items:
type: string
hostRedirect:
description: |-
The host that will be used in the redirect response instead of the one
that was supplied in the request. The value must be between 1 and 255
characters.
type: string
httpsRedirect:
description: |-
If set to true, the URL scheme in the redirected request is set to https.
If set to false, the URL scheme of the redirected request will remain the
same as that of the request. This can only be set if there is at least one
(1) edgeSslCertificate set on the service.
type: boolean
pathRedirect:
description: |-
The path that will be used in the redirect response instead of the one
that was supplied in the request. pathRedirect cannot be supplied together
with prefixRedirect. Supply one alone or neither. If neither is supplied,
the path of the original request will be used for the redirect. The path
value must be between 1 and 1024 characters.
type: string
prefixRedirect:
description: |-
The prefix that replaces the prefixMatch specified in the routeRule,
retaining the remaining portion of the URL before redirecting the request.
prefixRedirect cannot be supplied together with pathRedirect. Supply one
alone or neither. If neither is supplied, the path of the original request
will be used for the redirect. The prefix value must be between 1 and 1024
characters.
type: string
redirectResponseCode:
description: |-
The HTTP Status code to use for this RedirectAction. The supported values
are: - MOVED_PERMANENTLY_DEFAULT, which is the default value and
corresponds to 301. - FOUND, which corresponds to 302. - SEE_OTHER which
corresponds to 303. - TEMPORARY_REDIRECT, which corresponds to 307. in
this case, the request method will be retained. - PERMANENT_REDIRECT,
which corresponds to 308. in this case, the request method will be
retained.
type: string
enum:
- FOUND
- MOVED_PERMANENTLY_DEFAULT
- PERMANENT_REDIRECT
- SEE_OTHER
- TEMPORARY_REDIRECT
stripQuery:
description: |-
If set to true, any accompanying query portion of the original URL is
removed prior to redirecting the request. If set to false, the query
portion of the original URL is retained. The default is set to false.
type: boolean
|
lib/googlecloudsdk/schemas/networkservices/v1/UrlRedirect.yaml
|
---
- under_construction: false
unit: PJ
percentage: false
group: Supply
sub_group: collective_heat
show_point_label: false
growth_chart: false
key: source_of_heat_production
max_axis_value:
min_axis_value:
hidden: false
requires_merit_order: false
dependent_on: ''
output_element_type_name: vertical_stacked_bar
- under_construction: false
unit: ''
percentage: false
group: Supply
sub_group: collective_heat
show_point_label: false
growth_chart: false
key: chp_properties
max_axis_value:
min_axis_value:
hidden: false
requires_merit_order: false
dependent_on: ''
output_element_type_name: html_table
- under_construction: false
unit: MJ
percentage: false
group: Supply
sub_group: collective_heat
show_point_label: false
growth_chart: false
key: collective_heat_mekko
max_axis_value:
min_axis_value:
hidden: false
requires_merit_order: false
dependent_on: ''
output_element_type_name: mekko
related_output_element_key: heat_network_demand
- under_construction: false
unit: MW
percentage: false
group: Supply
sub_group: collective_heat
show_point_label: false
growth_chart: false
key: heat_network_demand
max_axis_value:
min_axis_value:
hidden: false
requires_merit_order: false
dependent_on: ''
output_element_type_name: demand_curve
- under_construction: false
unit: MW
percentage: false
group: Supply
sub_group: collective_heat
show_point_label: false
growth_chart: false
key: heat_network_production
max_axis_value:
min_axis_value:
hidden: false
requires_merit_order: false
dependent_on: ''
output_element_type_name: demand_curve
- under_construction: false
unit: MJ
percentage: false
group: Supply
sub_group: collective_heat
show_point_label: false
growth_chart: false
key: industrial_heat_mekko
max_axis_value:
min_axis_value:
hidden: false
requires_merit_order: false
dependent_on:
description:
output_element_type_name: mekko
- under_construction: false
unit: MWh
percentage: false
group: Supply
sub_group: collective_heat
show_point_label: false
growth_chart: false
key: heat_network_storage
max_axis_value:
min_axis_value:
hidden: false
requires_merit_order: true
dependent_on: ''
output_element_type_name: demand_curve
- under_construction: false
  unit: Eur/MWh
percentage: false
group: Supply
sub_group: collective_heat
show_point_label: false
growth_chart: false
key: heat_cost_capacity
max_axis_value:
min_axis_value:
hidden: false
requires_merit_order: true
dependent_on: ''
output_element_type_name: cost_capacity_bar
|
config/interface/output_elements/supply_collective_heat.yml
|
version: "2.1"
# -----BEGIN Environment Variables-----
# Environment variables required for deployment:
#
# - PYPI_PASSWORD := PyPI password or API token.
# - PYPI_USERNAME := PyPI username. For API tokens, use "__token__".
# - TWINE_NON_INTERACTIVE := Do not interactively prompt for credentials if they are missing.
# - TWINE_REPOSITORY_URL := The repository (package index) URL to register the package to.
# x-deploy-environment := Deployment environment variables
x-deploy-environment: &x-deploy-environment
TWINE_NON_INTERACTIVE: "true"
TWINE_REPOSITORY_URL: https://upload.pypi.org/legacy/
# -----END Environment Variables-----
jobs:
test:
parameters:
python_version:
type: string
docker:
- image: python:<< parameters.python_version >>
working_directory: ~/repo
steps:
- checkout
- run:
name: install dependencies
command: |
python3 -m venv venv
. venv/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools wheel
pip install -r requirements/test.txt
- run:
name: Check Dependencies
command: |
. venv/bin/activate
pip check
- run:
name: run tests
command: |
. venv/bin/activate
make lint
# Set Tox environment to the installed Python version.
TOXENV=$(
python -c 'import sys; v = sys.version_info; print("py{}{}".format(v.major, v.minor))'
)
tox -e "$TOXENV"
codecov
make test-coverage-report-console
make test-coverage-report-html
- store_artifacts:
path: test-reports
destination: test-reports
dist:
docker:
- image: python:3.7.2
working_directory: ~/repo
steps:
- checkout
- run:
name: install dependencies
command: |
python3 -m venv venv
. venv/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools wheel
pip install -r requirements/release.txt
- run:
name: Check Dependencies
command: |
. venv/bin/activate
pip check
- run:
name: make dist
command: |
. venv/bin/activate
make dist
- store_artifacts:
path: dist
destination: dist
- persist_to_workspace:
root: ~/repo
paths:
- dist
- venv
deploy:
docker:
- image: python:3.7.2
environment:
<<: *x-deploy-environment
working_directory: ~/repo
steps:
- checkout
- attach_workspace:
at: ~/repo
- deploy:
name: Upload Artifacts to Repository
command: |
. venv/bin/activate
make upload-release \
TWINE_USERNAME="${PYPI_USERNAME:?}" \
TWINE_PASSWORD="${PYPI_PASSWORD:?}"
workflows:
version: 2
ci:
jobs:
- test:
matrix:
parameters:
python_version:
- "3.7.2"
- "3.8.2" # Ubuntu 20.04 LTS
- "3.8.3" # Latest version
- dist
- deploy:
requires:
- dist
filters:
branches:
only:
- master
|
.circleci/config.yml
|
title: 앱 작성 및 배포
metadata:
title: '과정 AZ-300T04-A: 앱 작성 및 배포'
description: '과정 AZ-300T04-A: 앱 작성 및 배포'
uid: course.az-300t04
courseNumber: 'AZ-300T04-A'
hoursToComplete: 24
iconUrl: /media/learn/certification/course.svg
skillsGained:
- skill: 셀 명령을 사용하여 App Service Web App 생성
- skill: 배경 작업 생성
- skill: Swagger를 사용하여 API 문서화
- skill: 신뢰할 수 있는 서비스 생성
- skill: Reliable Actors 앱 생성
- skill: 신뢰할 수 있는 컬렉션으로 실습
- skill: Azure Container Registry 이해
- skill: Azure Container Instances 사용
learningPartnersLink: https://www.microsoft.com/learning/partners.aspx
locales:
- en
levels:
- advanced
roles:
- solution-architect
products:
- azure
- azure-active-directory
- azure-application-gateway
- azure-virtual-machines
- azure-virtual-network
- azure-storage
- azure-resource-manager
- azure-site-recovery
- azure-policy
- azure-logic-apps
- azure-functions
- azure-event-grid
- azure-service-bus
- azure-app-service
- azure-expressroute
- azure-vpn-gateway
- azure-kubernetes-service
- azure-container-instances
- azure-cosmos-db
- azure-sql-database
- azure-event-hubs
exams:
- uid: exam.az-300
summary: |-
본 과정에서는 IT 전문가들이 작업 및 비즈니스 프로세스를 워크플로로 자동화하여 기업 또는 조직 전반에서 앱, 데이터, 시스템 및 서비스를 통합하는 Logic App 솔루션을 구축하는 방법에 대해 설명합니다. Logic Apps는 Azure의 클라우드 서비스로, 클라우드에서, 온 프레미스에서 또는 둘 모두에서 앱 통합, 데이터 통합, 시스템 통합, 엔터프라이즈 애플리케이션 통합(EAI) 및 B2B(Business-to-Business) 통신을 위한 확장 가능한 솔루션을 설계 및 생성하는 방법을 단순화합니다.
또한 Azure Service Fabric이 확장 가능하고 신뢰할 수 있는 마이크로 서비스 및 컨테이너를 쉽게 패키징, 배포 및 관리할 수 있는 분산 시스템 플랫폼임을 설명합니다. 또한 Service Fabric은 클라우드 네이티브 애플리케이션을 개발하고 관리하는 데 있어 중요한 챌린지를 해결합니다. 개발자와 관리자는 복잡한 인프라 문제를 방지하고 확장가능하고 신뢰할 수 있으며 관리할 수 있는 까다로운 중요 업무용 워크로드를 구현하는 데 집중할 수 있습니다. Service Fabric은 컨테이너에서 실행되는 이러한 엔터프라이즈급 계층 1 클라우드 규모의 애플리케이션을 구축 및 관리하기 위한 차세대 플랫폼을 나타냅니다.
마지막으로 AKS(Azure Kubernetes Service)를 통해 어떻게 Azure에 관리형 Kubernetes 클러스터를 간편하게 구현할 수 있는지 확인할 수 있습니다. AKS는 이러한 책임의 상당 부분을 Azure에 오프로딩함으로써 Kubernetes 관리의 복잡성과 운영 오버헤드를 줄입니다. 호스팅된 Kubernetes 서비스인 상태 모니터링 및 유지 보수와 같은 중요한 작업을 처리합니다.
#### 대상 그룹 프로필
성공적인 Cloud Solutions Architect는 운영 체제, 가상화, 클라우드 인프라, 저장소 구조, 청구 및 네트워킹에 대한 실제 경험을 바탕으로 이 역할을 시작합니다.
outlineSection: |-
### 모듈 1: PaaS를 사용한 웹 애플리케이션 생성
이 모듈에서는 웹 애플리케이션, REST API 및 모바일 백엔드를 호스팅하기 위한 Azure App Service Web Apps에 대한 개요를 제공합니다. 항목에는 셀 명령을 사용한 App Service Web App 생성, 배경 과제 생성, Swagger를 사용한 API 문서화 뿐만아니라 Logic Apps가 작업 및 비즈니스 프로세스를 워크플로로 자동화하여 기업 또는 조직 전반에서 애플리케이션, 데이터, 시스템 및 서비스를 통합하는 솔루션을 구축하는 데 도움을 주는 방법에 대한 설명을 포함합니다.
이 모듈을 완료하면 학생들은 다음을 수행할 수 있습니다:
- 셀 명령을 사용하여 App Service Web App 생성
- 백그라운드 작업 생성
- Swagger를 사용하여 API 문서화
### 모듈 2: Service Fabric에서 실행하는 앱 및 서비스 생성
이 모듈에서는 확장 가능하고 신뢰할 수 있는 마이크로 서비스 및 컨테이너를 쉽게 패키징, 배포 및 관리할 수 있는 분산 시스템 플랫폼으로써의 Azure Service Fabric의 개요를 설명합니다. 또한 클라우드 네이티브 애플리케이션을 개발하고 관리하는 데 있어 난제를 해결합니다.
또한 신뢰할 수 있는 서비스 생성, Reliable Actors 앱 생성, 신뢰할 수 있는 컬렉션을 통한 작업 등이 추가적으로 제공됩니다.
이 모듈을 완료하면 학생들은 다음을 수행할 수 있습니다:
- 신뢰할 수 있는 서비스 생성
- Reliable Actors 앱 생성
- 신뢰할 수 있는 컬렉션으로 실습
### 모듈 3: Azure Kubernetes Service 사용
본 모듈에서는 Azure에서 Kubernetes 클러스터를 배포 및 관리하기 위해 AKS(Azure Kubernetes Service)를 중심으로 다룹니다. 이 항목에는 상태 모니터링 및 유지 보수와 같은 Azure에 대한 책임의 상당 부분을 오프로딩하여 Kubernet 관리에 따른 운영 오버헤드를 줄이는 방법이 포함됩니다.
항목에는 Azure Container Registry 및 Azure Container Instances가 추가로 포함됩니다.
이 모듈을 완료하면 학생들은 다음을 수행할 수 있습니다:
- Azure Container Registry 이해
- Azure Container Instances 사용
|
learn-certs-pr/courses/az-300t04.yml
|
--- !ruby/object:RI::ClassDescription
attributes:
- !ruby/object:RI::Attribute
comment:
- !ruby/struct:SM::Flow::P
body: Ruby Gem::Specification containing the metadata for this package. The name,
version and package_files are automatically determined from the gemspec and don't
need to be explicitly provided.
name: gem_spec
rw: RW
class_methods:
- !ruby/object:RI::MethodSummary
name: new
comment:
- !ruby/struct:SM::Flow::P
body: Create a package based upon a Gem::Specification. Gem packages, as well as
zip files and tar/gzipped packages can be produced by this task.
- !ruby/struct:SM::Flow::P
body: "In addition to the Rake targets generated by Rake::PackageTask, a Gem::PackageTask \
will also generate the following tasks:"
- !ruby/object:SM::Flow::LIST
contents:
- !ruby/struct:SM::Flow::LI
label: <b>"<em>package_dir</em>/<em>name</em>-<em>version</em>.gem"</b>
body: Create a RubyGems package with the given name and version.
type: :LABELED
- !ruby/struct:SM::Flow::P
body: "Example using a Gem::Specification:"
- !ruby/struct:SM::Flow::VERB
body: |
require 'rubygems'
require 'rubygems/package_task'
spec = Gem::Specification.new do |s|
s.platform = Gem::Platform::RUBY
s.summary = "Ruby based make-like utility."
s.name = 'rake'
s.version = PKG_VERSION
s.requirements << 'none'
s.require_path = 'lib'
s.autorequire = 'rake'
s.files = PKG_FILES
s.description = <<-EOF
Rake is a Make-like program implemented in Ruby. Tasks
and dependencies are specified in standard Ruby syntax.
EOF
end
Gem::PackageTask.new(spec) do |pkg|
pkg.need_zip = true
pkg.need_tar = true
end
constants: []
full_name: Gem::PackageTask
includes: []
instance_methods:
- !ruby/object:RI::MethodSummary
name: define
- !ruby/object:RI::MethodSummary
name: init
name: PackageTask
superclass: Rake::PackageTask
|
tools/ironruby/lib/ironruby/gems/1.8/doc/rubygems-1.3.6/ri/Gem/PackageTask/cdesc-PackageTask.yaml
|
---
# https://xxx.execute-api.us-east-1.amazonaws.com/prod/v1/enqueue
Description: API Gateway integration with SQS
Outputs:
ApiEndpoint:
    Description: Webhook URL
Value: !Join
- ''
- - https://
- !Ref 'APIGateway'
- .execute-api.
- !Ref 'AWS::Region'
- .amazonaws.com/
- queue/
- !Ref 'endpointID'
QueueArnSQS:
Description: ARN of SQS Queue
Value: !GetAtt 'DestQueue.Arn'
AccessPolicy:
Description: Name of policy for robot access to queue
Value: !Join
- ''
- - !Ref 'AWS::Region'
- '-'
- !Ref 'queueName'
- '-access'
Parameters:
queueName:
Description: The name of the SQS queue to create.
Type: String
endpointID:
    Description: A random identifier for the endpoint URL.
Type: String
Resources:
APIGateway:
Properties:
Description: Webhook for queueing git builds to sqs
Name: APIGateway
Type: AWS::ApiGateway::RestApi
APIGatewayRole:
Properties:
AssumeRolePolicyDocument:
Statement:
- Action:
- sts:AssumeRole
Effect: Allow
Principal:
Service:
- apigateway.amazonaws.com
Version: '2012-10-17'
Path: /
Policies:
- PolicyDocument:
Statement:
- Action: sqs:SendMessage
Effect: Allow
Resource: !GetAtt 'DestQueue.Arn'
Version: '2012-10-17'
PolicyName: apig-sqs-send-msg-policy
RoleName: apig-sqs-send-msg-role
Type: AWS::IAM::Role
DestQueue:
Properties:
DelaySeconds: 0
MaximumMessageSize: 262144
MessageRetentionPeriod: 604800 # 1 week
QueueName: !Ref 'queueName'
ReceiveMessageWaitTimeSeconds: 0
VisibilityTimeout: 7200 # 2 hours
Type: AWS::SQS::Queue
RobotPolicy:
Type: AWS::IAM::ManagedPolicy
Properties:
Description: Policy allowing robot to access queue
ManagedPolicyName: !Join
- ''
- - !Ref 'AWS::Region'
- '-'
- !Ref 'queueName'
- '-access'
PolicyDocument:
Statement:
- Action: SQS:*
Effect: Allow
Resource: !GetAtt 'DestQueue.Arn'
Sid: Sid1517269801413
Version: '2012-10-17'
PostMethod:
Properties:
AuthorizationType: NONE
HttpMethod: POST
Integration:
Credentials: !GetAtt 'APIGatewayRole.Arn'
IntegrationHttpMethod: POST
IntegrationResponses:
- StatusCode: '200'
PassthroughBehavior: NEVER
RequestParameters:
integration.request.header.Content-Type: '''application/x-www-form-urlencoded'''
RequestTemplates:
application/json: Action=SendMessage&MessageBody=$input.body
Type: AWS
Uri: !Join
- ''
- - 'arn:aws:apigateway:'
- !Ref 'AWS::Region'
- :sqs:path/
- !Ref 'AWS::AccountId'
- /
- !Ref 'queueName'
MethodResponses:
- ResponseModels:
application/json: Empty
StatusCode: '200'
ResourceId: !Ref 'enqueueResource'
RestApiId: !Ref 'APIGateway'
Type: AWS::ApiGateway::Method
enqueueResource:
Properties:
ParentId: !GetAtt 'APIGateway.RootResourceId'
PathPart: !Ref 'endpointID'
RestApiId: !Ref 'APIGateway'
Type: AWS::ApiGateway::Resource
prodDeployment:
DependsOn: PostMethod
Properties:
RestApiId: !Ref 'APIGateway'
Type: AWS::ApiGateway::Deployment
prodStage:
Properties:
DeploymentId: !Ref 'prodDeployment'
RestApiId: !Ref 'APIGateway'
StageName: build
Type: AWS::ApiGateway::Stage
|
resources/aws/sqs-webhook.yaml
|
name: build-push-docker
# Controls when the workflow will run
on:
# Triggers the workflow on push events for the master branch
push:
branches: [ master ]
paths:
- 'Dockerfile'
- '**.sh'
- '**.txt'
pull_request:
branches: [ master ]
paths:
- 'Dockerfile'
- '**.sh'
- '**.txt'
  # Allows this workflow to be run manually from the Actions tab
workflow_dispatch:
jobs:
build:
# The type of runner that the job will run on
runs-on: ubuntu-latest
steps:
# Checkout repository
- uses: actions/checkout@v2
- name: Set Timezone
uses: szenius/set-timezone@v1.0
with:
timezoneLinux: "America/Chicago"
- name: Log in to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1.12.0
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Generate Version Number
run: |
eval "$(date +'today=%F now=%s')"
IMG_VER="$(date +%Y.%m.%d).$((now - $(date -d "$today 0" +%s)))"
echo "IMG_VER=${IMG_VER}" >> $GITHUB_ENV
BUILD_DATE=$(date +%Y.%m.%d)
echo "BUILD_DATE=${BUILD_DATE}" >> $GITHUB_ENV
- name: Docker Setup Buildx
uses: docker/setup-buildx-action@v1.6.0
- name: Build and push Docker images
uses: docker/build-push-action@v2.7.0
with:
build-args: BUILD_DATE=${{ env.BUILD_DATE }}
context: .
pull: true
push: ${{ github.event_name != 'pull_request' }}
tags: cubeworx/mcbe-server:${{ env.IMG_VER }},cubeworx/mcbe-server:latest
- uses: ethomson/send-tweet-action@v1.0.0
if: github.event_name != 'pull_request'
with:
status: "New version of CubeWorx Minecraft Bedrock Edition Server Image released: https://hub.docker.com/r/cubeworx/mcbe-server/tags?page=1&ordering=last_updated&name=${{ env.IMG_VER }}"
consumer-key: ${{ secrets.TWITTER_CONSUMER_API_KEY }}
consumer-secret: ${{ secrets.TWITTER_CONSUMER_API_SECRET }}
access-token: ${{ secrets.TWITTER_ACCESS_TOKEN }}
access-token-secret: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
|
.github/workflows/build-push-docker.yml
|
name: publishes nuget package to artifactory
on:
workflow_dispatch:
inputs:
# version_type:
# description: 'Release Type'
# required: true
# default: alpha | beta | release
project_name:
description: 'Project to Build'
required: true
default: Auth
env:
JDA_REGISTRY: "https://jdasoftware.jfrog.io/artifactory/api/nuget/nuget-release-local"
# VERSION_TYPE: ${{ github.event.inputs.version_type }}
PROJECT_NAME: ${{ github.event.inputs.project_name }}
jobs:
publish:
runs-on: macos-latest
timeout-minutes: 20
steps:
- uses: actions/checkout@v2
# - uses: nuget/setup-nuget@v1
# with:
# nuget-version: '5.x'
- name: Build nuget
run: |
nuget restore source/Xamarin.Auth-Library.sln
sh ./build.sh --target=clean && sh ./build.sh --target=libs && sh ./build.sh --target=nuget
#msbuild source/Core/Xamarin.Auth.XamarinAndroid/Xamarin.Auth.XamarinAndroid.csproj /p:Configuration=Release
#msbuild source/Core/Xamarin.Auth.XamarinAndroid/Xamarin.Auth.XamarinAndroid.csproj /t:pack /p:Configuration=Release
# nuget push BlueYonder.Xamarin.Authentication.Liam/bin/Release/BlueYonder.Xamarin.Authentication.Liam.*.nupkg -Source Artifactory
- name: Upload Artifact
uses: actions/upload-artifact@v2
with:
name: Nugets
path: '**/*.nupkg'
# - name: Update version code
# run: |
# chmod +x .github/scripts/update_version_code.sh
# .github/scripts/update_version_code.sh BlueYonder.Xamarin.Analytics.AppCenter/BlueYonder.Xamarin.Analytics.AppCenter.csproj
# - name: Setup nuget registry
# run: |
# nuget setapikey ${{secrets.ARTIFACTORY_AUTH}} -source ${JDA_REGISTRY}
# nuget sources add -Name Artifactory -source ${JDA_REGISTRY} -User mobility-ci -pass ${{secrets.ARTIFACTORY_PASS}}
# - name: Nuget restore
# run: |
# nuget restore MobilePlatform.sln
# - name: Publish nuget
# run: |
# chmod +x .github/scripts/build_project.sh
# .github/scripts/build_project.sh $PROJECT_NAME
|
.github/workflows/publish_nuget.yaml
|
plugin:
name: 'reklamus.push34::lang.plugin.name'
description: 'reklamus.push34::lang.plugin.description'
author: Reklamus
icon: oc-icon-adn
homepage: ''
permissions:
manage_push34:
tab: 'reklamus.push34::lang.permission.push34'
label: 'reklamus.push34::lang.permission.push34'
manage_design:
tab: 'reklamus.push34::lang.permission.push34'
label: 'reklamus.push34::lang.permission.design'
manage_app:
tab: 'reklamus.push34::lang.permission.push34'
label: 'reklamus.push34::lang.permission.app'
manage_notification:
tab: 'reklamus.push34::lang.permission.push34'
label: 'reklamus.push34::lang.permission.notification'
manage_registrant:
tab: 'reklamus.push34::lang.permission.push34'
label: 'reklamus.push34::lang.permission.registrant'
manage_plan:
tab: 'reklamus.push34::lang.permission.push34'
label: 'reklamus.push34::lang.permission.plan'
manage_subscriber:
tab: 'reklamus.push34::lang.permission.push34'
label: 'reklamus.push34::lang.permission.subscribers'
manage_included:
tab: 'reklamus.push34::lang.permission.push34'
label: 'reklamus.push34::lang.permission.included'
navigation:
main-menu-push34:
label: 'reklamus.push34::lang.menu.push34'
url: reklamus/push34/designcontroller
icon: icon-adn
permissions:
- manage_push34
order: '99'
sideMenu:
side-menu-design:
label: 'reklamus.push34::lang.menu.design'
url: reklamus/push34/designcontroller
icon: icon-sitemap
permissions:
- manage_design
side-menu-app:
label: 'reklamus.push34::lang.menu.app'
url: reklamus/push34/appcontroller
icon: icon-sheqel
permissions:
- manage_app
side-menu-notification:
label: 'reklamus.push34::lang.menu.notification'
url: reklamus/push34/notificationcontroller
icon: icon-circle-o-notch
permissions:
- manage_notification
side-menu-registrant:
label: 'reklamus.push34::lang.menu.registrant'
url: reklamus/push34/registrantcontroller
icon: icon-users
permissions:
- manage_registrant
side-menu-plan:
label: 'reklamus.push34::lang.menu.plan'
url: reklamus/push34/plancontroller
icon: icon-shopping-cart
permissions:
- manage_plan
side-menu-user:
label: User
url: reklamus/push34/usercontroller
icon: icon-user
|
plugins/reklamus/push34/plugin.yaml
|
# parameters
# Legacy mode
#
# if legacy set to true we keep the old reverse domain notation for CSI driver name (com.nutanix.csi).
# needs to be set to true only if upgrading and initially installed with helm package before 2.2.x
legacy: {{ legacy }}
# OS settings
#
# Starting v2.3.1 CSI driver is OS independent, this value is reserved
os: {{ os }}
# kubeletDir allows overriding the host location of kubelet's internal state.
kubeletDir: "{{ kubeletDir }}"
# Global Settings for all pods
nodeSelector: {{ nodeSelector }}
tolerations: {{ tolerations }}
imagePullPolicy: {{ imagePullPolicy }}
# Storage Class settings
#
# choose for which mode (Volume, File, Dynamic File) storageclass need to be created
volumeClass: {{ volumeClass }}
volumeClassName: "{{ volumeClassName }}"
fileClass: {{ fileClass }}
fileClassName: "{{ fileClassName }}"
dynamicFileClass: {{ dynamicFileClass }}
dynamicFileClassName: "{{ dynamicFileClassName }}"
# Default Storage Class settings
#
# Decide which storageclass will be the default
# values are: node, volume, file, dynfile
defaultStorageClass: {{ defaultStorageClass }}
# Nutanix Prism Elements settings
#
# Allow dynamic creation of Volumes and Fileshare
# needed if volumeClass or dynamicFileClass is set to true
prismEndPoint: "{{ prismEndPoint }}"
username: "{{ username }}"
password: "{{ password }}"
secretName: {{ secretName }}
createSecret: {{ createSecret }}
# Volumes Settings
#
{% if volumeClass %}
storageContainer: {{ storageContainer }}
fsType: {{ fsType }}
lvmVolume: {{ lvmVolume }}
lvmDisks: {{ lvmDisks }}
networkSegmentation: {{ networkSegmentation }}
{% endif %}
# Files Settings
#
{% if fileClass %}
fileHost: {{ fileHost }}
filePath: {{ filePath }}
{% endif %}
# Dynamic Files Settings
#
{% if dynamicFileClass %}
fileServerName: {{ fileServerName }}
{% endif %}
# Volume metrics and CSI operations metrics configuration
#
servicemonitor:
enabled: {{ servicemonitor.enabled }}
labels:
# This should match the serviceMonitorSelector logic configured
# on the prometheus.
k8s-app: csi-driver
# Pod specific Settings
#
provisioner:
image: quay.io/karbon/ntnx-csi:v2.5.0
nodeSelector: {}
tolerations: []
node:
image: quay.io/karbon/ntnx-csi:v2.5.0
nodeSelector: {}
tolerations: []
sidecars:
registrar:
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
provisioner:
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
snapshotter:
image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
imageBeta: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3
resizer:
image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0
livenessprobe:
image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0
|
roles/install_csi_driver/templates/ntnx-helm-csi-values.yaml
|
site_name: HSE Project
site_url: https://github.com/hse-project
site_description: >-
Fast embeddable key-value store designed for SSDs and persistent memory
# HSE repo information (source code not docs)
# Setting these results in hse repo link and stats in doc header
repo_url: https://github.com/hse-project/hse
repo_name: hse-project/hse
edit_uri: ""
# Documentation copyright notice
copyright: Copyright © 2020 - 2021 Micron Technology, Inc.
# HSE colors set in docs/stylesheets/extra.css
extra_css:
- stylesheets/extra.css
# Theme configuration
theme:
name: material
language: en
favicon: img/favicon.png
logo: img/logo.png
palette:
- media: "(prefers-color-scheme: light)"
scheme: default
toggle:
icon: material/weather-sunny
name: Switch to dark mode
- media: "(prefers-color-scheme: dark)"
scheme: slate
toggle:
icon: material/weather-night
name: Switch to light mode
features:
- navigation.tabs
- navigation.sections
# Theme customization
extra:
version:
# mike is a natively supported external utility for MkDocs versioning
provider: mike
# Useful markdown extensions
markdown_extensions:
- meta
- admonition
- footnotes
- pymdownx.details
- pymdownx.superfences
- pymdownx.tabbed:
alternate_style: true
- pymdownx.emoji:
emoji_index: !!python/name:materialx.emoji.twemoji
emoji_generator: !!python/name:materialx.emoji.to_svg
# Plugins providing additional functionality for this theme
plugins:
- search
# Page navigation
nav:
- Home: index.md
- Getting Started:
- About: gs/about.md
- KVDB Storage: gs/storage.md
- Configuration Parameters: gs/params.md
- Command Line Interface: gs/cli.md
- System Requirements: gs/sysreqs.md
- Developing HSE Apps:
- Concepts: dev/concepts.md
- Best Practices: dev/bp.md
- Limits: dev/limits.md
- Resources:
- Community: res/community.md
- Benchmarking Tips: res/benchmarking.md
- API Reference:
- Overview: api/index.md
- Version Information: api/Modules/group__VERSION.md
- Errors: api/Modules/group__ERRORS.md
- Subsystem: api/Modules/group__SUBSYS.md
- Key-Value Database (KVDB): api/Modules/group__KVDB.md
- Key-Value Store (KVS): api/Modules/group__KVS.md
- Transactions: api/Modules/group__TRANSACTIONS.md
- Cursors: api/Modules/group__CURSORS.md
- Examples: api/Pages/examples.md
- Files: api/Files/index.md
|
v2/mkdocs.yml
|
name: release-please
on:
push:
branches:
- master
jobs:
release-please:
runs-on: ubuntu-latest
steps:
- uses: google-github-actions/release-please-action@v2
id: release
with:
package-name: node-gyp
release-type: node
changelog-types: >
[{"type":"feat","section":"Features","hidden":false},
{"type":"fix","section":"Bug Fixes","hidden":false},
{"type":"bin","section":"Core","hidden":false},
{"type":"gyp","section":"Core","hidden":false},
{"type":"lib","section":"Core","hidden":false},
{"type":"src","section":"Core","hidden":false},
{"type":"test","section":"Tests","hidden":false},
{"type":"build","section":"Core","hidden":false},
{"type":"clean","section":"Core","hidden":false},
{"type":"configure","section":"Core","hidden":false},
{"type":"install","section":"Core","hidden":false},
{"type":"list","section":"Core","hidden":false},
{"type":"rebuild","section":"Core","hidden":false},
{"type":"remove","section":"Core","hidden":false},
{"type":"deps","section":"Core","hidden":false},
{"type":"python","section":"Core","hidden":false},
{"type":"lin","section":"Core","hidden":false},
{"type":"linux","section":"Core","hidden":false},
{"type":"mac","section":"Core","hidden":false},
{"type":"macos","section":"Core","hidden":false},
{"type":"win","section":"Core","hidden":false},
{"type":"windows","section":"Core","hidden":false},
{"type":"zos","section":"Core","hidden":false},
{"type":"doc","section":"Doc","hidden":false},
{"type":"docs","section":"Doc","hidden":false},
{"type":"readme","section":"Doc","hidden":false},
{"type":"chore","section":"Miscellaneous","hidden":false},
{"type":"refactor","section":"Miscellaneous","hidden":false},
{"type":"ci","section":"Miscellaneous","hidden":false},
{"type":"meta","section":"Miscellaneous","hidden":false}]
# Standard Conventional Commits: `feat` and `fix`
# node-gyp subdirectories: `bin`, `gyp`, `lib`, `src`, `test`
# node-gyp subcommands: `build`, `clean`, `configure`, `install`, `list`, `rebuild`, `remove`
# Core abstract category: `deps`
        # Languages/platforms: `python`, `lin`, `linux`, `mac`, `macos`, `win`, `windows`, `zos`
# Documentation: `doc`, `docs`, `readme`
# Standard Conventional Commits: `chore` (under "Miscellaneous")
# Miscellaneous abstract categories: `refactor`, `ci`, `meta`
|
.github/workflows/release-please.yml
|
---
description: "Create a foreign input set (resource: POST /api/templates/:template_id/foreign_input_sets)"
enabled: true
runner_type: "python-script"
entry_point: lib/action.py
name: templates.foreign_input_sets_create
pack: foreman
parameters:
operation:
type: string
default: templates.foreign_input_sets_create
immutable: true
connection:
type: string
description: "Name of <connection> from this pack's configuration that specifies how to connect to a server."
required: false
server:
type: string
description: "Optional override of the server in <connection> (required if <connection> is not specified)."
required: false
username:
type: string
description: "Optional override of the username in <connection> (example: <EMAIL>) (required if <connection> is not specified)."
required: false
password:
type: string
description: "Optional override of the password in <connection>. (required if <connection> is not specified)"
required: false
secret: true
template_id:
type: string
required: true
foreign_input_set:
type: object
required: true
description: >
"description:
parameters:
include:
required: false
type: string
      description: A comma separated list of input names to be included from the foreign template.
include_all:
required: false
type: string
description: Include all inputs from the foreign template
description:
required: false
type: string
description: Input set description
target_template_id:
required: true
type: string
description: Target template ID
    exclude:
      required: false
      type: string
      description: A comma separated list of input names to be excluded from the foreign template.
"
|
actions/templates.foreign_input_sets_create.yaml
|
language: cpp
dist: trusty
sudo: required
matrix:
include:
- os: osx
osx_image: xcode10.1
- os: linux
env: COMPILER_C=gcc-7 COMPILER_CC=g++-7
compiler:
- gcc
addons:
apt:
update: true
sources:
- sourceline: 'ppa:mhier/libboost-latest'
- ubuntu-toolchain-r-test
packages:
- g++-7
- cmake
- boost1.67
- os: linux
env: COMPILER_C=clang-7 COMPILER_CC=clang++-7
compiler:
- clang
addons:
apt:
update: true
sources:
- sourceline: 'ppa:mhier/libboost-latest'
- ubuntu-toolchain-r-test
- llvm-toolchain-trusty-7
packages:
- clang-7
- cmake
- boost1.67
before_script:
- if [[ $TRAVIS_OS_NAME == "osx" ]]; then
brew update;
fi
- if [[ $TRAVIS_OS_NAME == "linux" ]]; then
sudo apt-get install;
sudo apt-get -qq update;
sudo apt-get -y install python3 libx11-dev libxrandr-dev libxinerama-dev libxcursor-dev libxext-dev;
fi
install:
- ls;
rm -rf dependencies;
rm -rf build;
python --version;
pip install --user future;
ls
# - if [[ $TRAVIS_OS_NAME == "osx" ]]; then
# git clone https://github.com/KhronosGroup/MoltenVK.git;
# cd MoltenVK;
# travis_wait 60 ./fetchDependencies;
# travis_wait 60 xcodebuild -project MoltenVKPackaging.xcodeproj -scheme "MoltenVK Package (Release)" build;
# cd ..;
# export MOLTENVK_SDK_PATH=$TRAVIS_BUILD_DIR/MoltenVK;
# fi
- if [[ $TRAVIS_OS_NAME == "linux" ]]; then
wget -O vulkansdk.run https://sdk.lunarg.com/sdk/download/1.1.73.0/linux/vulkansdk-linux-x86_64-1.1.73.0.run;
chmod ugo+x vulkansdk.run;
./vulkansdk.run;
export VK_SDK_PATH=$TRAVIS_BUILD_DIR/VulkanSDK/1.1.73.0/x86_64;
export VULKAN_SDK=$TRAVIS_BUILD_DIR/VulkanSDK/1.1.73.0/x86_64;
fi
script:
- cmake --version
- if [[ $TRAVIS_OS_NAME == "osx" ]]; then
sed -i '' "s,<EMAIL>:,https://github.com/,g" CMakeLists.txt;
cmake -G Xcode -H. -Bbuild -DBUILD_FOLDER_SUFFIX:STRING=build -DVIEWER_DOWNLOAD_AND_USE_VULKAN_SDK:BOOL=TRUE;
xcodebuild -project build/ViewerSdl.xcodeproj -configuration Release;
fi
- if [[ $TRAVIS_OS_NAME == "linux" ]]; then
export CC=${COMPILER_C};
export CXX=${COMPILER_CC};
sed -i 's,<EMAIL>:,https://github.com/,g' CMakeLists.txt;
cmake -H. -Bbuild -DBUILD_FOLDER_SUFFIX:STRING=build;
make -C build;
fi
notifications:
email:
recipients:
- <EMAIL>
on_success: change
on_failure: always
|
.travis.yml
|
excluded:
- Example
- Tests
- Library/Tests
- Carthage
- Pods
- Sources/Utils/Reachability.swift
- Library/Sources/Utils/Reachability.swift
- UI/macOS/Components/JSSwitch.swift
- Library/UI/macOS/Components/JSSwitch.swift
disabled_rules:
- void_return
- notification_center_detachment
- block_based_kvo
opt_in_rules:
- force_unwrapping
- object_literal
- private_outlet
- explicit_init
- overridden_super_call
- switch_case_on_newline
- empty_count
- redundant_nil_coalescing
- nimble_operator
- closure_end_indentation
- file_header
- first_where
- prohibited_super_call
- fatal_error_message
- implicitly_unwrapped_optional
# - explicit_type_interface
- operator_usage_whitespace
- number_separator
# - missing_docs
- closure_spacing
- implicit_return
# - conditional_returns_on_newline
- unneeded_parentheses_in_closure_argument
# - let_var_whitespace
# - extension_access_modifier
# - explicit_top_level_acl
- vertical_parameter_alignment_on_call
- unused_optional_binding
# - multiline_parameters
- contains_over_first_not_nil
- attributes
# - explicit_enum_raw_value
# - explicit_top_level_acl
- joined_default_parameter
# - literal_expression_end_indentation
- multiline_arguments
- multiline_parameters
- multiple_closures_with_trailing_closure
- no_extension_access_modifier
- no_grouping_extension
- pattern_matching_keywords
- quick_discouraged_call
- single_test_class
- sorted_imports
- strict_fileprivate
# - trailing_closure
- discouraged_object_literal
# - discouraged_optional_boolean
# - explicit_acl
file_length:
warning: 500
error: 1000
line_length:
warning: 120
function_body_length:
warning: 100
error: 150
identifier_name:
min_length:
warning: 2
error: 2
max_length:
warning: 40
error: 60
type_name:
min_length:
warning: 2
error: 2
max_length:
warning: 50
error: 60
statement_position:
statement_mode: uncuddled_else
trailing_whitespace:
ignores_empty_lines: true
ignores_comments: true
colon:
flexible_right_spacing: true
large_tuple:
warning: 5
error: 8
|
.swiftlint.yml
|
version: 2
jobs:
build:
docker:
- image: circleci/python:3.7
environment:
PIPENV_VENV_IN_PROJECT: true
steps:
- checkout
- run: sudo chown -R circleci:circleci /usr/local/bin
- run: sudo chown -R circleci:circleci /usr/local/lib/python3.7/site-packages
- restore_cache:
key: deps-{{ .Branch }}-{{ checksum "Pipfile.lock" }}
- run:
command: |
sudo pip install pipenv
pipenv install --dev --deploy
- save_cache:
key: deps-{{ .Branch }}-{{ checksum "Pipfile.lock" }}
paths:
- ".venv"
- "/usr/local/bin"
- "/usr/local/lib/python3.7/site-packages"
lint:
docker:
- image: circleci/python:3.7
environment:
PIPENV_VENV_IN_PROJECT: true
steps:
- checkout
- run: sudo chown -R circleci:circleci /usr/local/bin
- run: sudo chown -R circleci:circleci /usr/local/lib/python3.7/site-packages
- restore_cache:
key: deps-{{ .Branch }}-{{ checksum "Pipfile.lock" }}
- run:
command: |
pipenv run flake8 src test
pipenv run black --check src test
test:
docker:
- image: circleci/python:3.7
environment:
PIPENV_VENV_IN_PROJECT: true
steps:
- checkout
- run: sudo chown -R circleci:circleci /usr/local/bin
- run: sudo chown -R circleci:circleci /usr/local/lib/python3.7/site-packages
- restore_cache:
key: deps-{{ .Branch }}-{{ checksum "Pipfile.lock" }}
- run:
command: pipenv run pytest
release:
docker:
- image: circleci/python:3.7
environment:
PIPENV_VENV_IN_PROJECT: true
steps:
- checkout
- run: sudo chown -R circleci:circleci /usr/local/bin
- run: sudo chown -R circleci:circleci /usr/local/lib/python3.7/site-packages
- run:
name: Install release dependencies
command: |
sudo pip install -q -U aws-sam-cli
pipenv lock --requirements --keep-outdated > ./src/requirements.txt
sam validate
sam build
sam package --s3-bucket nr-serverless-applications --output-template-file packaged.yaml
sam publish --region us-east-1 --template packaged.yaml
workflows:
version: 2
build-all:
jobs:
- build:
filters:
branches:
only: /.*/
tags:
only: /.*/
- lint:
requires:
- build
filters:
branches:
only: /.*/
tags:
only: /.*/
- test:
requires:
- build
filters:
branches:
only: /.*/
tags:
only: /.*/
- release:
requires:
- build
- lint
- test
filters:
branches:
ignore: /.*/
tags:
only: /^v.*/
|
.circleci/config.yml
|
---
# Copyright 2017, <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Create testing containers
hosts: docker_containers
gather_facts: no
tasks:
- name: Create base container set
include: common-tasks/docker-create.yml
vars:
image: "{{ docker_image }}:{{ docker_image_tag }}"
command: "{{ docker_command }}"
volumes: "{{ docker_volumes }}"
user: "{{ docker_user | default(omit) }}"
privileged: "{{ docker_privileged | default(omit) }}"
devices: "{{ docker_devices | default(omit) }}"
capabilities: "{{ docker_capabilities | default(omit) }}"
exposed_ports: "{{ docker_exposed_ports | default(omit) }}"
when: container_link_group is not defined
delegate_to: "{{ physical_host }}"
- name: Create base container set
include: common-tasks/docker-create.yml
vars:
image: "{{ docker_image }}:{{ docker_image_tag }}"
command: "{{ docker_command }}"
volumes: "{{ docker_volumes }}"
user: "{{ docker_user | default(omit) }}"
privileged: "{{ docker_privileged | default(omit) }}"
devices: "{{ docker_devices | default(omit) }}"
capabilities: "{{ docker_capabilities | default(omit) }}"
exposed_ports: "{{ docker_exposed_ports | default(omit) }}"
links: "{{ groups[container_link_group] }}"
when: container_link_group is defined
delegate_to: "{{ physical_host }}"
vars:
docker_image: "{{ lookup('env', 'docker_image') }}"
docker_image_tag: "{{ lookup('env', 'docker_image_tag') }}"
docker_command: "{{ lookup('env', 'docker_command') }}"
docker_volumes:
- '/sys/fs/cgroup:/sys/fs/cgroup:ro'
|
tests/test-create-containers.yml
|
version: '3.4'
x-common-variables: &common-variables
{%- if cookiecutter.celery %}
CELERY_BROKER_URL: redis://redis:6379
CELERY_RESULT_BACKEND: redis://redis:6379
{%- endif %}
{%- if cookiecutter.mysql %}
MYSQL_HOST: mysql
MYSQL_ROOT_PASSWORD: <PASSWORD>
MYSQL_USER: mysql
MYSQL_PASSWORD: <PASSWORD>
MYSQL_DATABASE: hseling-{{cookiecutter.package_uri_part}}
{%- endif %}
HSELING_RPC_ENDPOINT: http://hse-api-web:80/rpc/
HSELING_API_ENDPOINT: http://hse-api-web:80/api/
RESTRICTED_MODE: 0
services:
hse-api-web:
build:
context: .
dockerfile: hseling-api-{{cookiecutter.package_uri_part}}/Dockerfile
ports:
- "5000:80"
volumes:
- type: bind
source: ./hseling-api-{{cookiecutter.package_uri_part}}/hseling_api_{{cookiecutter.package_name}}
target: /app/hseling_api_{{cookiecutter.package_name}}
- type: bind
source: ./hseling-data-{{cookiecutter.package_uri_part}}/api
target: /data
{%- if cookiecutter.mysql or cookiecutter.celery %}
depends_on:
{%- endif %}
{%- if cookiecutter.mysql %}
- mysql
{%- endif %}
{%- if cookiecutter.celery %}
- redis
- hse-api-celery
{%- endif %}
environment: *common-variables
{% if cookiecutter.celery %}
hse-api-celery:
build:
context: .
dockerfile: hseling-api-{{cookiecutter.package_uri_part}}/Dockerfile
command: celery -A main.celery worker
volumes:
- type: bind
source: ./hseling-api-{{cookiecutter.package_uri_part}}/hseling_api_{{cookiecutter.package_name}}
target: /app/hseling_api_{{cookiecutter.package_name}}
- type: bind
source: ./hseling-data-{{cookiecutter.package_uri_part}}/api
target: /data
depends_on:
{%- if cookiecutter.mysql %}
- mysql
{%- endif %}
- redis
environment: *common-variables
{%- endif %}
{% if cookiecutter.mysql %}
mysql:
image: mysql
ports:
- "13306:3306"
command: --default-authentication-plugin=mysql_native_password
restart: always
environment: *common-variables
volumes:
- type: bind
source: ./hseling-data-{{cookiecutter.package_uri_part}}/mysql
target: /var/lib/mysql
{%- endif %}
{% if cookiecutter.celery %}
redis:
image: redis:latest
{%- endif %}
hse-web-web:
build:
context: .
dockerfile: hseling-web-{{cookiecutter.package_uri_part}}/Dockerfile
ports:
- "8000:80"
depends_on:
- hse-api-web
{%- if cookiecutter.celery %}
- hse-api-celery
{%- endif %}
environment: *common-variables
volumes:
- type: bind
source: ./hseling-web-{{cookiecutter.package_uri_part}}/hseling_web_{{cookiecutter.package_name}}
target: /app/hseling_web_{{cookiecutter.package_name}}
- type: bind
source: ./hseling-web-{{cookiecutter.package_uri_part}}/hseling_web_{{cookiecutter.package_name}}/static
target: /app/static
- type: bind
source: ./hseling-data-{{cookiecutter.package_uri_part}}/web
target: /data
|
{{cookiecutter.repo_name}}/docker-compose.yaml
|
apiVersion: v1
kind: Template
metadata:
name: gpg
annotations:
    openshift.io/display-name: Custom Privileged SCC
    description: Template to create a specialized Privileged SCC with a Service Account
objects:
- allowHostDirVolumePlugin: true
allowHostIPC: true
allowHostNetwork: true
allowHostPID: true
allowHostPorts: true
allowPrivilegedContainer: true
allowedCapabilities:
- '*'
allowedFlexVolumes: []
apiVersion: v1
defaultAddCapabilities: []
fsGroup:
type: RunAsAny
groups:
- system:cluster-admins
- system:nodes
- system:masters
kind: SecurityContextConstraints
metadata:
annotations:
kubernetes.io/description: 'privileged allows access to all privileged and host
features and the ability to run as any user, any group, any fsGroup, and with
any SELinux context. WARNING: this is the most relaxed SCC and should be used
only for cluster administration. Grant with caution.'
creationTimestamp: null
name: ${NAME}
priority: ${{SCC_PRIORITY_LEVEL}}
readOnlyRootFilesystem: false
requiredDropCapabilities: []
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
seccompProfiles:
- '*'
supplementalGroups:
type: RunAsAny
users:
- system:admin
- system:serviceaccount:openshift-infra:build-controller
- system:serviceaccount:management-infra:management-admin
- system:serviceaccount:management-infra:inspector-admin
- system:serviceaccount:${SERVICE_ACCOUNT_NAMESPACE}:${SERVICE_ACCOUNT_NAME}
volumes:
- '*'
parameters:
- name: NAME
displayName: SCC Name
description: Name of the SCC
value: privileged-custom-sa
required: true
- name: SERVICE_ACCOUNT_NAME
displayName: Service Account Name
description: Name of the Service Account
value: imagemanager
required: true
- name: SERVICE_ACCOUNT_NAMESPACE
displayName: Service Account Namespace
description: Namespace of the Service Account
value: image-management
required: true
- name: SCC_PRIORITY_LEVEL
displayName: SCC Priority
description: Priority Level for the SCC
value: "8"
required: true
|
files/scc/template.yml
|
items:
- uid: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle
id: WithDeleteOnIdle
artifact: com.microsoft.azure:azure-mgmt-servicebus:1.37.0
parent: com.microsoft.azure.management.servicebus
children:
- com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle.withDeleteOnIdleDurationInMinutes(int)
langs:
- java
name: Queue.DefinitionStages.WithDeleteOnIdle
nameWithType: Queue.DefinitionStages.WithDeleteOnIdle
fullName: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle
type: Interface
package: com.microsoft.azure.management.servicebus
summary: The stage of the queue definition allowing to define auto delete behaviour.
syntax:
content: public static interface Queue.DefinitionStages.WithDeleteOnIdle
- uid: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle.withDeleteOnIdleDurationInMinutes(int)
id: withDeleteOnIdleDurationInMinutes(int)
artifact: com.microsoft.azure:azure-mgmt-servicebus:1.37.0
parent: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle
langs:
- java
name: withDeleteOnIdleDurationInMinutes(int durationInMinutes)
nameWithType: Queue.DefinitionStages.WithDeleteOnIdle.withDeleteOnIdleDurationInMinutes(int durationInMinutes)
fullName: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle.withDeleteOnIdleDurationInMinutes(int durationInMinutes)
overload: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle.withDeleteOnIdleDurationInMinutes*
type: Method
package: com.microsoft.azure.management.servicebus
summary: 'The idle interval after which the queue is automatically deleted. Note: unless it is explicitly overridden the default delete on idle duration is infinite (TimeSpan.Max).'
syntax:
content: public abstract Queue.DefinitionStages.WithCreate withDeleteOnIdleDurationInMinutes(int durationInMinutes)
parameters:
- id: durationInMinutes
type: int
description: idle duration in minutes
return:
type: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithCreate
description: the next stage of queue definition
references:
- uid: int
spec.java:
- uid: int
name: int
fullName: int
- uid: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithCreate
name: Queue.DefinitionStages.WithCreate
nameWithType: Queue.DefinitionStages.WithCreate
fullName: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithCreate
- uid: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle.withDeleteOnIdleDurationInMinutes*
name: withDeleteOnIdleDurationInMinutes
nameWithType: Queue.DefinitionStages.WithDeleteOnIdle.withDeleteOnIdleDurationInMinutes
fullName: com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle.withDeleteOnIdleDurationInMinutes
package: com.microsoft.azure.management.servicebus
|
docs-ref-autogen/com.microsoft.azure.management.servicebus.Queue.DefinitionStages.WithDeleteOnIdle.yml
|
version: '3'
services:
api:
labels:
traefik.enable: true
traefik.http.routers.saleor-api.entrypoints: websecure
traefik.http.routers.saleor-api.rule: Host(`saleor-api.test.govinddiwakar.tech`) && PathPrefix(`/graphql`)
traefik.http.routers.saleor-api.service: saleor-api
traefik.http.services.saleor-api.loadbalancer.server.port: 8000
traefik.docker.network: web
networks:
web:
storefront:
labels:
traefik.enable: true
traefik.http.routers.saleor-store.entrypoints: websecure
traefik.http.routers.saleor-store.rule: Host(`test.govinddiwakar.tech`)
traefik.http.routers.saleor-store.service: saleor-store
traefik.http.services.saleor-store.loadbalancer.server.port: 80
traefik.docker.network: web
networks:
web:
dashboard:
labels:
traefik.enable: true
traefik.http.routers.saleor-admin.entrypoints: websecure
traefik.http.routers.saleor-admin.rule: Host(`saleor-admin.test.govinddiwakar.tech`)
traefik.http.routers.saleor-admin.service: saleor-admin
traefik.http.services.saleor-admin.loadbalancer.server.port: 80
traefik.docker.network: web
networks:
web:
jaeger:
labels:
traefik.enable: true
traefik.http.routers.saleor-jaeger.entrypoints: websecure
traefik.http.routers.saleor-jaeger.rule: Host(`saleor-jaeger.test.govinddiwakar.tech`)
traefik.http.routers.saleor-jaeger.service: saleor-jaeger
traefik.http.services.saleor-jaeger.loadbalancer.server.port: 16686
traefik.docker.network: web
networks:
web:
mailhog:
labels:
traefik.enable: true
traefik.http.routers.saleor-mail.entrypoints: websecure
traefik.http.routers.saleor-mail.rule: Host(`saleor-mail.test.govinddiwakar.tech`)
traefik.http.routers.saleor-mail.service: saleor-mail
traefik.http.services.saleor-mail.loadbalancer.server.port: 8025
traefik.docker.network: web
networks:
web:
db:
labels:
traefik.enable: false
redis:
labels:
traefik.enable: false
worker:
labels:
traefik.enable: false
networks:
web:
external: true
internal:
external: false
|
docker-compose-override.yml
|
version: 1.1.2
documentation: LogIsland analytics main config file. Put here every engine or component config
#########################################################################################################
# engine every 10'
engine:
component: com.hurence.logisland.engine.spark.KafkaStreamProcessingEngine
type: engine
documentation: Main Logisland job entry point
configuration:
spark.app.name: SaveToHDFS
spark.master: local[4]
spark.driver.memory: 512m
spark.driver.cores: 1
spark.executor.memory: 512m
spark.executor.instances: 4
spark.executor.cores: 2
spark.yarn.queue: default
spark.yarn.maxAppAttempts: 4
spark.yarn.am.attemptFailuresValidityInterval: 1h
spark.yarn.max.executor.failures: 20
spark.yarn.executor.failuresValidityInterval: 1h
spark.task.maxFailures: 8
spark.serializer: org.apache.spark.serializer.KryoSerializer
spark.streaming.batchDuration: 600000
spark.streaming.backpressure.enabled: true
spark.streaming.unpersist: false
spark.streaming.blockInterval: 500
spark.streaming.kafka.maxRatePerPartition: 3000
spark.streaming.timeout: -1
spark.streaming.kafka.maxRetries: 3
spark.streaming.ui.retainedBatches: 200
spark.streaming.receiver.writeAheadLog.enable: false
spark.ui.port: 4051
streamConfigurations:
- stream: events_burner
component: com.hurence.logisland.stream.spark.KafkaRecordStreamHDFSBurner
type: stream
documentation: average bytes sent by host_name
configuration:
kafka.input.topics: logisland_events
kafka.output.topics: none
kafka.error.topics: logisland_errors
kafka.input.topics.serializer: com.hurence.logisland.serializer.KryoSerializer
kafka.output.topics.serializer: none
kafka.error.topics.serializer: com.hurence.logisland.serializer.JsonSerializer
kafka.metadata.broker.list: ${KAFKA_BROKERS}
kafka.zookeeper.quorum: ${ZK_QUORUM}
kafka.topic.autoCreate: true
kafka.topic.default.partitions: 2
kafka.topic.default.replicationFactor: 1
output.format: parquet
output.folder.path: data/syslog_events
|
logisland-core/logisland-framework/logisland-resources/src/main/resources/conf/save-to-hdfs.yml
|
summary: |-
Removes an administrative unit member.
module: AzureADPreview
notes: ""
links:
- text: Add-AzureADAdministrativeUnitMember
href: ./Add-AzureADAdministrativeUnitMember.yml
- text: Remove-AzureADAdministrativeUnitMember
href: ./Remove-AzureADAdministrativeUnitMember.yml
syntaxes:
- >-
Remove-AzureADAdministrativeUnitMember -ObjectId <String> -MemberId <String>
[-InformationAction <ActionPreference>] [-InformationVariable <String>] [<CommonParameters>]
parameters:
- type: <xref href="ActionPreference" data-throw-if-not-resolved="False" />
name: InformationAction
description: |+
Specifies how this cmdlet responds to an information event.
The acceptable values for this parameter are:
- Continue
- Ignore
- Inquire
- SilentlyContinue
- Stop
- Suspend
defaultValue: None
position: Named
aliases: infa
parameterValueGroup: ""
- type: <xref href="String" data-throw-if-not-resolved="False" />
name: InformationVariable
description: |+
Specifies an information variable.
defaultValue: None
position: Named
aliases: iv
parameterValueGroup: ""
- type: <xref href="String" data-throw-if-not-resolved="False" />
name: MemberId
isRequired: true
description: |+
Specifies the ID of the administrative unit member.
defaultValue: None
pipelineInput: true
position: Named
aliases: ""
parameterValueGroup: ""
- type: <xref href="String" data-throw-if-not-resolved="False" />
name: ObjectId
isRequired: true
description: |+
Specifies the ID of an administrative unit in Azure AD.
defaultValue: None
pipelineInput: true
position: Named
aliases: ""
parameterValueGroup: ""
uid: AzureADPreview.Remove-AzureADAdministrativeUnitMember
name: Remove-AzureADAdministrativeUnitMember
description: |-
The **Remove-AzureADAdministrativeUnitMember** cmdlet removes an administrative unit member in Azure Active Directory (AD).
metadata:
external help file: Microsoft.Open.AzureADBeta.Graph.PowerShell.dll-Help.xml
Module Name: AzureADPreview
ms.assetid: 9DEA1FE5-FE78-431F-9D12-53C349812A81
ms.custom: iamfeature=PowerShell
ms.reviewer: rodejo
online version: ""
schema: 2.0.0
|
azureadps-2.0-preview/AzureAD/Remove-AzureADAdministrativeUnitMember.yml
|
---
# Consolidated tomcat settings for the JWS role: maps role defaults and
# inventory variables into a single `tomcat` dictionary consumed by the tasks.
tomcat:
  # Comma-separated list of the accepted values for install_method.
  supported_install_method: "local_zipfiles,rhn_zipfiles,rpm"
  install_method: "{{ tomcat_install_method }}"
  install_dir: "{{ tomcat_install_dir }}"
  rpm: "{{ tomcat_rpm }}"
  # Red Hat Network download settings (used for the rhn_zipfiles install method).
  rhn:
    server_zipfile_url: "{{ jws_rhn_server_zipfile_url }}"
    native_zipfile_url: "{{ jws_native_zipfile_url }}"
    username: "{{ rhn_username | default('') }}"
    password: "{{ rhn_password | default('') }}"
  # OS user/group owning the Tomcat installation.
  user: "{{ tomcat_user }}"
  group: "{{ tomcat_group }}"
  home: "{{ tomcat_home }}"
  base: "{{ tomcat_catalina_base }}"
  # Main Catalina configuration files.
  conf:
    properties: "{{ tomcat_conf_properties }}"
    policy: "{{ tomcat_conf_policy }}"
    # NOTE(review): "loggging" (triple g) looks misspelled — confirm the role
    # defaults really define `tomcat_conf_loggging` before renaming anything.
    logging: "{{ tomcat_conf_loggging }}"
    context: "{{ tomcat_conf_context }}"
    server: "{{ tomcat_conf_server }}"
    web: "{{ tomcat_conf_web }}"
    templates:
      context: "{{ tomcat_conf_templates_context }}"
      server: "{{ tomcat_conf_templates_server }}"
      web: "{{ tomcat_conf_templates_web }}"
  apps:
    # Webapps to delete, supplied as a comma-separated string and split here.
    to_remove: "{{ tomcat_apps_to_remove.split(',') }}"
  shutdown:
    port: "{{ tomcat_shutdown_port }}"
  # Connector configuration rendered into server.xml.
  listen:
    http:
      port: "{{ tomcat_listen_http_port }}"
      bind_address: "{{ tomcat_listen_http_bind_address }}"
      enabled: "{{ tomcat_listen_http_enabled }}"
    https:
      port: "{{ tomcat_listen_https_port }}"
    ajp:
      enabled: "{{ tomcat_listen_ajp_enabled }}"
      address: "{{ tomcat_listen_ajp_address }}"
      port: "{{ tomcat_listen_ajp_port }}"
      secretRequired: "{{ tomcat_listen_ajp_secretRequired }}"
      secret: "{{ tomcat_listen_ajp_secret }}"
  # Tomcat vault (credential store) settings.
  vault:
    name: "{{ tomcat_vault_name }}"
    enable: "{{ tomcat_vault_enable }}"
    alias: "{{ tomcat_vault_alias }}"
    storepass: "{{ tomcat_vault_storepass }}"
    iteration: "{{ tomcat_vault_iteration }}"
    salt: "{{ tomcat_vault_salt }}"
    properties: "{{ tomcat_vault_properties }}"
  # mod_cluster load-balancer integration.
  mod_cluster:
    enable: "{{ tomcat_modcluster_enable }}"
    ip: "{{ tomcat_modcluster_ip }}"
    port: "{{ tomcat_modcluster_port }}"
    connector_port: "{{ tomcat_modcluster_connector_port }}"
    advertise: "{{ tomcat_modcluster_advertise }}"
    stickySession: "{{ tomcat_modcluster_stickySession }}"
    stickySessionForce: "{{ tomcat_modcluster_stickySessionForce }}"
    stickySessionRemove: "{{ tomcat_modcluster_stickySessionRemove }}"
  # Service (systemd/init) integration.
  service:
    enabled: "{{ tomcat_systemd_enabled }}"
    name: "{{ tomcat_service_name }}"
    conf: "{{ tomcat_service_conf }}"
    script: "{{ tomcat_service_script }}"
    systemd: "{{ tomcat_service_systemd }}"
    pidfile: "{{ tomcat_service_systemd_pidfile }}"
    type: "{{ tomcat_service_systemd_type }}"
|
roles/jws/vars/main.yml
|
apiVersion: template.openshift.io/v1
kind: Template
metadata:
name: "migrate-src-template"
annotations:
description: "Template to create a runnable pod intended to copy storage content from one PVC to another."
tags: storage,migration
objects:
- apiVersion: apps/v1
kind: Deployment
metadata:
name: ${NAME}
labels:
app: ${NAME}
base: xc-pvc-migrator
spec:
replicas: 1
selector:
matchLabels:
app: ${NAME}
template:
metadata:
labels:
app: ${NAME}
base: xc-pvc-migrator
spec:
containers:
- name: ${NAME}
image: ${SRC_IMAGE_REGISTRY}/${SRC_IMAGE_NAMESPACE}/${SRC_IMAGE_NAME}:latest
command:
- bash
- '-c'
- >-
mkdir -p /target/tkube &&
touch /target/tkube/KUBECONFIG &&
oc login --token=${TARGET_TOKEN} --server=${TARGET_CLUSTER_API} --kubeconfig=/target/tkube/KUBECONFIG &&
oc project ${TARGET_PROJECT}
nohup `tail -f /dev/null > /dev/null`
env:
- name: TARGET_TOKEN #target cluster token for oc to use
valueFrom:
secretKeyRef:
key: token
name: ${REMOTE_SECRET}
- name: TARGET_CLUSTER_API # target cluster token for oc to use (must provide)
value: ${REMOTE_CLUSTER}
- name: TARGET_PROJECT # target cluster project/ namespace
value: ${REMOTE_PROJECT}
volumeMounts:
- name: source
mountPath: /source
- name: target
mountPath: /target
volumes:
- name: source
persistentVolumeClaim:
claimName: ${SOURCE_PVC}
- name: target
emptyDir: {}
parameters:
- name: NAME
description: "Name of the deployment."
value: "source-pvc-migrator"
- name: SOURCE_PVC
  # This template copies FROM this PVC (it is mounted at /source), so it is
  # the content source, not the destination.
  description: "Name of an existing PVC to use as the content source."
  required: true
  value: backup
- name: REMOTE_CLUSTER
displayName: Remote Cluster API URL
description: The URL of the remote cluster API (including port)
required: true
value: https://api.silver.devops.gov.bc.ca:6443
- name: REMOTE_PROJECT
displayName: Remote Namespace
description: The name of the remote namespace (project)
required: true
value: 4a9599-prod
- name: REMOTE_SECRET
displayName: Remote Cluster token secret
description: The name of the secret that contains an access token
required: true
value: x-cluster-test
- name: SRC_IMAGE_NAME
displayName: Container Image Name
description: The name of the image to use for this resource.
required: true
value: pvc-migrator
- name: SRC_IMAGE_NAMESPACE
displayName: Image Namespace
description: The namespace of the OpenShift project containing the imagestream for the application.
required: true
value: devex-von-prod
- name: SRC_IMAGE_REGISTRY
#ocp3#docker-registry.default.svc:5000
value: docker-registry.default.svc:5000
- name: TAG_NAME
displayName: Environment TAG name
description: The TAG name for this environment, e.g., dev, test, prod
required: true
value: test
|
cross-cluster/source-migrator-tmpl.yaml
|
homepage: https://github.com/exclipy/pdata
changelog-type: ''
hash: bbdcebe83e12f704a0b6611b1a9e7aa22db20023c6e3d44505617410f53733b6
test-bench-deps: {}
maintainer: <NAME> <<EMAIL>>
synopsis: A purely functional and persistent hash map
changelog: ''
basic-deps:
base: ! '>=4 && <4.5'
array: <0.5
hashable: ! '>=1.0 && <1.3'
deepseq: ! '>=1.1 && <1.5'
all-versions:
- '0.2'
- '0.3'
author: <NAME>
latest: '0.3'
description-type: markdown
description: ! "Hash Array Mapped Tries\n=======================\n\nOne of the prominent
features of the [Clojure][1] language are a set of\n[immutable data structures][2]
with efficient manipulation operations. One of\nthe most innovative and important
is the persistent hash map based on the\n*hash array mapped trie* (HAMT).\n\nThis
project is a port of this structure to Haskell, as Data.HamtMap. The\ninterface
has been kept as consistent as possible with Data.Map.\n\n[1]: http://clojure.org/\n[2]:
http://clojure.org/datatypes\n\n\nBasic usage\n-----------\nHere's a demo of what
you can do with a HamtMap:\n\n ghci> :m + Data.HamtMap\n ghci> empty Data.HashTable.hashString\n
\ -- an empty HamtMap (requires a key hash function)\n fromList hashFn
[]\n\n ghci> insert \"foo\" 1 it\n fromList hashFn [(\"foo\",1)]\n\n ghci>
insert \"bar\" 42 it\n fromList hashFn [(\"foo\",1),(\"bar\",42)]\n\n ghci>
insert \"qux\" 123 it\n fromList hashFn [(\"qux\",12),(\"foo\",1),(\"bar\",42)]\n\n
\ ghci> insert \"qux\" 13 it -- inserting an existing key overwrites by default\n
\ fromList hashFn [(\"qux\",13),(\"foo\",1),(\"bar\",42)]\n\n ghci> let a =
it\n ghci> a ! \"foo\"\n 1\n\n ghci> a ! \"baz\" -- using (!) is unsafe\n
\ *** Exception: array index out of range: element not in the map\n\n ghci>
Data.HamtMap.lookup \"bar\" a\n Just 42\n\n ghci> Data.HamtMap.lookup \"baz\"
a -- 'lookup' returns a safe Maybe\n Nothing\n\n ghci> adjust succ \"foo\"
a -- apply a function to a value\n fromList hashFn [(\"qux\",13),(\"foo\",2),(\"bar\",42)]\n\n
\ ghci> Data.HamtMap.map succ a -- apply a function to all values\n fromList
hashFn [(\"qux\",14),(\"foo\",2),(\"bar\",43)]\n\n ghci> keys a\n [\"qux\",\"foo\",\"bar\"]\n\n
\ ghci> elems a\n [13,1,42]\n\n ghci> fromList Data.HashTable.hashString
[(\"a\", 1), (\"b\", 2), (\"c\", 3)]\n fromList hashFn [(\"b\",2),(\"c\",3),(\"a\",1)]\n\n
\ ghci> toList it\n [(\"b\",2),(\"c\",3),(\"a\",1)]\n\n\nInstallation\n------------\n\nTo
try it yourself, just do the usual:\n\n $ runghc Setup.hs configure --user\n
\ $ runghc Setup.hs build\n $ runghc Setup.hs install\n\nPerformance\n-----------\n\nThe
single-element operations for the hash map have logarithmic asymtotic\nruntime complexity.
\ However, it is implemented as a 32-ary tree, which means it\nnever exceeds a depth
of 7 nodes, so you can treat them as constant-time\noperations (for relatively large
constants).\n\nHow it works\n------------\n\nI wrote this code after reading the
following explanatory blog posts on how the\nClojure version works. They should
also provide a decent birds-eye overview of\nmy Haskell implementation.\n\n* [Understanding
Clojure’s PersistentHashMap\n ](http://blog.higher-order.net/2009/09/08/understanding-clojures-persistenthashmap-deftwice/)\n*
[Assoc and Clojure’s PersistentHashMap: part II\n ](http://blog.higher-order.net/2010/08/16/assoc-and-clojures-persistenthashmap-part-ii/)\n\n\nTo
do\n-----\n* Match Data.Map in completeness\n* Performance tuning\n * Efficient
implementations of (//), etc. based on fromList\n"
license-name: BSD3
|
packages/ha/hamtmap.yaml
|
---
- name: Create user on a different partition
bigip_user:
partition_access:
- foo:operator
username_credential: "{{ username_credential }}"
password_credential: "{{ <PASSWORD> }}"
update_password: on_create
partition: "{{ partition }}"
register: result
- name: Assert Create user on a different partition
assert:
that:
- result is changed
- name: Create user on a different partition - Idempotent check
bigip_user:
partition_access:
- foo:operator
username_credential: "{{ username_credential }}"
password_credential: "{{ <PASSWORD> }}"
update_password: on_create
partition: "{{ partition }}"
register: result
- name: Assert Create user on a different partition - Idempotent check
assert:
that:
- result is not changed
- name: Assign multiple roles for the same partition
bigip_user:
username_credential: "{{ username_credential }}"
partition_access:
- Common:operator
- Common:manager
partition: "{{ partition }}"
register: result
- name: Assert Assign multiple roles for the same partition
assert:
that:
- result is changed
- name: Modify roles for a different partitions
bigip_user:
partition_access:
- Common:guest
- foo:manager
username_credential: "{{ username_credential }}"
partition: "{{ partition }}"
register: result
- name: Assert Modify roles for a different partitions
assert:
that:
- result is changed
- name: Update password for user on a different partition
bigip_user:
username_credential: "{{ username_credential }}"
password_credential: "{{ <PASSWORD> }}"
partition: "{{ partition }}"
register: result
- name: Assert Update password for user on a different partition
assert:
that:
- result is changed
# We should see password not updating here
- name: Update user password for user on a different partition password_update set to on_create
bigip_user:
username_credential: "{{ username_credential }}"
password_credential: "{{ <PASSWORD> }}"
update_password: on_create
partition: "{{ partition }}"
register: result
- name: Assert Update user password for user on a different partition password_update set to on_create
assert:
that:
- result is not changed
- name: Remove user on different partition
bigip_user:
username_credential: "{{ username_credential }}"
state: absent
partition: "{{ partition }}"
register: result
- name: Assert Remove user on different partition
assert:
that:
- result is changed
- name: Remove user on different partition - Idempotent check
bigip_user:
username_credential: "{{ username_credential }}"
state: absent
partition: "{{ partition }}"
register: result
- name: Assert Remove user on different partition - Idempotent check
assert:
that:
- result is not changed
|
f5-ansible/test/integration/targets/bigip_user/tasks/test-post-version-12-specific.yaml
|
# Check if pki-generator is already run
- name: "Checking if pki-generator job is already completed"
include_role:
name: "{{ playbook_dir }}/../../shared/configuration/roles/check/helm_component"
vars:
job_title: "{{ org.name | lower }}-generate-pki"
component_type: "OneTimeJob"
component_name: "{{ org.name | lower }}-generate-pki"
- name: "Set Variable generate_pki"
set_fact:
generate_pki: "{{ result }}"
# This task will loop over the network.yaml to fetch the cenm details
- name: Create value file for pki generator
include_role:
name: helm_component
vars:
component_name: "{{ org.name | lower }}pkigenerator"
type: "pki-generator"
values_dir: "{{playbook_dir}}/../../../{{ gitops.release_dir }}"
name: "{{ org.name | lower }}"
signerName: "{{ services.signer.name }}"
idmanName: "{{ services.idman.name }}"
networkmapName: "{{ services.networkmap.name }}"
notaryName: "{{ services.notary.name }}"
charts_dir: "{{ gitops.chart_source }}"
git_url: "{{ gitops.git_ssh }}"
git_branch: "{{ gitops.branch }}"
docker_url: "corda/enterprise-pkitool"
docker_tag: "1.2-zulu-openjdk8u242"
init_container_name: "index.docker.io/hyperledgerlabs/alpine-utils:1.0"
vault_address: "{{ vault.url }}"
authpath: "cordaent{{ org.name | lower }}"
certsecretprefix: "secret/{{ org.name | lower }}"
username: "signer"
password: "<PASSWORD>"
idman_ip: "{{ services.idman.name | lower }}.{{ org.external_url_suffix }}"
idman_port: 8443
idman_host: "{{ services.idman.name }}.{{ component_ns }}"
networkmap_host: "{{ services.networkmap.name }}.{{ component_ns }}"
tls_crl_signer_subject: "{{ services.signer.subject }}"
tls_crl_signer_issuer: "{{ services.idman.crlissuer_subject }}"
corda_root_ca_subject: "{{ org.subject }}"
subordinate_ca_subject: "{{ org.subordinate_ca_subject }}"
idman_ca_subject: "{{ services.idman.subject }}"
networkmap_ca_subject: "{{ services.networkmap.subject }}"
when: generate_pki.resources|length == 0
# Git Push : Pushes the above generated files to git directory
- name: Git Push
include_role:
name: "{{ playbook_dir }}/../../shared/configuration/roles/git_push"
vars:
GIT_DIR: "{{ playbook_dir }}/../../../"
GIT_REPO: "{{ gitops.git_push_url }}"
GIT_USERNAME: "{{ gitops.username }}"
GIT_EMAIL: "{{ gitops.email }}"
GIT_PASSWORD: "{{ <PASSWORD> }}"
GIT_BRANCH: "{{ gitops.branch }}"
GIT_RESET_PATH: "platforms/r3-corda-ent/configuration"
msg: "[ci skip] Pushing pki-generator files"
when: generate_pki.resources|length == 0
|
platforms/r3-corda-ent/configuration/roles/setup/pki-generator/tasks/main.yaml
|
---
"$id": http://ga4gh.org/schemas/discovery/search/response
"$schema": http://json-schema.org/draft-07/schema#
definitions:
component-version:
description: An object where a key is the component name and the value specifies the version that was provided
type: object
additionalProperties:
"$ref": http://ga4gh.org/schemas/discovery/search/definitions#definitions/semver
components-in-response:
description: An object which has component types that can be used in a response
type: object
propertyNames:
enum:
- record
- recordMeta
- collection
- responseMeta
additionalProperties:
"$ref": "#/definitions/component-version"
"$comment": |
`propertyNames` makes sure that all the properties of this object have one of the allowed names.
Combining this with `additionalProperties` which uses a reference means all property values have the same validation constraint
records:
type: array
minItems: 0
items:
type: object
required:
- components
properties:
meta:
"$ref": http://ga4gh.org/schemas/discovery/search/components/record_meta_components
components:
"$ref": http://ga4gh.org/schemas/discovery/search/components/record_components
collection-meta:
type: object
required:
- name
properties:
name:
type: string
description: A name for the collection (or data set / data source) which will be presented to the end user
id:
type: string
description: An internal string based identifier defined by the server for the collection. They may not be stable
description:
type: string
description: A description of the collection which will be presented to the end user
infoURL:
type: string
format: uri
description: A URL at which an end user can find information about the collection
type: object
required:
- meta
- recordCollections
properties:
meta:
description: Contains metadata about the response
type: object
required:
- response
properties:
response:
description: Details on the component versions used in the response
type: object
required:
- apiVersion
properties:
components:
"$ref": "#/definitions/components-in-response"
apiVersion:
description: The full version of the API that is being used for the response.
"$ref": http://ga4gh.org/schemas/discovery/search/definitions#definitions/semver
request:
description: Contains metadata relating to the request from the client
type: object
properties:
apiVersion:
description: The API version that was given and what it was used as, if different
type: object
properties:
given:
"$ref": http://ga4gh.org/schemas/discovery/search/definitions#definitions/semver
usedAs:
"$ref": http://ga4gh.org/schemas/discovery/search/definitions#definitions/semver
componentsUsed:
description: An array of the components that were used from the request
type: array
items:
type: string
components:
"$ref": http://ga4gh.org/schemas/discovery/search/components/response_meta_components
recordCollections:
type: array
items:
type: object
properties:
meta:
"ref": "#/definitions/collection-meta"
collectionComponents:
"$ref": http://ga4gh.org/schemas/discovery/search/components/collection_components
records:
"$ref": "#/definitions/records"
|
json_schema/schemas_source/response.yaml
|
markdown: redcarpet
pygments: true
# Permalinks
permalink: pretty
# Setup
title: Rui Albuquerque
tagline: #The Jekyll Butler
# url: http://getpoole.com
url: ruialbuquerque.pt/about.md
paginate: 1
baseurl: /
author:
#name: <NAME>
name: <NAME>
#url: https://twitter.com/mdo
url: https://twitter.com/rui278
#email: <EMAIL>
modified by: <NAME>
based on: joshualande.com / github.com/joshualande/joshualande.github.io
# Custom vars
version: 1.0.0
github:
repo: https://github.com/poole/poole
#added by me
# This is the list of pages to include in the header of the website.
#pages_list: {'About':'/about',
# 'Projects':'/projects','Archive':'/archive',
# 'Twitter':'https://twitter.com/rui278','GitHub':'https://github.com/rui278',
# 'Feed':'/atom.xml'
# }
pages_list: {'Blog':'https://www.blog.ruialbuquerque.pt',
'Projects':'/projects','Archive':'/archive',
'Twitter':'https://www.twitter.com/rui278','GitHub':'https://www.github.com/rui278',
'Feed':'/atom.xml'
}
projects_list: {'CipherChat':'https://github.com/rui278/CipherChat',
'CipherManager':'https://github.com/rui278/CipherManager',
'Cuda Kmeans':'https://github.com/rui278/cudaKmeans',
'Dinamic Directories':'https://github.com/rui278/Dinamic-Directories',
'x265 Hevc Doxygen':'http://ruialbuquerque.pt/hevc'
}
projects_text: {'CipherChat':'An Android chat app that provides secure and confidential conversations among users. It uses the Revised Needham-Shroeder Protocol to provide confidentiality, authentication, freshness and integrity. It purposely does not assure non-repudiation so that full client privacy can be respected.',
'CipherManager':'A simple desktop password manager. You can use, view or create passwords. Passwords are stored localy. uses AES and SHA256.',
'Cuda Kmeans':'A Kmeans Implementation using Cuda to enable GPU usage and acceleration.',
'Dinamic Directories':'A dinamic user directory based chat application.',
'x265 Hevc Doxygen':'I am working on a project wich includes an implementation of the hevc standard by the guys at x265.org. They did not have all the documentation the way I wanted/needed it, so I ran doxygen on the code and I will leave it here for any one who wants to use it.'
}
|
_config.yml
|
# Block services and template matchers for EkiBlockBundle.
# All %parameter% references are quoted: a plain YAML scalar must not start
# with '%' (a reserved indicator), and Symfony's YAML loader deprecates and
# later rejects unquoted %...% values.
parameters:
    eki.block.service.pad.class: Eki\Block\BlockBundle\Block\ContainerBlockService
    eki.block.service.zone.class: Eki\Block\BlockBundle\Block\ContainerBlockService
    eki.block.service.square.class: Eki\Block\BlockBundle\Block\ContainerBlockService
    eki.block.service.item.class: Eki\Block\BlockBundle\Block\ItemBlockService
    eki_block.delegate.template_matcher.class: Eki\Block\BlockBundle\Matcher\DelegateTemplateMatcher
    eki_block.template_matcher.pad.class: Eki\Block\BlockBundle\Matcher\PadBlockTemplateMatcher
    eki_block.template_matcher.zone.class: Eki\Block\BlockBundle\Matcher\ZoneBlockTemplateMatcher
    eki_block.template_matcher.square.class: Eki\Block\BlockBundle\Matcher\SquareBlockTemplateMatcher
    eki_block.template_matcher.item.class: Eki\Block\BlockBundle\Matcher\ItemBlockTemplateMatcher
    eki_block.template_matcher.action.form.class: Eki\Block\BlockBundle\Matcher\FormBlockTemplateMatcher
    eki.block.controller.form_block.class: Eki\Block\BlockBundle\Controller\FormBlockController
services:
    # Container-style block services; each is registered with Sonata via the
    # "sonata.block" tag and renders a dedicated twig template.
    eki.block.pad:
        class: '%eki.block.service.pad.class%'
        arguments:
            - "eki.block.pad"
            - "@templating"
            - "@sonata.block.renderer"
            - EkiBlockBundle:Block:pad.html.twig
        tags:
            - { name: "sonata.block" }
    eki.block.zone:
        class: '%eki.block.service.zone.class%'
        arguments:
            - "eki.block.zone"
            - "@templating"
            - "@sonata.block.renderer"
            - EkiBlockBundle:Block:zone.html.twig
        tags:
            - { name: "sonata.block" }
    eki.block.square:
        class: '%eki.block.service.square.class%'
        arguments:
            - "eki.block.square"
            - "@templating"
            - "@sonata.block.renderer"
            - EkiBlockBundle:Block:square.html.twig
        tags:
            - { name: "sonata.block" }
    eki.block.item:
        class: '%eki.block.service.item.class%'
        arguments:
            - "eki.block.item"
            - "@templating"
            - EkiBlockBundle:Block:item.html.twig
        tags:
            - { name: "sonata.block" }
    # Delegate matcher that dispatches to the per-block matchers below, which
    # are collected via the "eki_block.template_matcher" tag.
    eki_block.delegate.template_matcher:
        class: '%eki_block.delegate.template_matcher.class%'
    eki_block.template_matcher.pad:
        class: '%eki_block.template_matcher.pad.class%'
        calls:
            - [setContainer, ['@service_container']]
        tags:
            - { name: eki_block.template_matcher, key: eki.block.pad }
    eki_block.template_matcher.zone:
        class: '%eki_block.template_matcher.zone.class%'
        calls:
            - [setContainer, ['@service_container']]
        tags:
            - { name: eki_block.template_matcher, key: eki.block.zone }
    eki_block.template_matcher.square:
        class: '%eki_block.template_matcher.square.class%'
        calls:
            - [setContainer, ['@service_container']]
        tags:
            - { name: eki_block.template_matcher, key: eki.block.square }
    eki_block.template_matcher.item:
        class: '%eki_block.template_matcher.item.class%'
        calls:
            - [setContainer, ['@service_container']]
        tags:
            - { name: eki_block.template_matcher, key: eki.block.item }
    eki_block.template_matcher.action.form:
        class: '%eki_block.template_matcher.action.form.class%'
        calls:
            - [setContainer, ['@service_container']]
        tags:
            - { name: eki_block.template_matcher, key: eki.block.action.form }
    eki.block.controller.form_block:
        class: '%eki.block.controller.form_block.class%'
        calls:
            - [setContainer, ['@service_container']]
            - [setDispatcher, ['@event_dispatcher']]
            - [setTemplateMatcher, ['@eki_block.delegate.template_matcher']]
|
Resources/config/services.yml
|
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: sriov-network-config-daemon
namespace: {{.Namespace}}
annotations:
release.openshift.io/version: "{{.ReleaseVersion}}"
spec:
selector:
matchLabels:
app: sriov-network-config-daemon
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: sriov-network-config-daemon
component: network
type: infra
openshift.io/component: network
spec:
hostNetwork: true
hostPID: true
nodeSelector:
beta.kubernetes.io/os: linux
node-role.kubernetes.io/worker:
tolerations:
- operator: Exists
serviceAccountName: sriov-network-config-daemon
containers:
- name: sriov-network-config-daemon
image: {{.Image}}
command:
- sriov-network-config-daemon
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
args:
- "start"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: host
mountPath: /host
# - name: hostetc
# mountPath: /host/etc
# - name: hostdev
# mountPath: /host/dev
# - name: hostproc
# mountPath: /host/proc
# - name: hostboot
# mountPath: /host/boot
# - name: hostusrlib
# mountPath: /host/lib
# - name: hostusrlib64
# mountPath: /host/lib64
# - name: hostusr
# mountPath: /host/usr
# - name: hostusrbin
# mountPath: /host/bin
# - name: hostusrsbin
# mountPath: /host/sbin
# - name: hostsysmodule
# mountPath: /host/sys/module
volumes:
- name: host
hostPath:
path: /
# - name: hostetc
# hostPath:
# path: /etc
# - name: hostdev
# hostPath:
# path: /dev
# - name: hostproc
# hostPath:
# path: /proc
# - name: hostboot
# hostPath:
# path: /boot
# - name: hostusr
# hostPath:
# path: /usr
# - name: hostusrbin
# hostPath:
# path: /usr/bin
# - name: hostusrlib
# hostPath:
# path: /usr/lib
# - name: hostusrlib64
# hostPath:
# path: /usr/lib64
# - name: hostusrsbin
# hostPath:
# path: /usr/sbin
# - name: hostsysmodule
# hostPath:
# path: /sys/module
|
bindata/manifests/daemon/daemonset.yaml
|
---
# vars file for install
remove_pkgs:
- drawing
- gnome-calendar
- gnote
- hexchat
- libreoffice*
- pix
- rhythmbox*
- thunderbird*
- transmission*
apt_key_urls:
- https://dl.google.com/linux/linux_signing_key.pub #GoogleChrome
- https://www.virtualbox.org/download/oracle_vbox_2016.asc #VirtualBox
- https://apt.enpass.io/keys/enpass-linux.key #Enpass
- https://typora.io/linux/public-key.asc #Typora
- https://download.docker.com/linux/ubuntu/gpg #Docker
apt_repo_urls:
- url: deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main #GoogleChrome
filename: google-chrome
- url: deb [arch=amd64] https://download.virtualbox.org/virtualbox/debian focal contrib #VirtualBox
filename: virtualbox
- url: deb https://apt.enpass.io/ stable main #Enpass
filename: enpass
- url: deb https://typora.io/linux ./ #Typora
filename: typora
- url: deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable #Docker
filename: docker
- url: deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/packages.microsoft.gpg] https://packages.microsoft.com/repos/vscode stable main #VS Code
filename: vscode
apt_repo_ppa:
- url: ppa:sebastian-stenzel/cryptomator #Cryptomator
filename: cryptomator
- url: ppa:graphics-drivers/ppa #NvidiaDrivers
filename: ppa_graphics
pkgs_urls:
- "{{ url_deb_autokey.stdout_lines[0] }}" #AutoKey
- "{{ url_deb_autokey.stdout_lines[1] }}" #AutoKey
- "{{ url_deb_etcher.stdout }}" #BalenaEtcher
- https://download.teamviewer.com/download/linux/teamviewer_amd64.deb #TeamViewer
- "{{ url_deb_vagrant.stdout }}" #Vagrant
pkgs:
- anki
- audacity
- build-essential
- calibre
- code
- cryptomator
- docker-ce
- docker-ce-cli
- containerd.io
- enpass
- flameshot
- gimp
- gnome-tweaks
- google-chrome-stable
- handbrake
- htop
- inkscape
- intel-microcode
- jq
- openssh-server
- p7zip-full
- p7zip-rar
- peek
- preload
- qbittorrent
- remmina
- remmina-plugin-rdp
- remmina-plugin-vnc
- scrcpy
- steam
- teamviewer
- tmux
- toilet
- typora
- uget
- vim
- virtualbox-6.1
- xclip
- zsh
flathub_pkgs:
- com.discordapp.Discord
- com.github.johnfactotum.Foliate
- com.meetfranz.Franz
- com.obsproject.Studio
- com.getpostman.Postman
- com.skype.Client
- com.wps.Office
|
roles/install/vars/main.yml
|
version: 2.1
orbs:
browser-tools: circleci/browser-tools@1.1.1
defaults: &defaults
working_directory: ~/repo
docker:
- image: circleci/node:erbium
jobs:
build:
<<: *defaults
steps:
- checkout
- run: ./build.sh
- persist_to_workspace:
root: ~/repo
paths: .
webtests:
working_directory: ~/repo
docker:
- image: circleci/openjdk:8-jdk-browsers
environment:
DISPLAY: :99
steps:
- run: sudo Xvfb :99 -screen 0 1920x1200x24 > /dev/null 2>&1 &
- attach_workspace:
at: ~/repo
- browser-tools/install-chrome
- browser-tools/install-chromedriver
- run:
command: |
google-chrome --version
chromedriver --version
ps -efa | grep Xvfb
name: Check install
- run: cd webtests && export DISPLAY=:99 && mvn clean install -Dwebdriver.chrome.driver=/usr/local/bin/chromedriver
- store_artifacts:
path: ~/repo/webtests/target/surefire-reports
- store_artifacts:
path: ~/repo/webtests/target/videos
docs-deploy:
<<: *defaults
steps:
- attach_workspace:
at: ~/repo
- run:
name: Add github.com to known hosts
command: mkdir ~/.ssh && ssh-keyscan github.com >> ~/.ssh/known_hosts
- run: git config user.email "<EMAIL>"
- run: git config user.name "vankeisb-ci"
- run:
name: Deploy docs to gh-pages branch
command: cd demo && yarn gh-pages
deploy:
<<: *defaults
steps:
- attach_workspace:
at: ~/repo
- run:
name: Authenticate with registry
command: echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" > ~/repo/.npmrc
- run: ./deploy.sh
workflows:
version: 2
build-deploy:
jobs:
- build:
filters:
branches:
ignore: /^gh-pages$/
- webtests:
requires:
- build
- docs-deploy:
requires:
- build
- webtests
filters:
branches:
only: main
- deploy:
requires:
- build
- webtests
filters:
tags:
only: /\d+\.\d+\.\d+/
branches:
only: main
|
.circleci/config.yml
|
name: Docker (test runner)
on:
pull_request:
paths:
- 'docker/**'
- '.github/workflows/docker-test-runner.yml'
- 'setup.py'
- 'setup.cfg'
- 'check-code.sh'
push:
paths:
- 'docker/**'
- '.github/workflows/docker-test-runner.yml'
- 'setup.py'
- 'setup.cfg'
- 'check-code.sh'
env:
ORG: opendatacube
IMAGE: datacube-tests
BUILDER_TAG: _build_cache
DOCKER_USER: gadockersvc
jobs:
docker:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v1
with:
python-version: '3.6'
# This is just to get dependencies right, we do not keep datacube in the final image
- name: Install packaging dependencies
run: |
# needed for version getting
git fetch --prune --unshallow 2> /dev/null || true
python -m pip install -U --force \
"setuptools>=42" \
"setuptools_scm[toml]>=3.4" \
wheel \
twine
      # Build the datacube wheel that will be copied into the docker build context
- name: Build datacube wheel
run: |
mkdir -p ./docker/dist/
find ./docker/dist/ -type f -delete
python setup.py bdist_wheel --dist-dir ./docker/dist/
ls -lh ./docker/dist/
twine check ./docker/dist/*
- name: Pull docker cache
run: |
docker pull ${ORG}/${IMAGE}:latest || true
docker pull ${ORG}/${IMAGE}:${BUILDER_TAG} || true
- name: Build Test Runner Docker
run: |
# build and cache first stage (env_builder)
docker build \
--target env_builder \
--cache-from ${ORG}/${IMAGE}:${BUILDER_TAG} \
--tag ${ORG}/${IMAGE}:${BUILDER_TAG} \
./docker/
# now build second stage making sure first stage is from cache
docker build \
--cache-from ${ORG}/${IMAGE}:${BUILDER_TAG} \
--cache-from ${ORG}/${IMAGE}:latest \
--tag ${ORG}/${IMAGE}:latest \
./docker/
- name: Verify Docker Image
run: |
./check-code.sh --with-docker integration_tests
- name: DockerHub Push
if: |
github.event_name == 'push' && (
github.ref == 'refs/heads/develop'
)
run: |
if [ -n "${{ secrets.DockerPassword }}" ]; then
echo "Login to DockerHub as ${DOCKER_USER}"
echo "${{ secrets.DockerPassword }}" | docker login -u "${DOCKER_USER}" --password-stdin
docker push ${ORG}/${IMAGE}:${BUILDER_TAG}
docker push ${ORG}/${IMAGE}:latest
else
echo "Set DockerPassword secret to push to docker"
fi
|
.github/workflows/docker-test-runner.yml
|
kentcdodds-kentcdodds.com:
url: https://github.com/kentcdodds/kentcdodds.com
version: 0.21.0
platforms: [express, fly]
HovaLabs-hova-labs-remix:
url: https://github.com/HovaLabs/hova-labs-remix
version: ^0.17.5
platforms: [vercel]
mcansh-snkrs:
url: https://github.com/mcansh/snkrs
packages: [remix-seo]
version: 0.20.1
platforms: [fly]
BenoitAverty-realworld-remix.run:
url: https://github.com/BenoitAverty/realworld-remix.run
version: ^0.21.1
platforms: [fly]
camchenry-camchenry-remix:
url: https://github.com/camchenry/camchenry-remix
version: ^0.21.1
platforms: [vercel]
sergiodxa-personal-site:
url: https://github.com/sergiodxa/personal-site
packages: [remix-utils]
version: 0.18.0
platforms: [vercel]
roelandmoors-remix-cloudflare-dynamodb:
url: https://github.com/roelandmoors/remix-cloudflare-dynamodb
version: ^1.0.0-rc.2
packages: [remix-auth]
platforms: [cloudflare]
itsMapleLeaf-thoughtbucket:
url: https://github.com/itsMapleLeaf/thoughtbucket
version: ^0.20.1
platforms: [fly]
jacob-ebey-remix-cloudflare-demo:
url: https://github.com/jacob-ebey/remix-cloudflare-demo
version: ^0.21.0
platforms: [cloudflare]
tidusia-thibaud-on-remix:
url: https://github.com/tidusia/thibaud-on-remix
version: ^0.20.1
gmencz-oneclip:
url: https://github.com/gmencz/OneClip
version: ^0.17.5
platforms: [express, fly]
TheRealFlyingCoder-remix-firebase-portal:
url: https://github.com/TheRealFlyingCoder/remix-firebase-portal
version: 0.17.0
platforms: [firebase]
GregBrimble-remix-on-cloudflare-pages:
url: https://github.com/GregBrimble/remix-on-cloudflare-pages
version: ^0.19.3
platforms: [cloudflare]
dbanisimov-remix-firebase-hosted-bundle:
url: https://github.com/dbanisimov/remix-firebase-hosted-bundle
version: ^0.19.3
platforms: [firebase]
dbanisimov-remix-firebase-express:
url: https://github.com/dbanisimov/remix-firebase-express
version: ^0.20.1
platforms: [express, firebase]
ajcwebdev-ajcwebdev-remix:
url: https://github.com/ajcwebdev/ajcwebdev-remix
version: ^0.20.1
platforms: [netlify]
jacob-ebey-ebey-me-remix:
url: https://github.com/jacob-ebey/ebey-me-remix
version: 0.19.0-pre.0
platforms: [vercel]
edmundhung-blog:
url: https://github.com/edmundhung/blog
version: ^0.20.1
platforms: [cloudflare]
edmundhung-remix-guide:
url: https://github.com/edmundhung/remix-guide
version: ^1.0.3
platforms: [cloudflare]
HerrBertling-hamvaxmap:
url: https://github.com/HerrBertling/hamvaxmap
version: ^1.0.4
platforms: [vercel]
marcomafessollli-remix-graphql-loader-example:
url: https://github.com/marcomafessolli/remix-graphql-loader-example
version: ^1.0.6
|
content/examples.yml
|
---
name: dist
on:
pull_request:
push:
branches:
- main
tags:
- '*'
jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: psf/black@stable
build:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install build dependencies
run: |
python -m pip install wheel
- uses: robotpy/build-actions/build-sdist@v2021
- uses: robotpy/build-actions/build-wheel@v2021
- name: Upload build artifacts
uses: actions/upload-artifact@v2
with:
name: dist
path: dist
test:
needs: [build]
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [windows-latest, macos-latest, ubuntu-18.04]
python_version: [3.6, 3.7, 3.8, 3.9]
architecture: [x86, x64]
exclude:
- os: macos-latest
architecture: x86
- os: ubuntu-18.04
architecture: x86
steps:
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python_version }}
architecture: ${{ matrix.architecture }}
- name: Download build artifacts
uses: actions/download-artifact@v2
with:
name: dist
path: dist
- uses: robotpy/build-actions/test-native-wheel@v2021
publish:
runs-on: ubuntu-latest
needs: [check, test]
if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags')
steps:
- name: Download build artifacts
uses: actions/download-artifact@v2
with:
name: dist
path: dist
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@master
with:
user: __token__
password: ${{ secrets.pypi_password }}
ping:
runs-on: ubuntu-latest
needs: [publish]
if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags')
steps:
- uses: robotpy/build-actions/ping-meta@v2021
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
|
.github/workflows/dist.yml
|
#: Sets the ip address.
ipAddress: "127.0.0.1"
#: Sets the port.
port: 8850
#: Setup Bcrypt.
bcryptLogRounds: &bcryptLogRounds 12
#: Default user name. An empty string will disable it.
#: Should only be used when behind CERN SSO!
defaultUsername: ""
#: basePath is just a useful value.
#: It defines a base directory to reference if the static, template, etc folders are
#: all in the same dir.
basePath: ""
#: staticFolder is the disk location of the static folder.
#: It is a flask defined variable.
#: To check if the static files are from the front-end webserver, use:
#: https://stackoverflow.com/questions/16595691/static-files-with-flask-in-production
#:
#: (ie. add + "CHANGE" to the staticFolder location specified here).
#staticFolder = os.path.join(sharedParameters.staticFolderName)
staticFolder: *staticFolder
#: staticURLPath is the URL of the static folder.
#: If you want to access "foo", it would be at $BASE_URL/staticURLPath/foo. "" is just the root.
#: It is a flask defined variable.
staticURLPath: "/static"
#: protectedFolder is the disk location of the protected folder.
#: This folder holds the experimental data.
protectedFolder: *dataFolder
#: docsFolder is the disk location of the docs folder.
docsFolder: &docsFolder "doc"
#: docsBuildFolder is the disk location of the docs html folder.
docsBuildFolder: !joinPaths
- *docsFolder
- "build"
- "html"
#: Subsystems which have templates available (determined on startup).
#: Since this is run from the root directory, we need to go into the "webApp" directory to find the templates!
availableRunPageTemplates: !findRunPageTemplates
- ".."
- "webApp"
- *templateFolder
flaskAssetsDebug: null
#: Sites to check during the status request.
statusRequestSites:
null: null
######
# Sensitive parameters
######
#: Defines the users that may be added to the database depending on the configuration.
#: This will currently not create any users.
#: Defined with an underscore since it is a private value.
#: Each user should be added in the form user: "password"
#: If the password is not hashed, then the _users type should be
#: "!bcrypt". It will then hash all passwords that are passed
#: Also be certain to pass bcryptLogRounds for it to succeed
_users: !bcrypt
bcryptLogRounds: *bcryptLogRounds
null: null
#: Secret key for signing cookies
#: Defined with an underscore since it is a private value.
#: If "false" is passed, then the value will be generated automatically
#: using urandom(50), as suggested by the flask developers.
_secretKey: !secretKey false
|
overwatch/webApp/config.yaml
|
uid: azure.mgmt.healthcareapis.models
name: models
fullName: azure.mgmt.healthcareapis.models
type: import
classes:
- azure.mgmt.healthcareapis.models.CheckNameAvailabilityParameters
- azure.mgmt.healthcareapis.models.ErrorDetails
- azure.mgmt.healthcareapis.models.ErrorDetailsInternal
- azure.mgmt.healthcareapis.models.Operation
- azure.mgmt.healthcareapis.models.OperationDisplay
- azure.mgmt.healthcareapis.models.OperationListResult
- azure.mgmt.healthcareapis.models.OperationResultsDescription
- azure.mgmt.healthcareapis.models.PrivateEndpoint
- azure.mgmt.healthcareapis.models.PrivateEndpointConnection
- azure.mgmt.healthcareapis.models.PrivateEndpointConnectionDescription
- azure.mgmt.healthcareapis.models.PrivateEndpointConnectionListResultDescription
- azure.mgmt.healthcareapis.models.PrivateLinkResource
- azure.mgmt.healthcareapis.models.PrivateLinkResourceDescription
- azure.mgmt.healthcareapis.models.PrivateLinkResourceListResultDescription
- azure.mgmt.healthcareapis.models.PrivateLinkServiceConnectionState
- azure.mgmt.healthcareapis.models.Resource
- azure.mgmt.healthcareapis.models.ServiceAccessPolicyEntry
- azure.mgmt.healthcareapis.models.ServiceAcrConfigurationInfo
- azure.mgmt.healthcareapis.models.ServiceAuthenticationConfigurationInfo
- azure.mgmt.healthcareapis.models.ServiceCorsConfigurationInfo
- azure.mgmt.healthcareapis.models.ServiceCosmosDbConfigurationInfo
- azure.mgmt.healthcareapis.models.ServiceExportConfigurationInfo
- azure.mgmt.healthcareapis.models.ServicesDescription
- azure.mgmt.healthcareapis.models.ServicesDescriptionListResult
- azure.mgmt.healthcareapis.models.ServicesNameAvailabilityInfo
- azure.mgmt.healthcareapis.models.ServicesPatchDescription
- azure.mgmt.healthcareapis.models.ServicesProperties
- azure.mgmt.healthcareapis.models.ServicesResource
- azure.mgmt.healthcareapis.models.ServicesResourceIdentity
- azure.mgmt.healthcareapis.models.SystemData
enums:
- azure.mgmt.healthcareapis.models.CreatedByType
- azure.mgmt.healthcareapis.models.Kind
- azure.mgmt.healthcareapis.models.ManagedServiceIdentityType
- azure.mgmt.healthcareapis.models.OperationResultStatus
- azure.mgmt.healthcareapis.models.PrivateEndpointConnectionProvisioningState
- azure.mgmt.healthcareapis.models.PrivateEndpointServiceConnectionStatus
- azure.mgmt.healthcareapis.models.ProvisioningState
- azure.mgmt.healthcareapis.models.PublicNetworkAccess
- azure.mgmt.healthcareapis.models.ServiceNameUnavailabilityReason
|
docs-ref-autogen/azure-mgmt-healthcareapis/azure.mgmt.healthcareapis.models.yml
|
store_package_artifacts:
steps:
- store_artifacts:
path: << pipeline.parameters.packages_workspace >>
destination: /
store_rebar3_crashdump:
steps:
- store_artifacts:
path: rebar3.crashdump
upload_artifacts_s3:
parameters:
bucket:
type: string
steps:
- attach_workspace:
at: << pipeline.parameters.packages_workspace >>
- run:
name: Upload artifacts to s3
command: |
envdir /secrets aws s3 sync --acl public-read << pipeline.parameters.packages_workspace >> s3://<< parameters.bucket >>/
promote_latest_artifact_s3:
parameters:
uri:
type: string
steps:
- attach_workspace:
at: << pipeline.parameters.packages_workspace >>
    - run:
        name: Promote latest artifact in s3
command: |
BUCKET=$(dirname << parameters.uri >>)
DST=$(basename << parameters.uri >> | sed -e 's/\(aeternity\)-.*-\(.*\)-\(.*\).tar.gz/\1-latest-\2-\3.tar.gz/g')
envdir /secrets aws s3 cp --acl public-read << parameters.uri >> $BUCKET/$DST
upload_artifacts_github:
steps:
- attach_workspace:
at: << pipeline.parameters.packages_workspace >>
- run:
name: Upload GitHub Release Asset
command: |
envdir /secrets /infrastructure/scripts/upload-github-release-assets.sh \
github_api_token=${GITHUB_API_TOKEN:?} \
owner=${CIRCLE_PROJECT_USERNAME} \
repo=${CIRCLE_PROJECT_REPONAME} \
tag=${CIRCLE_TAG} \
ASSETS=<< pipeline.parameters.packages_workspace >>/*
upload_artifacts_apt:
parameters:
packages_dir:
type: string
component:
type: string
steps:
- run:
name: Checkout APT repository
command: |
git clone <EMAIL>:aeternity/apt-repo.git --branch inbox --single-branch ~/apt-repo
- run:
name: Add deb packages
command: |
cp << parameters.packages_dir >>/aeternity-node*.deb ~/apt-repo/inbox/aeternity-node/<< parameters.component >>/
- run:
name: Setup git
command: |
git config --global push.default simple
git config --global user.email "<EMAIL>"
git config --global user.name "Aeternity node CI"
- run:
name: Commit and push
command: |
cd ~/apt-repo
git add -A
git commit -m "Add aeternity node package to << parameters.component >>" || true
git push
upload_artifacts_brew:
parameters:
packages_dir:
type: string
branch:
type: string
steps:
- run:
name: Checkout Homebrew repository
command: |
git clone <EMAIL>:aeternity/homebrew-aeternity.git --branch << parameters.branch >> --single-branch ~/homebrew-aeternity
- run:
name: Update the Homebrew formula
command: |
make VERSION
VERSION=$(cat VERSION)
PACKAGE_URL=https://github.com/aeternity/aeternity/releases/download/v$VERSION/aeternity-$VERSION-macos-x86_64.tar.gz
SHA256=$(sha256sum << parameters.packages_dir >>/aeternity-*-macos-x86_64.tar.gz | head -1 | cut -f 1 -d " ")
cd ~/homebrew-aeternity/
sed -i "s|^ AETERNITY_VERSION = .*| AETERNITY_VERSION = \"$VERSION\"|g" Formula/aeternity-node.rb
sed -i "s|^ sha256 .*| sha256 \"$SHA256\"|g" Formula/aeternity-node.rb
- run:
name: Setup git
command: |
git config --global push.default simple
git config --global user.email "<EMAIL>"
git config --global user.name "Aeternity node CI"
- run:
name: Commit and push
command: |
make VERSION
VERSION=$(cat VERSION)
cd ~/homebrew-aeternity/
git add Formula/aeternity-node.rb
git diff --staged
git commit -m "Update aeternity-node formula to $VERSION"
git push
|
.circleci/config/commands/@artifacts.yml
|
- hosts: localhost
vars:
root_dir: "{{ playbook_dir }}/../../.."
component_name: "teamtrak-api"
vars_files:
- "{{ root_dir }}/infrastructure/deploy/ansible/vars/vars"
- "{{ root_dir }}/infrastructure/deploy/ansible/vars/secrets"
tasks:
################################
# PULL AND ZIP ARTIFACT
################################
# DELETE TMP DIRECTORY
- name: Delete tmp directory
shell: "rm -rf {{ root_dir }}/tmp"
# CREATE TMP DIRECTORY
- name: Create tmp directory
shell: "mkdir {{ root_dir }}/tmp"
# GET CODEARTIFACT AUTHORIZATION TOKEN
- name: Fetch CodeArtifact token
shell: "AWS_ACCESS_KEY_ID={{ aws_access_key }} AWS_SECRET_ACCESS_KEY={{ aws_secret_access_key }} aws codeartifact get-authorization-token --region {{ aws_region }} --domain {{ aws_codeartifact_domain }} --domain-owner {{ aws_account_id }} --query authorizationToken --output text"
register: codeartifact_token
# SET AUTHORIZATION TOKEN VARIABLE
- set_fact:
codeartifact_token: "{{ codeartifact_token.stdout }}"
# REGISTER CODEARTIFACT REPOSITORY
- name: Register CodeArtifact repository
shell: "python -m pip config set global.index-url 'https://aws:{{ codeartifact_token }}@{{ aws_codeartifact_domain }}-{{ aws_account_id }}.d.codeartifact.{{ aws_region }}.amazonaws.com/pypi/{{ aws_codeartifact_repo }}/simple/'"
# DOWNLOAD ARTIFACT FROM REPOSITORY
- name: Download artifact
shell: "python -m pip install -t {{ root_dir }}/tmp/artifact teamtrakapi-pkg-billycohen96"
# EXTRACT VERSION NUMBER FROM PACKAGE METADATA
- name: Register version number
shell: 'cat {{ root_dir }}/tmp/artifact/*.dist-info/METADATA | grep "^Version" | cut -d " " -f 2'
register: version_number
# SET VERSION NUMBER VARIABLE
- set_fact:
version_number: "{{ version_number.stdout }}"
################################
# EXECUTE TERRAFORM PLAN
################################
# REMOVE CURRENT TERRAFORM PLAN
- name:
shell: "rm -rf {{ root_dir }}/infrastructure/deploy/terraform/.terraform"
# INITIALISE TERRAFORM PLAN
- name: Terraform init
command: terraform init
args:
chdir: "{{ root_dir }}/infrastructure/deploy/terraform"
register: output
# TERRAFORM DEPLOY/DESTROY
- name: Terraform deploy/destroy
command:
cmd: terraform {{ terraform_command }}
-var 'access_key={{ aws_access_key }}'
-var 'secret_key={{ aws_secret_access_key }}'
-var 'component_name={{ component_name }}'
-var 'env={{ env }}'
-var 'version_number={{ version_number }}'
-var 'artifact_path={{ root_dir }}/tmp/artifact'
-auto-approve
args:
chdir: "{{ root_dir }}/infrastructure/deploy/terraform"
register: output
################################
# EXTRACT OUTPUT VARIABLES
################################
- name: Terraform output
command:
cmd: terraform output
args:
chdir: "{{ root_dir }}/infrastructure/deploy/terraform"
register: output_variables
- debug: msg="{{ output_variables.stdout }}"
################################
# CLEAN UP
################################
|
infrastructure/deploy/ansible/deploy.yml
|
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
control-plane: interceptor
httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }}
keda.sh/addon: {{ .Chart.Name }}
app: {{ .Chart.Name }}
name: {{ .Chart.Name }}-interceptor
app.kubernetes.io/name: {{ .Chart.Name }}-interceptor
app.kubernetes.io/version: {{ .Values.images.tag | default .Chart.AppVersion }}
app.kubernetes.io/component: interceptor
app.kubernetes.io/part-of: {{ .Chart.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{ tpl .Values.additionalLabels . | indent 4}}
name: {{ .Chart.Name }}-interceptor
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
selector:
matchLabels:
control-plane: interceptor
httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }}
keda.sh/addon: {{ .Chart.Name }}
template:
metadata:
labels:
control-plane: interceptor
httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }}
keda.sh/addon: {{ .Chart.Name }}
app: {{ .Chart.Name }}
name: {{ .Chart.Name }}-interceptor
app.kubernetes.io/name: {{ .Chart.Name }}-interceptor
app.kubernetes.io/version: {{ .Values.images.tag | default .Chart.AppVersion }}
app.kubernetes.io/component: interceptor
app.kubernetes.io/part-of: {{ .Chart.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{ tpl .Values.additionalLabels . | indent 4}}
spec:
containers:
- args:
image: "{{ .Values.images.interceptor }}:{{ .Values.images.tag | default .Chart.AppVersion }}"
imagePullPolicy: '{{ .Values.interceptor.pullPolicy | default "Always" }}'
name: "{{ .Chart.Name }}-interceptor"
env:
- name: KEDA_HTTP_ROUTING_TABLE_UPDATE_DURATION_MS
value: "{{ .Values.interceptor.routingTableUpdateDurationMS }}"
- name: KEDA_HTTP_CURRENT_NAMESPACE
value: "{{ .Release.Namespace }}"
- name: KEDA_HTTP_PROXY_PORT
value: "{{ .Values.interceptor.proxy.port }}"
- name: KEDA_HTTP_ADMIN_PORT
value: "{{ .Values.interceptor.admin.port }}"
- name: KEDA_HTTP_CONNECT_TIMEOUT
value: "{{ .Values.interceptor.tcpConnectTimeout }}"
- name: KEDA_HTTP_KEEP_ALIVE
value: "{{ .Values.interceptor.keepAlive }}"
- name: KEDA_RESPONSE_HEADER_TIMEOUT
value: "{{ .Values.interceptor.responseHeaderTimeout }}"
- name: KEDA_CONDITION_WAIT_TIMEOUT
value: "{{ .Values.interceptor.replicas.waitTimeout }}"
- name: KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS
value: "{{ .Values.interceptor.deploymentCachePollingIntervalMS }}"
- name: KEDA_HTTP_FORCE_HTTP2
value: "{{ .Values.interceptor.forceHTTP2 }}"
- name: KEDA_HTTP_MAX_IDLE_CONNS
value: "{{ .Values.interceptor.maxIdleConns }}"
- name: KEDA_HTTP_IDLE_CONN_TIMEOUT
value: "{{ .Values.interceptor.idleConnTimeout }}"
- name: KEDA_HTTP_TLS_HANDSHAKE_TIMEOUT
value: "{{ .Values.interceptor.tlsHandshakeTimeout }}"
- name: KEDA_HTTP_EXPECT_CONTINUE_TIMEOUT
value: "{{ .Values.interceptor.expectContinueTimeout }}"
ports:
- containerPort: {{ .Values.interceptor.admin.port }}
name: inter-admin
- containerPort: {{ .Values.interceptor.proxy.port }}
name: inter-proxy
resources:
{{- toYaml .Values.resources | nindent 12 }}
terminationGracePeriodSeconds: 10
{{- with .Values.interceptor.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.interceptor.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.interceptor.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
|
charts/keda-add-ons-http/templates/deployment-interceptor.yaml
|
uid: learn.data-ai.identify-faces-with-computer-vision.knowledge-check
title: Knowledge check - Use the Computer Vision Face API
metadata:
title: Knowledge check - Use the Computer Vision Face API
description: Check how much you know about how to use the Face API in Azure Cognitive Services
ms.date: 01/07/2019
author: gerryaobrien
ms.author: gerryo
ms.topic: interactive-tutorial
ms.prod: learning-azure
ROBOTS: NOINDEX
durationInMinutes: 7
quiz:
title: Check your knowledge
questions:
- content: "Which of the following concepts *isn't* part of the Cognitive Services Face API? (Choose one.)"
choices:
- content: Face verification
isCorrect: false
explanation: "Concepts (and methods) that are used in the Face API fall into the following categories: verification, detection, identification, similarity, and grouping."
- content: Face detection
isCorrect: false
explanation: "Concepts (and methods) that are used in the Face API fall into the following categories: verification, detection, identification, similarity, and grouping."
- content: Face identification
isCorrect: false
explanation: "Concepts (and methods) that are used in the Face API fall into the following categories: verification, detection, identification, similarity, and grouping."
- content: Face morphing
isCorrect: true
explanation: "Concepts (and methods) that are used in the Face API fall into the following categories: verification, detection, identification, similarity, and grouping. Manipulation of faces (*morphing*) is not part of the Face API."
- content: Face matching
isCorrect: false
explanation: "Concepts (and methods) that are used in the Face API fall into the following categories: verification, detection, identification, similarity, and grouping."
- content: "You want to add 20 friends to a collection, and you have four face images for each friend. Each face image is referred to as a *face*, and each friend is referred to as a *person*. In the Face API, what is this collection of friends called? (Choose one.)"
choices:
- content: A person group
isCorrect: true
      explanation: "A collection of friends, although technically a collection, is called a *person group*. A person group is a list or collection of people (persons)."
- content: A person collection
isCorrect: false
      explanation: "A collection of friends, although technically a collection, is called a *person group*. A person group is a list or collection of people (persons)."
- content: A people group
isCorrect: false
      explanation: "A collection of friends, although technically a collection, is called a *person group*. A person group is a list or collection of people (persons)."
|
learn-pr/data-ai-cert/identify-faces-with-computer-vision/11-knowledge-check.yml
|
name: CD
on:
pull_request:
branches: [ release ]
# The CD job is a god job due to a security issue.
# In order to transfer the restored files in-between jobs, I would have to upload and then download them.
# That is a security breach because the uploaded files will be publicly available.
jobs:
cd:
name: CD
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Restore keystore.properties
run: |
echo "${{ secrets.KEYSTORE_PROPERTIES }}" > keystore.properties.asc
gpg -d --passphrase "${{ secrets.KEYSTORE_PROPERTIES_PASSPHRASE }}" --batch keystore.properties.asc > keystore.properties
- name: Restore keystore.jsk
run: |
echo "${{ secrets.KEYSTORE }}" > lists_keystore.jks.asc
gpg -d --passphrase "${{ secrets.KEYSTORE_PASSPHRASE }}" --batch lists_keystore.jks.asc > app/lists_keystore.jks
- name: Restore debug google.json
run: |
echo "${{ secrets.GOOGLE_JSON_DEBUG }}" > google-services.json.asc
gpg -d --passphrase "${{ secrets.GOOGLE_JSON_DEBUG_PASSPHRASE }}" --batch google-services.json.asc > app/src/debug/google-services.json
- name: Restore release google.json
run: |
echo "${{ secrets.GOOGLE_JSON_RELEASE }}" > google-services.json.asc
gpg -d --passphrase "${{ secrets.GOOGLE_JSON_RELEASE_PASSPHRASE }}" --batch google-services.json.asc > app/src/release/google-services.json
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Cache Gradle packages
uses: actions/cache@v2
with:
path: ~/.gradle/caches
key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
restore-keys: ${{ runner.os }}-gradle
- name: Run all unit tests
run: ./gradlew testDebug --info
- name: Assemble release APK
run: ./gradlew assembleRelease --info
- name: Upload the release APK
uses: actions/upload-artifact@v2
with:
name: release-apk
path: app/build/outputs/apk/release/*.apk
|
.github/workflows/cd.yml
|
fr:
blogit:
comments:
leave_a_comment: "Laissez un commentaire"
name: "Nom *"
email_never_displayed: "Email * (Jamais affiché)"
your_website: "Votre site web"
your_comment: "Votre commentaire *"
add_comment: "Ajouter un commentaire"
adding_comment: "Ajout de commentaire ..."
wrote: "a écrit:"
posted_on: "Publié le "
delete: "effacer"
are_you_sure_you_want_to_remove_this_comment: "Etes vous certain de vouloir supprimer ce commentaire?"
successfully_added_comment: "Commentaire ajouté avec succès."
successfully_removed_comment: "Commentaire supprimé avec succès."
posts:
written_by: "Ecrit par %{author} le"
there_are_no_posts_about: "Désolé, il n'y a pas d'article à propos de \"%{tag}\"."
prohibited_this_post_from_being_saved:
one: "Une erreur a empêché cet article d'être enregistré :"
other: "%{count} erreurs ont empêché cet article d'être enregistré :"
give_your_post_a_title: "Donnez un nom à votre article"
write_something_here: "Ecrivez quelque chose ici ..."
tags: "Libellés"
tag_placeholder: "Libellé 1, Libellé 2 ..."
tip_you_can_style_your_post_using: "Astuce : vous pouvez styler votre article en utilisant %{parser}"
or: "ou"
cancel: "annuler"
edit: "editer"
delete: "supprimer"
are_you_sure_you_want_to_remove_this_post: "Etes vous certain de vouloir supprimer cet article?"
comment:
zero: "Aucun commentaire"
one: "1 commentaire"
other: "%{count} commentaires"
edit_blog_post: "Editez l'article #%{id}"
new_blog_post: "Nouvel article"
write_a_new_blog_post: "Ecrivez un nouvel article"
blog_post_was_successfully_created: "Article ajouté avec succès."
blog_post_was_successfully_updated: "Article modifié avec succès."
blog_post_was_successfully_destroyed: "Article supprimé avec succès."
tags: "Catégories"
related_posts: "Articles similaires"
share_bar:
facebook: "fr_FR"
time:
formats:
plain_month_only: "%B"
activerecord:
models:
blogit/post: "article"
blogit/comment: "commentaire"
attributes:
blogit/post:
title: "titre"
body: "corps"
blogit/comment:
name: "nom"
body: "corps"
views:
pagination:
first: "« Premiers"
last: "Derniers »"
previous: "‹ Précédents"
next: "Suivants ›"
truncate: "…"
helpers:
page_entries_info:
one_page:
display_entries:
zero: "Aucun %{entry_name} n'a été trouvé"
one: "<b>1</b> %{entry_name}"
other: "<b>Tous les %{count}</b> %{entry_name}s"
more_pages:
display_entries: "${entry_name}s <b>%{first} - %{last}</b> d'un total de <b>%{total}</b>"
|
config/locales/fr.yml
|
# External logical database schema.
schemaName: warehousedb
# Extension default datasource configuration.
# @see:com.wl4g.shardingproxy.config.ProxyConfigurationLoader2.loadRuleConfiguration
extensionDefaultDataSource:
username: root
password: <PASSWORD>
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
minPoolSize: 1
# Datasource configuration.
dataSources:
ds_warehousedb_r0z0mgr0i0:
url: 'jdbc:mysql://n0.rds.local:3306/warehousedb?serverTimezone=UTC&useSSL=false&allowMultiQueries=true&characterEncoding=utf-8'
ds_warehousedb_r0z0mgr0i1:
url: 'jdbc:mysql://n1.rds.local:3306/warehousedb?serverTimezone=UTC&useSSL=false&allowMultiQueries=true&characterEncoding=utf-8'
ds_warehousedb_r0z0mgr0i2:
url: 'jdbc:mysql://n2.rds.local:3306/warehousedb?serverTimezone=UTC&useSSL=false&allowMultiQueries=true&characterEncoding=utf-8'
# Schema rules configuration.
rules:
## @see:org.apache.shardingsphere.readwritesplitting.yaml.config.rule.YamlReadwriteSplittingDataSourceRuleConfiguration
## @see:org.apache.shardingsphere.readwritesplitting.yaml.config.YamlReadwriteSplittingRuleConfiguration
- !READWRITE_SPLITTING
dataSources:
rw_warehousedb_r0z0mgr0:
writeDataSourceName: ds_warehousedb_r0z0mgr0i0
readDataSourceNames:
- ds_warehousedb_r0z0mgr0i1
- ds_warehousedb_r0z0mgr0i2
loadBalancerName: r_lb_0
loadBalancers:
r_lb_0:
type: RANDOM
## @see:org.apache.shardingsphere.readwritesplitting.api.DatabaseDiscoveryRuleConfiguration
- !DB_DISCOVERY
dataSources:
ha_warehousedb_r0z0mgr0:
discoveryTypeName: default_dis_mgr
discoveryHeartbeatName: default_mgr_heartbeat
dataSourceNames:
- ds_warehousedb_r0z0mgr0i0
- ds_warehousedb_r0z0mgr0i1
- ds_warehousedb_r0z0mgr0i2
discoveryHeartbeats:
default_mgr_heartbeat:
props:
keep-alive-cron: '0/5 * * * * ?'
discoveryTypes:
## HA MGR architecture @see: https://blog.csdn.net/n88Lpo/article/details/118315051
default_dis_mgr:
type: MGR
props:
## Must be consistent with the MGR configuration group name.
## @see:https://github.com/apache/shardingsphere/blob/5.1.0/shardingsphere-features/shardingsphere-db-discovery/shardingsphere-db-discovery-provider/shardingsphere-db-discovery-mgr/src/main/java/org/apache/shardingsphere/dbdiscovery/mgr/MGRDatabaseDiscoveryType.java#L95
## @see:https://github.com/apache/shardingsphere/blob/5.1.0/examples/shardingsphere-sample/shardingsphere-example-generated/shardingsphere-proxy-sample/shardingsphere-proxy-cluster-etcd-local-db-discovery-example/src/main/resources/conf/config-db-discovery.yaml#L56
## @see:https://github.com/mysql/mysql-server/blob/mysql-5.7.30/rapid/plugin/group_replication/src/plugin.cc#L1726
## @see:SELECT * FROM performance_schema.global_variables WHERE VARIABLE_NAME='group_replication_group_name'
group-name: 5db40c3c-180c-11e9-afbf-005056ac6820
## @see:org.apache.shardingsphere.dbdiscovery.mgr.MGRDatabaseDiscoveryType#extDiscoveryConfig
extensionDiscoveryConfigJson: |-
{
"memberHostMappings": [{
"rds-mgr-0:3306": [
"n0.rds.local:3306"
]
}, {
"rds-mgr-1:3306": [
"n1.rds.local:3306"
]
}, {
"rds-mgr-2:3306": [
"n2.rds.local:3306"
]
}]
}
|
shardingproxy-starter/src/main/resources/example/config-readwrite-warehousedb.yaml
|
uid: "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility"
fullName: "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility"
name: "UpdateOperationUtility"
nameWithType: "UpdateOperationUtility"
summary: "A utility to create the JSON patch payload required for update operations such as update digital twin"
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public final class UpdateOperationUtility"
constructors:
- "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility.UpdateOperationUtility()"
methods:
- "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility.appendAddComponentOperation(java.lang.String,java.util.Map<java.lang.String,java.lang.Object>)"
- "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility.appendAddPropertyOperation(java.lang.String,java.lang.Object)"
- "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility.appendRemoveComponentOperation(java.lang.String)"
- "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility.appendRemovePropertyOperation(java.lang.String)"
- "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility.appendReplaceComponentOperation(java.lang.String,java.util.Map<java.lang.String,java.lang.Object>)"
- "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility.appendReplacePropertyOperation(java.lang.String,java.lang.Object)"
- "com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility.getUpdateOperations()"
type: "class"
metadata: {}
package: "com.microsoft.azure.sdk.iot.service.digitaltwin"
artifact: com.microsoft.azure.sdk.iot:iot-service-client-preview:1.2.0
|
preview/docs-ref-autogen/com.microsoft.azure.sdk.iot.service.digitaltwin.UpdateOperationUtility.yml
|
AWSTemplateFormatVersion: 2010-09-09
Description: AWS Perspective Cloudfront Distribution Stack
Parameters:
AccessLogsBucket:
Description: The bucket that contains the access logs for Perspective buckets.
Type: String
SecuredEdgeBucket:
Type: String
    Description: The bucket where the zip files containing the source code for the SecuredEdge Lambda are stored
DeploymentBucketKey:
Type: String
Description: The key within the bucket that contains the source code zips
Resources:
SecurityFunction:
Type: AWS::CloudFront::Function
Properties:
AutoPublish: true
Name:
!Sub
- HSTS-FUNCTION-${StackBit}
- StackBit: !Select
- 7
- !Split
- "-"
- !Ref AWS::StackName
FunctionConfig:
Comment: Security Headers
Runtime: cloudfront-js-1.0
'Fn::Transform':
Name: 'AWS::Include'
Parameters:
Location : !Sub 's3://${SecuredEdgeBucket}/${DeploymentBucketKey}/cff-hsts.js'
WebUIBucketReadPolicy:
Type: AWS::S3::BucketPolicy
Properties:
Bucket: '{{resolve:ssm:WebUIBucketName:1}}'
PolicyDocument:
Statement:
- Action: s3:GetObject
Effect: Allow
Resource:
Fn::Sub:
- arn:aws:s3:::${WebUIBucketName}/*
- WebUIBucketName: '{{resolve:ssm:WebUIBucketName:1}}'
Principal:
CanonicalUser:
Fn::GetAtt:
- CloudFrontOriginAccessIdentity
- S3CanonicalUserId
CloudFrontOriginAccessIdentity:
Type: AWS::CloudFront::CloudFrontOriginAccessIdentity
Properties:
CloudFrontOriginAccessIdentityConfig:
Comment: '{{resolve:ssm:WebUIBucketName:1}}'
CloudFrontDistribution:
Metadata:
cfn_nag:
rules_to_suppress:
- id: W70
reason: 'If the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net (you set CloudFrontDefaultCertificate to true), CloudFront automatically sets the security policy to TLSv1 regardless of the value that you set here.'
Type: AWS::CloudFront::Distribution
Properties:
DistributionConfig:
Origins:
- DomainName: '{{resolve:ssm:WebUIBucketRegionalDomainName:1}}'
Id: perspective-origin
S3OriginConfig:
OriginAccessIdentity:
Fn::Sub: origin-access-identity/cloudfront/${CloudFrontOriginAccessIdentity}
Enabled: true
HttpVersion: http2
Comment: The Distribution for the Perspective Web UI
DefaultRootObject: index.html
Logging:
Bucket: !Sub ${AccessLogsBucket}.s3.amazonaws.com
IncludeCookies: false
Prefix: 'aws-perspective-ui'
DefaultCacheBehavior:
AllowedMethods:
- HEAD
- GET
- OPTIONS
TargetOriginId: perspective-origin
ForwardedValues:
QueryString: false
Cookies:
Forward: none
ViewerProtocolPolicy: redirect-to-https
FunctionAssociations:
- EventType: viewer-response
FunctionARN: !GetAtt SecurityFunction.FunctionMetadata.FunctionARN
PriceClass: PriceClass_All
ViewerCertificate:
CloudFrontDefaultCertificate: true
MinimumProtocolVersion: TLSv1.1_2016
Outputs:
WebUiUrl:
Value: !Sub https://${CloudFrontDistribution.DomainName}
Description: WebUI URL
|
source/cfn/templates/perspective-cloudfront.yaml
|
DATALOADER:
ASPECT_RATIO_GROUPING: false
SIZE_DIVISIBILITY: 32
DATASETS:
GENERAL_COPY: 16
OVERRIDE_CATEGORY: '[{"id": 1, "name": "1", "supercategory": "dice"}, {"id": 2,
"name": "2", "supercategory": "dice"}, {"id": 3, "name": "3", "supercategory":
"dice"}, {"id": 4, "name": "4", "supercategory": "dice"}, {"id": 5, "name": "5",
"supercategory": "dice"}, {"id": 6, "name": "6", "supercategory": "dice"}]'
  # NOTE(review): this points at the "pothole" dataset while the rest of this
  # config targets the dice dataset — confirm this path is intentional.
  PREDEFINED_TEXT: odinw/pothole/category_description.json
REGISTER:
test:
ann_file: odinw/dice/mediumColor/export/test_annotations_without_background.json
img_dir: odinw/dice/mediumColor/export
train:
ann_file: odinw/dice/mediumColor/export/train_annotations_without_background.json
img_dir: odinw/dice/mediumColor/export
train_10_3:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot10_seed3.json
img_dir: odinw/dice/mediumColor/export
train_10_30:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot10_seed30.json
img_dir: odinw/dice/mediumColor/export
train_10_300:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot10_seed300.json
img_dir: odinw/dice/mediumColor/export
train_1_3:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot1_seed3.json
img_dir: odinw/dice/mediumColor/export
train_1_30:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot1_seed30.json
img_dir: odinw/dice/mediumColor/export
train_1_300:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot1_seed300.json
img_dir: odinw/dice/mediumColor/export
train_3_3:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot3_seed3.json
img_dir: odinw/dice/mediumColor/export
train_3_30:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot3_seed30.json
img_dir: odinw/dice/mediumColor/export
train_3_300:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot3_seed300.json
img_dir: odinw/dice/mediumColor/export
train_5_3:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot5_seed3.json
img_dir: odinw/dice/mediumColor/export
train_5_30:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot5_seed30.json
img_dir: odinw/dice/mediumColor/export
train_5_300:
ann_file: odinw/dice/mediumColor/export/fewshot_train_shot5_seed300.json
img_dir: odinw/dice/mediumColor/export
val:
ann_file: odinw/dice/mediumColor/export/val_annotations_without_background.json
img_dir: odinw/dice/mediumColor/export
val_10_3:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot10_seed3.json
img_dir: odinw/dice/mediumColor/export
val_10_30:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot10_seed30.json
img_dir: odinw/dice/mediumColor/export
val_10_300:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot10_seed300.json
img_dir: odinw/dice/mediumColor/export
val_1_3:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot1_seed3.json
img_dir: odinw/dice/mediumColor/export
val_1_30:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot1_seed30.json
img_dir: odinw/dice/mediumColor/export
val_1_300:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot1_seed300.json
img_dir: odinw/dice/mediumColor/export
val_3_3:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot3_seed3.json
img_dir: odinw/dice/mediumColor/export
val_3_30:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot3_seed30.json
img_dir: odinw/dice/mediumColor/export
val_3_300:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot3_seed300.json
img_dir: odinw/dice/mediumColor/export
val_5_3:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot5_seed3.json
img_dir: odinw/dice/mediumColor/export
val_5_30:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot5_seed30.json
img_dir: odinw/dice/mediumColor/export
val_5_300:
ann_file: odinw/dice/mediumColor/export/fewshot_val_shot5_seed300.json
img_dir: odinw/dice/mediumColor/export
TEST: ("val",)
TRAIN: ("train",)
INPUT:
MAX_SIZE_TEST: 1333
MAX_SIZE_TRAIN: 1333
MIN_SIZE_TEST: 800
MIN_SIZE_TRAIN: 800
MODEL:
ATSS:
NUM_CLASSES: 71
DYHEAD:
NUM_CLASSES: 71
FCOS:
NUM_CLASSES: 71
ROI_BOX_HEAD:
NUM_CLASSES: 71
SOLVER:
CHECKPOINT_PERIOD: 100
MAX_EPOCH: 12
WARMUP_ITERS: 0
TEST:
IMS_PER_BATCH: 8
|
configs/odinw_35/dice_mediumColor_export.yaml
|
TaskRole:
Metadata:
'aws:copilot:description': 'An IAM role to control permissions for the containers in your tasks'
Type: AWS::IAM::Role
Properties:{{if .NestedStack}}{{$stackName := .NestedStack.StackName}}{{if gt (len .NestedStack.PolicyOutputs) 0}}
ManagedPolicyArns:{{range $managedPolicy := .NestedStack.PolicyOutputs}}
- Fn::GetAtt: [{{$stackName}}, Outputs.{{$managedPolicy}}]{{end}}{{end}}{{end}}
AssumeRolePolicyDocument:
Statement:
- Effect: Allow
Principal:
Service: ecs-tasks.amazonaws.com
Action: 'sts:AssumeRole'
Policies:
- PolicyName: 'DenyIAMExceptTaggedRoles'
PolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: 'Deny'
Action: 'iam:*'
Resource: '*'
- Effect: 'Allow'
Action: 'sts:AssumeRole'
Resource:
- !Sub 'arn:aws:iam::${AWS::AccountId}:role/*'
Condition:
StringEquals:
'iam:ResourceTag/copilot-application': !Sub '${AppName}'
'iam:ResourceTag/copilot-environment': !Sub '${EnvName}'
{{- if .Storage}}
{{- range $EFS := .Storage.EFSPerms}}
- PolicyName: 'GrantEFSAccess{{$EFS.FilesystemID}}'
PolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: 'Allow'
Action:
- 'elasticfilesystem:ClientMount'
{{- if $EFS.Write}}
- 'elasticfilesystem:ClientWrite'
{{- end}}
{{- if $EFS.AccessPointID}}
Condition:
StringEquals:
'elasticfilesystem:AccessPointArn': !Sub 'arn:aws:elasticfilesystem:${AWS::Region}:${AWS::AccountId}:access-point/{{$EFS.AccessPointID}}'
{{- end}}
Resource:
- !Sub 'arn:aws:elasticfilesystem:${AWS::Region}:${AWS::AccountId}:file-system/{{$EFS.FilesystemID}}'
{{- end}}
{{- end -}}
|
templates/workloads/partials/cf/taskrole.yml
|
velo_homepage:
path: /
defaults: { _controller: VeloBundle:Default:index }
read_velo:
path: /read
defaults: { _controller: VeloBundle:Velo:read }
create_velo:
path: /create
defaults: { _controller: VeloBundle:Velo:create }
update_velo:
path: /update/{id}
defaults: { _controller: VeloBundle:Velo:update }
delete_velo:
path: /delete/{id}
defaults: { _controller: VeloBundle:Velo:delete }
plus_de_details:
path: /details/{id}
defaults: { _controller: VeloBundle:Velo:index }
panier_add:
path: /panier/add/{id}
defaults: { _controller: VeloBundle:Carte:add }
panier_remove:
path: /panier/remove/{id}
defaults: { _controller: VeloBundle:Carte:remove }
panier_select:
path: /panier
defaults: { _controller: VeloBundle:Carte:index }
readcmd_commande:
path: /readcmd
defaults: { _controller: VeloBundle:Commande:readcmd }
createcmd_commande:
path: /createcmd
defaults: { _controller: VeloBundle:Commande:createcmd }
deletecmd_commande:
path: /deletecmd/{id}
defaults: { _controller: VeloBundle:Commande:deletecmd }
updatecmd_commande:
path: /updatecmd/{id}
defaults: { _controller: VeloBundle:Commande:updatecmd }
passer_cmd:
path: /passercmd
defaults: { _controller: VeloBundle:Commande:indexx }
consulter_cmd:
path: /consultercmd/{id}
defaults: { _controller: VeloBundle:Commande:indexxx }
veloback_homepage:
path: /back
defaults: { _controller: VeloBundle:Default:indexbackk }
Evenement_affliste:
path: /back
defaults: { _controller: VeloBundle:Default:indexbackk }
createback_velo:
path: /createback
defaults: { _controller: VeloBundle:Velo:createback }
readback_velo:
path: /readback
defaults: { _controller: VeloBundle:Velo:readback }
deleteback_velo:
path: /deleteback/{id}
defaults: { _controller: VeloBundle:Velo:deleteback }
updateback_velo:
path: /updateback/{id}
defaults: { _controller: VeloBundle:Velo:updateback }
readcmdback_commande:
path: /readcmdback
defaults: { _controller: VeloBundle:Commande:readcmdback }
deletecmdback_commande:
path: /deletecmdback/{id}
defaults: { _controller: VeloBundle:Commande:deletecmdback }
updatecmdback_commande:
path: /updatecmdback/{id}
defaults: { _controller: VeloBundle:Commande:updatecmdback }
|
src/VeloBundle/Resources/config/routing.yml
|
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "actinia.fullname" . }}
labels:
{{ include "actinia.labels" . | indent 4 }}
data:
start.sh: |
#!/bin/sh
# TODO: read paths from config
mkdir -p /actinia_core/grassdb
mkdir -p /actinia_core/userdata
mkdir -p /actinia_core/workspace/temp_db
mkdir -p /actinia_core/workspace/tmp
mkdir -p /actinia_core/resources
# copy pgpass from mounted (!) file
cp /mnt/pgpass/.pgpass $HOME/.pgpass
chmod 0600 $HOME/.pgpass
# copy db.login file from mounted (!) file
cp /mnt/pgpass/.grass7 $HOME/.grass7/dblogin
chmod 0600 $HOME/.grass7/dblogin
# copy db.login file to actinia-core tmp location
mkdir -p /tmp/:/root/.grass7
cp /root/.grass7/dblogin /tmp/:/root/.grass7/
# Create default location in mounted (!) directory
[ ! -d "/actinia_core/grassdb/nc_spm_08" ] && grass --text -e -c 'EPSG:3358' /actinia_core/grassdb/nc_spm_08
actinia-user create -u actinia-gdi -w actinia-gdi -r superadmin -g superadmin -c {{ .Values.config.actinia.cellLimit }} -n {{ .Values.config.actinia.processNumLimit }} -t {{ .Values.config.actinia.processTimeLimit }}
actinia-user update -u actinia-gdi -w {{ .Values.config.actinia.password }}
status=$?
if [ $status -ne 0 ]; then
echo "Failed to start actinia-user: $status"
exit $status
fi
gunicorn -b 0.0.0.0:8088 -w {{ .Values.config.actinia.workercount }} --access-logfile=- -k gthread actinia_core.main:flask_app
status=$?
if [ $status -ne 0 ]; then
echo "Failed to start actinia_core/main.py: $status"
exit $status
fi
actinia-core-config: |
[GRASS]
grass_database = /actinia_core/grassdb
grass_user_database = /actinia_core/userdata
grass_tmp_database = /actinia_core/workspace/temp_db
grass_resource_dir = /actinia_core/resources
grass_gis_base = /usr/local/grass7
grass_gis_start_script = /usr/local/bin/grass
grass_addon_path = /root/.grass7/addons/
[API]
plugins = ["actinia_module_plugin", "actinia_stac_plugin"]
force_https_urls = True
[REDIS]
{{- if .Values.config.redis.host }}
redis_server_url = {{ .Values.config.redis.host }}
{{- else }}
redis_server_url = {{ include "actinia.fullname" . }}-redis-headless
{{- end }}
{{- if .Values.config.redis.port }}
redis_server_port = {{ .Values.config.redis.port }}
{{- else }}
redis_server_port = 6379
{{- end }}
redis_resource_expire_time = 864000
worker_logfile = /actinia_core/workspace/tmp/actinia_worker.log
[LOGGING]
log_stdout_format = json
log_level = 1
[MISC]
tmp_workdir = /actinia_core/workspace/tmp
download_cache = /actinia_core/workspace/download_cache
secret_key = token_signing_key_changeme
init-data.sh: |-
#!/bin/bash
apk add curl
echo "check if sampledata exists"
FILE=/actinia_core/grassdb/.init
if [ -f "$FILE" ]; then
echo "sampledata already exists"
exit 0
fi
mkdir -p /actinia_core/grassdb
cd /actinia_core/grassdb
echo "download basic sampledata"
DOWNLOAD=nc_basic_spm_grass7.tar.gz
    curl --continue-at - -O https://grass.osgeo.org/sampledata/north_carolina/${DOWNLOAD}
tar -zxf ${DOWNLOAD} && rm ${DOWNLOAD}
mv nc_basic_spm_grass7 nc_spm_08
echo "download temporal sampledata"
DOWNLOAD=nc_spm_mapset_modis2015_2016_lst.zip
    curl --continue-at - -O https://grass.osgeo.org/sampledata/north_carolina/${DOWNLOAD}
unzip ${DOWNLOAD} && rm ${DOWNLOAD}
mv modis_lst /actinia_core/grassdb/nc_spm_08/modis_lst
CMD="touch $FILE"
echo $CMD && eval $CMD
# chown -R 1001:1001 /actinia_core/grassdb/nc_spm_08/modis_lst && chmod -R g+w /actinia_core/grassdb/nc_spm_08/modis_lst
|
charts/actinia/templates/configmap.yaml
|
homepage: https://github.com/k0001/pipes-network
changelog-type: markdown
hash: 1dd16c7dd7f70c0683052c486f4d46d89ff4ae68d0738da2efa451d452fcd657
test-bench-deps: {}
maintainer: <EMAIL>
synopsis: Use network sockets together with the pipes library.
changelog: ! "# Version 0.6.5\n\n* Remover upper bound on all dependencies except
`base`.\n\n* Fixed internal error handling in server-side functions.\n\n* Re-export
`MonadSafe`.\n\n\n# Version 0.6.4.1\n\n* Raise upper-bound dependency on `transformers`
and `pipes`.\n\n\n# Version 0.6.4\n\n* The various `fromServe` and `toServe*` pipes
from\n `Pipes.Network.TCP.Safe` now close the listening socket immediately\n after
accepting a first connection.\n\n* Re-export `sendLazy`, `sendMany` and `closeSock`.\n\n*
Add `to{Socket,SocketTimeout,Serve,Connect}{Lazy,Many}`.\n\n* Fix issue #29 where
`fromSocketTimeout` and `fromSocketTimeoutN`\n would loop forever.\n\n\n# Version
0.6.3\n\n* Bump `network-simple`, `transformers` and `pipes-safe` upper bounds.\n\n*
Remove `Base m ~ IO` constraints from `Pipes.Network.TCP.Safe`, as\n not all of
them were removed in 0.6.1.\n\n\n# Version 0.6.2\n\n* Dependency bumps (upper bounds).\n\n\n#
Version 0.6.1\n\n* Remove the `Base m ~ IO` constraint from `Pipes.Network.TCP.Safe`\n\n\n#
Version 0.6.0\n\n* Significantly upgraded the API and renamed functions to play
well with\n pipes-4.0.0, pipes-safe-2.0.0 and network-simple-0.3.0.\n\n* Throw
`IOError` in `IO` in order to report timeout errors. Delete\n the `Timeout` data-type.\n\n\n#
Version 0.5.1.0\n\n* Re-export `Network.Socket.withSocketsDo`.\n\n* Use TCP `send`
and `recv` as eported by network-simple-0.2.1.0.\n\n\n# Version 0.5.0.0\n\n* Removed
`Control.Proxy.TCP.Sync` and `Control.Proxy.TCP.Safe.Sync`.\n\n\n# Version 0.4.0.1\n\n*
FIX: `acceptFork` now properly closes the connection socket, even in\n case of
asynchronous exceptions.\n\n\n# Version 0.4.0.0\n\n* Do not handle “Broken Pipe”
errors on the `*Write*D` proxies anymore.\n As as a result, those proxies run forever
and have a polymorphic\n return value, which makes this release binary compatible
with 0.2.0.0,\n but not with 0.3.0.0.\n\n\n# Version 0.3.0.0\n\n* Quietly stop
writing or reading bytes from a TCP socket if a\n “Broken Pipe” error happens,
indicating that the remote end\n already closed the connection. Previously, a `ResourceVanished`\n
\ exception was thrown.\n\n* All the `*Write*D` proxies now return `()` if the remote
end\n closed the connection.\n\n\n# Version 0.2.0.0\n\n* Depend on network-simple
0.2\n\n* In both modules `Control.Proxy.TCP` and `Control.Proxy.TCP.Safe`:\n `serveFork`
was renamed to `serve` and the previous function named\n `serve` was removed.\n\n\n#
Version 0.1.1.0\n\n* Split many of the non-pipes-related TCP utilities to the own\n
\ `network-simple` package.\n* Depend on `network-simple` and re-export its functions.\n\n\n#
Version 0.1.0.1\n\n* Dependency bumps.\n\n\n# Version 0.1.0\n\n* New backwards incompatible
API\n* Based on pipes 3.1\n\n\n# Up to version 0.0.2\n\n* Based on pipes-core.\n"
basic-deps:
exceptions: -any
bytestring: -any
base: ==4.*
network-simple: -any
network: -any
pipes: -any
transformers: -any
pipes-safe: -any
all-versions:
- '0.0.1'
- '0.0.2'
- '0.1.0'
- '0.1.0.1'
- '0.1.1.0'
- '0.2.0.0'
- '0.3.0.0'
- '0.4.0.0'
- '0.4.0.1'
- '0.4.0.2'
- '0.5.0.0'
- '0.5.1.0'
- '0.6.0'
- '0.6.1'
- '0.6.2'
- '0.6.3'
- '0.6.4'
- '0.6.4.1'
- '0.6.5'
author: <NAME>
latest: '0.6.5'
description-type: markdown
description: ! '# pipes-network
Utilities to deal with sockets using the **pipes** and **pipes-safe**
libraries.
Check the source or rendered Haddocks for extensive documentation.
This code is licensed under the terms of the so called **3-clause BSD
license**. Read the file named ``LICENSE`` found in this same directory
for details.
See the ``PEOPLE`` file to learn about the people involved in this
effort.
'
license-name: BSD3
|
packages/pi/pipes-network.yaml
|
--- !<SKIN>
contentType: "SKIN"
firstIndex: "2018-12-25 23:52"
game: "Unreal Tournament"
name: "<NAME>"
author: "Unknown"
description: "None"
releaseDate: "2000-03"
attachments:
- type: "IMAGE"
name: "Matrix_Runners_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/M/Matrix_Runners_shot_1.png"
- type: "IMAGE"
name: "Matrix_Runners_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/M/Matrix_Runners_shot_2.png"
originalFilename: "ArcLightsMatrixSkinsV1_0.ZIP"
hash: "3a49ae119095d581cc235cf605089db0eb69f6c4"
fileSize: 582088
files:
- name: "commandoskins_smith.utx"
fileSize: 810729
hash: "a4eb386d71d900f9e28fda73d75223f9554ebac7"
- name: "commandoskins_neo.utx"
fileSize: 539405
hash: "994be11c96109a9f3fad3193e4cc9aaf76e45a3c"
otherFiles: 3
dependencies: {}
downloads:
- url: "https://gamefront.online/files2/service/thankyou?id=1419860"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Skins/M/ArcLightsMatrixSkinsV1_0.ZIP"
main: true
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Skins/SkinsA/&file=arclightsmatrixskinsv1_0.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Skins/Male/ArcLightsMatrixSkinsV1_0.ZIP"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Skins/Misc/SkinsA/arclightsmatrixskinsv1_0.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Skins/&file=arclightsmatrixskinsv1_0.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Skins/M/3/a/49ae11/ArcLightsMatrixSkinsV1_0.ZIP"
main: false
repack: false
state: "OK"
- url: "http://ut-files.com/index.php?dir=Skins/SkinsA/&file=arclightsmatrixskinsv1_0.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Skins/M/3/a/49ae11/ArcLightsMatrixSkinsV1_0.ZIP"
main: false
repack: false
state: "OK"
deleted: false
skins:
- "Matrix Runners"
- "Matrix Agents"
faces:
- "Neo"
- "<NAME>"
model: "Unknown"
teamSkins: true
|
content/Unreal Tournament/Skins/M/3/a/49ae11/matrix-runners_[3a49ae11].yml
|
category: Data Enrichment & Threat Intelligence
fromversion: 5.5.0
commonfields:
id: TAXIIFeed
version: -1
configuration:
- display: Fetch indicators
name: feed
required: false
type: 8
- defaultvalue: '240'
display: Fetch Interval
name: feedFetchInterval
required: true
type: 19
- defaultvalue: F - Reliability cannot be judged
display: Reliability
name: feedInstanceReliability
options:
- A - Completely reliable
- B - Usually reliable
- C - Fairly reliable
- D - Not usually reliable
- E - Unreliable
- F - Reliability cannot be judged
required: true
type: 15
- name: expiration
required: false
type: 17
- defaultvalue: indicatorType
hidden: true
name: expirationPolicy
required: false
type: 0
- hidden: true
name: expirationInterval
required: false
type: 1
- display: Bypass exclusion list
name: bypassExclusionList
defaultvalue: ""
type: 8
required: false
additionalinfo: When selected, the exclusion list will be ignored for this feed.
- defaultvalue: ''
display: Trust any certificate (not secure)
name: insecure
required: false
type: 8
- display: Use system proxy settings
name: proxy
required: false
type: 8
- additionalinfo: TAXII discovery service endpoint. For example, http://hailataxii.com/taxii-discovery-service
display: Discovery Service
name: discovery_service
required: true
type: 0
- additionalinfo: Collection name to fetch indicators from.
display: Collection
name: collection
required: true
type: 0
- additionalinfo: Subscription ID for the TAXII consumer.
display: Subscription ID
name: subscription_id
required: false
type: 0
- display: Username
name: credentials
required: false
type: 9
- additionalinfo: Time (in seconds) before HTTP requests timeout.
defaultvalue: '20'
display: Request Timeout
name: polling_timeout
required: false
type: 0
- additionalinfo: Used by a TAXII Client to request information from a TAXII Server.
display: Poll Service
name: poll_service
required: false
type: 0
- additionalinfo: API key used for authentication with the TAXII server.
display: API Key
name: api_key
required: false
type: 0
- additionalinfo: API key header to be used to provide API key to the TAXII server. For example, "Authorization".
display: API Header Name
name: api_header
required: false
type: 0
- additionalinfo: The time interval for the first fetch (retroactive). <number> <time unit> of type minute/hour/day. For example, 1 minute, 12
hours, 7 days.
defaultvalue: 1 day
display: First Fetch Time
hidden: false
name: initial_interval
required: false
type: 0
description: Ingests indicator feeds from TAXII 1.x servers.
display: TAXII Feed
name: TAXIIFeed
script:
commands:
- arguments:
- default: false
defaultValue: '50'
description: The maximum number of results to return.
isArray: false
name: limit
required: false
secret: false
- default: false
defaultValue: 1 day
description: The time interval for the first fetch (retroactive). <number> <time unit> of type minute/hour/day. For example, 1 minute, 12
hours, 7 days.
isArray: false
name: initial_interval
required: false
secret: false
deprecated: false
    description: Gets indicators from the feed.
execution: false
name: get-indicators
outputs:
- contextPath: TAXII.Indicator.Value
description: The indicator value.
type: String
- contextPath: TAXII.Indicator.Type
description: The indicator type.
type: String
- contextPath: TAXII.Indicator.Rawjson
description: The indicator rawJSON value.
type: Unknown
dockerimage: demisto/taxii:1.0.0.5214
feed: true
isfetch: false
longRunning: false
longRunningPort: false
runonce: false
script: '-'
subtype: python3
type: python
tests:
- TAXII_Feed_Test
|
Integrations/FeedTAXII/FeedTAXII.yml
|
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-etcd-bootstrap
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: etcd
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
data:
etcd.conf.yaml: |-
# Human-readable name for this member.
name: {{ .Release.Name }}-etcd
# Path to the data directory.
data-dir: /var/etcd/data/new.etcd
# metrics configuration
metrics: basic
# Number of committed transactions to trigger a snapshot to disk.
snapshot-count: 75000
# Raise alarms when backend size exceeds the given quota. 0 means use the
# default quota.
{{- if .Values.backup.etcdQuotaBytes }}
quota-backend-bytes: {{ int $.Values.backup.etcdQuotaBytes }}
{{- end }}
# List of comma separated URLs to listen on for client traffic.
listen-client-urls: {{ if .Values.etcdTLS }}https{{ else }}http{{ end }}://0.0.0.0:{{ .Values.servicePorts.client }}
# List of this member's client URLs to advertise to the public.
# The URLs needed to be a comma-separated list.
advertise-client-urls: {{ if .Values.etcdTLS }}https{{ else }}http{{ end }}://0.0.0.0:{{ .Values.servicePorts.client }}
# Initial cluster token for the etcd cluster during bootstrap.
initial-cluster-token: 'new'
# Initial cluster state ('new' or 'existing').
initial-cluster-state: 'new'
{{- if .Values.autoCompaction }}
# auto-compaction-mode ("periodic" or "revision").
{{- if .Values.autoCompaction.mode }}
auto-compaction-mode: {{ .Values.autoCompaction.mode }}
{{- end }}
# auto-compaction-retention defines Auto compaction retention length for etcd.
{{- if .Values.autoCompaction.retentionLength }}
auto-compaction-retention: {{ .Values.autoCompaction.retentionLength }}
{{- end }}
{{- end }}
{{- if .Values.etcdTLS }}
client-transport-security:
# Path to the client server TLS cert file.
cert-file: /var/etcd/ssl/tls/tls.crt
# Path to the client server TLS key file.
key-file: /var/etcd/ssl/tls/tls.key
# Enable client cert authentication.
client-cert-auth: true
# Path to the client server TLS trusted CA cert file.
trusted-ca-file: /var/etcd/ssl/ca/ca.crt
{{- end }}
|
chart/etcd-backup-restore/templates/etcd-configmap.yaml
|
uid: "com.azure.cosmos.implementation.guava25.collect.Multisets.unmodifiableSortedMultiset*"
fullName: "com.azure.cosmos.implementation.guava25.collect.Multisets.<E>unmodifiableSortedMultiset"
name: "<E>unmodifiableSortedMultiset"
nameWithType: "Multisets.<E>unmodifiableSortedMultiset"
members:
- uid: "com.azure.cosmos.implementation.guava25.collect.Multisets.<E>unmodifiableSortedMultiset(com.azure.cosmos.implementation.guava25.collect.SortedMultiset<E>)"
fullName: "com.azure.cosmos.implementation.guava25.collect.Multisets.<E>unmodifiableSortedMultiset(SortedMultiset<E> sortedMultiset)"
name: "<E>unmodifiableSortedMultiset(SortedMultiset<E> sortedMultiset)"
nameWithType: "Multisets.<E>unmodifiableSortedMultiset(SortedMultiset<E> sortedMultiset)"
summary: "Returns an unmodifiable view of the specified sorted multiset. Query operations on the returned multiset \"read through\" to the specified multiset, and attempts to modify the returned multiset result in an <xref uid=\"java.lang.UnsupportedOperationException\" data-throw-if-not-resolved=\"false\" data-raw-source=\"UnsupportedOperationException\"></xref>.\n\nThe returned multiset will be serializable if the specified multiset is serializable."
parameters:
- description: "the sorted multiset for which an unmodifiable view is to be generated"
name: "sortedMultiset"
type: "<xref href=\"com.azure.cosmos.implementation.guava25.collect.SortedMultiset?alt=com.azure.cosmos.implementation.guava25.collect.SortedMultiset&text=SortedMultiset\" data-throw-if-not-resolved=\"False\" /><<xref href=\"E?alt=E&text=E\" data-throw-if-not-resolved=\"False\" />>"
syntax: "public static SortedMultiset<E> <E>unmodifiableSortedMultiset(SortedMultiset<E> sortedMultiset)"
returns:
description: "an unmodifiable view of the multiset"
type: "<xref href=\"com.azure.cosmos.implementation.guava25.collect.SortedMultiset?alt=com.azure.cosmos.implementation.guava25.collect.SortedMultiset&text=SortedMultiset\" data-throw-if-not-resolved=\"False\" /><<xref href=\"E?alt=E&text=E\" data-throw-if-not-resolved=\"False\" />>"
type: "method"
metadata: {}
package: "com.azure.cosmos.implementation.guava25.collect"
artifact: com.azure:azure-cosmos:4.4.0-beta.1
|
preview/docs-ref-autogen/com.azure.cosmos.implementation.guava25.collect.Multisets.unmodifiableSortedMultiset.yml
|
jobs:
- job:
displayName: Signing Validation
dependsOn: setupMaestroVars
variables:
- template: /eng/publishing/v3/common-variables.yml
- name: AzDOProjectName
value: $[ dependencies.setupMaestroVars.outputs['setReleaseVars.AzDOProjectName'] ]
- name: AzDOPipelineId
value: $[ dependencies.setupMaestroVars.outputs['setReleaseVars.AzDOPipelineId'] ]
- name: AzDOBuildId
value: $[ dependencies.setupMaestroVars.outputs['setReleaseVars.AzDOBuildId'] ]
pool:
vmImage: 'windows-2019'
steps:
- task: DownloadBuildArtifacts@0
displayName: Download Package Artifacts
inputs:
buildType: specific
buildVersionToDownload: specific
project: $(AzDOProjectName)
pipeline: $(AzDOPipelineId)
buildId: $(AzDOBuildId)
artifactName: PackageArtifacts
# This is necessary whenever we want to publish/restore to an AzDO private feed
# Since sdk-task.ps1 tries to restore packages we need to do this authentication here
# otherwise it'll complain about accessing a private feed.
- task: NuGetAuthenticate@0
displayName: 'Authenticate to AzDO Feeds'
- task: PowerShell@2
displayName: Enable cross-org publishing
inputs:
filePath: $(Build.SourcesDirectory)/eng/common/enable-cross-org-publishing.ps1
arguments: -token $(dn-bot-dnceng-artifact-feeds-rw)
# Signing validation will optionally work with the buildmanifest file which is downloaded from
# Azure DevOps above.
- task: PowerShell@2
displayName: Validate
inputs:
filePath: $(Build.SourcesDirectory)/eng/common/sdk-task.ps1
arguments: -task SigningValidation -restore -msbuildEngine vs
/p:PackageBasePath='$(Build.ArtifactStagingDirectory)/PackageArtifacts'
/p:SignCheckExclusionsFile='$(Build.SourcesDirectory)/eng/SignCheckExclusionsFile.txt'
${{ parameters.signingValidationAdditionalParameters }}
- template: /eng/common/templates/steps/publish-logs.yml
parameters:
StageLabel: 'Validation'
JobLabel: 'Signing'
|
eng/publishing/v3/signing-validation.yml
|
version: "3.5"
services:
result-frontend:
image: dna-frontend
build:
context: ../packages/frontend/
dockerfile: ../../deployment/dockerfiles/app/result-frontend.Dockerfile
environment:
- PROJECTSMO_BACKEND_HOST=result-backend
- PROJECTSMO_FRONTEND_OIDC_DISABLED=true
- PROJECTSMO_FRONTEND_API_BASEURL=http://localhost:7171/api
- PROJECTSMO_ENABLEINTERNALUSERINFO=false
- PROJECTSMO_ENABLEDATACOMPLIANCE=false
- PROJECTSMO_ENABLEJUPYTERWORKSPACE=false
- PROJECTSMO_ENABLEDATAIKUWORKSPACE=false
- PROJECTSMO_ENABLEMALWARESERVICE=false
- PROJECTSMO_ENABLEPIPELINSERVICE=false
- PROJECTSMO_DNA_COMPANYNAME=Company_Name
- PROJECTSMO_DEPLOY_VERSION=0.91
- PROJECTSMO_DNA_APPNAME_HEADER=DnA App
- PROJECTSMO_DNA_APPNAME_HOME=Data and Analytics
- PROJECTSMO_DNA_CONTACTUS_HTML=<div><p>There could be many places where you may need our help, and we are happy to support you. <br /> Please add your communication channels links here</p></div>
- PROJECTSMO_DNA_BRAND_LOGO_URL=/images/branding/logo-brand.png
- PROJECTSMO_DNA_APP_LOGO_URL=/images/branding/logo-app-white.png
ports:
- "8080:3000"
result-backend:
image: dna-backend
build:
context: ../packages/backend/
dockerfile: ../../deployment/dockerfiles/app/result-backend.Dockerfile
environment:
- API_DB_URL=jdbc:postgresql://db:5432/db
- OIDC_DISABLED=true
- JUPYTER_NOTEBOOK=false
- DATAIKU=false
- ITSMM=false
- ATTACHMENT_MALWARE_SCAN=false
- DRD_INTERNAL_USER_ENABLED=false
depends_on:
- db
ports:
- "7171:7171"
deploy:
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 30s
db:
image: postgres:10
restart: on-failure
environment:
- "POSTGRES_USER=admin"
- "POSTGRES_PASSWORD=admin"
- "POSTGRES_DB=db"
# Init db on startup
volumes:
- ./localdb-init/schema.sql:/docker-entrypoint-initdb.d/01-schema-init.sql
- ./localdb-init/data.sql:/docker-entrypoint-initdb.d/02-data-init.sql
ports:
- "5432:5432"
|
deployment/docker-compose-local-basic.yml
|
---
- name: Check if IIM installed
shell: test -f /opt/IBM/InstallationManager/eclipse/tools/imcl
register: result
check_mode: no
failed_when: False
changed_when: result.rc != 0
- name: Install IIM
shell: /work/IIM/installc -acceptLicense
when: result is changed
- name: Check if WAS installed
shell: /opt/IBM/InstallationManager/eclipse/tools/imcl listInstalledPackages | grep com.ibm.websphere.ND.v90
register: result
check_mode: no
failed_when: False
changed_when: result.rc != 0
- name: Install WAS
shell: |
/opt/IBM/InstallationManager/eclipse/tools/imcl install com.ibm.websphere.ND.v90 com.ibm.java.jdk.v8 \
-repositories /work/WAS,/work/JDK,/work/IHS,/work/PLG,/work/WAS_FP,/work/JDK_FP,/work/IHSPLG_FP \
-installationDirectory /opt/IBM/WebSphere/AppServer \
-installFixes all \
-acceptLicense
when: result is changed
- name: Check if IHS installed
shell: /opt/IBM/InstallationManager/eclipse/tools/imcl listInstalledPackages | grep com.ibm.websphere.IHS.v90
register: result
check_mode: no
failed_when: False
changed_when: result.rc != 0
- name: Install IHS
shell: |
/opt/IBM/InstallationManager/eclipse/tools/imcl install com.ibm.websphere.IHS.v90 com.ibm.java.jdk.v8 \
-repositories /work/WAS,/work/JDK,/work/IHS,/work/PLG,/work/WAS_FP,/work/JDK_FP,/work/IHSPLG_FP \
-installationDirectory /opt/IBM/HTTPServer \
-installFixes all \
-acceptLicense
when: result is changed
- name: Check if PLG installed
shell: /opt/IBM/InstallationManager/eclipse/tools/imcl listInstalledPackages | grep com.ibm.websphere.PLG.v90
register: result
check_mode: no
failed_when: False
changed_when: result.rc != 0
- name: Install PLG
shell: |
/opt/IBM/InstallationManager/eclipse/tools/imcl install com.ibm.websphere.PLG.v90 com.ibm.java.jdk.v8 \
-repositories /work/WAS,/work/JDK,/work/IHS,/work/PLG,/work/WAS_FP,/work/JDK_FP,/work/IHSPLG_FP \
-installationDirectory /opt/IBM/WebSphere/Plugins \
-installFixes all \
-acceptLicense
when: result is changed
|
roles/install_was_traditional/tasks/main.yml
|
version: 1.0.{build}
skip_tags: true
stack: node 10
skip_commits:
message: /chore(release)/
branches:
only:
- master
- /preview\/*/
- /release\/*/
image:
- Ubuntu
- Visual Studio 2019
environment:
GH_TOKEN:
secure: <KEY>
access_token:
secure: <KEY>
EmailPasswordOptions:ApiKey:
secure: <KEY>
EmailPasswordOptions:Email:
secure: M+wRwy+l/01DvVvmy0Th96FLiwzNnwK3Sp1YMvMnUns=
EmailPasswordOptions:Password:
secure: h7zhWIG5sQBVM6loU77ClQ==
decrypt_secret:
secure: BDKOW5teXcJUwLqekbf6VQ==
donetsdk: 5.0.402
init:
- cmd: git config --global core.autocrlf true
install:
- ps: if ($isWindows) { Install-Product node '' }
- sh: wget -q https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb
- sh: sudo dpkg -i packages-microsoft-prod.deb
- sh: sudo apt-get update
- sh: sudo apt-get -y install apt-transport-https
- sh: sudo chmod +x ./dotnet-install.sh
- sh: sudo ./dotnet-install.sh -Channel Current -Version $donetsdk -InstallDir ./dotnetsdk -NoPath
- sh: export PATH=/home/appveyor/projects/identity-firebase/dotnetsdk:$PATH
- sh: sudo apt -y install nuget
- ps: if ($isWindows) { ./dotnet-install.ps1 -Version $env:donetsdk }
- ps: dotnet tool install --global GitVersion.Tool
- ps: dotnet gitversion /l console /output buildserver
- ps: dotnet tool install --global dotnet-sonarscanner
- cmd: nuget install ReportGenerator -ExcludeVersion
- nuget install secure-file -ExcludeVersion
- cmd: secure-file\tools\secure-file -decrypt test\testsettings.json.enc -secret %decrypt_secret%
- cmd: secure-file\tools\secure-file -decrypt test\identityfirestore.json.enc -secret %decrypt_secret%
- sh: mono secure-file/tools/secure-file.exe -decrypt test/testsettings.json.enc -secret $decrypt_secret
- sh: mono secure-file/tools/secure-file.exe -decrypt test/identityfirestore.json.enc -secret $decrypt_secret
- ps: ./appveyorinit.ps1
- cmd: set JAVA_HOME=C:\Program Files\Java\jdk14
- cmd: set PATH=%JAVA_HOME%\bin;%PATH%
build_script:
- ps: ./build.ps1
test_script:
- cmd: publish.cmd
artifacts:
- path: artifacts/**/*.nupkg
name: nuget
deploy:
- provider: NuGet
api_key:
secure: <KEY>
on:
branch:
- /preview\/*/
- /release\/*/
CI_WINDOWS: true
- provider: GitHub
auth_token: $(GH_TOKEN)
draft: true
prerelease: true
release: $(NextVersion)
on:
branch:
- /preview\/*/
CI_WINDOWS: true
- provider: GitHub
auth_token: $(GH_TOKEN)
draft: true
release: $(NextVersion)
on:
branch:
- /release\/*/
CI_WINDOWS: true
for:
-
branches:
only:
- /release\/*/
on_success:
- cmd: semantic-release -b %APPVEYOR_REPO_BRANCH%
|
appveyor.yml
|
---
version: 1
rwmutex: {}
interactions:
- request:
body: ""
form: {}
headers:
Authorization:
- SharedKey golangrocksonazure:E0WSw7N6AWnym9SFrhEj77r0kjB15KsaocqkV2Npd7c=
User-Agent:
- Go/go1.9beta1 (amd64-windows) azure-storage-go/10.0.2 api-version/2016-05-31
file
x-ms-date:
- Thu, 20 Jul 2017 23:34:05 GMT
x-ms-version:
- 2016-05-31
url: https://golangrocksonazure.file.core.windows.net/share-35storagedirsuitetestcreatedirectory?restype=share
method: PUT
response:
body: ""
headers:
Date:
- Thu, 20 Jul 2017 23:34:04 GMT
Etag:
- '"0x8D4CFC7CF60D2E9"'
Last-Modified:
- Thu, 20 Jul 2017 23:34:05 GMT
Server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Request-Id:
- f27d5ccc-001a-00eb-73b0-01f767000000
X-Ms-Version:
- 2016-05-31
status: 201 Created
code: 201
- request:
body: ""
form: {}
headers:
Authorization:
- SharedKey golangrocksonazure:<KEY>
User-Agent:
- Go/go1.9beta1 (amd64-windows) azure-storage-go/10.0.2 api-version/2016-05-31
file
x-ms-date:
- Thu, 20 Jul 2017 23:34:05 GMT
x-ms-version:
- 2016-05-31
url: https://golangrocksonazure.file.core.windows.net/share-35storagedirsuitetestcreatedirectory/dir?restype=directory
method: PUT
response:
body: ""
headers:
Date:
- Thu, 20 Jul 2017 23:34:04 GMT
Etag:
- '"0x8D4CFC7CDF8BA96"'
Last-Modified:
- Thu, 20 Jul 2017 23:34:02 GMT
Server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Request-Id:
- f27d5ccf-001a-00eb-74b0-01f767000000
X-Ms-Version:
- 2016-05-31
status: 201 Created
code: 201
- request:
body: ""
form: {}
headers:
Authorization:
- SharedKey golangrocksonazure:<KEY>
User-Agent:
- Go/go1.9beta1 (amd64-windows) azure-storage-go/10.0.2 api-version/2016-05-31
file
x-ms-date:
- Thu, 20 Jul 2017 23:34:05 GMT
x-ms-version:
- 2016-05-31
url: https://golangrocksonazure.file.core.windows.net/share-35storagedirsuitetestcreatedirectory/dir?restype=directory
method: DELETE
response:
body: ""
headers:
Date:
- Thu, 20 Jul 2017 23:34:04 GMT
Server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Request-Id:
- f27d5cd1-001a-00eb-75b0-01f767000000
X-Ms-Version:
- 2016-05-31
status: 202 Accepted
code: 202
- request:
body: ""
form: {}
headers:
Authorization:
- SharedKey <KEY>
User-Agent:
- Go/go1.9beta1 (amd64-windows) azure-storage-go/10.0.2 api-version/2016-05-31
file
x-ms-date:
- Thu, 20 Jul 2017 23:34:05 GMT
x-ms-version:
- 2016-05-31
url: https://golangrocksonazure.file.core.windows.net/share-35storagedirsuitetestcreatedirectory/dir?restype=directory
method: HEAD
response:
body: ""
headers:
Date:
- Thu, 20 Jul 2017 23:34:04 GMT
Server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Request-Id:
- f27d5cd2-001a-00eb-76b0-01f767000000
X-Ms-Version:
- 2016-05-31
status: 404 The specified resource does not exist.
code: 404
- request:
body: ""
form: {}
headers:
Authorization:
- SharedKey <KEY>
User-Agent:
- Go/go1.9beta1 (amd64-windows) azure-storage-go/10.0.2 api-version/2016-05-31
file
x-ms-date:
- Thu, 20 Jul 2017 23:34:05 GMT
x-ms-version:
- 2016-05-31
url: https://golangrocksonazure.file.core.windows.net/share-35storagedirsuitetestcreatedirectory?restype=share
method: DELETE
response:
body: ""
headers:
Date:
- Thu, 20 Jul 2017 23:34:04 GMT
Server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Request-Id:
- f27d5cd3-001a-00eb-77b0-01f767000000
X-Ms-Version:
- 2016-05-31
status: 202 Accepted
code: 202
|
vendor/github.com/Azure/azure-sdk-for-go/storage/recordings/StorageDirSuite/TestCreateDirectory.yaml
|
stages:
- preparation
- building
- testing
- security
image: edbizarro/gitlab-ci-pipeline-php:7.3
variables:
MYSQL_ROOT_PASSWORD: <PASSWORD>
MYSQL_USER: laravel
MYSQL_PASSWORD: <PASSWORD>
MYSQL_DATABASE: laravel
DB_HOST: mysql
cache:
key: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
composer:
stage: preparation
script:
- php -v
- composer install --prefer-dist --no-ansi --no-interaction --no-progress --no-scripts
- cp .env.example .env
- php artisan key:generate
artifacts:
paths:
- vendor/
- .env
expire_in: 1 days
when: always
cache:
paths:
- vendor/
yarn:
stage: preparation
script:
- yarn --version
- yarn install --pure-lockfile
artifacts:
paths:
- node_modules/
expire_in: 1 days
when: always
cache:
paths:
- node_modules/
build-assets:
stage: building
dependencies:
- composer
- yarn
script:
- yarn --version
- yarn run build
artifacts:
paths:
- public/admin/
- public/app/
- public/images/
- public/user/
- public/vendor/
- public/mix-manifest.json
expire_in: 1 days
when: always
db-seeding:
stage: building
services:
- name: mysql:8.0
command: ["--default-authentication-plugin=mysql_native_password"]
dependencies:
- composer
- yarn
script:
- mysql --version
- php artisan migrate:fresh --seed
artifacts:
paths:
- ./storage/logs # for debugging
expire_in: 1 days
when: on_failure
phpunit:
stage: testing
services:
- name: mysql:8.0
command: ["--default-authentication-plugin=mysql_native_password"]
dependencies:
- build-assets
- composer
- db-seeding
script:
- php -v
- sudo cp /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini /usr/local/etc/php/conf.d/docker-php-ext-xdebug.bak
- echo "" | sudo tee /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini
- ./vendor/phpunit/phpunit/phpunit --version
- php -d short_open_tag=off ./vendor/phpunit/phpunit/phpunit -v --colors=never --stderr
- sudo cp /usr/local/etc/php/conf.d/docker-php-ext-xdebug.bak /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini
artifacts:
paths:
- ./storage/logs # for debugging
expire_in: 1 days
when: on_failure
codestyle:
stage: testing
image: lorisleiva/laravel-docker
script:
- phpcs --extensions=php --standard=PSR2 app
dependencies: []
phpcpd:
stage: testing
script:
- test -f phpcpd.phar || curl -L https://phar.phpunit.de/phpcpd.phar -o phpcpd.phar
- php phpcpd.phar app/ --min-lines=50
dependencies: []
cache:
paths:
- phpcpd.phar
sensiolabs:
stage: security
script:
- test -d security-checker || git clone https://github.com/sensiolabs/security-checker.git
- cd security-checker
- composer install
- php security-checker security:check ../composer.lock
dependencies: []
cache:
paths:
- security-checker/
|
.gitlab-ci.yml
|
---
## Deploy RADOS Gateway
#
- name: Add Ceph extra
apt_repository: >
repo="deb http://ceph.com/packages/ceph-extras/debian {{ ansible_lsb.codename }} main"
state=present
when: ansible_lsb.codename in ['natty', 'oneiric', 'precise', 'quantal', 'raring', 'sid', 'squeeze', 'wheezy']
# Needed for Ubuntu 12.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
- name: Enable multiverse repo for Precise
apt_repository: >
repo="{{ item }}"
state=present
with_items:
- deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }} multiverse
- deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-updates multiverse
- deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
when: ansible_lsb.codename in ['precise'] and not http_100_continue
# Disable the repo when we are using the Ceph repo for 100-continue packages
- name: Disable multiverse repo for Precise
apt_repository: >
repo="{{ item }}"
state=absent
with_items:
- deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }} multiverse
- deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-updates multiverse
- deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
when: ansible_lsb.codename in ['precise'] and http_100_continue
# Needed for Ubuntu 14.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
- name: Enable multiverse repo for Trusty
command: "apt-add-repository multiverse"
when: ansible_lsb.codename in ['trusty'] and not http_100_continue
# Disable the repo when we are using the Ceph repo for 100-continue packages
- name: Disable multiverse repo for Trusty
command: "apt-add-repository -r multiverse"
when: ansible_lsb.codename in ['trusty'] and http_100_continue
# If using 100-continue, add Ceph dev key
- name: Install the Ceph development repository key
apt_key: >
data="{{ lookup('file', 'cephdev.asc') }}"
state=present
when: http_100_continue
# If using 100-continue, add Ceph sources and update
- name: Add Ceph Apache and FastCGI sources
apt_repository: >
repo="{{ item }}"
state=present
with_items:
- deb http://gitbuilder.ceph.com/apache2-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
- deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
register: purge_default_apache
when: http_100_continue
# Else remove them to ensure you use the default packages
- name: Remove Ceph Apache and FastCGI sources
apt_repository: >
repo="{{ item }}"
state=absent
with_items:
- deb http://gitbuilder.ceph.com/apache2-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
- deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
register: purge_ceph_apache
when: not http_100_continue
# Purge Ceph Apache and FastCGI packages if needed
- name: "Purge Ceph Apache and FastCGI packages"
apt: >
pkg="{{ item }}"
state=absent
purge=yes
with_items:
- apache2
- apache2-bin
- apache2-data
- apache2-mpm-worker
- apache2-utils
- apache2.2-bin
- apache2.2-common
- libapache2-mod-fastcgi
when: purge_default_apache.changed or purge_ceph_apache.changed
- name: "Install Apache, fastcgi and Rados Gateway"
apt: >
pkg={{ item }}
state=present
update_cache=yes
with_items:
- apache2
- libapache2-mod-fastcgi
- radosgw
## Prepare Apache
#
- name: Install default httpd.conf
template: src=httpd.conf dest=/etc/apache2/httpd.conf owner=root group=root
- name: Enable some apache mod rewrite and fastcgi
command: "{{ item }}"
with_items:
- a2enmod rewrite
- a2enmod fastcgi
- name: Install Rados Gateway vhost
template: >
src=rgw.conf
dest=/etc/apache2/sites-available/rgw.conf
owner=root
group=root
## Prepare RGW
#
# NOTE(review): was mode=0644 — a directory without the execute bit cannot be
# traversed, which would make the RGW data dir unusable; 0755 is the intended
# directory mode (matches state=directory defaults used elsewhere).
- name: Create RGW directory
  file: >
    path=/var/lib/ceph/radosgw/{{ ansible_fqdn }}
    state=directory
    owner=root
    group=root
    mode=0755
- name: Enable Rados Gateway vhost and disable default site
command: "{{ item }}"
with_items:
- a2ensite rgw.conf
- a2dissite *default
ignore_errors: True
notify:
- restart apache2
- name: Install s3gw.fcgi script
copy: >
src=s3gw.fcgi
dest=/var/www/s3gw.fcgi
mode=0555
owner=root
group=root
## If we don't perform this check Ansible will start multiple instance of radosgw
- name: Check if RGW is started
command: /etc/init.d/radosgw status
register: rgwstatus
ignore_errors: True
- name: Start RGW
command: /etc/init.d/radosgw start
when: rgwstatus.rc != 0
|
roles/ceph-radosgw/tasks/install_debian.yml
|
- name: make-config | chroot directory for bind9 should be created
file: >
path={{ bind.changeroot }}
mode=0755
state=directory
owner={{ bind.user.username }}
group={{ bind.user.group }}
- name: make-config | Each directory for bind9 should be created
file: >
path={{ bind.changeroot }}/{{ item }}
mode=0755
state=directory
owner={{ bind.user.username }}
group={{ bind.user.group }}
with_items:
- dev
- etc
- var/run
- var/log
- var/data/masters/l
- var/data/masters/1
- var/data/slaves
- tmp
# Typo fix: "mame-config" -> "make-config", matching every other task name in
# this file. Creates the chroot's device nodes (minor numbers: null=3, zero=5,
# random=8, urandom=9, all on major 1).
- name: make-config | /dev/random
  shell: >
    chdir={{ bind.changeroot }}/dev
    creates={{ item.name }}
    mknod -m 666 {{ item.name }} c 1 {{ item.minor }}
  with_items:
    - { name: 'random', minor: 8 }
    - { name: 'urandom', minor: 9 }
    - { name: 'zero', minor: 5 }
    - { name: 'null', minor: 3 }
- name: make-config | Sub directories for master zone should be created
  file: >
    path={{ bind.changeroot }}/var/data/masters/{{ item }}
    mode=0755
    owner={{ bind.user.username }}
    group={{ bind.user.group }}
    state=directory
  # Bare variables in with_items are deprecated (removed in Ansible 2.8+);
  # use a full Jinja2 expression.
  with_items: "{{ bind.zone.subdirs }}"
- name: make-config | Sub directories for slave zone should be created
  file: >
    path={{ bind.changeroot }}/var/data/slaves/{{ item }}
    mode=0755
    owner={{ bind.user.username }}
    group={{ bind.user.group }}
    state=directory
  # Bare variables in with_items are deprecated (removed in Ansible 2.8+);
  # use a full Jinja2 expression.
  with_items: "{{ bind.zone.subdirs }}"
- name: make-config | Deploy {{ bind.changeroot }}/etc/named.conf
notify: Restart BIND
template: >
src=opt/named/etc/named.conf.j2
dest={{ bind.changeroot }}/etc/named.conf
backup=yes
- name: make-config | localtime file must exist
shell: >
chdir={{ bind.changeroot }}
creates={{ bind.changeroot }}/etc/localtime
/bin/cp /etc/localtime {{ bind.changeroot }}/etc/localtime
- name: make-config | {{ bind.changeroot }}/etc/rndc.key should be created
shell: >
chdir={{ bind.changeroot }}
creates={{ bind.changeroot }}/etc/{{ ansible_hostname }}-rndc-key
{{ bind.serverroot }}/sbin/rndc-confgen -a -p 953
-c /etc/{{ ansible_hostname }}-rndc-key
-k {{ ansible_hostname }}-rndc-key
-r /dev/urandom
-u {{ bind.user.username }}
-t {{ bind.changeroot }}
- name: make-config | {{ bind.changeroot }}/etc/rndc.key should be linked
file: >
src={{ bind.changeroot }}/etc/{{ ansible_hostname }}-rndc-key
path={{ bind.changeroot }}/etc/rndc.key
state=link
- name: make-config | {{ bind.changeroot }}/etc/rootzone-dnssec-keys should be deployed
shell: >
chdir={{ buildroot }}/bind-{{ bind.version }}
creates={{ bind.changeroot }}/etc/rootzone-dnssec-keys
/bin/cp bind.keys {{ bind.changeroot }}/etc/rootzone-dnssec-keys
- name: make-config | {{ bind.changeroot }}/var/data/named.root should be deployed
get_url: >
url=ftp://rs.internic.net/domain/named.root
dest={{ bind.changeroot }}/var/data/named.root
- name: make-config | localhost.zone and 127.in-addr.arpa.rev should be deployed
copy: >
src=opt/named/var/data/masters/{{ item }}
dest={{ bind.changeroot }}/var/data/masters/{{ item }}
with_items:
- l/localhost.zone
- 1/127.in-addr.arpa.rev
- name: make-config | localhost.zone and 127.in-addr.arpa.rev should be owned by {{ bind.user.username }}
file: >
path={{ bind.changeroot }}/var/data/masters/{{ item }}
owner={{ bind.user.username }}
group={{ bind.user.group }}
mode=0644
with_items:
- l/localhost.zone
- 1/127.in-addr.arpa.rev
- name: make-config | dig command should be linked from /usr/local/bin/dig
file: >
src={{ bind.serverroot }}/bin/dig
dest=/usr/local/bin/dig
state=link
|
server/roles/src/bind-9/tasks/make-config.yml
|
swagger: '2.0'
info:
description: |
**Discovery API**
Provides access to information about Alfresco Content Services.
version: '1'
title: Alfresco Content Services REST API
basePath: /alfresco/api
securityDefinitions:
basicAuth:
type: basic
description: HTTP Basic Authentication
security:
- basicAuth: []
produces:
- application/json
paths:
'/discovery':
get:
x-alfresco-since: "5.2"
tags:
- discovery
summary: Get repository information
description: |
**Note:** this endpoint is available in Alfresco 5.2 and newer versions.
Retrieves the capabilities and detailed version information from the repository.
operationId: getRepositoryInformation
produces:
- application/json
responses:
'200':
description: Successful response
schema:
$ref: '#/definitions/DiscoveryEntry'
'501':
description: Discovery is disabled for the system
default:
description: Unexpected error
schema:
$ref: '#/definitions/Error'
definitions:
Error:
type: object
required:
- error
properties:
error:
type: object
required:
- statusCode
- briefSummary
- stackTrace
- descriptionURL
properties:
errorKey:
type: string
statusCode:
type: integer
format: int32
briefSummary:
type: string
stackTrace:
type: string
descriptionURL:
type: string
logId:
type: string
VersionInfo:
type: object
required:
- major
- minor
- patch
- hotfix
- schema
- label
- display
properties:
major:
type: string
minor:
type: string
patch:
type: string
hotfix:
type: string
schema:
type: integer
label:
type: string
display:
type: string
StatusInfo:
type: object
required:
- isReadOnly
- isAuditEnabled
- isQuickShareEnabled
- isThumbnailGenerationEnabled
- isDirectAccessUrlEnabled
properties:
isReadOnly:
type: boolean
default: false
isAuditEnabled:
type: boolean
isQuickShareEnabled:
type: boolean
isThumbnailGenerationEnabled:
type: boolean
isDirectAccessUrlEnabled:
type: boolean
EntitlementsInfo:
type: object
properties:
maxUsers:
type: integer
format: int64
maxDocs:
type: integer
format: int64
isClusterEnabled:
type: boolean
default: false
isCryptodocEnabled:
type: boolean
default: false
LicenseInfo:
type: object
required:
- issuedAt
- expiresAt
- remainingDays
- holder
- mode
properties:
issuedAt:
type: string
format: date-time
expiresAt:
type: string
format: date-time
remainingDays:
type: integer
holder:
type: string
mode:
type: string
entitlements:
$ref: '#/definitions/EntitlementsInfo'
ModuleInfo:
type: object
properties:
id:
type: string
title:
type: string
description:
type: string
version:
type: string
installDate:
type: string
format: date-time
installState:
type: string
versionMin:
type: string
versionMax:
type: string
RepositoryInfo:
type: object
required:
- id
- edition
- version
- status
properties:
id:
type: string
edition:
type: string
version:
$ref: '#/definitions/VersionInfo'
status:
$ref: '#/definitions/StatusInfo'
license:
$ref: '#/definitions/LicenseInfo'
modules:
type: array
items:
$ref: '#/definitions/ModuleInfo'
RepositoryEntry:
type: object
required:
- repository
properties:
repository:
$ref: '#/definitions/RepositoryInfo'
DiscoveryEntry:
type: object
required:
- entry
properties:
entry:
$ref: '#/definitions/RepositoryEntry'
|
src/main/webapp/definitions/alfresco-discovery.yaml
|
version: "3.8"
services:
chain-1337:
container_name: chain-1337-test
image: trufflesuite/ganache-cli:latest
command: "--chainId 1337 --mnemonic 'candy maple cake sugar pudding cream honey rich smooth crumble sweet treat' --hostname 0.0.0.0 --blockTime 3 --verbose"
ports:
- 8545:8545
networks:
- nxtp-test
chain-1338:
container_name: chain-1338-test
image: trufflesuite/ganache-cli:latest
command: "--chainId 1338 --mnemonic 'candy maple cake sugar pudding cream honey rich smooth crumble sweet treat' --host 0.0.0.0 --blockTime 3 --verbose"
ports:
- 8546:8545
networks:
- nxtp-test
######################
graph-node-1337:
container_name: graph-node-1337-test
image: graphprotocol/graph-node:v0.25.0
ports:
- "8010:8000"
- "8001:8001"
- "8020:8020"
- "8030:8030"
- "8040:8040"
depends_on:
- ipfs
- postgres-1337
- chain-1337
environment:
postgres_host: postgres-1337
postgres_user: graph-node
postgres_pass: <PASSWORD>
postgres_db: graph-node
ipfs: "ipfs:5001"
ethereum: "mainnet:http://chain-1337:8545"
GRAPH_LOG: info
networks:
- nxtp-test
postgres-1337:
container_name: postgres-1337-test
image: postgres
ports:
- "5432:5432"
command: ["postgres", "-cshared_preload_libraries=pg_stat_statements"]
environment:
POSTGRES_USER: graph-node
POSTGRES_PASSWORD: <PASSWORD>
POSTGRES_DB: graph-node
networks:
- nxtp-test
graph-node-1338:
container_name: graph-node-1338-test
image: graphprotocol/graph-node:v0.25.0
ports:
- "9010:8000"
- "9001:8001"
- "9020:8020"
- "9030:8030"
- "9040:8040"
depends_on:
- ipfs
- postgres-1338
- chain-1338
environment:
postgres_host: postgres-1338
postgres_user: graph-node
postgres_pass: <PASSWORD>
postgres_db: graph-node
ipfs: "ipfs:5001"
ethereum: "mainnet:http://chain-1338:8545"
GRAPH_LOG: info
networks:
- nxtp-test
postgres-1338:
container_name: postgres-1338-test
image: postgres
ports:
- "5433:5432"
command: ["postgres", "-cshared_preload_libraries=pg_stat_statements"]
environment:
POSTGRES_USER: graph-node
POSTGRES_PASSWORD: <PASSWORD>
POSTGRES_DB: graph-node
networks:
- nxtp-test
ipfs:
container_name: ipfs-test
image: ipfs/go-ipfs:v0.4.23
ports:
- "5001:5001"
networks:
- nxtp-test
networks:
nxtp-test:
|
packages/integration/ops/chains.docker-compose.yml
|
imports:
- { resource: parameters.yml }
- { resource: security.yml }
framework:
    #esi: ~
    # Parameter references must be quoted: "%" cannot begin a plain YAML
    # scalar, and Symfony's YAML parser deprecated/removed the unquoted form.
    translator: { fallback: "%locale%" }
    secret: "%secret%"
    router:
        resource: "%kernel.root_dir%/config/routing.yml"
        strict_requirements: ~
    form: ~
    csrf_protection: ~
    validation: { enable_annotations: true }
    templating:
        engines: ['twig']
        #assets_version: SomeVersionScheme
    default_locale: "%locale%"
    trusted_proxies: ~
    session: ~
    fragments: ~
    http_method_override: true
# Twig Configuration
# NOTE(review): a second top-level "twig:" key (globals) appears further down
# this file; duplicate top-level keys are last-wins in most YAML loaders, so
# the two stanzas should be merged into one.
twig:
    debug: "%kernel.debug%"
    strict_variables: "%kernel.debug%"
# Assetic Configuration
# NOTE(review): "assetic:" is declared twice more further down in this file;
# duplicate top-level keys are last-wins when parsed, so this stanza may be
# silently discarded — the three stanzas should be merged into one.
assetic:
    debug: "%kernel.debug%"
    use_controller: false
    bundles: [ ]
    #java: /usr/bin/java
    filters:
        cssrewrite: ~
    #closure:
    #    jar: "%kernel.root_dir%/Resources/java/compiler.jar"
    #yui_css:
    #    jar: "%kernel.root_dir%/Resources/java/yuicompressor-2.4.7.jar"
# Doctrine Configuration
doctrine:
    dbal:
        driver: "%database_driver%"
        host: "%database_host%"
        port: "%database_port%"
        dbname: "%database_name%"
        user: "%database_user%"
        password: "%<PASSWORD>%"
        charset: UTF8
        # if using pdo_sqlite as your database driver, add the path in parameters.yml
        # e.g. database_path: "%kernel.root_dir%/data/data.db3"
        # path: "%database_path%"
    orm:
        auto_generate_proxy_classes: "%kernel.debug%"
        auto_mapping: true
# Swiftmailer Configuration
swiftmailer:
    transport: "%mailer_transport%"
    host: "%mailer_host%"
    username: "%mailer_user%"
    password: "%mailer_password%"
    spool: { type: memory }
# app/config/config.yml
# app/config/config.yml
fos_user:
db_driver: orm # other valid values are 'mongodb', 'couchdb' and 'propel'
firewall_name: main
user_class: MyApp\UserBundle\Entity\User
## config de la captcha
genemu_form:
recaptcha:
public_key: "<KEY>"
private_key: "<KEY>"
##clé secrete <KEY>
##clé public <KEY>
# app/config/config.yml
fos_comment:
db_driver: orm
class:
model:
comment: MyApp\ForumBundle\Entity\Comment
thread: MyApp\ForumBundle\Entity\Thread
vote: MyApp\ForumBundle\Entity\Vote
#app/config/config.yml
# NOTE(review): this file declared "services:" twice; with last-wins duplicate
# key handling the second block silently replaced the first, dropping the
# debug.twig.extension service. Both service definitions are merged here.
services:
    debug.twig.extension:
        class: Twig_Extensions_Extension_Debug
        tags: [{ name: 'twig.extension' }]
    # Twig text extension (provides "truncate" for displaying long text)
    twig.extension.text:
        class: Twig_Extensions_Extension_Text
        tags:
            - { name: twig.extension }
assetic:
    bundles: [ "FOSCommentBundle" ]
# in app/config/config.yml
craue_twig_extensions: ~
twig:
globals:
variable: aaa
# app/config/config.yml
avalanche_imagine:
filters:
my_thumb:
type: thumbnail
options: { size: [120, 90], mode: outbound }
# Stof\DoctrineExtensionBundle configuration
stof_doctrine_extensions:
orm:
default:
sluggable: true
uploadable: true
# CSS JS configuration
# NOTE(review): "bundles" was repeated four times; duplicate YAML keys are
# last-wins, so only MyAppUserBundle was actually registered. Merged into a
# single list so all four bundles are processed.
assetic:
    bundles:
        - "MyAppEspritBundle"
        - "MyAppArticleBundle"
        - "MyAppForumBundle"
        - "MyAppUserBundle"
# app/config/config.yml
#fos_js_routing:
# cache_control:
# # All are optional, defaults shown
# public: false # can be true (public) or false (private)
# maxage: null # integer value, e.g. 300
# smaxage: null # integer value, e.g. 300
# expires: null # anything that can be fed to "new \DateTime($expires)", e.g. "5 minutes"
# vary: [] # string or array, e.g. "Cookie" or [ Cookie, Accept ]
|
app/config/config.yml
|
items:
- uid: TcoData.IShadowTcoDataTwinController
commentId: T:TcoData.IShadowTcoDataTwinController
id: IShadowTcoDataTwinController
parent: TcoData
children:
- TcoData.IShadowTcoDataTwinController.AttributeName
- TcoData.IShadowTcoDataTwinController.CreatePlainerType
langs:
- csharp
- vb
name: IShadowTcoDataTwinController
nameWithType: IShadowTcoDataTwinController
fullName: TcoData.IShadowTcoDataTwinController
type: Interface
source:
remote:
path: src/TcoData/src/TcoDataConnector/_generated/TcoData.g.cs
branch: dev
repo: https://github.com/TcOpenGroup/TcOpen
id: IShadowTcoDataTwinController
path: ../TcOpen/src/TcoData/src/TcoDataConnector/_generated/TcoData.g.cs
startLine: 159
assemblies:
- TcoDataConnector
namespace: TcoData
syntax:
content: public interface IShadowTcoDataTwinController
content.vb: Public Interface IShadowTcoDataTwinController
modifiers.csharp:
- public
- interface
modifiers.vb:
- Public
- Interface
- uid: TcoData.IShadowTcoDataTwinController.AttributeName
commentId: P:TcoData.IShadowTcoDataTwinController.AttributeName
id: AttributeName
parent: TcoData.IShadowTcoDataTwinController
langs:
- csharp
- vb
name: AttributeName
nameWithType: IShadowTcoDataTwinController.AttributeName
fullName: TcoData.IShadowTcoDataTwinController.AttributeName
type: Property
source:
remote:
path: src/TcoData/src/TcoDataConnector/_generated/TcoData.g.cs
branch: dev
repo: https://github.com/TcOpenGroup/TcOpen
id: AttributeName
path: ../TcOpen/src/TcoData/src/TcoDataConnector/_generated/TcoData.g.cs
startLine: 161
assemblies:
- TcoDataConnector
namespace: TcoData
syntax:
content: string AttributeName { get; }
parameters: []
return:
type: System.String
content.vb: ReadOnly Property AttributeName As String
overload: TcoData.IShadowTcoDataTwinController.AttributeName*
modifiers.csharp:
- get
modifiers.vb:
- ReadOnly
- uid: TcoData.IShadowTcoDataTwinController.CreatePlainerType
commentId: M:TcoData.IShadowTcoDataTwinController.CreatePlainerType
id: CreatePlainerType
parent: TcoData.IShadowTcoDataTwinController
langs:
- csharp
- vb
name: CreatePlainerType()
nameWithType: IShadowTcoDataTwinController.CreatePlainerType()
fullName: TcoData.IShadowTcoDataTwinController.CreatePlainerType()
type: Method
source:
remote:
path: src/TcoData/src/TcoDataConnector/_generated/TcoData.g.cs
branch: dev
repo: https://github.com/TcOpenGroup/TcOpen
id: CreatePlainerType
path: ../TcOpen/src/TcoData/src/TcoDataConnector/_generated/TcoData.g.cs
startLine: 166
assemblies:
- TcoDataConnector
namespace: TcoData
syntax:
content: PlainTcoDataTwinController CreatePlainerType()
return:
type: TcoData.PlainTcoDataTwinController
content.vb: Function CreatePlainerType As PlainTcoDataTwinController
overload: TcoData.IShadowTcoDataTwinController.CreatePlainerType*
references:
- uid: TcoData
commentId: N:TcoData
name: TcoData
nameWithType: TcoData
fullName: TcoData
- uid: TcoData.IShadowTcoDataTwinController.AttributeName*
commentId: Overload:TcoData.IShadowTcoDataTwinController.AttributeName
name: AttributeName
nameWithType: IShadowTcoDataTwinController.AttributeName
fullName: TcoData.IShadowTcoDataTwinController.AttributeName
- uid: System.String
commentId: T:System.String
parent: System
isExternal: true
name: String
nameWithType: String
fullName: System.String
- uid: System
commentId: N:System
isExternal: true
name: System
nameWithType: System
fullName: System
- uid: TcoData.IShadowTcoDataTwinController.CreatePlainerType*
commentId: Overload:TcoData.IShadowTcoDataTwinController.CreatePlainerType
name: CreatePlainerType
nameWithType: IShadowTcoDataTwinController.CreatePlainerType
fullName: TcoData.IShadowTcoDataTwinController.CreatePlainerType
- uid: TcoData.PlainTcoDataTwinController
commentId: T:TcoData.PlainTcoDataTwinController
parent: TcoData
name: PlainTcoDataTwinController
nameWithType: PlainTcoDataTwinController
fullName: TcoData.PlainTcoDataTwinController
|
api/TcOpen.Inxton/TcOpen.Inxton.Data/TcoData.IShadowTcoDataTwinController.yml
|
Collections:
- Name: HRNet
Paper:
Title: Deep high-resolution representation learning for human pose estimation
URL: http://openaccess.thecvf.com/content_CVPR_2019/html/Sun_Deep_High-Resolution_Representation_Learning_for_Human_Pose_Estimation_CVPR_2019_paper.html
README: https://github.com/open-mmlab/mmpose/blob/master/docs/en/papers/backbones/hrnet.md
Models:
- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/posetrack18/hrnet_w32_posetrack18_256x192.py
In Collection: HRNet
Metadata:
Architecture: &id001
- HRNet
Training Data: PoseTrack18
Name: topdown_heatmap_hrnet_w32_posetrack18_256x192
Results:
- Dataset: PoseTrack18
Metrics:
Ankl: 78.8
Elb: 84.3
Head: 87.4
Hip: 79.7
Knee: 81.8
Shou: 88.6
Total: 83.0
Wri: 78.5
Task: Body 2D Keypoint
Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth
- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/posetrack18/hrnet_w32_posetrack18_384x288.py
In Collection: HRNet
Metadata:
Architecture: *id001
Training Data: PoseTrack18
Name: topdown_heatmap_hrnet_w32_posetrack18_384x288
Results:
- Dataset: PoseTrack18
Metrics:
Ankl: 79.4
Elb: 85.0
Head: 87.0
Hip: 80.5
Knee: 82.6
Shou: 88.8
Total: 83.6
Wri: 80.1
Task: Body 2D Keypoint
Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth
- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/posetrack18/hrnet_w48_posetrack18_256x192.py
In Collection: HRNet
Metadata:
Architecture: *id001
Training Data: PoseTrack18
Name: topdown_heatmap_hrnet_w48_posetrack18_256x192
Results:
- Dataset: PoseTrack18
Metrics:
Ankl: 80.3
Elb: 85.8
Head: 88.2
Hip: 80.7
Knee: 83.3
Shou: 90.1
Total: 84.4
Wri: 80.8
Task: Body 2D Keypoint
Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth
- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/posetrack18/hrnet_w48_posetrack18_384x288.py
In Collection: HRNet
Metadata:
Architecture: *id001
Training Data: PoseTrack18
Name: topdown_heatmap_hrnet_w48_posetrack18_384x288
Results:
- Dataset: PoseTrack18
Metrics:
Ankl: 80.9
Elb: 85.9
Head: 87.8
Hip: 81.1
Knee: 83.3
Shou: 90.0
Total: 84.5
Wri: 81.3
Task: Body 2D Keypoint
Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth
- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/posetrack18/hrnet_w32_posetrack18_256x192.py
In Collection: HRNet
Metadata:
Architecture: *id001
Training Data: PoseTrack18
Name: topdown_heatmap_hrnet_w32_posetrack18_256x192
Results:
- Dataset: PoseTrack18
Metrics:
Ankl: 70.2
Elb: 79.5
Head: 78.0
Hip: 76.9
Knee: 76.6
Shou: 82.9
Total: 76.9
Wri: 73.8
Task: Body 2D Keypoint
Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_256x192-1ee951c4_20201028.pth
- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/posetrack18/hrnet_w32_posetrack18_384x288.py
In Collection: HRNet
Metadata:
Architecture: *id001
Training Data: PoseTrack18
Name: topdown_heatmap_hrnet_w32_posetrack18_384x288
Results:
- Dataset: PoseTrack18
Metrics:
Ankl: 70.5
Elb: 80.4
Head: 79.9
Hip: 74.8
Knee: 76.1
Shou: 83.6
Total: 77.3
Wri: 74.5
Task: Body 2D Keypoint
Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_posetrack18_384x288-806f00a3_20211130.pth
- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/posetrack18/hrnet_w48_posetrack18_256x192.py
In Collection: HRNet
Metadata:
Architecture: *id001
Training Data: PoseTrack18
Name: topdown_heatmap_hrnet_w48_posetrack18_256x192
Results:
- Dataset: PoseTrack18
Metrics:
Ankl: 70.4
Elb: 80.6
Head: 80.1
Hip: 74.3
Knee: 76.8
Shou: 83.4
Total: 77.4
Wri: 74.8
Task: Body 2D Keypoint
Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_256x192-b5d9b3f1_20211130.pth
- Config: configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/posetrack18/hrnet_w48_posetrack18_384x288.py
In Collection: HRNet
Metadata:
Architecture: *id001
Training Data: PoseTrack18
Name: topdown_heatmap_hrnet_w48_posetrack18_384x288
Results:
- Dataset: PoseTrack18
Metrics:
Ankl: 71.7
Elb: 80.9
Head: 80.2
Hip: 74.7
Knee: 76.7
Shou: 83.8
Total: 77.8
Wri: 75.2
Task: Body 2D Keypoint
Weights: https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_posetrack18_384x288-5fd6d3ff_20211130.pth
|
configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/posetrack18/hrnet_posetrack18.yml
|
- name: Ferramentas XAML no Visual Studio
href: index.yml
- name: Visão geral do XAML
href: xaml-overview.md
- name: XAML Designer
items:
- name: Projetar XAML no Visual Studio e no Blend
href: designing-xaml-in-visual-studio.md
- name: Visão geral do Designer XAML
href: creating-a-ui-by-using-xaml-designer-in-visual-studio.md
- name: Trabalhar com elementos
href: working-with-elements-in-xaml-designer.md
- name: Organizar objetos em contêineres de layout
href: organize-objects-into-layout-containers-in-xaml-designer.md
- name: Criar e aplicar um recurso
href: how-to-create-and-apply-a-resource.md
- name: 'Passo a passo: Associar a dados'
href: walkthrough-binding-to-data-in-xaml-designer.md
- name: Depurar ou desabilitar código do projeto
href: debugging-or-disabling-project-code-in-xaml-designer.md
- name: Blend for Visual Studio
items:
- name: Visão geral
href: creating-a-ui-by-using-blend-for-visual-studio.md
- name: Desenhe as formas e demarcadores
href: draw-shapes-and-paths.md
- name: Modificar o estilo de objetos
href: modify-the-style-of-objects-in-blend.md
- name: Animar objetos
href: animate-objects-in-xaml-designer.md
- name: Exibir dados
href: display-data-in-blend.md
- name: Referência
items:
- name: Avisos e erros de XAML
href: xaml-errors-warnings.md
- name: Atalhos de teclado do XAML Designer
href: keyboard-shortcuts-for-xaml-designer.md
- name: Atalhos de teclado do Blend
href: keyboard-shortcuts-in-blend.md
- name: Teclas modificadoras da prancheta (Blend)
href: artboard-modifier-keys-in-blend.md
- name: Teclas modificadoras da ferramenta Caneta (Blend)
href: pen-tool-modifier-keys-in-blend.md
- name: Teclas modificadoras da ferramenta Seleção Direta (Blend)
href: direct-selection-tool-modifier-keys-in-blend.md
- name: Depurar XAML
items:
- name: Inspecione as propriedades XAML durante a depuração
href: inspect-xaml-properties-while-debugging.md
- name: Recarga Dinâmica de XAML
items:
- name: Gravar e depurar executando código XAML
href: xaml-hot-reload.md
- name: Solução de problemas
href: xaml-hot-reload-troubleshooting.md
- name: Depurar XAML no Blend
href: debug-xaml-in-blend.md
- name: Depurar aplicativos UWP >>
href: ../debugger/debugging-windows-store-and-windows-universal-apps.md
- name: Windows Presentation Foundation (WPF)
items:
- name: Introdução
displayName: Windows Presentation Foundation (WPF)
href: ../designers/getting-started-with-wpf.md
- name: Depurar aplicativos WPF
items:
- name: Depurar o WPF
href: ../debugger/debugging-wpf.md?toc=/visualstudio/xaml-tools/toc.json&bc=/visualstudio/xaml-tools/breadcrumb/toc.json
- name: Usar o visualizador de árvore do WPF
href: ../debugger/how-to-use-the-wpf-tree-visualizer.md?toc=/visualstudio/xaml-tools/toc.json&bc=/visualstudio/xaml-tools/breadcrumb/toc.json
- name: Exibir informações de rastreamento do WPF
href: ../debugger/how-to-display-wpf-trace-information.md?toc=/visualstudio/xaml-tools/toc.json&bc=/visualstudio/xaml-tools/breadcrumb/toc.json
|
docs/xaml-tools/toc.yml
|
{% set version = "2.2.0" %}
{% set name = "InPAS" %}
{% set bioc = "3.14" %}
package:
name: 'bioconductor-{{ name|lower }}'
version: '{{ version }}'
source:
url:
- 'https://bioconductor.org/packages/{{ bioc }}/bioc/src/contrib/{{ name }}_{{ version }}.tar.gz'
- 'https://bioarchive.galaxyproject.org/{{ name }}_{{ version }}.tar.gz'
- 'https://depot.galaxyproject.org/software/bioconductor-{{ name|lower }}/bioconductor-{{ name|lower }}_{{ version }}_src_all.tar.gz'
md5: 4ddbb62fbfcf79395119f8ade22ebdb1
build:
number: 0
rpaths:
- lib/R/lib/
- lib/
noarch: generic
# Suggests: RUnit, BiocGenerics, BiocManager, rtracklayer, BiocStyle, knitr, markdown, rmarkdown, EnsDb.Hsapiens.v86, EnsDb.Mmusculus.v79, BSgenome.Hsapiens.UCSC.hg19, BSgenome.Mmusculus.UCSC.mm10, TxDb.Hsapiens.UCSC.hg19.knownGene, TxDb.Mmusculus.UCSC.mm10.knownGene
requirements:
host:
- 'bioconductor-annotationdbi >=1.56.0,<1.57.0'
- 'bioconductor-biobase >=2.54.0,<2.55.0'
- 'bioconductor-biocparallel >=1.28.0,<1.29.0'
- 'bioconductor-biostrings >=2.62.0,<2.63.0'
- 'bioconductor-bsgenome >=1.62.0,<1.63.0'
- 'bioconductor-cleanupdtseq >=1.32.0,<1.33.0'
- 'bioconductor-genomeinfodb >=1.30.0,<1.31.0'
- 'bioconductor-genomicfeatures >=1.46.0,<1.47.0'
- 'bioconductor-genomicranges >=1.46.0,<1.47.0'
- 'bioconductor-iranges >=2.28.0,<2.29.0'
- 'bioconductor-limma >=3.50.0,<3.51.0'
- 'bioconductor-plyranges >=1.14.0,<1.15.0'
- 'bioconductor-preprocesscore >=1.56.0,<1.57.0'
- 'bioconductor-s4vectors >=0.32.0,<0.33.0'
- r-base
- r-dbi
- r-depmixs4
- r-dplyr
- r-ggplot2
- r-magrittr
- r-purrr
- r-readr
- r-reshape2
- r-rsqlite
run:
- 'bioconductor-annotationdbi >=1.56.0,<1.57.0'
- 'bioconductor-biobase >=2.54.0,<2.55.0'
- 'bioconductor-biocparallel >=1.28.0,<1.29.0'
- 'bioconductor-biostrings >=2.62.0,<2.63.0'
- 'bioconductor-bsgenome >=1.62.0,<1.63.0'
- 'bioconductor-cleanupdtseq >=1.32.0,<1.33.0'
- 'bioconductor-genomeinfodb >=1.30.0,<1.31.0'
- 'bioconductor-genomicfeatures >=1.46.0,<1.47.0'
- 'bioconductor-genomicranges >=1.46.0,<1.47.0'
- 'bioconductor-iranges >=2.28.0,<2.29.0'
- 'bioconductor-limma >=3.50.0,<3.51.0'
- 'bioconductor-plyranges >=1.14.0,<1.15.0'
- 'bioconductor-preprocesscore >=1.56.0,<1.57.0'
- 'bioconductor-s4vectors >=0.32.0,<0.33.0'
- r-base
- r-dbi
- r-depmixs4
- r-dplyr
- r-ggplot2
- r-magrittr
- r-purrr
- r-readr
- r-reshape2
- r-rsqlite
test:
commands:
- '$R -e "library(''{{ name }}'')"'
about:
home: 'https://bioconductor.org/packages/{{ bioc }}/bioc/html/{{ name }}.html'
license: 'GPL (>= 2)'
summary: 'A Bioconductor package for identifying novel Alternative PolyAdenylation Sites (PAS) from RNA-seq data'
description: 'Alternative polyadenylation (APA) is one of the important post- transcriptional regulation mechanisms which occurs in most human genes. InPAS facilitates the discovery of novel APA sites and the differential usage of APA sites from RNA-Seq data. It leverages cleanUpdTSeq to fine tune identified APA sites by removing false sites.'
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3'
|
recipes/bioconductor-inpas/meta.yaml
|
uid: "com.azure.search.documents.implementation.models.SearchRequest"
fullName: "com.azure.search.documents.implementation.models.SearchRequest"
name: "SearchRequest"
nameWithType: "SearchRequest"
summary: "Parameters for filtering, sorting, faceting, paging, and other search query behaviors."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public final class SearchRequest"
constructors:
- "com.azure.search.documents.implementation.models.SearchRequest.SearchRequest()"
methods:
- "com.azure.search.documents.implementation.models.SearchRequest.getFacets()"
- "com.azure.search.documents.implementation.models.SearchRequest.getFilter()"
- "com.azure.search.documents.implementation.models.SearchRequest.getHighlightFields()"
- "com.azure.search.documents.implementation.models.SearchRequest.getHighlightPostTag()"
- "com.azure.search.documents.implementation.models.SearchRequest.getHighlightPreTag()"
- "com.azure.search.documents.implementation.models.SearchRequest.getMinimumCoverage()"
- "com.azure.search.documents.implementation.models.SearchRequest.getOrderBy()"
- "com.azure.search.documents.implementation.models.SearchRequest.getQueryType()"
- "com.azure.search.documents.implementation.models.SearchRequest.getScoringParameters()"
- "com.azure.search.documents.implementation.models.SearchRequest.getScoringProfile()"
- "com.azure.search.documents.implementation.models.SearchRequest.getScoringStatistics()"
- "com.azure.search.documents.implementation.models.SearchRequest.getSearchFields()"
- "com.azure.search.documents.implementation.models.SearchRequest.getSearchMode()"
- "com.azure.search.documents.implementation.models.SearchRequest.getSearchText()"
- "com.azure.search.documents.implementation.models.SearchRequest.getSelect()"
- "com.azure.search.documents.implementation.models.SearchRequest.getSessionId()"
- "com.azure.search.documents.implementation.models.SearchRequest.getSkip()"
- "com.azure.search.documents.implementation.models.SearchRequest.getTop()"
- "com.azure.search.documents.implementation.models.SearchRequest.isIncludeTotalResultCount()"
- "com.azure.search.documents.implementation.models.SearchRequest.setFacets(java.util.List<java.lang.String>)"
- "com.azure.search.documents.implementation.models.SearchRequest.setFilter(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setHighlightFields(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setHighlightPostTag(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setHighlightPreTag(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setIncludeTotalResultCount(java.lang.Boolean)"
- "com.azure.search.documents.implementation.models.SearchRequest.setMinimumCoverage(java.lang.Double)"
- "com.azure.search.documents.implementation.models.SearchRequest.setOrderBy(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setQueryType(com.azure.search.documents.models.QueryType)"
- "com.azure.search.documents.implementation.models.SearchRequest.setScoringParameters(java.util.List<java.lang.String>)"
- "com.azure.search.documents.implementation.models.SearchRequest.setScoringProfile(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setScoringStatistics(com.azure.search.documents.models.ScoringStatistics)"
- "com.azure.search.documents.implementation.models.SearchRequest.setSearchFields(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setSearchMode(com.azure.search.documents.models.SearchMode)"
- "com.azure.search.documents.implementation.models.SearchRequest.setSearchText(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setSelect(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setSessionId(java.lang.String)"
- "com.azure.search.documents.implementation.models.SearchRequest.setSkip(java.lang.Integer)"
- "com.azure.search.documents.implementation.models.SearchRequest.setTop(java.lang.Integer)"
type: "class"
metadata: {}
package: "com.azure.search.documents.implementation.models"
artifact: com.azure:azure-search-documents:11.2.0-beta.3
|
preview/docs-ref-autogen/com.azure.search.documents.implementation.models.SearchRequest.yml
|
name: Publish release
on:
workflow_dispatch:
jobs:
get_release_version:
name: Get Release Version
runs-on: ubuntu-latest
if: github.repository_owner == 'ballerina-platform'
steps:
- id: checkout
uses: actions/checkout@v2
- id: execute
run: |
        export VERSION="$(grep -w 'version' gradle.properties | cut -d= -f2 | cut -d- -f1)"
echo "::set-output name=version::${VERSION}"
outputs:
version: ${{steps.execute.outputs.version}}
create_release:
needs: get_release_version
name: Create Release
runs-on: ubuntu-latest
if: github.repository_owner == 'ballerina-platform'
outputs:
upload_url: ${{ steps.create_release.outputs.upload_url }}
steps:
- name: Create release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.BALLERINA_BOT_TOKEN }}
with:
tag_name: ${{ needs.get_release_version.outputs.version }}
release_name: ${{ needs.get_release_version.outputs.version }}
draft: false
prerelease: false
release_assets:
name: Release assets
needs: [ get_release_version, create_release ]
runs-on: ${{ matrix.os }}
if: github.repository_owner == 'ballerina-platform'
strategy:
fail-fast: false
matrix:
os: [ ubuntu-latest, windows-latest, macOS-latest ]
steps:
- name: Checkout Code
uses: actions/checkout@v2
- name: Set up JDK 11
uses: actions/setup-java@v1
with:
java-version: 11
- name: Build JRE on Linux
if: matrix.os == 'ubuntu-latest'
env:
packageUser: ${{ secrets.BALLERINA_BOT_USERNAME }}
packagePAT: ${{ secrets.BALLERINA_BOT_TOKEN }}
run: ./gradlew build
- name: Build JRE on Windows
if: matrix.os == 'windows-latest'
env:
packageUser: ${{ secrets.BALLERINA_BOT_USERNAME }}
packagePAT: ${{ secrets.BALLERINA_BOT_TOKEN }}
run: ./gradlew.bat build
- name: Build JRE on macOS
if: matrix.os == 'macOS-latest'
env:
packageUser: ${{ secrets.BALLERINA_BOT_USERNAME }}
packagePAT: ${{ secrets.BALLERINA_BOT_TOKEN }}
run: ./gradlew build
- name: Upload linux artifacts
if: matrix.os == 'ubuntu-latest'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.BALLERINA_BOT_TOKEN }}
with:
upload_url: ${{ needs.create_release.outputs.upload_url }}
asset_name: ballerina-jre-linux-${{ needs.get_release_version.outputs.version }}.zip
asset_path: build/distributions/ballerina-jre-linux-${{ needs.get_release_version.outputs.version }}-SNAPSHOT.zip
asset_content_type: application/octet-stream
- name: Upload windows artifacts
if: matrix.os == 'windows-latest'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.BALLERINA_BOT_TOKEN }}
with:
upload_url: ${{ needs.create_release.outputs.upload_url }}
asset_name: ballerina-jre-windows-${{ needs.get_release_version.outputs.version }}.zip
asset_path: build/distributions/ballerina-jre-windows-${{ needs.get_release_version.outputs.version }}-SNAPSHOT.zip
asset_content_type: application/octet-stream
- name: Upload macOS artifacts
if: matrix.os == 'macOS-latest'
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.BALLERINA_BOT_TOKEN }}
with:
upload_url: ${{ needs.create_release.outputs.upload_url }}
asset_name: ballerina-jre-macos-${{ needs.get_release_version.outputs.version }}.zip
asset_path: build/distributions/ballerina-jre-macos-${{ needs.get_release_version.outputs.version }}-SNAPSHOT.zip
asset_content_type: application/octet-stream
|
.github/workflows/publish-release.yml
|
items:
- uid: com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles
id: DefineAutoscaleSettingResourceProfiles
artifact: com.microsoft.azure:azure-mgmt-monitor:1.37.0
parent: com.microsoft.azure.management.monitor
children:
- com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles.defineAutoscaleProfile(java.lang.String)
langs:
- java
name: AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles
nameWithType: AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles
fullName: com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles
type: Interface
package: com.microsoft.azure.management.monitor
summary: The stage of the definition which specifies autoscale profile.
syntax:
content: public static interface AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles
- uid: com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles.defineAutoscaleProfile(java.lang.String)
id: defineAutoscaleProfile(java.lang.String)
artifact: com.microsoft.azure:azure-mgmt-monitor:1.37.0
parent: com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles
langs:
- java
name: defineAutoscaleProfile(String name)
nameWithType: AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles.defineAutoscaleProfile(String name)
fullName: com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles.defineAutoscaleProfile(String name)
overload: com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles.defineAutoscaleProfile*
type: Method
package: com.microsoft.azure.management.monitor
summary: Starts the definition of automatic scaling profiles that specify different scaling parameters for different time periods. A maximum of 20 profiles can be specified.
syntax:
content: public abstract AutoscaleProfile.DefinitionStages.Blank defineAutoscaleProfile(String name)
parameters:
- id: name
type: java.lang.String
description: name of the autoscale profile.
return:
type: com.microsoft.azure.management.monitor.AutoscaleProfile.DefinitionStages.Blank
description: the next stage of the definition.
references:
- uid: java.lang.String
spec.java:
- uid: java.lang.String
name: String
fullName: java.lang.String
- uid: com.microsoft.azure.management.monitor.AutoscaleProfile.DefinitionStages.Blank
name: AutoscaleProfile.DefinitionStages.Blank
nameWithType: AutoscaleProfile.DefinitionStages.Blank
fullName: com.microsoft.azure.management.monitor.AutoscaleProfile.DefinitionStages.Blank
- uid: com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles.defineAutoscaleProfile*
name: defineAutoscaleProfile
nameWithType: AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles.defineAutoscaleProfile
fullName: com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles.defineAutoscaleProfile
package: com.microsoft.azure.management.monitor
|
docs-ref-autogen/com.microsoft.azure.management.monitor.AutoscaleSetting.DefinitionStages.DefineAutoscaleSettingResourceProfiles.yml
|
version: 2.1
executors:
gcloud:
docker:
- image: google/cloud-sdk:latest
jobs:
set-build-number-prod:
executor: gcloud
working_directory: ~/repo
steps:
- checkout
- run:
name: Authenticate with gcloud
command: |
            echo "$GCLOUD_DOCKER_PRIVATE_CREDENTIALS" > ${HOME}/gcloud-service-key.json
gcloud auth activate-service-account --key-file=${HOME}/gcloud-service-key.json
- run:
name: add build name to file
command: |
            apt-get update && apt-get install -y jq
chmod +x .circleci/get-build-number.sh
chmod +x .circleci/notify-slack.sh
TAG=$(./.circleci/get-build-number.sh facebook-apps)
echo $TAG > ~/build-id
./.circleci/notify-slack.sh info Deploy to prod workflow started with tag: $TAG
- run:
name: Notify Slack on failure
when: on_fail
command: |
chmod +x .circleci/notify-slack.sh
./.circleci/notify-slack.sh false Unable to create tag
- persist_to_workspace:
root: ~/
paths:
- build-id
build-image:
executor: gcloud
working_directory: ~/repo
steps:
- checkout
- attach_workspace:
at: ~/repo
- run:
name: Authenticate with gcloud
command: |
            echo "$GCLOUD_DOCKER_PRIVATE_CREDENTIALS" > ${HOME}/gcloud-service-key.json
gcloud auth activate-service-account --project=dnt-docker-registry-private --key-file=${HOME}/gcloud-service-key.json
- setup_remote_docker:
docker_layer_caching: true
- run:
name: Build facebook-apps docker image and push image to GCR
command: |
gcloud auth configure-docker eu.gcr.io --quiet
TAG=$(cat ~/repo/build-id)
cd ~/repo
docker build -t facebook-apps .
docker tag facebook-apps eu.gcr.io/dnt-docker-registry-private/facebook-apps:latest
docker tag facebook-apps eu.gcr.io/dnt-docker-registry-private/facebook-apps:$TAG
docker push eu.gcr.io/dnt-docker-registry-private/facebook-apps:latest
docker push eu.gcr.io/dnt-docker-registry-private/facebook-apps:$TAG
- run:
name: Notify Slack on failure
when: on_fail
command: |
chmod +x .circleci/notify-slack.sh
./.circleci/notify-slack.sh false Build image failed
- run:
name: Notify Slack on success
when: on_success
command: |
chmod +x .circleci/notify-slack.sh
./.circleci/notify-slack.sh true Build image succeeded
deploy-to-prod:
executor: gcloud
steps:
- checkout
- attach_workspace:
at: ~/repo
- run:
name: Authenticate with gcloud
command: |
            echo "$CLOUD_RUN_DEPLOYER_CREDENTIALS" > ${HOME}/gcloud-service-key.json
gcloud auth activate-service-account --key-file=${HOME}/gcloud-service-key.json
- run:
name: Deploy Cloud Run service
command: |
TAG=$(cat ~/repo/build-id)
gcloud beta run deploy facebook-apps--prod \
--project dnt-platform \
--platform managed \
--region europe-west1 \
--allow-unauthenticated \
--revision-suffix=${TAG} \
--max-instances=1 \
--concurrency=500 \
          --memory=128Mi \
--image eu.gcr.io/dnt-docker-registry-private/facebook-apps:${TAG} \
--service-account=<EMAIL>
- run:
name: Notify Slack on failure
when: on_fail
command: |
chmod +x .circleci/notify-slack.sh
TAG=$(cat ~/repo/build-id)
./.circleci/notify-slack.sh false Deploy to prod failed
- run:
name: Notify Slack on success
when: on_success
command: |
chmod +x .circleci/notify-slack.sh
TAG=$(cat ~/repo/build-id)
./.circleci/notify-slack.sh true Deploy to prod succeeded :tada:
workflows:
version: 2
test-code-and-build-and-deploy-prod:
jobs:
- set-build-number-prod:
context: dnt-k8s-prod
filters:
branches:
only: /^master$/
- build-image:
context: dnt-k8s-prod
requires:
- set-build-number-prod
filters:
branches:
only: /^master$/
- deploy-to-prod:
context: dnt-k8s-prod
requires:
- build-image
filters:
branches:
only: /^master$/
|
.circleci/config.yml
|
{% set name = "lalpulsar" %}
{% set version = "1.17.0" %}
{% set sha256 = "548da87dad8272ca8fb5015da9b41ef6e1897976c4d44ce5593812aa4601e225" %}
package:
name: {{ name }}
version: {{ version }}
source:
fn: {{ name }}-{{ version }}.tar.xz
url: http://software.ligo.org/lscsoft/source/lalsuite/{{ name }}-{{ version }}.tar.xz
sha256: {{ sha256 }}
build:
number: 0
skip: true # [win]
requirements:
build:
- pkg-config
- make
- {{ compiler('c') }}
host:
- swig >=3.0.7
- cfitsio
- fftw
- gsl
- lal >=6.19.0
outputs:
- name: lalpulsar
script: install-c.sh
requirements:
host:
- {{ compiler('c') }}
- make
- swig >=3.0.7
- cfitsio
- fftw
- gsl
- lal >=6.19.0
run:
- cfitsio
- fftw
- gsl
- lal >=6.19.0
test:
commands:
- lalpulsar_version --verbose
about:
home: https://wiki.ligo.org/DASWG/LALSuite
license: GPLv2+
license_family: GPL
license_file: COPYING
summary: LSC Algorithm Pulsar Library
description: |
The LSC Algorithm Pulsar Library for gravitational wave data analysis.
This package contains the shared-object libraries needed to run
applications that use the LAL Pulsar library.
- name: python-lalpulsar
script: install-python.sh
requirements:
host:
- {{ compiler('c') }}
- make
- pkg-config
- swig >=3.0.7
- {{ pin_subpackage('lalpulsar', exact=True) }}
- python
- numpy
- python-lal >=6.19.0
run:
- {{ pin_subpackage('lalpulsar', exact=True) }}
- python
- {{ pin_compatible('numpy') }}
- python-lal >=6.19.0
- astropy
test:
imports:
- lalpulsar
- lalpulsar.NstarTools
- lalpulsar.PulsarParametersWrapper
- lalpulsar.simulateCW
about:
home: https://wiki.ligo.org/DASWG/LALSuite
license: GPLv2+
license_family: GPL
license_file: COPYING
summary: LSC Algorithm Pulsar Library
description: |
The LSC Algorithm Pulsar Library for gravitational wave data analysis.
This package contains the python bindings.
about:
home: https://wiki.ligo.org/DASWG/LALSuite
license: GPLv2+
license_family: GPL
summary: LSC Algorithm Pulsar Library
extra:
recipe-maintainers:
- duncanmmacleod
- skymoo
|
recipe/meta.yaml
|
wiki:
controller: App\Controller\WikiController::wiki
defaults: { path: index }
path: /wiki/{path}
methods: [GET]
requirements: { path: "%wiki_page_regex%" }
wiki_shortcut:
controller: Symfony\Bundle\FrameworkBundle\Controller\RedirectController::redirectAction
defaults: { route: wiki }
path: /w/{path}
requirements: { path: "%wiki_page_regex%" }
wiki_create:
controller: App\Controller\WikiController::create
path: /wiki/_create/{path}
methods: [GET, POST]
requirements: { path: "%wiki_page_regex%" }
wiki_delete:
controller: App\Controller\WikiController::delete
path: /wiki/_delete/{path}
methods: [POST]
requirements: { path: "%wiki_page_regex%" }
wiki_edit:
controller: App\Controller\WikiController::edit
path: /wiki/_edit/{path}
methods: [GET, POST]
requirements: { path: "%wiki_page_regex%" }
wiki_lock:
controller: App\Controller\WikiController::lock
defaults: { lock: true }
methods: [POST]
path: /wiki/_lock/{path}
requirements: { path: "%wiki_page_regex%" }
wiki_unlock:
controller: App\Controller\WikiController::lock
defaults: { lock: false }
methods: [POST]
path: /wiki/_unlock/{path}
requirements: { path: "%wiki_page_regex%" }
wiki_history:
controller: App\Controller\WikiController::history
defaults: { page: 1 }
path: /wiki/_history/{path}/{page}
methods: [GET]
requirements: { path: "%wiki_page_regex%" }
wiki_diff:
controller: App\Controller\WikiController::diff
path: /wiki/_diff
methods: [GET]
wiki_all:
controller: App\Controller\WikiController::all
defaults: { page: 1 }
path: /wiki/_all/{page}
methods: [GET]
wiki_revision:
controller: App\Controller\WikiController::revision
path: /wiki/_revision/{id}
methods: [GET]
requirements: { id: "%uuid_regex%" }
wiki_recent_changes:
controller: App\Controller\WikiController::recentChanges
defaults: { page: 1 }
path: /wiki/_recent/{page}
methods: [GET]
requirements: { page: \d+ }
|
config/app_routes/wiki.yaml
|
name: Upload to Test PyPI
on:
workflow_dispatch:
inputs:
overrideVersion:
description: Manually force a version
env:
CIBW_BUILD_VERBOSITY: 1
SETUPTOOLS_SCM_PRETEND_VERSION: ${{ github.event.inputs.overrideVersion }}
jobs:
make_sdist:
name: Make SDist
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
uses: actions/checkout@v2
with:
submodules: recursive
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: '3.8'
- name: Install dependencies
run: |
python -m pip install build twine
- name: Build SDist
run: |
python -m build --sdist
- name: Put SDist in artifact container
uses: actions/upload-artifact@v2
with:
path: dist/*.tar.gz
- name: Check SDist metadata
run: |
twine check dist/*
build_wheels:
name: Build wheels on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
bitness: [32, 64]
        python: ["3.6", "3.7", "3.8", "3.9"]
include:
- os: windows-latest
bitness: 64
platform_id: win_amd64
- os: windows-latest
bitness: 32
platform_id: win32
- os: ubuntu-latest
bitness: 64
platform_id: manylinux_x86_64
- os: ubuntu-latest
bitness: 32
platform_id: manylinux_i686
- os: macos-latest
bitness: 64
platform_id: macosx_x86_64
exclude:
- os: macos-latest
bitness: 32
steps:
- name: Checkout repository
uses: actions/checkout@v2
with:
submodules: recursive
- name: Get Python Version Name
uses: mad9000/actions-find-and-replace-string@2
id: pythonversion
with:
source: ${{ matrix.python }}
find: '.'
replace: ''
- name: Build wheels
uses: joerick/cibuildwheel@v1.10.0
env:
CIBW_BUILD: cp${{ steps.pythonversion.outputs.value }}-${{ matrix.platform_id }}
CIBW_ENVIRONMENT: "SETUPTOOLS_SCM_PRETEND_VERSION=${{ github.event.inputs.overrideVersion }}"
- name: Upload wheels to artifact container
uses: actions/upload-artifact@v2
with:
path: wheelhouse/*.whl
upload_all:
needs: [build_wheels, make_sdist]
runs-on: ubuntu-latest
steps:
- name: Get SDist and wheels from artifact container
uses: actions/download-artifact@v2
with:
name: artifact
path: dist
- name: Publish wheels to Test PyPI
uses: pypa/gh-action-pypi-publish@v1.4.2
with:
user: ${{ secrets.TEST_PYPI_USERNAME }}
        password: ${{ secrets.TEST_PYPI_PASSWORD }}
repository_url: https://test.pypi.org/legacy/
|
.github/workflows/build-test-pypi.yml
|
name: Java CI with Maven
on:
push:
branches: [ master, feat/* ]
jobs:
trialist3:
environment: TRIALIST3_PRODUCTION
runs-on: ubuntu-latest
env:
REGION: us-east-1
VERSION: v0.0.3
TAG_CLIENT: trialist3
steps:
- uses: actions/checkout@v2
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Cache local Maven repository
uses: actions/cache@v2
with:
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: |
${{ runner.os }}-maven-
- name: Build with Maven
run: ./resources/build.sh -Pcoverage
- name: Upload coverage report
uses: codecov/codecov-action@v1
- uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.ACCESS_KEY }}
aws-region: ${{ env.REGION }}
- run: ./build-deploy.sh -v ${{ env.VERSION }} -b ${{ secrets.BUCKET }} -c ${{ env.TAG_CLIENT }}
trialist2:
environment: TRIALIST2_PRODUCTION
runs-on: ubuntu-latest
env:
REGION: us-east-1
VERSION: v0.0.3
TAG_CLIENT: trialist2
steps:
- uses: actions/checkout@v2
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
java-version: 1.8
# - name: Cache local Maven repository
# uses: actions/cache@v2
# with:
# path: ~/.m2/repository
# key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
# restore-keys: |
# ${{ runner.os }}-maven-
# - name: Build with Maven
# run: ./resources/build.sh -Pcoverage
# - name: Upload coverage report
# uses: codecov/codecov-action@v1
- uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.ACCESS_KEY }}
aws-region: ${{ env.REGION }}
- run: ./build-deploy.sh -v ${{ env.VERSION }} -b ${{ secrets.BUCKET }} -c ${{ env.TAG_CLIENT }}
|
.github/workflows/maven.yml
|
base_experiment: # the base config object
environment:
num_clients: 100
num_selected_clients: 40
num_malicious_clients: 1
experiment_name: "cifar_bounds"
use_config_dir: true
attack_frequency: 1.0
attacker_full_knowledge: true
load_model: ./models/resnet18_080.h5
server:
num_rounds: 1000
num_test_batches: 50
aggregator:
name: FedAvg
global_learning_rate: 1
client:
model_name: resnet18
clip:
type: l2
value: 5
benign_training:
num_epochs: 2
batch_size: 64
optimizer: SGD
learning_rate: 0.1
malicious:
attack_start: 5
attack_stop: 1005
estimate_other_updates: false
objective:
name: TargetedAttack
args:
num_epochs: 25
num_batch: 10
poison_samples: 16
optimizer: SGD
learning_rate: 0.35
step_decay: true
backdoor:
type: edge
edge_case_type: NorthWesternEdgeCase
evasion:
name: NormBoundPGDEvasion
args:
norm_type: l2
pgd_factor: 0.0625
scale_factor: 100
dataset:
dataset: cifar10
data_distribution: dirichlet
augment_data: true # for training
job:
minutes: 1440 # Max. minutes to run. Leonhard has 240 and 1440 queues
cpu_cores: 20
cpu_mem_per_core: 4096
use_gpu: 1
gpu_memory_min: 10240
experiments: # in the experiments object you have a list for the different experiments with the values for the fields marked as $VARIES$ in the base config object.
- server:
gaussian_noise: 0.000
#- server:
# gaussian_noise: 0.002
- server:
gaussian_noise: 0.004
#- server:
# gaussian_noise: 0.006
#- server:
# gaussian_noise: 0.008
- server:
gaussian_noise: 0.010
#- server:
# gaussian_noise: 0.012
#- server:
# gaussian_noise: 0.014
- server:
gaussian_noise: 0.016
#- server:
# gaussian_noise: 0.018
- server:
gaussian_noise: 0.020
- server:
gaussian_noise: 0.030
#- server:
# gaussian_noise: 0.040
- server:
gaussian_noise: 0.050
|
ansible/experiments/cifar_noise.yml
|
# Deployment for the CSPC operator, which reconciles CStorPoolCluster
# resources into cStor pool pods. Images, replica count, scheduling and
# security knobs all come from .Values.cspcOperator.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "cstor.fullname" . }}-cspc-operator
  {{- with .Values.cspcOperator.annotations }}
  annotations: {{ toYaml . | nindent 4 }}
  {{- end }}
  labels:
    {{- include "cstor.cspcOperator.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "cstor.cspcOperator.matchLabels" . | nindent 6 }}
  replicas: {{ .Values.cspcOperator.replicas }}
  strategy:
    # Recreate avoids two operator replicas reconciling pools concurrently
    # during an upgrade.
    type: Recreate
  template:
    metadata:
      {{- with .Values.cspcOperator.podAnnotations }}
      annotations: {{ toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "cstor.cspcOperator.labels" . | nindent 8 }}
        {{- if .Values.cspcOperator.podLabels }}
        {{ toYaml .Values.cspcOperator.podLabels | nindent 8 }}
        {{- end }}
    spec:
      serviceAccountName: {{ .Values.serviceAccount.cstorOperator.name }}
      containers:
        - name: {{ template "cstor.fullname" . }}-cspc-operator
          imagePullPolicy: {{ .Values.cspcOperator.image.pullPolicy }}
          image: "{{ .Values.cspcOperator.image.registry }}{{ .Values.cspcOperator.image.repository }}:{{ .Values.cspcOperator.image.tag }}"
          resources:
{{ toYaml .Values.cspcOperator.resources | indent 12 }}
          env:
            # Namespace / service-account / pod-name are read from the
            # downward API so the operator knows its own identity.
            - name: OPENEBS_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: OPENEBS_SERVICEACCOUNT_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.serviceAccountName
            - name: CSPC_OPERATOR_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # OPENEBS_IO_BASE_DIR is used to configure base directory for openebs on host path.
            # Where OpenEBS can store required files. Default base path will be /var/openebs
            # - name: OPENEBS_IO_BASE_DIR
            #   value: "/var/openebs"
            # OPENEBS_IO_CSTOR_POOL_SPARSE_DIR can be used to specify the hostpath
            # to be used for saving the shared content between the side cars
            # of cstor pool pod. This ENV is also used to indicate the location
            # of the sparse devices.
            # The default path used is /var/openebs/sparse
            #- name: OPENEBS_IO_CSTOR_POOL_SPARSE_DIR
            #  value: "/var/openebs/sparse"
            # Images the operator deploys for each pool pod's containers.
            - name: OPENEBS_IO_CSPI_MGMT_IMAGE
              value: "{{ .Values.cspcOperator.poolManager.image.registry }}{{ .Values.cspcOperator.poolManager.image.repository }}:{{ .Values.cspcOperator.poolManager.image.tag }}"
            - name: OPENEBS_IO_CSTOR_POOL_IMAGE
              value: "{{ .Values.cspcOperator.cstorPool.image.registry }}{{ .Values.cspcOperator.cstorPool.image.repository }}:{{ .Values.cspcOperator.cstorPool.image.tag }}"
            - name: OPENEBS_IO_CSTOR_POOL_EXPORTER_IMAGE
              value: "{{ .Values.cspcOperator.cstorPoolExporter.image.registry }}{{ .Values.cspcOperator.cstorPoolExporter.image.repository }}:{{ .Values.cspcOperator.cstorPoolExporter.image.tag }}"
            - name: RESYNC_INTERVAL
              value: "{{ .Values.cspcOperator.resyncInterval }}"
            {{- if .Values.imagePullSecrets }}
            - name: OPENEBS_IO_IMAGE_PULL_SECRETS
              value: "{{- range $.Values.imagePullSecrets }}{{ .name }},{{- end }}"
            {{- end }}
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
      # Fixed: was `indent 2`, which rendered the secret list at column 2
      # under a key at column 6 and produced invalid YAML. `indent 8`
      # matches the nodeSelector/securityContext/tolerations blocks below.
{{ toYaml .Values.imagePullSecrets | indent 8 }}
      {{- end }}
      {{- if .Values.cspcOperator.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.cspcOperator.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.cspcOperator.securityContext }}
      securityContext:
{{ toYaml .Values.cspcOperator.securityContext | indent 8 }}
      {{- end }}
      {{- if .Values.cspcOperator.tolerations }}
      tolerations:
{{ toYaml .Values.cspcOperator.tolerations | indent 8 }}
      {{- end }}
|
deploy/helm/charts/templates/cspc-operator.yaml
|
# Terraform-provider overrides for the Access Approval product: one
# resource override per settings scope (folder, project, organization).
# Each keeps its pre-rename legacy resource name, deletes by clearing the
# settings (custom_delete) and sends an update mask before create.
--- !ruby/object:Provider::Terraform::Config
overrides: !ruby/object:Overrides::ResourceOverrides
  # folders/{folder_id}/accessApprovalSettings
  FolderSettings: !ruby/object:Overrides::Terraform::ResourceOverride
    legacy_name: "google_folder_access_approval_settings"
    import_format: ["folders/{{folder_id}}/accessApprovalSettings"]
    examples:
      - !ruby/object:Provider::Terraform::Examples
        skip_test: true
        name: 'folder_access_approval_full'
        primary_resource_id: 'folder_access_approval'
        vars:
          folder_name: 'my-folder'
        test_env_vars:
          org_id: :ORG_ID
    custom_code: !ruby/object:Provider::Terraform::CustomCode
      custom_delete: templates/terraform/custom_delete/clear_folder_access_approval_settings.go.erb
      pre_create: templates/terraform/update_mask.erb
      # Shared constants (e.g. the enrolled-services hash func) are only
      # emitted once, from this first override.
      constants: templates/terraform/constants/access_approval.go.erb
    properties:
      # Both list fields are unordered in the API, so they are modelled as
      # sets; notification emails may also be populated server-side.
      notificationEmails: !ruby/object:Overrides::Terraform::PropertyOverride
        is_set: true
        default_from_api: true
      enrolledServices: !ruby/object:Overrides::Terraform::PropertyOverride
        is_set: true
        set_hash_func: accessApprovalEnrolledServicesHash
  # projects/{project}/accessApprovalSettings
  ProjectSettings: !ruby/object:Overrides::Terraform::ResourceOverride
    legacy_name: "google_project_access_approval_settings"
    import_format: ["projects/{{project}}/accessApprovalSettings"]
    examples:
      - !ruby/object:Provider::Terraform::Examples
        skip_test: true
        name: 'project_access_approval_full'
        primary_resource_id: 'project_access_approval'
        test_env_vars:
          project: :PROJECT_NAME
          org_id: :ORG_ID
    custom_code: !ruby/object:Provider::Terraform::CustomCode
      custom_delete: templates/terraform/custom_delete/clear_project_access_approval_settings.go.erb
      pre_create: templates/terraform/update_mask.erb
    properties:
      notificationEmails: !ruby/object:Overrides::Terraform::PropertyOverride
        is_set: true
        default_from_api: true
      enrolledServices: !ruby/object:Overrides::Terraform::PropertyOverride
        is_set: true
        set_hash_func: accessApprovalEnrolledServicesHash
  # organizations/{organization_id}/accessApprovalSettings
  OrganizationSettings: !ruby/object:Overrides::Terraform::ResourceOverride
    legacy_name: "google_organization_access_approval_settings"
    import_format: ["organizations/{{organization_id}}/accessApprovalSettings"]
    examples:
      - !ruby/object:Provider::Terraform::Examples
        skip_test: true
        name: 'organization_access_approval_full'
        primary_resource_id: 'organization_access_approval'
        test_env_vars:
          org_id: :ORG_ID
    custom_code: !ruby/object:Provider::Terraform::CustomCode
      custom_delete: templates/terraform/custom_delete/clear_organization_access_approval_settings.go.erb
      pre_create: templates/terraform/update_mask.erb
    properties:
      notificationEmails: !ruby/object:Overrides::Terraform::PropertyOverride
        is_set: true
        default_from_api: true
      enrolledServices: !ruby/object:Overrides::Terraform::PropertyOverride
        is_set: true
        set_hash_func: accessApprovalEnrolledServicesHash
# This is for copying files over
files: !ruby/object:Provider::Config::Files
  # These files have templating (ERB) code that will be run.
  # This is usually to add licensing info, autogeneration notices, etc.
  compile:
<%= lines(indent(compile('provider/terraform/product~compile.yaml'), 4)) -%>
|
products/accessapproval/terraform.yaml
|
# CI workflow: cross-compiles the production Rust binaries for each
# supported architecture whenever Rust sources or any build input changes,
# then uploads the ELF binaries as short-lived artifacts for the
# container-build workflows listed in the path filters.
name: Build Production Rust Code
on:
  push:
    branches: [ main ]
    # NOTE: this list is intentionally duplicated under pull_request below;
    # GitHub Actions rejects YAML anchors, so it cannot be deduplicated.
    paths:
    - .github/actions/build-component-per-arch/**
    - .github/actions/build-component-multi-arch/**
    - .github/workflows/build-rust-code.yml
    - .github/workflows/build-agent-container.yml
    - .github/workflows/build-controller-container.yml
    - .github/workflows/build-udev-video-broker-container.yml
    - .github/workflows/build-webhook-configuration-container.yml
    - build/containers/Dockerfile.agent
    - build/containers/Dockerfile.controller
    - build/containers/Dockerfile.udev-video-broker
    - build/containers/Dockerfile.webhook-configuration
    - '**.rs'
    - '**/Cargo.toml'
    - '**/Cargo.lock'
    - version.txt
    - build/akri-containers.mk
    - build/akri-rust-containers.mk
    - Makefile
  pull_request:
    branches: [ main ]
    paths:
    - .github/actions/build-component-per-arch/**
    - .github/actions/build-component-multi-arch/**
    - .github/workflows/build-rust-code.yml
    - .github/workflows/build-agent-container.yml
    - .github/workflows/build-controller-container.yml
    - .github/workflows/build-udev-video-broker-container.yml
    - .github/workflows/build-webhook-configuration-container.yml
    - build/containers/Dockerfile.agent
    - build/containers/Dockerfile.controller
    - build/containers/Dockerfile.udev-video-broker
    - build/containers/Dockerfile.webhook-configuration
    - '**.rs'
    - '**/Cargo.toml'
    - '**/Cargo.lock'
    - version.txt
    - build/akri-containers.mk
    - build/akri-rust-containers.mk
    - Makefile
  release:
    types:
      - published
jobs:
  build-rust:
    runs-on: ubuntu-latest
    timeout-minutes: 50
    strategy:
      # One job per target architecture; `make-target` selects the Makefile
      # cross-build goal and `rust-target-path` is the cargo target triple
      # used to locate the release binaries.
      matrix:
        arch:
          - label: arm64v8
            make-target: arm64
            rust-target-path: aarch64-unknown-linux-gnu
          - label: arm32v7
            make-target: arm32
            rust-target-path: armv7-unknown-linux-gnueabihf
          - label: amd64
            make-target: amd64
            rust-target-path: x86_64-unknown-linux-gnu
    steps:
      - name: Checkout the head commit of the branch
        uses: actions/checkout@v2
        with:
          persist-credentials: false
      - name: Rust install
        uses: actions-rs/toolchain@v1
        with:
          toolchain: 1.54.0
          override: true
          components: clippy, rustfmt
      # Registers QEMU binfmt handlers so `cross` can run non-native
      # toolchain containers, then cross-builds via the Makefile target.
      - name: Build production rust for ${{ matrix.arch.label }}
        env:
          AGENT_FEATURES: "agent-full onvif-feat opcua-feat udev-feat"
        run: |
          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
          make install-cross
          cross --version
          make akri-cross-build-${{ matrix.arch.make-target }}
      # Keeps only ELF executables from the release dir (skipping build
      # metadata files) and tars them for upload.
      - name: Package build binaries
        run: |
          tar_manifest='/tmp/tar-contents.txt'
          > $tar_manifest
          for f in target/${{ matrix.arch.rust-target-path }}/release/*; do filetype=$( file "$f" ); case "$filetype" in *ELF*) echo "$f" >> $tar_manifest ;; esac; done
          tar -cvf /tmp/rust-${{ matrix.arch.label }}-binaries.tar `cat $tar_manifest`
      # Short retention: the artifact only feeds the follow-on container
      # builds triggered by this run.
      - name: Upload target binaries as artifact
        uses: actions/upload-artifact@v2
        with:
          name: rust-${{ matrix.arch.label }}-binaries
          path: /tmp/rust-${{ matrix.arch.label }}-binaries.tar
          retention-days: 1
|
.github/workflows/build-rust-code.yml
|
---
# Configure SR-IOV virtual functions (VFs) on Intel QuickAssist (QAT)
# devices: write the requested per-device VF counts to a config file,
# install the cek_sriov_qat_init script that consumes it, and run the
# script from a systemd unit so the VFs are recreated on every boot.
- name: install dependencies
  include_role:
    name: install_dependencies
- name: confirm QAT service is correctly installed on the system
  include_tasks: check_qat_status.yml
- name: create configuration directory if it does not exist
  file:
    path: "{{ sriov_config_path }}"
    state: directory
    mode: '0700'
  become: true
# The config file is rebuilt from scratch on every run so stale device
# entries never survive an inventory change.
- name: remove existing configuration file if it exists
  file:
    path: "{{ sriov_config_path }}/cek_sriov_qat_numvfs"
    state: absent
  become: true
- name: create file for QAT ids to create defined VFs
  file:
    path: "{{ sriov_config_path }}/cek_sriov_qat_numvfs"
    state: touch
    owner: root
    group: root
    mode: '0700'
  become: true
# One "<pci id> <numvfs>" line per device; devices without an explicit
# qat_sriov_numvfs get 0 VFs.
- name: populate QAT vf template with vfs per bus location
  lineinfile:
    path: "{{ sriov_config_path }}/cek_sriov_qat_numvfs"
    line: "{{ item.qat_id }} {{ item.qat_sriov_numvfs | default(0) }}"
    owner: root
    group: root
    mode: '0700'
  become: true
  with_items: "{{ qat_devices }}"
# Rocky workaround: insert a dummy 2-VF record for the first device ahead
# of its real record to warm the device up before the full VF count is set.
- name: create the first dummy record to warmup QAT device on Rocky
  lineinfile:
    path: "{{ sriov_config_path }}/cek_sriov_qat_numvfs"
    insertbefore: "{{ qat_devices[0].qat_id }} {{ qat_devices[0].qat_sriov_numvfs | default(0) }}"
    line: "{{ qat_devices[0].qat_id }} 2"
  become: true
  when:
    - ansible_distribution == "Rocky"
    - qat_devices | length > 0
    # default(0) keeps this consistent with the templating above and avoids
    # an undefined-attribute failure when qat_sriov_numvfs is omitted.
    - (qat_devices[0].qat_sriov_numvfs | default(0) | int) > 0
- name: copy QAT SRIOV setup script to /usr/local/bin
  copy:
    src: "{{ role_path }}/files/cek_sriov_qat_init"
    dest: /usr/local/bin/cek_sriov_qat_init
    owner: root
    group: root
    mode: '0700'
  become: true
- name: create systemd unit file
  template:
    src: cek_sriov_qat_init.service.j2
    dest: /lib/systemd/system/cek_sriov_qat_init.service
    owner: root
    group: root
    mode: '0644'
  become: true
- name: ensure that systemd service is enabled and restarted
  systemd:
    name: cek_sriov_qat_init
    state: restarted
    enabled: true
    daemon_reload: true
  become: true
|
roles/bootstrap/configure_qat/tasks/main.yml
|