code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
---
# yamllint disable rule:line-length
- name: Install pgbouncer package
package:
name: pgbouncer
environment: "{{ proxy_env | default({}) }}"
when: ansible_os_family == "Debian" or
(ansible_os_family == "RedHat" and
ansible_distribution_major_version == '7')
tags: pgbouncer_install, pgbouncer
# RHEL 8
# RHEL 8+
- name: Install pgbouncer package
  dnf:
    name: pgbouncer
    disablerepo: AppStream
  environment: "{{ proxy_env | default({}) }}"
  # Cast to int before comparing: '>=' on strings is lexicographic,
  # so '10' >= '8' would be False and the task would skip on RHEL >= 10.
  when: ansible_os_family == "RedHat" and
        ansible_distribution_major_version | int >= 8
  tags: pgbouncer_install, pgbouncer
- name: Ensure config directory "{{ pgbouncer_conf_dir }}" exist
  file:
    path: "{{ pgbouncer_conf_dir }}"
    state: directory
    owner: postgres
    group: postgres
    # Quoted so YAML cannot reinterpret the leading-zero octal literal.
    mode: "0750"
  tags: pgbouncer_conf, pgbouncer
- name: Stop and disable standard init script
service:
name: pgbouncer
state: stopped
enabled: false
when: ansible_os_family == "Debian"
tags: pgbouncer_service, pgbouncer
- name: Copy systemd service file
  template:
    src: templates/pgbouncer.service.j2
    dest: /etc/systemd/system/pgbouncer.service
    owner: postgres
    group: postgres
    # Quoted so YAML cannot reinterpret the leading-zero octal literal.
    mode: "0644"
  notify: "restart pgbouncer"
  tags: pgbouncer_service, pgbouncer
- block: # workaround for pgbouncer from postgrespro repo
- name: Check that /usr/bin/pgbouncer is exists
stat:
path: /usr/bin/pgbouncer
register: pgbouncer_bin
- name: create a symlink to /usr/sbin/pgbouncer
file:
src: /usr/sbin/pgbouncer
dest: /usr/bin/pgbouncer
owner: root
group: root
state: link
when: not pgbouncer_bin.stat.exists
when: ansible_os_family == "RedHat" and
postgresql_packages is search("postgrespro")
tags: pgbouncer_service, pgbouncer
- name: Enable log rotation with logrotate
copy:
content: |
/var/log/pgbouncer/pgbouncer.log {
daily
rotate 7
copytruncate
delaycompress
compress
notifempty
missingok
su root root
}
dest: /etc/logrotate.d/pgbouncer
tags: pgbouncer_logrotate, pgbouncer
- name: Configure pgbouncer.ini
template:
src: templates/pgbouncer.ini.j2
dest: "{{ pgbouncer_conf_dir }}/pgbouncer.ini"
owner: postgres
group: postgres
mode: 0640
notify: "restart pgbouncer"
when: existing_pgcluster is not defined or not existing_pgcluster|bool
tags: pgbouncer_conf, pgbouncer
- name: Configure userlist.txt
template:
src: templates/userlist.txt.j2
dest: "{{ pgbouncer_conf_dir }}/userlist.txt"
owner: postgres
group: postgres
mode: 0640
notify: "restart pgbouncer"
when: existing_pgcluster is not defined or not existing_pgcluster|bool
tags: pgbouncer_conf, pgbouncer
- block: # for add_pgnode.yml
- name: Fetch pgbouncer.ini and userlist.txt conf files from master
run_once: true
fetch:
src: "{{ item }}"
dest: files/
validate_checksum: true
flat: true
loop:
- /etc/pgbouncer/pgbouncer.ini
- /etc/pgbouncer/userlist.txt
delegate_to: "{{ groups.master[0] }}"
- name: Copy pgbouncer.ini and userlist.txt conf files to replica
copy:
src: "files/{{ item }}"
dest: /etc/pgbouncer/
owner: postgres
group: postgres
mode: 0640
loop:
- pgbouncer.ini
- userlist.txt
- name: Prepare pgbouncer.ini conf file (replace "listen_addr")
lineinfile:
path: /etc/pgbouncer/pgbouncer.ini
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
backrefs: true
loop:
- {regexp: '^listen_addr =', line: 'listen_addr = {{ hostvars[inventory_hostname].inventory_hostname }}'}
loop_control:
label: "{{ item.line }}"
notify: "restart pgbouncer"
when: with_haproxy_load_balancing|bool or
(cluster_vip is not defined or cluster_vip | length < 1)
- name: Prepare pgbouncer.ini conf file (replace "listen_addr")
lineinfile:
path: /etc/pgbouncer/pgbouncer.ini
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
backrefs: true
loop:
- {regexp: '^listen_addr =', line: 'listen_addr = {{ hostvars[inventory_hostname].inventory_hostname }},{{ cluster_vip }}'}
loop_control:
label: "{{ item.line }}"
notify: "restart pgbouncer"
when: not with_haproxy_load_balancing|bool and (cluster_vip is defined and cluster_vip | length > 0 )
when: existing_pgcluster is defined and existing_pgcluster|bool
tags: pgbouncer_conf, pgbouncer
...
|
roles/pgbouncer/tasks/main.yml
|
name: ci
on: [push, pull_request]
env:
XDG_CACHE_HOME: ${{ github.workspace }}/.cache
ELECTRON_CACHE: ${{ github.workspace }}/.cache/electron
ELECTRON_BUILDER_CACHE: ${{ github.workspace }}/.cache/electron-builder
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [macos-latest, ubuntu-latest, windows-latest]
steps:
- name: Check out Git repository
uses: actions/checkout@v2
- name: Install Node.js
uses: actions/setup-node@v2
with:
node-version: 'lts/*'
- name: Cache bigger downloads
uses: actions/cache@v2
id: cache
with:
path: ${{ github.workspace }}/.cache
key: ${{ runner.os }}-${{ hashFiles('package.json', 'package-lock.json', 'electron-builder.yml') }}
restore-keys: |
${{ runner.os }}-${{ hashFiles('package.json', 'package-lock.json', 'electron-builder.yml') }}
${{ runner.os }}-
- name: Install dependencies
run: npm ci --prefer-offline --no-audit --progress=false --cache ${{ github.workspace }}/.cache/npm
- name: Build
run: npm run build
- name: Test
run: npm run test
- name: Test end-to-end
uses: GabrielBB/xvfb-action@86d97bde4a65fe9b290c0b3fb92c2c4ed0e5302d # v1.6
with:
working-directory: ${{ github.workspace }}
run: npm run test:e2e
- name: Lint
run: npm run lint
package:
runs-on: ${{ matrix.os }}
needs: build # build packages only if regular build and tests passed
strategy:
fail-fast: false
matrix:
os: [macos-latest, ubuntu-latest, windows-latest]
steps:
- name: Check out Git repository
uses: actions/checkout@v2
- name: Install Node.js
uses: actions/setup-node@v2
with:
node-version: 'lts/*'
- name: Cache webui
uses: actions/cache@v2
id: webui-cache
with:
path: assets/webui
key: ${{ hashFiles('package.json') }}
- name: Cache bigger downloads
uses: actions/cache@v2
id: cache
with:
path: ${{ github.workspace }}/.cache
key: ${{ runner.os }}-${{ hashFiles('package.json', 'package-lock.json', 'electron-builder.yml') }}
restore-keys: |
${{ runner.os }}-${{ hashFiles('package.json', 'package-lock.json', 'electron-builder.yml') }}
${{ runner.os }}-
- name: Install dependencies
run: npm ci --prefer-offline --no-audit --progress=false --cache ${{ github.workspace }}/.cache/npm
- name: Build
run: npm run build
- name: Get tag
id: tag
uses: dawidd6/action-get-tag@12319896edaa290b27558e34a177804e9b8d077b # v1
continue-on-error: true # empty steps.tag.outputs.tag will inform the next step
- name: Build binaries with electron-builder
uses: samuelmeuli/action-electron-builder@92327c67bc45ff7c38bf55d8aa8c4d75b7ea38e7 # v1.6.0 but safer than a tag that can be changed
with:
args: --publish onTag # attach signed binaries to a release draft only when building a tag
release: false # keep github release as draft for manual inspection
max_attempts: 2
# GH token for attaching atrifacts to release draft on tag build
github_token: ${{ secrets.github_token }}
# Windows signing
windows_certs: ${{ secrets.windows_certs }}
windows_certs_password: ${{ secrets.windows_certs_password }}
# Apple signing
mac_certs: ${{ secrets.mac_certs }}
mac_certs_password: ${{ secrets.mac_certs_password }}
env:
CI_BUILD_TAG: ${{steps.tag.outputs.tag}} # used by --publish onTag
# Apple notarization
APPLEID: ${{ secrets.apple_id }}
APPLEIDPASS: ${{ secrets.apple_id_pass }}
- name: Show dist/
run: du -sh dist/ && ls -l dist/
# Persist produced binaries and effective config used for building them
# - this is not for releases, but for quick testing during the dev
# - action artifacts can be downloaded for 90 days, then are removed by github
# - binaries in PRs from forks won't be signed
- name: Attach produced packages to Github Action
uses: actions/upload-artifact@v2
with:
name: dist-${{ matrix.os }}
path: dist/*tation*.*
if-no-files-found: error
- name: Show Cache
run: du -sh ${{ github.workspace }}/.cache/ && ls -l ${{ github.workspace }}/.cache/
|
.github/workflows/ci.yml
|
---
####################
# Load Certificates
####################
- name: Create kubernetes data dir
  # Native block YAML instead of a key=value free-form string
  # (Ansible best practice; easier to diff and lint).
  file:
    path: /var/lib/kubernetes
    state: directory
  become: true
- name: Copy certificates
copy:
src: "{{ playbook_dir }}/../cert/{{ item }}"
dest: /var/lib/kubernetes
owner: root
group: root
mode: 0644
with_items:
- ca.pem
- kubernetes.pem
- kubernetes-key.pem
become: true
#########
# Docker
#########
- name: Download docker binaries
get_url:
url: "{{ docker_download_url }}"
dest: "/usr/local/src"
# TODO Add hash check
become: true
- name: Unpack docker binaries
  unarchive:
    # The archive is already on the remote host (downloaded above).
    copy: false
    src: "/usr/local/src/{{ docker_package_file }}"
    dest: "/usr/local/src/"
    # Was "/local/src/docker/docker", which never exists, so the task
    # re-extracted on every run; guard path must match dest.
    creates: "/usr/local/src/docker/docker"
  become: true
- name: Copy docker binaries
  copy:
    remote_src: true
    src: "/usr/local/src/docker/{{ item }}"
    dest: /usr/bin
    # Quoted so YAML cannot reinterpret the leading-zero octal literal.
    mode: "0755"
  with_items: # Remote-to-remote copy doesn't support file glob yet
    - docker
    - docker-containerd
    - docker-containerd-ctr
    - docker-containerd-shim
    - docker-runc
  become: true
- name: Add docker systemd unit
  template:
    src: docker.service.j2
    dest: /etc/systemd/system/docker.service
    # Was the bare integer 700, which Ansible treats as DECIMAL 700
    # (= octal 1274) and sets bogus permissions; octal modes must be
    # quoted strings.
    mode: "0700"
  become: true
  notify:
    - Reload systemd
    - Enable docker service
    - Restart docker service
###########
# Kubelet
###########
- name: Create CNI dir
file: path=/opt/cni state=directory
become: true
- name: Download CNI binaries
get_url:
url: "{{ cni_download_url }}"
dest: "/usr/local/src"
# TODO Add hash check
become: true
- name: Unpack CNI binaries
unarchive:
copy: no
src: "/usr/local/src/{{ cni_package_file }}"
dest: "/opt/cni/"
creates: "/opt/cni/bin/cnitool"
become: true
- name: Create Kubelet directory
file: path=/var/lib/kubelet state=directory
become: true
- name: Download Kubelets binaries
  get_url:
    # Fixed missing space before the closing Jinja braces.
    url: "{{ kubelet_download_path }}/{{ item }}"
    dest: "/usr/bin"
    group: root
    owner: root
    # Quoted so YAML cannot reinterpret the leading-zero octal literal.
    mode: "0755"
  # TODO Add hash check
  with_items:
    - kubectl
    - kube-proxy
    - kubelet
  become: true
- name: Add Kubelet configuration
  template:
    src: kubeconfig.j2
    dest: /var/lib/kubelet/kubeconfig
    # Was the bare integer 644, which Ansible treats as DECIMAL 644
    # (= octal 1204) and sets bogus permissions; octal modes must be
    # quoted strings.
    mode: "0644"
  become: true
- name: Add kubelet systemd unit
  template:
    src: kubelet.service.j2
    dest: /etc/systemd/system/kubelet.service
    # Was the bare integer 700 (decimal = octal 1274 — wrong perms);
    # octal modes must be quoted strings.
    mode: "0700"
  become: true
  notify:
    - Reload systemd
    - Enable kubelet service
    - Restart kubelet service
#############
# Kube-proxy
#############
- name: Add kube-proxy systemd unit
  template:
    src: kube-proxy.service.j2
    dest: /etc/systemd/system/kube-proxy.service
    # Was the bare integer 700 (decimal = octal 1274 — wrong perms);
    # octal modes must be quoted strings.
    mode: "0700"
  become: true
  notify:
    - Reload systemd
    - Enable kube-proxy service
    - Restart kube-proxy service
|
ansible/roles/worker/tasks/main.yml
|
get:
# 積分平均サウンドレベルメータ
tags:
- noise
summary: |
騒音レベルの取得
# 等価騒音レベル・単発騒音暴露レベル・ピーク音圧レベルの取得
operationId: getNoise
description: |
下記3項目を測定する。
周波数重み付け特性はA特性を適用する。
- 等価騒音レベル(Leq)
Equivalent continuous A-weighted sound pressure level.
指定した測定時間内の騒音の総エネルギーの時間平均値を音圧レベル表示した値。
環境騒音の評価量として用いられる。
- 単発騒音暴露レベル(SEL)
Single Event Noise exposure level.
単発的に発生する騒音の全エネルギーと等しいエネルギーを持つ継続時間1秒間の定常音の音圧レベルに換算した値。
単発的または間欠的に発生する継続時間の短い騒音を測定する評価量として用いられる。
- ピーク音圧レベル(Lpeak)
指定した測定時間内の騒音の最大値を音圧レベル表示した値。
※本製品はJIS C 1509や計量法に定められた騒音計には適合しておりません。
parameters:
- name: timeSec
in: query
description: |
測定時間 (単位:sec)
省略時:0.1024 sec
required: false
schema:
type: number
format: float
minimum: 0
example: 0.5
- name: timeWeight
in: query
description: |
時間重み付け特性
省略時:Slow
- "slow":時定数1sec。
- "fast":時定数0.125sec。
- "impulse":時定数0.035sec。
required: false
schema:
type: string
enum: ["slow", "fast", "impulse"]
example: "slow"
default: "slow"
# - name: frqWeight
# in: query
# description: |
# 周波数重み付け特性
# 省略時:A特性
# - "A":A特性。感覚量を近似した周波数特性。一般の等価騒音レベル測定に用いる。
# - "C":C特性。音圧レベル(物理量)を近似した周波数特性。
# - "Z":Z特性。重み付けしない平坦な周波数特性。
# required: false
# schema:
# type: string
# enum: ["A", "C", "Z"]
# example: "A"
# default: "A"
# requestBody:
# #description: |
# # timeに測定時間を指定しなければならない。
# content:
# application/json:
# schema:
# $ref: "./../components/soundLevel_setting.yaml"
responses:
  # OpenAPI 3 requires HTTP status codes to be strings; unquoted keys
  # parse as YAML integers and fail strict spec validators.
  "200":
    description: 測定成功
    content:
      application/json:
        schema:
          $ref: "./../components/soundLevel_result.yaml"
  "409":
    description: 生値を取得中のため測定できない
    content:
      application/json:
        schema:
          $ref: './../components/error.yaml'
|
mc-uep100/v2.0.x/ja/paths/noiseLevel.yaml
|
---
banner:
enable: true
title: 'Cases : Medical Logbook App'
content: 'Cases : is an iOS App that allows you to keep a track of interesting medical
cases for your own learning. Data is stored locally on the device. Add Anonymised
details about the case, along with images, and organise by tags.'
image: "/images/icon.png"
button:
enable: true
label: Download
link: https://apps.apple.com/us/developer/abhishek-karale/id1087945443
features:
enable: true
title: Our Core Features
image: images/watch-2.png
left_side:
- title: Store Anonymised Cases
icon: tf-ion-ios-file-tray-full
content: Add Case details, learning points and images
- title: Organise via Tags
icon: tf-ion-ios-bookmark
content: Search by Tags or Case Name
- title: Data locally stored
icon: tf-ion-ios-lock-closed
content: 'Data is stored locally on the device. '
right_side: []
promo:
enable: false
image: images/watch.png
title: Designed by professional , the benefit for creative gigs
content: Lorem ipsum dolor sit amet, consectetur adipisicing elit. Quia vel labore,
deleniti minima nisi, velit atque quaerat impedit ea maxime sunt accusamus at
obcaecati dolor iure iusto omnis quis eum. <br><br> Lorem ipsum dolor sit amet,
consectetur adipisicing elit. Facilis commodi odit, illo, qui aliquam dol
about_product:
enable: false
title: Why Choose Apple Watch
items:
- image: images/showcase/showcase-4.png
title: Lorem ipsum dolor sit amet.
content: Lorem ipsum dolor sit amet, consectetur adipisicing elit. Voluptate,
sed, assumenda. Tenetur sed esse, voluptas voluptate est veniam numquam, quis
magni. Architecto minus suscipit quas, quo harum deserunt consequatur cumque!
button:
enable: false
label: Check Features
link: "#"
- image: images/showcase/showcase-3.png
title: Lorem ipsum dolor sit amet.
content: Lorem ipsum dolor sit amet, consectetur adipisicing elit. Voluptate,
sed, assumenda. Tenetur sed esse, voluptas voluptate est veniam numquam, quis
magni. Architecto minus suscipit quas, quo harum deserunt consequatur cumque!
button:
enable: false
label: Check Features
link: "#"
products:
enable: false
title: Checkout Our All Products
subscription:
enable: false
bg_image: images/call-to-action.jpg
title: Get Product Updates
content: Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
placeholder: Your Email Address Here
testimonial:
enable: false
title: Watch Review
testimonial_item:
- name: <NAME>
designation: CEO, Themefisher
image: images/avater.png
content: A wonderful serenity has taken possession of my entire soul, like these
sweet mornings of spring which I enjoy with my whole heart. I am alone, and
feel the charm of existence in this spot, which was created for the bliss of
souls like mine. I am so happy, my dear friend, so absorbed in the exquisite
sense of mere tranquil existence, that I neglect my talents.
- name: <NAME>
designation: CEO, Themefisher
image: images/avater.png
content: A wonderful serenity has taken possession of my entire soul, like these
sweet mornings of spring which I enjoy with my whole heart. I am alone, and
feel the charm of existence in this spot, which was created for the bliss of
souls like mine. I am so happy, my dear friend, so absorbed in the exquisite
sense of mere tranquil existence, that I neglect my talents.
- name: <NAME>
designation: CEO, Themefisher
image: images/avater.png
content: A wonderful serenity has taken possession of my entire soul, like these
sweet mornings of spring which I enjoy with my whole heart. I am alone, and
feel the charm of existence in this spot, which was created for the bliss of
souls like mine. I am so happy, my dear friend, so absorbed in the exquisite
sense of mere tranquil existence, that I neglect my talents.
|
exampleSite/data/en/homepage.yml
|
---
install_repository_dependencies: true
install_resolver_dependencies: true
install_tool_dependencies: false
tools:
# data_retrieval
- name: 'sra_tools'
owner: 'iuc'
tool_panel_section_label: 'Get Data'
# quality_control
- name: 'fastqc'
owner: 'devteam'
tool_panel_section_label: 'FASTQ Quality Control'
- name: 'prinseq'
owner: 'iuc'
tool_panel_section_label: 'FASTQ Quality Control'
- name: 'trim_galore'
owner: 'bgruening'
tool_panel_section_label: "FASTA/FASTQ"
- name: 'trimmomatic'
owner: 'pjbriggs'
tool_panel_section_label: "FASTA/FASTQ"
- name: 'multiqc'
owner: 'iuc'
tool_panel_section_label: 'FASTQ Quality Control'
# clustering
- name: 'cdhit'
owner: 'bebatut'
tool_panel_section_label: 'Multiple Alignments'
- name: 'format_cd_hit_output'
owner: 'bebatut'
tool_panel_section_label: 'Multiple Alignments'
# sorting
- name: 'sortmerna'
owner: 'rnateam'
tool_panel_section_label: 'Annotation'
- name: 'data_manager_sortmerna_database_downloader'
owner: 'rnateam'
tool_panel_section_label: Data Managers
# similarity_search
- name: 'ncbi_blast_plus'
owner: 'devteam'
tool_panel_section_label: 'NCBI Blast'
- name: 'diamond'
owner: 'bgruening'
tool_panel_section_label: 'NCBI Blast'
# mapping
- name: 'bwa'
owner: 'devteam'
tool_panel_section_label: 'Mapping'
- name: 'bowtie2'
owner: 'devteam'
tool_panel_section_label: 'Mapping'
# alignment
- name: 'hmmer3'
owner: 'iuc'
tool_panel_section_label: 'Annotation'
# metagenomics_manipulation
- name: 'vsearch'
owner: 'iuc'
tool_panel_section_label: 'Metagenomic Analysis'
- name: 'nonpareil'
owner: 'iuc'
tool_panel_section_label: 'Metagenomic Analysis'
# assembly
- name: 'megahit'
owner: 'iuc'
tool_panel_section_label: 'Assembly'
- name: 'metaspades'
owner: 'nml'
tool_panel_section_label: 'Assembly'
- name: 'quast'
owner: 'iuc'
tool_panel_section_label: 'Assembly'
- name: 'valet'
owner: 'iuc'
tool_panel_section_label: 'Metagenomic Analysis'
# amplicon_sequence_processing
- name: 'data_manager_qiime_database_downloader'
owner: 'iuc'
tool_panel_section_label: Data Managers
# wgs_taxonomic_assignations
- name: 'format_metaphlan2_output'
owner: 'bebatut'
tool_panel_section_label: 'Metagenomic Analysis'
# metabolism_assignation
- name: 'group_humann2_uniref_abundances_to_go'
owner: 'bebatut'
tool_panel_section_label: 'Metagenomic Analysis'
- name: 'compare_humann2_output'
owner: 'bebatut'
tool_panel_section_label: 'Metagenomic Analysis'
- name: 'interproscan5'
owner: 'bgruening'
tool_panel_section_label: 'Metagenomic Analysis'
# combination_taxo_func
- name: 'combine_metaphlan2_humann2'
owner: 'bebatut'
tool_panel_section_label: 'Metagenomic Analysis'
# visualization
- name: 'export2graphlan'
owner: 'iuc'
tool_panel_section_label: 'Graph/Display Data'
- name: 'taxonomy_krona_chart'
owner: 'crs4'
tool_panel_section_label: 'Graph/Display Data'
|
asaim.yaml
|
# Default values for Landscaper's Helm deployer.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
deployer:
# If the deployer runs in a different cluster than the Landscaper instance, provide the kubeconfig
# to access the remote Landscaper cluster here (inline or via secretRef). When providing a
# secretRef, see ./templates/landscaper-cluster-kubeconfig-secret.yaml for the correct secret format.
# If no value is provided at all, the deployer will default to the in-cluster kubeconfig.
landscaperClusterKubeconfig: {}
# secretRef: my-kubeconfig-secret
# kubeconfig: |
# <landscaper-cluster-kubeconfig>
# identity: ""
namespace: ""
oci:
allowPlainHttp: false
insecureSkipVerify: false
secrets: {}
# <name>: <docker config json>
# verbosityLevel: 9
# targetSelector:
# - annotations:
# - key:
# operator:
# value:
replicaCount: 1
image:
repository: eu.gcr.io/gardener-project/landscaper/helm-deployer-controller
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
# tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
|
charts/helm-deployer/values.yaml
|
uid: "com.azure.storage.common.sas.AccountSasSignatureValues"
fullName: "com.azure.storage.common.sas.AccountSasSignatureValues"
name: "AccountSasSignatureValues"
nameWithType: "AccountSasSignatureValues"
summary: "Used to initialize parameters for a Shared Access Signature (SAS) for an Azure Storage account. Once all the values here are set, use the generateSas method on the desired service client to obtain a representation of the SAS which can then be applied to a new client using the .sasToken(String) method on the desired client builder."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public final class AccountSasSignatureValues"
constructors:
- "com.azure.storage.common.sas.AccountSasSignatureValues.AccountSasSignatureValues()"
- "com.azure.storage.common.sas.AccountSasSignatureValues.AccountSasSignatureValues(java.time.OffsetDateTime,com.azure.storage.common.sas.AccountSasPermission,com.azure.storage.common.sas.AccountSasService,com.azure.storage.common.sas.AccountSasResourceType)"
methods:
- "com.azure.storage.common.sas.AccountSasSignatureValues.generateSasQueryParameters(com.azure.storage.common.StorageSharedKeyCredential)"
- "com.azure.storage.common.sas.AccountSasSignatureValues.getExpiryTime()"
- "com.azure.storage.common.sas.AccountSasSignatureValues.getPermissions()"
- "com.azure.storage.common.sas.AccountSasSignatureValues.getProtocol()"
- "com.azure.storage.common.sas.AccountSasSignatureValues.getResourceTypes()"
- "com.azure.storage.common.sas.AccountSasSignatureValues.getSasIpRange()"
- "com.azure.storage.common.sas.AccountSasSignatureValues.getServices()"
- "com.azure.storage.common.sas.AccountSasSignatureValues.getStartTime()"
- "com.azure.storage.common.sas.AccountSasSignatureValues.getVersion()"
- "com.azure.storage.common.sas.AccountSasSignatureValues.setExpiryTime(java.time.OffsetDateTime)"
- "com.azure.storage.common.sas.AccountSasSignatureValues.setPermissions(com.azure.storage.common.sas.AccountSasPermission)"
- "com.azure.storage.common.sas.AccountSasSignatureValues.setProtocol(com.azure.storage.common.sas.SasProtocol)"
- "com.azure.storage.common.sas.AccountSasSignatureValues.setResourceTypes(java.lang.String)"
- "com.azure.storage.common.sas.AccountSasSignatureValues.setSasIpRange(com.azure.storage.common.sas.SasIpRange)"
- "com.azure.storage.common.sas.AccountSasSignatureValues.setServices(java.lang.String)"
- "com.azure.storage.common.sas.AccountSasSignatureValues.setStartTime(java.time.OffsetDateTime)"
- "com.azure.storage.common.sas.AccountSasSignatureValues.setVersion(java.lang.String)"
type: "class"
metadata: {}
package: "com.azure.storage.common.sas"
artifact: com.azure:azure-storage-common:12.11.0-beta.1
|
preview/docs-ref-autogen/com.azure.storage.common.sas.AccountSasSignatureValues.yml
|
namespace: python-2104
name: validate_unittest
description: Run the unittest using the tox tool
version: 2021.6.23
maintainer: <EMAIL>
images:
manylinux2014: quay.io/pypa/manylinux2014_x86_64
manylinux2010: quay.io/pypa/manylinux2010_x86_64
manylinux1_32: quay.io/pypa/manylinux1_i686
manylinux1_64: quay.io/pypa/manylinux1_x86_64
ubuntu: ubuntu:latest
config:
template: python-2104/base@latest
order: [ begin, init_os, install_dependencies, set_package_env, config_preset, update_version, validate, end, teardown-store_artifacts, teardown-add_test_metadata, teardown-add_coverage_metadata, teardown-codecov ]
environment:
SD_COVERAGE_PLUGIN_ENABLED: 'true'
TEST_RUNNER: pytest
TOX_ARGS: --skip-missing-interpreters true
TOX_ENVLIST: py37,py38,py39,py310
secrets:
- CODECOV_TOKEN
- COVERALLS_REPO_TOKEN
steps:
- validate: screwdrivercd_validate_unittest
- teardown-add_test_metadata: |
$BASE_PYTHON -m pip install junitparser
$BASE_PYTHON << EOF
import json, os,subprocess, sys
from junitparser import JUnitXml
testdir = os.path.join(os.environ["SD_ARTIFACTS_DIR"], 'test')
if not os.path.exists(testdir):
print('No test metadata files to parse', flush=True)
sys.exit(0)
test_failed = 0
test_run = 0
test_ok = 0
for filename in os.listdir(testdir):
if 'test' not in filename:
continue
if not filename.endswith('.xml'):
continue
xml = JUnitXml.fromfile(os.path.join(testdir, filename))
test_failed += xml.errors + xml.failures
test_run += xml.tests - xml.skipped
if test_run == 0:
print('No tests run, not setting metadata', flush=True)
sys.exit(0)
test_ok = test_run - test_failed
test_summary = f'{test_ok}/{test_run}'
print(f'meta set tests.results {test_summary}')
subprocess.call(['meta', 'set', 'tests.results', test_summary])
status = {
"status": "SUCCESS",
"message": f"{test_summary} tests passed"
}
job_name = os.environ.get('SD_JOB_NAME', None)
if job_name:
job_name = job_name.split(':')[-1]
print(f'meta set meta.status.{job_name}', json.dumps(status))
subprocess.call(['meta', 'set', f'meta.status.{job_name}', json.dumps(status)])
EOF
- teardown-add_coverage_metadata: |
$BASE_PYTHON -m pip install coverage
$BASE_PYTHON << EOF
import os, subprocess, sys
if not os.path.exists('.coverage'):
print('No coverage data found', flush=True)
sys.exit(0)
try:
output = subprocess.check_output(['coverage', 'report'])
except (subprocess.CalledProcessError, FileNotFoundError):
print('Not able to access coverage report, not updating coverage', flush=True)
sys.exit(0)
lines = output.decode(errors='ignore').split(os.linesep)
for line in [ _.strip() for _ in lines]:
if line.startswith('TOTAL'):
coverage_percentage = line.split()[-1].replace('%', '')
print(f'meta set tests.coverage {float(coverage_percentage)}')
subprocess.call(['meta', 'set', 'tests.coverage', str(float(coverage_percentage))])
EOF
- teardown-codecov: |
if [ "$CODECOV_TOKEN" != "" ]; then
$BASE_PYTHON -m pip install codecov .[test]
codecov
fi
|
releases/python-2104/templates/validation/code_unit_test.yaml
|
name: Build and Deploy Candidate; Initiate Iter8
on:
workflow_dispatch:
env:
IBM_CLOUD_API_KEY: ${{ secrets.IBM_CLOUD_API_KEY }}
IBM_CLOUD_RESOURCE_GROUP: 585330279c4b4175890b5c59dab786fb
IBM_CLOUD_REGION: us-south
# IKS_CLUSTER: kalantar-20211029-1 # name does not work
IKS_CLUSTER: c5u1ej8d00c0squuquog # name or id of cluster
INGRESS_SUBDOMAIN: 'kalantar-20211029-1-f0f5a1e5d9c5f09a7767f1f253010cba-0000.us-south.containers.appdomain.cloud'
EXPERIMENT_FOLDER: initiate-example
jobs:
build:
name: build, deploy and initiate
runs-on: ubuntu-latest
steps:
# Build new version as docker image and push to registry
- name: Checkout repository
uses: actions/checkout@v2
# Set up docker, determine image tag
- name: Extract details from event
run: |
ownerrepo=${{ github.repository }}
owner=$(echo $ownerrepo | cut -f1 -d/)
if [[ "$owner" == "iter8-tools" ]]; then
owner=iter8
fi
echo "OWNER=$owner" >> $GITHUB_ENV
echo "REPO=$(echo $ownerrepo | cut -f2 -d/)" >> $GITHUB_ENV
- run: echo "IMAGE=${{ env.OWNER }}/${{ env.REPO }}:${{ github.sha }}" >> $GITHUB_ENV
- run: echo "NAMESPACE=initiate-test-${{ github.sha }}" >> $GITHUB_ENV
# Build and push image (to DockerHub)
- uses: docker/setup-buildx-action@v1
- uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_SECRET }}
- name: Build and push image
uses: docker/build-push-action@v2
with:
tags: ${{ env.IMAGE }}
push: true
- run: echo "Deploy candidate"
# Setup: Install Go/Iter8 so can use iter8 gen go to generate a deployment.yaml
# An alternative would be helm or kustomize
# Iter8 is, of course, also used for iter8 gen k8s to initiate the experiment
- name: Install Go
  uses: actions/setup-go@v2
  with:
    # Quoted: unquoted version numbers are YAML floats
    # (e.g. 1.20 would silently become 1.2).
    go-version: "1.17"
- name: Install Iter8 CLI
run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@latest
# Setup: Install IBM Cloud CLI
- name: Install IBM Cloud CLI
run: |
curl -fsSL https://clis.cloud.ibm.com/install/linux | sh
ibmcloud --version
ibmcloud config --check-version=false
ibmcloud plugin install -f kubernetes-service
ibmcloud plugin install -f container-registry
- name: Authenticate with IBM Cloud CLI
run: |
ibmcloud login --apikey "${IBM_CLOUD_API_KEY}" -r "${IBM_CLOUD_REGION}" -g default
ibmcloud cr region-set "${IBM_CLOUD_REGION}"
ibmcloud cr login
- name: Identify target cluster
run: |
ibmcloud target -g $IBM_CLOUD_RESOURCE_GROUP
ibmcloud ks cluster config --cluster $IKS_CLUSTER
kubectl config current-context
- name: Create target namespace
run: |
kubectl create ns ${{ env.NAMESPACE }} || true
- name: Deploy app
run: |
cd $EXPERIMENT_FOLDER
iter8 gen go --set image=${{ env.IMAGE }} | kubectl -n ${{ env.NAMESPACE }} apply -f -
- name: Wait for app to Start
run: kubectl -n ${{ env.NAMESPACE }} wait --for=condition=available --timeout=60s --all deployments
- name: Determine port
run: |
port=$(kubectl -n ${{ env.NAMESPACE }} get svc myservice -o jsonpath='{.spec.ports[0].nodePort}')
echo "PORT=$port" >> $GITHUB_ENV
# Initiate Iter8 experiment by creating experiment
# Then deploying it
- name: Run Iter8 Experiment (in default namespace of cluster)
run: |
cd $EXPERIMENT_FOLDER
iter8 gen exp \
--set ref=${{ github.sha }} \
--set url="http://$INGRESS_SUBDOMAIN:$PORT" \
--set image="${{ env.IMAGE }}" \
--set namespace="${{ env.NAMESPACE }}" \
--set user='$USER' \
--set token='$TOKEN'
cat experiment.yaml
iter8 gen k8s --set loglevel=trace | kubectl apply -f -
|
.github/workflows/build-deploy-test.yaml
|
---
# MaaS Release: stable or next
maas_release: "stable"
# Which component to deploy?
maas_component: "none"
# MaaS defaults
maas_api_server: 172.16.34.1
maas_url: http://{{ maas_api_server }}:5240/MAAS/api/2.0
maas_admin_username: admin
# NOTE(review): <PASSWORD> / <EMAIL> are scrubbed placeholders — override them
# per deployment (ideally from a vault); never commit real credentials here.
maas_admin_passwd: <PASSWORD>
maas_admin_email: <EMAIL>
# File on the target host where the admin API key is cached.
maas_admin_api_key: /home/{{ regular_system_user }}/maas_admin_api_key
maas_admin_public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
#maas_rack1_public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
# WARNING: Copying MaaS Secret into an Ansible Var!
#maas_secret: "{{ lookup('file', '/var/lib/maas/secret') }}"
#
# MaaS Networks
#
maas_fabrics:
  - name: "mgmt-0"
maas_spaces:
  - name: "infra"
  - name: "ceph"
  - name: "cloud-public"
  - name: "cloud-private"
  - name: "internet"
# NOTE(review): "fabric" below appears to be a numeric index into maas_fabrics,
# yet only one fabric is declared above while indexes 0 and 1 are both used —
# confirm against the tasks that consume these variables.
maas_vlans:
  - name: "ipmi"
    id: 1000
    space: "infra"
    fabric: 0
  - name: "servers"
    id: 1001
    space: "infra"
    fabric: 0
  - name: "services"
    id: 1002
    space: "infra"
    fabric: 0
  - name: "maas-pxe"
    id: 1004
    space: "infra"
    fabric: 1
  - name: "ceph-public"
    id: 1010
    space: "ceph"
    fabric: 0
  - name: "ceph-private"
    id: 1011
    space: "ceph"
    fabric: 0
  - name: "os-infra"
    id: 1020
    space: "cloud-public"
    fabric: 0
  - name: "os-mgmt"
    id: 1021
    space: "cloud-private"
    fabric: 0
  - name: "os-sdn"
    id: 1022
    space: "cloud-private"
    fabric: 0
  - name: "os-storage"
    id: 1023
    space: "cloud-private"
    fabric: 0
  - name: "os-float"
    id: 1024
    space: "cloud-public"
    fabric: 0
# Subnets; "vlan_id" matches the "id" field of an entry in maas_vlans above.
maas_subnets:
  - name: "IPMI"
    cidr: "172.16.240.0/24"
    vlan_id: 1000
    fabric: 0
  - name: "Servers"
    cidr: "172.16.241.0/24"
    vlan_id: 1001
    fabric: 0
  - name: "Services"
    cidr: "172.16.242.0/24"
    vlan_id: 1002
    fabric: 0
  - name: "MaaS PXE"
    cidr: "172.16.244.0/23"
    vlan_id: 1004
    fabric: 1
    # DHCP is enabled only on the PXE subnet; reserved_ip_start/end bound the
    # range MaaS manages here. NOTE(review): MaaS distinguishes "reserved" from
    # "dynamic" ranges — confirm which one the consuming tasks create.
    reserved_ip_start: "172.16.244.10"
    reserved_ip_end: "172.16.244.200"
    dhcp_on: true
    dhcp_rack_primary: "vmara-1"
    dhcp_rack_secondary: "vmara-2"
  - name: "Ceph Public"
    cidr: "172.16.246.0/24"
    vlan_id: 1010
    fabric: 0
  - name: "Ceph Private"
    cidr: "172.16.247.0/24"
    vlan_id: 1011
    fabric: 0
  - name: "OpenStack Public Infra"
    cidr: "172.16.248.0/24"
    vlan_id: 1020
    fabric: 0
  - name: "OpenStack Management"
    cidr: "172.16.249.0/24"
    vlan_id: 1021
    fabric: 0
  - name: "OpenStack SDN"
    cidr: "172.16.250.0/24"
    vlan_id: 1022
    fabric: 0
  - name: "OpenStack Storage"
    cidr: "172.16.251.0/24"
    vlan_id: 1023
    fabric: 0
  - name: "OpenStack Public Floating IPs"
    cidr: "172.16.252.0/22"
    vlan_id: 1024
    fabric: 0
|
ansible/roles/maas/defaults/main.yml
|
---
name: Docker Build

# Build and publish the bot and backend images when a release is published,
# then run the deploy script on the VPS over SSH.
on:
  release:
    types: [published]

env:
  BOT_IMAGE_NAME: bot
  BACKEND_IMAGE_NAME: backend

jobs:
  build-bot:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build image
        run: docker build . --build-arg VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') --file cmd/bot/Dockerfile --tag $BOT_IMAGE_NAME
      - name: Log into registry
        run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u ${{ github.actor }} --password-stdin
      - name: Push image
        run: |
          IMAGE_ID=docker.pkg.github.com/${{ github.repository }}/$BOT_IMAGE_NAME
          # Registry paths must be lowercase.
          IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]')
          # Use the git ref's last segment as the version, stripping a leading "v".
          VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')
          [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//')
          echo IMAGE_ID=$IMAGE_ID
          echo VERSION=$VERSION
          docker tag $BOT_IMAGE_NAME $IMAGE_ID:$VERSION
          docker tag $BOT_IMAGE_NAME $IMAGE_ID:latest
          docker push --all-tags $IMAGE_ID
  build-backend:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build image
        run: docker build . --build-arg VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') --file cmd/backend/Dockerfile --tag $BACKEND_IMAGE_NAME
      - name: Log into registry
        run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u ${{ github.actor }} --password-stdin
      - name: Push image
        run: |
          IMAGE_ID=docker.pkg.github.com/${{ github.repository }}/$BACKEND_IMAGE_NAME
          # Registry paths must be lowercase.
          IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]')
          # Use the git ref's last segment as the version, stripping a leading "v".
          VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')
          [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//')
          echo IMAGE_ID=$IMAGE_ID
          echo VERSION=$VERSION
          docker tag $BACKEND_IMAGE_NAME $IMAGE_ID:$VERSION
          docker tag $BACKEND_IMAGE_NAME $IMAGE_ID:latest
          docker push --all-tags $IMAGE_ID
  deploy:
    needs:
      - build-bot
      - build-backend
    runs-on: ubuntu-latest
    steps:
      - name: Log into VPS
        uses: appleboy/ssh-action@master
        with:
          # Inputs use the action's documented lowercase names; the previous
          # mixed case (USERNAME/PORT/KEY next to lowercase "host") worked only
          # because GitHub treats input names case-insensitively.
          host: ${{ secrets.HOST }}
          username: ${{ secrets.USERNAME }}
          port: ${{ secrets.PORT }}
          key: ${{ secrets.SSHKEY }}
          script: ./deploy.sh
|
.github/workflows/docker-build.yml
|
name: VirtualIPMapping
uid: '@azure/arm-appservice-profile-2020-09-01-hybrid.VirtualIPMapping'
package: '@azure/arm-appservice-profile-2020-09-01-hybrid'
summary: Virtual IP mapping.
fullName: VirtualIPMapping
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
- name: internalHttpPort
uid: >-
@azure/arm-appservice-profile-2020-09-01-hybrid.VirtualIPMapping.internalHttpPort
package: '@azure/arm-appservice-profile-2020-09-01-hybrid'
summary: Internal HTTP port.
fullName: internalHttpPort
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'internalHttpPort?: undefined | number'
return:
description: ''
type: undefined | number
- name: internalHttpsPort
uid: >-
@azure/arm-appservice-profile-2020-09-01-hybrid.VirtualIPMapping.internalHttpsPort
package: '@azure/arm-appservice-profile-2020-09-01-hybrid'
summary: Internal HTTPS port.
fullName: internalHttpsPort
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'internalHttpsPort?: undefined | number'
return:
description: ''
type: undefined | number
- name: inUse
uid: '@azure/arm-appservice-profile-2020-09-01-hybrid.VirtualIPMapping.inUse'
package: '@azure/arm-appservice-profile-2020-09-01-hybrid'
summary: Is virtual IP mapping in use.
fullName: inUse
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'inUse?: undefined | false | true'
return:
description: ''
type: undefined | false | true
- name: virtualIP
uid: '@azure/arm-appservice-profile-2020-09-01-hybrid.VirtualIPMapping.virtualIP'
package: '@azure/arm-appservice-profile-2020-09-01-hybrid'
summary: Virtual IP address.
fullName: virtualIP
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'virtualIP?: undefined | string'
return:
description: ''
type: undefined | string
|
docs-ref-autogen/@azure/arm-appservice-profile-2020-09-01-hybrid/VirtualIPMapping.yml
|
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: {{ template "seaweedfs-csi-driver.name" . }}-controller
spec:
  selector:
    matchLabels:
      app: {{ template "seaweedfs-csi-driver.name" . }}-controller
  serviceName: "csi-seaweedfs"
  replicas: 1
  template:
    metadata:
      labels:
        app: {{ template "seaweedfs-csi-driver.name" . }}-controller
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: {{ template "seaweedfs-csi-driver.name" . }}-controller-sa
      {{- with .Values.controller.affinity }}
      affinity: {{ toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.controller.tolerations }}
      tolerations: {{ toYaml . | nindent 8 }}
      {{- end }}
      containers:
        # provisioner sidecar
        - name: csi-provisioner
          image: {{ .Values.csiProvisioner.image }}
          args:
            - "--csi-address=$(ADDRESS)"
            # Same verbosity as before (previously passed as "-v" "9"),
            # written in the long form used by the other sidecars.
            - "--v=9"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: {{ .Values.imagePullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
          resources:
            {{ toYaml .Values.csiProvisioner.resources | nindent 12 }}
        # attacher sidecar
        - name: csi-attacher
          image: {{ .Values.csiAttacher.image }}
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--timeout=120s"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: {{ .Values.imagePullPolicy }}
          resources:
            {{ toYaml .Values.csiAttacher.resources | nindent 12 }}
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
        # resizer sidecar
        - name: csi-resizer
          image: {{ .Values.csiResizer.image }}
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election=false"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: {{ .Values.imagePullPolicy }}
          resources:
            # Fixed copy/paste error: previously read .Values.csiAttacher.resources,
            # so the resizer could not be given its own resource limits.
            {{ toYaml .Values.csiResizer.resources | nindent 12 }}
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
        # SeaweedFS CSI plugin
        - name: seaweedfs-csi-plugin
          image: {{ .Values.seaweedfsCsiPlugin.image }}
          imagePullPolicy: {{ .Values.imagePullPolicy }}
          args:
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--filer=$(SEAWEEDFS_FILER)"
            - "--nodeid=$(NODE_ID)"
          env:
            - name: CSI_ENDPOINT
              value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
            - name: SEAWEEDFS_FILER
              value: {{ .Values.seaweedfsFiler | quote }}
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            {{- if .Values.tlsSecret }}
            - name: WEED_GRPC_CLIENT_KEY
              value: /var/run/secrets/app/tls/tls.key
            - name: WEED_GRPC_CLIENT_CERT
              value: /var/run/secrets/app/tls/tls.crt
            - name: WEED_GRPC_CA
              value: /var/run/secrets/app/tls/ca.crt
            {{- end }}
            {{- if .Values.logVerbosity }}
            - name: WEED_V
              value: {{ .Values.logVerbosity | quote }}
            {{- end }}
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
            {{- if .Values.tlsSecret }}
            - name: tls
              mountPath: /var/run/secrets/app/tls
            {{- end }}
      volumes:
        - name: socket-dir
          emptyDir: {}
        {{- if .Values.tlsSecret }}
        - name: tls
          secret:
            secretName: {{ .Values.tlsSecret }}
        {{- end }}
|
deploy/helm/seaweedfs-csi-driver/templates/statefulset.yml
|
---
name: CI
'on':
  push:
    branches:
      - master
  pull_request:
  schedule:
    - cron: '0 6 * * *'

jobs:
  ansible-legacy:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code.
        uses: actions/checkout@v1
      - name: Set up Python.
        uses: actions/setup-python@v1
        with:
          # Quoted so future bumps (e.g. "3.10") don't parse as the float 3.1.
          python-version: '3.7'
      - name: Install ansible and other dependencies.
        run: pip install ansible ansible-lint yamllint docker molecule openshift
      - name: Run molecule default test scenario.
        run: molecule test
        env:
          PY_COLORS: '1'
          ANSIBLE_FORCE_COLOR: '1'
  ansible:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code.
        uses: actions/checkout@v1
      - name: Set up Python.
        uses: actions/setup-python@v1
        with:
          python-version: '3.7'
      - name: Install Ansible distribution.
        # Quoting prevents the shell from parsing ">=2.10" as an output
        # redirection (which silently dropped the version constraint and
        # created a file named "=2.10").
        run: pip install --pre 'ansible>=2.10'
      - name: Install other dependencies.
        run: pip install ansible-lint yamllint docker molecule openshift
      - name: Run molecule default test scenario.
        run: molecule test
        env:
          PY_COLORS: '1'
          ANSIBLE_FORCE_COLOR: '1'
  ansible-base:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code.
        uses: actions/checkout@v1
      - name: Set up Python.
        uses: actions/setup-python@v1
        with:
          python-version: '3.7'
      - name: Install Ansible base.
        # Quoted for the same redirection reason as above.
        run: pip install --pre 'ansible-base>=2.10'
      - name: Install other dependencies.
        run: pip install ansible-lint yamllint docker molecule openshift
      # Allow this to fail, but have the output available for debugging.
      - name: Run molecule default test scenario.
        run: molecule test
        env:
          PY_COLORS: '1'
          ANSIBLE_FORCE_COLOR: '1'
        continue-on-error: true
  ansible-base-devel:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code.
        uses: actions/checkout@v1
      - name: Set up Python.
        uses: actions/setup-python@v1
        with:
          python-version: '3.7'
      - name: Install Ansible base.
        run: pip install git+https://github.com/ansible/ansible
      - name: Install other dependencies.
        run: pip install ansible-lint yamllint docker molecule openshift
      # Allow this to fail, but have the output available for debugging.
      - name: Run molecule default test scenario.
        run: molecule test
        env:
          PY_COLORS: '1'
          ANSIBLE_FORCE_COLOR: '1'
        continue-on-error: true
|
.github/workflows/ci.yml
|
items:
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard
name: Dashboard
fullName: Dashboard
children:
- vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.description
- vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.eTag
- vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.id
- vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.name
- vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.ownerId
- vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.position
- vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.refreshInterval
- vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.url
- vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.widgets
langs:
- typeScript
type: interface
summary: ''
source:
path: tfs.d.ts
startLine: 5184
remote:
path: typings\tfs.d.ts
repo: 'https://github.com/Microsoft/vss-web-extension-sdk.git'
branch: master
package: vss-web-extension-sdk
module: TFS/Dashboards/Contracts
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.description
name: description
fullName: description
children: []
langs:
- typeScript
type: property
summary: ''
syntax:
content: string description
return:
type:
- string
module: TFS/Dashboards/Contracts
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.eTag
name: eTag
fullName: eTag
children: []
langs:
- typeScript
type: property
summary: ''
syntax:
content: string eTag
return:
type:
- string
module: TFS/Dashboards/Contracts
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.id
name: id
fullName: id
children: []
langs:
- typeScript
type: property
summary: ''
syntax:
content: string id
return:
type:
- string
module: TFS/Dashboards/Contracts
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.name
name: name
fullName: name
children: []
langs:
- typeScript
type: property
summary: ''
syntax:
content: string name
return:
type:
- string
module: TFS/Dashboards/Contracts
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.ownerId
name: ownerId
fullName: ownerId
children: []
langs:
- typeScript
type: property
summary: >-
Owner for a dashboard. For any legacy dashboards, this would be the unique
identifier for the team associated with the dashboard.
syntax:
content: string ownerId
return:
type:
- string
module: TFS/Dashboards/Contracts
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.position
name: position
fullName: position
children: []
langs:
- typeScript
type: property
summary: ''
syntax:
content: number position
return:
type:
- number
module: TFS/Dashboards/Contracts
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.refreshInterval
name: refreshInterval
fullName: refreshInterval
children: []
langs:
- typeScript
type: property
summary: ''
syntax:
content: number refreshInterval
return:
type:
- number
module: TFS/Dashboards/Contracts
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.url
name: url
fullName: url
children: []
langs:
- typeScript
type: property
summary: ''
syntax:
content: string url
return:
type:
- string
module: TFS/Dashboards/Contracts
- uid: vss-web-extension-sdk.TFS.Dashboards.Contracts.Dashboard.widgets
name: widgets
fullName: widgets
children: []
langs:
- typeScript
type: property
summary: ''
syntax:
content: 'Widget[] widgets'
return:
type:
- '@vss-web-extension-sdk.TFS.Dashboards.Contracts.Widget[]'
module: TFS/Dashboards/Contracts
|
docs-ref-autogen/vss-web-extension-sdk/TFS.Dashboards.Contracts.Dashboard.yml
|
# Executable: ExportTimeDependentCoordinates3D
# Check: parse;execute
# ExpectedOutput:
#   ExportTimeDependentCoordinates3DVolume0.h5
#   ExportTimeDependentCoordinates3DReductions.h5
DomainCreator:
  # Parameters are chosen for an equal-mass, non-spinning binary black hole
  # using superposed-Kerr-Schild initial data created with the
  # Spectral Einstein Code (SpEC). The time-dependent maps are given
  # arbitrary time-dependence.
  BinaryCompactObject:
    ObjectA:
      InnerRadius: 0.4409
      OuterRadius: 6.0
      XCoord: -10.0
      ExciseInterior: true
      UseLogarithmicMap: false
      AdditionToRadialRefinementLevel: 1
    ObjectB:
      InnerRadius: 0.4409
      OuterRadius: 6.0
      XCoord: 10.0
      ExciseInterior: true
      UseLogarithmicMap: false
      AdditionToRadialRefinementLevel: 1
    EnvelopingCube:
      Radius: 100.0
    OuterSphere:
      Radius: 590.0
      UseLogarithmicMap: false
      AdditionToRadialRefinementLevel: 0
    InitialRefinement: 0
    InitialGridPoints: 3
    UseProjectiveMap: true
    TimeDependentMaps:
      InitialTime: 0.0
      # "Auto" is a string sentinel interpreted by the consuming option parser
      # (presumably "choose the expiration automatically") — not a YAML type.
      InitialExpirationDeltaT: Auto
      ExpansionMap:
        OuterBoundary: 590.0
        InitialExpansion: [1.0, 1.0]
        InitialExpansionVelocity: [0.01, 0.02]
        FunctionOfTimeNames: ['ExpansionFactor', 'Unity']
      RotationAboutZAxisMap:
        InitialRotationAngle: 0.0
        InitialAngularVelocity: 0.0
        FunctionOfTimeName: RotationAngle
      SizeMap:
        InitialValues: [0.0, 0.0]
        InitialVelocities: [-0.1, -0.1]
        InitialAccelerations: [-0.2, -0.2]
        FunctionOfTimeNames: ['LambdaFactorA0', 'LambdaFactorB0']
SpatialDiscretization:
  DiscontinuousGalerkin:
    Quadrature: GaussLobatto
Evolution:
  InitialTime: 0.0
  InitialTimeStep: 0.01
  TimeStepper:
    AdamsBashforthN:
      Order: 1
EventsAndTriggers:
  # YAML explicit-key syntax ("? key" then ": value") is required here because
  # the trigger — the TimeCompares mapping — is itself used as a mapping key.
  # The value is the list of events to run when the trigger fires.
  ? TimeCompares:
      Comparison: GreaterThanOrEqualTo
      Value: 0.08
  : - Completion
Observers:
  VolumeFileName: "ExportTimeDependentCoordinates3DVolume"
  ReductionFileName: "ExportTimeDependentCoordinates3DReductions"
|
tests/InputFiles/ExportCoordinates/InputTimeDependent3D.yaml
|
---
# Cirrus CI: one task fanned out over a build matrix covering Linux distros,
# FreeBSD, and macOS. The top-level container is the default execution
# environment; each matrix entry overrides it with its own container/instance.
# Sequence indentation is normalized to the indented style throughout
# (the file previously mixed flush and indented sequences).
container:
  image: ubuntu:bionic

task:
  matrix:
    - name: Build on Ubuntu (gcc, release)
      install_dependencies_script:
        - apt-get update
        - apt-get install -y cmake gcc g++
      script: ./ci/run_tests.sh -DCMAKE_BUILD_TYPE=Release
      container:
        matrix:
          - image: ubuntu:20.04
          - image: ubuntu:16.04
          - image: ubuntu:18.04
    - name: Build on Ubuntu (gcc, debug)
      install_dependencies_script:
        - apt-get update
        - apt-get install -y cmake gcc g++
      script: ./ci/run_tests.sh -DCMAKE_BUILD_TYPE=Debug
      container:
        matrix:
          - image: ubuntu:20.04
          - image: ubuntu:16.04
          - image: ubuntu:18.04
    - name: Build on CentOS (gcc, release)
      install_dependencies_script:
        - yum install -y cmake make gcc gcc-c++
      script: ./ci/run_tests.sh -DCMAKE_BUILD_TYPE=Release
      container:
        matrix:
          - image: centos:8
          - image: centos:7
    - name: Build on CentOS (gcc, debug)
      install_dependencies_script:
        - yum install -y cmake make gcc gcc-c++
      script: ./ci/run_tests.sh -DCMAKE_BUILD_TYPE=Debug
      container:
        matrix:
          - image: centos:8
          - image: centos:7
    - name: Build on FreeBSD (debug)
      install_dependencies_script:
        - pkg install -y cmake gmake bash
      script: bash ./ci/run_tests.sh -DCMAKE_BUILD_TYPE=Debug
      freebsd_instance:
        matrix:
          # - image: freebsd-12-1-release-amd64
          - image: freebsd-11-4-release-amd64
    - name: Build on FreeBSD (release)
      install_dependencies_script:
        - pkg install -y cmake gmake bash
      script: bash ./ci/run_tests.sh -DCMAKE_BUILD_TYPE=Release
      freebsd_instance:
        matrix:
          # - image: freebsd-12-1-release-amd64
          - image: freebsd-11-4-release-amd64
    - name: Build on MacOS Catalina (release)
      install_dependencies_script:
        - brew install cmake
      script: ./ci/run_tests.sh -DCMAKE_BUILD_TYPE=Release
      macos_instance:
        image: catalina-xcode-12.2
    - name: Build on MacOS Big Sur (release)
      install_dependencies_script:
        - brew install cmake
      script: ./ci/run_tests.sh -DCMAKE_BUILD_TYPE=Release
      macos_instance:
        image: big-sur-xcode-12.5
.cirrus.yml
|
version: "3.7"
volumes:
  grafana-sample:
  prometheus-sample:
  whisper-sample:
services:
  grafana-sample:
    container_name: grafana-sample
    image: grafana/grafana:latest
    ports:
      # Port mappings are quoted throughout: YAML 1.1 parses unquoted
      # "HH:MM"-style values as sexagesimal integers, and the file
      # previously mixed quoted and unquoted mappings.
      - "9092:3000"
    volumes:
      - grafana-sample:/var/lib/grafana
      - ./config/monitoring/grafana/provisioning/:/etc/grafana/provisioning/
    env_file:
      - ./config/monitoring/grafana/env-docker
    networks:
      - sample
    depends_on:
      - prometheus-sample
  prometheus-sample:
    container_name: prometheus-sample
    image: prom/prometheus:latest
    ports:
      - "9091:9090"
    volumes:
      - ./config/monitoring/prometheus/:/etc/prometheus/
      - prometheus-sample:/prometheus
    networks:
      - sample
  cadvisor-sample:
    # NOTE(review): google/cadvisor images are no longer updated; consider
    # migrating to gcr.io/cadvisor/cadvisor — confirm before switching.
    image: google/cadvisor:latest
    container_name: cadvisor-sample
    volumes:
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    ports:
      - "9093:8080"
    networks:
      - sample
  node-exporter-sample:
    image: prom/node-exporter
    container_name: node-exporter-sample
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
    command:
      - "--path.procfs=/host/proc"
      - "--path.sysfs=/host/sys"
      - "--collector.filesystem.ignored-mount-points"
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
    ports:
      - "9094:9100"
    networks:
      - sample
  postgres-exported-sample:
    image: wrouesnel/postgres_exporter
    container_name: postgres-exported-sample
    environment:
      - DATA_SOURCE_URI=api-postgres:5432/postgres?sslmode=disable
      - DATA_SOURCE_USER=dev
      # NOTE(review): <PASSWORD> is a scrubbed placeholder — supply the real
      # value via an env file or secret, not in this compose file.
      - DATA_SOURCE_PASS=<PASSWORD>
      - PG_EXPORTER_AUTO_DISCOVER_DATABASES=true
      - PG_EXPORTER_EXCLUDE_DATABASES=rdsadmin
    ports:
      - "9095:9187"
    networks:
      - sample
  graphite-sample:
    image: sitespeedio/graphite:1.1.5-12
    container_name: graphite-sample
    ports:
      - "2003:2003"
      - "4080:80"
    restart: always
    networks:
      - sample
    volumes:
      - whisper-sample:/opt/graphite/storage/whisper
  # grafana-setup-sample:
  #   image: sitespeedio/grafana-bootstrap:13.1.0
  #   container_name: grafana-setup-sample
  #   networks:
  #     - sample
  #   environment:
  #     - GF_PASSWORD=*****
  #     - GF_USER=admin
  #     - GF_API=http://grafana-sample:3000/api
networks:
  sample:
    name: sample-network
    driver: bridge
|
docker-compose-monitoring.yml
|
# Netlify/Decap CMS admin configuration. Single-line flow mappings are kept
# for compact leaf fields; the previous multi-line flow mappings (braces
# spanning several lines) are converted to block style, which is less fragile
# and matches the rest of the file.
backend:
  name: github
  repo: ChaituKNag/TheBestDeveloper
  branch: master
media_folder: src/images/assets
public_folder: ../images/assets
collections:
  - name: blog
    label: Blog
    folder: site/static/blog
    description: All the technical blogs of my site.
    create: true
    slug: "{{year}}-{{month}}-{{date}}__{{slug}}"
    fields:
      - { name: path, label: Path, required: true }
      - { name: date, label: Date, widget: datetime, required: true }
      - { name: title, label: Title, required: true }
      - { name: body, label: Body, widget: markdown, required: true }
      - { name: tags, label: Tags, widget: list, required: true }
  - label: "Pages"
    name: "pages"
    files:
      - label: "Home Page"
        name: "home-page"
        file: "src/content/home-page.yaml"
        fields:
          - { label: Page, name: page, widget: string }
          - { label: "Intro Url", name: introUrl, widget: string }
          - label: "Intro Video Cover Image"
            name: introCoverImg
            widget: image
          - { label: Salutation, name: salutation, widget: string }
          - { label: Bio, name: bio, widget: string }
          - label: Skills
            name: skills
            widget: list
            fields:
              - { label: Title, name: title, widget: string }
              - { label: Id, name: id, widget: string }
              - label: Link
                name: link
                widget: string
                # Quoted: the value ends in "=" and contains "?".
                value: "/works?topic="
          - { label: Project Intro, name: projectIntro, widget: string }
          - { label: Fun Stuff Intro, name: funStuffIntro, widget: string }
          - label: SocialLinks
            name: social-links
            widget: list
            fields:
              - { label: "Icon name", name: icon, widget: string }
              - { label: "Info text", name: infoText, widget: string }
              - { label: "Box Color", name: backgroundColor, widget: string }
              - { label: "Link", name: link, widget: string }
      - label: "Projects Page"
        name: "projects-page"
        file: "src/content/projects-page.yaml"
        fields:
          - { label: Page, name: page, widget: string }
          - { label: Title, name: title, widget: string }
          - { label: Disclaimer, name: disclaimer, widget: string }
          - { label: Recommendation, name: recommendation, widget: string }
          - label: Projects
            name: projects
            widget: list
            fields:
              - { label: Title, name: title, widget: string }
              - { label: Company, name: company, widget: string }
              - { label: Client, name: client, widget: string }
              - { label: Role, name: role, widget: string }
              - { label: Tech, name: tech, widget: list }
              - { label: Demo, name: demo, widget: string, required: false }
              - label: Points
                name: points
                widget: markdown
                minimal: true
      - label: "Works Page"
        name: "works-page"
        file: "src/content/works-page.yaml"
        fields:
          - { label: Page, name: page, widget: string }
          - { label: Title, name: title, widget: string }
          - { label: Disclaimer, name: disclaimer, widget: string }
          - label: Items
            name: items
            widget: list
            fields:
              - { label: Work Item Title, name: workTitle, widget: string }
              - label: Work Item Description
                name: workDescription
                widget: markdown
              - { label: Work Item Preview, name: workPreview, widget: image }
              - { label: Work Demo Link, name: workDemoLink, widget: string }
              - label: Work Source Code
                name: workSourceCode
                widget: string
|
static/admin/config.yml
|
interactions:
- request:
body: '{"createTransactionModel": {"companyCode": "DEFAULT", "type": "SalesOrder",
"lines": [{"quantity": 3, "amount": "30.00", "taxCode": "O9999999", "taxIncluded":
true, "itemCode": "123", "description": "Test product"}, {"quantity": 3, "amount":
"15.00", "taxCode": "O9999999", "taxIncluded": true, "itemCode": "123", "description":
"Test product", "ref1": "123"}, {"quantity": 1, "amount": "10.000", "taxCode":
"FR000000", "taxIncluded": true, "itemCode": "Shipping", "description": null}],
"code": "a40feb81-34f2-4146-b0f1-584ce3c837bc", "date": "2021-12-14", "customerCode":
0, "addresses": {"shipFrom": {"line1": "Teczowa 7", "line2": null, "city": "Wroclaw",
"region": "", "country": "PL", "postalCode": "53-601"}, "shipTo": {"line1":
"T\u0119czowa 7", "line2": "", "city": "WROC\u0141AW", "region": "", "country":
"PL", "postalCode": "53-601"}}, "commit": false, "currencyCode": "USD", "email":
""}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Authorization:
- Basic Og==
Connection:
- keep-alive
Content-Length:
- '901'
User-Agent:
- python-requests/2.26.0
method: POST
uri: https://rest.avatax.com/api/v2/transactions/createoradjust
response:
body:
string: !!binary |
H4sIAAAAAAAE<KEY>
headers:
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json; charset=utf-8
Date:
- Tue, 14 Dec 2021 09:32:03 GMT
Location:
- /api/v2/companies/242975/transactions/0
ServerDuration:
- '00:00:00.0210900'
Transfer-Encoding:
- chunked
Vary:
- Accept-Encoding
X-Content-Type-Options:
- nosniff
referrer-policy:
- same-origin
strict-transport-security:
- max-age=31536000; includeSubdomains
x-avalara-uid:
- 13bab346-a6fa-4cdb-aabd-1d0a1243de74
x-correlation-id:
- 13bab346-a6fa-4cdb-aabd-1d0a1243de74
x-frame-options:
- sameorigin
x-permitted-cross-domain-policies:
- none
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
version: 1
|
saleor/plugins/avatax/tests/cassettes/test_calculate_checkout_line_unit_price_with_variant_on_sale.yaml
|
title: Event 103 - MSVideoProcessoftMFT(D3D11)_VideoProcessorBlt
description: null
platform: windows
log_source: Microsoft-Windows-MediaFoundation-MSVProc
event_code: '103'
event_version: '0'
event_fields:
- standard_name: TBD
standard_type: TBD
name: Object
type: Pointer
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: InputSample
type: Pointer
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: InputViewIndex
type: UInt32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: OutputSample
type: Pointer
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: InputFrameOrField
type: UInt32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: OutputIndex
type: UInt32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: OutputFrame
type: UInt32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: SrcWidth
type: Int32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: SrcHeight
type: Int32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: DestWidth
type: Int32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: DestHeight
type: Int32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: SourceFormat
type: UInt32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: DestFormat
type: UInt32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: Rotation
type: UInt32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: Mirrored
type: UInt32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: MaxLuminanceIn
type: UInt32
description: null
sample_value: null
- standard_name: TBD
standard_type: TBD
name: MaxLuminanceOut
type: UInt32
description: null
sample_value: null
references: null
tags:
- etw_level_Verbose
- etw_opcode_Start
- etw_task_MSVideoProcessoftMFT(D3D11)_VideoProcessorBlt
|
windows/etw-providers/Microsoft-Windows-MediaFoundation-MSVProc/events/event-103.yml
|
name: main

on:
  push:
    branches:
      - master
    tags:
      - "*"
  pull_request:
    branches:
      - master

env:
  PYTEST_ADDOPTS: "--color=yes"

# Set permissions at the job level.
permissions: {}

jobs:
  test:
    runs-on: ubuntu-20.04
    continue-on-error: ${{ matrix.allow_failure }}
    timeout-minutes: 15
    permissions:
      contents: read
    steps:
    - uses: actions/checkout@v2
      with:
        persist-credentials: false
    - uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python }}
    - name: Setup mysql
      if: contains(matrix.name, 'mysql')
      run: |
        sudo systemctl start mysql.service
        echo "TEST_DB_USER=root" >> $GITHUB_ENV
        echo "TEST_DB_PASSWORD=<PASSWORD>" >> $GITHUB_ENV
    - name: Setup postgresql
      if: contains(matrix.name, 'postgres')
      run: |
        sudo systemctl start postgresql.service
        sudo -u postgres createuser --createdb $USER
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install tox==3.24.4
    - name: Run tox
      run: tox -e ${{ matrix.name }}
    - name: Report coverage
      if: contains(matrix.name, 'coverage')
      uses: codecov/codecov-action@v2
      with:
        fail_ci_if_error: true
        files: ./coverage.xml
    strategy:
      fail-fast: false
      matrix:
        # All Python versions are quoted: an unquoted 3.10 parses as the
        # float 3.1, and the rest were inconsistently left as plain floats.
        include:
        - name: linting,docs
          python: '3.8'
          allow_failure: false
        - name: py310-dj40-postgres-xdist-coverage
          python: '3.10'
          allow_failure: false
        - name: py310-dj32-postgres-xdist-coverage
          python: '3.10'
          allow_failure: false
        - name: py39-dj32-postgres-xdist-coverage
          python: '3.9'
          allow_failure: false
        - name: py39-dj40-mysql_innodb-coverage
          python: '3.9'
          allow_failure: false
        - name: py36-dj22-sqlite-xdist-coverage
          python: '3.6'
          allow_failure: false
        - name: py37-dj22-sqlite-xdist-coverage
          python: '3.7'
          allow_failure: false
        - name: py38-dj32-sqlite-xdist-coverage
          python: '3.8'
          allow_failure: false
        - name: py38-dj40-sqlite-xdist-coverage
          python: '3.8'
          allow_failure: false
        - name: py39-djmain-sqlite-coverage
          python: '3.9'
          allow_failure: true
        # Explicitly test (older) pytest 5.4.
        - name: py35-dj22-postgres-pytest54-coverage
          python: '3.5'
          allow_failure: false
        - name: py35-dj22-sqlite_file-coverage
          python: '3.5'
          allow_failure: false
        - name: py36-dj32-mysql_myisam-coverage
          python: '3.6'
          allow_failure: false
        # pypy3: not included with coverage reports (much slower then).
        - name: pypy3-dj22-postgres
          python: pypy3
          allow_failure: false
  deploy:
    if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') && github.repository == 'pytest-dev/pytest-django'
    runs-on: ubuntu-20.04
    timeout-minutes: 15
    permissions:
      contents: read
    steps:
    - uses: actions/checkout@v2
      with:
        fetch-depth: 0
        persist-credentials: false
    - uses: actions/setup-python@v2
      with:
        python-version: "3.8"
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install --upgrade build
    - name: Build package
      run: python -m build
    - name: Publish package
      uses: pypa/gh-action-pypi-publish@v1.4.1
      with:
        user: __token__
        password: ${{ secrets.pypi_token }}
|
.github/workflows/main.yml
|
%YAML 1.2
---
# IMPORTANT:
# - questions are identified by a universally unique identifier (UUID, version 4)
# - you can use a tool to generate them safely: https://www.uuidgenerator.net/version4
# - they MUST be version 4!
# - this UUID is on the left-hand side of the colon for each question
# - when modifying, it's important that you don't change the UUID
# - to add a new item, you'll need to generate a new UUID
# - you should never replace an item completely while keeping the UUID the same
# - if you don't know what you're doing, ask!
-
id: cd<PASSWORD>14-8<PASSWORD>-4<PASSWORD>-9<PASSWORD>-3<PASSWORD>
body: |
Give feedback to your teammate by completing the following sentence:
"{{subject}}, I want to work with you because... and I would want to work with you more if..."
responseType: text
feedbackTypeId: 81b7085c-1c3e-4066-80a7-d5ac3b7fa838
subjectType: member
active: true
-
id: bb2927c9-a16d-49e9-8c3a-dd308a17315a
body: |
Rate your agreement with the following statement:
"Based on {{subject}}'s team play skills, I want to work with them on future projects."
Consider the behaviors of a good team member: is receptive to feedback, focuses on results, practices flexible leadership, reduces friction.
responseType: likert7Agreement
feedbackTypeId: 3b2bf6de-e5e9-4b2f-b7a8-a46e0f888d7f
validationOptions:
min: 0
max: 7
integer: true
subjectType: member
active: true
-
id: 663fb86e-b2d1-4271-950b-8aafbfa23ad7
body: |
Rate your agreement with the following statement:
"{{subject}} uses appropriate technical terms in their descriptions and explanations (e.g., they identify function parameters as parameters or arguments rather than as variables)."
responseType: likert7Agreement
feedbackTypeId: 571de48b-366d-448f-9317-b6989daec527
validationOptions:
min: 0
max: 7
integer: true
subjectType: member
active: true
-
id: 09dff295-c339-4326-8e21-71cf332e0895
body: |
Based on a challenge scale of 1 to 10 where 7 represents your ZPD, which number best reflects the challenge level you felt while doing this project?
Use the following values to calibrate:
1 = Extremely bored
4 = Confident and comfortable
6 = Stretched but still capable
7 = In flow and pushed just beyond capacity
8 = Slightly more challenged than is fun
10 = Completely overwhelmed
responseType: numeric
feedbackTypeId: af6445a3-f631-411f-84b3-d6aaea87d6dc
validationOptions:
min: 1
max: 10
integer: true
subjectType: project
active: true
|
src/data/questions.yaml
|
---
####
####
#### Run on PR
####
####
kind: pipeline
name: Run CI stack
trigger:
branch:
- "*"
event:
- push
steps:
- name: npm-install
image: node:12
depends_on: [ clone ]
commands:
- npm install
- name: Lint Typescript
image: node:12
depends_on: [ npm-install ]
commands:
- npm run lint
- name: Type checking
image: node:12
depends_on: [ npm-install ]
commands:
- npm run tsc
- name: Units Tests
image: node:12
depends_on: [ npm-install ]
commands:
- MONGO_HOST=mongo REDIS_HOST=redis npm run test:ci
- name: coverage
depends_on: [ Units Tests ]
image: plugins/codecov
settings:
token:
from_secret: CODECOV_TOKEN
files:
- ./coverage/clover.xml
flags:
- unit
services:
- name: redis
image: redis
- name: mongo
image: mongo
---
####
####
#### Build Staging
####
####
kind: pipeline
name: Build&Push image
trigger:
branch:
- "*"
event:
- push
steps:
- name: docker
image: plugins/docker
settings:
username:
from_secret: DOCKER_USERNAME
password:
from_secret: DOCKER_PASSWORD
repo: komakio/backend
tags: ${DRONE_COMMIT_SHA}-${DRONE_BUILD_NUMBER}
- name: git deploy
image: bitnami/git
commands:
- git clone "https://$(echo $githubUsername):$(echo $githubPassword)@github.com/wikibusiness/komak-k8s.git"
- cd komak-k8s
- rm -f src/backend-version.yaml
- "echo 'imageTag: ${DRONE_COMMIT_SHA}-${DRONE_BUILD_NUMBER}' > src/backend-version.yaml"
- "git add . && git commit -a -m 'Updated backend to tag ${DRONE_COMMIT_SHA}-${DRONE_BUILD_NUMBER}' && git push"
environment:
githubUsername:
from_secret: GITHUB_USERNAME
githubPassword:
from_secret: GITHUB_PASSWORD
- name: slack
image: plugins/slack
settings:
webhook:
from_secret: SLACK_WEBHOOK
channel: errors
when:
status: [ failure ]
---
#####################
# PROD DEPLOYMENT
#####################
kind: pipeline
name: Build&Push production
trigger:
event:
- tag
steps:
- name: docker
image: plugins/docker
settings:
username:
from_secret: DOCKER_USERNAME
password:
from_secret: DOCKER_PASSWORD
repo: komakio/backend
tags: ${DRONE_TAG}
- name: git deploy
image: bitnami/git
commands:
- git clone "https://$(echo $githubUsername):$(echo $githubPassword)@github.com/wikibusiness/komak-k8s.git"
- cd komak-k8s
- rm -f src/backend-production-version.yaml
- "echo 'imageTag: \"${DRONE_TAG}\"' > src/backend-production-version.yaml"
- "git add . && git commit -a -m 'Updated production backend to tag ${DRONE_TAG}' && git push"
environment:
githubUsername:
from_secret: GITHUB_USERNAME
githubPassword:
from_secret: GITHUB_PASSWORD
- name: slack
image: plugins/slack
settings:
webhook:
from_secret: SLACK_WEBHOOK
template: >
{{#success build.status}}
Production deployment (backend) {{build.tag}} succeeded.
{{else}}
Backend production build {{build.number}} failed. Fix me please.
{{/success}}
---
kind: signature
hmac: a8805c5d03888f08ce08e51cfb3f4b5ed7e6d607f95ee17967fbe41fbf06484b
...
|
.drone.yml
|
---
name: CI
on:
pull_request:
types: ["opened", "synchronize"]
push:
branches: ["devel"]
jobs:
Test:
runs-on: ubuntu-latest
strategy:
matrix:
python:
- version: "3.6"
toxenv: py36
- version: "3.7"
toxenv: py37
- version: "3.8"
toxenv: py38
- version: "3.9"
toxenv: py39
- version: "3.10"
toxenv: py310
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install Python ${{ matrix.python.version }}
uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python.version }}
- name: Configure Job Cache
uses: actions/cache@v2
with:
path: |
~/.cache/pip
~/.cache/pypoetry/cache
~/.poetry
# Including the hashed poetry.lock in the cache slug ensures that the cache
# will be invalidated, and thus all packages will be redownloaded, if the
# lockfile is updated
key: ${{ runner.os }}-${{ matrix.python.toxenv }}-${{ hashFiles('**/poetry.lock') }}
- name: Configure Path
run: echo "$HOME/.local/bin" >> $GITHUB_PATH
- name: Configure Environment
run: .github/scripts/setup-env.sh
- name: Run Toxenv ${{ matrix.python.toxenv }}
run: poetry run tox -e ${{ matrix.python.toxenv }}
Check:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install Python 3.8
uses: actions/setup-python@v1
with:
        python-version: "3.8"
- name: Configure Job Cache
uses: actions/cache@v2
with:
path: |
~/.cache/pip
~/.cache/pypoetry/cache
~/.poetry
# Hardcoded 'py38' slug here lets this cache piggyback on the 'py38' cache
# that is generated for the tests above
key: ${{ runner.os }}-py38-${{ hashFiles('**/poetry.lock') }}
- name: Configure Path
run: echo "$HOME/.local/bin" >> $GITHUB_PATH
- name: Configure Environment
run: .github/scripts/setup-env.sh
- name: Run Static Analysis Checks
run: poetry run tox -e static
- name: Run Static Analysis Checks (Tests)
run: poetry run tox -e static-tests
- name: Run Security Checks
run: poetry run tox -e security
|
.github/workflows/ci.yaml
|
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "hackmd.fullname" . }}
labels:
app: {{ template "hackmd.name" . }}
chart: {{ template "hackmd.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "hackmd.name" . }}
release: {{ .Release.Name }}
strategy:
type: {{ .Values.deploymentStrategy }}
{{- if ne .Values.deploymentStrategy "RollingUpdate" }}
rollingUpdate: null
{{- end }}
template:
metadata:
labels:
app: {{ template "hackmd.name" . }}
release: {{ .Release.Name }}
{{- with .Values.podAnnotations }}
annotations:
{{ toYaml . | indent 8 }}
{{- end }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 3000
protocol: TCP
livenessProbe:
httpGet:
path: /status
port: 3000
initialDelaySeconds: 120
readinessProbe:
httpGet:
path: /status
port: 3000
initialDelaySeconds: 30
env:
- name: CMD_DB_PASSWORD
{{- if .Values.postgresql.install }}
valueFrom:
secretKeyRef:
name: {{ template "hackmd.postgresql.fullname" . }}
key: postgres-password
{{- else }}
          value: {{ .Values.postgresql.postgresPassword | quote }}
{{- end }}
- name: CMD_SESSION_SECRET
valueFrom:
secretKeyRef:
name: {{ template "hackmd.fullname" . }}
key: sessionSecret
- name: CMD_DB_URL
value: postgres://{{ .Values.postgresql.postgresUser }}:$(CMD_DB_PASSWORD)@{{ template "hackmd.database.host" . }}:5432/{{ .Values.postgresql.postgresDatabase }}
- name: HMD_DB_URL
value: postgres://{{ .Values.postgresql.postgresUser }}:$(CMD_DB_PASSWORD)@{{ template "hackmd.database.host" . }}:5432/{{ .Values.postgresql.postgresDatabase }}
{{- if .Values.extraVars }}
{{ toYaml .Values.extraVars | indent 12 }}
{{- end }}
volumeMounts:
- name: data
mountPath: "/hackmd/public/uploads"
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
volumes:
- name: data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "hackmd.fullname" . }}{{- end }}
{{- else }}
emptyDir: {}
{{- end }}
|
stable/hackmd/templates/deployment.yaml
|
tosca_definitions_version: alien_dsl_1_4_0
metadata:
template_name: alien4cloud-topology
template_version: 1.4.0-SNAPSHOT
template_author: alien4cloud
description: Alien4Cloud topology with 1 remote ES node
imports:
- tosca-normative-types:1.0.0-ALIEN14
- org.alien4cloud.lang.java.jdk.linux:1.4.0-SNAPSHOT
- elasticsearch-type:1.4.0-SNAPSHOT
- alien-type:1.4.0-SNAPSHOT
topology_template:
node_templates:
JDK:
type: org.alien4cloud.lang.java.jdk.linux.nodes.OracleJDK
properties:
java_url: "https://edelivery.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz"
java_home: /opt/java
requirements:
- host:
node: Server
capability: tosca.capabilities.Container
relationship: tosca.relationships.HostedOn
Alien:
type: org.alien4cloud.nodes.Alien
properties:
alien_url: "https://fastconnect.org/maven/service/local/repositories/opensource/content/alien4cloud/alien4cloud-ui/1.1.0-SM8/alien4cloud-ui-1.1.0-SM8-standalone.war"
context_root: /
requirements:
- java:
node: JDK
capability: org.alien4cloud.lang.java.pub.capabilities.JDK
relationship: org.alien4cloud.lang.java.pub.relationships.JavaSoftwareHostedOnJDK
- elasticsearch:
node: ElasticSearch
capability: org.alien4cloud.capabilities.ElasticSearch
relationship: org.alien4cloud.relationships.AlienConnectToElasticSearch
ElasticSearch:
type: org.alien4cloud.nodes.ElasticSearch
properties:
elasticsearch_url: "https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.7.0.deb"
requirements:
- java:
node: JDK_2
capability: org.alien4cloud.lang.java.pub.capabilities.JDK
relationship: org.alien4cloud.lang.java.pub.relationships.JavaSoftwareHostedOnJDK
JDK_2:
type: org.alien4cloud.lang.java.jdk.linux.nodes.OracleJDK
properties:
java_url: "https://edelivery.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz"
java_home: /opt/java
requirements:
- host:
node: Database
capability: tosca.capabilities.Container
relationship: tosca.relationships.HostedOn
Server:
type: tosca.nodes.Compute
requirements:
- network:
node: Network
capability: tosca.capabilities.Connectivity
relationship: tosca.relationships.Network
Network:
type: tosca.nodes.Network
properties:
ip_version: 4
Database:
type: tosca.nodes.Compute
requirements:
- network:
node: Network
capability: tosca.capabilities.Connectivity
relationship: tosca.relationships.Network
|
topology-alien4cloud/topology-alien.yml
|
image: $SKA_K8S_TOOLS_DOCKER_BUILDER_IMAGE
# The YAML file defines a set of jobs with constraints stating when they should be run.
# You can specify an unlimited number of jobs which are defined as top-level elements with an arbitrary name and always have to contain at least the script clause.
# In this case we have only the test job which produces an artifact (it must be placed into a directory called "public")
# It is also specified that only the master branch will be subject to this job.
stages:
- build
- test
- lint
- pages
- publish
lint-check-chart:
stage: lint
tags:
- k8srunner
image: $SKA_K8S_TOOLS_DEPLOY_IMAGE
script:
- make chart_lint
artifacts:
paths:
- build
test:
stage: test
tags:
- k8srunner
script:
- make test
artifacts:
name: "$CI_PROJECT_NAME-$CI_JOB_ID"
paths:
- build
expire_in: 7 days
#integration_test:
# stage: test
# dependencies:
# - build
# tags:
# - k8srunner
# variables:
# MINIKUBE: "false"
# environment:
# name: oet-test
# kubernetes:
# namespace: ci-$CI_PROJECT_NAME-$CI_COMMIT_SHORT_SHA
# image: nexus.engageska-portugal.pt/ska-docker/deploy:0.4.3
# script:
# - kubectl version
# - make install-chart
## - make wait
## - make smoketest
# - make test
# after_script:
# - make down
# - mkdir -p build
# - cp -rf charts/build/* build/
# - ls -la build
# artifacts:
# name: "$CI_PROJECT_NAME-$CI_JOB_ID"
# paths:
# - build
# expire_in: 7 days
lint:
stage: lint
tags:
- k8srunner
script:
- make lint
artifacts:
paths:
- build
pages:
when: always
tags:
- k8srunner
stage: pages
script:
- mkdir public
- cp -R build public
artifacts:
paths:
- public
expire_in: 7 days
# Standardised included jobs
variables:
CHARTS_TO_PUBLISH: ska-oso-oet
include:
# Helm Chart Publish
# https://developer.skatelescope.org/en/latest/development/software_package_release_procedure.html#package-and-publish-helm-charts-to-the-ska-helm-chart-repository
- project: 'ska-telescope/templates-repository'
file: 'gitlab-ci/includes/helm_publish.yml'
# Create Gitlab CI badges from CI metrics
# https://developer.skatelescope.org/en/latest/tools/continuousintegration.html#automated-collection-of-ci-health-metrics-as-part-of-the-ci-pipeline
- project: 'ska-telescope/templates-repository'
file: 'gitlab-ci/includes/post_step.yml'
- project: 'ska-telescope/templates-repository'
file: 'gitlab-ci/includes/build_push.yml'
- project: 'ska-telescope/templates-repository'
file: 'gitlab-ci/includes/build_wheel.yml'
|
.gitlab-ci.yml
|
{{- /*
Copyright 2020 Crown Copyright
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/ -}}
{{- if .Values.shell.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "hdfs.fullname" . }}-shell
labels:
{{- include "hdfs.labels" . | nindent 4 }}
app.kubernetes.io/component: shell
spec:
replicas: 1
selector:
matchLabels:
{{- include "hdfs.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: shell
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }}
checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
labels:
{{- include "hdfs.labels" . | nindent 8 }}
app.kubernetes.io/component: shell
spec:
containers:
- name: shell
image: {{ .Values.shell.repository }}:{{ .Values.shell.tag }}
imagePullPolicy: {{ .Values.shell.imagePullPolicy }}
command:
- /bin/bash
stdin: true
tty: true
env:
- name: HADOOP_CONF_DIR
value: {{ .Values.config.path }}
volumeMounts:
- name: config
mountPath: {{ .Values.config.path }}
readOnly: true
- name: secrets
mountPath: {{ .Values.secrets.path }}
readOnly: true
resources:
{{- toYaml .Values.shell.resources | nindent 10 }}
volumes:
- name: config
configMap:
name: {{ template "hdfs.fullname" . }}
optional: false
- name: secrets
secret:
secretName: {{ template "hdfs.fullname" . }}
optional: false
{{- with .Values.shell.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.shell.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.shell.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.shell.imagePullSecrets }}
imagePullSecrets:
{{- range . }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- end }}
|
kubernetes/hdfs/templates/shell.yaml
|
- position: 1
driverNumber: 8
driverId: martin-brundle
constructorId: mclaren
engineManufacturerId: peugeot
tyreManufacturerId: goodyear
time: "2:32.600"
  gap: null
  interval: null
laps: 9
- position: 2
driverNumber: 30
driverId: heinz-harald-frentzen
constructorId: sauber
engineManufacturerId: mercedes
tyreManufacturerId: goodyear
time: "2:34.251"
gap: "+1.651"
interval: "+1.651"
laps: 6
- position: 3
driverNumber: 7
driverId: mika-hakkinen
constructorId: mclaren
engineManufacturerId: peugeot
tyreManufacturerId: goodyear
time: "2:35.033"
gap: "+2.433"
interval: "+0.782"
laps: 9
- position: 4
driverNumber: 28
driverId: gerhard-berger
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: goodyear
time: "2:35.278"
gap: "+2.678"
interval: "+0.245"
laps: 6
- position: 5
driverNumber: 5
driverId: michael-schumacher
constructorId: benetton
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "2:35.347"
gap: "+2.747"
interval: "+0.069"
laps: 5
- position: 6
driverNumber: 2
driverId: david-coulthard
constructorId: williams
engineManufacturerId: renault
tyreManufacturerId: goodyear
time: "2:37.630"
gap: "+5.030"
interval: "+2.283"
laps: 5
- position: 7
driverNumber: 0
driverId: damon-hill
constructorId: williams
engineManufacturerId: renault
tyreManufacturerId: goodyear
time: "2:40.170"
gap: "+7.570"
interval: "+2.540"
laps: 6
- position: 8
driverNumber: 10
driverId: gianni-morbidelli
constructorId: footwork
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "2:41.165"
gap: "+8.565"
interval: "+0.995"
laps: 6
- position: 9
driverNumber: 9
driverId: christian-fittipaldi
constructorId: footwork
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "2:41.177"
gap: "+8.577"
interval: "+0.012"
laps: 10
- position: 10
driverNumber: 20
driverId: erik-comas
constructorId: larrousse
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "2:41.285"
gap: "+8.685"
interval: "+0.108"
laps: 3
- position: 11
driverNumber: 32
driverId: jean-marc-gounon
constructorId: simtek
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "2:44.545"
gap: "+11.945"
interval: "+3.260"
laps: 3
- position: 12
driverNumber: 34
driverId: bertrand-gachot
constructorId: pacific
engineManufacturerId: ilmor
tyreManufacturerId: goodyear
time: "2:45.642"
gap: "+13.042"
interval: "+1.097"
laps: 8
- position: 13
driverNumber: 19
driverId: philippe-alliot
constructorId: larrousse
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "2:48.847"
gap: "+16.247"
interval: "+3.205"
laps: 3
- position: 14
driverNumber: 11
driverId: philippe-adams
constructorId: lotus
engineManufacturerId: mugen-honda
tyreManufacturerId: goodyear
time: "2:53.636"
gap: "+21.036"
interval: "+4.789"
laps: 6
- position: 15
driverNumber: 14
driverId: rubens-barrichello
constructorId: jordan
engineManufacturerId: hart
tyreManufacturerId: goodyear
time: "2:57.980"
gap: "+25.380"
interval: "+4.344"
laps: 3
- position: 16
driverNumber: 24
driverId: michele-alboreto
constructorId: minardi
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "2:59.304"
gap: "+26.704"
interval: "+1.324"
laps: 6
- position: 17
driverNumber: 29
driverId: andrea-de-cesaris
constructorId: sauber
engineManufacturerId: mercedes
tyreManufacturerId: goodyear
time: "3:05.103"
gap: "+32.503"
interval: "+5.799"
laps: 4
- position: 18
driverNumber: 3
driverId: ukyo-katayama
constructorId: tyrrell
engineManufacturerId: yamaha
tyreManufacturerId: goodyear
time: "4:57.558"
gap: "+2:24.958"
interval: "+1:52.455"
laps: 1
- position: 19
driverNumber: 6
driverId: jos-verstappen
constructorId: benetton
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "5:10.728"
gap: "+2:38.128"
interval: "+13.170"
laps: 1
- position: 20
driverNumber: 23
driverId: pierluigi-martini
constructorId: minardi
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "7:27.562"
gap: "+4:54.962"
interval: "+2:16.834"
laps: 1
- position: 21
driverNumber: 4
driverId: mark-blundell
constructorId: tyrrell
engineManufacturerId: yamaha
tyreManufacturerId: goodyear
time: "7:47.295"
gap: "+5:14.695"
interval: "+19.733"
laps: 1
- position: 22
driverNumber: 31
driverId: david-brabham
constructorId: simtek
engineManufacturerId: ford
tyreManufacturerId: goodyear
time: "8:10.748"
gap: "+5:38.148"
interval: "+23.453"
laps: 1
- position: 23
driverNumber: 27
driverId: jean-alesi
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: goodyear
time: "25:37.736"
gap: "+23:05.136"
interval: "+17:26.988"
laps: 2
|
src/data/seasons/1994/races/11-belgium/free-practice-2-results.yml
|
name: CI
on:
push:
branches:
- '**'
tags:
- 'v[0-9]+.[0-9]+-release*'
paths:
- 'src/**'
- '.github/workflows/ci.yml'
pull_request:
branches:
- master
paths:
- 'src/**'
- '.github/workflows/ci.yml'
env:
NUGET_PACKAGES_DIR: ${{ github.workspace }}/nuget-packages
jobs:
build-pack-and-test:
runs-on: ubuntu-latest
timeout-minutes: 5
defaults:
run:
working-directory: ./src
outputs:
nuget-package-version: ${{ steps.pack.outputs.nuget-package-version }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0 # avoid shallow clone so nbgv can do its work
- name: Build
run: dotnet build --configuration Release
- name: Pack
id: pack
run: |
dotnet pack --configuration Release --no-build --output ${{ env.NUGET_PACKAGES_DIR }} ./Vektonn.sln
echo "::set-output name=nuget-package-version::$NBGV_NuGetPackageVersion"
- name: Store nuget packages as artifacts
uses: actions/upload-artifact@v2
with:
name: nuget-packages
path: ${{ env.NUGET_PACKAGES_DIR }}/*
if-no-files-found: error
retention-days: 10
- name: Test
run: dotnet test --configuration Release --no-build --verbosity normal
nuget-publish:
if: startsWith(github.ref, 'refs/tags/v')
needs: build-pack-and-test
runs-on: ubuntu-latest
timeout-minutes: 1
steps:
- name: Download nuget packages from artifacts
uses: actions/download-artifact@v2
with:
name: nuget-packages
path: ${{ env.NUGET_PACKAGES_DIR }}
- name: Publish to nuget.org
run: dotnet nuget push ${{ env.NUGET_PACKAGES_DIR }}/*.nupkg --api-key ${{ secrets.NUGET_ORG_API_KEY }} --source https://api.nuget.org/v3/index.json
docker-publish:
if: startsWith(github.ref, 'refs/tags/v')
needs: build-pack-and-test
runs-on: ubuntu-latest
timeout-minutes: 5
defaults:
run:
working-directory: ./src
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set environment variables
run: |
DOCKER_TAG=${{needs.build-pack-and-test.outputs.nuget-package-version}}
DOCKER_IMAGE_NAME_PREFIX=ghcr.io/${{ github.repository_owner }}
echo "DOCKER_TAG=$DOCKER_TAG" >> $GITHUB_ENV
echo "DOCKER_IMAGE_NAME_AND_TAG_API=$DOCKER_IMAGE_NAME_PREFIX/api-service:$DOCKER_TAG" >> $GITHUB_ENV
echo "DOCKER_IMAGE_NAME_AND_TAG_INDEX_SHARD=$DOCKER_IMAGE_NAME_PREFIX/index-shard-service:$DOCKER_TAG" >> $GITHUB_ENV
- name: Build and tag docker images
run: |
./.build-docker-image.sh api-service $DOCKER_IMAGE_NAME_AND_TAG_API
./.build-docker-image.sh index-shard-service $DOCKER_IMAGE_NAME_AND_TAG_INDEX_SHARD
- name: Push docker images to GitHub registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${{ github.actor }} --password-stdin
docker image push $DOCKER_IMAGE_NAME_AND_TAG_API
docker image push $DOCKER_IMAGE_NAME_AND_TAG_INDEX_SHARD
docker logout ghcr.io
- name: Push docker images to DockerHub registry
run: |
echo "${{ secrets.DOCKER_HUB_PASSWORD }}" | docker login --username vektonn --password-stdin
docker image tag $DOCKER_IMAGE_NAME_AND_TAG_API vektonn/api-service:latest
docker image tag $DOCKER_IMAGE_NAME_AND_TAG_API vektonn/api-service:$DOCKER_TAG
docker image tag $DOCKER_IMAGE_NAME_AND_TAG_INDEX_SHARD vektonn/index-shard-service:latest
docker image tag $DOCKER_IMAGE_NAME_AND_TAG_INDEX_SHARD vektonn/index-shard-service:$DOCKER_TAG
docker image push --all-tags vektonn/api-service
docker image push --all-tags vektonn/index-shard-service
docker logout
|
.github/workflows/ci.yml
|
---
fixtures:
repositories:
aide: https://github.com/simp/pupmod-simp-aide
at: https://github.com/simp/pupmod-simp-at
auditd: https://github.com/simp/pupmod-simp-auditd
augeasproviders_core: https://github.com/simp/augeasproviders_core
augeasproviders_grub: https://github.com/simp/augeasproviders_grub
augeasproviders_puppet: https://github.com/simp/augeasproviders_puppet
augeasproviders_sysctl: https://github.com/simp/augeasproviders_sysctl
augeasproviders_ssh: https://github.com/simp/augeasproviders_ssh
autofs: https://github.com/simp/pupmod-simp-autofs
concat: https://github.com/simp/puppetlabs-concat
chkrootkit: https://github.com/simp/pupmod-simp-chkrootkit
clamav: https://github.com/simp/pupmod-simp-clamav
cron: https://github.com/simp/pupmod-simp-cron
dhcp: https://github.com/simp/pupmod-simp-dhcp
haveged:
repo: https://github.com/simp/puppet-haveged
branch: simp-master
freeradius: https://github.com/simp/pupmod-simp-freeradius
iptables: https://github.com/simp/pupmod-simp-iptables
inifile: https://github.com/simp/puppetlabs-inifile
incron: https://github.com/simp/pupmod-simp-incron
issue: https://github.com/simp/pupmod-simp-issue
kmod: https://github.com/simp/puppet-kmod
krb5: https://github.com/simp/pupmod-simp-krb5
logrotate: https://github.com/simp/pupmod-simp-logrotate
named: https://github.com/simp/pupmod-simp-named
nfs: https://github.com/simp/pupmod-simp-nfs
nsswitch: https://github.com/simp/puppet-nsswitch
ntpd: https://github.com/simp/pupmod-simp-ntpd
oddjob: https://github.com/simp/pupmod-simp-oddjob
pam: https://github.com/simp/pupmod-simp-pam
pki: https://github.com/simp/pupmod-simp-pki
postfix: https://github.com/simp/pupmod-simp-postfix
pupmod: https://github.com/simp/pupmod-simp-pupmod
puppetdb: https://github.com/simp/puppetlabs-puppetdb
postgresql: https://github.com/simp/puppetlabs-postgresql
resolv: https://github.com/simp/pupmod-simp-resolv
rsync: https://github.com/simp/pupmod-simp-rsync
rsyslog: https://github.com/simp/pupmod-simp-rsyslog
selinux: https://github.com/simp/pupmod-simp-selinux
simp: https://github.com/simp/pupmod-simp-simp
simp_openldap: https://github.com/simp/pupmod-simp-simp_openldap
simp_options: https://github.com/simp/pupmod-simp-simp_options
simp_rsyslog: https://github.com/simp/pupmod-simp-simp_rsyslog
simpcat: https://github.com/simp/pupmod-simp-simpcat
simplib: https://github.com/simp/pupmod-simp-simplib
sssd: https://github.com/simp/pupmod-simp-sssd
ssh: https://github.com/simp/pupmod-simp-ssh
stdlib: https://github.com/simp/puppetlabs-stdlib
stunnel: https://github.com/simp/pupmod-simp-stunnel
sudo: https://github.com/simp/pupmod-simp-sudo
sudosh: https://github.com/simp/pupmod-simp-sudosh
swap: https://github.com/simp/pupmod-simp-swap
svckill: https://github.com/simp/pupmod-simp-svckill
timezone:
repo: https://github.com/simp/puppet-timezone
branch: simp-master
tcpwrappers: https://github.com/simp/pupmod-simp-tcpwrappers
tftpboot: https://github.com/simp/pupmod-simp-tftpboot
tuned: https://github.com/simp/pupmod-simp-tuned
useradd: https://github.com/simp/pupmod-simp-useradd
symlinks:
simp_nfs: "#{source_dir}"
|
.fixtures.yml
|
TransformerEstimator_electricity:
hyperparameters:
context_length:
$eval: 2 * $trial.dataset.meta.prediction_length
embedding_dimension: 20
epochs: 100
forecaster_name: gluonts.model.transformer.TransformerEstimator
freq:
$eval: __trial__.dataset.meta.freq
inner_ff_dim_scale: 6
learning_rate: 0.01
listify_dataset: 'yes'
model_dim: 32
num_workers: 8
prediction_length:
$eval: $trial.dataset.meta.prediction_length
image: arangatang/masterthesis:gluonts_cpu_commit_4d1a9a0
instance: local
name: TransformerEstimator
TransformerEstimator_solar_energy:
hyperparameters:
cardinality:
- 137
context_length:
$eval: $trial.dataset.meta.prediction_length
embedding_dimension: 10
epochs: 100
forecaster_name: gluonts.model.transformer.TransformerEstimator
freq:
$eval: __trial__.dataset.meta.freq
inner_ff_dim_scale: 6
learning_rate: 0.001
listify_dataset: 'yes'
model_dim: 64
num_workers: 6
prediction_length:
$eval: $trial.dataset.meta.prediction_length
use_feat_static_cat: true
image: arangatang/masterthesis:gluonts_cpu_commit_4d1a9a0
instance: local
name: TransformerEstimator
TransformerEstimator_m5:
hyperparameters:
cardinality:
- 3049
- 7
- 3
- 10
- 3
context_length:
$eval: $trial.dataset.meta.prediction_length
embedding_dimension: 10
epochs: 100
forecaster_name: gluonts.model.transformer.TransformerEstimator
freq:
$eval: __trial__.dataset.meta.freq
inner_ff_dim_scale: 4
learning_rate: 0.0001
listify_dataset: 'yes'
model_dim: 64
num_workers: 6
prediction_length:
$eval: $trial.dataset.meta.prediction_length
use_feat_static_cat: true
image: arangatang/masterthesis:gluonts_cpu_commit_4d1a9a0
instance: local
name: TransformerEstimator
TransformerEstimator_m4_daily:
hyperparameters:
cardinality:
- 4227
context_length:
$eval: 2 * $trial.dataset.meta.prediction_length
embedding_dimension: 10
epochs: 100
forecaster_name: gluonts.model.transformer.TransformerEstimator
freq:
$eval: __trial__.dataset.meta.freq
inner_ff_dim_scale: 4
learning_rate: 0.001
listify_dataset: 'yes'
model_dim: 16
num_workers: 6
prediction_length:
$eval: $trial.dataset.meta.prediction_length
use_feat_static_cat: true
image: arangatang/masterthesis:gluonts_cpu_commit_4d1a9a0
instance: local
name: TransformerEstimator
|
examples/run_pre_tuned_algorithm/TransformerEstimator/tuned_algorithms.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-20 09:58"
game: "Unreal Tournament 2004"
name: "DM-DarkDays"
author: "<NAME>."
description: "In 3097 Astronauts found a teleporter on the moon, it was brought back\
\ for further study. As they figured out how to open the gate some form of Egyptian\
\ species marched in. For 30 years our worlds clashed untill there was darkness.\
\ It will take great skill and courage to get around the vast plains on a foreign\
\ Egypt and under the surface of Earth, it is indeed evry man for himself."
releaseDate: "2003-06"
attachments:
- type: "IMAGE"
name: "DM-DarkDays_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/D/DM-DarkDays_shot_3.png"
- type: "IMAGE"
name: "DM-DarkDays_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/D/DM-DarkDays_shot_1.png"
- type: "IMAGE"
name: "DM-DarkDays_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/D/DM-DarkDays_shot_2.png"
originalFilename: "dm-darkdays.zip"
hash: "6d53783dcf3eca184b6e1f9975022a442b92e6ac"
fileSize: 23680395
files:
- name: "MichaelM.usx"
fileSize: 41274256
hash: "75b4cea584cfa26dc5bc6417d9e87fbefa03bb54"
- name: "DEBonusMeshes.usx"
fileSize: 197121
hash: "822a1c0d670800f31474faf6000b0630bb72b60a"
- name: "DM-DarkDays.ut2"
fileSize: 24075560
hash: "f7f60edbc5f1b4be9eacadaf59405e1005e98c20"
- name: "DEBonusTextures.utx"
fileSize: 15255505
hash: "d6e69319eb3a496343eba23a3b3f37b8b661e626"
otherFiles: 4
dependencies:
MichaelM.usx:
- status: "MISSING"
name: "Hyde_FusionIII"
- status: "MISSING"
name: "BRuntex"
- status: "MISSING"
name: "myLevel"
DM-DarkDays.ut2:
- status: "MISSING"
name: "BootsOfJumping"
- status: "MISSING"
name: "Graveyard"
- status: "MISSING"
name: "Hyde_FusionIII"
- status: "MISSING"
name: "strawaagony"
- status: "OK"
name: "MichaelM"
- status: "MISSING"
name: "Runmesh1"
- status: "MISSING"
name: "Egyptmeshes"
- status: "MISSING"
name: "Hyde_HFusionIII"
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/DeathMatch/D/dm-darkdays.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/DeathMatch/D/6/d/53783d/dm-darkdays.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/DeathMatch/D/6/d/53783d/dm-darkdays.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DM - DarkDays"
playerCount: "4-16"
themes: {}
bots: true
|
content/Unreal Tournament 2004/Maps/DeathMatch/D/6/d/53783d/dm-darkdays_[6d53783d].yml
|
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Sample serverless data pipeline powered by machine learning
'
Globals:
Function:
Timeout: 30
Outputs:
DataBucket:
Description: Data S3 bucket
Value:
Ref: DataBucket
IndexBucket:
Description: Index S3 bucket
Value:
Ref: IndexBucket
ServerlessMLFuntion:
Description: Serverless ML Lambda Function ARN
Value:
Fn::GetAtt:
- ServerlessMLFuntion
- Arn
ServerlessMLFuntionIamRole:
Description: Implicit IAM Role created for Serverless ML function
Value:
Fn::GetAtt:
- ServerlessMLFuntionRole
- Arn
Resources:
DataBucket:
Properties:
BucketName:
Fn::Sub: ${AWS::StackName}-${AWS::Region}-${AWS::AccountId}-data
Type: AWS::S3::Bucket
IndexBucket:
Type: AWS::S3::Bucket
ServerlessMLFuntion:
Properties:
CodeUri: s3://danilop-packages/2b4fc0a59ad8a4387bf2a35d5b5f0173
Environment:
Variables:
INDEX_BUCKET:
Ref: IndexBucket
Events:
ImageUpload:
Properties:
Bucket:
Ref: DataBucket
Events: s3:ObjectCreated:*
Type: S3
Handler: app.lambdaHandler
Policies:
- Statement:
- Action: s3:GetObject*
Effect: Allow
Resource:
Fn::Sub: arn:aws:s3:::${AWS::StackName}-${AWS::Region}-${AWS::AccountId}-data*
- Statement:
- Action: s3:PutObject
Effect: Allow
Resource:
Fn::Sub: ${IndexBucket.Arn}/*
- Statement:
- Action:
- rekognition:DetectModerationLabels
- rekognition:DetectLabels
- rekognition:DetectText
- rekognition:DetectFaces
- comprehend:DetectDominantLanguage
- comprehend:DetectEntities
- comprehend:DetectKeyPhrases
- comprehend:DetectSentiment
- translate:TranslateText
Effect: Allow
Resource: '*'
Runtime: nodejs8.10
Type: AWS::Serverless::Function
Transform: AWS::Serverless-2016-10-31
|
packaged.yaml
|
- name: Verify Blueprint Source Directory is Defined
ansible.builtin.assert:
fail_msg: Blueprints Source Directory Must be Defined
that:
- blueprint_source_dir is defined
- name: Validate Blueprint Directory and Blueprint File Exists
ansible.builtin.stat:
path: "{{ item }}"
  become: false
delegate_to: localhost
failed_when: not blueprint_exists.stat.exists
loop:
- "{{ blueprint_source_dir }}"
- "{{ blueprint_source_dir }}/blueprint.toml"
register: blueprint_exists
# Build an rpm-ostree edge-container image with Image Builder (osbuild-composer)
# driven through composer-cli.
- name: Create Blueprints Directory
  ansible.builtin.file:
    path: "{{ base_rpm_ostree_blueprints_dir }}"
    state: directory
- name: Set Blueprint Facts
  ansible.builtin.set_fact:
    blueprint_name: "{{ blueprint_source_dir | basename }}"
    build_blueprint_dir: "{{ base_rpm_ostree_blueprints_dir }}/{{ blueprint_source_dir | basename }}"
- name: Copy Blueprints Directory to Image Builder
  ansible.builtin.copy:
    dest: "{{ build_blueprint_dir }}/"
    src: "{{ blueprint_source_dir }}/"
- name: Push Blueprint to Server
  ansible.builtin.command: >
    composer-cli blueprints push "{{ build_blueprint_dir }}/blueprint.toml"
- name: Resolve Dependencies
  ansible.builtin.command: >
    composer-cli blueprints depsolve {{ blueprint_name }}
# -j requests JSON output so the frozen blueprint version can be parsed below.
- name: Freeze Blueprint
  ansible.builtin.command: >
    composer-cli -j blueprints freeze {{ blueprint_name }}
  register: frozen_blueprint
- name: Set Blueprint Version Number
  ansible.builtin.set_fact:
    blueprint_version: "{{ (frozen_blueprint.stdout | from_json).blueprints[0].blueprint.version }}"
- name: Image Builder
  block:
    - name: Compose Image
      ansible.builtin.command: >
        composer-cli -j compose start-ostree {{ blueprint_name }} rhel-edge-container
      register: composed_image
    - name: Set Build Commit ID
      ansible.builtin.set_fact:
        build_commit_id: "{{ (composed_image.stdout | from_json).build_id }}"
    # Poll compose status every 30 s (up to 200 tries) until this compose
    # reports finished with status FINISHED.
    # NOTE(review): failed_when also evaluates true while the compose is still
    # running; with retries/until the loop keeps polling anyway — confirm the
    # intended failure semantics on the final attempt.
    - name: Wait for Image Build to Complete
      ansible.builtin.command: >
        composer-cli -j compose status
      changed_when: false
      delay: 30
      failed_when: >
        not (image_build_status.stdout | from_json | selectattr('id', 'equalto', build_commit_id) | list | first).finished and
        (image_build_status.stdout | from_json | selectattr('id', 'equalto', build_commit_id) | list | first).status != "FINISHED"
      register: image_build_status
      retries: 200
      until: >
        (image_build_status.stdout | from_json | selectattr('id', 'equalto', build_commit_id) | list | first).finished and
        (image_build_status.stdout | from_json | selectattr('id', 'equalto', build_commit_id) | list | first).status == "FINISHED"
    # Persist the compose ID on the control node for later steps.
    - name: Save Build Commit ID to File
      ansible.builtin.copy:
        content: "{{ build_commit_id }}"
        dest: "{{ build_commit_output_file }}"
      become: false
      delegate_to: localhost
|
ansible/roles/oci-build-image/tasks/main.yaml
|
# Landscaper deploy execution: a single kubernetes-manifest DeployItem aimed at
# the imported cluster, installing the networking-calico Gardener extension.
deployItems:
- name: deploy
  type: landscaper.gardener.cloud/kubernetes-manifest
  target:
    name: {{ .imports.cluster.metadata.name }}
    namespace: {{ .imports.cluster.metadata.namespace }}
  config:
    apiVersion: manifest.deployer.landscaper.gardener.cloud/v1alpha2
    kind: ProviderConfiguration
    updateStrategy: update
    manifests:
    # ControllerDeployment: embedded (base64) helm chart plus values for the
    # extension controller; chart and images come from the component descriptor.
    - policy: manage
      manifest:
        apiVersion: core.gardener.cloud/v1beta1
        kind: ControllerDeployment
        metadata:
          name: networking-calico
        type: helm
        providerConfig:
          {{- $chart := getResource .cd "name" "networking-calico-chart" }}
          chart: {{ resolve ( $chart.access ) | toString | b64enc }}
          values:
            image:
              {{- $image := getResource .cd "name" "gardener-extension-networking-calico" }}
              repository: {{ ociRefRepo ( $image.access.imageReference ) }}
              tag: {{ ociRefVersion ( $image.access.imageReference ) }}
            # Resource requests/limits, VPA and concurrency settings can be
            # overridden via imports; otherwise sensible defaults are rendered.
            resources:
              {{- if .imports.controllerRegistration.resources }}
              {{- toYaml .imports.controllerRegistration.resources | nindent 14 }}
              {{- else }}
              requests:
                cpu: "20m"
                memory: "64Mi"
              limits:
                cpu: "100m"
                memory: "256Mi"
              {{- end }}
            vpa:
              {{- if .imports.controllerRegistration.vpa }}
              {{- toYaml .imports.controllerRegistration.vpa | nindent 14}}
              {{- else }}
              enabled: true
              updatePolicy:
                updateMode: "Auto"
              {{- end }}
            controller:
              {{- if .imports.controllerRegistration.concurrentSyncs }}
              concurrentSyncs: {{ .imports.controllerRegistration.concurrentSyncs }}
              healthcheck:
                concurrentSyncs: {{ .imports.controllerRegistration.concurrentSyncs }}
              {{- else }}
              concurrentSyncs: 20
              healthcheck:
                concurrentSyncs: 20
              {{- end }}
            {{- if .imports.imageVectorOverwrite }}
            imageVectorOverwrite: |
              {{- toYaml .imports.imageVectorOverwrite | nindent 14 }}
            {{- end }}
    # ControllerRegistration: registers the extension as the primary handler
    # for Network resources of type calico.
    - policy: manage
      manifest:
        apiVersion: core.gardener.cloud/v1beta1
        kind: ControllerRegistration
        metadata:
          name: networking-calico
        spec:
          deployment:
            deploymentRefs:
            - name: networking-calico
          resources:
          - kind: Network
            type: calico
            primary: true
|
.landscaper/blueprint/deploy-executions.yaml
|
# Travis CI matrix for LuaRocks: smoke tests on release branches, then unit and
# integration stages on Linux and macOS across Lua 5.1-5.3 and LuaJIT 2.1.
language: python
# Cache the hererocks-built Lua installs and test fixtures between runs.
cache:
  directories:
    - lua_install
    - testrun/testing_cache-5.1
    - testrun/testing_cache-5.2
    - testrun/testing_cache-5.3
    - testrun/testing_cache-2.1
    - testrun/testing_server-5.1
    - testrun/testing_server-5.2
    - testrun/testing_server-5.3
    - testrun/testing_server-2.1
    - testrun/binary-samples
# Reusable step lists (YAML anchors), referenced from the job matrix below.
smoke_script: &smoke_script
  - export BRANCH=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_BRANCH; else echo $TRAVIS_PULL_REQUEST_BRANCH; fi)
  - ./makedist $BRANCH
  - ./smoke_test.sh luarocks-$BRANCH.tar.gz
unit_script: &unit_script
  - busted -o gtest --exclude-tags=git,integration --verbose -Xhelper "lua_dir=$PWD/lua_install,travis"
  - busted -o gtest --exclude-tags=git,integration --verbose -Xhelper "lua_dir=$PWD/lua_install,travis,env=full"
integration_script: &integration_script
  - lua -v
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then shellcheck ./configure; fi
  - ./configure --with-lua=lua_install
  - ./makedist dev
  - ./smoke_test.sh luarocks-dev.tar.gz
  - busted -o gtest --exclude-tags=gpg,git,unit --verbose -Xhelper "lua_dir=$PWD/lua_install,travis"
  - busted -o gtest --exclude-tags=gpg,git,unit --verbose -Xhelper "lua_dir=$PWD/lua_install,travis,env=full"
jobs:
  include:
    # Smoke tests for release branches
    - stage: smoke
      if: branch =~ [0-9].*
      script: *smoke_script
      os: linux
      env:
        - LUA="lua=5.3"
    - stage: smoke
      if: branch =~ [0-9].*
      script: *smoke_script
      os: osx
      language: generic
      env:
        - LUA="luajit=2.1"
    # Unit tests for linux
    - stage: Unit on Linux
      script: *unit_script
      os: linux
      env:
        - LUA="lua=5.1"
    - stage: Unit on Linux
      script: *unit_script
      os: linux
      env:
        - LUA="lua=5.2"
    - stage: Unit on Linux
      script: *unit_script
      os: linux
      env:
        - LUA="lua=5.3"
    - stage: Unit on Linux
      script: *unit_script
      os: linux
      env:
        - LUA="luajit=2.1"
    # Integration tests for linux
    - stage: Integration on Linux
      script: *integration_script
      os: linux
      env:
        - LUA="lua=5.1"
    - stage: Integration on Linux
      script: *integration_script
      os: linux
      env:
        - LUA="lua=5.2"
    - stage: Integration on Linux
      script: *integration_script
      os: linux
      env:
        - LUA="lua=5.3"
    - stage: Integration on Linux
      script: *integration_script
      os: linux
      env:
        - LUA="luajit=2.1"
    # Unit tests for mac
    - stage: Unit on Mac
      script: *unit_script
      os: osx
      env:
        - LUA="lua=5.1"
      language: generic
    - stage: Unit on Mac
      script: *unit_script
      os: osx
      env:
        - LUA="lua=5.2"
      language: generic
    - stage: Unit on Mac
      script: *unit_script
      os: osx
      env:
        - LUA="lua=5.3"
      language: generic
    - stage: Unit on Mac
      script: *unit_script
      os: osx
      env:
        - LUA="luajit=2.1"
      language: generic
    # Integration tests for mac
    - stage: Integration on Mac
      script: *integration_script
      os: osx
      env:
        - LUA="lua=5.1"
      language: generic
    - stage: Integration on Mac
      script: *integration_script
      os: osx
      env:
        - LUA="lua=5.2"
      language: generic
    - stage: Integration on Mac
      script: *integration_script
      os: osx
      env:
        - LUA="lua=5.3"
      language: generic
    - stage: Integration on Mac
      script: *integration_script
      os: osx
      env:
        - LUA="luajit=2.1"
      language: generic
# Bootstrap the requested Lua via hererocks only when the cached install is absent.
before_install:
  - if [ ! -f lua_install/bin/luarocks ]; then pip install hererocks; fi
  - if [ ! -f lua_install/bin/luarocks ]; then hererocks lua_install -r^ --$LUA; fi
  - export PATH=$PATH:$PWD/lua_install/bin # Add directory with all installed binaries to PATH
install:
  - if [ ! -f lua_install/bin/busted ]; then luarocks install busted; fi
  - if [ ! -f lua_install/bin/luacov ]; then luarocks install cluacov; fi
# Merge coverage, upload to codecov, and echo the summary into the build log.
after_success:
  - luacov -c $TRAVIS_BUILD_DIR/testrun/luacov.config
  - cd $TRAVIS_BUILD_DIR/testrun/ && bash <(curl -s https://codecov.io/bash)
  - grep "Summary" -B1 -A1000 $TRAVIS_BUILD_DIR/testrun/luacov.report.out
notifications:
  email:
    on_success: change
    on_failure: change
  webhooks:
    urls:
      - https://webhooks.gitter.im/e/3320fc3f28abbb06e1d8
    on_success: change
    on_failure: always
    on_start: never
|
.deps/build/src/luarocks/.travis.yml
|
# DocFX ManagedReference metadata for the CRMScript native "Inbox" class:
# one entry per member (class, constructor, addSmtpMail method).
items:
- uid: CRMScript.Native.Inbox
  commentId: T:CRMScript.Native.Inbox
  id: Inbox
  langs:
  - crmscript
  children:
  - CRMScript.Native.Inbox.#ctor
  - CRMScript.Native.Inbox.addSmtpMail(String,String,Integer)
  name: Inbox
  nameWithType: Inbox
  fullName: CRMScript.Native.Inbox
  type: Class
  summary: "\nA class for managing the inbox.\n"
  # NOTE(review): keys with no value (example, remarks) parse as null —
  # presumably intentional placeholders in this generated metadata.
  example:
- uid: CRMScript.Native.Inbox.#ctor
  commentId: M:CRMScript.Native.Inbox.#ctor
  id: '#ctor'
  so.intellisense: Void.Inbox
  langs:
  - crmscript
  name: Inbox()
  nameWithType: Inbox.#ctor
  fullName: CRMScript.Native.Inbox.#ctor
  type: Constructor
  summary: "\nInitializes a new instance of the Inbox class.\n"
  remarks:
  syntax:
    content: Inbox
  example:
- uid: CRMScript.Native.Inbox.addSmtpMail(String,String,Integer)
  commentId: M:CRMScript.Native.Inbox.addSmtpMail(String,String,Integer)
  id: 'addSmtpMail(String,String,Integer)'
  so.intellisense: Inbox.addSmtpMail
  langs:
  - crmscript
  name: 'addSmtpMail(String,String,Integer)'
  nameWithType: Inbox.addSmtpMail(String,String,Integer)
  fullName: CRMScript.Native.Inbox.addSmtpMail(String,String,Integer)
  type: Method
  summary: "\nInserts an SMTP formatted email into Service's inbox, which then will be imported when ejournalCron runs.\n"
  remarks:
  syntax:
    content: Integer addSmtpMail(String smtpMail, String uidl, Integer filterId)
    parameters:
    - id: smtpMail
      type: CRMScript.Global.String
      description: "A string with an SMTP formatted mail. Must conform to the SMTP standard."
    - id: uidl
      type: CRMScript.Global.String
      description: "The UIDL for this email. Can be anything, but it has to be unique for each email"
    - id: filterId
      type: CRMScript.Global.Integer
      description: "The ID of the mailbox that this mail will be imported under."
    return:
      type: CRMScript.Global.Integer
      description: "The ID of the inserted email."
  example:
# Cross-reference entries resolved by DocFX when rendering type links.
references:
- uid: CRMScript.Native.Inbox
  commentId: T:CRMScript.Native.Inbox
  isExternal: true
  name: Inbox
  nameWithType: Inbox
  fullName: CRMScript.Native.Inbox
- uid: CRMScript.Global.String
  commentId: T:CRMScript.Global.String
  isExternal: true
  name: String
  nameWithType: String
  fullName: CRMScript.Global.String
- uid: CRMScript.Global.Integer
  commentId: T:CRMScript.Global.Integer
  isExternal: true
  name: Integer
  nameWithType: Integer
  fullName: CRMScript.Global.Integer
|
docs/api-reference/CRMScript.Native.Inbox.yml
|
---
# Helm template: Deployment running the Codefresh kube-agent, which registers
# this cluster with the Codefresh API using credentials from the release secret.
apiVersion: apps/v1beta2  # NOTE(review): apps/v1beta2 was removed in Kubernetes 1.16 — confirm target clusters, else use apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-kube-agent
  labels:
    app.kubernetes.io/name: "{{ .Release.Name }}"
    app.kubernetes.io/component: codefresh-kube-agent
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: "{{ .Release.Name }}"
      app.kubernetes.io/component: codefresh-kube-agent
  template:
    metadata:
      labels:
        app.kubernetes.io/name: "{{ .Release.Name }}"
        app.kubernetes.io/component: codefresh-kube-agent
    spec:
      serviceAccountName: {{ .Values.serviceAccount }}
      containers:
      - image: {{ .Values.imageCodefresh }}
        imagePullPolicy: Always
        name: kube-agent
        env:
        # Codefresh API endpoint and token (token from the release secret).
        - name: API_HOST
          value: {{ .Values.codefreshApiEndpoint | default "https://g.codefresh.io" | quote }}
        - name: API_TOKEN
          valueFrom:
            secretKeyRef:
              name: {{ .Release.Name }}-secret
              key: codefreshApiKey
        - name: CLUSTER_NAME
          value: gke-{{ .Release.Namespace }}-{{ .Release.Name }}
        - name: KUBE_ENDPOINT
          valueFrom:
            secretKeyRef:
              name: {{ .Release.Name }}-secret
              key: kubeEndpoint
        # Own namespace / service account, resolved via the downward API.
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: SERVICE_ACCOUNT_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.serviceAccountName
        # Optional GCP Marketplace usage-reporting credentials; rendered only
        # when reportingSecret is set. NOTE(review): the un-chomped if/end
        # actions leave blank lines in the rendered manifest — harmless.
        {{ if .Values.reportingSecret }}
        - name: GOOGLE_ENTITLEMENT_ID
          valueFrom:
            secretKeyRef:
              name: {{ .Values.reportingSecret }}
              key: entitlement-id
        - name: GOOGLE_CONSUMER_ID
          valueFrom:
            secretKeyRef:
              name: {{ .Values.reportingSecret }}
              key: consumer-id
        - name: GOOGLE_REPORTING_KEY
          valueFrom:
            secretKeyRef:
              name: {{ .Values.reportingSecret }}
              key: reporting-key
        {{ end }}
|
codefresh/chart/codefresh-gcp-deployer/templates/deployment.yaml
|
---
# RuboCop TODO: Rails/Pick is disabled file-wide; remove exclusions below as
# the listed files are cleaned up, then re-enable the cop.
# Cop supports --auto-correct.
Rails/Pick:
  # Offense count: 42
  # Temporarily disabled due to too many offenses
  Enabled: false
  Exclude:
    - 'app/models/ci/pipeline.rb'
    - 'app/models/merge_request.rb'
    - 'app/models/merge_request/metrics.rb'
    - 'app/models/merge_request_diff.rb'
    - 'db/post_migrate/20210825193652_backfill_cadence_id_for_boards_scoped_to_iteration.rb'
    - 'db/post_migrate/20220213103859_remove_integrations_type.rb'
    - 'db/post_migrate/20220412143552_consume_remaining_encrypt_integration_property_jobs.rb'
    - 'ee/app/models/concerns/epic_tree_sorting.rb'
    - 'ee/app/models/ee/group.rb'
    - 'ee/app/models/ee/namespace.rb'
    - 'ee/app/models/geo/project_registry.rb'
    - 'ee/lib/analytics/merge_request_metrics_calculator.rb'
    - 'ee/lib/ee/gitlab/background_migration/backfill_iteration_cadence_id_for_boards.rb'
    - 'ee/lib/ee/gitlab/background_migration/populate_status_column_of_security_scans.rb'
    - 'ee/spec/finders/security/findings_finder_spec.rb'
    - 'lib/gitlab/background_migration/backfill_ci_namespace_mirrors.rb'
    - 'lib/gitlab/background_migration/backfill_ci_project_mirrors.rb'
    - 'lib/gitlab/background_migration/backfill_integrations_type_new.rb'
    - 'lib/gitlab/background_migration/backfill_namespace_traversal_ids_children.rb'
    - 'lib/gitlab/background_migration/backfill_namespace_traversal_ids_roots.rb'
    - 'lib/gitlab/background_migration/backfill_work_item_type_id_for_issues.rb'
    - 'lib/gitlab/background_migration/batching_strategies/backfill_project_namespace_per_group_batching_strategy.rb'
    - 'lib/gitlab/background_migration/batching_strategies/primary_key_batching_strategy.rb'
    - 'lib/gitlab/background_migration/cleanup_orphaned_lfs_objects_projects.rb'
    - 'lib/gitlab/background_migration/drop_invalid_security_findings.rb'
    - 'lib/gitlab/background_migration/encrypt_static_object_token.rb'
    - 'lib/gitlab/background_migration/fix_first_mentioned_in_commit_at.rb'
    - 'lib/gitlab/background_migration/populate_vulnerability_reads.rb'
    - 'lib/gitlab/background_migration/update_timelogs_null_spent_at.rb'
    - 'lib/gitlab/database/dynamic_model_helpers.rb'
    - 'lib/gitlab/database/migrations/background_migration_helpers.rb'
    - 'lib/gitlab/database/partitioning_migration_helpers/backfill_partitioned_table.rb'
    - 'lib/gitlab/github_import/user_finder.rb'
    - 'lib/gitlab/relative_positioning/item_context.rb'
    - 'spec/lib/gitlab/background_migration/backfill_snippet_repositories_spec.rb'
    - 'spec/requests/projects/cycle_analytics_events_spec.rb'
|
.rubocop_todo/rails/pick.yml
|
{{- if .Values.web.enabled -}}
# Service exposing the Concourse web/ATC component (HTTP + TLS ports).
apiVersion: v1
kind: Service
metadata:
  name: {{ template "concourse.web.fullname" . }}
  namespace: {{ .Release.Namespace | quote }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
    app.kubernetes.io/component: web
  {{- if .Values.service.web.labels }}
  {{- include "common.tplvalues.render" ( dict "value" .Values.service.web.labels "context" $ ) | nindent 4 }}
  {{- end }}
  {{- if .Values.commonLabels }}
  {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
  {{- end }}
  {{- if or .Values.commonAnnotations .Values.service.web.annotations }}
  annotations:
    {{- if .Values.service.web.annotations }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.service.web.annotations "context" $ ) | nindent 4 }}
    {{- end }}
    {{- if .Values.commonAnnotations }}
    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  type: {{ .Values.service.web.type }}
  {{- if (or (eq .Values.service.web.type "LoadBalancer") (eq .Values.service.web.type "nodePort")) }}
  externalTrafficPolicy: {{ .Values.service.web.externalTrafficPolicy | quote }}
  {{- end }}
  {{- if eq .Values.service.web.type "LoadBalancer" }}
  {{- /* toJson emits a valid YAML/JSON list; the bare value rendered
         Go's fmt syntax ("[a b]"), which is invalid for multiple entries. */}}
  loadBalancerSourceRanges: {{ .Values.service.web.loadBalancerSourceRanges | toJson }}
  {{- end }}
  {{- if (and (eq .Values.service.web.type "LoadBalancer") (not (empty .Values.service.web.loadBalancerIP))) }}
  loadBalancerIP: {{ .Values.service.web.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.web.clusterIP }}
  clusterIP: {{ .Values.service.web.clusterIP }}
  {{- end }}
  ports:
    # Plain HTTP endpoint.
    - name: http
      port: {{ .Values.service.web.port }}
      targetPort: http
      {{- if and (eq "nodePort" .Values.service.web.type) .Values.service.web.nodePort }}
      nodePort: {{ .Values.service.web.nodePort }}
      {{- end }}
    # TLS endpoint.
    - name: https
      port: {{ .Values.service.web.tlsPort }}
      targetPort: https
      {{- if and (eq "nodePort" .Values.service.web.type) .Values.service.web.tlsNodePort }}
      nodePort: {{ .Values.service.web.tlsNodePort }}
      {{- end }}
  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
    app.kubernetes.io/component: web
{{- end }}
|
bitnami/concourse/templates/web/service.yaml
|
# Jekyll navigation data: top-bar menu entries with "side" placement (left or
# right) and optional dropdown sub-menus; titles may embed inline HTML.
- title: Home
  url: "/"
  side: left
- title: 우리의 미션
  url: "/our-mission/"
  side: left
- title: "ABLESTACK"
  url: "/ablestack/"
  side: left
  dropdown:
    - title: "<u>핵심 인프라</u>"
      url: "/ablestack/#core-infra"
    - title: " Glue - 분산스토리지"
      url: "/ablestack/#glue"
    - title: " Cell - 가상화"
      url: "/ablestack/#cell"
    - title: " Mold - 인프라 및 IT운영"
      url: "/ablestack/#mold"
    - title: " Koral - 쿠버네테스 클러스터"
      url: "/ablestack/#koral"
    - title: "<u>IT운영 및 DevOps</u>"
      url: "/ablestack/#it-ops"
    - title: " Wall - 인프라 및 App 모니터링"
      url: "/ablestack/#wall"
    - title: " Genie - 애플리케이션 관리"
      url: "/ablestack/#genie"
    - title: " Station - Platform as a Service"
      url: "/ablestack/#station"
    - title: "<u>스토리지 및 데이터베이스</u>"
      url: "/ablestack/#storage"
    - title: " Volume - 블록 스토리지"
      url: "/ablestack/#volume"
    - title: " Files - 파일 시스템"
      url: "/ablestack/#file"
    - title: " Objects - 오브젝트 스토리지"
      url: "/ablestack/#object"
    - title: "<u>네트워크 및 보안</u>"
      url: "/ablestack/#network"
    - title: " Track - 오버레이 가상 네트워킹"
      url: "/ablestack/#track"
    - title: " Atom - 마이크로 세그멘테이션"
      url: "/ablestack/#atom"
    - title: "<u>엔드유저 서비스</u>"
      url: "/ablestack/#enduser"
    - title: " Works - Desktop as a Service"
      url: "/ablestack/#works"
    - title: " Mommoss - 전문가 협업도구"
      url: "/ablestack/#mommoss"
- title: "솔루션"
  url: "/solution/"
  side: left
  dropdown:
    - title: "프라이빗클라우드"
      url: "/solution/#private"
    - title: "빅데이터분석"
      url: "/solution/#bigdata"
    - title: "감시시스템"
      url: "/solution/#surveil"
    - title: "인공지능/머신러닝"
      url: "/solution/#ai"
    - title: "최종사용자컴퓨팅"
      url: "/solution/#enduser"
    - title: "개발/테스트"
      url: "/solution/#dev"
    - title: "엣지컴퓨팅"
      url: "/solution/#edge"
- title: 파트너
  url: "/partner/"
  side: left
- title: Blog
  url: "/blog/"
  side: right
  dropdown:
    - title: "Blog Archive"
      url: "/blog/archive/"
- title: "지원 및 서비스"
  url: "/contact/"
  side: right
_data/navigation.yml
|
---
# tasks file for linux
#
# Read per-customer VM specs on the control node; the CSV has no header row, so
# field names are supplied explicitly.
- name: Extract information from customer information spreadsheet
  read_csv:
    path: "/path/to/customerSpecs.csv"
    fieldnames: uuid,template,srvName,customCpu,customMem,ipv4Addr,ipv4Mask,ipv4Gw,epgName,bdName,packProfile
    delimiter: ','
  register: datacust
  delegate_to: localhost

# One looped task replaces nine copy-pasted apt_repository tasks; the same
# FR-mirror bionic pocket/component combinations are added, state present.
- name: Add Ubuntu bionic repositories [FR] into sources list
  apt_repository:
    repo: "deb http://fr.archive.ubuntu.com/ubuntu {{ item }}"
    state: present
  loop:
    - bionic main restricted
    - bionic-updates main restricted
    - bionic universe
    - bionic-updates universe
    - bionic multiverse
    - bionic-updates multiverse
    - bionic-security main restricted
    - bionic-security universe
    - bionic-security multiverse

- name: Update apt-get repo and cache update
  apt:
    update_cache: true
    force_apt_get: true
    cache_valid_time: 3600

# Each host's role is derived from its FQDN prefix (FRONT*/APP*/DB*).
- name: Install apache2 on Front Servers
  apt:
    name: apache2
    force_apt_get: true
    state: present
    update_cache: true
  when: ansible_fqdn is search("FRONT.*")

- name: Install Tomcat9 on App Servers
  apt:
    name: tomcat9
    force_apt_get: true
    state: present
    update_cache: true
  when: ansible_fqdn is search("APP.*")

- name: Install postgresql on DB Servers
  apt:
    name: postgresql
    force_apt_get: true
    # Bug fix: was "state: absent", which *removed* postgresql on DB servers
    # even though the task name says install.
    state: present
    update_cache: true
  when: ansible_fqdn is search("DB.*")
|
LINUX AND ACI MISC OPS/linux/tasks/main.yml
|
# Sylius resource-controller routing for the Quiz entity: generated CRUD routes
# plus partial (latest/upcoming) and custom (show/results) admin actions.
app_quiz:
  resource: |
    alias: app.quiz
    templates: PlatformAdminBundle:Crud
    except: ['show', 'bulkDelete', 'delete']
    redirect: index
    grid: app_admin_quiz
    vars:
      all:
        subheader: app.ui.manage_quizzes
      index:
        icon: globe
  type: sylius.resource
# Partial route rendering the current user's latest quizzes.
app_admin_partial_quiz_latest:
  path: /quizzes/latest
  methods: [GET]
  defaults:
    _controller: app.controller.quiz:indexAction
    _sylius:
      repository:
        method: findLatest
        arguments:
          - "expr:service('App\\\\Provider\\\\UserProvider').getUser()"
          # NOTE(review): quoted, so this is the literal string "!!int $count"
          # handed to Sylius — presumably resolved downstream; confirm.
          - '!!int $count'
      template: $template
      permission: true
app_admin_partial_quiz_upcoming:
  path: /quizzes/upcoming
  methods: [GET]
  defaults:
    _controller: app.controller.quiz:indexAction
    _sylius:
      repository:
        method: findUpcoming
        arguments:
          - "expr:service('App\\\\Provider\\\\UserProvider').getUser()"
          - '!!int $count'
      template: $template
      permission: true
app_quiz_show:
  path: /quizzes/{id}
  methods: [GET]
  defaults:
    _controller: app.controller.quiz:showAction
    _sylius:
      section: admin
      permission: true
      template: "@PlatformAdmin/Quiz/show.html.twig"
      vars:
        templates:
          toolbar: "@PlatformAdmin/Quiz/Show/_toolbar.html.twig"
# GET renders the generation form; POST triggers result generation and
# redirects back to the referer.
app_quiz_results:
  path: /quizzes/{id}/results
  methods: [GET, POST]
  defaults:
    _controller: app.controller.quiz:generateResultsAction
    _sylius:
      section: admin
      permission: true
      redirect: referer
      template: "@PlatformAdmin/Quiz/Results/generate.html.twig"
      vars:
        header: app.ui.results
        subheader: app.ui.generate_results
        icon: file
        templates:
          breadcrumb: "@PlatformAdmin/Quiz/Results/_breadcrumb.html.twig"
|
config/routes/admin/app_quiz.yaml
|
# CircleCI pipeline: one build job that installs, lints and tests the main
# package, uploads coverage, then builds/lints each bundled example project.
version: 2 # use CircleCI 2.0
jobs:
  build:
    working_directory: ~/keycloak-lambda-authorizer # directory where steps will run
    docker: # run the steps with Docker
      - image: circleci/node:12.16.1 # ...with this image as the primary container; this is where all `steps` will run
    steps: # a collection of executable commands
      - checkout # check out source code to working directory
      - run:
          name: build source
          command: npm i && cd keycloak-cloudfront-dynamodb && npm i
      - run:
          name: lint
          command: npm run lint
      - run:
          name: test
          command: npm run test
      - run:
          name: publish coveralls
          command: node ./node_modules/coveralls/bin/coveralls.js < .coverage/lcov.info
      # Keep the raw coverage output downloadable from the job page.
      - store_artifacts:
          path: .coverage
      # Example projects: each is installed (and linted/built where applicable)
      # to make sure the published API still works for consumers.
      - run:
          name: build example/keycloak-authorizer serverless
          command: cd example/keycloak-authorizer/serverless && npm i
      - run:
          name: lint example/keycloak-authorizer serverless
          command: cd example/keycloak-authorizer/serverless && npm run lint
      - run:
          name: build example/keycloak-authorizer serverless-jwks
          command: cd example/keycloak-authorizer/serverless-jwks && npm i
      - run:
          name: lint example/keycloak-authorizer serverless-jwks
          command: cd example/keycloak-authorizer/serverless-jwks && npm run lint
      - run:
          name: build example/keycloak-authorizer ui
          command: cd example/keycloak-authorizer/ui && npm i
      - run:
          name: lint example/keycloak-authorizer ui
          command: cd example/keycloak-authorizer/ui && npm run lint
      - run:
          name: build example/keycloak-cloudfront cloudfront-cdk
          command: cd example/keycloak-cloudfront/keycloak-cloudfront-cdk && npm i
      - run:
          name: build example/keycloak-cloudfront lambda-edge-example
          command: cd example/keycloak-cloudfront/lambda-edge-example && npm i && npm run build
      - run:
          name: build example/keycloak-cloudfront cloudfront
          command: cd example/keycloak-cloudfront && npm i && npm run build
      - run:
          name: lint example/keycloak-cloudfront cloudfront
          command: cd example/keycloak-cloudfront && npm run lint
      - run:
          name: build example/keycloak-cloudfront-portal cloudfront-cdk
          command: cd example/keycloak-cloudfront-portal/keycloak-cloudfront-cdk && npm i
      - run:
          name: build example/keycloak-cloudfront-portal lambda-edge-example
          command: cd example/keycloak-cloudfront-portal/lambda-edge-example && npm i && npm run build
      - run:
          name: build example/keycloak-cloudfront-portal cloudfront
          command: cd example/keycloak-cloudfront-portal && npm i && npm run build
      - run:
          name: lint example/keycloak-cloudfront-portal cloudfront
          command: cd example/keycloak-cloudfront-portal && npm run lint
|
.circleci/config.yml
|
---
# Install Apache with security hardening (mod_evasive, mod_security, fail2ban)
# and expose the Zuul status pages, optionally over SSL / Let's Encrypt.
- name: Install and config apache2
  hosts: apache
  become: true
  tasks:
    # Certbot from its PPA, plus extraction of a pre-packaged, password-
    # protected Let's Encrypt state archive into /etc/.
    - name: Install certbot
      when: config_cerbot|default(true)|bool
      shell: |
        apt-get install -y software-properties-common
        add-apt-repository -y universe
        add-apt-repository -y ppa:certbot/certbot
        apt-get update
        apt-get install -y certbot python-certbot-apache zip
        unzip -P {{ letsencrypt_unzip_password }} {{ letsencrypt_zip_file_src }} -d /etc/
    - name: Install apache2 packages
      apt:
        name:
          - apache2
          - libapache2-mod-wsgi
          - libapache2-mod-evasive
          - libapache2-modsecurity
          - fail2ban
        update_cache: true
        state: latest
    - name: Get the local hostname
      shell: hostname
      register: real_hostname
    - name: Update the /etc/hosts file with node name if possible
      lineinfile:
        path: "/etc/hosts"
        regexp: "^127.0.0.1"
        line: "127.0.0.1 {{ real_hostname.stdout }} localhost"
    - name: Configure apache listening ports
      become: true
      copy:
        dest: "{{ apache_ports_config_dest }}"
        mode: "{{ apache_ports_config_mode }}"
        src: "{{ apache_ports_config_src }}"
    - name: Configure apache zuul status pages
      become: true
      template:
        dest: "{{ apache_zuul_status_page_config_dest }}"
        mode: "{{ apache_zuul_status_page_config_mode }}"
        src: "{{ apache_zuul_status_page_config_src }}"
    # SSL vhost is only configured when a template source is provided.
    - name: Configure apache zuul ssl status pages
      become: true
      template:
        dest: "{{ apache_zuul_status_ssl_page_config_dest }}"
        mode: "{{ apache_zuul_status_ssl_page_config_mode }}"
        src: "{{ apache_zuul_status_ssl_page_config_src }}"
      when:
        - apache_zuul_status_ssl_page_config_src is defined
    - name: Create evasive log directory
      file:
        path: /var/log/mod_evasive
        state: directory
    - name: Copy evasive config file into place
      copy:
        src: '{{ apache_mod_evasive_conf_file_src }}'
        dest: '{{ apache_mod_evasive_conf_file_dest }}'
    - name: Create modsecurity config file
      command: cp /etc/modsecurity/modsecurity.conf-recommended /etc/modsecurity/modsecurity.conf
    - name: Modify modsecurity config file
      lineinfile:
        dest: /etc/modsecurity/modsecurity.conf
        regexp: '^SecRuleEngine DetectionOnly'
        line: 'SecRuleEngine On'
    # NOTE(review): the first item re-copies the mod_evasive config already
    # handled above — harmless duplication, likely a leftover to prune.
    - name: Copy fail2ban config files into place
      copy:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
      with_items:
        - { src: "{{ apache_mod_evasive_conf_file_src }}", dest: "{{ apache_mod_evasive_conf_file_dest }}" }
        - { src: "{{ fail2ban_jail_file_src }}", dest: "{{ fail2ban_jail_file_dest }}" }
      notify: restart fail2ban
    - name: Enable dependent apache modules
      apache2_module:
        name: '{{ item }}'
      with_items:
        - evasive
        - security2
        - proxy
        - proxy_http
        - ssl
        - xml2enc
        - rewrite
        - headers
        - proxy_wstunnel
    - name: Enable zuul site page
      shell: |
        a2dissite 000-default
        a2ensite zuul-web
      notify: restart apache2
    - name: Enable zuul site ssl page
      shell: |
        a2ensite zuul-web-ssl
      notify: restart apache2
      when:
        - apache_zuul_status_ssl_page_config_src is defined
  handlers:
    # Block-style module args instead of key=value strings (Ansible best practice).
    - name: restart fail2ban
      service:
        name: fail2ban
        state: "{{ apache_service_state|default('restarted') }}"
    - name: restart apache2
      service:
        name: apache2
        state: "{{ apache_service_state|default('restarted') }}"
|
playbooks/apache.yaml
|
name: CI
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
  schedule:
    # Prime the caches every Monday.
    # Quoted so the cron string cannot be mis-parsed as YAML syntax.
    - cron: '0 1 * * MON'
jobs:
  build:
    name: Build and test
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os:
          - macos-latest
          - ubuntu-latest
        ocaml-compiler:
          - ocaml-base-compiler.4.11.2
        # Add windows once Dream does not depend on `conf-libev`
        # include:
        #   - os: windows-latest
        #     ocaml-compiler: ocaml-variants.4.11.2+mingw64c
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Use OCaml ${{ matrix.ocaml-compiler }}
        # Replace with official action when opam.2.1.0 is supported
        # See https://github.com/ocaml/setup-ocaml/pull/184
        # uses: ocaml/setup-ocaml@v2
        uses: tmattio/setup-ocaml@skip-opam-depext
        with:
          ocaml-compiler: ${{ matrix.ocaml-compiler }}
          # Enable the dune cache everywhere except macOS.
          dune-cache: ${{ matrix.os != 'macos-latest' }}
      # libev is needed by the conf-libev dependency noted above.
      - run: sudo apt-get install libev-dev
        if: runner.os == 'Linux'
      - run: brew install libev
        if: runner.os == 'macOS'
      - run: opam install --deps-only --with-test .
      - run: npm ci
      - run: make build
      - run: make test
  lint-fmt:
    strategy:
      matrix:
        ocaml-compiler:
          - ocaml-base-compiler.4.11.2
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Use OCaml ${{ matrix.ocaml-compiler }}
        # Replace with official action when opam.2.1.0 is supported
        # See https://github.com/ocaml/setup-ocaml/pull/184
        # uses: ocaml/setup-ocaml@v2
        uses: tmattio/setup-ocaml@skip-opam-depext
        with:
          ocaml-compiler: ${{ matrix.ocaml-compiler }}
          dune-cache: true
      - run: sudo apt-get install libev-dev
        if: runner.os == 'Linux'
      - run: brew install libev
        if: runner.os == 'macOS'
      # Pin ocamlformat so the formatting check is reproducible.
      - run: opam install ocamlformat.0.19.0
      - run: opam install --deps-only .
      - run: make fmt
.github/workflows/ci.yml
|
name: publish
# Release pipeline triggered by v* tags: Docker image, GitHub release binaries,
# and the npm package (see the sibling jobs below).
on:
  push:
    tags:
      - v*
jobs:
  docker:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      # Strip the leading "refs/tags/v" (11 chars) to get the bare version.
      # NOTE(review): the ::set-output workflow command is deprecated by
      # GitHub — migrate to writing $GITHUB_OUTPUT when convenient.
      - name: Set Version
        id: version
        run: |
          echo ::set-output name=VERSION::${GITHUB_REF:11}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - run: echo $DOCKER_TOKEN | docker login --username no0dles --password-stdin
        env:
          DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
      # Multi-arch buildx build pushed with both the default and version tags.
      - name: Build and push image
        run: docker buildx build . --platform linux/arm64,linux/amd64 -t no0dles/hammerkit -t no0dles/hammerkit:$IMAGE_TAG --push
        env:
          IMAGE_TAG: ${{ steps.version.outputs.VERSION }}
  # Build standalone binaries with pkg and attach them to a GitHub release.
  binaries:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Setup node
        uses: actions/setup-node@v2
        with:
          node-version: '16'
      # NOTE(review): ::set-output is deprecated — migrate to $GITHUB_OUTPUT.
      - name: Set Version
        id: version
        run: |
          echo ::set-output name=VERSION::${GITHUB_REF:11}
      - name: Install npm packages
        run: npm ci
      - run: node_modules/.bin/tsc
      - name: Build binaries
        run: node_modules/.bin/pkg --no-bytecode -t "node16-linux-x64,node16-linux-arm64,node16-macos-x64,node16-macos-arm64,node16-win-x64,node16-win-arm64" .
      # NOTE(review): actions/create-release and upload-release-asset are
      # archived/unmaintained — consider softprops/action-gh-release.
      # Output names are matched case-insensitively, so outputs.version below
      # resolves the VERSION output set above — confirm against GH docs.
      - name: Create release
        uses: actions/create-release@v1
        id: create_release
        with:
          draft: false
          prerelease: false
          release_name: ${{ steps.version.outputs.version }}
          tag_name: ${{ github.ref }}
        env:
          GITHUB_TOKEN: ${{ github.token }}
      - name: upload linux arm64 artifact
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./hammerkit-linux-arm64
          asset_name: hammerkit-linux-arm64
          asset_content_type: application/octet-stream
      - name: upload linux x64 artifact
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./hammerkit-linux-x64
          asset_name: hammerkit-linux-x64
          asset_content_type: application/octet-stream
      - name: upload macos arm64 artifact
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./hammerkit-macos-arm64
          asset_name: hammerkit-macos-arm64
          asset_content_type: application/octet-stream
      - name: upload macos x64 artifact
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./hammerkit-macos-x64
          asset_name: hammerkit-macos-x64
          asset_content_type: application/octet-stream
      - name: upload win arm64 artifact
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./hammerkit-win-arm64.exe
          asset_name: hammerkit-win-arm64.exe
          asset_content_type: application/octet-stream
      - name: upload win x64 artifact
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./hammerkit-win-x64.exe
          asset_name: hammerkit-win-x64.exe
          asset_content_type: application/octet-stream
npm:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup node
uses: actions/setup-node@v2
with:
node-version: '16'
registry-url: https://registry.npmjs.org/
- name: Set Version
id: version
run: |
echo ::set-output name=VERSION::${GITHUB_REF:11}
- name: Install npm packages
run: npm ci
- name: Publish package
run: npm publish --tag beta
if: contains(steps.version.outputs.version, 'beta')
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Publish package
run: npm publish
if: "!contains(steps.version.outputs.version, 'beta')"
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
|
.github/workflows/push.yaml
|
---
- name: Get UCLDC operator keys to account, no need for master key
become: no
get_url:
url: https://raw.github.com/ucldc/appstrap/master/cdl/ucldc-operator-keys.txt
dest: ~/.ssh/ucldc_operator_keys
force: no
- name: get lines for adding to authorized_keys
become: no
shell: tail -n+2 ~/.ssh/ucldc_operator_keys
register: operator_keys
-
name: "Add UCLDC operator keys to authorized_keys"
blockinfile:
dest: ~/.ssh/authorized_keys
block: "{{ operator_keys.stdout }}"
state: present
become: no
- name: "Add M McKinley key"
lineinfile:
dest: ~/.ssh/authorized_keys
state: present
line: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDK5pfP/RNh866vqNhLjZDu26MsZAIASGpD1e+YO1Pbg8y0nPaCJoFiFbD3fwM+b8FBNsoUNWuuWULtqMquhWE6whLM7ZcPuXDxDxRh/VPQV1NNJ7GKlBk9dhp8LiZ8sMOtQKkUWVC1jZ7BVjhXe7NhSC2xVn9nA9djvnDpnBtwUmdxNHDfhXje3Yrpc0f94wXkXB21Ki9MhF/yAEhM7AUvO3G9tBOmShI/K9BbrjqU2DW0f9vaBGcwH5aYOHsXYzboB5L77B99aUVXYYmjfbLNhYfmfDUV2uMpTU50W2ZJ/aHz5DlAI4U4zyad29rquJQxToDQPKyj8428pBHR180n mmckinle@CDL-MMCKINL-9M.local"
- name: "Add GAMontoya key"
lineinfile:
dest: ~/.ssh/authorized_keys
state: present
line: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDRp7Z1dTG+UvuSCe+3u1zVtvG759Lg/j9bvmNIZEZiUXbQ6k6bg8/ckeRR1qFGU7wVtgvJ+Ns1h28wuTRRsOwSirFz+4jE66mnPqb6MPM9zZSRLqHGUgvVm0GQ7TUQ7Yd+C9TEN+z7YoM0fwSRg0Qi4fBr8uEst2m2ZBBVz76VKB3agSzXngOhE2+PRg3KbkR9H9eQ6ejzJLjdlKMAZkRwwx+Ng12b2P0IwXX3Rko4HDGRHvP9qbmtq24Sg0uAmnGxWDE8lFjSizNxv/ULrGscSxS5uKliKrt84StTnDIhjOVMGlCBqjQ6a7Px3Kj54VQXUm2gFnsb/7m+VZNVIYEv <EMAIL>@<EMAIL>"
- name: "Add Mark's latest macbook key"
lineinfile:
dest: ~/.ssh/authorized_keys
state: present
    line: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCl4+GEZh3VQ8JiGS79oazNQgn2grhkgLHgEmQcXkK0AxhuWKvFqIo0w08D+iNsW0pC4hRFUqF7VzfQ9f3T/wCIaAW1AwnZQh5jTccrH2yJvpQ7JAb+3AdF6nM1dCC19h2ugCZWJjzLRMzsYV2OWjF9YCOabVH59XsHgTLmfv3IkE3hSr8G+bn0zPL7q8in+c/QOGoM/Lq3wm2tXuRYGbbd0PXS1x56o3tGzokqrWHldVTmhOf5hLGrLQWYh9WCY4VcnZmUGYesnSWPnsbN8jPSGXNJzZwdnP3ip+Ub5qllPSMi3Z/xOORSsixwjpUm6VlexqLN1FpvTiYP89GQxYKP <EMAIL>"
- name: "Add Adrian's Key"
lineinfile:
dest: ~/.ssh/authorized_keys
state: present
    line: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEA1czCmAZM2WOip8PYgWYN/ewlZoYYy2zO66yO60VUiP3q1KQQ7JQCF06DYwO3U0AZ1H8c6UtIRZpe8g3UpTAX/tTvASXy1bVS+bV5FDlxU9723Iyc2/vcGIciTtFzb4QQfacmXsjKheOs0DR++jUrBuYsd4xjBsQRfSzdmcZDK6BxDdA1T3k0KJ2ShuEGCYVuC8ZyzwHO2Ih8/7zVIqmyXjJ2T/fBndTME2ZJrYWpH9bMRgYh/KBF91cxLx3Udlvqvqswh+M4Fe/vqZtgZ//n+Mt3iutZ4Dzf12w5n78H11kuVXkPv58AGZbavj9PHZD2WZzmvm2f8IgN/WC+oFfwkQ== rsa-key-20151014-aturner"
|
ansible/roles/common/tasks/add_operator_keys.yml
|
---
# tasks file for psm_clean
- name: Find all inf and log files in hardening folder
win_find:
paths: "{{ psm_installation_path }}\\PSM\\Hardening"
patterns: ['*.inf', '*.log']
register: files_to_delete
- name: Delete all inf and log files in the hardening folder
win_file:
path: "{{ item.path }}"
state: absent
with_items: "{{ files_to_delete.files }}"
- name: Find all Relevant Files in the Windows Temp Folder
win_find:
paths: C:\Windows\Temp
patterns: ['PSM*.*', 'InstallationAutomation*.*']
register: files_to_delete
- name: Delete all Relevant Files in the Windows Temp Folder
win_file:
path: "{{ item.path }}"
state: absent
with_items: "{{ files_to_delete.files }}"
- name: Clean cred files
win_file:
path: "{{ item }}"
state: absent
with_items:
- "{{ psm_installation_path }}\\PSM\\Vault\\psmgw.ini"
- "{{ psm_installation_path }}\\PSM\\Vault\\psmapp.ini"
- name: Clean event viewer
win_shell: |
try {
wevtutil el | ForEach-Object { wevtutil cl "$_" }
} catch {
      Write-Output "Error occurred: $error"
exit 1
}
exit 0
- name: Empty recycle bin
win_shell: |
try {
$Recycler = (New-Object -ComObject Shell.Application).Namespace(0xa)
$Recycler.items() | ForEach-Object { rm $_.path -Force -Recurse }
} catch {
      Write-Output "Error occurred: $error"
exit 1
}
exit 0
- name: Clean run history
win_shell: |
try {
$path = "HKCU:\Software\Microsoft\Windows\CurrentVersion\Explorer\RunMRU"
if (& { Test-Path $path } 2>&1) {
$arr = (Get-Item -Path $path).Property
foreach ($item in $arr)
{
if ($item -ne "(Default)")
{
Remove-ItemProperty -Path $path -Name $item -ErrorAction SilentlyContinue
}
}
}
} catch {
      Write-Output "Error occurred: $error"
exit 1
}
exit 0
- name: Keep installation automation folder
win_copy:
src: "{{ psm_installationautomation_folder }}"
dest: "{{ psm_base_bin_drive }}\\Cyberark\\PSM"
remote_src: True
ignore_errors: yes
- name: Keep Registration Tool folder
win_copy:
src: "{{ psm_registrationtool_location }}"
dest: "{{ psm_base_bin_drive }}\\Cyberark\\PSM"
remote_src: True
ignore_errors: yes
- name: Delete the deployment folder
win_file:
path: "{{ psm_extract_folder }}"
state: absent
- name: Validate folder Deletion
win_stat:
path: "{{ psm_extract_folder }}"
register: folder_data
- name: End play if Deployment folder was not deleted
  fail:
    msg: "Error on folder deletion, stop play..."
when: folder_data.stat.exists
|
tasks/psm_clean.yml
|
name: ReadResult
uid: '@azure/ai-form-recognizer.ReadResult|beta'
package: '@azure/ai-form-recognizer|beta'
summary: |
The result of analysis using the prebuilt "read" model ("prebuilt-read").
This model produces only textual information: pages and languages.
fullName: ReadResult
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
- name: apiVersion
uid: '@azure/ai-form-recognizer.ReadResult.apiVersion|beta'
package: '@azure/ai-form-recognizer|beta'
summary: The service API version used to produce this result.
fullName: apiVersion
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'apiVersion: FormRecognizerApiVersion'
return:
description: ''
type: <xref uid="@azure/ai-form-recognizer.FormRecognizerApiVersion|beta" />
- name: content
uid: '@azure/ai-form-recognizer.ReadResult.content|beta'
package: '@azure/ai-form-recognizer|beta'
summary: >-
A string representation of all textual and visual elements in the input,
concatenated by reading order (the order
in which the service "reads" or extracts the textual and visual content
from the document).
fullName: content
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'content: string'
return:
description: ''
type: string
- name: languages
uid: '@azure/ai-form-recognizer.ReadResult.languages|beta'
package: '@azure/ai-form-recognizer|beta'
summary: Extracted text languages.
fullName: languages
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'languages: DocumentLanguage[]'
return:
description: ''
type: <xref uid="@azure/ai-form-recognizer.DocumentLanguage|beta" />[]
- name: modelId
uid: '@azure/ai-form-recognizer.ReadResult.modelId|beta'
package: '@azure/ai-form-recognizer|beta'
summary: The unique ID of the model that was used to produce this result.
fullName: modelId
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'modelId: string'
return:
description: ''
type: string
- name: pages
uid: '@azure/ai-form-recognizer.ReadResult.pages|beta'
package: '@azure/ai-form-recognizer|beta'
summary: Pages extracted from the input document.
fullName: pages
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'pages: DocumentPage[]'
return:
description: ''
type: <xref uid="@azure/ai-form-recognizer.DocumentPage|beta" />[]
- name: styles
uid: '@azure/ai-form-recognizer.ReadResult.styles|beta'
package: '@azure/ai-form-recognizer|beta'
summary: >
The text styles that were observed in the input.
For example, this contains information about regions of the input where
handwritten text occurs.
fullName: styles
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'styles: DocumentStyle[]'
return:
description: ''
type: <xref uid="@azure/ai-form-recognizer.DocumentStyle|beta" />[]
extends: <xref uid="@azure/ai-form-recognizer.AnalyzeResultCommon|beta" />
|
reference-docs/cognitive-services/azure-ai-form-recognizer/beta/ReadResult.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-18 08:11"
game: "Unreal Tournament"
name: "MH-QueenSistersV3"
author: "Raddi"
description: "None"
releaseDate: "2005-03"
attachments: []
originalFilename: "mh-queensistersv3.zip"
hash: "367f0ad5c7fba9bd07ead51462b27c4e45b50754"
fileSize: 2450216
files:
- name: "MonsterHunt.u"
fileSize: 1251524
hash: "a57b4fad4d2633c8c814287f88cd37b60c56f476"
- name: "NPIntruder.umx"
fileSize: 1416312
hash: "0df9eb95188b0e5e28c443b68f71a41611e064aa"
- name: "MH-QueenSistersV3.unr"
fileSize: 1586927
hash: "64ea1a33ccb5a7494932823865bb4b2926d3b29b"
- name: "PAIN.umx"
fileSize: 207514
hash: "69ad92efea0af2378fc5aabb403842c1911fd9af"
- name: "pb_totalizator.umx"
fileSize: 323167
hash: "071a09ce4485bee80f8986975d05383f362bf621"
- name: "unreal.umx"
fileSize: 1481578
hash: "b3ff3750a16000282fa96409a3df9ebc752faa97"
- name: "RaddiAndHornD.utx"
fileSize: 89018
hash: "97489e64912925ffa5f5257837b8725ab867a9c1"
otherFiles: 2
dependencies:
MH-QueenSistersV3.unr:
- status: "OK"
name: "pb_totalizator"
- status: "OK"
name: "MonsterHunt"
- status: "OK"
name: "RaddiAndHornD"
- status: "OK"
name: "PAIN"
- status: "OK"
name: "NPIntruder"
- status: "OK"
name: "unreal"
downloads:
- url: "http://www.ut-files.com/index.php?dir=Maps/MonsterHunt/&file=mh-queensistersv3.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/Monster%20Hunt/Q/mh-queensistersv3.zip"
main: true
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/MonsterHunt&file=mh-queensistersv3.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/Monster%20Hunt/Q/3/6/7f0ad5/mh-queensistersv3.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/Monster%20Hunt/Q/3/6/7f0ad5/mh-queensistersv3.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Monster Hunt"
title: "MH Queen Sisters"
playerCount: "4-12"
themes:
Skaarj Crypt: 0.3
Ancient: 0.2
Natural: 0.5
bots: false
|
content/Unreal Tournament/Maps/Monster Hunt/Q/3/6/7f0ad5/mh-queensistersv3_[367f0ad5].yml
|
Dataset:
shape: [1024, 1024] # wxh
keep_size_ratio: True
img_idrid_url: /home/tmp/clpla/data/idrid/A. Segmentation/1. Original Images/a. Training Set/
^mask_idrid_url: /home/tmp/clpla/data/idrid/A. Segmentation/2. All Segmentation Groundtruths/a. Training Set/
img_messidor_url: /home/tmp/clpla/data/messidor/original/img/images/
^mask_messidor_url: /home/tmp/clpla/data/messidor/biomarkers/
img_fgadr_url: /home/tmp/clpla/data/FGADR/Seg-set/Original_Images/
^mask_fgadr_url: /home/tmp/clpla/data/FGADR/Seg-set/
^recursive_loading: True
Test:
img_url: /home/tmp/clpla/data/idrid/A. Segmentation/1. Original Images/b. Testing Set/
^mask_url: /home/tmp/clpla/data/idrid/A. Segmentation/2. All Segmentation Groundtruths/b. Testing Set/
img_retles_url: /home/tmp/clpla/data/retinal-lesions-v20191227/images_896x896/
^mask_retles_url: /home/tmp/clpla/data/retinal-lesions-v20191227/segmentation/
img_idrid_url: /home/tmp/clpla/data/idrid/A. Segmentation/1. Original Images/b. Testing Set/
^mask_idrid_url: /home/tmp/clpla/data/idrid/A. Segmentation/2. All Segmentation Groundtruths/b. Testing Set/
img_ddr_url: /home/tmp/clpla/data/segmentation/DDR/image/
^mask_ddr_url: /home/tmp/clpla/data/segmentation/DDR/label/
Manager:
experiment: FundusSegmentation_ContrastiveLearning
run: Unet
^save_point: /usagers/clpla/Projects/runs
gpu: 0
^max_saved_model: 1
^num_workers: 8 # Workers used for parallel data loading
^dist_backend: nccl
seed: 1234
^tracking_uri: http://localhost:5010
^artifact_uri: sftp://clement@m3202-10.demdgi.polymtl.ca/home/clement/Documents/Clement/runs/server/artifact
grad_scaling: False
amp: False
Preprocessing:
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
random_crop: True
crop_size: [512, 512]
Validation:
size: 0.15
^log_interval: 100
Training:
batch_size: 6
ignore_index: -100
contrastive_pretraining: True
iterations: 10000
Contrastive_training:
training_step: 5000
lr: 0.1
tau: 0.07
size: [64, 64]
Loss:
type: Dice
^fusion: mean
params_loss*:
Dice*:
eps: 1
mode: multilabel
smooth: 0.5
Optimizer:
solver: Adam
params_solver*:
lr: 0.01
weight_decay: 0.00001
Learning_rate_scheduler:
update_type: on_iteration
scheduler: CosineAnnealingLR
params_scheduler*:
eta_min: 0.00001
T_max: 5000
^verbose: False
Network:
architecture: Contrastive_Unet
n_classes: 4
synchronized_batch_norm: True
pretrained: True
|
experiments/contrastive_training/config_contrastive.yaml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-16 18:25"
game: "Unreal Tournament"
name: "CTF-iKes[V2]"
author: "Kris (R3plicant) Andrews"
description: "This version is for Instagib ONLY."
releaseDate: "2006-01"
attachments:
- type: "IMAGE"
name: "CTF-iKes[V2]_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/Capture%20The%20Flag/I/CTF-iKes%5BV2%5D_shot_3.png"
- type: "IMAGE"
name: "CTF-iKes[V2]_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/Capture%20The%20Flag/I/CTF-iKes%5BV2%5D_shot_1.png"
- type: "IMAGE"
name: "CTF-iKes[V2]_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/Capture%20The%20Flag/I/CTF-iKes%5BV2%5D_shot_2.png"
- type: "IMAGE"
name: "CTF-iKes[V2]_shot_4.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/Capture%20The%20Flag/I/CTF-iKes%5BV2%5D_shot_4.png"
originalFilename: "ctf-ikes[v2].zip"
hash: "ebd9352b39b875af546062853ae583a5330b38c1"
fileSize: 2759189
files:
- name: "CTF-iKes[V2].unr"
fileSize: 10394021
hash: "5e45ffbec828963e0df090fbb8048c57828a0ea4"
otherFiles: 4
dependencies: {}
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/Capture%20The%20Flag/I/ctf-ikes%5Bv2%5D.zip"
main: true
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/CTF/MapsI/&file=ctf-ikes%5Bv2%5D.zip"
main: false
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/CTF/&file=ctf-ikes%5Bv2%5D.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/CTF&file=ctf-ikes%5Bv2%5D-2.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/CTF&file=ctf-ikes%5Bv2%5D.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Maps/CTF/MapsI/ctf-ikes%5bv2%5d.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/Capture%20The%20Flag/I/e/b/d9352b/ctf-ikes%255Bv2%255D.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/Capture%20The%20Flag/I/e/b/d9352b/ctf-ikes%255Bv2%255D.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Capture The Flag"
title: "CTF-iKes[V2]"
playerCount: "10-16"
themes:
Industrial: 0.7
Ancient: 0.1
Skaarj Tech: 0.2
bots: true
|
content/Unreal Tournament/Maps/Capture The Flag/I/e/b/d9352b/ctf-ikesv2_[ebd9352b].yml
|
title: Developing Mobile Apps
metadata:
title: 'Exam 70-357: Developing Mobile Apps'
description: 'Exam 70-357: Developing Mobile Apps'
uid: exam.70-357
displayName: 70-357
summary: |-
<div><font color='red'><strong>Este examen se retiró el 31 de enero de 2021. Para conocer las opciones disponibles actualmente, por favor consulta la <a href='/learn/certifications/browse/?resource_type=examination'>Página de navegación por Certificaciones y Exámenes</a>.<br/><br/></strong></font></div>
Los candidatos a este examen son desarrolladores con experiencia que diseñan y desarrollan aplicaciones de Windows 10 para implementarlas en la Windows Store o en la Windows Store para empresas.
Los candidatos normalmente tienen más de dos años de experiencia en el desarrollo de aplicaciones Windows con C# y XAML, además de experiencia con WPF o Silverlight para Windows Phone. Los candidatos también deben tener experiencia en el patrón de diseño Model-View-ViewModel (MVVM) y en Entity Framework, además de en las tecnologías de autenticación. Tener experiencia en Visual Studio 2015 es muy recomendable.
subTitle: Los candidatos a este examen son desarrolladores con experiencia que diseñan y desarrollan aplicaciones de Windows 10 para implementarlas en la Windows Store o en la Windows Store para empresas.
iconUrl: /media/learn/certification/badges/certification-exam.svg
pdfDownloadUrl: https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE4trjH
practiceTestUrl: https://www.mindhub.com/70-357-developing-mobile-apps-microsoft-official/p/MU-70-357?utm_source=microsoft&utm_medium=certpage&utm_campaign=msofficialpractice
locales:
- en
- es
- zh-cn
- zh-tw
- fr
- de
- ja
- pt-br
- ru
levels:
- intermediate
skills:
- Desarrollo de un diseño de página XAML para una interfaz de usuario adaptativa (10–15%)
- Implementación de la navegación de la página y los eventos del ciclo de vida (10–15%)
- Implementación del acceso a los datos y el enlace de datos (20–25%)
- Implementación de detección de funciones para codificación adaptativa (10–15%)
- Gestión de entradas de datos de usuarios e interacciones personalizadas de usuarios (10–15%)
- Gestión de la autenticación y gestión de identidad (10–15%)
- Implementación de notificaciones, tareas en segundo plano y componentes reutilizables (15–20%)
retirementDate: 01/31/2021
roles:
- developer
products:
- vs
relatedCertifications:
- related:
uid: certification.mcsa-universal-windows-platform
- related:
uid: certification.mcsd-app-builder-certification
resources:
- title: Exam Replay
description: Vea dos grandes ofertas para ayudar a aumentar sus probabilidades de éxito.
href: ../deals.md
- title: Panel de certificación
description: Revise y administre sus citas programadas, certificados y transcripciones.
href: https://www.microsoft.com/learning/dashboard.aspx
- title: Solicite ajustes
description: Obtenga más información sobre cómo solicitar un ajuste para su examen.
href: ../request-accommodations.md
- title: Políticas de examen y preguntas frecuentes
description: Revise y administre sus citas programadas, certificados y transcripciones.
href: ../certification-exam-policies.md
|
learn-certs-pr/exams/70-357.yml
|
os: linux
dist: bionic
language: c
compiler: gcc
# This defines a "matrix" of jobs. Each combination of environment variables
# defines a different job. They run in parallel, five at a time. We have
# divided the matrix into "stages"; if any stage has a failure, testing stops
# and the remaining stages are skipped. Note that some stages are conditional.
#
# Note: Matrixing of variables that happens at the top level is not supported
# within stages: https://github.com/travis-ci/travis-ci/issues/8295.
#
# We do not do any full-scope tests, because they give a >10-minute gap in
# output, so Travis times out.
#
# FIXME: Each job starts with a cold Docker cache, which wastes work heating
# it up in parallel. It would be nice if "make test-build" could be done
# serially before splitting into parallel jobs.
#
# TARBALL= # build in Git checkout
# TARBALL=archive # build from "git archive" tarball
# TARBALL=export # build from "make export" tarball
# MAKE_INSTALL= # run from build directory
# MAKE_INSTALL=yes # "make install"; run that one
#
# Note: $INSTALL is used by Autotools, and setting it to "yes" causes very
# weird errors, e.g.:
#
# make[2]: Entering directory [...]
# /bin/mkdir -p '/var/tmp/lib/charliecloud'
# ../yes base.sh '/var/tmp/lib/charliecloud'
# /bin/bash: line 23: ../yes: No such file or directory
# Makefile:323: recipe for target 'install-dist_pkglibSCRIPTS' failed
#
# Additional options:
#
# CH_BUILDER # which builder to use
# MINIMAL_CONFIG # exclude all optional features with --disable-foo
# MINIMAL_DEPS # minimal dependencies; implies PACK_FMT=tar
# PACK_FMT= # squash pack format, ch-mount/ch-unmount
# PACK_FMT=squash-unpack # squash pack format, but unpack instead of mount
# PACK_FMT=tar # tarball pack format
# SUDO_RM_FIRST # remove sudo before build (implied if non-Docker)
# SUDO_RM_AFTER_BUILD # remove sudo after build
# Only run the tests on master or in a pull request. In principle, people
# might start a branch and want Travis on it before it becomes a PR. However,
# in practice, this doesn't happen, and the merge is what really matters.
if: branch = master OR type = pull_request
stages:
- quick
- builders
- install
- misc
_stage_quick: &stage_quick
stage: quick
_stage_builders: &stage_builders
stage: builders
_stage_install: &stage_install
stage: install
_stage_misc: &stage_misc
stage: misc
jobs:
include:
- <<: *stage_quick
env: CH_BUILDER=docker CH_TEST_SCOPE=quick
- <<: *stage_builders
env: CH_BUILDER=none
- <<: *stage_builders
env: CH_BUILDER=ch-grow
- <<: *stage_builders
env: CH_BUILDER=docker
# - <<: *stage_builders
# env: CH_BUILDER=docker PACK_FMT=squash-unpack
- <<: *stage_builders
env: CH_BUILDER=docker PACK_FMT=tar
- <<: *stage_install
env: CH_BUILDER=none TARBALL=export MAKE_INSTALL=yes
- <<: *stage_install
env: CH_BUILDER=buildah TARBALL=export MAKE_INSTALL=yes
- <<: *stage_install
env: CH_BUILDER=buildah-runc TARBALL=export MAKE_INSTALL=yes
- <<: *stage_install
env: CH_BUILDER=ch-grow TARBALL=export MAKE_INSTALL=yes
- <<: *stage_install
env: CH_BUILDER=docker TARBALL=export MAKE_INSTALL=yes
- <<: *stage_misc
env: CH_BUILDER=buildah MINIMAL_DEPS=yes
- <<: *stage_misc
env: CH_BUILDER=ch-grow MINIMAL_DEPS=yes
- <<: *stage_misc
env: CH_BUILDER=docker MINIMAL_DEPS=yes
- <<: *stage_misc
env: CH_BUILDER=docker MINIMAL_CONFIG=yes
- <<: *stage_misc
env: CH_BUILDER=docker SUDO_RM_AFTER_BUILD=yes
addons:
apt:
sources:
- sourceline: 'ppa:projectatomic/ppa'
packages:
- autoconf
- automake
- bats
- python3-pip
- python3-setuptools
install:
- . test/travis-install.bash # source b/c we're setting variables
before_script:
- . test/travis-before.bash
script:
- test/travis.bash
after_script:
- free -m
- df -h
|
test/travis.yml
|
items:
- uid: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat
id: WithWhatIfResultFormat
artifact: com.microsoft.azure:azure-mgmt-resources:1.36.1
parent: com.microsoft.azure.management.resources
children:
- com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withFullResourcePayloadsResultFormat()
- com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withResourceIdOnlyResultFormat()
langs:
- java
name: Deployment.ExecutionStages.WithWhatIfResultFormat
nameWithType: Deployment.ExecutionStages.WithWhatIfResultFormat
fullName: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat
type: Interface
package: com.microsoft.azure.management.resources
summary: A deployment execution allowing result format to be specified.
syntax:
content: public static interface Deployment.ExecutionStages.WithWhatIfResultFormat
- uid: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withFullResourcePayloadsResultFormat()
id: withFullResourcePayloadsResultFormat()
artifact: com.microsoft.azure:azure-mgmt-resources:1.36.1
parent: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat
langs:
- java
name: withFullResourcePayloadsResultFormat()
nameWithType: Deployment.ExecutionStages.WithWhatIfResultFormat.withFullResourcePayloadsResultFormat()
fullName: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withFullResourcePayloadsResultFormat()
overload: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withFullResourcePayloadsResultFormat*
type: Method
package: com.microsoft.azure.management.resources
summary: Specifies the result format with value of 'FULL\_RESOURCE\_PAYLOADS' in What-if settings of deployment properties.
syntax:
content: public abstract Deployment.ExecutionStages.WithWhatIf withFullResourcePayloadsResultFormat()
return:
type: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIf
description: the next stage of the execution.
- uid: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withResourceIdOnlyResultFormat()
id: withResourceIdOnlyResultFormat()
artifact: com.microsoft.azure:azure-mgmt-resources:1.36.1
parent: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat
langs:
- java
name: withResourceIdOnlyResultFormat()
nameWithType: Deployment.ExecutionStages.WithWhatIfResultFormat.withResourceIdOnlyResultFormat()
fullName: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withResourceIdOnlyResultFormat()
overload: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withResourceIdOnlyResultFormat*
type: Method
package: com.microsoft.azure.management.resources
summary: Specifies the result format with value of 'RESOURCE\_ID\_ONLY' in What-if settings of deployment properties.
syntax:
content: public abstract Deployment.ExecutionStages.WithWhatIf withResourceIdOnlyResultFormat()
return:
type: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIf
description: the next stage of the execution.
references:
- uid: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIf
name: Deployment.ExecutionStages.WithWhatIf
nameWithType: Deployment.ExecutionStages.WithWhatIf
fullName: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIf
- uid: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withFullResourcePayloadsResultFormat*
name: withFullResourcePayloadsResultFormat
nameWithType: Deployment.ExecutionStages.WithWhatIfResultFormat.withFullResourcePayloadsResultFormat
fullName: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withFullResourcePayloadsResultFormat
package: com.microsoft.azure.management.resources
- uid: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withResourceIdOnlyResultFormat*
name: withResourceIdOnlyResultFormat
nameWithType: Deployment.ExecutionStages.WithWhatIfResultFormat.withResourceIdOnlyResultFormat
fullName: com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.withResourceIdOnlyResultFormat
package: com.microsoft.azure.management.resources
|
docs-ref-autogen/com.microsoft.azure.management.resources.Deployment.ExecutionStages.WithWhatIfResultFormat.yml
|
type: module
# The module category can be either "template" or "helm-chart" or "security-exception"
# For now, we only support "helm-chart"; "template" is NOT supported yet
category: helm-chart
# The name here just for display purpose, not necessary to be the same as the name in the directory. Optional
name: watson-language-pak-3
# The description is mandatory
description: "this is the optional module for installation for language-pak-3"
# Registry name reference maps to the name in the server-info.yaml, mandatory
registry-reference: lt-registry
# Optional. The override yaml that will override the helm chart, the file need to be same directory as the helm chart
# installOverride: install-values-override.yaml
# upgradeOverride: upgrade-values-override.yaml
# This is the matching to addon.json so cpd will depends on the assembly name to find out all service instance type
# serviceInstanceType:
# - type1
# - type2
# Provide the list of image tags here; mandatory.
# If an image tag is found in the helm chart but not in this list, cpd will fail,
# so make sure the list below covers every image used by the chart.
images:
- name: lt-bgbg-enus-general
tag: 1.2
- name: lt-bsba-enus-general
tag: 1.2
- name: lt-caes-eses-general
tag: 1.2
- name: lt-cnrme-enus-general
tag: 1.2
- name: lt-cscz-enus-general
tag: 1.2
- name: lt-cygb-enus-general
tag: 1.2
- name: lt-dadk-enus-general
tag: 1.2
- name: lt-dede-frfr-general
tag: 1.2
- name: lt-dede-itit-general
tag: 1.2
- name: lt-elgr-enus-general
tag: 1.2
- name: lt-enus-bgbg-general
tag: 1.2
- name: lt-enus-bsba-general
tag: 1.2
- name: lt-enus-cnrme-general
tag: 1.2
- name: lt-enus-cscz-general
tag: 1.2
- name: lt-enus-cygb-general
tag: 1.2
- name: lt-enus-dadk-general
tag: 1.2
- name: lt-enus-elgr-general
tag: 1.2
- name: lt-enus-etee-general
tag: 1.2
- name: lt-enus-fifi-general
tag: 1.2
- name: lt-enus-frca-general
tag: 1.2
- name: lt-enus-gaie-general
tag: 1.2
- name: lt-enus-hrhr-general
tag: 1.2
- name: lt-enus-huhu-general
tag: 1.2
- name: lt-enus-ltlt-general
tag: 1.2
- name: lt-enus-lvlv-general
tag: 1.2
- name: lt-enus-nbno-general
tag: 1.2
- name: lt-enus-nlnl-general
tag: 1.2
- name: lt-enus-plpl-general
tag: 1.2
- name: lt-enus-roro-general
tag: 1.2
- name: lt-enus-sksk-general
tag: 1.2
- name: lt-enus-slsi-general
tag: 1.2
- name: lt-enus-srrs-general
tag: 1.2
- name: lt-enus-svse-general
tag: 1.2
- name: lt-enus-ukua-general
tag: 1.2
- name: lt-eses-caes-general
tag: 1.2
- name: lt-eses-eues-general
tag: 1.2
- name: lt-eses-frfr-general
tag: 1.2
- name: lt-etee-enus-general
tag: 1.2
- name: lt-eues-eses-general
tag: 1.2
- name: lt-fifi-enus-general
tag: 1.2
- name: lt-frca-enus-general
tag: 1.2
- name: lt-frfr-dede-general
tag: 1.2
- name: lt-frfr-eses-general
tag: 1.2
- name: lt-gaie-enus-general
tag: 1.2
- name: lt-hrhr-enus-general
tag: 1.2
- name: lt-huhu-enus-general
tag: 1.2
- name: lt-itit-dede-general
tag: 1.2
- name: lt-ltlt-enus-general
tag: 1.2
- name: lt-lvlv-enus-general
tag: 1.2
- name: lt-nbno-enus-general
tag: 1.2
- name: lt-nlnl-enus-general
tag: 1.2
- name: lt-plpl-enus-general
tag: 1.2
- name: lt-roro-enus-general
tag: 1.2
- name: lt-sksk-enus-general
tag: 1.2
- name: lt-slsi-enus-general
tag: 1.2
- name: lt-srrs-enus-general
tag: 1.2
- name: lt-svse-enus-general
tag: 1.2
- name: lt-ukua-enus-general
tag: 1.2
chart: ibm-watson-lt-language-pak-3-1.2.1.tgz
# In minutes, optional. The installer waits this long for all pods to be running; default is 20 minutes.
timeout: 30
# The name will be used for helm release name as <namespace-releaseName>, optional, default will be <namespace-moduleName>
releaseName: ibm-watson-lt-language-pak-3
# Set to true will trigger the helm upgrade with force option. Optional. Missing means false
helmUpgradeForce: false
# The cpd scale files to define here. optional, refer to https://ibm.ent.box.com/notes/528081405807 for details
# The list is file names only, and expected to located under config/
# scale:
# - small.yaml
|
repo/cpd/3.5/modules/watson-language-pak-3/x86_64/1.2/main.yaml
|
device:
name: eg
version: 1.0
variables:
name: eg
baresip_base: "/home/interkom/{{id(app).variables.name}}/baresip"
platforms:
- platform: plugins.mqtt
id: mqtt
variables:
base_topic: "home/{{id(app).variables.name}}"
host: home
port: 1883
availability:
topic: "{{id(mqtt).variables.base_topic}}/connected"
payload_on: "yes"
payload_off: "no"
on_connected:
- actions:
- action: print
values:
payload: "on_connected: {{id(mqtt).variables.base_topic}}"
on_disconnected:
- actions:
- action: print
values:
payload: bye
# on_message:
# - actions:
# - action: print #display all messages in the dashboard
# values:
# payload: "I got a message on topic '{{topic}}' with payload: {{{payload}}}"
- platform: plugins.gpio
id: gpio
factory: rpigpio
- platform: plugins.console
id: console
- platform: plugins.dfplayer
id: dfplayer
tx_pin: GPIO5
- platform: plugins.hass
id: hass
connection: mqtt
exports:
- id: player
type: switch
icon: mdi:home
on_command:
- actions:
- action: player
values:
command: next_track
off_command:
- actions:
- action: player
values:
topic: pause
expose_state: state.is_playing
- id: btn_ring_og
type: sensor
on_event: on_press
off_event: on_release
expose_state: state.is_pressed
- id: btn_open_door
type: sensor
on_event: on_press
off_event: on_release
expose_state: state.is_pressed
actions:
- id: print
platform: console
type: PrintAction
- id: player
platform: dfplayer
- id: setVariable
platform: system
type: SetVariableAction
debug: true
- id: baresip_dial_og
platform: mqtt
debug: true
topic: "/home/interkom/{{id(app).variables.name}}/baresip/command/dial"
payload:
command: dial
token: og
params: sip:og
- id: baresip_dial_frontdoor
platform: mqtt
debug: true
topic: "/home/interkom/{{id(app).variables.name}}/baresip/command/dial"
payload:
command: dial
token: frontdoor
params: sip:frontdoor
- id: baresip_hangup
platform: mqtt
debug: true
topic: "/home/interkom/{{id(app).variables.name}}/baresip/command/hangup"
payload:
command: hangup
token: hangup
params: hangup
sensors:
- id: btn_ring_og
platform: gpio
debug: true
#pin33
pin: GPIO13
type: ButtonSensor
check_state_delay: 0.03
hold_time: 0.2
- id: btn_open_door
platform: gpio
debug: true
#pin35
pin: GPIO19
type: ButtonSensor
check_state_delay: 0.03
#- id: btn_hangup_or_dial
# platform: gpio
# debug: true
# #pin37
# pin: GPIO26
# type: ButtonSensor
# inverted: true
# on_release:
# - actions:
# - action: print
# values:
# payload: on_release
# - conditions:
# - actual: "{{id(is_ringing).state.payload}}"
# comperator: equals
# expected: "incoming call"
# actions:
# - action: print
# values:
# payload: dial frontdoor
# - action: baresip_dial_frontdoor
# on_press:
# - actions:
# - action: print
# values:
# payload: on_press
# - action: baresip_hangup
- id: play
#Run dfplayer.next_track()
platform: mqtt
topic: "{{id(mqtt).variables.base_topic}}/play"
on_message:
- actions:
- action: player
values:
command: next_track
#- id: is_ringing
# platform: mqtt
# topic: "{{id(app).variables.baresip_base}}/event/#"
# on_message:
# - actions:
# - action: print
# values:
# payload: is ringing...
- id: baresip_state
#Save baresip state into variable 'connected'
platform: mqtt
topic: "{{id(app).variables.baresip_base}}/event"
on_message:
- conditions:
- actual: "{{#payload}}{{type}}{{/payload}}"
comperator: contains
expected: REGISTER_OK
actions:
- action: print
values:
payload: hello baresip
- action: setVariable
values:
topic: connected
payload: true
- conditions:
- actual: "{{#payload}}{{type}}{{/payload}}"
comperator: equals
expected: EXIT
actions:
- action: print
values:
payload: bye baresip
- action: setVariable
values:
topic: connected
payload: false
|
src/eg.yaml
|
version: "2.4"
services:
uploader:
image: ziemsky/gdrive-uploader:latest
mem_limit: 34m
restart: always
volumes:
# Location of custom configuration file, overriding some or all configuration options.
#
# Optional. Uncomment to enable, in which case make sure to also uncomment the line with
# SPRING_CONFIG_ADDITIONAL_LOCATION under 'environment' below:
#
#- /etc/uploader:/app/config/extra
# Location of Google account credentials.
#
# Required.
# Unless you override the structure of directory via custom application.conf, make sure that this directory
# contains credentials in the following structure:
# .
# └── gdrive
# └── secrets
# ├── credentials.json
# └── tokens
# └── StoredCredential
#
- /etc/uploader/google:/app/config/google
# Location of the log file
#
# Optional. Uncomment to enable generation of the log file, in which case make sure to also uncomment
# line with environment variable LOGGING_FILE_NAME under 'environment' below:
#
#- /var/log/uploader:/app/log
# Directory monitored for incoming files.
#
# Required.
#
- /var/opt/uploader/inbound:/app/inbound
#environment:
# When specifying any paths, keep in mind tht these are resolved _within the container_.
# Enables generation of a log file.
#
# Optional; uncomment to enable logging into a file, in which case make sure to uncomment corresponding
# line under 'volumes' to make the log file visible from the host:
#
#LOGGING_FILE_NAME: '/app/log/uploader.log'
# Enables use of custom configuration file, overriding some or all configuration properties.
#
# Optional. Uncomment to enable, in which case make sure to uncomment corresponding line under 'volumes'
# to give the application visibility of the file located on the host:
#
#SPRING_CONFIG_ADDITIONAL_LOCATION: /app/config/extra/application.conf
|
docker/compose/docker-compose_example.yaml
|
name: VirtualMachineScaleSetUpdateOSProfile
uid: '@azure/arm-compute.VirtualMachineScaleSetUpdateOSProfile'
package: '@azure/arm-compute'
summary: Describes a virtual machine scale set OS profile.
fullName: VirtualMachineScaleSetUpdateOSProfile
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
- name: customData
uid: '@azure/arm-compute.VirtualMachineScaleSetUpdateOSProfile.customData'
package: '@azure/arm-compute'
summary: A base-64 encoded string of custom data.
fullName: customData
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'customData?: undefined | string'
return:
type: undefined | string
description: ''
- name: linuxConfiguration
uid: >-
@azure/arm-compute.VirtualMachineScaleSetUpdateOSProfile.linuxConfiguration
package: '@azure/arm-compute'
summary: The Linux Configuration of the OS profile.
fullName: linuxConfiguration
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'linuxConfiguration?: LinuxConfiguration'
return:
type: <xref uid="@azure/arm-compute.LinuxConfiguration" />
description: ''
- name: secrets
uid: '@azure/arm-compute.VirtualMachineScaleSetUpdateOSProfile.secrets'
package: '@azure/arm-compute'
summary: The List of certificates for addition to the VM.
fullName: secrets
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'secrets?: VaultSecretGroup[]'
return:
type: '<xref uid="@azure/arm-compute.VaultSecretGroup" />[]'
description: ''
- name: windowsConfiguration
uid: >-
@azure/arm-compute.VirtualMachineScaleSetUpdateOSProfile.windowsConfiguration
package: '@azure/arm-compute'
summary: The Windows Configuration of the OS profile.
fullName: windowsConfiguration
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'windowsConfiguration?: WindowsConfiguration'
return:
type: <xref uid="@azure/arm-compute.WindowsConfiguration" />
description: ''
|
docs-ref-autogen/@azure/arm-compute/VirtualMachineScaleSetUpdateOSProfile.yml
|
id: foia_vic4_response_time
label: 'Import VI. ADMINISTRATIVE APPEALS / C. (4) paragraphs from NIEM-XML data.'
migration_group: foia_component_data_import
source:
item_selector: '/iepd:FoiaAnnualReport/foia:AppealResponseTimeSection/foia:ResponseTime'
fields:
-
name: report_year
label: 'Fiscal year for the current report'
selector: '/iepd:FoiaAnnualReport/foia:DocumentFiscalYearDate'
-
name: agency
label: 'Standard abbreviation of the agency submitting the report'
selector: '/iepd:FoiaAnnualReport/nc:Organization/nc:OrganizationAbbreviationText'
-
name: component
label: 'Internal index of the agency component'
selector: '@s:id'
-
name: field_med_num_days
label: 'Administrative appeals median number of days response time'
selector: 'foia:ResponseTimeMedianDaysValue'
-
name: field_avg_num_days
label: 'Administrative appeals average number of days response time'
selector: 'foia:ResponseTimeAverageDaysValue'
-
name: field_low_num_days
label: 'Administrative appeals lowest number of days response time'
selector: 'foia:ResponseTimeLowestDaysValue'
-
name: field_high_num_days
label: 'Administrative appeals highest number of days response time'
selector: 'foia:ResponseTimeHighestDaysValue'
ids:
report_year:
type: integer
agency:
type: string
component:
type: string
process:
type:
plugin: default_value
default_value: admin_app_vic4
langcode:
plugin: default_value
default_value: en
status:
plugin: default_value
default_value: true
field_agency_component:
-
plugin: migration_lookup
source:
- report_year
- agency
- component
migration:
- component_vic4_response_time
no_stub: true
-
plugin: skip_on_empty
method: row
message: 'Cannot find an Agency Component node with the given abbreviation.'
field_med_num_days: field_med_num_days
field_avg_num_days: field_avg_num_days
field_low_num_days: field_low_num_days
field_high_num_days: field_high_num_days
migration_dependencies:
required:
- component_vic4_response_time
|
docroot/modules/custom/foia_upload_xml/config/install/migrate_plus.migration.foia_vic4_response_time.yml
|
name: deploy
on:
workflow_dispatch:
# Inputs the workflow accepts.
inputs:
overlay:
description: The environment to update
default: 'dev'
required: true
tag:
description: The tag to deploy
default: 'dev'
required: true
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "greet"
updateConfig:
# The type of runner that the job will run on
runs-on: ubuntu-latest
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Runs a single command using the runners shell
- name: Checkout Repository
uses: actions/checkout@v2
- name: Install Kustomize
run: |
curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
- name: Edit Kustomize
run: |
DIR=overlays/
cd overlays/${{ github.event.inputs.overlay }}
kustomize edit set image ghcr.io/biosimulations/account-api:${{ github.event.inputs.tag }}
kustomize edit set image ghcr.io/biosimulations/dispatch-service:${{ github.event.inputs.tag }}
kustomize edit set image ghcr.io/biosimulations/api:${{ github.event.inputs.tag }}
kustomize edit set image ghcr.io/biosimulations/simulators-api:${{ github.event.inputs.tag }}
kustomize edit set image ghcr.io/biosimulations/mail-service:${{ github.event.inputs.tag }}
kustomize edit set image ghcr.io/biosimulations/combine-api:${{ github.event.inputs.tag }}
- name: Remove Kustomize
run: |
rm -f kustomize
- name: Commit Changes
uses: EndBug/add-and-commit@v7
with:
author_name: Biosimulations-Daemon
author_email: <EMAIL>
branch: main
message: 'Changed image tag of ${{ github.event.inputs.overlay }} to ${{ github.event.inputs.tag }}'
signoff: true
env:
# This is necessary in order to push a commit to the repo
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Leave this line unchanged
|
.github/workflows/deploy.yml
|
name: Startup
on:
push:
branches:
- master
paths:
- '**.yml'
- '**.el'
- lisp/**
- site-lisp/**
pull_request:
branches:
- master
paths-ignore:
- '**.md'
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
REPO_BIN: 'jcs-emacs/binaries'
jobs:
test:
runs-on: ${{ matrix.os }}
continue-on-error: ${{ matrix.experimental }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
        emacs-version: ['28.1']
experimental: [false]
include:
- os: ubuntu-latest
emacs-version: snapshot
experimental: true
- os: macos-latest
emacs-version: snapshot
experimental: true
- os: windows-latest
emacs-version: snapshot
experimental: true
steps:
- uses: jcs090218/setup-emacs@master
with:
version: ${{ matrix.emacs-version }}
- uses: actions/checkout@v2
- name: Copy configuration files
run: |
chmod -R 777 ./
make copy_config
- name: Test startup
run: make startup
- name: Checkout binaries repository
uses: actions/checkout@v3
with:
repository: ${{ env.REPO_BIN }}
persist-credentials: false
fetch-depth: 0
- name: Create LFS file list
run: git lfs ls-files -l | cut -d' ' -f1 | sort > .lfs-assets-id
- name: Restore LFS cache
uses: actions/cache@v3
id: lfs-cache
with:
path: .git/lfs
key: ${{ runner.os }}-lfs-${{ hashFiles('.lfs-assets-id') }}
- name: Git LFS Pull
continue-on-error: true
run: |
git lfs pull
git add .
git reset --hard
- name: Remove before creating tar file
uses: JesseTG/rm@v1.0.2
with:
path: main.tar
- name: Zipping .emacs.d
run: tar -C $HOME -cvf main.tar .emacs.d
- name: Set git config
run: |
git config user.name github-actions
git config user.email <EMAIL>
git config receive.denynonfastforwards false
- name: Commit zipped .emacs.d
continue-on-error: true
run: |
git pull
git add .
git commit -m "Update .emacs.d - main.tar"
- name: Push
uses: jcs090218/github-push-action@master
with:
repository: ${{ env.REPO_BIN }}
github_token: ${{ secrets.PAT }}
branch: master
retry: 7
|
.github/workflows/startup.yml
|
---
- name: Set fact indicating that tests are being run
set_fact:
prometheus_software_testing: true
prometheus_local_archive: '{{ lookup("env", "PROMETHEUS_LOCAL_ARCHIVE") | bool }}'
- name: Gather Operating System specific variables
include_vars: '{{ os_var_file }}'
loop_control:
loop_var: os_var_file
with_first_found:
- '../../vars/distribution/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
- '../../vars/distribution/{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
- '../../vars/distribution/{{ ansible_distribution }}.yml'
- '../../vars/os/{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml'
- '../../vars/os/{{ ansible_os_family }}.yml'
- name: Set install filters from environment
set_fact:
prometheus_install_only: '{{ lookup("env", "PROMETHEUS_INSTALL_ONLY").split(",") | reject("equalto", "") | list }}'
prometheus_install_skip: '{{ lookup("env", "PROMETHEUS_INSTALL_SKIP").split(",") | reject("equalto", "") | list }}'
- block:
- name: Update 'pacman' cache
include_tasks: ../../tasks/_setup_pkg_mgr_Archlinux.yml
when: ansible_os_family == 'Archlinux'
- name: Disable needless USE parameters on Gentoo
become: true
lineinfile:
path: /etc/portage/make.conf
regexp: '^USE=.*'
line: 'USE="headless-awt -X -alsa -cups -gtk"'
when: ansible_os_family == 'Gentoo'
- name: Update 'apk' cache
become: true
apk:
update_cache: true
changed_when: false
when: ansible_os_family == 'Alpine'
- name: Update 'apt' cache
become: true
apt:
update_cache: true
changed_when: false
when: ansible_os_family == 'Debian'
- name: Set install sponge variable
set_fact:
prometheus_install_sponge: true
when:
- ansible_distribution not in ['CentOS']
- name: Workaround for OverlayFS on Fedora 20 and 21
become: true
shell: 'touch /var/lib/rpm/*'
changed_when: false
args:
warn: false
when: ansible_distribution == 'Fedora' and ansible_distribution_major_version in ['20', '21']
- block:
- name: Ensure dbus is installed
become: true
package:
name: dbus
state: present
- name: Ensure dbus is running
become: true
service:
name: dbus
state: started
changed_when: false
when:
- ansible_service_mgr == 'systemd'
- not ansible_os_family in ['Suse']
- not ansible_distribution in ['Ubuntu']
- name: Workaround for java version on Fedora 22
become: true
dnf:
name: nss
state: latest
changed_when: false
when: ansible_distribution == 'Fedora' and ansible_distribution_major_version == '22'
- name: Install java 8 on Alpine 3.3 through 3.5
# This step is necessary due to poor Alpine version detection by Ansible,
# otherwise this would be determined via variables like other distros
become: true
package:
name: openjdk8-jre
state: present
changed_when: false
register: prometheus_java_install_alpine_override
retries: 5
delay: 10
until: prometheus_java_install_alpine_override is succeeded
when:
- prometheus_java_packages is defined and ansible_os_family == 'Alpine' and ansible_distribution_version is version('3.3', '>=') and ansible_distribution_version is version('3.6', '<')
- name: Install java
become: true
package:
name: '{{ prometheus_java_packages }}'
state: present
changed_when: false
register: prometheus_java_install
retries: 5
delay: 10
until: prometheus_java_install is succeeded
when:
- prometheus_java_packages is defined and prometheus_java_install_alpine_override is skipped
- name: Find all prometheus software
  # 'local_action' already implies delegation to localhost; combining it with
  # an explicit 'delegate_to' is rejected by Ansible.
  local_action: find paths="{{ lookup('env', 'PWD') }}/tasks" file_type=file excludes='_*.yml,main.yml'
register: tasks
- name: Install prometheus testing prerequisite utilities
become: true
package:
name: '{{ prometheus_testing_packages }}'
state: present
changed_when: false
register: prometheus_testing_packages_install
retries: 5
delay: 10
until: prometheus_testing_packages_install is succeeded
when: prometheus_testing_packages is defined
- name: Create test log file
become: true
copy:
dest: /var/log/test.log
content: "{{ ansible_date_time.iso8601 }} This is a test\n"
force: false
|
tests/playbooks/install_common_pre.yml
|
name: Map Taxa to Functions using FAPROTAX - v1.2.4
subtitle : FAPROTAX is a database that maps prokaryotic clades (e.g. genera or species) to established metabolic or other ecologically relevant functions, using the current literature on cultured strains
tooltip: FAPROTAX maps prokaryotic clades (e.g. genera or species) to metabolic or other ecologically relevant functions using literature on cultured strains
screenshots: [FAPROTAX_schematic.jpg,FAPROTAX_usage_overview.jpg,heatmap.jpg,bubbleplot.jpg]
icon: faprotax.png
#
# define a set of similar apps that might be useful to the user
#
suggestions:
apps:
related:
[app1, app2]
next:
[app3, app4]
#
# Configure the display and description of parameters
#
parameters :
input_upa :
ui-name : |
Input object
short-hint : |
Object of type <code>AmpliconMatrix</code> or <code>GenomeSet</code><br>
<code>AmpliconMatrix</code> objects must reference a row <code>AttributeMapping</code> that has taxonomic assignments
# TODO this needs to be optional because of GenomeSet
tax_field:
ui-name: |
Taxonomy field
short-hint: |
Select the field from input AmpliconMatrix's row AttributeMapping corresponding to the taxonomic assignments for FAPROTAX to run on
output_amplicon_matrix_name:
ui-name: |
Output AmpliconMatrix name
short-hint: |
Output AmpliconMatrix name
description : |
FAPROTAX is a database that maps prokaryotic clades (e.g. genera or species) to established metabolic or other ecologically relevant functions, using the current literature on cultured strains
<!--------------------------------------------------------------------------------------------->
<h3>
Resources
</h3>
<a href="http://www.loucalab.com/archive/FAPROTAX/lib/php/index.php?section=Instructions">FAPROTAX usage</a>
<br><br>
publications:
-
pmid : 27634532
display-text : |
<NAME>, <NAME>, <NAME>. Decoupling function and taxonomy in the global ocean microbiome. Science. 2016;353: 1272-1277. doi:10.1126/science.aaf4507
link: https://doi.org/10.1126/science.aaf4507
|
ui/narrative/methods/faprotax/display.yaml
|
---
- name: Extensions | configure PgBouncer
when: groups.postgresql[0] == inventory_hostname
block:
# Avoid wrong process identification as interpreted script is used
# See https://chris-lamb.co.uk/posts/start-stop-daemon-exec-vs-startas
- name: Extensions | PgBouncer | Debian | Change daemon start stop option from 'exec' to 'startas'
when: ansible_os_family == 'Debian'
lineinfile:
path: /etc/init.d/pgbouncer
regexp: "^SSD="
line: 'SSD="start-stop-daemon --pidfile $PIDFILE --startas $DAEMON --quiet"'
- name: Extensions | PgBouncer | Force systemd to reread configs
systemd:
daemon_reload: true
- name: Extensions | PgBouncer | Ensure that systemd service is started
systemd:
name: pgbouncer
state: started
- name: Extensions | PgBouncer | Change pgbouncer configuration
lineinfile:
path: /etc/pgbouncer/pgbouncer.ini
regexp: "^postgres = host="
line: postgres = host=127.0.0.1 port=5432 dbname=postgres
insertafter: '\[databases\]'
backup: true
register: db_connection_line
- name: Extensions | PgBouncer | Change pgbouncer users configuration
lineinfile:
path: /etc/pgbouncer/userlist.txt
line: '"postgres" "*"'
create: true
mode: u=rw,g=,o=
owner: "{{ pgbouncer.user[ansible_os_family] }}"
group: "{{ pgbouncer.group[ansible_os_family] }}"
backup: true
register: db_user_line
- name: Extensions | PgBouncer | Create logrotate configuration file
template:
src: logrotate-pgbouncer.conf.j2
dest: /etc/logrotate.d/pgbouncer
owner: root
group: root
mode: u=rw,go=r
- name: Extensions | PgBouncer | Restart systemd service
when: db_connection_line.changed or db_user_line.changed
systemd:
name: pgbouncer
state: restarted
- name: Extensions | PgBouncer | Ensure that systemd service is enabled
systemd:
name: pgbouncer
enabled: true
|
ansible/playbooks/roles/postgresql/tasks/extensions/pgbouncer/extension.yml
|
name: Create Release
on:
push:
branches: [ 'main' ]
jobs:
create_release:
runs-on: ubuntu-18.04
name: Create Release
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
# - name: Determine next version
# id: tag_version_dry_run
# uses: mathieudutour/github-tag-action@v5
# with:
# github_token: ${{ secrets.PAT_REPO_ONLY }}
# release_branches: main
# default_bump: patch
# dry_run: True
# - name: Update appVersion in the Helm Chart values.yaml
# uses: fjogeleit/yaml-update-action@0.1.0
# with:
# valueFile: 'deploy/helm/values.yaml'
# propertyPath: 'appVersion'
# value: ${{ steps.tag_version_dry_run.outputs.new_tag }}
# token: ${{ secrets.GITHUB_TOKEN }}
# branch: main
# message: "chore: Updating helm chart values.yaml to use latest tag"
# createPR: false
# - name: Update image in the k8s deployment.yaml
# uses: fjogeleit/yaml-update-action@0.1.0
# with:
# valueFile: 'deploy/manifests/deployment.yaml'
# propertyPath: 'spec.template.spec.containers.image'
# value: chrisjohnson00/solaredge-prometheus-exporter:${{ steps.tag_version_dry_run.outputs.new_tag }}
# token: ${{ secrets.GITHUB_TOKEN }}
# branch: main
# message: "chore: Updating container versions in deployment.yaml to use the latest tag"
# createPR: false
# - name: Update metadata.labels.version in the k8s deployment.yaml
# uses: fjogeleit/yaml-update-action@0.1.0
# with:
# valueFile: 'deploy/manifests/deployment.yaml'
# propertyPath: 'metadata.labels.version'
# value: ${{ steps.tag_version_dry_run.outputs.new_tag }}
# token: ${{ secrets.GITHUB_TOKEN }}
# branch: main
# message: "chore: Updating metadata labels in deployment.yaml to use the latest tag"
# createPR: false
# - name: Update spec.template.metadata.labels.version in the k8s deployment.yaml
# uses: fjogeleit/yaml-update-action@0.1.0
# with:
# valueFile: 'deploy/manifests/deployment.yaml'
# propertyPath: 'spec.template.metadata.labels.version'
# value: ${{ steps.tag_version_dry_run.outputs.new_tag }}
# token: ${{ secrets.GITHUB_TOKEN }}
# branch: main
# message: "chore: Updating template metadata labels in deployment.yaml to use the latest tag"
# createPR: false
- name: Get Latest Tag
id: previoustag
uses: WyriHaximus/github-action-get-previous-tag@1.0.0
- name: Bump version and push tag
id: tag_version
uses: mathieudutour/github-tag-action@v5
with:
github_token: ${{ secrets.PAT_REPO_ONLY }}
release_branches: main
default_bump: patch
- name: Generate incremental changelog
uses: charmixer/auto-changelog-action@v1.1
with:
token: ${{ secrets.GITHUB_TOKEN }}
release_branch: main
since_tag: ${{ steps.previoustag.outputs.tag }}
output: release.md
- name: Read incremental changelog
id: changelog
uses: juliangruber/read-file-action@v1
with:
path: ./release.md
- name: Create a GitHub release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ steps.tag_version.outputs.new_tag }}
release_name: ${{ steps.tag_version.outputs.new_tag }}
body: ${{ steps.changelog.outputs.content }}
- name: Generate changelog
uses: charmixer/auto-changelog-action@v1.1
with:
token: ${{ secrets.GITHUB_TOKEN }}
release_branch: main
- uses: stefanzweifel/git-auto-commit-action@v4
with:
commit_message: "chore: Generating CHANGELOG.md for ${{ steps.tag_version.outputs.new_tag }}"
file_pattern: CHANGELOG.md
|
.github/workflows/auto-release.yaml
|
clusters:
- name: dev
config:
addresses: # A list of Elasticsearch nodes to use.
- http://dev-es:9200
# username: "elastic" # Username for HTTP Basic Authentication
# password: "<PASSWORD>" # Password for HTTP Basic Authentication
# cloudid: "clustername:id" # Endpoint for the Elastic Service (https://elastic.co/cloud)
    # apikey: "apikey" # Base64-encoded token for authorization; if set, overrides username/password and service token
# servicetoken: "string" # Service token for authorization; if set, overrides username/password
# certificatefingerprint: "sha256" # SHA256 hex fingerprint given by Elasticsearch on first launch.
# header: {} # Additional headers to include in the request
# retryonstatus: # List of status codes for retry
# - 502
# - 503
# - 504
# disableretry: False # Disables retry
# enableretryontimeout: False # Enable retry on timeout errors
# maxretries: 3 # Number of retries before giving up.
# compressrequestbody: false # Enable compression of request body
    # discovernodesonstart: false # Discover additional nodes when initializing the client.
# discovernodesinterval: 10s
# enablemetrics: false # Enable the collection of metrics
# enabledebuglogger: false # Enable debug logging
# enablecompatibilitymode: false # Enables compatibility header
# disablemetaheader: false # Disables the additional "X-Elastic-Client-Meta" HTTP header.
# useresponsecheckonly: false
- name: uat
config:
addresses: # A list of Elasticsearch nodes to use.
- http://uat-es:9201
# username: "elastic" # Username for HTTP Basic Authentication
# password: "<PASSWORD>" # Password for HTTP Basic Authentication
# cloudid: "clustername:id" # Endpoint for the Elastic Service (https://elastic.co/cloud)
    # apikey: "apikey" # Base64-encoded token for authorization; if set, overrides username/password and service token
# servicetoken: "string" # Service token for authorization; if set, overrides username/password
# certificatefingerprint: "sha256" # SHA256 hex fingerprint given by Elasticsearch on first launch.
# header: {} # Additional headers to include in the request
# retryonstatus: # List of status codes for retry
# - 502
# - 503
# - 504
# disableretry: False # Disables retry
# enableretryontimeout: False # Enable retry on timeout errors
# maxretries: 3 # Number of retries before giving up.
# compressrequestbody: false # Enable compression of request body
    # discovernodesonstart: false # Discover additional nodes when initializing the client.
# discovernodesinterval: 10s
# enablemetrics: false # Enable the collection of metrics
# enabledebuglogger: false # Enable debug logging
# enablecompatibilitymode: false # Enables compatibility header
# disablemetaheader: false # Disables the additional "X-Elastic-Client-Meta" HTTP header.
# useresponsecheckonly: false
- name: prod
config:
addresses: # A list of Elasticsearch nodes to use.
- http://prod-es:9201
# username: "elastic" # Username for HTTP Basic Authentication
# password: "<PASSWORD>" # Password for HTTP Basic Authentication
# cloudid: "clustername:id" # Endpoint for the Elastic Service (https://elastic.co/cloud)
    # apikey: "apikey" # Base64-encoded token for authorization; if set, overrides username/password and service token
# servicetoken: "string" # Service token for authorization; if set, overrides username/password
# certificatefingerprint: "sha256" # SHA256 hex fingerprint given by Elasticsearch on first launch.
# header: {} # Additional headers to include in the request
# retryonstatus: # List of status codes for retry
# - 502
# - 503
# - 504
# disableretry: False # Disables retry
# enableretryontimeout: False # Enable retry on timeout errors
# maxretries: 3 # Number of retries before giving up.
# compressrequestbody: false # Enable compression of request body
    # discovernodesonstart: false # Discover additional nodes when initializing the client.
# discovernodesinterval: 10s
# enablemetrics: false # Enable the collection of metrics
# enabledebuglogger: false # Enable debug logging
# enablecompatibilitymode: false # Enables compatibility header
# disablemetaheader: false # Disables the additional "X-Elastic-Client-Meta" HTTP header.
# useresponsecheckonly: false
|
example.yml
|
name: MountConfiguration
uid: '@azure/arm-batch.MountConfiguration'
package: '@azure/arm-batch'
summary: The file system to mount on each node.
fullName: MountConfiguration
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
- name: azureBlobFileSystemConfiguration
uid: '@azure/arm-batch.MountConfiguration.azureBlobFileSystemConfiguration'
package: '@azure/arm-batch'
summary: This property is mutually exclusive with all other properties.
fullName: azureBlobFileSystemConfiguration
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration'
return:
description: ''
type: <xref uid="@azure/arm-batch.AzureBlobFileSystemConfiguration" />
- name: azureFileShareConfiguration
uid: '@azure/arm-batch.MountConfiguration.azureFileShareConfiguration'
package: '@azure/arm-batch'
summary: This property is mutually exclusive with all other properties.
fullName: azureFileShareConfiguration
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'azureFileShareConfiguration?: AzureFileShareConfiguration'
return:
description: ''
type: <xref uid="@azure/arm-batch.AzureFileShareConfiguration" />
- name: cifsMountConfiguration
uid: '@azure/arm-batch.MountConfiguration.cifsMountConfiguration'
package: '@azure/arm-batch'
summary: This property is mutually exclusive with all other properties.
fullName: cifsMountConfiguration
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'cifsMountConfiguration?: CifsMountConfiguration'
return:
description: ''
type: <xref uid="@azure/arm-batch.CifsMountConfiguration" />
- name: nfsMountConfiguration
uid: '@azure/arm-batch.MountConfiguration.nfsMountConfiguration'
package: '@azure/arm-batch'
summary: This property is mutually exclusive with all other properties.
fullName: nfsMountConfiguration
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'nfsMountConfiguration?: NFSMountConfiguration'
return:
description: ''
type: <xref uid="@azure/arm-batch.NFSMountConfiguration" />
|
preview-packages/docs-ref-autogen/@azure/arm-batch/MountConfiguration.yml
|
apiVersion: cr.kanister.io/v1alpha1
kind: Blueprint
metadata:
name: k8ssandra-blueprint
actions:
backup:
outputArtifacts:
backupInfo:
keyValue:
apiVersion: '{{ .Phases.createCassandraBackup.Output.apiVersion }}'
group: '{{ .Phases.createCassandraBackup.Output.group }}'
name: '{{ .Phases.createCassandraBackup.Output.name }}'
namespace: '{{ .Phases.createCassandraBackup.Output.namespace }}'
resource: '{{ .Phases.createCassandraBackup.Output.resource }}'
phases:
# Create CassandraBackup CR
- func: KubeOps
name: createCassandraBackup
args:
operation: create
namespace: '{{ .Object.metadata.namespace }}'
spec: |-
apiVersion: cassandra.k8ssandra.io/v1alpha1
kind: CassandraBackup
metadata:
name: kanister-{{ .Object.spec.clusterName }}-{{ .Object.metadata.name }}-{{ toDate "2006-01-02T15:04:05.999999999Z07:00" .Time | date "2006-01-02t15-04-05" }}
spec:
name: kanister-{{ .Object.spec.clusterName }}-{{ .Object.metadata.name }}-{{ toDate "2006-01-02T15:04:05.999999999Z07:00" .Time | date "2006-01-02t15-04-05" }}
cassandraDatacenter: "{{ .Object.metadata.name }}"
- func: Wait
name: waitCassandraBackupComplete
args:
timeout: 45m
conditions:
anyOf:
- condition: |-
{{ if "{$.status.finishTime}" }}
{{ if "{$.status.finished[]}" }}
true
{{ end }}
{{ else }}
false
{{ end }}
objectReference:
apiVersion: '{{ .Phases.createCassandraBackup.Output.apiVersion }}'
group: '{{ .Phases.createCassandraBackup.Output.group }}'
name: '{{ .Phases.createCassandraBackup.Output.name }}'
namespace: '{{ .Phases.createCassandraBackup.Output.namespace }}'
resource: '{{ .Phases.createCassandraBackup.Output.resource }}'
restore:
inputArtifactNames:
- backupInfo
phases:
# Create CassandraRestore CR
- func: KubeOps
name: createCassandraRestore
args:
namespace: '{{ .Object.metadata.namespace }}'
operation: create
spec: |-
apiVersion: cassandra.k8ssandra.io/v1alpha1
kind: CassandraRestore
metadata:
name: restore-{{ .ArtifactsIn.backupInfo.KeyValue.name }}
spec:
backup: {{ .ArtifactsIn.backupInfo.KeyValue.name }}
inPlace: true
shutdown: true
cassandraDatacenter:
name: "{{ .Object.metadata.name }}"
clusterName: "{{ .Object.spec.clusterName }}"
- func: Wait
name: waitCassandraRestoreComplete
args:
timeout: 600m
conditions:
anyOf:
- condition: |-
{{ if "{$.status.finishTime}" }}
{{ if "{$.status.restoreKey}" }}
true
{{ end }}
{{ else }}
false
{{ end }}
objectReference:
apiVersion: '{{ .Phases.createCassandraRestore.Output.apiVersion }}'
group: '{{ .Phases.createCassandraRestore.Output.group }}'
name: '{{ .Phases.createCassandraRestore.Output.name }}'
namespace: '{{ .Phases.createCassandraRestore.Output.namespace }}'
resource: '{{ .Phases.createCassandraRestore.Output.resource }}'
- func: KubeOps
name: cassandraRestoreDelete
args:
operation: delete
objectReference:
apiVersion: '{{ .Phases.createCassandraRestore.Output.apiVersion }}'
group: '{{ .Phases.createCassandraRestore.Output.group }}'
name: '{{ .Phases.createCassandraRestore.Output.name }}'
namespace: '{{ .Phases.createCassandraRestore.Output.namespace }}'
resource: '{{ .Phases.createCassandraRestore.Output.resource }}'
delete:
inputArtifactNames:
- backupInfo
phases:
- func: KubeOps
name: cassandraBackupDelete
args:
operation: delete
objectReference:
apiVersion: '{{ .ArtifactsIn.backupInfo.KeyValue.apiVersion }}'
group: '{{ .ArtifactsIn.backupInfo.KeyValue.group }}'
name: '{{ .ArtifactsIn.backupInfo.KeyValue.name }}'
namespace: '{{ .ArtifactsIn.backupInfo.KeyValue.namespace }}'
resource: '{{ .ArtifactsIn.backupInfo.KeyValue.resource }}'
|
examples/k8ssandra/k8ssandra-blueprint.yaml
|
- name: Adatelemzési virtuális gép dokumentációja
href: index.yml
items: null
- name: Áttekintés
expanded: true
items:
- name: Mi az az adatelemzési virtuális gép?
href: overview.md
- name: Mi az a mélytanulási virtuális gép?
href: deep-learning-dsvm-overview.md
- name: Mi az a mesterséges intelligenciát használó geo-adatelemzési virtuális gép?
href: geo-ai-dsvm-overview.md
- name: Gyors útmutatók
expanded: true
items:
- name: Windows rendszerű DSVM létrehozása
href: provision-vm.md
- name: Ubuntu rendszerű DSVM létrehozása
href: dsvm-ubuntu-intro.md
- name: CentOS rendszerű DSVM létrehozása
href: linux-dsvm-intro.md
- name: Mesterséges intelligenciát használó geo-adatelemzési virtuális gép létrehozása
href: provision-geo-ai-dsvm.md
- name: Alapelvek
items:
- name: A DSVM felderítése
href: dsvm-tools-overview.md
- name: Mély tanulás és mesterségesintelligencia-keretrendszerek
href: dsvm-deep-learning-ai-frameworks.md
- name: Programozási nyelvek
href: dsvm-languages.md
- name: Fejlesztési eszközök
href: dsvm-tools-development.md
- name: Gépi tanulási és adatelemzési eszközök
href: dsvm-ml-data-science-tools.md
- name: Adatplatformok
href: dsvm-data-platforms.md
- name: Adatfeldolgozási eszközök
href: dsvm-tools-ingestion.md
- name: Adatáttekintési és vizualizációs eszközök
href: dsvm-tools-explore-and-visualize.md
- name: Vállalati biztonság és útmutatás
href: dsvm-enterprise-overview.md
- name: Példák
href: dsvm-samples-and-walkthroughs.md
- name: Útmutatók
items:
- name: Windowsos DSVM használata
href: vm-do-ten-things.md
- name: Linuxos DSVM használata
href: linux-dsvm-walkthrough.md
- name: Mélytanulási virtuális gép használata
href: use-deep-learning-dsvm.md
- name: Geo AI DSVM használata
href: use-geo-ai-dsvm.md
- name: Automatikus skálázású DSVM-készlet létrehozása
href: dsvm-pools.md
  - name: Integráció az Active Directoryval
href: dsvm-common-identity.md
- name: Biztonságos erőforrás-hozzáférési hitelesítő adatok
href: dsvm-secure-access-keys.md
- name: További források
items:
- name: MSDN-fórum
href: 'https://social.microsoft.com/Forums/home?forum=dsvm'
- name: Stack Overflow
href: 'https://stackoverflow.com/search?q=dsvm'
- name: R fejlesztői útmutató
href: /azure/architecture/data-guide/technology-choices/r-developers-guide?context=azure/machine-learning/data-science-virtual-machine/context/ml-context
|
articles/machine-learning/data-science-virtual-machine/toc.yml
|
__id: en
__langname: English
Can be Extended For: Extension
contest_create: Contest Create
contest_detail_problem_submit: Submit Contest Problem
contest_edit: Contest Edit
contest_main: Contest
contest_scoreboard: Scoreboard
discussion_create: Create Discussion
discussion_detail: Discussion Detail
discussion_edit: Discussion Edit
discussion_main: Discussion
discussion_node: Discussion
display_name: Display Name
domain_dashboard: Dashboard
domain_discussion: Discussion Nodes
domain_edit: Edit Info
domain_join_applications: Join Applications
domain_main: Main
domain_permission: System Permission
domain_role: System Role
domain_user: System User
fs_upload: File Upload
home_account: Account Settings
home_domain_account: Profile @ Domain
home_domain_create: Create Domain
home_domain: My Domains
home_messages: Messages
home_preference: Preference
home_security: Security
homepage: Home
homework_create: Homework Create
homework_detail_problem_submit: Submit Homework Problem
homework_edit: Homework Edit
homework_main: Homework
homework_scoreboard: Scoreboard
judge_playground: Judge Playground
main: Home
manage_dashboard: Control Panel
manage_module: Module Management
manage_script: Scripts
manage_setting: System Settings
manage_user_import: Import Users
manage: System Manage
no_translation_warn: <blockquote class="warn">This part of content is under translation.</blockquote>
page.problem_detail.sidebar.show_category: Click to Show
page.training_detail.invalid_when_not_enrolled: You cannot view problem details unless enrolled.
pager_first: « First
pager_last: Last »
pager_next: Next ›
pager_previous: ‹ Previous
perm_contest: Contests
perm_discussion: Discussions
perm_general: General
perm_problem_solution: Problem Solutions
perm_problem: Problems
perm_record: Records
perm_training: Trainings
problem_create: Problem Create
problem_detail: Problem Detail
problem_edit: Problem Edit
problem_files: Problem Files
problem_import: Import Problem
problem_main: Problem Set
problem_solution: Problem Solution
problem_statistics: Problem Statistics
problem_submit: Problem Submit
problem-category-delim: '|'
problem-subcategory-delim: ', '
problem.import.additional_file: '## Additional Files'
problem.import.hint: '## Hint'
problem.import.input_format: '## Input Format'
problem.import.limit_and_hint: '## Limits'
problem.import.output_format: '## Output Format'
problem.import.problem_description: '## Description'
ranking: Ranking
record_detail: Record Detail
record_main: Judging Queue
setting_customize: Customize
setting_display: Display Preference
setting_function: Function
setting_info_domain: Personal Info
setting_info: Personal Info
setting_preference: Preference
setting_privacy: Privacy
setting_server: Server Settings
setting_session: Session Settings
setting_smtp: SMTP Settings
setting_usage: Usage Preference
timeago_locale: en_US
training_create: Training Create
training_detail: Training Detail
training_edit: Training Edit
training_main: Training
user_detail: User Detail
user_login: Login
user_logout: Logout
user_lostpass: Lost Password
user_register: Register
wiki_help: Help
|
packages/hydrooj/locales/en.yaml
|
guidelines:
1: PAS.v1
test_cases:
- id: Age 3, Low
input:
1:
gt0003|Respiration rate: 30,/min
gt0005|Age: 3,a
gt0009|Auscultation: 1|local::at0009|Normal breath sounds to end-expiratory wheeze only|
gt0010|Oxygen requirements: 1|local::at0005|>95% on room air|
gt0011|Retractions: 1|local::at0013|None or intercostal|
gt0012|Dyspnea: 1|local::at0017|Speaks in sentences (or coos and babbles)|
expected_output:
1:
gt0015|Total score: 5
gt0014|Respiratory rate by age: 1|local::at0022|≤34 for 2-3 yrs, ≤30 for 4-5 yrs, ≤26 for 6-12 yrs, ≤23 for >12 yrs|
gt0017|Severity of Exacerbation: 0|local::at0004|Mild|
- id: Age 4, Low
input:
1:
gt0003|Respiration rate: 30,/min
gt0005|Age: 4,a
gt0009|Auscultation: 1|local::at0009|Normal breath sounds to end-expiratory wheeze only|
gt0010|Oxygen requirements: 1|local::at0005|>95% on room air|
gt0011|Retractions: 1|local::at0013|None or intercostal|
gt0012|Dyspnea: 1|local::at0017|Speaks in sentences (or coos and babbles)|
expected_output:
1:
gt0015|Total score: 5
gt0014|Respiratory rate by age: 1|local::at0022|≤34 for 2-3 yrs, ≤30 for 4-5 yrs, ≤26 for 6-12 yrs, ≤23 for >12 yrs|
gt0017|Severity of Exacerbation: 0|local::at0004|Mild|
- id: Age 7, Low
input:
1:
gt0003|Respiration rate: 20,/min
gt0005|Age: 7,a
gt0009|Auscultation: 1|local::at0009|Normal breath sounds to end-expiratory wheeze only|
gt0010|Oxygen requirements: 1|local::at0005|>95% on room air|
gt0011|Retractions: 1|local::at0013|None or intercostal|
gt0012|Dyspnea: 1|local::at0017|Speaks in sentences (or coos and babbles)|
expected_output:
1:
gt0015|Total score: 5
gt0014|Respiratory rate by age: 1|local::at0022|≤34 for 2-3 yrs, ≤30 for 4-5 yrs, ≤26 for 6-12 yrs, ≤23 for >12 yrs|
gt0017|Severity of Exacerbation: 0|local::at0004|Mild|
- id: Age 13, Low
input:
1:
gt0003|Respiration rate: 20,/min
gt0005|Age: 13,a
gt0009|Auscultation: 1|local::at0009|Normal breath sounds to end-expiratory wheeze only|
gt0010|Oxygen requirements: 1|local::at0005|>95% on room air|
gt0011|Retractions: 1|local::at0013|None or intercostal|
gt0012|Dyspnea: 1|local::at0017|Speaks in sentences (or coos and babbles)|
expected_output:
1:
gt0015|Total score: 5
gt0014|Respiratory rate by age: 1|local::at0022|≤34 for 2-3 yrs, ≤30 for 4-5 yrs, ≤26 for 6-12 yrs, ≤23 for >12 yrs|
gt0017|Severity of Exacerbation: 0|local::at0004|Mild|
- id: Age 3, Moderate
input:
1:
gt0003|Respiration rate: 30,/min
gt0005|Age: 3,a
gt0009|Auscultation: 2|local::at0010|Expiratory wheezing|
gt0010|Oxygen requirements: 2|local::at0006|90-95% on room air|
gt0011|Retractions: 2|local::at0014|Intercostal and substernal|
gt0012|Dyspnea: 1|local::at0017|Speaks in sentences (or coos and babbles)|
expected_output:
1:
gt0015|Total score: 8
gt0014|Respiratory rate by age: 1|local::at0022|≤34 for 2-3 yrs, ≤30 for 4-5 yrs, ≤26 for 6-12 yrs, ≤23 for >12 yrs|
gt0017|Severity of Exacerbation: 1|local::at0005|Moderate|
- id: Age 3, Severe
input:
1:
gt0003|Respiration rate: 39,/min
gt0005|Age: 3,a
gt0009|Auscultation: 3|local::at0011|Inspiratory and expiratory wheezing to diminished breath sounds|
gt0010|Oxygen requirements: 2|local::at0006|90-95% on room air|
gt0011|Retractions: 3|local::at0015|Intercostal, substernal, and supraclavicular|
gt0012|Dyspnea: 3|local::at0019|Speaks in single words or short phrases (or grunts)|
expected_output:
1:
gt0015|Total score: 13
gt0014|Respiratory rate by age: 2|local::at0023|35-39 for 2-3 yrs,31-35 for 4-5 yrs, 27-30 for 6-12 yrs, 24-27 for >12 yrs|
gt0017|Severity of Exacerbation: 2|local::at0006|Severe|
- id: Age 3, max point for respiratory rate
input:
1:
gt0003|Respiration rate: 40,/min
gt0005|Age: 3,a
gt0009|Auscultation: 3|local::at0011|Inspiratory and expiratory wheezing to diminished breath sounds|
gt0010|Oxygen requirements: 2|local::at0006|90-95% on room air|
gt0011|Retractions: 3|local::at0015|Intercostal, substernal, and supraclavicular|
gt0012|Dyspnea: 3|local::at0019|Speaks in single words or short phrases (or\
\ grunts)|
expected_output:
1:
gt0015|Total score: 14
gt0014|Respiratory rate by age: 3|local::at0024|≥40 for 2-3 yrs, ≥36 for 4-5 yrs, ≥31 for 6-12 yrs, ≥28 for >12 yrs|
gt0017|Severity of Exacerbation: 2|local::at0006|Severe|
|
gdl2/PAS.v1.test.yml
|
AWSTemplateFormatVersion: '2010-09-09'
Metadata:
AWS::ServerlessRepo::Application:
Name: eks-kubectl-apply
Description: "This Lambda function `kubectl apply -f` to update kubernetes objects for you"
Author: <NAME>
SpdxLicenseId: Apache-2.0
LicenseUrl: LICENSE
ReadmeUrl: README.md
Labels: ['eks','kubectl','kubernetes','hook']
HomePageUrl: https://github.com/aws-samples/aws-lambda-layer-kubectl/tree/master/samples/update-custom-k8s-objects
SemanticVersion: 1.0.0
SourceCodeUrl: https://github.com/aws-samples/aws-lambda-layer-kubectl/tree/master/samples/update-custom-k8s-objects
Parameters:
ClusterName:
Type: String
Default: default
FunctionName:
Type: String
Default: defaultFunc
NodeInstanceRoleArn:
Type: String
LambdaRoleArn:
Type: String
LambdaLayerKubectlArn:
Type: String
Transform: AWS::Serverless-2016-10-31
Description: kubernetes resource object handler
Resources:
Func:
Type: AWS::Serverless::Function
Properties:
FunctionName: !Ref FunctionName
Handler: main
CodeUri: ./func.d
Runtime: provided
Layers:
- !Ref LambdaLayerKubectlArn
MemorySize: 512
Environment:
Variables:
cluster_name: !Ref ClusterName
iam_role_arn: !Ref NodeInstanceRoleArn
aws_account_id: !Sub "${AWS::AccountId}"
lambda_role_arn: !Ref LambdaRoleArn
Role: !Ref LambdaRoleArn
Timeout: 10
WebServices:
Type: Custom::KubernetesResource
Properties:
ServiceToken: !GetAtt Func.Arn
Objects:
# nginx service
- https://gist.githubusercontent.com/pahud/54906d24e7889a0adaed72ce4d4baefe/raw/680659932542f5b155fa0f4d2590896729784045/nginx.yaml
# caddy service
- https://gist.githubusercontent.com/pahud/54906d24e7889a0adaed72ce4d4baefe/raw/680659932542f5b155fa0f4d2590896729784045/caddy.yaml
Outputs:
LambdaFuncArn:
Description: Lambda Func ARN
Value: !GetAtt Func.Arn
LambdaFuncName:
Description: Lambda Func Name
Value: !Ref Func
LambdaRoleArn:
Description: Lambda Role ARN
Value: !Ref LambdaRoleArn
|
samples/create-k8s-objects/sar-sam.yaml
|
name: OFRP CI
on:
workflow_dispatch:
jobs:
build:
name: OFRP Build CI
runs-on: ubuntu-18.04
env:
OEM: samsung
DEVICE: j1pop3g
BRANCH: twrp-5.1
steps:
- uses: actions/checkout@v2
- name: Installing build dependencies
run: |
sudo apt-get update
sudo apt-get install zip gcc-multilib g++-multilib \
libc6-dev-i386 lib32ncurses5-dev lib32z1-dev \
libgl1-mesa-dev libxml2-utils xsltproc schedtool axel
mkdir ~/.jdk_7
cd ~/.jdk_7
axel -q -n $(nproc --all) https://download.java.net/openjdk/jdk7u75/ri/openjdk-7u75-b13-linux-x64-18_dec_2014.tar.gz
tar -xzf openjdk-7u75-b13-linux-x64-18_dec_2014.tar.gz
- name: Syncing OFRP sources
run: |
mkdir ~/OFRP
cd ~/OFRP
mkdir ~/.bin
curl http://commondatastorage.googleapis.com/git-repo-downloads/repo > ~/.bin/repo
python3 ~/.bin/repo init --depth=1 -u https://github.com/TriDiscord/fox-5.1_manifest.git -b fox_5.1
python3 ~/.bin/repo sync -c --force-sync --optimized-fetch --no-tags --no-clone-bundle --prune -j$(nproc --all)
git clone --branch ${{ env.BRANCH }} --single-branch https://github.com/J110H-Android/twrp_device_${{ env.OEM }}_${{ env.DEVICE }}.git ~/OFRP/device/${{ env.OEM }}/${{ env.DEVICE }}
- name: Building OFRP
run: |
OLDPATH=$PATH
OLDJAVAHOME=$JAVA_HOME
export PATH="$HOME/.jdk_7/java-se-7u75-ri/bin:$PATH"
export JAVA_HOME="$HOME/.jdk_7/java-se-7u75-ri"
cd ~/OFRP
source build/envsetup.sh
export ALLOW_MISSING_DEPENDENCIES=true
export FOX_USE_TWRP_RECOVERY_IMAGE_BUILDER=1
export LC_ALL=C
export OF_LEGACY_SHAR512=1
lunch omni_${{ env.DEVICE }}-eng
make -j$(nproc --all) recoveryimage
export PATH=$OLDPATH
export JAVA_HOME=$OLDJAVAHOME
- name: Uploading OFRP builds
uses: actions/upload-artifact@v2
with:
name: twrp
path: /home/runner/OFRP/out/target/product/${{ env.DEVICE }}/recovery.*
|
.github/workflows/ofrp.yml
|
version: '3' # 配置文件兼容版本
networks: # 网络设置
back-tire:
services: # 容器编排中不同服务的配置
web: # 容器名称
build: . # 使用当前目录下 Dockerfile 构建镜像
depends_on: # 服务依赖,启动当前服务的容器时,必须先启动其依赖容器。即 redis 和 mysql 会先于 web 启动。
- redis
- mysql
environment: # 环境变量,未指定值的环境变量会从当前环境变量中读取,或从.env文件中读取。
TZ: Asia/Shanghai
FLASK_APP: "manage:app"
FLASK_ENV: "production"
PROD_DATABASE_URI: "mysql+pymysql://${MYSQL_USER}:${MYSQL_PASSWORD}@mysql:3306/${MYSQL_DATABASE}?charset=utf8mb4"
REPO_DIR: "/mrchiblog/repo"
REPO_SSHKEY: "/mrchiblog/sshkey/id_rsa"
WEBHOOK_TOKEN:
HMAC_KEY:
PROD_DINGTALK_TOKEN:
PROD_REDIS_URI:
PROD_RQ_REDIS_URI:
volumes: # 数据卷挂载,rw是读写模式
- /mrchiblog/cache/whoosh:/mrchiblog/prod_whoosh_idx:rw # Whoosh 索引数据文件
- /mrchiblog/logs:/mrchiblog/logs:rw # gunicorn 日志文件
- /mrchiblog/sshkey:/mrchiblog/sshkey:ro # Git SSH key
- /mrchiblog/repo:/mrchiblog/repo:rw # Git Repo
working_dir: /mrchiblog # 工作目录,command 命令在工作目录下执行
command: gunicorn -c gunicorn.py -w 1 -b 0.0.0.0:5000 manage:app # 容器启动时默认执行的命令
    ports: # 端口映射,这里把容器的 5000 端口映射到宿主机的 5000 端口,并监听宿主机 127.0.0.1 的连接
- 127.0.0.1:5000:5000
networks: # 加入的网络,加入同名网络的不同容器间可以通过容器名称来相互访问
- back-tire
restart: on-failure
rq-update:
build: .
depends_on:
- redis
- mysql
environment:
TZ: Asia/Shanghai
FLASK_APP: "manage:app"
FLASK_ENV: "production"
PROD_DATABASE_URI: "mysql+pymysql://${MYSQL_USER}:${MYSQL_PASSWORD}@mysql:3306/${MYSQL_DATABASE}?charset=utf8mb4"
REPO_DIR: "/mrchiblog/repo"
REPO_SSHKEY: "/mrchiblog/sshkey/id_rsa"
WEBHOOK_TOKEN:
HMAC_KEY:
PROD_DINGTALK_TOKEN:
PROD_REDIS_URI:
PROD_RQ_REDIS_URI:
volumes:
- /mrchiblog/cache/whoosh:/mrchiblog/prod_whoosh_idx:rw # Whoosh 索引数据文件
- /mrchiblog/logs:/mrchiblog/logs:rw # gunicorn 日志文件
- /mrchiblog/sshkey:/mrchiblog/sshkey:ro # Git SSH key
- /mrchiblog/repo:/mrchiblog/repo:rw # Git Repo
working_dir: /mrchiblog
command: flask rq worker update
networks:
- back-tire
redis:
image: redis:alpine # 使用 redis:alpine 镜像,镜像不存在时会自动从镜像源拉取
environment:
TZ: Asia/Shanghai
volumes:
- /mrchiblog/data/redis:/data:rw # redis 数据文件
networks:
- back-tire
restart: on-failure
mysql:
image: mysql:5.7
environment:
TZ: Asia/Shanghai
MYSQL_RANDOM_ROOT_PASSWORD: 'yes'
MYSQL_USER:
MYSQL_PASSWORD:
MYSQL_DATABASE:
volumes:
- ./conf/mysql.ini:/etc/mysql/conf.d/mrchiblog.cnf:ro # MySQL 配置
- /mrchiblog/data/mysql:/var/lib/mysql:rw # MySQL 数据文件
networks:
- back-tire
restart: on-failure
|
docker-compose.yaml
|
allOf:
- $ref: "self_mailer_base.yml"
- $ref: "../../../shared/models/form_factor/input_to.yml"
- $ref: "../../../shared/models/form_factor/input_from_us.yml"
- type: object
required:
- to
- inside
- outside
properties:
inside:
description: >
The artwork to use as the inside of your self mailer.
Notes:
- HTML merge variables should not include delimiting whitespace.
- PDF, PNG, and JPGs must be sized at 6"x18" at 300 DPI, while supplied
HTML will be rendered to the specified `size`.
- Be sure to leave room for address and postage information by following
the templates provided here:
- [6x18 bifold template](https://s3-us-west-2.amazonaws.com/public.lob.com/assets/templates/self_mailers/6x18_sfm_bifold_template.pdf)
- [12x9 bifold template](https://s3-us-west-2.amazonaws.com/public.lob.com/assets/templates/self_mailers/12x9_sfm_bifold_template.pdf)
See [here](#section/HTML-Examples) for HTML examples.
oneOf:
- $ref: "../../../shared/attributes/html_string.yml"
- $ref: "../../../shared/attributes/model_ids/tmpl_id.yml"
- $ref: "../../../shared/attributes/remote_file_url.yml"
- $ref: "../../../shared/attributes/local_file_path.yml"
outside:
description: >
The artwork to use as the outside of your self mailer.
Notes:
- HTML merge variables should not include delimiting whitespace.
- PDF, PNG, and JPGs must be sized at 6"x18" at 300 DPI, while supplied
HTML will be rendered to the specified `size`.
See [here](#section/HTML-Examples) for HTML examples.
oneOf:
- $ref: "../../../shared/attributes/html_string.yml"
- $ref: "../../../shared/attributes/model_ids/tmpl_id.yml"
- $ref: "../../../shared/attributes/remote_file_url.yml"
- $ref: "../../../shared/attributes/local_file_path.yml"
billing_group_id:
$ref: "../../../shared/attributes/billing_group_id.yml"
|
resources/self_mailers/models/self_mailer_editable.yml
|
---
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-trigger-git_poll/99/
timestamp: 2016-04-27 19:50:08 UTC
duration: 1h26m5s
active_duration: 1h54m24s
parameters: {}
change:
  git_remote: git@github.com:chef/chef-server.git
git_commit: bbc<PASSWORD>b99c9db9db<PASSWORD>fa
project: chef-server
version: 12.5.0+20160427195043
stages:
chef-server-12-build/architecture=x86_64,platform=ubuntu-10.04,project=chef-server,role=builder:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=ubuntu-10.04,project=chef-server,role=builder/246/
duration: 28m42s
chef-server-12-promote:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-promote/142/
duration: 7s
chef-server-12-test:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/206/
duration: 49m50s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=el-5,project=chef-server,role=tester/206/
duration: 49m49s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=el-6,project=chef-server,role=tester/206/
duration: 49m
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=el-7,project=chef-server,role=tester/206/
duration: 38m44s
ubuntu-10.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=ubuntu-10.04,project=chef-server,role=tester/206/
duration: 49m12s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=ubuntu-12.04,project=chef-server,role=tester/206/
duration: 43m22s
ubuntu-14.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=ubuntu-14.04,project=chef-server,role=tester/206/
duration: 43m55s
chef-server-12-build:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/246/
duration: 35m17s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=el-5,project=chef-server,role=builder/246/
duration: 34m48s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=el-6,project=chef-server,role=builder/246/
duration: 33m43s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=el-7,project=chef-server,role=builder/246/
duration: 33m31s
ubuntu-10.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=ubuntu-10.04,project=chef-server,role=builder/246/
duration: 28m42s
chef-server-12-trigger-git_poll:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-trigger-git_poll/99/
duration: 25s
|
reports/wilson.ci.chef.co/job/chef-server-12-trigger-git_poll/99.yaml
|
quarkus:
profile: dev
package:
type: fast-jar
# type: native
native:
additional-build-args: --allow-incomplete-classpath
# container-build: true
# container-runtime: docker
# container-image:
# build: true
vertx:
prefer-native-transport: true
http:
so-reuse-port: true
swagger-ui:
always-include: true
mp:
openapi:
extensions:
smallrye:
info:
title: Gene Search API
version: 1.0.0
description: REST API for Gene Search Engine
contact:
email: <EMAIL>
name: Gene Search API
url: margostino.com
license:
name: Apache 2.0
url: http://www.apache.org/licenses/LICENSE-2.0.html
"%dev":
quarkus:
log:
level: INFO
category:
"org.gene":
level: ALL
"io.quarkus":
level: INFO
"org.apache":
level: INFO
console:
json: false
event-bus:
session:
create: eventbus.new_seeds
mp:
openapi:
extensions:
smallrye:
info:
title: Gene Search API (GSA / development)
# messaging:
# outgoing:
# new_sessions_kafka_out:
# connector: smallrye-kafka
# type: io.smallrye.reactive.messaging.kafka.Kafka
# topic: new_seeds
# bootstrap.servers: localhost:9092
# key:
# serializer: org.apache.kafka.common.serialization.StringSerializer
# value:
# serializer: org.apache.kafka.common.serialization.StringSerializer
# acks: 1
# incoming:
# new_sessions_kafka_in:
# connector: smallrye-kafka # smallrye-vertx-eventbus
# type: io.smallrye.reactive.messaging.kafka.Kafka
# topic: new_seeds
# bootstrap.servers: localhost:9092
# key:
# deserializer: org.apache.kafka.common.serialization.StringDeserializer
# value:
# deserializer: org.apache.kafka.common.serialization.StringDeserializer
#
# kafka:
# bootstrap.servers: localhost:9092
|
src/main/resources/application.yaml
|
---
# tasks file for cassandra
- apt_repository:
repo: ppa:openjdk-r/ppa
state: present
- name: apt-get update
command: apt-get update
sudo: yes
- name: Import the Apache Cassandra Repository Tasks
include: apache_repo.yml
when: cassandra_configure_apache_repo == True
- name: Custom Facts Part 1 of 2
set_fact:
cassandra_cms_max_heapsize_mb: "{{
[
([ansible_memtotal_mb * 0.5, 1024] | min),
([ansible_memtotal_mb * 0.25, 14336] | min)
] | max | round | int }}"
cassandra_max_heapsize_mb: "{{
[
([ansible_memtotal_mb * 0.5, 1024] | min),
([ansible_memtotal_mb * 0.25, 8192] | min)
] | max | round | int }}"
when:
- ansible_memtotal_mb is defined
- name: Custom Facts Part 2 of 2
set_fact:
cassandra_cms_heap_new_size_mb: "{{
[
(ansible_processor_vcpus * 100.0),
((cassandra_cms_max_heapsize_mb | int) * 0.25)
] | min | round | int }}"
cassandra_heap_new_size_mb: "{{
[
(ansible_processor_vcpus * 100.0),
((cassandra_max_heapsize_mb | int) * 0.25)
] | min | round | int }}"
when:
- ansible_memtotal_mb is defined
- ansible_processor_vcpus is defined
- name: Debug Custom Facts
debug:
msg: {
'ansible_memtotal_mb': "{{ ansible_memtotal_mb }}",
'ansible_processor_vcpus': "{{ ansible_processor_vcpus }}",
'cassandra_cms_heap_new_size_mb': "{{ cassandra_cms_heap_new_size_mb }}",
'cassandra_cms_max_heapsize_mb': "{{ cassandra_cms_max_heapsize_mb }}",
'cassandra_heap_new_size_mb': "{{ cassandra_heap_new_size_mb }}",
'cassandra_max_heapsize_mb': "{{ cassandra_max_heapsize_mb }}"
}
verbosity: 1
when:
- ansible_memtotal_mb is defined
- ansible_processor_vcpus is defined
- name: Install the Cassandra Package (YUM)
yum:
name: "{{ cassandra_package }}"
update_cache: yes
when:
- ansible_os_family is defined
- ansible_os_family == 'RedHat'
- not ansible_check_mode
- name: Install the Cassandra Package (Apt)
apt:
name: "{{ cassandra_package }}"
cache_valid_time: 600
when:
- ansible_os_family is defined
- ansible_os_family == 'Debian'
- not ansible_check_mode
- name: Custom Directories
include: directory.yml
with_items: "{{ cassandra_directories.keys() }}"
loop_control:
loop_var: cassandra_directory_set
when:
- cassandra_directories is defined
- name: Set Default Configuration File Location (Debian)
set_fact:
cassandra_configuration_file: /etc/cassandra/cassandra.yaml
when:
- ansible_os_family is defined
- ansible_os_family == 'Debian'
- cassandra_configuration_file is not defined
- name: Set Default Configuration File Location (RedHat)
set_fact:
cassandra_configuration_file: /etc/cassandra/default.conf/cassandra.yaml
when:
- ansible_os_family is defined
- ansible_os_family == 'RedHat'
- cassandra_configuration_file is not defined
- name: Apply Cassandra Configuration
template:
src: "{{ cassandra_configuration_templ }}"
dest: "{{ cassandra_configuration_file }}"
owner: root
group: root
mode: 0644
notify:
- cassandra_restart_service
- name: Set the DC for Cassandra
lineinfile:
dest: "{{ cassandra_configuration_file | dirname }}/cassandra-rackdc.properties"
regexp: '^dc='
line: "dc={{ cassandra_dc }}"
when:
- cassandra_dc is defined
- not ansible_check_mode
notify:
- cassandra_restart_service
- name: Set the Rack for Cassandra
lineinfile:
dest: "{{ cassandra_configuration_file | dirname }}/cassandra-rackdc.properties"
regexp: '^rack='
line: "rack={{ cassandra_rack }}"
when:
- cassandra_rack is defined
- not ansible_check_mode
notify:
- cassandra_restart_service
- name: Ensure the Service is in the Required State (RedHat)
systemd:
name: cassandra
enabled: "{{ cassandra_service_enabled }}"
daemon_reload: yes
state: "{{ cassandra_service_state }}"
when:
- not ansible_check_mode
- ansible_os_family is defined
- ansible_os_family == 'RedHat'
- name: Ensure the Service is in the Required State
service:
name: cassandra
enabled: "{{ cassandra_service_enabled }}"
state: "{{ cassandra_service_state }}"
when:
- not ansible_check_mode
- ansible_os_family is defined
- ansible_os_family == 'Debian'
|
roles/cassandra/tasks/main.yml
|
language: python
sudo: required
# https://docs.travis-ci.com/user/ci-environment/ to get a modern gcc etc
dist: trusty
# notes from http://conda.pydata.org/docs/travis.html
python:
# We don't actually use the Travis Python, but this keeps it organized.
- "2.7"
- "3.5"
- "3.6"
install:
# We do this conditionally because it saves us some downloading if the
# version is the same.
- if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
    wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh;
    export RANDOM_ID=`python -c "from random import choice; print ''.join([choice('1234567890') for i in range(6)])"`;
  else
    wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
    export RANDOM_ID=`python -c "from random import choice; print(''.join([choice('1234567890') for i in range(6)]))"`;
  fi
- bash miniconda.sh -b -p $HOME/miniconda
- export PATH="$HOME/miniconda/bin:$PATH"
- printenv TRAVIS_PULL_REQUEST
- printenv TRAVIS_PULL_REQUEST_BRANCH
- printenv TRAVIS_COMMIT
- printenv TRAVIS_BRANCH
- printenv
- hash -r
- conda config --set always_yes yes --set changeps1 no
- conda update -q conda
# Useful for debugging any issues with conda
- conda info -a
# "pytest" appeared twice in the original package list; deduplicated.
- conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION numpy pytest cython nose boto3 PyYAML Click numba
- source activate test-environment
# "pylint" appeared twice in the original pip install; deduplicated.
- pip install glob2 pylint tornado awscli
- tests/install_pywren.sh
- tests/run_pylint.sh
before_script:
- export EPOCH_STAMP=`date +%s`
- export BUILD_GUID=`echo $TRAVIS_JOB_NUMBER | sed 's/\./_/g'`_${EPOCH_STAMP}_${RANDOM_ID}
- echo CLEAN BUILD ID IS $BUILD_GUID
- export BUCKET_NAME=`echo pywren_travis_$BUILD_GUID | sed 's/_/-/g'`
- echo $BUCKET_NAME
- pywren test_config
- pywren get_aws_account_id
script:
- pywren create_config --lambda_role=pywren_travis_test_$BUILD_GUID --function_name=pywren_travis_$BUILD_GUID --bucket_name=$BUCKET_NAME --sqs_queue=pywren_travis_$BUILD_GUID --standalone_name=pywren_travis_$BUILD_GUID
- pywren create_role
- pywren create_bucket
- pywren create_instance_profile
- pywren create_queue
- sleep 10
- pywren deploy_lambda
- tests/lambda_tests.sh
- tests/ec2_standalone_tests.sh
- tests/ec2_macroreduce_tests.sh
- tests/cmdline_tests.sh
after_script:
# always make sure we do this cleanup if we can
- pywren delete_queue
- pywren delete_lambda
- pywren delete_instance_profile
- pywren delete_role
- pywren delete_bucket
- pywren standalone terminate_instances
after_failure:
- pywren delete_queue
- pywren delete_lambda
- pywren delete_instance_profile
- pywren delete_role
- pywren delete_bucket
- pywren standalone terminate_instances
env:
  global:
  # Closing quotes added: the redacted values left the opening double quote
  # unterminated, which is invalid YAML.
  - secure: "<KEY>"
  - secure: "<KEY>"
  matrix:
  - RUN_STANDALONE=true
  - RUN_LAMBDA=true
  - RUN_MACROREDUCE=true
  - RUN_COMMANDLINE=true
|
.travis.yml
|
branches:
  only:
    - master
    - development
    - dev-build-test
environment:
  nodejs_version: "8"
  matrix:
    - COMPONENT_NAME: process-service
    - COMPONENT_NAME: content-service
    - COMPONENT_NAME: core
    - COMPONENT_NAME: insights
    # - COMPONENT_NAME: ng2-demo-shell
# Install scripts. (runs after repo cloning)
install:
  # Get the latest stable version of Node.js or io.js
  - ps: Install-Product node $env:nodejs_version
  # install module
  # Fix: the batch variable was written as %APPVEYOR_REPO_BRANCH (missing the
  # closing %) in every branch check below, so the literal text — not the
  # branch name — was compared and the "master" path could never be taken.
  - if %COMPONENT_NAME% EQU process-service (
      if %APPVEYOR_REPO_BRANCH% EQU master
      (cd scripts && sh npm-build-all.sh -t "process-services" || exit 1)
      else
      (cd scripts && sh npm-build-all.sh -t "process-services" -vjsapi alpha|| exit 1)
    )
  - if %COMPONENT_NAME% EQU content-service (
      if %APPVEYOR_REPO_BRANCH% EQU master
      (cd scripts && sh npm-build-all.sh -t "content-services" || exit 1)
      else
      (cd scripts && sh npm-build-all.sh -t "content-services" -vjsapi alpha|| exit 1)
    )
  - if %COMPONENT_NAME% EQU insights (
      if %APPVEYOR_REPO_BRANCH% EQU master
      (cd scripts && sh npm-build-all.sh -t "insights" || exit 1)
      else
      (cd scripts && sh npm-build-all.sh -t "insights" -vjsapi alpha|| exit 1)
    )
  - if %COMPONENT_NAME% EQU core (
      if %APPVEYOR_REPO_BRANCH% EQU master
      (cd scripts && sh npm-build-all.sh -t "core" || exit 1)
      else
      (cd scripts && sh npm-build-all.sh -t "core" -vjsapi alpha|| exit 1)
    )
  - if %COMPONENT_NAME% EQU process-service-cloud (
      if %APPVEYOR_REPO_BRANCH% EQU master
      (cd scripts && sh npm-build-all.sh -t "process-services-cloud" || exit 1)
      else
      (cd scripts && sh npm-build-all.sh -t "process-services-cloud" -vjsapi alpha|| exit 1)
    )
  # - if %COMPONENT_NAME% EQU ng2-demo-shell (
  #     if %APPVEYOR_REPO_BRANCH% EQU master
  #     (cd scripts && sh start.sh -t -ss || exit 1)
  #     else
  #     (cd scripts && sh start.sh -dev -t -ss -vjsapi alpha || exit 1)
  #   )
# Don't actually build.
build: off
matrix:
  fast_finish: true
|
appveyor.yml
|
---
http_interactions:
- request:
method: post
uri: https://api.trello.com/1/boards?key=DEVELOPER_PUBLIC_KEY&token=MEMBER_TOKEN
body:
encoding: UTF-8
string: name=IT+99&idOrganization=5e93ba154634282b6df23bcc&desc=testing+board+create
headers:
Accept:
- "*/*"
User-Agent:
- rest-client/2.1.0 (linux-gnu x86_64) ruby/2.6.6p146
Content-Length:
- '76'
Content-Type:
- application/x-www-form-urlencoded
Accept-Encoding:
- gzip;q=1.0,deflate;q=0.6,identity;q=0.3
Host:
- api.trello.com
response:
status:
code: 200
message: OK
headers:
X-Dns-Prefetch-Control:
- 'off'
X-Frame-Options:
- DENY
Strict-Transport-Security:
- max-age=15552000; includeSubDomains
X-Download-Options:
- noopen
X-Content-Type-Options:
- nosniff
Referrer-Policy:
- strict-origin-when-cross-origin
X-Xss-Protection:
- 1; mode=block
Surrogate-Control:
- no-store
X-Trello-Version:
- 1.2126.1
X-Trello-Environment:
- Production
Access-Control-Allow-Origin:
- "*"
Access-Control-Allow-Methods:
- GET, PUT, POST, DELETE
Access-Control-Allow-Headers:
- Authorization, Accept, Content-Type
Access-Control-Expose-Headers:
- x-rate-limit-api-key-interval-ms, x-rate-limit-api-key-max, x-rate-limit-api-key-remaining,
x-rate-limit-api-token-interval-ms, x-rate-limit-api-token-max, x-rate-limit-api-token-remaining
X-Rate-Limit-Api-Token-Interval-Ms:
- '10000'
X-Rate-Limit-Api-Token-Max:
- '100'
X-Rate-Limit-Api-Token-Remaining:
- '99'
X-Rate-Limit-Api-Key-Interval-Ms:
- '10000'
X-Rate-Limit-Api-Key-Max:
- '300'
X-Rate-Limit-Api-Key-Remaining:
- '299'
X-Rate-Limit-Member-Interval-Ms:
- '10000'
X-Rate-Limit-Member-Max:
- '200'
X-Rate-Limit-Member-Remaining:
- '199'
X-Server-Time:
- '1592155999528'
Content-Type:
- application/json; charset=utf-8
Vary:
- Accept-Encoding
Expires:
- Sun, 14 Jun 2020 17:33:19 GMT
Cache-Control:
- max-age=0, no-cache, no-store
Pragma:
- no-cache
Date:
- Sun, 14 Jun 2020 17:33:19 GMT
Content-Length:
- '945'
Connection:
- keep-alive
body:
encoding: UTF-8
string: '{"id":"5ee65f5fa64d6f2a7aee514c","name":"IT 99","desc":"testing board
create","descData":null,"closed":false,"idOrganization":"5e93ba154634282b6df23bcc","idEnterprise":null,"pinned":false,"url":"https://trello.com/b/s0ETUJD5/it-99","shortUrl":"https://trello.com/b/s0ETUJD5","prefs":{"permissionLevel":"private","hideVotes":false,"voting":"disabled","comments":"members","invitations":"members","selfJoin":true,"cardCovers":true,"isTemplate":false,"cardAging":"regular","calendarFeedEnabled":false,"background":"blue","backgroundImage":null,"backgroundImageScaled":null,"backgroundTile":false,"backgroundBrightness":"dark","backgroundColor":"#0079BF","backgroundBottomColor":"#0079BF","backgroundTopColor":"#0079BF","canBePublic":true,"canBeEnterprise":true,"canBeOrg":true,"canBePrivate":true,"canInvite":true},"labelNames":{"green":"","yellow":"","orange":"","red":"","purple":"","blue":"","sky":"","lime":"","pink":"","black":""},"limits":{}}'
http_version: null
recorded_at: Sun, 14 Jun 2020 17:33:19 GMT
recorded_with: VCR 5.1.0
|
spec/cassettes/can_success_create_a_board.yml
|
items:
- uid: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile"
id: "WithServicePrincipalProfile"
parent: "com.microsoft.azure.management.containerservice"
children:
- "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile.withServicePrincipal(java.lang.String,java.lang.String)"
langs:
- "java"
name: "ContainerService.DefinitionStages.WithServicePrincipalProfile"
nameWithType: "ContainerService.DefinitionStages.WithServicePrincipalProfile"
fullName: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile"
type: "Interface"
package: "com.microsoft.azure.management.containerservice"
summary: "The stage allowing properties for cluster service principals to be specified."
syntax:
content: "public static interface ContainerService.DefinitionStages.WithServicePrincipalProfile"
- uid: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile.withServicePrincipal(java.lang.String,java.lang.String)"
id: "withServicePrincipal(java.lang.String,java.lang.String)"
parent: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile"
langs:
- "java"
name: "withServicePrincipal(String clientId, String secret)"
nameWithType: "ContainerService.DefinitionStages.WithServicePrincipalProfile.withServicePrincipal(String clientId, String secret)"
fullName: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile.withServicePrincipal(String clientId, String secret)"
overload: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile.withServicePrincipal*"
type: "Method"
package: "com.microsoft.azure.management.containerservice"
summary: "Properties for cluster service principals."
syntax:
content: "public abstract ContainerService.DefinitionStages.WithLinux withServicePrincipal(String clientId, String secret)"
parameters:
- id: "clientId"
type: "java.lang.String"
description: "the ID for the service principal"
- id: "secret"
type: "java.lang.String"
description: "the secret password associated with the service principal"
return:
type: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithLinux"
description: "the next stage"
references:
- uid: "java.lang.String"
spec.java:
- uid: "java.lang.String"
name: "String"
fullName: "java.lang.String"
- uid: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithLinux"
name: "ContainerService.DefinitionStages.WithLinux"
nameWithType: "ContainerService.DefinitionStages.WithLinux"
fullName: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithLinux"
- uid: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile.withServicePrincipal*"
name: "withServicePrincipal"
nameWithType: "ContainerService.DefinitionStages.WithServicePrincipalProfile.withServicePrincipal"
fullName: "com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile.withServicePrincipal"
package: "com.microsoft.azure.management.containerservice"
|
docs-ref-autogen/com.microsoft.azure.management.containerservice.ContainerService.DefinitionStages.WithServicePrincipalProfile.yml
|
items:
- uid: ext_azure-cli-iot-ext_az_iot_pnp
name: az iot pnp
summary: Manage entities of an IoT Plug and Play model repository.
description: ''
langs:
- azurecli
children: []
commands:
- uid: ext_azure-cli-iot-ext_az_iot_pnp_capability_model
name: az iot pnp capability-model
summary: Manage device capability models in an IoT Plug and Play model repository.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_capability_model_create
name: az iot pnp capability-model create
summary: Create a capability-model in the company repository.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_capability_model_delete
name: az iot pnp capability-model delete
summary: Delete the capability-model in the company repository.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_capability_model_list
name: az iot pnp capability-model list
summary: List all capability-model.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_capability_model_publish
name: az iot pnp capability-model publish
summary: Publish the capability-model to public repository.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_capability_model_show
name: az iot pnp capability-model show
summary: Get the details of a capability-model.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_capability_model_update
name: az iot pnp capability-model update
summary: Update the capability-model in the company repository.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_interface
name: az iot pnp interface
summary: Manage interfaces in an IoT Plug and Play model repository.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_interface_create
name: az iot pnp interface create
summary: Create an interface in the company repository.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_interface_delete
name: az iot pnp interface delete
summary: Delete an interface in the company repository.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_interface_list
name: az iot pnp interface list
summary: List all interfaces.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_interface_publish
name: az iot pnp interface publish
summary: Publish an interface to public repository.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_interface_show
name: az iot pnp interface show
summary: Get the details of an interface.
- uid: ext_azure-cli-iot-ext_az_iot_pnp_interface_update
name: az iot pnp interface update
summary: Update an interface in the company repository.
globalParameters:
- name: --debug
summary: Increase logging verbosity to show all debug logs.
- name: --help -h
summary: Show this help message and exit.
- name: --only-show-errors
summary: Only show errors, suppressing warnings.
- name: --output -o
defaultValue: json
parameterValueGroup: json, jsonc, table, tsv
summary: Output format.
- name: --query
summary: JMESPath query string. See <a href="http://jmespath.org/">http://jmespath.org/</a> for more information and examples.
- name: --verbose
summary: Increase logging verbosity. Use --debug for full debug logs.
description: Manage entities of an IoT Plug and Play model repository.
|
latest/docs-ref-autogen/ext/azure-cli-iot-ext/iot/pnp.yml
|
uuid: 7554506c-f2e7-4e16-a2d7-8cdb7975350d
langcode: en
status: true
dependencies:
config:
- field.field.node.encyclopedia_item.body
- field.field.node.encyclopedia_item.field_glossary_section
- field.field.node.encyclopedia_item.field_image
- field.field.node.encyclopedia_item.field_image_caption
- field.field.node.encyclopedia_item.field_listing_title
- image.style.thumbnail
- node.type.encyclopedia_item
module:
- field_group
- image
- path
- text
third_party_settings:
field_group:
group_listing_override:
children:
- field_listing_title
- field_glossary_section
parent_name: ''
weight: 1
format_type: details
format_settings:
id: ''
classes: ''
description: ''
open: false
required_fields: true
label: 'Listing Overrides'
region: content
group_image:
children:
- field_image
- field_image_caption
parent_name: ''
weight: 3
format_type: details
format_settings:
id: ''
classes: ''
description: ''
open: false
required_fields: true
label: Image
region: content
id: node.encyclopedia_item.default
targetEntityType: node
bundle: encyclopedia_item
mode: default
content:
body:
type: text_textarea_with_summary
weight: 2
settings:
rows: 9
summary_rows: 3
placeholder: ''
show_summary: false
third_party_settings: { }
region: content
field_glossary_section:
weight: 3
settings:
size: 60
placeholder: ''
third_party_settings: { }
type: string_textfield
region: content
field_image:
weight: 5
settings:
progress_indicator: throbber
preview_image_style: thumbnail
third_party_settings: { }
type: image_image
region: content
field_image_caption:
weight: 6
settings:
size: 60
placeholder: ''
third_party_settings: { }
type: text_textfield
region: content
field_listing_title:
weight: 2
settings:
size: 60
placeholder: ''
third_party_settings: { }
type: string_textfield
region: content
langcode:
type: language_select
weight: 4
region: content
settings:
include_locked: true
third_party_settings: { }
path:
type: path
weight: 6
region: content
settings: { }
third_party_settings: { }
status:
type: boolean_checkbox
settings:
display_label: true
weight: 7
region: content
third_party_settings: { }
title:
type: string_textfield
weight: 0
region: content
settings:
size: 60
placeholder: ''
third_party_settings: { }
hidden:
created: true
promote: true
sticky: true
uid: true
|
config-yml/core.entity_form_display.node.encyclopedia_item.default.yml
|
---
name: Release
on:
  workflow_dispatch:
  push:
    branches:
      - master
    tags:
      - '*'
env:
  # Quoted so the value stays a string: an unquoted bare number here is
  # parsed as a YAML float (e.g. a future "1.20" would silently become 1.2).
  GO_VERSION: "1.15"
  GORELEASER_VERSION: v0.146.0
jobs:
  create-packages:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
        with:
          # Full history so GoReleaser can compute versions from tags.
          fetch-depth: 0
      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: '${{ env.GO_VERSION }}'
      - name: Setup Mage
        run: |
          git clone https://github.com/magefile/mage
          cd mage
          go run bootstrap.go
      - name: Setup GoReleaser
        run: |
          curl -O -L https://github.com/goreleaser/goreleaser/releases/download/${{ env.GORELEASER_VERSION }}/goreleaser_amd64.deb
          sudo dpkg -i goreleaser_amd64.deb
          rm goreleaser_amd64.deb
      - name: Set GoReleaser flags
        id: set-goreleaser-flags
        run: |
          # NOTE(review): both branches currently emit identical flags; the
          # tag branch deliberately keeps --skip-publish (see comment below),
          # so do not "simplify" the if away without deciding on publishing.
          if ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags') }} ; then
            echo "::set-output name=GORELEASER_FLAGS::--rm-dist --snapshot --skip-publish --skip-validate"
          # If you want to publish a new release on tag simply remove --skip-publish flag for tags
          # echo "::set-output name=GORELEASER_FLAGS::--rm-dist --skip-validate"
          else
            echo "::set-output name=GORELEASER_FLAGS::--rm-dist --snapshot --skip-publish --skip-validate"
          fi
      - name: Build packages
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_GITHUB_TOKEN }}
        run: |
          goreleaser release ${{ steps.set-goreleaser-flags.outputs.GORELEASER_FLAGS }}
      - name: Upload packages artifacts
        uses: actions/upload-artifact@v2
        with:
          name: packages
          path: dist
# publish-s3:
# needs: create-packages
# runs-on: ubuntu-latest
# if: github.event_name == 'workflow_dispatch' || startsWith(github.ref, 'refs/tags')
# steps:
# - uses: actions/checkout@master
# - name: Install packages required for publishing script
# run: |
# sudo apt-get -y update
# sudo apt-get install -y procmail createrepo awscli reprepro
# - name: Import GPG key
# run: |
# mkdir -p ~/.gnupg
# echo 'digest-algo sha256' >> ~/.gnupg/gpg.conf
# gpg --import <(echo "${{ secrets.GPG_KEY }}")
# - name: Setup Go
# uses: actions/setup-go@v2
# with:
# go-version: '${{ env.GO_VERSION }}'
# - name: Setup Mage
# run: |
# git clone https://github.com/magefile/mage
# cd mage
# go run bootstrap.go
# - name: Download packages artifacts
# uses: actions/download-artifact@v2
# with:
# name: packages
# path: dist
# - name: Set S3_FOLDER
# id: set-s3-folder
# run: |
# if ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags') }} ; then
# echo "::set-output name=S3_FOLDER::release/modules"
# else
# echo "::set-output name=S3_FOLDER::check/modules"
# fi
# - name: Publish packages to S3 repo
# env:
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# GPG_SIGN_KEY: ${{ secrets.GPG_SIGN_KEY }}
# S3_UPDATE_REPO_SCRIPT_URL: ${{ secrets.S3_UPDATE_REPO_SCRIPT_URL }}
# S3_BUCKET: ${{ secrets.S3_BUCKET }}
# S3_FOLDER: ${{ steps.set-s3-folder.outputs.S3_FOLDER }}
# run: mage publishS3
|
.github/workflows/release.yml
|
# FOSUserBundle base configuration: ORM storage behind the "main" firewall,
# with the user manager provided by PUGXMultiUserBundle.
fos_user:
    db_driver: orm
    firewall_name: main
    user_class: Cdlr\codeBundle\Entity\User
    service:
        user_manager: pugx_user_manager

# One discriminated user type per role; each declares its own registration
# and profile form types (and, where needed, templates/validation groups).
pugx_multi_user:
    users:
        user_one:
            entity:
                class: Cdlr\codeBundle\Entity\Admin
                # factory:
            registration:
                form:
                    type: Cdlr\codeBundle\Form\Type\RegistrationAdminFormType
                    name: fos_user_registration_form
                    validation_groups: [Registration, Default]
                template: CdlrcodeBundle:Registration:Admin.form.html.twig
            profile:
                form:
                    type: Cdlr\codeBundle\Form\Type\ProfileAdminFormType
                    name: fos_user_profile_form
                    validation_groups: [Profile, Default]
        user_two:
            entity:
                class: Cdlr\codeBundle\Entity\Candidat
            registration:
                form:
                    type: Cdlr\codeBundle\Form\Type\RegistrationCandidatFormType
                template: CdlrcodeBundle:Registration:Candidat.form.html.twig
            profile:
                form:
                    type: Cdlr\codeBundle\Form\Type\ProfileCandidatFormType
        user_three:
            entity:
                class: Cdlr\codeBundle\Entity\Gerant
                # factory:
            registration:
                form:
                    type: Cdlr\codeBundle\Form\Type\RegistrationGerantFormType
                    name: fos_user_registration_form
                    validation_groups: [Registration, Default]
                template: CdlrcodeBundle:Registration:Gerant.form.html.twig
            profile:
                form:
                    type: Cdlr\codeBundle\Form\Type\ProfileGerantFormType
                    name: fos_user_profile_form
                    validation_groups: [Profile, Default]
        user_four:
            entity:
                class: Cdlr\codeBundle\Entity\Moniteur
            registration:
                form:
                    type: Cdlr\codeBundle\Form\Type\RegistrationMoniteurFormType
                template: CdlrcodeBundle:Registration:Moniteur.form.html.twig
            profile:
                form:
                    type: Cdlr\codeBundle\Form\Type\ProfileMoniteurFormType
|
src/Cdlr/codeBundle/Resources/config/config.yml
|
name: Excel.ConditionalColorScaleCriterion
uid: 'excel!Excel.ConditionalColorScaleCriterion:interface'
package: excel!
fullName: Excel.ConditionalColorScaleCriterion
summary: 'Represents a color scale criterion which contains a type, value, and a color.'
remarks: '\[ [API set: ExcelApi 1.6](/office/dev/add-ins/reference/requirement-sets/excel-api-requirement-sets) \]'
isPreview: false
isDeprecated: false
type: interface
properties:
- name: color
uid: 'excel!Excel.ConditionalColorScaleCriterion#color:member'
package: excel!
fullName: color
summary: 'HTML color code representation of the color scale color (e.g., \#FF0000 represents Red).'
remarks: '\[ [API set: ExcelApi 1.6](/office/dev/add-ins/reference/requirement-sets/excel-api-requirement-sets) \]'
isPreview: false
isDeprecated: false
syntax:
content: 'color?: string;'
return:
type: string
- name: formula
uid: 'excel!Excel.ConditionalColorScaleCriterion#formula:member'
package: excel!
fullName: formula
summary: 'A number, a formula, or `null` (if `type` is `lowestValue`<!-- -->).'
remarks: '\[ [API set: ExcelApi 1.6](/office/dev/add-ins/reference/requirement-sets/excel-api-requirement-sets) \]'
isPreview: false
isDeprecated: false
syntax:
content: 'formula?: string;'
return:
type: string
- name: type
uid: 'excel!Excel.ConditionalColorScaleCriterion#type:member'
package: excel!
fullName: type
summary: What the criterion conditional formula should be based on.
remarks: '\[ [API set: ExcelApi 1.6](/office/dev/add-ins/reference/requirement-sets/excel-api-requirement-sets) \]'
isPreview: false
isDeprecated: false
syntax:
content: >-
type: Excel.ConditionalFormatColorCriterionType | "Invalid" | "LowestValue" | "HighestValue" | "Number" |
"Percent" | "Formula" | "Percentile";
return:
type: >-
<xref uid="excel!Excel.ConditionalFormatColorCriterionType:enum" /> | "Invalid" | "LowestValue" |
"HighestValue" | "Number" | "Percent" | "Formula" | "Percentile"
|
docs/docs-ref-autogen/excel_online/excel/excel.conditionalcolorscalecriterion.yml
|
# Please remember the following before you begin:
# - This plugin is not a standalone IRCd, and will not function without an InspIRCd
# instance to link to. You can find InspIRCd at http://inspircd.org/
# - This plugin is currently only compatible with recent versions of InspIRCd. This is
# the only type of IRCd you should use with this plugin, NOT UnrealIRCd etc.
# - Make sure you enable m_spanningtree.so in modules.conf.
# This section mainly uses info from your link block, in links.conf.
server:
# The address the plugin needs to connect to.
# Put in 127.0.0.1 if the IRC is on the same box.
host: "127.0.0.1"
# The port the plugin needs to connect to.
# The "port" in your link block.
port: 7000
# The "name" in your link block.
servername: "bungee.example.org"
# The friendly name for this server. It can be whatever you please.
realname: "BungeeCord running BungeeRelay"
# The password the plugin expects to receive.
# The "sendpass" in your link block.
recvpass: "<PASSWORD>"
# The password the plugin will send to the server.
# The "recvpass" in your link block.
sendpass: "<PASSWORD>"
# The length of time to wait before automatically reconnecting, should connection to the
# IRC server be lost. The time is in milliseconds. Set to -1 for no reconnect attempt.
reconnect: 3000
# Name of the channel.
channel: "#minecraft"
# After being killed from the server the user will
# "false": be disconnected from the minecraft server
# "true": not be disconnected from the minecraft server and
# immediately reconnect to the IRC server and channel
reconnectkill: true
# After being kicked from the channel the user will
# "false": be disconnected from the minecraft server
# "true": not be disconnected from the minecraft server and
# immediately reconnect to the IRC channel
reconnectkick: true
# You can set a prefix and suffix to the nicknames of Minecraft IRC users to add in
# the case of a collision.
userprefix: ""
usersuffix: "|mc"
# Set to "true" to make BungeeRelay say each message it receives raw in console.
# Used for debugging purposes only. For general use, leave "false".
debug: false
# This section contains the format of each message going to your Minecraft servers.
# Accepts colour codes, and each parameter is capital and surrounded in curly brackets.
# Leaving a particular message empty is acceptable.
formats:
# The ident of a user connecting to IRC.
# Parameters: {IDENT}
ident: "minecraft"
# How public messages sent from IRC will appear in Minecraft.
# Parameters: sender, message
msg: "<&7[IRC]&r {SENDER}> {MESSAGE}"
# How public messages sent with /me will appear in Minecraft.
# Parameters: sender, message
me: "&d* [IRC] {SENDER} {MESSAGE}"
# How private messages sent to a user will appear in Minecraft.
# Parameters: sender, message
privatemsg: "&7[PM]&r <&7[IRC]&r {SENDER}> {MESSAGE}"
# How private messages appear if it was sent with /me in Minecraft.
privateme: "&7[PM] &d* [IRC] {SENDER} {MESSAGE}"
# The output of the global /say command in Minecraft
# Parameters: message
saycommand: "&d[SERVER] {MESSAGE}"
# How mode changes will appear in Minecraft
# Parameters: sender, mode
mode: "&7{SENDER} set mode {MODE}"
# How nick changes will appear in Minecraft
# Parameters: old_nick, new_nick
nick: "&7{OLD_NICK} is now know as &7{NEW_NICK}"
# How joins will appear in Minecraft
# Parameters: sender
join: "&7{SENDER} joined the channel"
# How parts (leaving the channel) will appear in Minecraft
# Parameters: sender, reason
part: "&7{SENDER} left the channel ({REASON})"
# How Minecraft quits will appear in IRC
# Parameters: sender
mcquit: "{SENDER} logged out of Minecraft"
# How IRC quits will appear in Minecraft
# Parameters: sender, reason
ircquit: "&7{SENDER} left IRC ({REASON})"
# How kills will appear in Minecraft
# Parameters: sender, target, reason
kill: "&7{SENDER} killed {TARGET} from the channel ({REASON})"
# How kicks will appear in Minecraft
# Parameters: sender, target, reason
kick: "&7{SENDER} kicked {TARGET} from the channel ({REASON})"
# The message players will be disconnected with if they are killed through IRC.
# Parameters: sender, target, reason
disconnectkill: "&7You were killed by {SENDER} through IRC.&r\n\n{REASON}"
# The message players will be disconnected with if they are kicked through IRC.
# Parameters: sender, target, reason
disconnectkick: "&7You were kicked by {SENDER} through IRC.&r\n\n{REASON}"
|
config.yml
|
version: 0.1
name: test1
providers:
openwhisk:
ow_cluster1:
auth:
ow_auth: <PASSWORD>:<PASSWORD>zO<PASSWORD> <KEY>
ow_api_host:
ow_apigw_access_token: hello
path: ./Functions/nodeinfo/openwhisk/
monitoring:
openwhisk: ""
kubernetes: ""
meta:
service_name: myservice
memory: 256
timeout: 60
phases:
init:
commands:
- npm install
post_init:
commands:
- serverless deploy
delete:
commands:
- serverless remove
ow_cluster2:
auth:
ow_auth: <PASSWORD>:<PASSWORD>O3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP
ow_api_host:
ow_apigw_access_token: APIGW_ACCESS_TOKEN
path: ./Functions/nodeinfo/openwhisk/
monitoring:
openwhisk: ""
kubernetes: ""
meta:
service_name: myservice
memory: 256
timeout: 60
phases:
init:
commands:
- npm install
post_init:
commands:
- serverless deploy
delete:
commands:
- serverless remove
google:
gcf_cluster:
auth:
type: service_account
project_id: long-walker-228011
private_key_id: ""
private_key: ""
client_email: ""
client_id: ""
auth_uri: ""
token_uri: ""
auth_provider_x509_cert_url: ""
client_x509_cert_url: ""
path: ./Functions/nodeinfo/gcf/
meta:
service_name: myservice
memory: 256
timeout: 60
region: us-central-1
phases:
init:
commands:
- npm install
post_init:
commands:
- serverless deploy
- gcloud functions add-iam-policy-binding <service_name>-dev-<function_name> --member=allUsers --role=roles/cloudfunctions.invoker
delete:
commands:
- serverless remove
aws:
aws_cluster:
auth:
aws_access_key_id: hhjj
aws_secret_access_key: jjj
path: ./Functions/nodeinfo/aws/
meta:
service_name: myservice
memory: 256
timeout: 60
region: us-east-1
phases:
init:
commands:
- npm install
post_init:
commands:
- serverless deploy
delete:
commands:
- serverless remove
influxdb:
auth:
username: root
password: <PASSWORD>
hostinfo:
host: localhost
port: 8086
database:
dbname: demo2
protocol: line
phases:
install:
commands:
- npm install -g serverless@1.20.2
post_install:
commands:
- cd Backend
- serverless deploy --region eu-west-1 --verbose
|
config-sample.yaml
|
# Shared templates for map objects; concrete entries below merge these in
# with "<<:" and override individual fields.
# Booleans canonicalized to lowercase true/false (yamllint "truthy" rule);
# the parsed values are unchanged.
_portal: &portal
  typ: PORTAL
  imagename: objects/portal.png
  passable: true
  takeable: false
  collided: true
_floor: &floor
  imagename: objects/block0.png
  typ: MAPGROUND
  passable: true
  takeable: false
  collided: false
_block: &block
  imagename: objects/block0.png
  typ: MAPWALL
  passable: false
  takeable: false
  collided: true
_enemy: &enemy
  typ: ENEMY
  imagename: objects/none.png
  collided: false
  count: 1
_food: &food
  imagename: objects/block0.png
  typ: FOOD
  basePrice: 1
  material: bio
  weight: 100
  takeable: true
  collided: true
  passable: true
enemy_crab:
<<: *enemy
mapchar: c
name: crab
enemy_poringp:
<<: *enemy
mapchar: G
name: poringp
enemy_poringb:
<<: *enemy
mapchar: B
name: poringb
apple:
<<: *food
imagename: objects/apple.png
mapchar: A
hpGain: 20
cherry:
<<: *food
basePrice: 2
imagename: objects/cherry.png
weight: 20
mapchar: C
hpGain: 1
fish:
<<: *food
imagename: objects/fish.png
mapchar: F
hpGain: 2
stonewall:
<<: *block
imagename:
- objects/block1.png
- objects/block11.png
- objects/block12.png
- objects/block13.png
mapchar: x
block1Brown:
<<: *block
imagename: objects/block1Brown.png
mapchar: x
stolb:
<<: *block
imagename: objects/stolb.png
mapchar: l
tree:
<<: *block
imagename: objects/tree.png
mapchar: T
fire:
<<: *block
imagename:
- objects/fire0.png
- objects/fire1.png
mapchar: F
stone_floor:
<<: *floor
imagename: objects/stone_floor.png
mapchar: _
ground0:
<<: *floor
imagename: objects/ground1.png
mapchar: _
ground_sand:
<<: *floor
imagename: objects/pack0/grass.png
mapchar: _
water:
<<: *block
imagename: objects/water.png
mapchar: w
water_sand:
<<: *floor
imagename: objects/ground_water.png
mapchar: s
void:
<<: *block
imagename: objects/void.png
mapchar: V
sand_void:
<<: *floor
imagename: objects/ground_void.png
mapchar: v
floor:
<<: *floor
imagename: objects/floor.png
mapchar: f
ground:
<<: *block
imagename: objects/ground.png
mapchar: x
ground1Brown:
<<: *floor
imagename: objects/ground1Brown.png
mapchar: _
rock:
<<: *block
imagename: objects/rockL.png
mapchar: R
rocka:
<<: *block
imagename: objects/rockA.png
mapchar: a
rock_portal:
<<: *portal
mapname: data/cave1.yaml
imagename: objects/rock_portal.png
mapchar: b
shrub:
<<: *block
imagename: objects/shrub.png
mapchar: S
grass:
<<: *floor
imagename: objects/grass0.png
mapchar: _
# Consistency: reuse the shared *food template instead of restating every
# field. The merged result is key-for-key identical to the previous expanded
# form (typ FOOD, basePrice 1, material bio, weight 100, takeable/passable/
# collided true), with imagename, mapchar and hpGain overridden here.
wood:
  <<: *food
  imagename: objects/wood0.png
  mapchar: W
  hpGain: 0
trailportal:
<<: *portal
imagename: objects/portal.png
mapchar: T
mapname: data/trail.yaml
portal:
<<: *portal
imagename:
- objects/portal_1.png
- objects/portal_2.png
- objects/portal_3.png
- objects/portal_4.png
mapchar: P
mapname: data/cave1.yaml
portalMap:
<<: *portal
imagename: objects/portal.png
mapchar: P
mapname: data/map.yaml
portal_meadow:
<<: *portal
imagename:
- objects/portal_1.png
- objects/portal_2.png
- objects/portal_3.png
- objects/portal_4.png
mapchar: P
mapname: data/meadow.yaml
gen_portal:
<<: *portal
mapchar: g
mapname: generate
test_portal:
<<: *portal
imagename:
- objects/portal_1.png
- objects/portal_2.png
- objects/portal_3.png
- objects/portal_4.png
mapchar: z
mapname: data/cavegen.yaml
town_H_fence:
<<: *block
imagename: objects/town_H_fence.png
mapchar: F
town_brick_wall:
<<: *block
imagename: objects/town_brick_wall.png
mapchar: B
town_roof_center:
<<: *block
typ: MAPGROUND
imagename: objects/town_roof_center.png
mapchar: r
town_big_roof:
<<: *block
imagename: objects/town_big_roof.png
mapchar: D
town_road_H:
<<: *floor
imagename: objects/town_road_H.png
mapchar: h
town_road_V:
<<: *floor
imagename: objects/town_road_V.png
mapchar: I
|
surgame/data/objects.yaml
|
---
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-trigger-nightly-master/65/
timestamp: 2016-02-12 08:10:00 UTC
duration: 3h56m16s
active_duration: 3h55m49s
parameters: {}
change:
git_remote: git@github.com:chef-partners/omnibus-marketplace.git
git_commit: <PASSWORD>
project: chef-marketplace
version: 0.0.6+20160212081016
stages:
chef-marketplace-promote:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-promote/83/
duration: 4s
chef-marketplace-test:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-test/99/
duration: 1h33m47s
runs:
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-test/architecture=x86_64,platform=el-6,project=chef-marketplace,role=tester/99/
duration: 1h33m46s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-test/architecture=x86_64,platform=el-7,project=chef-marketplace,role=tester/99/
duration: 27m25s
ubuntu-14.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-test/architecture=x86_64,platform=ubuntu-14.04,project=chef-marketplace,role=tester/99/
duration: 50m4s
chef-marketplace-build:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-build/108/
duration: 2h21m50s
runs:
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-build/architecture=x86_64,platform=el-6,project=chef-marketplace,role=builder/108/
duration: 1h52m31s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-build/architecture=x86_64,platform=el-7,project=chef-marketplace,role=builder/108/
duration: 52m3s
ubuntu-14.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-build/architecture=x86_64,platform=ubuntu-14.04,project=chef-marketplace,role=builder/108/
duration: 2h21m43s
chef-marketplace-trigger-nightly-master:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-marketplace-trigger-nightly-master/65/
duration: 7s
|
reports/wilson.ci.chef.co/job/chef-marketplace-trigger-nightly-master/65.yaml
|
---
- name: Configure node for test
hosts: oioswift
become: true
gather_facts: true
any_errors_fatal: true
environment: "{{ openio_environment }}"
pre_tasks:
- name: set openio_bind_address as fact
set_fact:
openio_bind_address: "{{ openio_bind_address }}"
tasks:
- name: Install AWS client
package:
name: awscli
- name: Install OpenStack client
package:
name: python-openstackclient
ignore_errors: "{{ ansible_check_mode }}"
- name: Get AWS credentials (Tempauth)
block:
- name: Get user
no_log: "{{ openio_no_log }}"
set_fact:
openio_oioswift_tempauth_user: "{{ openio_oioswift_filter_tempauth.keys() | select('match', 'user_.*') | list | first }}"
- name: Get password of user
no_log: "{{ openio_no_log }}"
set_fact:
openio_oioswift_tempauth_pass: "{{ openio_oioswift_filter_tempauth[openio_oioswift_tempauth_user].split(' ')[0] }}"
- name: register AWS creds (Tempauth)
no_log: "{{ openio_no_log }}"
register: aws_cred
set_fact:
access: "{{ openio_oioswift_tempauth_user.split('_')[1:] | join(':') }}"
secret: "{{ openio_oioswift_tempauth_pass }}"
when:
- "'keystone' not in groups or ('keystone' in groups and groups['keystone'] | length == 0)"
- openio_oioswift_filter_tempauth | length > 1
ignore_errors: "{{ ansible_check_mode }}"
when: openio_oioswift_filter_tempauth is defined
- name: Create .aws folder
file:
path: /root/.aws
state: directory
mode: "0750"
- name: Set aws credentials
no_log: "{{ openio_no_log }}"
copy:
dest: /root/.aws/credentials
content: |
[default]
aws_access_key_id = {{ access }}
aws_secret_access_key = {{ secret }}
ignore_errors: "{{ ansible_check_mode }}"
- name: Configure environment for test
copy:
dest: "{{ item.path }}"
content: "{{ item.content }}"
with_items:
- path: /root/.aws/config
content: |
[default]
s3 =
signature_version = s3v4
max_concurrent_requests = 10
max_queue_size = 100
#multipart_threshold = 50MB
#multipart_chunksize = 10MB
region = {{ openio_s3_region }}
- path: /etc/profile.d/openio.sh
content: |
export OIO_NS={{ namespace }}
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
ignore_errors: "{{ ansible_check_mode }}"
- name: Add script for tests in /root/checks.sh
template:
src: "{{ playbook_dir ~ '/../templates/checks.sh.j2' }}"
dest: /root/checks.sh
owner: root
group: root
mode: "0750"
...
|
products/sds/playbooks/postinstall.yml
|
interactions:
- request:
body: null
headers:
User-Agent:
- azsdk-python-storage-blob/12.4.0b1 Python/3.7.3 (Windows-10-10.0.18362-SP0)
x-ms-date:
- Tue, 04 Aug 2020 17:44:07 GMT
x-ms-version:
- '2019-12-12'
method: PUT
uri: https://storagenamestorname.blob.core.windows.net/acontainer36e9155b?restype=container
response:
body:
string: ''
headers:
content-length: '0'
date: Tue, 04 Aug 2020 17:44:06 GMT
etag: '"0x8D8389DFCC2EFA4"'
last-modified: Tue, 04 Aug 2020 17:44:07 GMT
server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-version: '2019-12-12'
status:
code: 201
message: Created
url: https://emilyeuap.blob.core.windows.net/acontainer36e9155b?restype=container
- request:
body: hello world
headers:
Content-Length:
- '11'
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-blob/12.4.0b1 Python/3.7.3 (Windows-10-10.0.18362-SP0)
x-ms-blob-type:
- BlockBlob
x-ms-date:
- Tue, 04 Aug 2020 17:44:07 GMT
x-ms-tags:
- tag1=firsttag&tag2=secondtag&tag3=thirdtag
x-ms-version:
- '2019-12-12'
method: PUT
uri: https://storagenamestorname.blob.core.windows.net/acontainer36e9155b/blob1
response:
body:
string: ''
headers:
content-length: '0'
content-md5: XrY7u+Ae7tCTyyK7j1rNww==
date: Tue, 04 Aug 2020 17:44:07 GMT
etag: '"0x8D8389DFCD0DE66"'
last-modified: Tue, 04 Aug 2020 17:44:07 GMT
server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-content-crc64: vo7q9sPVKY0=
x-ms-request-server-encrypted: 'true'
x-ms-version: '2019-12-12'
x-ms-version-id: '2020-08-04T17:44:07.5492966Z'
status:
code: 201
message: Created
url: https://emilyeuap.blob.core.windows.net/acontainer36e9155b/blob1
- request:
body: hello world
headers:
Content-Length:
- '11'
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-blob/12.4.0b1 Python/3.7.3 (Windows-10-10.0.18362-SP0)
x-ms-blob-type:
- BlockBlob
x-ms-date:
- Tue, 04 Aug 2020 17:44:07 GMT
x-ms-tags:
- tag1=firsttag&tag2=secondtag&tag3=thirdtag
x-ms-version:
- '2019-12-12'
method: PUT
uri: https://storagenamestorname.blob.core.windows.net/acontainer36e9155b/blob2
response:
body:
string: ''
headers:
content-length: '0'
content-md5: XrY7u+Ae7tCTyyK7j1rNww==
date: Tue, 04 Aug 2020 17:44:07 GMT
etag: '"0x8D8389DFCDCEE8D"'
last-modified: Tue, 04 Aug 2020 17:44:07 GMT
server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-content-crc64: vo7q9sPVKY0=
x-ms-request-server-encrypted: 'true'
x-ms-version: '2019-12-12'
x-ms-version-id: '2020-08-04T17:44:07.6283533Z'
status:
code: 201
message: Created
url: https://emilyeuap.blob.core.windows.net/acontainer36e9155b/blob2
- request:
body: hello world
headers:
Content-Length:
- '11'
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-blob/12.4.0b1 Python/3.7.3 (Windows-10-10.0.18362-SP0)
x-ms-blob-type:
- BlockBlob
x-ms-date:
- Tue, 04 Aug 2020 17:44:07 GMT
x-ms-tags:
- tag1=firsttag&tag2=secondtag&tag3=thirdtag
x-ms-version:
- '2019-12-12'
method: PUT
uri: https://storagenamestorname.blob.core.windows.net/acontainer36e9155b/blob3
response:
body:
string: ''
headers:
content-length: '0'
content-md5: XrY7u+Ae7tCTyyK7j1rNww==
date: Tue, 04 Aug 2020 17:44:07 GMT
etag: '"0x8D8389DFCE94CD7"'
last-modified: Tue, 04 Aug 2020 17:44:07 GMT
server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-content-crc64: vo7q9sPVKY0=
x-ms-request-server-encrypted: 'true'
x-ms-version: '2019-12-12'
x-ms-version-id: '2020-08-04T17:44:07.7094103Z'
status:
code: 201
message: Created
url: https://emilyeuap.blob.core.windows.net/acontainer36e9155b/blob3
version: 1
|
sdk/storage/azure-storage-blob/tests/recordings/test_container_async.test_delete_blobs_with_if_tagss.yaml
|
services:
apiserver:
extends:
file: common.yml
service: common
image: ${AS_IMAGE}
environment:
DS_HOST: ${DS_HOST}
DS_PORT: ${DS_PORT}
DS_SECRET: ${DS_SECRET}
# Warning: Temporal variable
FS_HOST: ${FS_HOST_PUBLIC}
FS_PORT: ${FS_PORT}
FS_USERNAME: ${FS_USERNAME}
FS_SECRET: ${FS_SECRET}
FS_DEFAULT_STORAGE_BUCKET: ${FS_DEFAULT_STORAGE_BUCKET}
EVENTS_BROKERS: amqp://${EB_USERNAME}:${EB_SECRET}@${EB_HOST}:${EB_PORT}
SIPPROXY_HOST: ${SIPPROXY_HOST}
SIPPROXY_API_PORT: ${SIPPROXY_API_PORT}
SIPPROXY_API_USERNAME: ${SIPPROXY_API_USERNAME}
SIPPROXY_API_SECRET: ${SIPPROXY_API_SECRET}
LOGS_DRIVER_HOST: ${LOGS_DRIVER_HOST}
LOGS_DRIVER_PORT: ${LOGS_DRIVER_PORT}
MS_ENDPOINT: ${MS_ENDPOINT}
MS_TRUNK: ${MS_TRUNK}
MS_CONTEXT: ${MS_CONTEXT}
MS_EXTENSION: ${MS_EXTENSION}
MS_ARI_URL: ${MS_ARI_URL}
MS_ARI_USERNAME: ${MS_ARI_USERNAME}
MS_ARI_SECRET: ${MS_ARI_SECRET}
ports:
- ${APISERVER_PORT}:${APISERVER_PORT}
logging:
options:
tag: ${LOG_OPT_TAG_PREFIX}.${COMPOSE_PROJECT_NAME}.apiserver
networks:
fonos:
aliases:
- ${APISERVER_ENDPOINT}
secrets:
- source: jwt-salt
target: /home/fonos/.fonos/jwt.salt
- source: config
target: /home/fonos/.fonos/config
datasource:
extends:
file: common.yml
service: common
image: ${DS_IMAGE}
deploy:
placement:
constraints: [node.role == manager]
command: ['redis-server', '/etc/redis/redis.conf' ]
healthcheck:
test: ['CMD', 'redis-cli','ping']
interval: 30s
timeout: 10s
retries: 3
logging:
options:
tag: ${LOG_OPT_TAG_PREFIX}.${COMPOSE_PROJECT_NAME}.datasource
networks:
fonos:
aliases:
- ${DS_HOST}
configs:
- source: redis-conf
target: /etc/redis/redis.conf
volumes:
- datasource:/data
eventsbroker:
extends:
file: common.yml
service: common
image: ${EB_IMAGE}
environment:
RABBITMQ_USERNAME: ${EB_USERNAME}
RABBITMQ_PASSWORD: ${EB_SECRET}
RABBITMQ_NODE_PORT_NUMBER: ${EB_PORT}
logging:
options:
tag: ${LOG_OPT_TAG_PREFIX}.${COMPOSE_PROJECT_NAME}.eventsbroker
networks:
fonos:
aliases:
- ${EB_HOST}
|
.compose/01_base.yml
|
test: off
version: '0.3.3.{build}'
image: Visual Studio 2017
environment:
my_pfx:
secure: jxL/b1xngUepPiYC48vTzA==
branches:
only:
- master
- develop
skip_tags: true
assembly_info:
patch: true
file: '**\AssemblyInfo.*'
assembly_version: '{version}'
assembly_file_version: '{version}'
assembly_informational_version: '{version}'
dotnet_csproj:
patch: true
file: '**\netcore.csproj'
version: '{version}'
package_version: '{version}'
assembly_version: '{version}'
file_version: '{build}'
informational_version: '{version}'
init:
# Good practice, because Windows line endings are different from Unix/Linux ones
- cmd: git config --global core.autocrlf true
install:
# Install repo specific stuff here
- ps: try { Import-PfxCertificate -FilePath .\examples\uwpapp\uwpapp_TemporaryKey.pfx -CertStoreLocation cert:\CurrentUser\My -Password (ConvertTo-SecureString $env:my_pfx -AsPlainText -Force) } catch {}
before_build:
# Restore packages
- cmd: echo %CD%
- cmd: nuget restore ./fbchat-sharp.sln
build_script:
# Build project
- cmd: msbuild /t:build /p:Configuration=Release /p:Platform=x64 ./fbchat-sharp.sln
after_build:
- ps: .\examples\uwpapp\runAppcert.ps1 -target 'examples\uwpapp\AppPackages\uwpapp_1.0.0.0_Release_Test\uwpapp_1.0.0.0_x64_Release.appxbundle'
- cmd: nuget pack ./fbchat-sharp/fbchat-sharp.nuspec
- cmd: for /D %%x in (.\examples\uwpapp\AppPackages\uwpapp_*) do set "f=%%~fx"
- cmd: 7z a example_uwp.zip %f%
- cmd: 7z a example_wpf.zip .\examples\wpfapp\bin\Release
artifacts:
- path: '*.nupkg'
- path: 'example_uwp.zip'
- path: 'example_wpf.zip'
deploy:
- provider: GitHub
release: $(appveyor_build_version)
description: ''
auth_token:
secure: <KEY> # your encrypted token from GitHub
artifact: /.*\.nupkg/,example_uwp.zip,example_wpf.zip
draft: true
prerelease: false
force_update: true
on:
branch: master # release from master branch only
appveyor_repo_tag: false # deploy on non tag push only
- provider: GitHub
release: $(appveyor_build_version) beta
description: ''
auth_token:
secure: <KEY> # your encrypted token from GitHub
artifact: /.*\.nupkg/,example_uwp.zip,example_wpf.zip
draft: true
prerelease: true
force_update: true
on:
branch: develop # pre-release from develop branch only
appveyor_repo_tag: false # deploy on non tag push only
|
appveyor.yml
|