code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
---
- name: ADC_IN / ADC_INN / ADC_INP
match: "^ADC(?:\\d+)?_IN[NP]?\\d+$"
mode: analog
- name: CAN_RX
match: "^CAN\\d+_RX$"
mode: alternate
bias: pull-up
- name: CAN_TX
match: "^CAN\\d+_TX$"
mode: alternate
- name: DAC_OUT
match: "^DAC(?:\\d+)?_OUT\\d+$"
mode: analog
- name: FDCAN_RX
match: "^FDCAN\\d+_RX$"
mode: alternate
- name: FDCAN_TX
match: "^FDCAN\\d+_TX$"
mode: alternate
- name: I2C_SCL
match: "^I2C\\d+_SCL$"
mode: alternate
drive: open-drain
bias: pull-up
- name: I2C_SDA
match: "^I2C\\d+_SDA$"
mode: alternate
drive: open-drain
bias: pull-up
- name: I2S_CK
match: "^I2S\\d+_CK$"
mode: alternate
- name: I2S_WS
match: "^I2S\\d+_WS$"
mode: alternate
- name: I2S_SD
match: "^I2S\\d+_SD$"
mode: alternate
- name: SPI_MISO
match: "^SPI\\d+_MISO$"
mode: alternate
bias: pull-down
- name: SPI_MOSI
match: "^SPI\\d+_MOSI$"
mode: alternate
bias: pull-down
# NOTE: The SPI_SCK pin speed must be set to very-high-speed to avoid last data
# bit corruption, which is a known issue on multiple STM32F4 series SPI
# peripherals (ref. ES0182 Rev 12, 2.5.12, p. 22).
- name: SPI_SCK
match: "^SPI\\d+_SCK$"
mode: alternate
slew-rate: very-high-speed
- name: SPI_NSS
match: "^SPI\\d+_NSS$"
mode: alternate
bias: pull-up
- name: TIM_CH_PWM / TIM_CHN_PWM
match: "^TIM\\d+_CH\\d+N?$"
mode: alternate
variant: pwm
- name: UART_CTS / USART_CTS / LPUART_CTS
match: "^(?:LP)?US?ART\\d+_CTS$"
mode: alternate
drive: open-drain
bias: pull-up
- name: UART_RTS / USART_RTS / LPUART_RTS
match: "^(?:LP)?US?ART\\d+_RTS$"
mode: alternate
drive: open-drain
bias: pull-up
- name: UART_TX / USART_TX / LPUART_TX
match: "^(?:LP)?US?ART\\d+_TX$"
mode: alternate
bias: pull-up
- name: UART_RX / USART_RX / LPUART_RX
match: "^(?:LP)?US?ART\\d+_RX$"
mode: alternate
- name: USB_OTG_FS_DM
match: "^USB_OTG_FS_DM$"
mode: alternate
- name: USB_OTG_FS_DP
match: "^USB_OTG_FS_DP$"
mode: alternate
|
scripts/genpinctrl/config.yaml
|
- name: Get download webpage
delegate_to: localhost
run_once: true
get_url:
url: 'https://fritzing.org/download/'
http_agent: "Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0"
dest: '/tmp/fritzingDownloadPage'
timeout: 60
- name: Read file from download webpage
delegate_to: localhost
run_once: true
set_fact:
latest_file: "{{ lookup('file', '/tmp/fritzingDownloadPage') | regex_search('/download/.*/linux-64bit/fritzing-.*bz2') }}"
- name: Create download dir
delegate_to: localhost
run_once: true
file:
path: "{{ role_path }}/downloads"
state: directory
- name: Get stat of local file
delegate_to: localhost
run_once: true
stat:
path: "{{ role_path }}/downloads/{{ latest_file | basename }}"
register: local_archive_stat
- name: Get download archive file from server
delegate_to: localhost
run_once: true
when: not local_archive_stat.stat.exists
get_url:
url: "https://fritzing.org{{ latest_file }}"
http_agent: "Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0"
dest: "{{ role_path }}/downloads/"
- name: Get Version Number
delegate_to: localhost
run_once: true
set_fact:
latest_version: "{{ ( latest_file | basename | regex_findall('fritzing-(.*).linux') )[0] }}"
- name: Get stat version file
stat:
path: "{{ fritzing_install_directory }}/fritzing/ansible_install_{{ latest_version }}"
register: version_stat
- name: Remove fritzing dir if already exists
when: not version_stat.stat.exists
become: true
become_user: 'root'
file:
path: "{{ fritzing_install_directory }}/fritzing"
state: absent
- name: Upload archive to target and extract
when: not version_stat.stat.exists
become: true
become_user: 'root'
unarchive:
src: "{{ role_path }}/downloads/{{ latest_file | basename }}"
dest: "{{ fritzing_install_directory }}"
- name: Move directory
when: not version_stat.stat.exists
become: true
become_user: 'root'
shell: "cd {{ fritzing_install_directory }}; mv {{ ( latest_file | basename | regex_findall('(.*)[.]tar[.]bz2') )[0] }} fritzing"
- name: Create symlink in /usr/bin
when: not version_stat.stat.exists
become: true
become_user: 'root'
file:
src: "{{ fritzing_install_directory }}/fritzing/Fritzing"
dest: "/usr/bin/fritzing"
state: link
- name: Create version file
when: not version_stat.stat.exists
become: true
become_user: 'root'
file:
path: "{{ fritzing_install_directory }}/fritzing/ansible_install_{{ latest_version }}"
state: touch
|
tasks/main.yml
|
---
- name: apt | Add rabbitmq key
become: true
apt_key:
keyserver: "hkp://keyserver.ubuntu.com:80"
id: "0A9AF2115F4687BD29803A206B73A36E6026DFCA"
state: present
register: _task
retries: 5
delay: 3
until: _task is succeeded
tags: rabbitmq
- name: Ensure pre-requisites are installed
become: true
apt:
name: "{{ item }}"
state: present
register: _task
retries: 5
delay: 3
until: _task is succeeded
tags: rabbitmq
loop:
- apt-transport-https
- gnupg
- name: Add launchpad key
become: true
apt_key:
url: https://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search=0xf77f1eda57ebb1cc
id: "<KEY>"
state: present
register: _task
retries: 5
delay: 3
until: _task is succeeded
tags: rabbitmq
- name: apt | Add PackageCloud key
become: true
apt_key:
url: "https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey"
state: present
register: _task
retries: 5
delay: 3
until: _task is succeeded
tags: rabbitmq
- name: Add erlang repos
become: true
apt_repository:
repo: "deb http://ppa.launchpad.net/rabbitmq/rabbitmq-erlang/ubuntu {{ ansible_facts.distribution_release|lower }} main"
state: present
tags: rabbitmq
- name: Add rabbitmq repos
become: true
apt_repository:
repo: "deb https://packagecloud.io/rabbitmq/rabbitmq-server/ubuntu/ {{ ansible_facts.distribution_release|lower }} main"
state: present
tags: rabbitmq
- name: Install latest erlang packages on {{ ansible_facts.distribution }}
become: true
package:
name: "{{ item }}"
state: present
register: _eltask
retries: 5
delay: 3
until: _eltask is succeeded
tags: rabbitmq
loop:
- erlang-base
- erlang-asn1
- erlang-crypto
- erlang-eldap
- erlang-ftp
- erlang-inets
- erlang-mnesia
- erlang-os-mon
- erlang-parsetools
- erlang-public-key
- erlang-runtime-tools
- erlang-snmp
- erlang-ssl
- erlang-syntax-tools
- erlang-tftp
- erlang-tools
- erlang-xmerl
|
roles/StackStorm.rabbitmq/tasks/rabbitmq_debian.yml
|
---
name: Build Image Nightly for Docker
on:
workflow_dispatch:
inputs:
version:
description: Image Tag
default: nightly
required: true
git-ref:
description: Git Ref
default: master
required: true
schedule:
- cron: "30 23 * * *"
#push:
#branches: [ ci ]
jobs:
docker:
name: Build image and push after successful calculation
runs-on: ubuntu-latest
steps:
# This Checkout is necessary when using a context in docker/build-push-action
- name: Clone Repository (Latest)
uses: actions/checkout@v2
if: github.event.inputs.git-ref == ''
- name: Clone Repository (Custom Ref)
uses: actions/checkout@v2
if: github.event.inputs.git-ref != ''
with:
ref: ${{ github.event.inputs.git-ref }}
- name: Build image engine with tag nightly during scheduled task
if: github.event.inputs.version == ''
env:
DOCKER_USERNAME: ${{ secrets.docker_username }}
DOCKER_PASSWORD: ${{ secrets.docker_password }}
DOCKER_TAG: ${{ github.event.inputs.version }}
REPO_REF: ${{ github.event.inputs.git-ref }}
id: docker_engine_scheduled
run: |
docker build --build-arg oq_branch=master -t openquake/engine:nightly -f docker/Dockerfile.dev docker
docker image ls
time docker run openquake/engine:nightly "oq engine --run /usr/src/oq-engine/demos/risk/ScenarioDamage/job_hazard.ini /usr/src/oq-engine/demos/risk/ScenarioDamage/job_risk.ini"
echo " push image engine with tag nightly on docker hub "
docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD"
docker push openquake/engine:nightly
- name: Build image engine with tag ${{ github.event.inputs.version }} during manual run
if: github.event.inputs.version != ''
env:
DOCKER_USERNAME: ${{ secrets.docker_username }}
DOCKER_PASSWORD: ${{ secrets.docker_password }}
DOCKER_TAG: ${{ github.event.inputs.version }}
REPO_REF: ${{ github.event.inputs.git-ref }}
id: docker_engine_manual
run: |
docker build --build-arg oq_branch=$REPO_REF -t openquake/engine:$DOCKER_TAG -f docker/Dockerfile.dev docker
docker image ls
#time docker run openquake/engine:$DOCKER_TAG "(oq dbserver start &) ; sleep 10 ; (oq engine --run "https://github.com/gem/oq-engine/blob/master/openquake/server/tests/data/classical.zip?raw=true")"
time docker run openquake/engine:$DOCKER_TAG "(oq engine --run 'https://github.com/gem/oq-engine/blob/master/openquake/server/tests/data/classical.zip?raw=true')"
echo " push image engine with tag nightly on ocker hub "
docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD"
docker push openquake/engine:$DOCKER_TAG
|
out/gem/oq-engine/.github_workflows_docker_dev.yml
|
mgn_article:
path: /articles/{page}
defaults: { _controller: MgnArticleBundle:Article:index, page: 1 }
requirements:
page: \d+
mgn_article_read:
pattern: /article/{id}/{category}/{date}/{slug}
defaults: { _controller: MgnArticleBundle:Article:read }
requirements:
id: \d+
mgn_article_add_comment:
pattern: /article/commentaire/ajouter/{id}
defaults: { _controller: MgnMessageBundle:Message:addToArticle }
requirements:
_method: POST
id: \d+
mgn_article_archives:
pattern: /articles/archives/{filtre}
defaults: { _controller: MgnArticleBundle:Article:archives, filtre: all }
mgn_admin_article_picture_add:
path: /articles/upload/image
defaults: { _controller: MgnMediaBundle:Article:addPicture }
mgn_admin_article_list:
pattern: /admin/articles/liste/{status}
defaults: { _controller: MgnArticleBundle:Admin:articleList, status: NULL }
mgn_admin_article_category:
pattern: /admin/articles/categories
defaults: { _controller: MgnArticleBundle:Admin:category }
mgn_admin_article_category_edit:
pattern: /admin/articles/categories/editer/{id}
defaults: { _controller: MgnArticleBundle:Admin:categoryEdit }
requirements:
id: \d+
mgn_admin_article_category_delete:
pattern: /admin/articles/categories/supprimer/{id}
defaults: { _controller: MgnArticleBundle:Admin:categoryDelete }
requirements:
id: \d+
mgn_admin_article_delete:
pattern: /admin/article/supprimer/{id}
defaults: { _controller: MgnArticleBundle:Admin:delete }
requirements:
id: \d+
mgn_admin_article_redaction:
pattern: /admin/article/redaction
defaults: { _controller: MgnArticleBundle:Admin:redaction }
mgn_admin_article_edition:
pattern: /admin/article/edition/{id}
defaults: { _controller: MgnArticleBundle:Admin:edition }
requirements:
id: \d+
mgn_admin_article_edit_title:
pattern: /admin/article/edition/titre/{article}/{action}
defaults: { _controller: MgnArticleBundle:Admin:editTitle, action: null }
requirements:
article: \d+
mgn_admin_article_publish:
pattern: /admin/article/publier/{id}
defaults: { _controller: MgnArticleBundle:Admin:publish }
requirements:
id: \d+
|
src/Mgn/ArticleBundle/Resources/config/routing.fr.yml
|
header:
title: WELCOME TO ZHANG LAB #<br> Academy of Medical Engineering and Translational Medicine Tianjin University
stitle: Academy of Medical Engineering and Translational Medicine, Tianjin University
text: 'We focus on computational immunology and omics data mining and modeling'
buttonall:
- button: Join us!
buttonlink: "#contact"
- button: Research
buttonlink: "#research"
research:
title: "Research"
section: research
list:
- title: "Computational Immunology "
desc: "Lorem ipsum dolor sit amet, consectetur adipisicing elit. Minima maxime quam architecto quo inventore harum ex magni, dicta impedit."
#img: assets/img/portfolio/04-thumbnail.jpg
icon: fas fa-book-medical #find more at https://fontawesome.com/icons
- title: "Bioinformatics "
desc: "Lorem ipsum dolor sit amet, consectetur adipisicing elit. Minima maxime quam architecto quo inventore harum ex magni, dicta impedit."
#img: assets/img/portfolio/05-thumbnail.jpg
icon: fas fa-laptop-medical
- title: "Omics Data Mining and Modeling "
desc: "Your description here, full **Markdown** support"
icon: fas fa-dna
#img: assets/img/portfolio/06-thumbnail.jpg
resource:
title: "Resources"
section: resource
list:
- title: "First "
desc: "Lorem ipsum dolor sit amet, consectetur adipisicing elit. Minima maxime quam architecto quo inventore harum ex magni, dicta impedit."
#img: assets/img/portfolio/04-thumbnail.jpg
icon: fas fa-laptop #find more at https://fontawesome.com/icons
- title: "Second "
desc: "Lorem ipsum dolor sit amet, consectetur adipisicing elit. Minima maxime quam architecto quo inventore harum ex magni, dicta impedit."
#img: assets/img/portfolio/05-thumbnail.jpg
icon: fas fa-tools
- title: "and" #"[Markdown](https://baidu.com)"
desc: "Your description here, full **Markdown** support"
icon: fas fa-heartbeat
#img: assets/img/portfolio/06-thumbnail.jpg
portfolio:
title: "Publications"
section: portfolio
text: "Recent Highlights"
closebutton: "Close Paper"
rawbutton: "Journal Page"
button: View More
buttonlink: "publication"
timeline:
title: "About"
section: timeline
# left is the default
#start_align: "left"
events:
- title: "Start"
year: "2020-2021"
desc: "Lorem ipsum dolor sit amet, consectetur adipisicing elit. Sunt ut voluptatum eius sapiente, totam reiciendis temporibus qui quibusdam, recusandae sit vero unde, sed, incidunt et ea quo dolore laudantium consectetur!"
image: assets/img/timeline/1.jpg
alt:
- title: "An Lab is Born"
year: "March 2011"
desc: "Lorem ipsum dolor sit amet, consectetur adipisicing elit. Sunt ut voluptatum eius sapiente, totam reiciendis temporibus qui quibusdam, recusandae sit vero unde, sed, incidunt et ea quo dolore laudantium consectetur!"
image: assets/img/timeline/2.jpg
alt:
- title: "Title"
year: "2009-2011"
desc: "Your description here, **Markdown** is fully supported."
image: assets/img/timeline/4.jpg
alt:
# you can enforce the aligment
align: left
end: "Evert thing <br> will be <br> Past!"
team:
title: "TEAM MEMBERS"
subtext:
section: team
people:
- name: "<NAME>"
role: "PRINCIPAL INVESTIGATOR"
description: "balabalbala blabalbal baabdbasdbasjdbjsakbdjksabdk asb"
image: assets/img/team/1.jpg
social:
- url: https://garyddd.github.io/moreinfo
icon: far fa-id-card
- name: "<NAME>"
role: "GRADUATE STUDENT"
description: '<NAME> is a TJU master student in intelligent medical engineering. He is currently interested in omics data mining and modeling, and then through the analysis of the data, he can solve or raise some interesting phenomena and problems in medicine. Yunsheng got his bachelor’s degree of communication engineering at Hohai University in China.'
image: assets/img/team/2.jpg
contact:
title: "Contact Us"
text: ""
section: contact
footer:
legal: "Privacy Policy"
# social:
# - url: https://twitter.com
# icon: "fab fa-twitter"
# - url: https://facebook.com
# icon: "fab fa-facebook-f"
# - url: https://linkedin.com
# icon: "fab fa-linkedin-in"
# - url: https://github.com/raviriley/agency-jekyll-theme
# icon: "fab fa-github"
# - url: https://instagram.com
# icon: "fab fa-instagram"
# {{ site.data.sitetext.ex.ex | markdownify | default: example }}
|
_data/sitetext.yml
|
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue
name: ComputerVisionInnerErrorCodeValue
fullName: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue
summary: An enumeration.
module: azure.cognitiveservices.vision.computervision.models
constructor:
syntax: ComputerVisionInnerErrorCodeValue(value)
inheritances:
- builtins.str
- enum.Enum
fields:
- name: bad_argument
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.bad_argument
- name: cancelled_request
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.cancelled_request
- name: detect_face_error
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.detect_face_error
- name: failed_to_process
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.failed_to_process
- name: internal_server_error
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.internal_server_error
- name: invalid_details
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.invalid_details
- name: invalid_image_format
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.invalid_image_format
- name: invalid_image_size
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.invalid_image_size
- name: invalid_image_url
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.invalid_image_url
- name: invalid_model
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.invalid_model
- name: invalid_thumbnail_size
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.invalid_thumbnail_size
- name: not_supported_feature
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.not_supported_feature
- name: not_supported_image
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.not_supported_image
- name: not_supported_language
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.not_supported_language
- name: not_supported_visual_feature
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.not_supported_visual_feature
- name: storage_exception
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.storage_exception
- name: timeout
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.timeout
- name: unspecified
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.unspecified
- name: unsupported_media_type
uid: azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.unsupported_media_type
|
docs-ref-autogen/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.models.ComputerVisionInnerErrorCodeValue.yml
|
language: cpp
sudo: required
dist: trusty
matrix:
include:
- os: linux
env: PYTHON=2.7
CUDA_VERSION="9.1.85_387.26"
- os: linux
env: PYTHON=3.4
CUDA_VERSION="9.1.85_387.26"
- os: linux
env: CONDA=2.7
CUDA_VERSION="9.1.85_387.26"
- os: linux
env: CONDA=3.6
CUDA_VERSION="9.1.85_387.26"
- os: osx
env: PYTHON=2.7
CUDA_VERSION="9.1.128"
- os: osx
env: PYTHON=3.6
CUDA_VERSION="9.1.128"
- os: osx
env: CONDA=2.7
CUDA_VERSION="9.1.128"
- os: osx
env: CONDA=3.6
CUDA_VERSION="9.1.128"
before_install:
- |
if [ "${PYTHON:0:1}" = "2" -o "${CONDA:0:1}" = "2" ]; then
export PY=python2
elif [ "${PYTHON:0:1}" = "3" -o "${CONDA:0:1}" = "3" ]; then
export PY=python3
fi
if [ "${PYTHON:0:1}" = "2" ]; then
export PIP=pip2
elif [ "${PYTHON:0:1}" = "3" ]; then
export PIP=pip3
fi
if [ "$TRAVIS_OS_NAME" = "linux" ]; then
export CXX=g++-4.8
export CC=gcc-4.8
source ./script/install-cuda-trusty.sh
if [ "${PYTHON:0:1}" = "3" ]; then sudo apt-get install python$PYTHON python3-pip; fi
elif [ "$TRAVIS_OS_NAME" = "osx" ]; then
source ./script/install-cuda-osx.sh
fi
if [ -n "$PYTHON" ]; then
${PIP} install --user --disable-pip-version-check --no-warn-script-location --upgrade pip virtualenv
if [ "$TRAVIS_OS_NAME" = "osx" ]; then
echo $PATH | grep -q "/Users/travis/Library/Python/$PYTHON/bin"
if [ $? -eq 0 ]; then
export PATH="/Users/travis/Library/Python/$PYTHON/bin:$PATH"
fi
fi
virtualenv -p python$PYTHON venv$PYTHON
source venv$PYTHON/bin/activate
${PIP} install --upgrade pytest
elif [ -n "$CONDA" ]; then
if [ "$TRAVIS_OS_NAME" = "linux" ]; then OS=Linux-x86_64; else OS=MacOSX-x86_64; fi
wget -O miniconda.sh https://repo.continuum.io/miniconda/Miniconda${CONDA:0:1}-latest-$OS.sh
bash miniconda.sh -b -p $HOME/miniconda
export PATH="$HOME/miniconda/bin:$PATH"
conda config --set always_yes yes --set changeps1 no
conda config --add channels conda-forge
conda update -q conda
conda install -q conda-build
conda create -q -n cvenv$CONDA python=$CONDA
source activate cvenv$CONDA
conda install -q pytest
fi
install:
- |
if [ -n "$PYTHON" ]; then
${PY} setup.py install
elif [ -n "$CONDA" ]; then
conda build conda.recipe
conda install --use-local pynvx
fi
script:
- |
${PY} -m pytest tests/test.py
|
.travis.yml
|
title: Linux commands
description: This assignment is to help you self-assess your comprehension on **Linux**. Read carefully each question. Once answered, you will receive feedback. Good luck!
questions:
- type: "fill-single"
points: 1
question: "You have a file `robot_script.sh` with read-only permission in your current directory belonging to the current user `class`.<br>
`class@ubuntu:~$ ls -al robot_script.sh` <br>
`-r--r--r-- 1 class class 0 Nov 3 13:08 robot_script.sh`<br>
What command would you run to give write and execute permissions to the current user, owner of that file? (please type exactly how you would type in the terminal.)
"
answer:
- chmod u+wx robot_script.sh
feedback_p: Great job!
feedback_n: Which command was to change permission? What are the options that are necessary?
- type: "fill-single"
points: 1
question: "Consider this absolute path: `/home/class/test1/`. If you are in your home directory (`/home/class`), after running this command `cd test1/../../` what will be your current directory? Write the full absolute path."
answer:
- /home/
- /home
feedback_p: Great! Two times the characters for the previous directory were used, starting from test1 directory.
feedback_n: Think about the meaning of `..` and from where it starts resolving the path.
- type: "fill-single"
points: 1
question: "What is the command if you want to search through all files and subdirectories for the word 'Amazon' from your current directory? Write the full command that you would use in the terminal."
answer:
- grep -r Amazon ./
- grep -r Amazon .
feedback_p: Correct!
feedback_n: Think about the command, the option, the pattern, and the directory/files you should use.
- type: "fill-single"
points: 1
question: "Assume, you are in your home directory (`~/`) and you have a file named `old.txt`. Now you want to rename it to `new.txt` and move it to `Desktop`, which is a directory in the user's home directory. What command should you run? Run a single command."
answer:
- mv old.txt Desktop/new.txt
- mv old.txt ~/Desktop/new.txt
feedback_p: Correct!
feedback_n: Think about the order for what should be the source and what should be the destination.
- type: "matching"
points: 1
question: "Match commands with functionality."
items_a:
- htop
- man
- nano
- pwd
- cd
items_b:
- monitor running processes.
- getting help
- edit files
- print current directory
- change directory
distractors:
- view files
- remove files
- change file permissions
|
getting-started-with-ubuntu/_data/assessment/mod-1-assessment.yml
|
language: php
services:
- docker
matrix:
fast_finish: true
include:
- php: 7.2
env: LARAVEL='5.6.*' TESTBENCH='3.6.*' COMPOSER_FLAGS='--prefer-lowest'
- php: 7.2
env: LARAVEL='5.6.*' TESTBENCH='3.6.*' COMPOSER_FLAGS='--prefer-stable'
- php: 7.3
env: LARAVEL='5.6.*' TESTBENCH='3.6.*' COMPOSER_FLAGS='--prefer-lowest'
- php: 7.3
env: LARAVEL='5.6.*' TESTBENCH='3.6.*' COMPOSER_FLAGS='--prefer-stable'
- php: 7.2
env: LARAVEL='5.7.*' TESTBENCH='3.7.*' COMPOSER_FLAGS='--prefer-lowest'
- php: 7.2
env: LARAVEL='5.7.*' TESTBENCH='3.7.*' COMPOSER_FLAGS='--prefer-stable'
- php: 7.3
env: LARAVEL='5.7.*' TESTBENCH='3.7.*' COMPOSER_FLAGS='--prefer-lowest'
- php: 7.3
env: LARAVEL='5.7.*' TESTBENCH='3.7.*' COMPOSER_FLAGS='--prefer-stable'
- php: 7.2
env: LARAVEL='5.8.*' TESTBENCH='3.8.*' COMPOSER_FLAGS='--prefer-lowest'
- php: 7.2
env: LARAVEL='5.8.*' TESTBENCH='3.8.*' COMPOSER_FLAGS='--prefer-stable'
- php: 7.3
env: LARAVEL='5.8.*' TESTBENCH='3.8.*' COMPOSER_FLAGS='--prefer-lowest'
- php: 7.3
env: LARAVEL='5.8.*' TESTBENCH='3.8.*' COMPOSER_FLAGS='--prefer-stable'
- php: 7.2
env: LARAVEL='6.*' TESTBENCH='4.*' COMPOSER_FLAGS='--prefer-lowest'
- php: 7.2
env: LARAVEL='6.*' TESTBENCH='4.*' COMPOSER_FLAGS='--prefer-stable'
- php: 7.3
env: LARAVEL='6.*' TESTBENCH='4.*' COMPOSER_FLAGS='--prefer-lowest'
- php: 7.3
env: LARAVEL='6.*' TESTBENCH='4.*' COMPOSER_FLAGS='--prefer-stable'
sudo: false
before_install:
- travis_retry composer self-update
- travis_retry composer require --no-update --no-interaction "illuminate/support:${LARAVEL}" "orchestra/testbench:${TESTBENCH}"
install:
- travis_retry composer update ${COMPOSER_FLAGS} --prefer-dist --no-interaction --no-suggest
before_script:
- sudo service mysql stop
- docker-compose -f ./tests/docker-compose.yml up -d
- php tests/wait.php
script:
- vendor/bin/phpunit
after_script:
- docker-compose -f ./tests/docker-compose.yml down
|
.travis.yml
|
nameWithType: OpenShiftManagedClustersInner.delete
type: method
members:
- fullName: com.microsoft.azure.management.containerservice.implementation.OpenShiftManagedClustersInner.delete(String resourceGroupName, String resourceName)
name: delete(String resourceGroupName, String resourceName)
nameWithType: OpenShiftManagedClustersInner.delete(String resourceGroupName, String resourceName)
parameters:
- description: <p>The name of the resource group. </p>
name: resourceGroupName
type: <xref href="String?alt=String&text=String" data-throw-if-not-resolved="False"/>
- description: <p>The name of the OpenShift managed cluster resource. </p>
name: resourceName
type: <xref href="String?alt=String&text=String" data-throw-if-not-resolved="False"/>
exceptions:
- type: <xref href="IllegalArgumentException?alt=IllegalArgumentException&text=IllegalArgumentException" data-throw-if-not-resolved="False"/>
description: <p>thrown if parameters fail the validation </p>
- type: <xref href="CloudException?alt=CloudException&text=CloudException" data-throw-if-not-resolved="False"/>
description: <p>thrown if the request is rejected by server </p>
- type: <xref href="RuntimeException?alt=RuntimeException&text=RuntimeException" data-throw-if-not-resolved="False"/>
description: <p>all other wrapped checked exceptions if the request fails to be sent </p>
summary: >-
<p>Deletes an OpenShift managed cluster. Deletes the OpenShift managed cluster with a specified resource group and name.</p>
<p></p>
syntax: public void delete(String resourceGroupName, String resourceName)
uid: com.microsoft.azure.management.containerservice.implementation.OpenShiftManagedClustersInner.delete(String,String)
uid: com.microsoft.azure.management.containerservice.implementation.OpenShiftManagedClustersInner.delete*
fullName: com.microsoft.azure.management.containerservice.implementation.OpenShiftManagedClustersInner.delete
name: delete(String resourceGroupName, String resourceName)
package: com.microsoft.azure.management.containerservice.implementation
metadata: {}
|
legacy/docs-ref-autogen/com.microsoft.azure.management.containerservice.implementation.OpenShiftManagedClustersInner.delete.yml
|
uid: management.azure.com.storsimple.managers.listfeaturesupportstatus
name: List Feature Support Status
service: StorSimple
groupName: Managers
apiVersion: 2017-06-01
summary: Lists the features and their support status
consumes:
- application/json
produces:
- application/json
paths:
- content: GET https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/features?api-version=2017-06-01
- content: GET https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/features?api-version=2017-06-01&$filter={$filter}
isOptional: true
uriParameters:
- name: subscriptionId
in: path
isRequired: true
description: The subscription id
types:
- uid: string
- name: resourceGroupName
in: path
isRequired: true
description: The resource group name
types:
- uid: string
- name: managerName
in: path
isRequired: true
description: The manager name
types:
- uid: string
- name: api-version
in: query
isRequired: true
description: The api version
types:
- uid: string
- name: $filter
in: query
description: OData Filter options
types:
- uid: string
responses:
- name: 200 OK
description: The collection of features.
types:
- uid: FeatureList
requestHeader: []
definitions:
- name: FeatureList
description: The collections of features.
kind: object
properties:
- name: value
description: The value.
types:
- uid: Feature
isArray: true
- name: Feature
description: The feature.
kind: object
properties:
- name: name
description: The name of the feature.
types:
- uid: string
- name: status
description: The feature support status.
types:
- uid: FeatureSupportStatus
- name: FeatureSupportStatus
description: The feature support status.
kind: enum
properties:
- name: NotAvailable
types:
- uid: string
- name: UnsupportedDeviceVersion
types:
- uid: string
- name: Supported
types:
- uid: string
examples:
- name: ManagersListFeatureSupportStatus
request:
uri: GET https://management.azure.com/subscriptions/4385cf00-2d3a-425a-832f-f4285b1c9dce/resourceGroups/ResourceGroupForSDKTest/providers/Microsoft.StorSimple/managers/ManagerForSDKTest1/features?api-version=2017-06-01
responses:
- statusCode: "200"
body: >-
{
"value": [
{
"name": "BandwidthThrottling",
"status": "Supported"
},
{
"name": "OtherCloud",
"status": "UnsupportedDeviceVersion"
},
{
"name": "DeviceUpdate",
"status": "NotAvailable"
},
{
"name": "ControllerRestartFeature",
"status": "Supported"
},
{
"name": "GetDRTargetDevicesApi",
"status": "Supported"
},
{
"name": "Migration",
"status": "Supported"
},
{
"name": "Update",
"status": "Supported"
},
{
"name": "GetSupportedVirtualApplianceVersionInfoApi",
"status": "Supported"
},
{
"name": "LocalOnlyVolume",
"status": "Supported"
},
{
"name": "PremiumSVA",
"status": "Supported"
},
{
"name": "VersionBasedARMApiAccess",
"status": "NotAvailable"
},
{
"name": "SubscriptionBasedARMApiAccess",
"status": "NotAvailable"
}
]
}
security:
- type: oauth2
description: Azure Active Directory OAuth2 Flow
flow: implicit
authorizationUrl: https://login.microsoftonline.com/common/oauth2/authorize
scopes:
- name: user_impersonation
description: impersonate your user account
|
docs-ref-autogen/storsimple/Managers/ListFeatureSupportStatus.yml
|
gato_past:
instructors: instructor1
topic: gato
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 3
tracs_past:
instructors: instructor1
topic: tracs
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
no_survey_topic_past:
instructors: instructor1
topic: no_survey_topic
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
gato_past_surveys_sent:
instructors: instructor1
topic: gato
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
survey_sent: true
gato:
instructors: instructor1
topic: gato
location: Alkek 155
location_url: http://gato.its.txstate.edu
site: sanmarcos
cancelled: false
seats: 20
gato_cancelled:
instructors: instructor1
topic: gato
location: Alkek 155
site: sanmarcos
cancelled: true
seats: 20
gato_2:
instructors: instructor1
topic: gato
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
tracs:
instructors: instructor2
topic: tracs
location: JCK 1100
site: sanmarcos
cancelled: false
seats: 15
tracs_tiny:
instructors: instructor2
topic: tracs
location: JCK 1100
site: sanmarcos
cancelled: false
seats: 1
tracs_tiny_full:
instructors: instructor2
topic: tracs
location: JCK 1100
site: sanmarcos
cancelled: false
seats: 1
gato_huge:
instructors: instructor1
topic: gato
location: Alkek 155
site: sanmarcos
cancelled: false
gato_overbooked:
instructors: instructor1
topic: gato
location: JCK Overbooked
site: sanmarcos
cancelled: false
seats: 2
tracs_multiple_instructors:
instructors: instructor1, instructor2
topic: tracs
location: JCK 1100
site: sanmarcos
cancelled: false
seats: 2
multi_time_topic:
instructors: instructor1
topic: multi_time_topic
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
multi_time_topic_started:
instructors: instructor1
topic: multi_time_topic
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
multi_time_topic_completed:
instructors: instructor1
topic: multi_time_topic
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
topic_with_attached_documents:
instructors: editor1
topic: topic_with_attached_documents
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
survey_sent: true
inactive_topic:
instructors: instructor1
topic: inactive_topic
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
topic_to_make_inactive:
instructors: instructor1
topic: topic_to_make_inactive
location: Alkek 155
site: sanmarcos
cancelled: false
seats: 20
|
test/fixtures/sessions.yml
|
asyncapi: 1.2.0
info:
title: Standard Events for Real Estate Software Integration and Automation.
version: '0.0.2'
description: |
This document describes an event vocabulary for automating business processes across organizational boundaries in the residential real estate domain.
The vocabulary and related libraries are 100% open-source software actively maintained by a small but passionate community of developers and supported by forward-thinking real estate businesses.
## Technical Acknowledgments
The Real Estate vocabulary is largely based on the tremendous work being done at schema.org.
Linked Data Platform and Notifications are W3C specs credited to <NAME>, University of Bonn, <EMAIL>
<NAME>, University of Edinburgh, <EMAIL>
<NAME>, IBM Corporation
<NAME>, IBM Corporation
<NAME>, Oracle Corporation
The Profile schema comes from the SOLID project developed at MIT.
And all of the above can be traced directly to the work of the incomparable Tim Berners-Lee.
contact:
name: <NAME>
email: <EMAIL>
license:
name: MIT
url: https://opensource.org/licenses/MIT
x-info:
route: /
x-topics:
name: Topics
hideProps: false
hideExample: false
intro: |
## Permission model
The Events API leverages existing object-driven OAuth scope system to
control access to events. For example, if your app has access to profiles
through the `profiles:read` scope, you can choose to subscribe to any or
none of the profile-related events like `profile_added` and
`profile_updated`.
You will only receive events from users who've authorized your app.
x-messages:
name: Events
hideProps: false
hideExample: false
x-schemas:
menu: Types
# baseTopic: realestate
topics:
award:
$ref: schemas/award/topic.yaml
calendar:
$ref: schemas/calendar/topic.yaml
contact:
$ref: schemas/contact/topic.yaml
franchise:
$ref: schemas/franchise/topic.yaml
lead:
$ref: schemas/lead/topic.yaml
listing:
$ref: schemas/listing/listing.topic.yaml
marketing:
$ref: schemas/marketing/topic.yaml
marketingpreferences:
$ref: schemas/marketingpreferences/topic.yaml
profile:
$ref: schemas/profile/topic.yaml
servicearea:
$ref: schemas/servicearea/topic.yaml
website:
$ref: schemas/website/topic.yaml
transaction:
$ref: schemas/transaction/topic.yaml
components:
schemas:
$ref: schemas/type-index.yaml
securitySchemes:
https:
name: api-key
type: httpApiKey
in: header
|
www/src-content/asyncapi.yaml
|
name: build and test
on: [push]
env:
CONTAINER_REGISTRY: ghcr.io
CONTAINER_REGISTRY_REPO: ghcr.io/${{ github.repository_owner }}
CONTAINER_REGISTRY_USER: ${{ secrets.CONTAINER_REGISTRY_USER }}
CONTAINER_REGISTRY_PASSWORD: ${{ secrets.CONTAINER_REGISTRY_PASSWORD }}
DOCKER_IMAGE_NAME: kolga
BUILD_HOST_IMAGE: ghcr.io/andersinno/kolga:v3
BUILT_DOCKER_TEST_IMAGE: ghcr.io/${{ github.repository_owner }}/kolga:${{ github.sha }}-development
jobs:
build:
runs-on: ubuntu-20.04
steps:
- name: checkout
uses: actions/checkout@v2
- name: login to Docker container registry
uses: docker/login-action@v1
with:
registry: ${{ env.CONTAINER_REGISTRY }}
username: ${{ env.CONTAINER_REGISTRY_USER }}
password: ${{ env.CONTAINER_REGISTRY_PASSWORD }}
- name: pull Kólga Docker image
run: docker pull $BUILD_HOST_IMAGE
- name: ensure lowercase image
run: |
echo BUILT_DOCKER_TEST_IMAGE=$(echo ${{ env.BUILT_DOCKER_TEST_IMAGE }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
echo CONTAINER_REGISTRY_REPO=$(echo ${{ env.CONTAINER_REGISTRY_REPO }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: use Kólga to build Kólga
id: build
run: |
env > ./.docker-env
docker run --env-file .docker-env --workdir /github/workspace --rm -v "/var/run/docker.sock":"/var/run/docker.sock" -v "/home/runner/work/_temp/_github_home":"/github/home" -v "/home/runner/work/_temp/_github_workflow":"/github/workflow" -v "$PWD":"/github/workspace" \
$BUILD_HOST_IMAGE bash -c './devops create_images && export BUILT_IMAGE=$(./devops docker_test_image)'
test:
runs-on: ubuntu-20.04
needs: [build]
strategy:
matrix:
tests: ["style-tests", "typing-tests", "package-tests", "docs", "test"]
steps:
- uses: actions/checkout@v2
- name: ${{ matrix.tests }}
run: |
if [[ "${{ matrix.tests }}" == "test" ]]
then
sudo sysctl -w net/netfilter/nf_conntrack_max=131072
sudo chown $(whoami) /etc/hosts && echo 127.0.0.1 docker-registry >> /etc/hosts
make ${{ matrix.tests }}
else
docker run -v "$GITHUB_WORKSPACE":/app ${{ env.BUILT_DOCKER_TEST_IMAGE }} /bin/bash -c "make ${{ matrix.tests }}"
fi
if: always()
|
.github/workflows/on_push.yml
|
---
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "docker exec ceph-osd-{{ ansible_hostname }}"
when:
- containerized_deployment
- name: include check_mandatory_vars.yml
include: check_mandatory_vars.yml
- name: include system_tuning.yml
include: system_tuning.yml
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: false
- name: install dependencies
package:
name: parted
state: present
when:
- not containerized_deployment
- ansible_os_family != 'ClearLinux'
- name: include common.yml
include: common.yml
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: false
- name: include ceph_disk_cli_options_facts.yml
include: ceph_disk_cli_options_facts.yml
- name: include build_devices.yml
include: build_devices.yml
- name: read information about the devices
parted:
device: "{{ item }}"
unit: MiB
register: parted_results
with_items: "{{ devices }}"
- name: include check_gpt.yml
include: check_gpt.yml
- name: include scenarios/collocated.yml
include: scenarios/collocated.yml
when:
- osd_scenario == 'collocated'
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: false
- name: include scenarios/non-collocated.yml
include: scenarios/non-collocated.yml
when:
- not osd_auto_discovery
- osd_scenario == 'non-collocated'
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: false
- name: include scenarios/lvm.yml
include: scenarios/lvm.yml
when:
- osd_scenario == 'lvm'
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: false
- name: include activate_osds.yml
include: activate_osds.yml
when:
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: false
- name: include start_osds.yml
include: start_osds.yml
when:
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: false
- name: include docker/main.yml
include: docker/main.yml
when:
- containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: false
|
roles/ceph-osd/tasks/main.yml
|
- name:
detail: Carnegie Mellon University
i18n: company_e
desc:
detail: Research Programmer under Prof. <NAME> at Auton Lab, CMU
i18n: company_e_desc
date:
detail: Aug 2020
i18n: company_e_date
job:
detail: Research Programmer/Analyst
i18n: company_e_job
icon: fa-plus-square
- name:
detail: ReadMyECG
i18n: company_f
desc:
detail: Data Science intern who worked on time-series analysis of relevant biomedical time series data.
i18n: company_f_desc
date:
detail: May 2020
    i18n: company_f_date
job:
detail: Data Science Intern
i18n: company_f_job
icon: fa-plus-square
- name:
detail: Carnegie Mellon University
i18n: company_c
desc:
detail: TA for Introduction to Deep Learning (11785) at CMU for Spring 2020.
i18n: company_c_desc
date:
detail: Jan 2020
i18n: company_c_date
job:
detail: Graduate Teaching Assistant
i18n: company_c_job
icon: fa-plus-square
- name:
detail: Carnegie Mellon University
i18n: company_a
desc:
detail: I worked under various research labs at CMU on machine learning, signal processing and Multimodal ML.
i18n: company_a_desc
date:
detail: May 2019
i18n: company_a_date
job:
detail: Graduate Research Assistant
i18n: company_a_job
icon: fa-plus-square
- name:
    detail: Aeronautical Development Establishment
i18n: company_d
desc:
detail: I worked under the flight sim division at DRDO, specifically on researching and integrating flight controllers on UAVs.
i18n: company_d_desc
date:
detail: June 2017
i18n: company_d_date
job:
detail: FSim Div Intern
i18n: company_d_job
icon: fa-plus-square
- name:
detail: SRMAUV
i18n: company_b
desc:
detail: I designed the Electronic systems and subsystems for two iterations of the team's vehicles, Sedna and Alpheus.
i18n: company_b_desc
date:
detail: Sept 2015
i18n: company_b_date
job:
detail: Electronics Domain Lead
i18n: company_b_job
icon: fa-plus-square
|
_data/index/careers.yml
|
en:
authorization:
create_city_not_allowed: This user cannot create city instances.
cure_diseases:
already_cured: The disease is already cured.
not_the_same_color: The cards are not the same color.
wrong_number_of_cards: You do not have the correct number of cards.
player_must_own_cards: The player must own the submitted city cards.
charter_flights:
player_must_own_card: The player does not own the city card at the current location.
dispatcher:
must_be_a_dispatcher: This action requires the dispatcher role.
errors:
missing_param: Required parameter missing.
not_authorized: Not Authorized.
forecasts:
bad_staticids: Bad static ids provided.
updated: Player %{username} has used a forecast event. These infection cards are coming %{cards}.
games:
already_started: This game has already started.
incorrect_nr_of_epidemic_cards: Incorrect number of epidemic cards provided.
minimum_number_of_players: Cannot start a game with only one player.
government_grant:
alread_exists: Cannot build a research station here. A research station is already placed at this location.
invitations:
game_owner: This player is already part of the game.
game_started: Cannot accept invitation. Game already started.
maximum_number_sent: A maximum number of invitations have been sent.
user_not_found: Could not find user with that username.
user_invited: An invitation for this user already exists.
line_movements:
destination_is_not_a_neighbor: Destination city is not a neighbor.
movement_proposals:
expired: This proposal expired.
not_allowed: Destination is not a neighbor and there is no other player at that location.
operations_expert:
one_action_per_turn: This action has already been performed and it can only be performed once per turn.
player_actions:
bad_turn: This action is not allowed on this turn.
city_staticid: A city staticid must be provided for this action.
city_with_no_station: There is no research station in %{name}.
discard_player_card: Player %{username} has too many cards. Only 7 player cards are allowed.
must_own_card: The player does not own the card.
must_be_an_operations_expert: This action can only be performed by an Operations Expert.
no_actions_left: You have no player actions left.
not_a_researcher: Only a researcher can pass in a location.
draw_cards: You must draw two cards before starting the infections stage.
flipped_max: You have already flipped 2 cards. Time to infect!
research_stations:
none_left: There are no research stations left to be placed.
share_cards:
not_the_same_location: You are requesting a card from a player at a different location.
not_an_owner: The card to be shared is not owned by the player.
not_authorized: This player is not authorized to accept this card transfer.
not_same_location: Players are not at the same location.
special_cards:
bad_role: This player is not a Contingency Planner.
treat_diseases:
not_enough_actions_left: You don't have enough actions to cure that many cubes.
quantity: You cannot cure that many infections.
no_color: Color must be provided for this request.
|
config/locales/en.yml
|
functions:
- {summary: Converts a given big integer to an array of Booleans. The 0 element of
the array is the least significant bit of the big integer., uid: Microsoft.Quantum.Convert.BigIntAsBoolArray}
- {summary: Converts a given array of Booleans to an equivalent big integer. The 0
element of the array is the least significant bit of the big integer., uid: Microsoft.Quantum.Convert.BoolArrayAsBigInt}
- {summary: Produces a positive integer from a string of bits in little endian format.,
uid: Microsoft.Quantum.Convert.BoolArrayAsInt}
- {summary: 'Given a bit string, returns a multi-qubit Pauli operator represented
as an array of single-qubit Pauli operators.', uid: Microsoft.Quantum.Convert.BoolArrayAsPauli}
- {summary: 'Converts a `Bool[]` type to a `Result[]` type, where `true` is mapped
to `One` and `false` is mapped to `Zero`.', uid: Microsoft.Quantum.Convert.BoolArrayAsResultArray}
- {summary: 'Converts a `Bool` type to a `Result` type, where `true` is mapped to
`One` and `false` is mapped to `Zero`.', uid: Microsoft.Quantum.Convert.BoolAsResult}
- {summary: Converts a given boolean value to an equivalent string representation.,
uid: Microsoft.Quantum.Convert.BoolAsString}
- {summary: Converts a given double-precision floating-point number to an equivalent
string representation., uid: Microsoft.Quantum.Convert.DoubleAsString}
- {summary: 'Converts a given double-precision floating-point number to an equivalent
string representation, using the given format.', uid: Microsoft.Quantum.Convert.DoubleAsStringWithFormat}
- {summary: Converts functions to operations., uid: Microsoft.Quantum.Convert.FunctionAsOperation}
- {summary: Converts a given integer to an equivalent big integer., uid: Microsoft.Quantum.Convert.IntAsBigInt}
- {summary: 'Produces a binary representation of a positive integer, using the little-endian
representation for the returned array.', uid: Microsoft.Quantum.Convert.IntAsBoolArray}
- {summary: Converts a given integer to an equivalent double-precision floating-point
number., uid: Microsoft.Quantum.Convert.IntAsDouble}
- {summary: Converts a given integer number to an equivalent string representation.,
uid: Microsoft.Quantum.Convert.IntAsString}
- {summary: 'Converts a given integer number to an equivalent string representation,
using the given format.', uid: Microsoft.Quantum.Convert.IntAsStringWithFormat}
- {summary: 'Converts a given big integer to an equivalent integer, if possible. The
function returns a pair of the resulting integer and a Boolean flag which is true,
if and only if the conversion was possible.', uid: Microsoft.Quantum.Convert.MaybeBigIntAsInt}
- {summary: Encodes a multi-qubit Pauli operator represented as an array of single-qubit
Pauli operators into an integer., uid: Microsoft.Quantum.Convert.PauliArrayAsInt}
- {summary: Creates an array `arr` of integers enumerated by start..step..end., uid: Microsoft.Quantum.Convert.RangeAsIntArray}
- {summary: 'Converts a `Result[]` type to a `Bool[]` type, where `One` is mapped
to `true` and `Zero` is mapped to `false`.', uid: Microsoft.Quantum.Convert.ResultArrayAsBoolArray}
- {summary: Produces a positive integer from a string of Results in little endian
format., uid: Microsoft.Quantum.Convert.ResultArrayAsInt}
- {summary: 'Converts a `Result` type to a `Bool` type, where `One` is mapped to `true`
and `Zero` is mapped to `false`.', uid: Microsoft.Quantum.Convert.ResultAsBool}
name: Microsoft.Quantum.Convert
newtypes: []
operations:
- {summary: Calls a function with a given input., uid: Microsoft.Quantum.Convert.Call}
summary: This namespace contains functions for converting between various Q# data
types.
uid: Microsoft.Quantum.Convert
|
api/qsharp/microsoft.quantum.convert.yml
|
uid: "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl"
fullName: "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl"
name: "EventGridPublisherClientImpl"
nameWithType: "EventGridPublisherClientImpl"
summary: "Initializes a new instance of the EventGridPublisherClient type."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public final class EventGridPublisherClientImpl"
methods:
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.getApiVersion()"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.getHttpPipeline()"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.getSerializerAdapter()"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishCloudEventEventsAsync(java.lang.String,java.util.List<com.azure.core.models.CloudEvent>)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishCloudEventEventsAsync(java.lang.String,java.util.List<com.azure.core.models.CloudEvent>,com.azure.core.util.Context)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishCloudEventEventsWithResponseAsync(java.lang.String,java.util.List<com.azure.core.models.CloudEvent>)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishCloudEventEventsWithResponseAsync(java.lang.String,java.util.List<com.azure.core.models.CloudEvent>,com.azure.core.util.Context)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishCustomEventEventsAsync(java.lang.String,java.util.List<java.lang.Object>)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishCustomEventEventsAsync(java.lang.String,java.util.List<java.lang.Object>,com.azure.core.util.Context)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishCustomEventEventsWithResponseAsync(java.lang.String,java.util.List<java.lang.Object>)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishCustomEventEventsWithResponseAsync(java.lang.String,java.util.List<java.lang.Object>,com.azure.core.util.Context)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishEventsAsync(java.lang.String,java.util.List<com.azure.messaging.eventgrid.implementation.models.EventGridEvent>)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishEventsAsync(java.lang.String,java.util.List<com.azure.messaging.eventgrid.implementation.models.EventGridEvent>,com.azure.core.util.Context)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishEventsWithResponseAsync(java.lang.String,java.util.List<com.azure.messaging.eventgrid.implementation.models.EventGridEvent>)"
- "com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.publishEventsWithResponseAsync(java.lang.String,java.util.List<com.azure.messaging.eventgrid.implementation.models.EventGridEvent>,com.azure.core.util.Context)"
type: "class"
metadata: {}
package: "com.azure.messaging.eventgrid.implementation"
artifact: com.azure:azure-messaging-eventgrid:4.0.0
|
docs-ref-autogen/com.azure.messaging.eventgrid.implementation.EventGridPublisherClientImpl.yml
|
---
- name: include OS-specific vars
include: vars.yml
- name: OS packages
  package:
    name: "{{ item }}"
    state: present
with_items: "{{ consul_os_packages }}"
tags: installation
- include: user.yml
when: consul_create_user
tags: consul_user
- block:
- name: download hashicorp gpg key file from keybase
get_url:
url: https://keybase.io/hashicorp/key.asc
dest: "{{ consul_download_dir }}/hashicorp.asc"
validate_certs: "{{ consul_validate_certs }}"
- name: install gnupg package
    become: true
package:
name: gnupg
state: present
- name: import hashicorp gpg key
command: gpg --import "{{ consul_download_dir }}/hashicorp.asc"
    changed_when: false
- name: download consul checksum signature file
get_url:
url: "{{ consul_signature_url }}"
dest: "{{ consul_download_dir }}/{{ consul_signature_file }}"
validate_certs: "{{ consul_validate_certs }}"
      force: true
when: consul_verify_checksum_signature
- name: Get consul checksum file
get_url:
url: "{{ consul_checksum_file_url }}"
dest: "{{ consul_download_dir }}"
validate_certs: "{{ consul_validate_certs }}"
- name: verify checksum file
command: gpg --batch --verify {{ consul_download_dir }}/{{ consul_signature_file }} {{ consul_download_dir }}/{{ consul_checksum_file }}
changed_when: no
register: gpg_check
when: consul_verify_checksum_signature
failed_when: gpg_check.rc == 1
tags: gpg_check
- name: Get checksum of consul zip
shell: "grep {{ consul_zip }} {{ consul_download_dir }}/{{ consul_checksum_file }}"
register: chksum
  changed_when: false
tags:
- skip_ansible_lint
- name: Download Consul
get_url:
url: "{{ consul_zip_url }}"
dest: "{{ consul_download_dir }}/consul.zip"
checksum: "sha256:{{ chksum.stdout.split(' ')|first }}"
validate_certs: "{{ consul_validate_certs }}"
when: not ansible_check_mode
tags: consul
- name: create consul binary directory
file:
state: directory
path: "{{ consul_install_dir }}/consul-{{ consul_version }}"
owner: 0
group: 0
    recurse: true
- name: Unzip Consul to installation directory
unarchive:
src: "{{ consul_download_dir }}/consul.zip"
dest: "{{ consul_install_dir }}/consul-{{ consul_version }}"
    copy: false
creates: "{{ consul_install_dir }}/consul-{{ consul_version }}/consul"
tags:
- consul
- consul-unzip
- name: symlink consul binary
file:
state: link
src: "{{ consul_install_dir }}/consul-{{ consul_version }}/consul"
dest: "{{ consul_install_dir }}/consul"
- include: configure.yml
tags: consul_config
|
tasks/install.yml
|
---
- hosts: pelican-gitreceive-openbsd
gather_facts: false
roles: [adarnimrod.openbsd-bootstrap]
- hosts: pelican-gitreceive-xenial
gather_facts: false
roles: [adarnimrod.debian-bootstrap]
- hosts: all
tasks:
- name: Create SSH keypair
      become: false
local_action: command ssh-keygen -t rsa -N '' -f files/id_rsa
      run_once: true
args:
creates: files/id_rsa
- name: Install SSH server
when: ansible_pkg_mgr == 'apt'
apt:
name: openssh-server
state: present
- name: Start SSH service
when: ansible_pkg_mgr == 'apt'
service:
name: ssh
state: started
- hosts: all
roles:
- role: adarnimrod.nginx
- role: pelican-gitreceive
pelican_gitreceive_public_keys: ['{{ lookup("file", "id_rsa.pub") }}']
post_tasks:
- name: Install curl
package:
name: curl
state: present
- name: Create .ssh directory
file:
path: /root/.ssh
owner: root
group: 0
        mode: "0700"
state: directory
- name: Copy private SSH key
copy:
src: id_rsa
dest: /root/.ssh/blog_rsa
owner: root
group: 0
        mode: "0400"
- name: Add SSH config
blockinfile:
dest: /root/.ssh/config
state: present
      create: true
block: |
Host localhost
HostName localhost
User git
IdentityFile /root/.ssh/blog_rsa
- name: Clone test blog
git:
dest: /root/blog
repo: https://www.shore.co.il/git/blog
version: master
      recursive: true
- name: Add localhost host keys to known hosts
shell: ssh-keyscan localhost > /root/.ssh/known_hosts
args:
creates: /root/.ssh/known_hosts
- name: Add localhost as a git remote
blockinfile:
dest: /root/blog/.git/config
block: |
[remote "test"]
url = git@localhost:test
fetch = +refs/heads/*:refs/remotes/test/*
|
tests/playbook.yml
|
author: <NAME>
copyright: '2021'
dependencies:
- base >= 4.7 && < 5
- attoparsec
- bytestring
- containers
- parallel
- vector
description: Please see the README on GitHub at <https://github.com/erochest/day2#readme>
executables:
day1-exe:
dependencies:
- Advent2021
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Main.hs
source-dirs: app/day1
day2-exe:
dependencies:
- Advent2021
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Main.hs
source-dirs: app/day2
day3-exe:
dependencies:
- Advent2021
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Main.hs
source-dirs: app/day3
day4-exe:
dependencies:
- Advent2021
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Main.hs
source-dirs: app/day4
day5-exe:
dependencies:
- Advent2021
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Main.hs
source-dirs: app/day5
day6-exe:
dependencies: &id001
- Advent2021
ghc-options: &id002
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Main.hs
source-dirs: app/day6
day7-exe:
dependencies: *id001
ghc-options: *id002
main: Main.hs
source-dirs: app/day7
extra-source-files:
- README.md
- ChangeLog.md
github: erochest/advent-2021
library:
source-dirs: src
license: BSD3
maintainer: <EMAIL>
name: Advent2021
tests:
day2-test:
dependencies:
- Advent2021
- tasty
- tasty-hunit
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Spec.hs
source-dirs: test/day2
day3-test:
dependencies:
- Advent2021
- tasty
- tasty-hunit
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Spec.hs
source-dirs: test/day3
day4-test:
dependencies:
- Advent2021
- tasty
- tasty-hunit
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Spec.hs
source-dirs: test/day4
day5-test:
dependencies:
- Advent2021
- tasty
- tasty-hunit
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Spec.hs
source-dirs: test/day5
day6-test:
dependencies: &id003
- Advent2021
- tasty
- tasty-hunit
ghc-options: &id004
- -threaded
- -rtsopts
- -with-rtsopts=-N
main: Spec.hs
source-dirs: test/day6
day7-test:
dependencies: *id003
ghc-options: *id004
main: Spec.hs
source-dirs: test/day7
version: 0.1.0.0
|
package.yaml
|
_id: 64201c30-aa12-11e9-96bc-538bd6997c7c
message: >-
Give vhu.phug.zacharythomas.github.io.zsm.aq coagulase-negative stenosis
[URL=http://tattoosideasblog.com/cialis-generic/ - cialis reviews[/URL -
[URL=http://iowansforsafeaccess.org/tadalafil-20-mg/ - cialis generic[/URL -
cialis 20mg [URL=http://thegrizzlygrowler.com/cialis/ - once daily cialis
dose[/URL - [URL=http://gatorsrusticburger.com/cialis-generic/ - cialis[/URL
- [URL=http://thegrizzlygrowler.com/viagra-buy-in-canada/ - viagra buy in
canada[/URL - [URL=http://dive-courses-bali.com/propecia-online/ - propecia
online[/URL - [URL=http://dive-courses-bali.com/cialis/ - cheap cialis[/URL
- [URL=http://bootstrapplusplus.com/cialis-5mg/ - cialis[/URL -
[URL=http://bootstrapplusplus.com/kamagra/ - kamagra[/URL - probing disc
denies <a href="http://tattoosideasblog.com/cialis-generic/">cialis
reviews</a> <a href="http://iowansforsafeaccess.org/tadalafil-20-mg/">cheap
cialis</a> <a href="http://thegrizzlygrowler.com/cialis/">tadalafil
walmart</a> <a href="http://gatorsrusticburger.com/cialis-generic/">cialis</a>
<a href="http://thegrizzlygrowler.com/viagra-buy-in-canada/">subaction
showcomments viagra smile older</a> canadian viagra <a
href="http://dive-courses-bali.com/propecia-online/">cheap propecia</a> <a
href="http://dive-courses-bali.com/cialis/">lowest price generic cialis</a> <a
href="http://bootstrapplusplus.com/cialis-5mg/">cialis 5mg</a> <a
href="http://bootstrapplusplus.com/kamagra/">kamagra</a> co-exist: secondarily
ligation http://tattoosideasblog.com/cialis-generic/ cialis
http://iowansforsafeaccess.org/tadalafil-20-mg/ tadalafil 20 mg
http://thegrizzlygrowler.com/cialis/ cialis pills
http://gatorsrusticburger.com/cialis-generic/ cialis generic
http://thegrizzlygrowler.com/viagra-buy-in-canada/ viagra for sale
http://dive-courses-bali.com/propecia-online/ buy propecia
http://dive-courses-bali.com/cialis/ cialis
http://bootstrapplusplus.com/cialis-5mg/ cialis 10mg
http://bootstrapplusplus.com/kamagra/ viagra sperm disabilities,
anti-inflammatories seconds, varus.
name: epunosom
email: d<PASSWORD>
url: 'http://tattoosideasblog.com/cialis-generic/'
hidden: ''
date: '2019-07-19T10:45:58.104Z'
|
_data/comments/elasticsearch-restore/comment-1563533158105.yml
|
front_cart:
path: /warenkorb/
defaults: { _controller: ShopsysShopBundle:Front\Cart:index }
front_customer_edit:
path: /kunde/bearbeiten/
defaults: { _controller: ShopsysShopBundle:Front\Customer:edit }
front_customer_orders:
path: /kunde/bestellungen/
defaults: { _controller: ShopsysShopBundle:Front\Customer:orders }
front_customer_order_detail_registered:
path: /kunde/bestellungen/angaben/{orderNumber}
defaults: { _controller: ShopsysShopBundle:Front\Customer:orderDetailRegistered }
front_customer_order_detail_unregistered:
path: /bestellung-angaben/{urlHash}
defaults: { _controller: ShopsysShopBundle:Front\Customer:orderDetailUnregistered }
front_login:
path: /einloggen/
defaults: { _controller: ShopsysShopBundle:Front\Login:login }
front_logout:
path: /abmelden/{_csrf_token}
# controller action is unnecessary, because firewall processes whole request
front_order_index:
path: /bestellung/
defaults: { _controller: ShopsysShopBundle:Front\Order:index }
front_order_sent:
path: /bestellung-bestätigen/
defaults: { _controller: ShopsysShopBundle:Front\Order:sent }
front_product_search:
path: /suche
defaults: { _controller: ShopsysShopBundle:Front\Product:search }
front_registration_register:
path: /anmeldung/
defaults: { _controller: ShopsysShopBundle:Front\Registration:register }
front_registration_reset_password:
path: /vergessen-passwort/
defaults: { _controller: ShopsysShopBundle:Front\CustomerPassword:resetPassword }
front_registration_set_new_password:
path: /neues-passwort/
defaults: { _controller: ShopsysShopBundle:Front\CustomerPassword:setNewPassword }
front_brand_list:
path: /markenliste/
defaults: { _controller: ShopsysShopBundle:Front\Brand:list }
front_personal_data:
path: /personliche-informationen-ubersicht/
defaults: { _controller: ShopsysShopBundle:Front\PersonalData:index }
front_personal_data_access:
path: /personliche-informationen-ubersicht/{hash}/
defaults: { _controller: ShopsysShopBundle:Front\PersonalData:accessDisplay }
front_personal_data_export:
path: /personliche-informationen-export/
defaults: { _controller: ShopsysShopBundle:Front\PersonalData:export }
front_personal_data_access_export:
path: /personliche-informationen-export/{hash}/
defaults: { _controller: ShopsysShopBundle:Front\PersonalData:accessExport }
|
src/Shopsys/ShopBundle/Resources/config/routing_front_de.yml
|
title: slackmoji-regional-indicator
emojis:
- name: regional_indicator_a
src: https://emojis.slackmojis.com/emojis/images/1532523105/4266/regional_indicator_a.png
- name: regional_indicator_b
src: https://emojis.slackmojis.com/emojis/images/1532523142/4267/regional_indicator_b.png
- name: regional_indicator_c
src: https://emojis.slackmojis.com/emojis/images/1532523182/4268/regional_indicator_c.png
- name: regional_indicator_d
src: https://emojis.slackmojis.com/emojis/images/1532523209/4269/regional_indicator_d.png
- name: regional_indicator_e
src: https://emojis.slackmojis.com/emojis/images/1532523230/4270/regional_indicator_e.png
- name: regional_indicator_f
src: https://emojis.slackmojis.com/emojis/images/1532523255/4271/regional_indicator_f.png
- name: regional_indicator_g
src: https://emojis.slackmojis.com/emojis/images/1532523285/4272/regional_indicator_g.png
- name: regional_indicator_h
src: https://emojis.slackmojis.com/emojis/images/1532523314/4273/regional_indicator_h.png
- name: regional_indicator_i
src: https://emojis.slackmojis.com/emojis/images/1532523340/4274/regional_indicator_i.png
- name: regional_indicator_j
src: https://emojis.slackmojis.com/emojis/images/1532523369/4275/regional_indicator_j.png
- name: regional_indicator_k
src: https://emojis.slackmojis.com/emojis/images/1532523397/4276/regional_indicator_k.png
- name: regional_indicator_l
src: https://emojis.slackmojis.com/emojis/images/1532523430/4277/regional_indicator_l.png
- name: regional_indicator_m
src: https://emojis.slackmojis.com/emojis/images/1532523461/4278/regional_indicator_m.png
- name: regional_indicator_n
src: https://emojis.slackmojis.com/emojis/images/1532523511/4279/regional_indicator_n.png
- name: regional_indicator_o
src: https://emojis.slackmojis.com/emojis/images/1532523541/4280/regional_indicator_o.png
- name: regional_indicator_p
src: https://emojis.slackmojis.com/emojis/images/1532523571/4281/regional_indicator_p.png
- name: regional_indicator_q
src: https://emojis.slackmojis.com/emojis/images/1532523597/4282/regional_indicator_q.png
- name: regional_indicator_r
src: https://emojis.slackmojis.com/emojis/images/1532523628/4283/regional_indicator_r.png
- name: regional_indicator_s
src: https://emojis.slackmojis.com/emojis/images/1532523663/4284/regional_indicator_s.png
- name: regional_indicator_t
src: https://emojis.slackmojis.com/emojis/images/1532523706/4285/regional_indicator_t.png
- name: regional_indicator_u
src: https://emojis.slackmojis.com/emojis/images/1532523757/4286/regional_indicator_u.png
- name: regional_indicator_v
src: https://emojis.slackmojis.com/emojis/images/1532523807/4287/regional_indicator_v.png
- name: regional_indicator_w
src: https://emojis.slackmojis.com/emojis/images/1532523866/4288/regional_indicator_w.png
- name: regional_indicator_x
src: https://emojis.slackmojis.com/emojis/images/1532523905/4289/regional_indicator_x.png
- name: regional_indicator_y
src: https://emojis.slackmojis.com/emojis/images/1532523952/4290/regional_indicator_y.png
- name: regional_indicator_z
src: https://emojis.slackmojis.com/emojis/images/1532523985/4291/regional_indicator_z.png
|
packs/slackmojis-regional-indicator.yaml
|
_known_files:
/repositories/permian-frac-exchange/data/Apache Frac Schedule.xlsx: '2019-08-23T18:14:59.588020'
/repositories/permian-frac-exchange/data/CQ Frac Schedule 081919.xlsx: '2019-08-23T18:15:35.476604'
/repositories/permian-frac-exchange/data/DE3_Frac_Schedule_8-21-2019.xlsx: '2019-08-23T18:15:47.430579'
/repositories/permian-frac-exchange/data/Diamondback_3MonthFracSchedule_8.19.19_Midland.xlsx: '2019-08-23T18:15:41.416352'
/repositories/permian-frac-exchange/data/Discovery NR 3 Month Frac Schedule 08-19-2019.xlsx: '2019-08-23T18:15:12.569296'
/repositories/permian-frac-exchange/data/EER_FracSchedule_2019-08-19.xlsx: '2019-08-23T18:15:25.418005'
/repositories/permian-frac-exchange/data/Earthstone Operating- Frac Schedule (FSEC).xlsx: '2019-08-23T18:15:34.397122'
/repositories/permian-frac-exchange/data/Encana completions_081919.xlsx: '2019-08-23T18:15:08.812870'
/repositories/permian-frac-exchange/data/Guidon Energy Frac_Schedule_20190821.xlsx: '2019-08-23T18:15:02.437182'
/repositories/permian-frac-exchange/data/HeadingtonEnergy_19August19.xlsx: '2019-08-23T18:15:05.275727'
/repositories/permian-frac-exchange/data/HenryResources_3MonthFracSchedule_20Aug2019.xlsx: '2019-08-23T18:15:27.926988'
/repositories/permian-frac-exchange/data/HuntOil_3MonthFracSchedule_20Aug19.xlsx: '2019-08-23T18:15:21.372277'
/repositories/permian-frac-exchange/data/IRM_3MonthFracSchedule_20190819.xlsx: '2019-08-23T18:15:26.444716'
/repositories/permian-frac-exchange/data/LPI_3MonthFracSchedule_08192019.xls: '2019-08-23T18:15:19.984642'
/repositories/permian-frac-exchange/data/Lario3MoFracSched_8_19_2019.xlsx: '2019-08-23T18:15:32.729337'
/repositories/permian-frac-exchange/data/PB_Chevron_Frac Schedule 08-22-2019.xlsx: '2019-08-23T18:15:10.073985'
/repositories/permian-frac-exchange/data/PE Frac Schedule_08.21.19.xlsx: '2019-08-23T18:15:01.183393'
/repositories/permian-frac-exchange/data/Pioneer_FracSchedule_20190819.xlsx: '2019-08-23T18:15:16.327677'
/repositories/permian-frac-exchange/data/SMENERGY_FracSchedule_22AUG2019.xlsx: '2019-08-23T18:15:07.349911'
/repositories/permian-frac-exchange/data/SabaloOperating_3MonthFracSchedule_2019Aug19.xlsx: '2019-08-23T18:15:13.827501'
/repositories/permian-frac-exchange/data/XTO_3MonthFracSchedule_ 8-21-19.xlsx: '2019-08-23T18:15:44.115745'
test1: test1
|
config/download_log.yaml
|
%YAML 1.2
---
$id: http://devicetree.org/schemas/iio/adc/aspeed,ast2600-adc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: ADC that forms part of an ASPEED server management processor.
maintainers:
- <NAME> <<EMAIL>>
description: |
• 10-bit resolution for 16 voltage channels.
• The device is split into two individual engines, each containing 8 voltage
channels.
• Channel scanning can be non-continuous.
• Programmable ADC clock frequency.
• Programmable upper and lower thresholds for each channel.
• Interrupt when above or below the threshold for each channel.
• Supports hysteresis for each channel.
• Built-in compensating method.
• Built-in register to trim the internal reference voltage.
• Internal or external reference voltage.
• Supports 2 internal reference voltages: 1.2 V or 2.5 V.
• Integrated dividing circuit for battery sensing.
properties:
compatible:
enum:
- aspeed,ast2600-adc0
- aspeed,ast2600-adc1
description:
Their trimming data, which is used to calibrate the internal reference voltage,
is located at a different address in the OTP.
reg:
maxItems: 1
clocks:
maxItems: 1
description:
Input clock used to derive the sample clock. Expected to be the
SoC's APB clock.
resets:
maxItems: 1
"#io-channel-cells":
const: 1
vref-supply:
description:
The external regulator supply ADC reference voltage.
aspeed,int-vref-microvolt:
enum: [1200000, 2500000]
description:
ADC internal reference voltage in microvolts.
aspeed,battery-sensing:
type: boolean
description:
Inform the driver that the last channel will be used to sense the battery.
aspeed,trim-data-valid:
type: boolean
description: |
The ADC reference voltage can be calibrated to obtain the trimming
data, which will be stored in the OTP. This property informs the driver
that the data stored in the OTP is valid.
required:
- compatible
- reg
- clocks
- resets
- "#io-channel-cells"
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/ast2600-clock.h>
adc0: adc@1e6e9000 {
compatible = "aspeed,ast2600-adc0";
reg = <0x1e6e9000 0x100>;
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_ADC>;
#io-channel-cells = <1>;
aspeed,int-vref-microvolt = <2500000>;
};
adc1: adc@1e6e9100 {
compatible = "aspeed,ast2600-adc1";
reg = <0x1e6e9100 0x100>;
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_ADC>;
#io-channel-cells = <1>;
aspeed,int-vref-microvolt = <2500000>;
};
...
|
Documentation/devicetree/bindings/iio/adc/aspeed,ast2600-adc.yaml
|
- uid: type2docfx.Directions
name: Directions
fullName: Directions
children:
- uid: type2docfx.Directions.Bottom
name: Bottom
children: []
langs:
- typeScript
summary: A simple enum member.
type: field
package: type2docfx
- uid: type2docfx.Directions.Left
name: Left
children: []
langs:
- typeScript
summary: A simple enum member.
type: field
package: type2docfx
- uid: type2docfx.Directions.Right
name: Right
children: []
langs:
- typeScript
summary: A simple enum member.
type: field
package: type2docfx
- uid: type2docfx.Directions.Top
name: Top
children: []
langs:
- typeScript
summary: A simple enum member.
type: field
package: type2docfx
- uid: type2docfx.Directions.TopLeft
name: TopLeft
children: []
langs:
- typeScript
summary: A composite enum member.
type: field
numericValue: .nan
package: type2docfx
- uid: type2docfx.Directions.TopRight
name: TopRight
children: []
langs:
- typeScript
summary: A composite enum member.
type: field
numericValue: .nan
package: type2docfx
langs:
- typeScript
type: enum
summary: This is a simple Enumeration.
package: type2docfx
- uid: type2docfx.Size
name: Size
fullName: Size
children:
- uid: type2docfx.Size.Large
name: Large
children: []
langs:
- typeScript
summary: A simple enum member.
type: field
package: type2docfx
- uid: type2docfx.Size.Medium
name: Medium
children: []
langs:
- typeScript
summary: A simple enum member.
type: field
package: type2docfx
- uid: type2docfx.Size.Small
name: Small
children: []
langs:
- typeScript
summary: A simple enum member.
type: field
package: type2docfx
- uid: type2docfx.Size.isSmall
name: isSmall(Size)
children: []
type: function
langs:
- typeScript
summary: A function that is attached to an enumeration.
syntax:
content: 'function isSmall(value: Size)'
parameters:
- id: value
type:
- typeName: Size
typeId: !<tag:yaml.org,2002:js/undefined> ''
description: The value that should be tested.
optional: false
return:
type:
- typeName: boolean
typeId: !<tag:yaml.org,2002:js/undefined> ''
description: TRUE when the given value equals Size.Small.
package: type2docfx
langs:
- typeScript
type: enum
summary: |
This is a enumeration extended by a module.
This comment is ignored, as the enumeration is already defined.
You should see both the enum members and the module members.
package: type2docfx
|
tests/data/enumerations/spec.yml
|
---
environment:
matrix:
# Ruby 2.4 (32bit)
- ruby_version: "24"
PYTHONDIR: "C:\\Python27"
PYTHON: "C:\\Python27\\python.exe"
- ruby_version: "24"
PYTHONDIR: "C:\\Python35"
PYTHON: "C:\\Python35\\python.exe"
- ruby_version: "24"
PYTHONDIR: "C:\\Python36"
PYTHON: "C:\\Python36\\python.exe"
# Ruby 2.4 (64bit)
- ruby_version: "24-x64"
PYTHONDIR: "C:\\Python27-x64"
PYTHON: "C:\\Python27-x64\\python.exe"
- ruby_version: "24-x64"
PYTHONDIR: "C:\\Python35-x64"
PYTHON: "C:\\Python35-x64\\python.exe"
- ruby_version: "24-x64"
PYTHONDIR: "C:\\Python36-x64"
PYTHON: "C:\\Python36-x64\\python.exe"
# Ruby 2.3 (32bit)
- ruby_version: "23"
PYTHONDIR: "C:\\Python27"
PYTHON: "C:\\Python27\\python.exe"
- ruby_version: "23"
PYTHONDIR: "C:\\Python34"
PYTHON: "C:\\Python34\\python.exe"
- ruby_version: "23"
PYTHONDIR: "C:\\Python35"
PYTHON: "C:\\Python35\\python.exe"
- ruby_version: "23"
PYTHONDIR: "C:\\Python36"
PYTHON: "C:\\Python36\\python.exe"
# Ruby 2.3 (64bit)
- ruby_version: "23-x64"
PYTHONDIR: "C:\\Python27-x64"
PYTHON: "C:\\Python27-x64\\python.exe"
- ruby_version: "23-x64"
PYTHONDIR: "C:\\Python35-x64"
PYTHON: "C:\\Python35-x64\\python.exe"
- ruby_version: "23-x64"
PYTHONDIR: "C:\\Python36-x64"
PYTHON: "C:\\Python36-x64\\python.exe"
branches:
only:
- master
- /release-.*/
notifications:
- provider: Email
on_build_success: false
on_build_failure: false
on_build_status_changed: false
deploy: off
build: off
install:
- "SET PATH=%PYTHONDIR%;%PYTHONDIR%\\Scripts;%PATH%"
- "SET PATH=C:\\Ruby%ruby_version%\\bin;%PATH%"
- "bundle install"
- "pip install numpy"
before_test:
- "bundle exec rake -rdevkit clobber compile"
- ECHO "=== python investigator.py ==="
- "python lib\\pycall\\python\\investigator.py"
test_script:
- "SET PYCALL_DEBUG_FIND_LIBPYTHON=1"
- rake
matrix:
allow_failures:
- PYTHONDIR: "C:\\Python27"
- PYTHONDIR: "C:\\Python27-x64"
|
appveyor.yml
|
---
result: SUCCESS
timestamp: 2016-05-11 08:01:12 UTC
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/76/
trigger_url: http://manhattan.ci.chef.co/job/push-jobs-client-trigger-nightly-master/21/
duration: 57m25s
runs:
debian-6-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=i386,platform=debian-6,project=push-jobs-client,role=builder/76/
duration: 15m51s
el-5-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=i386,platform=el-5,project=push-jobs-client,role=builder/76/
duration: 17m52s
el-6-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=i386,platform=el-6,project=push-jobs-client,role=builder/76/
duration: 19m15s
ubuntu-10.04-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=i386,platform=ubuntu-10.04,project=push-jobs-client,role=builder/76/
duration: 15m46s
windows-2008r2-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=i386,platform=windows-2008r2,project=push-jobs-client,role=builder/76/
duration: 57m20s
debian-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=x86_64,platform=debian-6,project=push-jobs-client,role=builder/76/
duration: 25m56s
el-5:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=x86_64,platform=el-5,project=push-jobs-client,role=builder/76/
duration: 25m27s
el-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=x86_64,platform=el-6,project=push-jobs-client,role=builder/76/
duration: 29m4s
el-7:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=x86_64,platform=el-7,project=push-jobs-client,role=builder/76/
duration: 17m35s
mac_os_x-10.9:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=x86_64,platform=mac_os_x-10.9,project=push-jobs-client,role=builder/76/
duration: 11m42s
ubuntu-10.04:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/push-jobs-client-build/architecture=x86_64,platform=ubuntu-10.04,project=push-jobs-client,role=builder/76/
duration: 23m38s
|
reports/stages/manhattan.ci.chef.co/job/push-jobs-client-build/76.yaml
|
items:
- uid: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat"
id: "WithFormat"
parent: "com.microsoft.azure.management.apimanagement.v2019_01_01"
children:
- "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat.withFormat(com.microsoft.azure.management.apimanagement.v2019_01_01.ContentFormat)"
langs:
- "java"
name: "ApiContract.UpdateStages.WithFormat"
nameWithType: "ApiContract.UpdateStages.WithFormat"
fullName: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat"
type: "Interface"
package: "com.microsoft.azure.management.apimanagement.v2019_01_01"
summary: "The stage of the apicontract update allowing to specify Format."
syntax:
content: "public static interface ApiContract.UpdateStages.WithFormat"
- uid: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat.withFormat(com.microsoft.azure.management.apimanagement.v2019_01_01.ContentFormat)"
id: "withFormat(com.microsoft.azure.management.apimanagement.v2019_01_01.ContentFormat)"
parent: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat"
langs:
- "java"
name: "withFormat(ContentFormat format)"
nameWithType: "ApiContract.UpdateStages.WithFormat.withFormat(ContentFormat format)"
fullName: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat.withFormat(ContentFormat format)"
overload: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat.withFormat*"
type: "Method"
package: "com.microsoft.azure.management.apimanagement.v2019_01_01"
summary: "Specifies format."
syntax:
content: "public abstract ApiContract.Update withFormat(ContentFormat format)"
parameters:
- id: "format"
type: "com.microsoft.azure.management.apimanagement.v2019_01_01.ContentFormat"
description: "Format of the Content in which the API is getting imported. Possible values include: 'wadl-xml', 'wadl-link-json', 'swagger-json', 'swagger-link-json', 'wsdl', 'wsdl-link', 'openapi', 'openapi+json', 'openapi-link'"
return:
type: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.Update"
description: "the next update stage"
references:
- uid: "com.microsoft.azure.management.apimanagement.v2019_01_01.ContentFormat"
name: "ContentFormat"
nameWithType: "ContentFormat"
fullName: "com.microsoft.azure.management.apimanagement.v2019_01_01.ContentFormat"
- uid: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.Update"
name: "ApiContract.Update"
nameWithType: "ApiContract.Update"
fullName: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.Update"
- uid: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat.withFormat*"
name: "withFormat"
nameWithType: "ApiContract.UpdateStages.WithFormat.withFormat"
fullName: "com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat.withFormat"
package: "com.microsoft.azure.management.apimanagement.v2019_01_01"
|
docs-ref-autogen/com.microsoft.azure.management.apimanagement.v2019_01_01.ApiContract.UpdateStages.WithFormat.yml
|
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
carrier: galloper
kompose.cmd: kompose convert
kompose.version: 1.7.0 (HEAD)
traefik.enable: "true"
traefik.http.middlewares.galloper-auth.forwardauth.address: http://carrier-auth/forward-auth/auth?target=json&scope=galloper
traefik.http.routers.galloper.middlewares: galloper-auth@docker
traefik.http.routers.galloper.rule: PathPrefix(`/`)
traefik.http.services.galloper.loadbalancer.server.port: "5000"
creationTimestamp: null
labels:
io.kompose.service: galloper
name: galloper
spec:
replicas: 1
strategy:
type: Recreate
template:
metadata:
creationTimestamp: null
labels:
io.kompose.service: galloper
spec:
containers:
- env:
- name: APP_HOST
value: http://localhost
- name: CPU_CORES
value: "2"
- name: DATABASE_SCHEMA
value: carrier
- name: DATABASE_VENDOR
value: postgres
- name: MINIO_ACCESS_KEY
value: admin
- name: MINIO_HOST
value: http://carrier-minio:9000
- name: MINIO_REGION
value: us-east-1
- name: MINIO_SECRET_KEY
value: password
- name: POSTGRES_DB
value: carrier
- name: POSTGRES_HOST
value: carrier-postgres
- name: POSTGRES_PASSWORD
value: password
- name: POSTGRES_USER
value: carrier
- name: REDIS_DB
value: "2"
- name: REDIS_HOST
value: carrier-redis
- name: REDIS_PASSWORD
value: password
- name: VAULT_URL
value: http://carrier-vault:8200
image: getcarrier/galloper:latest
name: carrier-galloper
resources: {}
volumeMounts:
- mountPath: /var/run/docker.sock
name: galloper-claim0
restartPolicy: Always
volumes:
- name: galloper-claim0
persistentVolumeClaim:
claimName: galloper-claim0
status: {}
|
dev/newmanifests/galloper-deployment.yaml
|
gr:
homepage: "Αρχική"
welcome: "Καλώς ήρθατε στο ResearchGr"
start_immediately: "Μπορείτε να ξεκινήσετε αμέσως δημιουργώντας ένα νέο έργο"
account_activated: "Ο λογαριασμός σας έχει ενεργοποιηθεί. Μπορείτε τώρα να εισέλθετε και να δημιουργήσετε το πρώτο σας έργο."
activation_email: "Έχετε λάβει email με οδηγίες για την ενεργοποίηση του λογαριασμού σας."
no_access: "Δεν έχετε πρόσβαση στη σελίδα που ζητήσατε. Έγινε ανακατεύθυνση στην αρχική σελίδα."
search: "Αναζήτηση χρήστη ή έργου"
search_results: "Αποτελέσματα αναζήτησης"
project_results: "Έργα"
user_results: "Χρήστες"
search_user: "Αναζήτηση χρήστη"
no_results: "Δε βρέθηκαν αποτελέσματα για την αναζήτησή σας."
submit: "Υποβολή"
save: "Αποθήκευση"
cancel: "Ακύρωση"
ok: "Ok"
word_yes: "Ναι"
word_no: "Όχι"
word_or: "Ή"
are_you_sure: "Είστε σίγουρος/η"
question_mark: ";"
show: "Προβολή"
edit: "Επεξεργασία"
delete: "Διαγραφή"
reason: "Αιτία"
back: "Επιστροφή"
english: "Αγγλικά"
greek: "Ελληνικά"
my_projects: "Τα έργα μου"
owner: "Μέλος"
collaborator: "Συνεργάτης"
send: "Αποστολή"
try_again: "Παρακαλώ δοκιμάστε ξανά."
error_persists: "Εαν το συγκεκριμένο σφάλμα παραμείνει επικοινωνήστε με το τμήμα εξυπηρέτησης στο... (support_mail)"
write_comment: "Γράψτε ένα σχόλιο"
provide_comment: "Σε αυτή την περίπτωση <b>πρέπει</b> να γράψετε ένα σχόλιο"
large_comment: "Το σχόλιο σας δεν πρέπει να υπερβαίνει τους 200 χαρακτήρες"
search_restriction: "Μόνο οι εγγεγραμένοι χρήστες μπορούν να αναζητήσουν άλλους χρήστες ή/και ερευνητικά έργα."
previous: "Προηγούμενη"
next: "Επόμενη"
invite_search_email: "Email χρήστη"
invited: "Έχετε λάβει πρόσκληση από τον/την"
participate: "για να συμμετέχετε στο έργο"
exact_phrase_match: "Να περιλαμβάνει ολόκληρη τη φράση"
for_example: "π.χ."
time:
formats:
short: "%d/%m/%Y"
long: "%d/%m/%Y (%H:%M:%S)"
more_than: "περισσότερο από"
ago: "πριν"
few_seconds: "πριν από λίγα δευτερόλεπτα"
year:
one: "1 έτος"
other: "%{count} έτη"
month:
one: "1 μήνα"
other: "%{count} μήνες"
day:
one: "1 μέρα"
other: "%{count} μέρες"
hour:
one: "1 ώρα"
other: "%{count} ώρες"
minute:
one: "1 λεπτό"
other: "%{count} λεπτά"
|
config/locales/defaults/gr.yml
|
name: '🏁 Create Release PR'
# Controls when the action will run. Workflow runs when manually triggered using the UI or API.
on:
workflow_dispatch:
# Inputs the workflow accepts.
inputs:
newVersion:
# Friendly description to be shown in the UI instead of 'name'
description: 'The version label; follows the same format as `npm version` command'
# Default value if no value is explicitly provided
default: 'patch'
# Input has to be provided for the workflow to run
required: true
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "greet"
bump-version:
# The type of runner that the job will run on
runs-on: ubuntu-latest
name: 'Bump npm & manifest versions'
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v1
with:
node-version: '12'
- name: Cache node modules
uses: actions/cache@v2
env:
cache-name: cache-node-modules
with:
# npm cache files are stored in `~/.npm` on Linux/macOS
path: ~/.npm
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- name: 'install dependencies'
run: npm install
# Runs a single command using the runners shell
- name: 'bump npm version'
run: npm version --no-git-tag-version "${{ github.event.inputs.newVersion }}"
- name: 'bump manifest version'
id: 'bump-version'
shell: python
run: |
import json, os
with open('package.json') as read_package_file:
package = json.load(read_package_file)
with open('src/manifest.json') as read_manifest_file:
manifest = json.load(read_manifest_file)
newVersion = package['version']
manifest['version'] = newVersion
with open('src/manifest.json', 'w') as write_manifest_file:
json.dump(manifest, write_manifest_file)
print("new version in package.json", newVersion)
os.system('echo "::set-output name=NEW_VERSION::{0}"'.format(newVersion))
- name: 'eslint/prettier'
run: npx eslint --fix src/manifest.json
- name: 'create pull request'
uses: peter-evans/create-pull-request@v3
id: create-pull-request
with:
commit-message: "🏁: v${{ steps.bump-version.outputs.NEW_VERSION }}"
branch: "release/v${{ steps.bump-version.outputs.NEW_VERSION }}"
delete-branch: true
title: "🏁: v${{ steps.bump-version.outputs.NEW_VERSION }}"
reviewers: "rheaditi,abinavseelan"
- name: Output
run: |
echo "Pull Request Number - ${{ steps.create-pull-request.outputs.pull-request-number }}"
echo "Pull Request URL - ${{ steps.create-pull-request.outputs.pull-request-url }}"
|
.github/workflows/create-release-pr.yml
|
name: NodeBalancer Config
description: >
Describes a configuration for a NodeBalancer.
schema:
id:
editable: false
type: Integer
value: 804
description: The ID of this NodeBalancer config.
port:
editable: true
type: Integer
value: 80
description: Port to bind to on the public interfaces. 1-65534
protocol:
editable: true
type: Enum
subtype: protocol
value: 'https'
description: The protocol used for the config.
algorithm:
editable: true
type: Enum
subtype: algorithm
value: 'roundrobin'
description: Balancing algorithm
stickiness:
editable: true
type: Enum
subtype: stickiness
value: 'table'
description: Session persistence. Route subsequent requests from a client to the same backend.
check:
editable: true
type: Enum
subtype: check
value: 'connection'
description: Perform active health checks on the backend nodes.
check_interval:
editable: true
type: Integer
value: 5
description: Seconds between health check probes.
check_timeout:
editable: true
type: Integer
value: 3
description: Seconds to wait before considering the probe a failure. 1-30. Must be less than check_interval.
check_attempts:
editable: true
type: Integer
value: 20
description: Number of failed probes before taking a node out of rotation. 1-30.
check_path:
editable: true
type: String
value: "/"
description: When check is "http", the path to request.
check_body:
editable: true
type: String
value:
description: When check is "http", a regex to match within the first 16,384 bytes of the response body.
check_passive:
editable: true
type: Boolean
value: true
description: Enable passive checks based on observing communication with back-end nodes.
ssl_commonname:
editable: true
type: String
value:
description: Common Name of the configured SSL certificate, if present.
ssl_fingerprint:
editable: true
type: String
value:
description: SSL Certificate fingerprint, if present.
cipher_suite:
editable: true
type: Enum
subtype: cipher_suite
value: recommended
description: SSL cipher suite to enforce.
enums:
protocol:
http: http
https: https
tcp: tcp
algorithm:
roundrobin: Round robin
leastconn: Assigns connections to the backend with the least connections.
source: Uses the client's IPv4 address.
stickiness:
none: None
table: Table
http_cookie: Http cookie
check:
none: None
connection: Requires a successful TCP handshake.
http: Requires a 2xx or 3xx response from the backend node.
http_body: Uses a regex to match against an expected result body.
cipher_suite:
recommended: Recommended
legacy: Legacy
|
docs/src/data/objects/nodebalancer_config.yaml
|
title: Nicolas Hoizey, passionate about the Web and photography
author: <NAME>
email: <EMAIL>
description: > # this means to ignore newlines until "baseurl:"
Nicolas Hoizey, passionate about the Web and photography, among many other interests.
baseurl: ""
url: "https://nicolas-hoizey.com"
twitter_username: nhoizey
github_username: nhoizey
# Handling content with date in the future
future: false
# Markdown parsing
markdown: kramdown
kramdown:
input: GFM
syntax_highlighter: rouge
footnote_backlink: "⬆︎"
header_offset: 1
# Indexing for related post using the classifier-reborn plugin
lsi: false
# Defaults
defaults:
- scope:
path: ""
type: "posts"
values:
layout: "default"
- scope:
path: "articles"
type: "posts"
values:
layout: "post"
lang: en
categories:
- articles
permalink: :year/:month/:title.html
- scope:
path: "links"
type: "posts"
values:
layout: "link"
lang: en
categories:
- links
permalink: :categories/:year/:month/:title.html
# Tags
tag_page_layout: tag
tag_page_dir: tags
tag_feed_layout: atom-tag
tag_feed_dir: tags
exclude:
- autoprefixer.yml
- Gemfile*
- README.md
- LICENSE.md
- tags/*.md
- Rakefile
include:
- .htaccess
- .well-known
- favicon.jpg
- tags
# Jekyll Assets
assets:
compression: true
plugins:
css: { autoprefixer: {} }
autoprefixer:
browsers:
- "last 2 versions"
- "IE > 10"
# Get data from Notist
jekyll_get:
data: talks
json: "https://noti.st/nhoizey.json"
# POSSEify plugin settings
posseify:
twitter:
max_length: 280
url_length: 23
link:
min_hashtags: 1
max_hashtags: 5
template: "🔗 @posse_content<br>\n@posse_url<br>\n<br>\n@posse_tags"
post:
min_hashtags: 3
max_hashtags: 5
template: "@posse_title\n\n@posse_url\n\n@posse_tags"
# Algolia
algolia:
application_id: …
index_name: …
read_only_api_key: …
nodes_to_index: "p,li,figcaption,blockquote"
files_to_exclude:
- 404.html
- index.html
- search.html
- offline.html
- offline-fallback.html
- about/mentions.html
- tags/index.html
- links/index.html
- tools/*
# Webmention
webmentions:
# debug: true
username: nicolas-hoizey.com
cache_folder: _data/webmentions
cache_bad_uris_for: 5
pause_lookups: false
collections:
- links
templates:
count: _includes/webmentions/count.html
likes: _includes/webmentions/likes.html
reposts: _includes/webmentions/reposts.html
webmentions: _includes/webmentions/webmentions.html
js: false
legacy_domains:
- http://nicolas-hoizey.com
throttle_lookups:
# last_week: daily
last_month: daily
last_year: weekly
older: every 2 weeks
# YouTube
youtube:
api_key: …
# Cloudinary
cloudinary:
cloud_name: nho
only_prod: true
verbose: false
presets:
default:
min_width: 360
max_width: 1600
fallback_max_width: 800
steps: 5
sizes: "(max-width: 67rem) 90vw, 60rem"
figure: always
twothirds:
min_width: 240
max_width: 1120
fallback_max_width: 600
steps: 5
sizes: "(max-width: 20rem) 45vw, (max-width: 67rem) 60vw, 40rem"
figure: always
attributes:
class: twothirds
onehalf:
min_width: 180
max_width: 800
fallback_max_width: 400
steps: 5
sizes: "(max-width: 67rem) 45vw, 30rem"
figure: always
attributes:
class: onehalf
onethird:
min_width: 120
max_width: 560
fallback_max_width: 300
steps: 5
sizes: "(max-width: 20rem) 45vw, (max-width: 67rem) 30vw, 20rem"
figure: always
attributes:
class: onethird right
onefourth:
min_width: 100
max_width: 400
fallback_max_width: 200
steps: 5
sizes: "(max-width: 20rem) 45vw, (max-width: 30rem) 30vw, (max-width: 67rem) 22.5vw, 15rem"
figure: always
attributes:
class: onefourth right
logo:
min_width: 120
max_width: 560
fallback_max_width: 300
steps: 5
sizes: "(max-width: 20rem) 45vw, (max-width: 67rem) 30vw, 20rem"
figure: never
attributes:
class: logo
# Archives
jekyll-archives:
enabled:
- year
- month
layouts:
year: archives-year
month: archives-month
permalinks:
year: /:year/
month: /:year/:month/
|
_config.yml
|
---
language: php
cache:
directories:
- $HOME/.composer/cache/files
php:
- nightly
- 7.3
- 7.2
- 7.1
- 7.0
- 5.6
- 5.5
- 5.4
env:
global:
# - PATH=~/bin:~/.composer/vendor/bin:vendor/bin:$PATH
- PATH=~/.composer/vendor/bin:$PATH
- TEST_LINT=`true`
matrix:
fast_finish: true
include:
- php: 5.3
dist: precise
allow_failures:
- php: nightly
- php: 5.4
- php: 5.3
before_install:
- xdebug=/home/travis/.phpenv/versions/$TRAVIS_PHP_VERSION/etc/conf.d/xdebug.ini
- if [[ -f $xdebug ]]; then cat $xdebug; cp $xdebug /tmp; phpenv config-rm xdebug.ini; fi
- xdebug_php=
- if [[ -f /tmp/xdebug.ini ]]; then xdebug_php='-c /tmp/xdebug.ini'; fi
- composer global require --dev friendsofphp/php-cs-fixer:">=1.12" || true
- composer global require --dev phpstan/phpstan-phpunit || composer global require --dev phpunit/phpunit
- composer global require --dev sensiolabs/security-checker || composer global require --dev sensiolabs/security-checker:^3.0
install:
- composer validate
- composer install --no-scripts --no-progress --no-suggest
- if [[ -f config/parameters.yml.travis ]]; then cp config/parameters.yml.travis config/parameters.yml; fi
- composer up --no-progress --no-suggest --optimize-autoloader --apcu-autoloader
- composer show
before_script:
script:
- for f in $(find . -path './vendor' -prune -o -path './tests' -prune -o -name \*.php -print) ; do php -l $f ; done # Do this first!
- if [[ -v TEST_LINT ]]; then for f in $(find ./tests -name \*.php -print); do php -l $f; done; fi
- if command -v php-cs-fixer; then php-cs-fixer fix --diff --dry-run -vvv --using-cache=no; fi
- if command -v phpstan; then phpstan analyse -v --level max *.php; fi
- php $xdebug_php $(command -v phpunit)
- security-checker security:check ~/.composer/composer.lock
- security-checker security:check
#^ This checks that the application doesn't use dependencies with known security vulnerabilities
- if [[ -v VENDOR_LINT ]]; then for f in $(find ./vendor -name \*.php); do php -l $f > /dev/null; done; fi
#^ Should be the last line of install but sometimes long to execute and little can be done for errors found.
after_success:
- bash <(curl -s https://codecov.io/bash) -s /tmp
# inspirated from:
# symfony/symfony-demo
# https://github.com/doctrine/dbal/blob/v2.5.13/.travis.yml
|
.travis.yml
|
{% set name = "ngspice" %}
{% set version = "36" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
url: https://downloads.sourceforge.net/project/ngspice/ng-spice-rework/{{ version }}/ngspice-{{ version }}.tar.gz
sha256: 4f818287efba245341046635b757ae81f879549b326a4316b5f6e697aa517f8c
patches:
- patches/libtoolize-name.patch # [osx]
- patches/vngspice-install-location.patch # [win]
build:
number: 0
skip: True # [win and vc<14]
# Due to how 'conda render' extracts metadata info, the 'outputs'
# key must appear OUTSIDE of the jinja conditional below!
outputs:
##
## UNIX
##
{% if unix %}
- name: ngspice-lib
script: build_lib.sh
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
- llvm-openmp # [osx]
- libgomp # [linux]
- autoconf
- automake
- libtool
- texinfo
- bison
- flex
- make
host:
- ncurses
- readline
run:
- ncurses
- readline
test:
commands:
- test -f $PREFIX/lib/libngspice{{ SHLIB_EXT }}
- name: ngspice-exe
script: build_exe.sh
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
- llvm-openmp # [osx]
- libgomp # [linux]
- autoconf
- automake
- libtool
- texinfo
- bison
- flex
- make
host:
- ncurses
- readline
- xorg-libx11
- xorg-libxaw
- xorg-libxt
run:
- ncurses
- readline
- xorg-libx11
- xorg-libxaw
- xorg-libxt
test:
files:
        - test-files
commands:
- $PREFIX/bin/ngspice --version
- $PREFIX/bin/ngspice -b -r /tmp/raw.out test-files/test.cir
- name: ngspice
requirements:
run:
- {{ pin_subpackage('ngspice-lib', exact=True) }}
- {{ pin_subpackage('ngspice-exe', exact=True) }}
test:
commands:
- test -f $PREFIX/lib/libngspice{{ SHLIB_EXT }}
- $PREFIX/bin/ngspice --version
##
## WINDOWS
##
{% else %}
- name: ngspice-lib
script: build_lib.bat
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
host:
# technically this ought to be in the 'build' requirements,
# but it's easier to locate if we put it here, via LIBRARY_BIN
- winflexbison
test:
commands:
- if not exist %PREFIX%\\bin\\ngspice.dll exit 1
- name: ngspice-exe
script: build_exe.bat
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
host:
# technically this ought to be in the 'build' requirements,
# but it's easier to locate if we put it here, via LIBRARY_BIN
- winflexbison
test:
files:
        - test-files
commands:
- "call %PREFIX%\\bin\\ngspice_con.exe --version"
- "call %PREFIX%\\bin\\ngspice_con.exe -b -r raw.out test-files\\test.cir"
- name: ngspice
requirements:
run:
- {{ pin_subpackage('ngspice-lib', exact=True) }}
- {{ pin_subpackage('ngspice-exe', exact=True) }}
test:
commands:
- if not exist %PREFIX%\\bin\\ngspice.dll exit 1
- "%PREFIX%\\bin\\ngspice_con.exe --version"
{% endif %}
about:
home: http://ngspice.sourceforge.net
doc_url: http://ngspice.sourceforge.net/docs.html
dev_url: http://ngspice.sourceforge.net/devel.html
license: BSD-3-Clause
license_family: BSD
license_file: COPYING
summary: 'The open source spice simulator for electric and electronic circuits'
description: |
ngspice is the open source spice simulator for electric and electronic circuits.
Such a circuit may comprise of JFETs, bipolar and MOS transistors, passive elements
like R, L, or C, diodes, transmission lines and other devices, all interconnected
in a netlist. Digital circuits are simulated as well, event driven and fast, from
single gates to complex circuits. And you may enter the combination of both analog
and digital as a mixed-signal circuit.
ngspice offers a wealth of device models for active, passive, analog, and digital
elements. Model parameters are provided by the semiconductor manufacturers.
The user add her circuits as a netlist, and the output is one or more graphs of
currents, voltages and other electrical quantities or is saved in a data file.
Note:
    This build was configured with `--enable-xspice --enable-cider --enable-openmp`
See the [build script](https://git.io/JfVZX) for more specifics.
extra:
recipe-maintainers:
- stuarteberg
|
recipe/meta.yaml
|
de:
views:
lifestyle_footprints:
title: Berechnen Sie Ihre CO2-Bilanz
questions:
region: In welcher Region wohnen Sie?
home: Wie wohnen Sie?
home_area: Wie groß ist Ihr Zuhause?
heating: Wie heizen Sie?
green_electricity: Nutzen Sie Ökostrom?
food: Wie sieht Ihre Ernährung hauptsächlich aus?
      shopping: Im Hinblick auf Kleidung, Schuhe, Elektronik, Möbel und andere Gegenstände, wie viel davon kaufen Sie brandneu?
car_type: Was für eine Art Auto nutzen Sie?
car_distance: Wie häufig nutzen Sie das Auto pro Woche?
flight_hours: Wie viele Stunden sind Sie im vergangenen Jahr im Flugzeug gereist?
options:
region:
west: Westen
midwest: Mittlerer Westen
northeast: Nordosten
south: Süden
other: Andere
home:
house: Haus
apartment: Wohnung
one_or_two_bedrooms: 1-2 Schlafzimmer
three_or_four_bedrooms: 3-4 Schlafzimmer
five_or_more_bedrooms: 5+ Schlafzimmer
#TODO: Remove after LifestyleFootprints have been migrated to use home_area for these
fifteen_sqm: 15 m² pro Person
twentyfive_sqm: 25 m² pro Person
fortytwo_sqm: 42 m² pro Person
sixty_sqm: 60 m² pro Person
eighty_sqm: 80 m² pro Person
home_area:
fifteen_sqm: 15 Quadratmeter pro Person
twentyfive_sqm: 25 Quadratmeter pro Person
fortytwo_sqm: 42 Quadratmeter pro Person
sixty_sqm: 60 Quadratmeter pro Person
eighty_sqm: 80 Quadratmeter pro Person
heating:
electricity: Strom
natural_gas: Gas
district: Fernwärme
coal: Kohle
biomass: Biomasse
environmental_or_solar: Erneuerbare bzw. Solarenergie
airpump: Luftwärmepumpe
geothermal: Geothermiepumpe
heating_oil: Heizöl
propane: Propan
no_heating: Ich beheize mein Haus nicht
dont_know: Ich weiß nicht / Andere
green_electricity:
'yes': 'Ja'
seventyfive_percent: 75% grün
fifty_percent: 50% grün
twentyfive_percent: 25% grün
'no': 'Nein'
dont_know: Ich weiß nicht
food:
vegan: Pflanzenbasiert / vegan
vegetarian: Vegetarisch
pescetarian: Pescetarisch (Fisch aber kein Fleisch)
meat_low: Etwas Fleisch (weniger als 50 g pro Tag)
meat_medium: Fleischesser
meat_high: Viel Fleisch (mehr als 100 g pro Tag)
shopping:
low: Nichts oder nur wenig im letzten Jahr
average: Ab und zu etwas
high: Mehreres pro Monat oder mehrere größere Produkte im letzten Jahr
car_type:
gas: Benzin
petrol: Benzin
diesel: Diesel
hybrid_plugin: Plug-in-Hybrid
etanol: Ethanol (E85)
electric: Elektrisch
hvo_electric: Elektrisch oder HVO
vehicle_gas: Autogas (CNG)
no_car: Ich habe kein Auto
next: Weiter
back: Zurück
methodology: Wie funktioniert die Berechnung?
index:
heading: Ihre CO2-Bilanzen
text: Dies ist eine Übersicht über Ihre CO2-Bilanzen, die Sie berechnet haben.
show:
heading_public: Meine jährliche CO2-Bilanz
my_climate_footprint: Meine CO2-Bilanz von %{footprint} ist %{relative} als der Durchschnitt in %{country}. Um dem 1,5-Grad-Ziel des Pariser Abkommens zu entsprechen, müssen wir bis zum Jahr 2030 weltweit einen Durchschnitt von 2,5 Tonnen pro Person erreichen.
my_climate_footprint_compared_world: Meine CO2-Bilanz von %{footprint} ist %{relative} als der weltweite Durchschnitt. Um dem 1,5-Grad-Ziel des Pariser Abkommens zu entsprechen, müssen wir bis zum Jahr 2030 weltweit einen Durchschnitt von 2,5 Tonnen pro Person erreichen.
new_calculation: Neue Berechnung
delete: Löschen
delete_confirmation:
heading: Sicher?
text: Sie sind dabei, Ihre CO2-Berechnung zu löschen, die am %{date} erstellt wurde.
cta: Berechnung löschen
abort: Berechnung behalten
share:
text: Einer der wirksamsten Wege, Veränderungen herbeizuführen, besteht darin, auch andere zum Handeln zu inspirieren, und wir würden uns sehr freuen, wenn Sie unsere Social-Media-Kanäle nutzen und sie in Ihren eigenen Kanälen teilen würden.
social_quote: Mein CO2-Fußabdruck beträgt %{tonnes} Tonnen! Berechnen Sie Ihren eigenen CO2-Fußabdruck unter %{url}
|
config/locales/views/lifestyle_footprints.de.yml
|
Create Link
Create a link for an alternate character.
This resets the owner_hash of the character to an empty string
---
consumes:
- application/json
operationId: post_accounts_account_id
tags:
- Accounts
parameters:
- name: account_id
in: path
description: account's id to add the character link to
required: true
type: integer
format: int32
- name: character_identifier
in: body
description: information that identifies the character to add
required: true
schema:
description: character information
type: object
properties:
character_id:
description: id of the character
required: false
type: integer
format: int32
character_name:
description: name of the character
required: false
type: string
responses:
201:
description: Link between Account and Character was created
schema:
required:
- message
- character_id
- account_id
- character_name
properties:
message:
description: message that can be displayed
type: string
character_id:
description: id of the character that was added as alt
type: integer
format: int32
account_id:
description: id of the account the character was added as alt
type: integer
format: int32
character_name:
description: name of the character that was added as alt
type: string
400:
description: Character does not exist in eve or link already existed
examples:
application/json:
error: Character does not exist
schema:
$ref: '#/definitions/ErrorBadRequest'
403:
description: You are missing the required permission for this route
examples:
application/json:
error: You are missing the required permission change_character_links
schema:
$ref: '#/definitions/ErrorForbidden'
404:
description: Account did not exist
examples:
application/json:
error: Account did not exist
schema:
$ref: '#/definitions/ErrorNotFound'
|
waitlist/blueprints/swagger_api/accounts/links_post_v1.yml
|
name: CI
on: [push, pull_request]
jobs:
build:
name: ${{ matrix.platform.name }} ${{ matrix.config.name }}
runs-on: ${{ matrix.platform.os }}
strategy:
fail-fast: false
matrix:
platform:
- { name: Windows VS2019, os: windows-latest }
- { name: Linux GCC, os: ubuntu-latest }
- {
name: Linux Clang,
os: ubuntu-latest,
flags: -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++,
}
- { name: MacOS XCode, os: macos-latest }
steps:
- name: Install Linux Dependencies
if: runner.os == 'Linux'
run: sudo apt-get update && sudo apt-get install libxrandr-dev libxcursor-dev libudev-dev libopenal-dev libflac-dev libvorbis-dev libgl1-mesa-dev libegl1-mesa-dev
- name: Examples - Checkout Code
uses: actions/checkout@v2
with:
path: Examples
- name: SFML - Checkout Code
uses: actions/checkout@v2
with:
repository: SFML/SFML
path: SFML
- name: SFML - Configure CMake
shell: bash
run: cmake -S $GITHUB_WORKSPACE/SFML -B $GITHUB_WORKSPACE/SFML/build -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/SFML/install -DCMAKE_VERBOSE_MAKEFILE=ON -DBUILD_SHARED_LIBS=ON ${{matrix.platform.flags}}
- name: SFML - Build
shell: bash
run: cmake --build $GITHUB_WORKSPACE/SFML/build --config Release --target install
- name: zlib - Checkout Code
uses: actions/checkout@v2
with:
repository: madler/zlib
path: zlib
- name: zlib - Configure CMake
shell: bash
run: cmake -S $GITHUB_WORKSPACE/zlib -B $GITHUB_WORKSPACE/zlib/build -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/zlib/install -DCMAKE_VERBOSE_MAKEFILE=ON -DBUILD_SHARED_LIBS=ON ${{matrix.platform.flags}}
- name: zlib - Build
shell: bash
run: cmake --build $GITHUB_WORKSPACE/zlib/build --config Release --target install
- name: Examples - Configure CMake
shell: bash
run: cmake -S $GITHUB_WORKSPACE/Examples -B $GITHUB_WORKSPACE/Examples/build -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/Examples/install -DCMAKE_VERBOSE_MAKEFILE=ON -DSFML_DIR=$GITHUB_WORKSPACE/SFML/install/lib/cmake/SFML -DZLIB_ROOT=$GITHUB_WORKSPACE/zlib/install -DEXAMPLES_BUILD_SFML=ON -DEXAMPLES_LINK_SFML_STATICALLY=OFF -DEXAMPLES_BUILD_ZLIB=ON -DEXAMPLES_LINK_ZLIB_STATICALLY=OFF -DEXAMPLES_LINK_RUNTIME_STATICALLY=OFF ${{matrix.platform.flags}}
- name: Examples - Build
shell: bash
run: cmake --build $GITHUB_WORKSPACE/Examples/build --config Release --target install
|
out/eXpl0it3r/Examples/.github_workflows_ci.yml
|
name: Bug report
title: "[Bug] Bug title "
description: If something isn't working as expected.
labels: [ "bug" ]
body:
- type: markdown
attributes:
value: |
For better global communication, Please write in English.
- type: checkboxes
attributes:
label: Search before asking
description: >
Please make sure to search in the [issues](https://github.com/apache/eventmesh/issues?q=is%3Aissue)
first to see whether the same issue was reported already.
options:
- label: >
I had searched in the [issues](https://github.com/apache/eventmesh/issues?q=is%3Aissue) and found
no similar issues.
required: true
- type: dropdown
attributes:
label: Environment
description: Describe the environment.
options:
- Mac
          - Windows
- Linux
- Other
validations:
required: true
- type: dropdown
attributes:
label: EventMesh version
description: Describe the EventMesh version.
options:
- master
- 1.3.0
- 1.2.0
- Other
validations:
required: true
- type: textarea
attributes:
label: What happened
description: Describe what happened.
placeholder: >
A clear and concise description of what the bug is.
validations:
required: true
- type: textarea
attributes:
label: How to reproduce
description: >
Describe the steps to reproduce the bug here.
placeholder: >
Please make sure you provide a reproducible step-by-step case of how to reproduce the problem
as minimally and precisely as possible.
validations:
required: true
- type: textarea
attributes:
label: Debug logs
description: Anything else we need to know?
placeholder: >
Add your debug logs here.
validations:
required: false
- type: checkboxes
attributes:
label: Are you willing to submit PR?
description: >
This is absolutely not required, but we are happy to guide you in the contribution process
especially if you already have a good understanding of how to implement the fix.
options:
- label: Yes I am willing to submit a PR!
- type: markdown
attributes:
value: "Thanks for completing our form!"
|
.github/ISSUE_TEMPLATE/bug_report.yml
|
---
values:
ingress:
enabled: true
hosts:
- "{{ service_name }}.{{ domain }}"
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
cert-manager.io/cluster-issuer: cloudflare-issuer
tls:
- secretName: "{{ service_name }}-tls"
hosts:
- "{{ service_name }}.{{ domain }}"
persistence:
enabled: true
admin:
existingSecret: "{{ service_name }}-admin"
userKey: admin-user
    passwordKey: admin-password
smtp:
existingSecret: "{{ service_name }}-smtp"
userKey: smtp-user
passwordKey: smtp-password
grafana.ini:
analytics:
check_for_updates: true
smtp:
enabled: true
host: "smtp.gmail.com:587"
port: "587"
skip_verify: false
from_address: "grafana@{{ domain }}"
plugins:
- grafana-piechart-panel
- grafana-worldmap-panel
- grafana-clock-panel
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://prometheus-server.prometheus
access: proxy
isDefault: true
- name: Loki
type: loki
url: http://loki.loki:3100
access: proxy
- name: InfluxDB
type: influxdb
url: http://influxdb.influxdb:8086
access: proxy
database: influxdb
- name: Varken
type: influxdb
url: http://influxdb.influxdb:8086
access: proxy
database: varken
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
- name: 'unifi'
orgId: 1
folder: 'Unifi'
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/unifi
- name: 'media'
orgId: 1
folder: 'Media'
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/media
dashboards:
default:
prometheus:
gnetId: 2
revision: 2
datasource: Prometheus
traefik:
gnetId: 4475
revision: 5
datasource: Prometheus
logs:
gnetId: 15141
revision: 1
datasource: Loki
pihole:
gnetId: 10176
revision: 2
datasource: Prometheus
unifi:
client-insights:
gnetId: 11315
revision: 8
datasource: Prometheus
uap-insights:
gnetId: 11314
revision: 9
datasource: Prometheus
network-sites:
gnetId: 11311
revision: 4
datasource: Prometheus
usw-insights:
gnetId: 11312
revision: 8
datasource: Prometheus
usg-insights:
gnetId: 11313
revision: 8
datasource: Prometheus
client-dpi:
gnetId: 11310
revision: 4
datasource: Prometheus
media:
varken-official:
gnetId: 9585
revision: 5
datasource: Varken
varken-unofficial:
gnetId: 9558
revision: 15
datasource: Varken
namespaceOverride: "{{ service_name }}"
|
ansible/host_vars/grafana/values.yml
|
--- !<SKIN>
contentType: "SKIN"
firstIndex: "2018-12-25 23:25"
game: "Unreal Tournament"
name: "FragManiacs"
author: "Unknown"
description: "None"
releaseDate: "1999-11"
attachments:
- type: "IMAGE"
name: "FragManiacs_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_2.png"
- type: "IMAGE"
name: "FragManiacs_shot_5.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_5.png"
- type: "IMAGE"
name: "FragManiacs_shot_13.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_13.png"
- type: "IMAGE"
name: "FragManiacs_shot_7.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_7.png"
- type: "IMAGE"
name: "FragManiacs_shot_6.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_6.png"
- type: "IMAGE"
name: "FragManiacs_shot_10.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_10.png"
- type: "IMAGE"
name: "FragManiacs_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_1.png"
- type: "IMAGE"
name: "FragManiacs_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_3.png"
- type: "IMAGE"
name: "FragManiacs_shot_9.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_9.png"
- type: "IMAGE"
name: "FragManiacs_shot_4.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_4.png"
- type: "IMAGE"
name: "FragManiacs_shot_12.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_12.png"
- type: "IMAGE"
name: "FragManiacs_shot_8.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_8.png"
- type: "IMAGE"
name: "FragManiacs_shot_11.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/F/FragManiacs_shot_11.png"
originalFilename: "fragmaniac.zip"
hash: "6b74d3237b1abbbd557a565011f630cddfed6a38"
fileSize: 2257883
files:
- name: "SoldierSkins_FR.utx"
fileSize: 4425036
hash: "2ad7e2ba9fe01adbfed0c5b78324e547999442fe"
otherFiles: 3
dependencies: {}
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Skins/F/fragmaniac.zip"
main: true
repack: false
state: "OK"
- url: "https://gamefront.online/files2/service/thankyou?id=1420063"
main: false
repack: false
state: "MISSING"
- url: "http://www.ut-files.com/index.php?dir=Skins/SkinsF/&file=fragmaniac.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Skins/Misc/SkinsF/fragmaniac.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Skins/SkinPacks/fragmaniac.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Skins/&file=fragmaniac.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Skins/F/6/b/74d323/fragmaniac.zip"
main: false
repack: false
state: "OK"
- url: "http://ut-files.com/index.php?dir=Skins/SkinsF/&file=fragmaniac.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Skins/F/6/b/74d323/fragmaniac.zip"
main: false
repack: false
state: "OK"
deleted: false
skins:
- "FragManiacs"
- "Punisher"
- "<NAME>"
- "DareDevil"
- "Flash"
- "Hulk"
- "Spiderman"
faces:
- "Mel"
- "Sean"
- "Ian"
- "Jim"
- "Gary"
- "Guest"
- "Punisher"
- "<NAME>"
- "DareDevil"
- "Flash"
- "Hulk"
- "Spiderman"
model: "Unknown"
teamSkins: true
|
content/Unreal Tournament/Skins/F/6/b/74d323/fragmaniacs_[6b74d323].yml
|
version: '3'
services:
example_app:
container_name: example_app
restart: always
build: .
networks:
- web
labels:
- "traefik.http.routers.example.rule=Host(`app.your.domain`)"
- "traefik.http.routers.example.entrypoints=websecure"
- "traefik.http.routers.example.service=example"
- "traefik.http.services.example.loadbalancer.server.port=5005"
- "traefik.docker.network=web"
- "traefik.http.routers.example.tls=true"
example_traefik:
container_name: "example_traefik"
image: "traefik:latest"
restart: always
command:
- "--entrypoints.web.address=:80"
- "--entrypoints.websecure.address=:443"
- "--providers.docker=true"
- "--providers.docker.exposedbydefault=true"
- "--api.dashboard=true"
- "--certificatesresolvers.myresolver.acme.httpchallenge=true"
- "--certificatesresolvers.myresolver.acme.httpchallenge.entrypoint=web"
- "--certificatesresolvers.myresolver.acme.caserver=https://acme-v01.api.letsencrypt.org/directory"
- "--certificatesresolvers.myresolver.acme.email=<EMAIL>"
- "--certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json"
ports:
- "80:80"
- "443:443"
networks:
- web
volumes:
- "./letsencrypt:/letsencrypt"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
labels:
# Dashboard
- "traefik.http.routers.traefik.rule=Host(`your.domain`)"
- "traefik.http.routers.traefik.service=api@internal"
- "traefik.http.routers.traefik.entrypoints=websecure"
- "traefik.http.routers.traefik.tls=true"
- "traefik.http.routers.traefik.tls.certresolver=myresolver"
      # global redirect to https
      - "traefik.http.routers.http-catchall.rule=hostregexp(`{host:.+}`)"
      - "traefik.http.routers.http-catchall.entrypoints=web"
      - "traefik.http.routers.http-catchall.middlewares=redirect-to-https"
      # the referenced middleware must be defined, otherwise the catch-all router fails to load
      - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
networks:
web:
external: true
|
docker-compose-traefik.yml
|
name: minimal_BUILD
on:
pull_request:
branches:
- master
push:
branches:
- master
paths:
- '.github/workflows/build_for_minimal.yml'
- '!**.md'
schedule:
- cron: 0 10 * * *
# watch:
# types: [started]
# ┌───────────── minute (0 - 59)
# │ ┌───────────── hour (0 - 23)
# │ │ ┌───────────── day of the month (1 - 31)
# │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
# │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
# │ │ │ │ │
# │ │ │ │ │
# │ │ │ │ │
# * * * * *
env:
REPO_URL: https://github.com/micropython/micropython.git
REPO_BRANCH: master
CUSTOM_MPCONFIGPORT: true
jobs:
build:
name: Build
runs-on: ubuntu-18.04
steps:
- name: Checkout this repos
uses: actions/checkout@v2
- name: Install the depend
run: |
sudo apt-get update
sudo apt-get install gcc-multilib
sudo apt-get install gcc-arm-none-eabi
- name: Clone the micropython source code
run: |
          git clone --depth 1 $REPO_URL -b $REPO_BRANCH my_micropython
          cd my_micropython
          git submodule update --init
- name: Build the mpy-cross tool
working-directory: ./my_micropython/mpy-cross
run: |
make
- name: Build the unix port
id: unix_port
run: |
cd my_micropython/ports/unix
make submodules
make
find . -name "*micropython*"
MOREF=$(./micropython -c "import sys;print('v{0:d}_{1:d}'.format(sys.implementation[1][0], sys.implementation[1][1]))")
echo $MOREF
echo "::set-output name=version::$MOREF"
echo "::set-output name=gitid::$(git rev-parse --short HEAD)"
- name: Custom the mpconfigport.h file
working-directory: ./my_micropython/ports/minimal/
run: |
echo "#define MODULE_XXX_ENABLED (1)" >> mpconfigport.h
- name: Get the datetime
id: get_datetime
uses: ccccmagicboy/get_datetime@master
with:
tz1: 'Asia'
tz2: 'Shanghai'
- name: Build the minimal port linux
working-directory: ./my_micropython/ports/minimal
run: |
make clean
make
ls ./build -al
file ./build/firmware.elf
- name: Build the minimal port arm
working-directory: ./my_micropython/ports/minimal
run: |
make clean
make CROSS=1
ls ./build -al
file ./build/firmware.elf
|
.github/workflows/build_for_minimal.yml
|
l_english:
estate_magnates:0 "Magnates"
magnates_loyalty_modifier:0 "Magnates Loyalty Equilibrium"
magnates_influence_modifier:0 "Magnates Influence"
estate_magnates_desc:0 "Polish Magnates were owners of huge fields and folwarks. They had big influence on ruler and other estates."
estate_magnates_lower_recruitment:0 "Reduce the Amount of Soldiers"
estate_magnates_lower_recruitment_desc:0 "Reduce the Amount of Soldiers in your army."
estate_magnates_hire_advisor:0 "Hire Advisor"
estate_magnates_hire_advisor_desc:0 "Hire administrative advisor to your court."
estate_magnates_ownership:0 "Magnates Land Ownership"
estate_magnates_land_rights:0 "Magnates Land Rights"
estate_magnates_land_rights_desc:0 ""
estate_magnates_folwarks:0 "Folwarks"
estate_magnates_folwarks_desc:0 ""
estate_magnates_production_law:0 "Production Law"
estate_magnates_production_law_desc:0 ""
estate_magnates_supremacy_over_szlachta:0 "Supremacy Over Szlachta"
estate_magnates_supremacy_over_szlachta_desc:0 ""
estate_magnates_free_printing_rights:0 "Free Printing Rights"
estate_magnates_free_printing_rights_desc:0 ""
estate_magnates_royal_advisors:0 "Royal Advisors"
estate_magnates_royal_advisors_desc:0 ""
estate_magnates_curtail_cossacks:0 "Curtail Cossacks"
estate_magnates_curtail_cossacks_desc:0 ""
estate_magnates_priviliges_and_promises:0 "After election privileges for Magnates"
estate_magnates_priviliges_and_promises_desc:0 ""
estate_magnates_anti_absolutism:0 "Anti-Absolutism Edict"
estate_magnates_anti_absolutism_desc:0 ""
estate_nobles_priviliges_and_promises:0 "After election privileges for Szlachta"
estate_nobles_priviliges_and_promises_desc:0 ""
WFAS_estate_nomadic_tribes_focus_on_infantry_quality:0 "Focus on Infantry Quality"
WFAS_estate_nomadic_tribes_focus_on_infantry_quality_desc:0 ""
WFAS_estate_nomadic_tribes_focus_on_cavalry_quality:0 "Focus on Cavalry Quality"
WFAS_estate_nomadic_tribes_focus_on_cavalry_quality_desc:0 ""
WFAS_estate_nomadic_tribes_heathens_tolerance_policy:0 "Heathens Tolerance Policy"
WFAS_estate_nomadic_tribes_heathens_tolerance_policy_desc:0 ""
|
localisation/WFAS_estate_l_english.yml
|
uid: "com.microsoft.azure.sdk.iot.service.ServiceAuthenticationWithSharedAccessPolicyKey"
fullName: "com.microsoft.azure.sdk.iot.service.ServiceAuthenticationWithSharedAccessPolicyKey"
name: "ServiceAuthenticationWithSharedAccessPolicyKey"
nameWithType: "ServiceAuthenticationWithSharedAccessPolicyKey"
summary: "Extend AuthenticationMethod class, provide getters for protected properties and implement populate function to set ServiceAuthenticationWithSharedAccessPolicyKey type policy on given IotHubConnectionString object."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
- "<xref href=\"com.microsoft.azure.sdk.iot.service.AuthenticationMethod\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "com.microsoft.azure.sdk.iot.service.AuthenticationMethod.populate(com.microsoft.azure.sdk.iot.service.IotHubConnectionString)"
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public class ServiceAuthenticationWithSharedAccessPolicyKey extends AuthenticationMethod"
constructors:
- "com.microsoft.azure.sdk.iot.service.ServiceAuthenticationWithSharedAccessPolicyKey.ServiceAuthenticationWithSharedAccessPolicyKey(java.lang.String,java.lang.String)"
methods:
- "com.microsoft.azure.sdk.iot.service.ServiceAuthenticationWithSharedAccessPolicyKey.getKey()"
- "com.microsoft.azure.sdk.iot.service.ServiceAuthenticationWithSharedAccessPolicyKey.getPolicyName()"
- "com.microsoft.azure.sdk.iot.service.ServiceAuthenticationWithSharedAccessPolicyKey.populate(com.microsoft.azure.sdk.iot.service.IotHubConnectionString)"
- "com.microsoft.azure.sdk.iot.service.ServiceAuthenticationWithSharedAccessPolicyKey.setKey(java.lang.String)"
- "com.microsoft.azure.sdk.iot.service.ServiceAuthenticationWithSharedAccessPolicyKey.setPolicyName(java.lang.String)"
type: "class"
metadata: {}
package: "com.microsoft.azure.sdk.iot.service"
artifact: com.microsoft.azure.sdk.iot:iot-service-client-preview:1.2.0
|
preview/docs-ref-autogen/com.microsoft.azure.sdk.iot.service.ServiceAuthenticationWithSharedAccessPolicyKey.yml
|
cs:
plugins:
refinery_users:
title: Uživatelé
description: Spravovat uživatele
admin:
users:
delete: Smazat tohoto uživatele
edit: Editovat tohoto uživatele
update:
cannot_remove_user_plugin_from_current_user: Nemůžete odstranit 'Users' plugin z aktuálně přihlášeného konta.
form:
        blank_password_keeps_current: Pokud necháte toto pole prázdné, bude zachováno stávající heslo
        enable_all: povolit vše
index:
create_new_user: Přidat nového uživatele
user:
email_user: Poslat uživateli email
preview: (%{who}) přidán %{created_at}
sessions:
new:
hello_please_sign_in: Prosím přihlašte se
sign_in: Přihlášení
forgot_password: Zapomenuté heslo
users:
setup_website_name_html: Dejte vašemu webu jméno. <a href='%{link}' name='%{title}'>Zde</a> můžete jméno editovat
new:
fill_form: Vyplňte údaje níže a můžeme začít.
sign_up: Registrace
create:
welcome: Vítejte %{who}
forgot:
email_address: Emailová adresa
enter_email_address: Prosím zadejte emailovou adresu.
      reset_password: Resetovat heslo
blank_email: Musíte zadat emailovou adresu.
      email_not_associated_with_account_html: Email '%{email}' není asociován s žádným uživatelským kontem.<br />Jste si jisti, že jste použili správnou adresu?
email_reset_sent: Byl vám odeslán email s odkazem na resetování hesla.
reset:
      code_invalid: Omlouváme se, ale kód pro resetování hesla je buď chybný nebo expirovaný. Zkuste zkopírovat adresu pro resetování hesla z emailu, který jste obdržel/a, nebo si nechejte email zaslat znovu.
successful: Heslo bylo úspěšně resetováno pro '%{email}'
pick_new_password_for: Zadejte nové heslo pro %{email}
      reset_password: Resetovat heslo
user_mailer:
reset_notification:
subject: Odkaz pro reset hesla
activerecord:
models:
user: uživatel
attributes:
user:
login: Přihlásit
email: Email
        password: Heslo
remember_me: Zapamatovat heslo
|
authentication/config/locales/cs.yml
|
---
result: ABORTED
url: http://manhattan.ci.chef.co/job/chefdk-trigger-ad_hoc/407/
failure_category: network
failure_cause: 'aborted: acceptance; worker disconnected: mac_os_x-10.11: chefdk-test'
timestamp: 2016-07-07 20:44:56 UTC
duration: 59m58s
triggered_by: tyler-ball
active_duration: 59m41s
parameters:
GIT_REF: rspec_rollback
EXPIRE_CACHE: false
change:
git_remote: https://github.com/chef/chef-dk.git
git_commit: <PASSWORD>
project: chefdk
version: 0.16.23+20160707204507
stages:
chefdk-test:
result: ABORTED
failure_category: network
failure_cause: 'aborted: acceptance; worker disconnected: mac_os_x-10.11'
url: http://manhattan.ci.chef.co/job/chefdk-test/256/
duration: 32m35s
runs:
acceptance:
result: ABORTED
failure_category: aborted
failure_cause: aborted
failed_in:
step: CHEF-ACCEPTANCE TOP-COOKBOOKS::PROVISION
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=acceptance,project=chefdk,role=tester/256/
duration: 32m35s
mac_os_x-10.11:
result: FAILURE
failure_category: network
failure_cause: worker disconnected
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=mac_os_x-10.11,project=chefdk,role=tester/256/
duration: 1m35s
windows-2008r2-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=i386,platform=windows-2008r2,project=chefdk,role=tester/256/
duration: 8m39s
debian-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=debian-6,project=chefdk,role=tester/256/
duration: 2m52s
debian-7:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=debian-7,project=chefdk,role=tester/256/
duration: 2m15s
debian-8:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=debian-8,project=chefdk,role=tester/256/
duration: 3m39s
el-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=el-6,project=chefdk,role=tester/256/
duration: 4m18s
el-7:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=el-7,project=chefdk,role=tester/256/
duration: 2m28s
mac_os_x-10.10:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=mac_os_x-10.10,project=chefdk,role=tester/256/
duration: 2m12s
mac_os_x-10.9:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=mac_os_x-10.9,project=chefdk,role=tester/256/
duration: 2m24s
ubuntu-12.04:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=ubuntu-12.04,project=chefdk,role=tester/256/
duration: 2m15s
ubuntu-14.04:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-test/architecture=x86_64,platform=ubuntu-14.04,project=chefdk,role=tester/256/
duration: 2m42s
chefdk-build:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-build/616/
duration: 27m2s
runs:
windows-2008r2-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=i386,platform=windows-2008r2,project=chefdk,role=builder/616/
duration: 24m26s
debian-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=debian-6,project=chefdk,role=builder/616/
duration: 22m13s
el-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=el-6,project=chefdk,role=builder/616/
duration: 26m56s
el-7:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=el-7,project=chefdk,role=builder/616/
duration: 17m48s
mac_os_x-10.9:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=mac_os_x-10.9,project=chefdk,role=builder/616/
duration: 9m46s
ubuntu-12.04:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=ubuntu-12.04,project=chefdk,role=builder/616/
duration: 20m42s
chefdk-trigger-ad_hoc:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/chefdk-trigger-ad_hoc/407/
duration: 2s
|
reports/manhattan.ci.chef.co/job/chefdk-trigger-ad_hoc/407.yaml
|
name: ci
on:
push:
branches:
- master
paths:
- "Cargo.toml"
- "src/**"
- ".github/workflows/**"
jobs:
build:
name: ${{ matrix.kind }} ${{ matrix.os }}
runs-on: ${{ matrix.os }}
timeout-minutes: 60
strategy:
matrix:
os: [macOS-latest, ubuntu-latest]
env:
GH_ACTIONS: true
RUST_BACKTRACE: full
DENO_BUILD_MODE: release
steps:
- name: Clone repository
uses: actions/checkout@v1
with:
submodules: recursive
- name: Install rust
uses: hecrj/setup-rust-action@v1
with:
rust-version: "1.43.1"
- name: Log versions
run: |
node -v
rustc --version
cargo --version
- name: Cache cargo registry
uses: actions/cache@v1
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v1
with:
path: ~/.cargo/git
key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo build
uses: actions/cache@v1
with:
path: target
key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
- name: Install python
uses: actions/setup-python@v1
with:
python-version: "2.7.x"
architecture: x64
- name: Add msbuild to PATH
if: matrix.os == 'windows-latest'
uses: microsoft/setup-msbuild@v1.0.0
- name: Remove Some Cache
if: matrix.os == 'windows-latest'
run: |
npm install --global --production windows-build-tools@4.0.0
rm target/release/gn_root -Recurse -ErrorAction Ignore
rm target/debug/gn_root -Recurse -ErrorAction Ignore
& "C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=14.13
- name: Build
env:
RUST_BACKTRACE: 1
run: cargo build --release
- name: Release Plugin
uses: softprops/action-gh-release@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: "release draft"
draft: true
files: |
target/release/libdeno_sass.dylib
target/release/deno_sass.dll
target/release/libdeno_sass.so
|
.github/workflows/main.yml
|
name: CI Docker Image
on: [push]
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build and push to outputs
uses: docker/build-push-action@v2
with:
context: .
file: Dockerfile
tags: andriygav/grpc-proxy:latest
outputs: type=docker,dest=/tmp/image.tar
- name: Upload artifact
uses: actions/upload-artifact@v2
with:
name: image
path: /tmp/image.tar
test:
runs-on: ubuntu-latest
needs: build
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Download artifact
uses: actions/download-artifact@v2
with:
name: image
path: /tmp
- name: Load Docker image
run: |
docker load --input /tmp/image.tar
- name: Testing
run: |
export TESTS=$(/bin/pwd)/tests
docker run -v $TESTS:/tests -v /tmp:/tmp andriygav/grpc-proxy:latest /bin/bash -c "pip install -U pytest pytest-cov && pytest /tests --cov=grpc_proxy --cov-report=xml:/tmp/coverage.xml"
- name: Upload to Codecov
uses: codecov/codecov-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: /tmp/coverage.xml
flags: unittests
name: codecov-grpcproxy
fail_ci_if_error: true
deploy:
runs-on: ubuntu-latest
needs: [build, test]
steps:
- name: Check out the repo
uses: actions/checkout@v2
- name: Docker meta
id: meta
uses: crazy-max/ghaction-docker-meta@v2
with:
images: andriygav/grpc-proxy
tags: |
type=raw,value=latest,enable=${{ endsWith(GitHub.ref, 'master') }}
type=ref,event=tag
flavor: |
latest=false
- name: Download artifact
uses: actions/download-artifact@v2
with:
name: image
path: /tmp
- name: Load Docker image
run: |
docker load --input /tmp/image.tar
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Push to Docker Hub
uses: docker/build-push-action@v2
with:
push: ${{ GitHub.event_name != 'pull_request' }}
context: .
file: Dockerfile
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
|
.github/workflows/docker.yml
|
---
version: 2.1
# ----------------
# Orbs declaration
# ----------------
orbs:
noos-ci: noosenergy/noos-ci@0.1.14
# --------------
# Pipeline tasks
# --------------
jobs:
build-image-circleci:
executor: noos-ci/default
steps:
- checkout
- noos-ci/docker-build-image:
registry-provider: docker
image-context: ./docker/circleci
image-name: circleci
image-tag: ${CIRCLE_SHA1}
build-image-dbbackup:
executor: noos-ci/default
steps:
- checkout
- noos-ci/docker-build-image:
registry-provider: docker
image-context: ./docker/dbbackup
image-name: dbbackup
image-tag: ${CIRCLE_SHA1}
build-image-jupyterlab:
executor: noos-ci/default
steps:
- checkout
- noos-ci/docker-build-image:
registry-provider: docker
image-context: ./docker/jupyterlab
image-name: jupyterlab
image-tag: ${CIRCLE_SHA1}
build-image-jupyterlab-dash:
executor: noos-ci/default
steps:
- checkout
- noos-ci/docker-build-image:
registry-provider: docker
image-context: ./docker/jupyterlab-dash
image-name: jupyterlab-dash
image-tag: ${CIRCLE_SHA1}
# -----------------
# Pipeline workflow
# -----------------
workflows:
build-image-circleci:
jobs:
- hold:
type: approval
filters: &filter-master-only
branches:
only: master
- build-image-circleci:
context: DOCKERHUB_SHARED
requires:
- hold
filters:
<<: *filter-master-only
build-image-dbbackup:
jobs:
- hold:
type: approval
filters:
<<: *filter-master-only
- build-image-dbbackup:
context: DOCKERHUB_SHARED
requires:
- hold
filters:
<<: *filter-master-only
build-image-jupyterlab:
jobs:
- hold:
type: approval
filters:
<<: *filter-master-only
- build-image-jupyterlab:
context: DOCKERHUB_SHARED
requires:
- hold
filters:
<<: *filter-master-only
build-image-jupyterlab-dash:
jobs:
- hold:
type: approval
filters:
<<: *filter-master-only
- build-image-jupyterlab-dash:
context: DOCKERHUB_SHARED
requires:
- hold
filters:
<<: *filter-master-only
|
.circleci/config.yml
|
items:
- uid: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType"
id: "WithAccessType"
parent: "com.microsoft.azure.management.network"
children:
- "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.allowAccess()"
- "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.denyAccess()"
langs:
- "java"
name: "RouteFilterRule.UpdateStages.WithAccessType"
nameWithType: "RouteFilterRule.UpdateStages.WithAccessType"
fullName: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType"
type: "Interface"
package: "com.microsoft.azure.management.network"
summary: "The stage of the route filter rule definition allowing access type of the rule."
syntax:
content: "public static interface RouteFilterRule.UpdateStages.WithAccessType"
- uid: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.allowAccess()"
id: "allowAccess()"
parent: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType"
langs:
- "java"
name: "allowAccess()"
nameWithType: "RouteFilterRule.UpdateStages.WithAccessType.allowAccess()"
fullName: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.allowAccess()"
overload: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.allowAccess*"
type: "Method"
package: "com.microsoft.azure.management.network"
  summary: "Set 'Allow' access type of the rule."
syntax:
content: "public abstract RouteFilterRule.Update allowAccess()"
return:
type: "com.microsoft.azure.management.network.RouteFilterRule.Update"
description: "the next stage of the definition"
- uid: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.denyAccess()"
id: "denyAccess()"
parent: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType"
langs:
- "java"
name: "denyAccess()"
nameWithType: "RouteFilterRule.UpdateStages.WithAccessType.denyAccess()"
fullName: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.denyAccess()"
overload: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.denyAccess*"
type: "Method"
package: "com.microsoft.azure.management.network"
summary: "Set 'Deny' access type of the rule."
syntax:
content: "public abstract RouteFilterRule.Update denyAccess()"
return:
type: "com.microsoft.azure.management.network.RouteFilterRule.Update"
description: "the next stage of the definition"
references:
- uid: "com.microsoft.azure.management.network.RouteFilterRule.Update"
name: "RouteFilterRule.Update"
nameWithType: "RouteFilterRule.Update"
fullName: "com.microsoft.azure.management.network.RouteFilterRule.Update"
- uid: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.allowAccess*"
name: "allowAccess"
nameWithType: "RouteFilterRule.UpdateStages.WithAccessType.allowAccess"
fullName: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.allowAccess"
package: "com.microsoft.azure.management.network"
- uid: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.denyAccess*"
name: "denyAccess"
nameWithType: "RouteFilterRule.UpdateStages.WithAccessType.denyAccess"
fullName: "com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.denyAccess"
package: "com.microsoft.azure.management.network"
|
docs-ref-autogen/com.microsoft.azure.management.network.RouteFilterRule.UpdateStages.WithAccessType.yml
|
Reaper:
Trail: "PARTICLE_SMOKE"
Trail_Radius: 6
Trail_Go_Through: "entities"
Space_Between_Trails: 2.0
Trail_Settings:
Speed: 0.1
Particle_Count: 3
Circle:
Maxium_Circle_Size: 2.0
Executor:
Trail: "VOID_FOG"
Trail_Catch:
Required_Distance_To_Catch: 0.5
Space_Between_Trails: 0.5
PlasmaBurst:
Trail: "MAGIC_CRIT"
Trail_Radius: 60
Trail_Go_Through: "entities"
Space_Between_Trails: 0.5
Trail_Settings:
Speed: 0.1
Particle_Count: 2
Circle:
Maxium_Circle_Size: 1.0
Flamethrower:
Trail: "FLAME"
Trail_Radius: 8
Trail_Go_Through: "entities"
Space_Between_Trails: 0.5
Trail_Settings:
Speed: 0.1
Particle_Count: 2
VoidMagic:
Trail: "WITCH_MAGIC"
Trail_Radius: 20
Trail_Go_Through: "all"
Space_Between_Trails: 4.0
Trail_Settings:
Speed: 0.1
Particle_Count: 5
Circle:
Maxium_Circle_Size: 4.0
FireTube:
Trail: "FLAME"
Trail_Radius: 10
Trail_Go_Through: "all"
Space_Between_Trails: 4.0
Trail_Settings:
Speed: 0.1
Particle_Count: 3
Circle:
Maxium_Circle_Size: 2.0
WideMagic:
Trail: "SPELL"
Trail_Radius: 5
Trail_Go_Through: "all"
Space_Between_Trails: 4.0
Trail_Settings:
Speed: 0.1
Particle_Count: 1
Circle:
Maxium_Circle_Size: 3.0
BigCloud:
Trail: "CLOUD"
Trail_Radius: 1
Trail_Go_Through: "entities"
Space_Between_Trails: 1.0
Circle:
Maxium_Circle_Size: 1.0
CloudSaber:
Trail: "INSTANT_SPELL"
Trail_Radius: 6
Trail_Go_Through: "all"
Space_Between_Trails: 4.0
Trail_Settings:
Speed: 0.1
Particle_Count: 3
Circle:
Maxium_Circle_Size: 4.0
Arch_Sword:
Trail: "LARGE_SMOKE"
Trail_Radius: 6
Trail_Go_Through: "all"
Space_Between_Trails: 4.0
Trail_Settings:
Speed: 0.1
Particle_Count: 1
Circle:
Maxium_Circle_Size: 4.0
Arch_Sword2:
Trail: "WITCH_MAGIC"
Trail_Radius: 6
Trail_Go_Through: "all"
Space_Between_Trails: 4.0
Trail_Settings:
Speed: 0.1
Particle_Count: 3
Circle:
Maxium_Circle_Size: 4.0
|
CrackShotPlus/trails/T_Unique.yml
|
ID: ID
Office: Офисы
Locale: Локаль
Actions: Действия
Created at: Дата создания
Updated at: Дата изменения
admin.id: ID
admin.admin: Админ
admin.name: Название
admin.alias: Алиас
admin.base: По-умолчанию
admin.state: Статус
admin.locale: Локаль
admin.locale_list: Локали
admin.common: Общее
admin.position: Позиция
admin.office: Офисы
admin.office_list: Офисы
admin.protocol: Протокол
admin.host: Доменное имя офиса
admin.related_url: URL связанного проекта
admin.default_language: Язык по-умолчанию
admin.recognize_language: Офис распознаётся по языку
admin.available_languages: Доступные языки
admin.recognize_language_disabled: -- Отключено --
admin.office_data: Данные офиса
admin.office_name: Название офиса
admin.placeholder_office_url: 'URL офиса с протоколом, сегментом языка и без слеша на конце.<br /><span style="padding-left: 80px;">Например: <b>http://example.com/ru</b></span>'
admin.placeholder_office_url_wolang: 'URL офиса с протоколом, без сегмента языка и без слеша на конце.<br /><span style="padding-left: 80px;">Например: <b>http://example.com</b></span>'
admin.placeholder_office_domain: 'Доменное имя офиса<br /><span style="padding-left: 80px;">Например: <b>example.com</b></span>'
admin.placeholder_related_url: 'URL связанного с офисом сайта с протоколом и без слеша на конце <br /><span style="padding-left: 80px;">Например: <b>http://related.eample.com</b></span>'
admin.currencies: Доступные валюты
admin.email: Email
admin.update_date: Изменен
admin.include_lang_in_url: Включить язык в урл
admin.error.office_locale_host_exists: Офис с локалью '{locale}' и хостом '{host}' уже существует. Алиас существующего офиса '{alias}'
admin.error.office_host_exists: Офис для хоста '{host}' уже существует. Алиас существующего офиса '{alias}'
admin.cache_clear_success: 'Кеш успешно очищен'
admin.clear_cache: 'Очистить кеш'
admin.translation_list: Перевод
admin.translations: Перевод
admin.domain: Домен
admin.all: Все
admin.link_export: Экспорт
admin.link_import: Импортировать
admin.import_success_message: Сообщения успешно сохранены
admin.source: Источник
admin.action: Действие
admin.export: Экспорт
admin.translation_export: Экспорт переводов
admin.import: Импорт
admin.translation_import: Импорт переводов
admin.language: Язык
admin.for_language: Для языка
Translation: Перевод
admin.filter.show_locale_value: Доп. язык
admin.filter.translations: Значение
admin.filter.empty_value: Пустые значения
admin.filter.location: Расположение
admin.filter.location.frontend: Фронтенд
admin.filter.location.backend: Админка
|
src/Octava/Bundle/MuiBundle/Resources/translations/OctavaMuiBundle.ru.yml
|
objects:
- apiVersion: image.openshift.io/v1
generation: 0
image:
dockerImageLayers: null
dockerImageMetadata:
ContainerConfig: {}
Created: null
Id: ''
apiVersion: '1.0'
kind: DockerImage
dockerImageMetadataVersion: '1.0'
metadata:
creationTimestamp: null
kind: ImageStreamTag
lookupPolicy:
local: false
metadata:
creationTimestamp: null
labels:
app: {{.application}}
name: {{.serviceName}}:5.7
tag:
annotations: null
from:
kind: DockerImage
name: 172.30.1.1:5000/openshift/mysql:5.7
generation: null
importPolicy: {}
name: '5.7'
referencePolicy:
type: ''
- apiVersion: apps.openshift.io/v1
kind: DeploymentConfig
metadata:
creationTimestamp: null
labels:
app: {{.application}}
name: {{.serviceName}}
spec:
replicas: 1
selector:
app: {{.application}}
deploymentconfig: {{.serviceName}}
strategy:
resources: {}
template:
metadata:
creationTimestamp: null
labels:
app: {{.application}}
deploymentconfig: {{.serviceName}}
spec:
containers:
- image: 172.30.1.1:5000/openshift/mysql:5.7
name: {{.serviceName}}
ports:
- containerPort: 3306
protocol: TCP
resources: {}
test: false
triggers:
- type: ConfigChange
- imageChangeParams:
automatic: true
containerNames:
- {{.serviceName}}
from:
kind: ImageStreamTag
name: mysql:5.7
namespace: openshift
type: ImageChange
status:
availableReplicas: 0
latestVersion: 0
observedGeneration: 0
replicas: 0
unavailableReplicas: 0
updatedReplicas: 0
- apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
app: {{.application}}
name: {{.serviceName}}
spec:
ports:
- name: 3306-tcp
port: 3306
protocol: TCP
targetPort: 3306
selector:
app: {{.application}}
deploymentconfig: {{.serviceName}}
status:
loadBalancer: {}
parameters: []
apiVersion: template.openshift.io/v1
kind: Template
|
creator/src/main/resources/META-INF/catalog/app-images/resources/mysql.yaml
|
uid: "com.azure.resourcemanager.loganalytics.LogAnalyticsManager"
fullName: "com.azure.resourcemanager.loganalytics.LogAnalyticsManager"
name: "LogAnalyticsManager"
nameWithType: "LogAnalyticsManager"
summary: "Entry point to LogAnalyticsManager. Operational Insights Client."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public final class LogAnalyticsManager"
methods:
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.authenticate(com.azure.core.credential.TokenCredential,com.azure.core.management.profile.AzureProfile)"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.availableServiceTiers()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.clusters()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.configure()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.dataExports()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.dataSources()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.deletedWorkspaces()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.gateways()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.intelligencePacks()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.linkedServices()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.linkedStorageAccounts()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.managementGroups()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.operationStatuses()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.operations()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.savedSearches()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.schemas()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.serviceClient()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.sharedKeysOperations()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.storageInsightConfigs()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.tables()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.usages()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.workspacePurges()"
- "com.azure.resourcemanager.loganalytics.LogAnalyticsManager.workspaces()"
type: "class"
metadata: {}
package: "com.azure.resourcemanager.loganalytics"
artifact: com.azure.resourcemanager:azure-resourcemanager-loganalytics:1.0.0-beta.1
|
preview/docs-ref-autogen/com.azure.resourcemanager.loganalytics.LogAnalyticsManager.yml
|
wake_on_lan:
binary_sensor:
- name: Gaming Room Gaming PC Power State
platform: ping
host: !secret GAMING_ROOM_GAMING_PC_IP
scan_interval: 20
script:
gaming_room_turn_on_gaming_pc:
sequence:
- service: wake_on_lan.send_magic_packet
data:
mac: !secret GAMING_ROOM_GAMING_PC_MAC
broadcast_address: !secret GAMING_ROOM_GAMING_PC_IP
gaming_room_turn_off_gaming_pc:
sequence:
- service: shell_command.turn_off_gaming_room_gaming_pc
shell_command:
turn_off_gaming_room_gaming_pc: !secret TURN_OFF_GAMING_ROOM_GAMING_PC_COMMAND
media_player:
- name: Gaming Room Universal Gaming PC
platform: universal
children:
- media_player.gaming_room_avr
- media_player.gaming_room_tv
state_template: >
{{ states('binary_sensor.gaming_room_gaming_pc_power_state') }}
commands:
turn_on:
service: script.turn_on
entity_id: script.gaming_room_turn_on_gaming_pc
turn_off:
service: script.turn_on
entity_id: script.gaming_room_turn_off_gaming_pc
volume_set:
service: media_player.volume_set
entity_id: media_player.gaming_room_avr
data_template:
volume_level: "{{ volume_level }}"
volume_up:
service: media_player.volume_up
entity_id: media_player.gaming_room_avr
volume_down:
service: media_player.volume_down
entity_id: media_player.gaming_room_avr
volume_mute:
service: media_player.volume_mute
entity_id: media_player.gaming_room_avr
data_template:
is_volume_muted: "{{ is_volume_muted }}"
select_source:
service: mqtt.publish
data_template:
topic: "/homeassistant/games/play/pc"
qos: 2
payload: "{{ source }}"
attributes:
is_volume_muted: media_player.gaming_room_avr|is_volume_muted
volume_level: media_player.gaming_room_avr|volume_level
source: PC
state: binary_sensor.gaming_room_gaming_pc_power_state
|
apps/home-assistant/src/packages/gaming_room/entities/gaming_pc.yaml
|
commonfields:
id: DocumentationAutomation
version: -1
name: DocumentationAutomation
script: |+
import yaml
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
entryID = demisto.args()['entryID']
res = demisto.executeCommand('getFilePath', {'id': entryID})
if isError(res[0]):
demisto.results(res)
sys.exit(0)
path = res[0]['Contents']['path']
yamlFile = open(path)
dataMap = yaml.safe_load(yamlFile)
yamlFile.close()
def addLines(str):
output = ''
lastDigit = 0
for i in range(len(str)):
if str[i].isdigit():
if str[i+1] == '.':
output += str[lastDigit:i] + '\n'
lastDigit = i
output += str[lastDigit:len(str)] + '\n'
return output
name = dataMap['name']
doc = ''
# Overview
doc += 'Overview:\n'
if 'overview' in demisto.args():
doc += demisto.args()['overview'] + '\n'
#Setup integration to work with Demisto
doc += '\nTo set up ' + name + ' on Demisto:'
if 'setupOnIntegration' in demisto.args():
doc += addLines(demisto.args()['setupOnIntegration']) + '\n'
#Setup integration on Demisto
doc += "To set up the integration on Demisto:\n1.Go to 'Settings > Integrations > Servers & Services'\n\
2.Locate " + name + " by searching for it using the search box on the top of the page.\n\
3.Click 'Add instance' to create and configure a new integration\n\
You should configure the following settings:\n1.Name: a textual name for the integration instance.\n"
j=2;
for i in range(len(dataMap['configuration'])):
doc += str(j) + '.' + dataMap['configuration'][i]['display'] +'\n'
j+=1;
doc += str(j) + '.Test: Checks correctness of URLs and token.' +'\n'
# Top Use-cases
doc += '\nTop Use-cases:'
if 'useCases' in demisto.args():
doc +=addLines(demisto.args()['useCases']) + '\n'
# Fetched incidents data
doc += '\nFetched incidents data:'
if 'fetchedData' in demisto.args():
doc +=addLines(demisto.args()['fetchedData']) + '\n'
# Commands
doc += '\nCommands:\n'
for i in range(len(dataMap['script']['commands'])):
doc += str(i+1) + '.Command name: ' + dataMap['script']['commands'][i]['name'] + '\n'
#Inputs
doc += 'Inputs:\n'
if len(dataMap['script']['commands'][i]['arguments']) != 0:
for j in range(len(dataMap['script']['commands'][i]['arguments'])):
doc += str(j+1)+ '.' +dataMap['script']['commands'][i]['arguments'][j]['name']
doc += ' - '+ dataMap['script']['commands'][i]['arguments'][j]['description'] + '\n'
else:
doc += '-\n'
#Context output
doc += 'Context Output:\n'
if 'outputs' in dataMap['script']['commands'][i]:
for k in range(len(dataMap['script']['commands'][i]['outputs'])):
doc += 'Path: ' + dataMap['script']['commands'][i]['outputs'][k]['contextPath'] + '\n'
doc += 'Description: ' + dataMap['script']['commands'][i]['outputs'][k]['description'] + '\n'
else:
doc += '-\n'
#Raw output:
doc += 'Raw output:\n'
doc += '\n'
# Additional info
doc += '\nAdditional information:'
if 'addInfo' in demisto.args():
doc +=addLines(demisto.args()['addInfo']) + '\n'
# Known limitations
doc += '\nKnown limitations:'
if 'limit' in demisto.args():
doc +=addLines(demisto.args()['limit']) + '\n'
# Troubleshooting
doc += '\nTroubleshooting:\n'
if 'troubleshooting' in demisto.args():
doc +=addLines(demisto.args()['troubleshooting'])
filename = name + '-documantation.txt'
demisto.results(fileResult(filename, doc))
type: python
tags: []
comment: 'Automates integration documentation'
enabled: true
args:
- name: entryID
required: true
description: War-room entry ID of sample file
- name: overview
description: Documentation overview
- name: setupOnIntegration
  description: Instructions for setting up the integration to work with Demisto.
    Number the steps by 'x.' (e.g. '1.')
- name: useCases
description: Top use-cases. Number the steps by 'x.' (i.e. '1.')
- name: troubleshooting
description: Integration troubleshooting
- name: fetchedData
description: Fetched incidents data. Number the steps by 'x.' (i.e. '1.')
- name: addInfo
description: Additional information. Number the steps by 'x.' (i.e. '1.')
- name: limit
description: Known limitations. Number the steps by 'x.' (i.e. '1.')
scripttarget: 0
dockerimage: demisto/python
|
Utils/script-DocumentationAutomation.yml
|
version: "3.8"
services:
prometheus:
image: prom/prometheus:v2.31.1
user: "1000:1000"
ports:
- "9090:9090/tcp"
networks:
- monitoring
deploy:
replicas: 1
volumes:
- ${PROMETHEUS_DATA}:/prometheus
- ${APP_CONFIG}/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- ${APP_CONFIG}/alertmanager/alerts.rules.yml:/alertmanager/alert.rules/alerts.rules.yml
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=365d'
- '--web.enable-lifecycle'
grafana:
image: grafana/grafana:8.2.6
networks:
- monitoring
user: "1000:1000"
ports:
- "3000:3000/tcp"
deploy:
replicas: 1
volumes:
- ${GRAFANA_DATA}:/var/lib/grafana
- ${APP_CONFIG}/grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml
- ${APP_CONFIG}/grafana/dashboards.yml:/etc/grafana/provisioning/dashboards/dashboards.yml
- ${APP_CONFIG}/grafana/grafana.ini:/etc/grafana/grafana.ini
- ${APP_CONFIG}/grafana/dashboards:/var/lib/grafana/dashboards
blackbox:
image: prom/blackbox-exporter:v0.19.0
networks:
- monitoring
ports:
- "9115:9115/tcp"
deploy:
replicas: 1
placement:
constraints:
- node.labels.monitor == true
volumes:
- ${APP_CONFIG}/blackbox/blackbox.yml:/etc/blackbox_exporter/config.yml
alertmanager:
image: prom/alertmanager:v0.23.0
networks:
- monitoring
ports:
- "9093:9093/tcp"
deploy:
replicas: 1
volumes:
- ${APP_CONFIG}/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml
command:
- '--config.file=/etc/alertmanager/alertmanager.yml'
- '--log.level=debug'
loki:
image: grafana/loki:2.4.0
user: "1000:1000"
ports:
- "3100:3100"
command:
- '--config.file=/etc/loki/config.yaml'
volumes:
- ${LOKI_DATA}:/tmp/loki
- ${APP_CONFIG}/loki/config.yaml:/etc/loki/config.yaml
networks:
- monitoring
networks:
monitoring:
external: true
|
prometheus-grafana/docker-compose.yml
|
version: "3.2"
networks:
ffc:
name: ffc-network
services:
# loki:
# image: grafana/loki:2.3.0
# container_name: grafana-loki
# restart: always
# ports:
# - "3100:3100"
# command: -config.file=/etc/loki/local-config.yaml
# networks:
# - ffc
# profiles:
# - costeffective
# - recommended
# - development
# - developmentportal
# - developmentapi
# promtail:
# image: grafana/promtail:2.3.0
# container_name: grafana-promtail
# restart: always
# volumes:
# - /var/log:/var/log
# command: -config.file=/etc/promtail/config.yml
# networks:
# - ffc
# profiles:
# - costeffective
# - recommended
# - development
# - developmentportal
# - developmentapi
# grafana:
# image: grafana/grafana:latest
# container_name: grafana
# restart: always
# ports:
# - "3000:3000"
# environment:
# - GF_SECURITY_ADMIN_USER=admin
# - GF_SECURITY_ADMIN_PASSWORD=<PASSWORD>
# networks:
# - ffc
# profiles:
# - docker
# - development
# - developmentportal
# - developmentapi
rabbitmq:
image: rabbitmq:3.9-management
container_name: rabbitmq
restart: always
hostname: rabbitmq
    ports:
      - "15672:15672"
      - "5672:5672"
volumes:
- ./data:/var/lib/rabbitmq
networks:
- ffc
profiles:
- docker
- development
- backend
mongodb:
image: mongo
container_name: mongodb
restart: always
ports:
      - "27017:27017"
environment:
- MONGO_INITDB_ROOT_USERNAME=admin
- MONGO_INITDB_ROOT_PASSWORD=password
- MONGO_INITDB_DATABASE=featureflagsco
volumes:
- ./docker-entrypoint-initdb.d/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro
networks:
- ffc
profiles:
- docker
- development
mongo-express:
image: mongo-express
container_name: mongo-express
ports:
      - "8081:8081"
environment:
- ME_CONFIG_MONGODB_ADMINUSERNAME=admin
- ME_CONFIG_MONGODB_ADMINPASSWORD=password
- ME_CONFIG_MONGODB_SERVER=mongodb
restart: unless-stopped
networks:
- ffc
profiles:
- docker
- development
mssql:
image: "mcr.microsoft.com/mssql/server:2017-latest"
container_name: sqldb
restart: always
ports:
      - "1433:1433"
volumes:
- /var/opt/mssql
# we copy our scripts onto the container
- ./docker-entrypoint-initdb.d/sql:/usr/src/app
working_dir: /usr/src/app
    # run the entrypoint.sh that will import the data, and start sqlserver
command: sh -c ' chmod +x ./entrypoint.sh; ./entrypoint.sh & /opt/mssql/bin/sqlservr;'
environment:
- ACCEPT_EULA=Y
- SA_PASSWORD=<PASSWORD>
- MSSQL_PID=Express
networks:
- ffc
profiles:
- docker
- development
elasticsearch:
image: "docker.elastic.co/elasticsearch/elasticsearch:7.14.0"
container_name: elasticsearch
environment:
- xpack.security.enabled=false
- discovery.type=single-node
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
cap_add:
- IPC_LOCK
    ports:
      - "9200:9200"
      - "9300:9300"
networks:
- ffc
profiles:
- docker
- development
es-kibana:
image: "docker.elastic.co/kibana/kibana:7.14.0"
environment:
SERVER_NAME: kibana.example.org
ELASTICSEARCH_URL: http://elasticsearch:9200
    ports:
      - "5601:5601"
networks:
- ffc
profiles:
- docker
- development
redis:
image: 'bitnami/redis:latest'
environment:
- ALLOW_EMPTY_PASSWORD=yes
networks:
- ffc
    ports:
      - "6379:6379"
profiles:
- docker
- development
- backend
api:
build:
context: ../FeatureFlagsCo.APIs
dockerfile: ../FeatureFlagsCo.APIs/Dockerfiles/Recommended/Dockerfile
image: ffc-api
container_name: api
    ports:
      - "5001:5001"
networks:
- ffc
profiles:
- docker
portal:
build:
context: ../FeatureFlagsCo.Portal
dockerfile: Dockerfile
args:
API_URL: http://localhost:5001
STATISTIC_URL: http://localhost:3000
image: ffc-portal
container_name: portal
    ports:
      - "4200:80"
restart: unless-stopped
profiles:
- docker
|
FeatureFlagsCo.Docker/docker-compose.yaml
|
globals:
last: c60
includes:
- from: c60.png
to: images/c60.png
pages:
- _id: c60
_path: 60.html
_template: page.html
prev: c59
next: c61
datetime: 2010-11-01 05:00:00
src: images/c60.png
width: 900
height: 650
name: Ultrahazardous
description: Brown Rabbit has a hangover.
alt: Yo momma jokes are ultrahazardous.
notes: |
<p><b><date></b>. <a href="http://<domain>/7.html">I've referenced this before</a>, but see <a href="http://www.discourse.net/archives/2003/10/why_lawyers_fear_minks.html"><i>Foster v. Preston Mill Co.</i></a>, 268 P.2d 645 (Wash. 1954) and <a href="http://www.debmark.com/rabbits/faq/eatyoung.htm">this</a>. Under tort law, you are strictly liable for the consequences of "ultrahazardous" activities. In contrast, when engaged in less-than-ultra-hazardous activities, you are only liable if you are negligent and breach a standard of care. Modern tort law uses the term "inherently dangerous" rather than "ultrahazardous," but I think ultrahazardous sounds funnier.</p>
<p style="text-align:center;">And one more thing.</p>
<a name="giants" id="giants"></a>
<p style="text-align:center;"><img style="margin-left:auto; margin-right:auto;" src="http://<domain>/images/giants.png" width="670" height="250" alt="Go Giants!" /></p>
transcript: |
Brown Rabbit wakes up next to a Beer-pong / Beirut table with plastic beer cups.
Negligent Lop: I see you had a fun night. ;
Brown Rabbit: I'm holding you liable
for this headache. ;
Negligent Lop: Come again? ;
Brown Rabbit: Strictly liable. ;
-----
Brown Rabbit (off-panel): Everclear and Tort Bunny
have two things in common. ;
Brown Rabbit (off-panel): One, they are both
ultrahazardous. Two,
you brought them both
to last night's party. ;
-----
Negligent Lop: Tort Bunny isn't
ultrahazardous. ;
Brown Rabbit: She hit me in the face. Also,
her name is <NAME>. ;
Brown Rabbit has a black eye. ;
-----
Negligent Lop: Well, my name is <NAME>. ;
Brown Rabbit: If you want to dig that hole,
I will gladly hand you a shovel
once my hangover is done
hitting me on the head with it. ;
-----
Negligent Lop: You know what's inherently
dangerous? Yo momma. ;
Brown Rabbit: That's actually true. My
mother was startled by
an explosion when I was
born and instinctively
tried to eat me. ;
-----
Negligent Lop: I ... I had no idea. ;
Brown Rabbit: No one ever does. ;
revision: 60
|
meta/c60.yaml
|
---
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-trigger-ad_hoc/21/
timestamp: 2016-04-21 15:24:59 UTC
duration: 5h31s
triggered_by: scottopherson
active_duration: 1h55m39s
queue_delays: 10s
retry_delays: 3h4m29s
parameters:
GIT_REF: praj/SPOOL-142/testbuild
EXPIRE_CACHE: true
change:
git_remote: <EMAIL>:opscode/omnibus-pushy.git
git_commit: <PASSWORD>
project: opscode-push-jobs-server
version: 2.0.0-alpha.4+20160421152516.git.1.bafe516
stages:
opscode-push-jobs-server-promote:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-promote/42/
duration: 0s
opscode-push-jobs-server-test:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-test/107/
duration: 3h55m31s
active_duration: 51m1s
retries: 3
retry_delay: 3h4m29s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-test/architecture=x86_64,platform=el-5,project=opscode-push-jobs-server,role=tester/107/
duration: 51m1s
steps:
total: 51m1s
before (setup time): 8m47s
chef-client private-chef::default: 4m45s
chef-client opscode-pushy-server::default: 23s
after (cleanup time): 32m58s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-test/architecture=x86_64,platform=el-6,project=opscode-push-jobs-server,role=tester/107/
duration: 47m46s
steps:
total: 47m46s
before (setup time): 10m16s
chef-client private-chef::default: 4m42s
chef-client opscode-pushy-server::default: 25s
after (cleanup time): 28m16s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-test/architecture=x86_64,platform=el-7,project=opscode-push-jobs-server,role=tester/107/
duration: 44m21s
steps:
total: 44m21s
before (setup time): 6m1s
chef-client private-chef::default: 2m34s
chef-client opscode-pushy-server::default: 15s
after (cleanup time): 31m25s
ubuntu-10.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-test/architecture=x86_64,platform=ubuntu-10.04,project=opscode-push-jobs-server,role=tester/107/
duration: 48m9s
steps:
total: 48m9s
before (setup time): 5m58s
chef-client private-chef::default: 5m34s
chef-client opscode-pushy-server::default: 15s
after (cleanup time): 32m15s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-test/architecture=x86_64,platform=ubuntu-12.04,project=opscode-push-jobs-server,role=tester/107/
duration: 45m13s
steps:
total: 45m13s
before (setup time): 6m3s
chef-client private-chef::default: 2m32s
chef-client opscode-pushy-server::default: 17s
after (cleanup time): 32m15s
ubuntu-14.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-test/architecture=x86_64,platform=ubuntu-14.04,project=opscode-push-jobs-server,role=tester/107/
duration: 45m16s
steps:
total: 45m16s
before (setup time): 5m21s
chef-client private-chef::default: 3m19s
chef-client opscode-pushy-server::default: 16s
after (cleanup time): 32m12s
opscode-push-jobs-server-build:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-build/158/
duration: 1h4m30s
queue_delay: 10s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-build/architecture=x86_64,platform=el-5,project=opscode-push-jobs-server,role=builder/158/
duration: 57m35s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-build/architecture=x86_64,platform=el-6,project=opscode-push-jobs-server,role=builder/158/
duration: 1h4m25s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-build/architecture=x86_64,platform=el-7,project=opscode-push-jobs-server,role=builder/158/
duration: 39m2s
ubuntu-10.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-build/architecture=x86_64,platform=ubuntu-10.04,project=opscode-push-jobs-server,role=builder/158/
duration: 52m37s
opscode-push-jobs-server-trigger-ad_hoc:
result: SUCCESS
url: http://wilson.ci.chef.co/job/opscode-push-jobs-server-trigger-ad_hoc/21/
duration: 6s
|
reports/wilson.ci.chef.co/job/.deleted/opscode-push-jobs-server-trigger-ad_hoc/21.yaml
|
apiVersion: v1
kind: ConfigMap
metadata:
name: init
namespace: default
data:
init.sh: |
#!/bin/sh
while ! wget -qO /dev/null ${VAULT_ADDR}/v1/sys/health 2>/dev/null
do
echo "Waiting for vault to be ready"
sleep 1
done
if vault read auth/kubernetes/config > /dev/null; then
echo "Vault Kubernetes Auth was already configured"
exit 0
fi
echo "Configuring Vault Kubernetes Auth"
vault auth enable kubernetes
vault write auth/kubernetes/config token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" kubernetes_host="https://kubernetes.default" kubernetes_ca_cert="$(cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt)"
vault write auth/kubernetes/role/cluster-service bound_service_account_names=cluster-service bound_service_account_namespaces=giantswarm policies=cluster-service ttl=4320h
vault write auth/kubernetes/role/cert-operator bound_service_account_names="cert-operator-*" bound_service_account_namespaces=giantswarm policies=cert-operator ttl=4320h
while :
do
# 1 year
sleep 31536000
done
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vault
namespace: default
labels:
app: vault
spec:
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app: vault
template:
metadata:
labels:
app: vault
spec:
volumes:
- name: init
configMap:
name: init
restartPolicy: Always
containers:
- name: vault
image: vault:0.10.3
ports:
- name: http
containerPort: 8200
securityContext:
capabilities:
add: ["IPC_LOCK"]
args:
- server
- -dev
- -dev-root-token-id={{.Values.vault.token}}
- -log-level=debug
- name: config
image: vault:0.10.3
volumeMounts:
- mountPath: /init/
name: init
env:
- name: VAULT_ADDR
value: http://127.0.0.1:8200
- name: VAULT_TOKEN
value: "{{.Values.vault.token}}"
command:
- sh
- /init/init.sh
---
apiVersion: v1
kind: Service
metadata:
name: vault
namespace: default
labels:
app: vault
spec:
type: NodePort
ports:
- name: api
nodePort: 30010
port: 8200
targetPort: http
selector:
app: vault
sessionAffinity: None
|
helm/e2esetup-vault/templates/vault.yaml
|
items:
- uid: office.Office.Auth
summary: Office Auth 名前空間 (Office) は、Office ホストがアドインの web アプリケーションへのアクセストークンを取得できるようにするメソッドを提供します。 これにより、間接的に、サインインしたユーザーの Microsoft Graph データにアドインがアクセスできるようにもなります。ユーザーがもう一度サインインする必要はありません。
isPreview: true
name: Office.Auth
fullName: Office.Auth
langs:
- typeScript
type: interface
package: office
children:
- office.Office.Auth.getAccessTokenAsync
- office.Office.Auth.getAccessTokenAsync_1
- uid: office.Office.Auth.getAccessTokenAsync
summary: |-
Azure Active Directory V 2.0 のエンドポイントを呼び出して、アドインの Web アプリケーションへのアクセス トークンを取得します。 アドインでユーザーを識別できるようにします。 サーバー側のコードは、このトークンを使用して、 [「代理人」 OAuth フロー](https://docs.microsoft.com/azure/active-directory/develop/active-directory-v2-protocols-oauth-on-behalf-of)を使用してアドインの web アプリケーションの Microsoft Graph にアクセスすることができます。<!-- -->.
重要: Outlook では、アドインが Outlook.com または Gmail メールボックスに読み込まれている場合、この API はサポートされていません。
remarks: |-
**ホスト**: Excel、OneNote、Outlook、PowerPoint、Word
**要件セット**: 「id」 [api](https://docs.microsoft.com/en-us/office/dev/add-ins/reference/requirement-sets/identity-api-requirement-sets)
この API では、アドインを Azure アプリケーションにブリッジするシングル サインオン構成が必要です。 Office ユーザーは、組織アカウントと Microsoft アカウントを使用してサインインします。 Microsoft Azure では、Microsoft Graph のリソースにアクセスするために、両方のユーザー アカウントの種類を対象にしたトークンを返します。
#### <a name="examples"></a>例
```javascript
Office.context.auth.getAccessTokenAsync(function(result) {
if (result.status === "succeeded") {
var token = result.value;
// ...
} else {
console.log("Error obtaining token", result.error);
}
});
```
isPreview: true
name: 'getAccessTokenAsync(options, callback)'
fullName: 'getAccessTokenAsync(options, callback)'
langs:
- typeScript
type: method
syntax:
content: 'getAccessTokenAsync(options?: AuthOptions, callback?: (result: AsyncResult<string>) => void): void;'
return:
type:
- void
description: ''
parameters:
- id: options
description: 省略可能。 AuthOptions オブジェクトを受け入れて、サインオン動作を定義します。
type:
- office.Office.AuthOptions
- id: callback
description: 省略可能。 ユーザーの ID に対してトークンを解析するためのコールバックメソッドを受け入れるか、または "代理人" フローのトークンを使用して Microsoft Graph へのアクセスを取得します。 状態が "succeeded" の場合は、AsyncResult. 値は、生の AAD v です。 2.0 形式のアクセス トークンになります。
type:
- '(result: AsyncResult<string>) => void'
- uid: office.Office.Auth.getAccessTokenAsync_1
summary: |-
Azure Active Directory V 2.0 のエンドポイントを呼び出して、アドインの Web アプリケーションへのアクセス トークンを取得します。 アドインでユーザーを識別できるようにします。 サーバー側のコードは、このトークンを使用して、 [「代理人」 OAuth フロー](https://docs.microsoft.com/azure/active-directory/develop/active-directory-v2-protocols-oauth-on-behalf-of)を使用してアドインの web アプリケーションの Microsoft Graph にアクセスすることができます。<!-- -->.
重要: Outlook では、アドインが Outlook.com または Gmail メールボックスに読み込まれている場合、この API はサポートされていません。
remarks: |-
**ホスト**: Excel、OneNote、Outlook、PowerPoint、Word
**要件セット**: 「id」 [api](https://docs.microsoft.com/en-us/office/dev/add-ins/reference/requirement-sets/identity-api-requirement-sets)
この API では、アドインを Azure アプリケーションにブリッジするシングル サインオン構成が必要です。 Office ユーザーは、組織アカウントと Microsoft アカウントを使用してサインインします。 Microsoft Azure では、Microsoft Graph のリソースにアクセスするために、両方のユーザー アカウントの種類を対象にしたトークンを返します。
isPreview: true
name: getAccessTokenAsync(callback)
fullName: getAccessTokenAsync(callback)
langs:
- typeScript
type: method
syntax:
content: 'getAccessTokenAsync(callback?: (result: AsyncResult<string>) => void): void;'
return:
type:
- void
description: ''
parameters:
- id: callback
description: 省略可能。 ユーザーの ID に対してトークンを解析するためのコールバックメソッドを受け入れるか、または "代理人" フローのトークンを使用して Microsoft Graph へのアクセスを取得します。 状態が "succeeded" の場合は、AsyncResult. 値は、生の AAD v です。 2.0 形式のアクセス トークンになります。
type:
- '(result: AsyncResult<string>) => void'
|
docs/docs-ref-autogen/office/office/office.auth.yml
|
version: 2.1
jobs:
build:
machine:
image: ubuntu-1604:202007-01
working_directory: ~/project/capstone
steps:
# Initial setup
- checkout:
path: ~/project
- run:
name: "Info"
command: |
docker-compose --version
docker version
# Quick tests to fail fast on common errors
- run:
name: "Flake8"
command: |
export PYENV_VERSION=3.8.3 # use the version of python3 installed on circleci
pip install `egrep -o 'flake8==\S+' requirements.txt` # install our version of flake8
flake8
# Docker image building and caching
# This block shaves a minute or two off of the test runtime by using cached docker images.
# Otherwise we could omit this step entirely and let `docker-compose run` build what it needs to.
- restore_cache:
key: docker-images-{{ checksum "docker-compose.yml" }}
- run:
name: "Build docker images"
command: |
if test -f ~/docker-cache.tar; then
echo "Loading cached docker images"
docker load -i ~/docker-cache.tar
else
echo "Building new docker images"
docker-compose build
docker save -o ~/docker-cache.tar capstone capstone-postgres
fi
- save_cache:
key: docker-images-{{ checksum "docker-compose.yml" }}
paths:
- "~/docker-cache.tar"
# Actual tests
- run:
name: "Test"
command: |
sudo sysctl -w vm.max_map_count=262144 # for elasticsearch's bootstrap check
docker-compose up -d
docker-compose exec web ./manage.py collectstatic --noinput # collect static files
mkdir -p junit/pytest # storage for circleci test info
docker-compose exec web pytest \
--junitxml=junit/pytest/test-results.xml `# write test results so they can be displayed by circleci` \
--cov --cov-config=setup.cfg --cov-report xml `# write coverage data to .coverage for upload by codecov` \
-v
# Upload test details to circleci
- store_test_results:
path: junit
# Upload coverage to Codecov
# Recommended approach is to use an orb: https://circleci.com/blog/making-code-coverage-easy-to-see-with-the-codecov-orb/
# Currently using python package instead of orb, because of https://github.com/codecov/codecov-circleci-orb/issues/12
- run:
name: "Upload coverage"
command: |
sudo pip install codecov
codecov
# Commit built assets if necessary, then deploy via Salt reactor
- run:
name: "Deploy"
command: |
if [[ "$CIRCLE_PULL_REQUEST" == "" && "$CIRCLE_BRANCH" == "develop" ]] ; then
docker-compose exec web yarn build
if [[ `git status static/dist/ webpack-stats.json --porcelain` ]] ; then
docker-compose exec web fab update_docker_image_version
git config user.email "<EMAIL>"
git config user.name "Circle CI"
git add static/dist/ webpack-stats.json docker-compose.yml
git commit -m 'Add built JS [skip ci]'
git push origin develop || exit 1
fi
export DEPLOY_CONTENT='{"CIRCLE_BUILD_NUM":"'$CIRCLE_BUILD_NUM'","CIRCLE_SHA1":"'$CIRCLE_SHA1'","CIRCLE_BRANCH":"'$CIRCLE_BRANCH'","CIRCLE_PROJECT_REPONAME":"'$CIRCLE_PROJECT_REPONAME'","CIRCLE_PROJECT_USERNAME":"'$CIRCLE_PROJECT_USERNAME'"}' ;
export DEPLOY_SIG="sha1=`echo -n "$DEPLOY_CONTENT" | openssl sha1 -hmac $DEPLOY_KEY | sed 's/^.* //'`" ;
curl -X POST "$DEPLOY_URL" --data "$DEPLOY_CONTENT" -H "Content-Type: application/json" -H "X-Circle-Signature: $DEPLOY_SIG" ;
fi
|
.circleci/config.yml
|
---
- name: Nvidia-Docker | ensure nvidia-docker repository public key is installed
shell: "curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -"
register: result
until: result is succeeded
retries: 5
delay: 5
when: inventory_hostname in nvidia_gpu_nodes
- name: Nvidia-Docker | ensure nvidia-docker repository is enabled
shell: >
curl -s -L https://nvidia.github.io/nvidia-docker/$(. /etc/os-release;echo $ID$VERSION_ID)/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
register: result
until: result is succeeded
retries: 5
delay: 5
when: inventory_hostname in nvidia_gpu_nodes
- name: Nvidia-Docker | ensure docker is stopped
service:
name: docker
state: stopped
when: inventory_hostname in nvidia_gpu_nodes
- name: Nvidia-Docker | ensure nvidia-container-runtime is installed
  apt:
    name: nvidia-container-runtime=2.0.0+docker18.06.2-1
    update_cache: true
register: result
until: result is succeeded
retries: 5
delay: 5
when: inventory_hostname in nvidia_gpu_nodes
- name: Nvidia-Docker | ensure nvidia-docker is installed
  apt:
    name: nvidia-docker2=2.0.3+docker18.06.2-1
    update_cache: true
register: result
until: result is succeeded
retries: 5
delay: 5
when: inventory_hostname in nvidia_gpu_nodes
- name: Nvidia-Docker | ensure docker daemon.json is absent
file:
path: /etc/docker/daemon.json
state: absent
- name: Nvidia-Docker | add nvidia-container-runtime
blockinfile:
path: /etc/systemd/system/docker.service
insertafter: "ExecStart=/usr/bin/dockerd "
marker: ""
block: |
--add-runtime nvidia=/usr/bin/nvidia-container-runtime \
--default-runtime runc \
when: inventory_hostname in nvidia_gpu_nodes
- name: Nvidia-Docker | ensure docker service is reloaded
service:
name: docker
state: reloaded
when: inventory_hostname in nvidia_gpu_nodes
- name: Nvidia-Docker | ensure docker service is started and enabled
  service:
    name: docker
    enabled: true
state: started
when: inventory_hostname in nvidia_gpu_nodes
|
roles/prepare/nodes/tasks/nvidia-docker.yaml
|
---
# Develop playbook for a local vagrant instance
- hosts: develop
  become: true
become_method: sudo
roles:
- role: postgresql
postgresql_listen_addresses: "*"
postgresql_databases:
- "{{ project_name }}"
- role: DavidWittman.redis
redis_version: 4.0.2
redis_verify_checksum: true
redis_checksum: "sha256:b1a0915dbc91b979d06df1977fe594c3fa9b189f1f3d38743a2948c9f7634813"
redis_bind: 0.0.0.0
redis_port: 6379
redis_password: <PASSWORD>
redis_tcp_backlog: 511
redis_tcp_keepalive: 0
# Max connected clients at a time
redis_maxclients: 10000
redis_timeout: 0
# Socket options
# Set socket_path to the desired path to the socket. E.g. /var/run/redis/{{ redis_port }}.sock
redis_socket_path: false
redis_socket_perm: 755
## Replication options
# Set slaveof just as you would in redis.conf. (e.g. "redis01 6379")
redis_slaveof: false
# Make slaves read-only. "yes" or "no"
redis_slave_read_only: "yes"
redis_slave_priority: 100
redis_repl_backlog_size: false
## Logging
redis_logfile: '""'
# Enable syslog. "yes" or "no"
redis_syslog_enabled: "yes"
redis_syslog_ident: redis_{{ redis_port }}
# Syslog facility. Must be USER or LOCAL0-LOCAL7
redis_syslog_facility: USER
## General configuration
redis_daemonize: "yes"
redis_pidfile: /var/run/redis/{{ redis_port }}.pid
# Number of databases to allow
redis_databases: 16
redis_loglevel: notice
# Log queries slower than this many milliseconds. -1 to disable
redis_slowlog_log_slower_than: 10000
# Maximum number of slow queries to save
redis_slowlog_max_len: 128
# Redis memory limit (e.g. 4294967296, 4096mb, 4gb)
redis_maxmemory: false
redis_maxmemory_policy: noeviction
redis_rename_commands: []
# How frequently to snapshot the database to disk
# e.g. "900 1" => 900 seconds if at least 1 key changed
redis_save:
- 900 1
- 300 10
- 60 10000
redis_appendonly: "no"
redis_appendfilename: "appendonly.aof"
redis_appendfsync: "everysec"
redis_no_appendfsync_on_rewrite: "no"
redis_auto_aof_rewrite_percentage: "100"
redis_auto_aof_rewrite_min_size: "64mb"
## Redis sentinel configs
# Set this to true on a host to configure it as a Sentinel
redis_sentinel: false
- role: site
# pre_tasks are executed before roles
pre_tasks:
- include_tasks: includes/remote_host_prerequisites.yml
|
env/dev/deploy/develop.yml
|
de:
activerecord:
models:
humpyard:
element: "Element"
elements:
box_element: 'Rahmenelement'
text_element: 'Textelement'
news_element: 'Nachrichtenelement'
page: "Seite"
pages:
static_page: 'Statische Seite'
news_page: 'Nachrichtenseite'
virtual_page: 'Virtuelle Seite'
attributes:
humpyard:
elements:
shared_state: "Element teilen"
display_from: "Anzeige von"
display_until: "Anzeige bis"
box_element:
title: "Überschrift"
text_element:
content: "Inhalt"
html_content: "Inhalt (formatiert)"
news_element:
news_page: "Nachrichtenseite"
media_element:
asset: "Medium"
float: "Schwebende Ausrichtung"
uri: "Link Adresse"
pages:
title: "Überschrift"
description: "Beschreibung"
in_menu: "Im Menü anzeigen"
in_sitemap: "In der Sitemap anzeigen"
always_refresh: "Keinen Cache verwenden"
display_from: "Anzeige von"
display_until: "Anzeige bis"
  "yes": Ja
  "no": Nein
humpyard:
elements:
media_elements:
edit:
uri_description: "Die Link-Adresse funktioniert nur mit gewissen Medientypen wie Bildern."
none: "Keine"
left: "Links"
right: "Rechts"
humpyard_cms:
start: 'Humpyard %{version} laden'
login:
title: "Humpyard Anmeldung"
username: "Benutzer"
password: "<PASSWORD>"
submit: "Anmelden"
login_error: "Konnte Sie mit den angegebenen Daten nicht anmelden!"
shared_state_info:
owning_page:
"0": Ungeteilt
"1": Mit allen Geschwisterseiten geteilt
"2": Mit allen Kinderseiten geteilt
with_title:
"0": Ungeteilt
"1": Mit Geschwisterseite '%{title}' geteilt
"2": Von Elternseite '%{title}' geteilt
toolbar:
edit: "Seite bearbeiten"
pages: "Seiten verwalten"
assets: "Medien verwalten"
logout: "Abmelden"
pages:
edit:
title: "Seiten bearbeiten"
add: "%{page_type} hinzufügen"
errors:
messages:
cannot_be_after_display_until: "cannot be after 'Display until'. Set an earlier date/time OR remove 'Display until'"
cannot_be_before_display_from: "cannot be before 'Display from'. Set a later date/time OR remove 'Display from'"
|
config/locales/de.yml
|
name: Create and publish a Docker image
on:
release:
types:
- created
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}-proxy
jobs:
build-and-push-image:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v2
      - name: Get the version
        id: get_version
        # NOTE(review): the `::set-output` workflow command is deprecated by GitHub
        # Actions; migrate to appending "VERSION=..." to "$GITHUB_OUTPUT" when convenient.
        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
- name: Log in to the Container registry
uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
- name: Build and push Docker image
uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc
with:
context: kbatch-proxy
file: kbatch-proxy/docker/production/Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
- name: Set up Python 3.x
uses: actions/setup-python@v2
with:
python-version: "3.x"
- name: Install release dependencies
run: |
python -m pip install --upgrade build twine
- name: Build and publish kbatch
env:
TWINE_USERNAME: "__token__"
TWINE_PASSWORD: ${{ secrets.KBATCH_PYPI_PASSWORD }}
run: |
cd kbatch
python -m build
twine upload dist/*
- name: Build and publish kbatch-proxy
env:
TWINE_USERNAME: "__token__"
TWINE_PASSWORD: ${{ secrets.KBATCH_PROXY_PYPI_PASSWORD }}
run: |
cd kbatch-proxy
python -m build
twine upload dist/*
cd ..
|
.github/workflows/publish-image.yaml
|
actions:
1:
action: snapshot
description: >-
Store a snapshot in ${CURATOR_SNAPSHOT_REPO:logs} of the previous day's indices.
Ignore the error if the filter does not result in an actionable list of indices
(ignore_empty_list) and exit cleanly.
options:
repository: ${CURATOR_SNAPSHOT_REPO:logs}
name: sessions2-%Y%m%d%H%M%S
ignore_unavailable: False
include_global_state: True
partial: False
wait_for_completion: True
skip_repo_fs_check: False
ignore_empty_list: True
timeout_override:
continue_if_exception: True
disable_action: ${CURATOR_SNAPSHOT_DISABLED:True}
filters:
- filtertype: pattern
kind: prefix
value: sessions2-
exclude:
- filtertype: period
period_type: relative
source: ${CURATOR_TIME_SOURCE:creation_date}
timestring: '%y%m%d'
range_from: -1
range_to: -1
unit: days
2:
action: delete_indices
description: >-
Delete the oldest indices with sessions2- prefixes when their cumulative
      disk consumption is greater than ${CURATOR_DELETE_GIGS:1000000} gigabytes. Ignore
the error if the filter does not result in an actionable list of indices
(ignore_empty_list) and exit cleanly.
options:
ignore_empty_list: True
timeout_override:
continue_if_exception: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: sessions2-
exclude:
- filtertype: space
disk_space: ${CURATOR_DELETE_GIGS:1000000}
use_age: True
source: ${CURATOR_TIME_SOURCE:name}
timestring: '%y%m%d'
exclude:
3:
action: delete_indices
description: >-
Delete indices older than ${CURATOR_DELETE_COUNT:99} ${CURATOR_DELETE_UNITS:years}
(based on index name), for sessions2-prefixed indices. Ignore the error if the
filter does not result in an actionable list of indices (ignore_empty_list) and
exit cleanly.
options:
ignore_empty_list: True
timeout_override:
continue_if_exception: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: sessions2-
exclude:
- filtertype: age
source: ${CURATOR_TIME_SOURCE:name}
direction: older
timestring: '%y%m%d'
unit: ${CURATOR_DELETE_UNITS:years}
unit_count: ${CURATOR_DELETE_COUNT:99}
exclude:
4:
action: close
description: >-
Close indices older than ${CURATOR_CLOSE_COUNT:10} ${CURATOR_CLOSE_UNITS:years}
(based on index name), for sessions2-prefixed indices, to conserve memory.
Ignore the error if the filter does not result in an actionable list of indices
(ignore_empty_list) and exit cleanly.
options:
ignore_empty_list: True
timeout_override:
continue_if_exception: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: sessions2-
exclude:
- filtertype: age
source: ${CURATOR_TIME_SOURCE:name}
direction: older
timestring: '%y%m%d'
unit: ${CURATOR_CLOSE_UNITS:years}
unit_count: ${CURATOR_CLOSE_COUNT:10}
exclude:
|
curator/config/action_file.yml
|
backend:
name: github
repo: devmecha/Restaurant_Demo
branch: master # Branch to update (optional; defaults to master)
media_folder: "src/img/gallery" # Media files will be stored in the repo under images/uploads
collections:
- name: "food" # Used in routes
label: "Food Item" # Used in the UI
folder: "site/content/food" # The path to the folder where the documents are stored
create: true # Allow users to create new documents in this collection
fields: # The fields for each document, usually in front matter
- {label: "Name of Item", name: "title", widget: "string"}
- {label: "Supported Meals", name: "categories", widget: "list", default: ["lunch","dinner"]}
- {label: "Ingredients/Description", name: "description", widget: "text"}
- {label: "Type of food", name: "type", widget: "string", default: ["Tagine"]}
- {label: "Price", name: "price", widget: "string"}
- name: "drinks" # Used in routes
label: "Drinks Item" # Used in the UI
folder: "site/content/drinks" # The path to the folder where the documents are stored
create: true # Allow users to create new documents in this collection
fields: # The fields for each document, usually in front matter
- {label: "Name of Drink", name: "title", widget: "string"}
- {label: "Drink Marker", name: "categories", widget: "hidden", default: ["drinks"]}
- {label: "Ingredients/Description", name: "description", widget: "text"}
- {label: "Type of drinks", name: "type", widget: "list", default: ["Tea", "Juice", "Lemonade"]}
- {label: "Price", name: "price", widget: "string"}
- name: "gallery" # Used in routes
label: "Gallery Images" # Used in the UI
folder: "site/content/gallery" # The path to the folder where the documents are stored
create: true # Allow users to create new documents in this collection
fields: # The fields for each document, usually in front matter
- {label: "Title", name: "title", widget: "string"}
- {label: "Gallery Image", name: "image", widget: "image"}
|
site/static/admin/config.yml
|
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "dolibarr.fullname" . }}
labels:
app: {{ template "dolibarr.name" . }}
chart: {{ template "dolibarr.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "dolibarr.name" . }}
release: {{ .Release.Name }}
template:
metadata:
annotations:
checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
labels:
app: {{ template "dolibarr.name" . }}
release: {{ .Release.Name }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
readinessProbe:
httpGet:
path: /robots.txt
port: http
initialDelaySeconds: 15
periodSeconds: 10
livenessProbe:
httpGet:
path: /robots.txt
port: http
initialDelaySeconds: 30
periodSeconds: 20
ports:
- name: http
containerPort: 80
protocol: TCP
resources:
{{ toYaml .Values.resources | indent 12 }}
env:
- name: DB_USERNAME
valueFrom:
secretKeyRef:
name: {{ template "dolibarr.fullname" . }}
key: db-username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "dolibarr.fullname" . }}
key: db-password
- name: COOKIE_KEY
valueFrom:
secretKeyRef:
name: {{ template "dolibarr.fullname" . }}
key: cookie-key
volumeMounts:
- name: config
mountPath: /config
- name: documents
mountPath: /var/www/html/dolibarr/documents
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "dolibarr.fullname" . }}
- name: documents
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "dolibarr.fullname" .) }}
{{- else }}
emptyDir: {}
{{- end -}}
|
k8s/charts/dolibarr/templates/deployment.yaml
|
name: CI
# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the main branch
on:
schedule:
- cron: '0 10 * * *' # everyday at 10am
push:
branches:
- 'master'
tags:
- 'v*.*.*'
pull_request:
workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "build"
build:
# The type of runner that the job will run on
runs-on: ubuntu-latest
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
-
name: Checkout
uses: actions/checkout@v2.3.4
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
-
name: Prepare
id: prep
run: |
DOCKER_IMAGE=adamus1red/ouroboros
VERSION=noop
if [ "${{ github.event_name }}" = "schedule" ]; then
VERSION=nightly
elif [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
VERSION=latest
fi
elif [[ $GITHUB_REF == refs/pull/* ]]; then
VERSION=pr-${{ github.event.number }}
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
MINOR=${VERSION%.*}
MAJOR=${MINOR%.*}
TAGS="$TAGS,${DOCKER_IMAGE}:${MINOR},${DOCKER_IMAGE}:${MAJOR}"
elif [ "${{ github.event_name }}" = "push" ]; then
TAGS="$TAGS,${DOCKER_IMAGE}:sha-${GITHUB_SHA::8}"
fi
echo ::set-output name=version::${VERSION}
echo ::set-output name=tags::${TAGS}
echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
echo ${TAGS}
-
name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.10.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Build and push
uses: docker/build-push-action@v2.7.0
with:
context: .
file: ./Dockerfile
platforms: linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
|
.github/workflows/main.yml
|
- hosts: sonic
gather_facts: yes
tasks:
- fail: msg="need to provide topology type and vm base like topo=t0 and vm_base=VM100"
when: (topo is not defined) or (vm_base is not defined)
- fail: msg="need hwsku, interface speed, netmask and interface prefix/postfix defined to generate configuration file"
when: (hwsku is not defined) or (iface_speed is not defined) or (mgmt_subnet_mask_length is not defined)
- set_fact:
VM_topo: "{% if 'ptf' in topo %}False{% else %}True{% endif %}"
remote_dut: "{{ ansible_ssh_host }}"
template_name: "{{ 't1' if topo=='ptf32' else topo }}"
- testbed_vm_info: base_vm="{{ vm_base }}" topo="{{ topo }}"
connection: local
when: VM_topo
- name: find interface name mapping
port_alias: hwsku="{{ hwsku }}"
connection: local
- debug: var=port_alias
- name: save original minigraph file (if original file does not exist, then ignore errors)
shell: mv minigraph/{{ inventory_hostname }}.xml minigraph/{{ inventory_hostname }}.xml.orig
connection: local
ignore_errors: true
- name: create minigraph file in minigraph folder
become: true
template: src=templates/topo/{{ template_name }}.j2
dest=minigraph/{{ inventory_hostname}}.xml
connection: local
- block:
- name: saved original minigraph file (if original file may don't exist, then ignore errors)
shell: mv /etc/sonic/minigraph.xml /etc/sonic/minigraph.xml.orig
become: true
ignore_errors: true
- name: create minigraph file for SONiC device
template: src=templates/topo/{{ template_name }}.j2
dest=/etc/sonic/minigraph.xml
become: true
- name: disable automatic minigraph update if we are deploying new minigraph into SONiC
lineinfile:
name: /etc/sonic/updategraph.conf
regexp: '^enabled='
line: 'enabled=false'
become: true
# reload the device and wait it to come back
- name: Reboot is required for minigraph change
shell: sleep 2 && shutdown -r now "Ansible Create new configuration Minigraph file, triggered reboot."
async: 1
poll: 0
become: true
ignore_errors: true
- name: waiting for switch to come back
local_action:
wait_for host={{ remote_dut }}
port=22
state=started
delay=30
timeout=300
become: false
changed_when: false
when: deploy is defined and deploy|bool == true
|
ansible/config_sonic_basedon_testbed.yml
|
---
apiVersion: v1
kind: Service
metadata:
name: location-be-service-32079
labels:
app: location-be-service-32079
spec:
type: NodePort
ports:
- port: 8079
targetPort: 8079
nodePort: 32079
name: http
selector:
app: location-be-32079
---
apiVersion: v1
kind: Pod
metadata:
name: location-be-32079
labels:
app: location-be-32079
spec:
containers:
- name: location-be-service-32079-deployment
image: {{ .Values.images.locationServiceBe.repository }}:{{ .Values.images.locationServiceBe.tag }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8079
env:
- name: LISTEN_IP
value: 0.0.0.0
- name: MEP_AGENT
value: 127.0.0.1:8080
{{- if .Values.global.mepagent.enabled }}
- name: mep-agent
image: swr.ap-southeast-1.myhuaweicloud.com/edgegallery/mep-agent:latest
imagePullPolicy: Always
env:
- name: ENABLE_WAIT
value: "true"
- name: MEP_IP
value: "mep-api-gw.mep"
- name: MEP_APIGW_PORT
value: "8443"
- name: CA_CERT_DOMAIN_NAME
value: "edgegallery"
- name: CA_CERT
value: /usr/mep/ssl/ca.crt
- name: AK
valueFrom:
secretKeyRef:
name: {{ .Values.appconfig.aksk.secretname }}
key: accesskey
- name: SK
valueFrom:
secretKeyRef:
name: {{ .Values.appconfig.aksk.secretname }}
key: secretkey
- name: APPINSTID
valueFrom:
secretKeyRef:
name: {{ .Values.appconfig.aksk.secretname }}
key: appInsId
volumeMounts:
- name: mep-agent-service-config-volume
mountPath: /usr/mep/conf/app_instance_info.yaml
subPath: app_instance_info.yaml
volumes:
- name: mep-agent-service-config-volume
configMap:
name: {{ .Values.global.mepagent.configmapname }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: location-fe-service-32080
labels:
app: location-fe-service-32080
spec:
type: NodePort
ports:
- port: 8080
targetPort: 8080
nodePort: 32080
name: http
selector:
app: location-fe-32080
---
apiVersion: v1
kind: Pod
metadata:
name: location-fe-32080
labels:
app: location-fe-32080
spec:
containers:
- name: location-fe-service-32080-deployment
image: {{ .Values.images.locationServiceFe.repository }}:{{ .Values.images.locationServiceFe.tag }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
|
location_app/artifacts/location_service_deploy_mepagent.yaml
|
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-registry-config
data:
config: |
version: 0.1
log:
level: debug
fields:
service: registry
storage:
filesystem:
rootdirectory: /storage
cache:
layerinfo: inmemory
maintenance:
uploadpurging:
enabled: false
delete:
enabled: true
http:
addr: :5000
secret: placeholder
debug:
addr: localhost:5001
auth:
token:
issuer: harbor-token-issuer
realm: http://reg.mydomain.com/service/token
rootcertbundle: /etc/registry/root.crt
service: harbor-registry
notifications:
endpoints:
- name: harbor
disabled: false
url: http://ui/service/notifications
timeout: 3000ms
threshold: 5
backoff: 1s
cert: |
-----BEGIN CERTIFICATE-----
MIIE0zCCArugAwIBAgIJAP1DNMp0AU4fMA0GCSqGSIb3DQEBCwUAMAAwHhcNMTgw
MzEyMDY0MjE1WhcNMjgwMzA5MDY0MjE1WjAAMIICIjANBgkqhkiG9w0BAQEFAAOC
Ag8AMIICCgKCAgEAyyNV0qlVnXypo++tiD2dElsPKtrUVklAcB2oBcjTef99XZuK
6tJqqPruq9ptwoj9n/e641QL3NPzWaCUgk8aJHAb7MsX5eyP151yRFWsznGan67B
c/yW30HWbq6JeTRtpxo1wYtN3LPftyUrvOu9NBwglihJ45omm8XhIsq7A6ItSW9E
U/uTDSHz4kXkJUCdirHrs9nwYo6Ihnwrx+/9orSfaGTDOwIoYykIRTGKerFtcAoz
+itqk4dtp/07shGKpKmu5entChMNZcJ5F1C8hleOYZ6FMCmF/rGRvtk7KnxrqOKx
LQLZihvvhNHLWWsjMYvU5j/hcvCMP/3GYtPPERc7TEp5Bm6MUUjAmeQ6VbsshiTg
qLeXYcC6Ik+BYsLHqnpTYta9ZAEFeFwSQwYpPx7Ai8YE6s4WtnRlRqL2KOFmZZWH
YqG7VDVJeEy4AYg1Y9lzwEOtgqnIok1Axz8wzXSrAB3FezyaVq6ni94gAZX9/3Q1
eDSkGJYcPytxRppZkpY8r6ML+ywBqH/k75l6zEn3hNU/+uG/+ADYWSyqar1Y0Z4P
IUZmJ/V6VLk1uo7wLBuY6LgSdBDKliLIVcXkHfTErt7AdabksPz4uutYuwWg0WY0
uGX7h5nNCGabuswTnIKlZ5et5yYpYfhloe7K6LSNriR40ZES8jXRfxZtV9UCAwEA
AaNQME4wHQYDVR0OBBYEFO6oEX5drGvimtoNltqgF2j+bFETMB8GA1UdIwQYMBaA
FO6oEX5drGvimtoNltqgF2j+bFETMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEL
BQADggIBAJf0TovZaCC14Nscw0uG3vYXXR0u/3tBBYnX0AzT2qZSZ5XPW8+OUwbb
8ImEGGNJF9qBvySeg/O2xP7ocM6G1Q1s4LShKhePtfzC0FCAXpP17dQfAC0rkm5u
myWvUs0XHyd+5xgKbthDv+mWST6lDFcdfSe6rJferaMFfMH+QGkPb7k8z3fw0wFg
u22OvJYGp85J2iRmpFtaJyxrYtVm9N8DSRPZemYbXDx0NpF0aIA/hjKt/Zvjtc6a
xpMcTcBOpvqtLdq+8iPV9RbOT6D/TrM8y+lZO36bFFOH/KikT5YgoaAdby6TAejo
hfNjGtXi+W2ko7Oo2aRv6hTnNptJHQ3C25cMi+25mYNrRmFgsZHi51i04ADxmH6n
AGGm/xADxhs8nROCDBga2PDDG+Tw2/Zm/M+VrZtjmcW8faTY/nNYRWhzYmwHCQ1g
roLdJsPblwrUWyx4tqhJQh4daYXaRS05knl0/bIMPUAGCPcy/jpjO2LkrcGpzgut
0DMuVWiEaeZ7SFxxh4SWxI/mHtaIEOhUb1wOmkEypSopt8OtAzipkNrfttQTKRNL
B2nDPqMRCQakCH3ak6S2ThqRj+BZxhO2jPda3UQCnrnSrFyiHr+jwzCPes8+JyzD
KknrfEFMkxy/TRsAvGykgQi3XNzg8hJ+vNmMTcCQfXl8sgddBItn
-----END CERTIFICATE-----
|
make/kubernetes/registry/registry.cm.yaml
|
name: CI
on:
push:
branches:
- master
pull_request: {}
jobs:
check:
# Run `cargo check` first to ensure that the pushed code at least compiles.
runs-on: ubuntu-latest
strategy:
matrix:
rust: [1.46.0, stable]
steps:
- uses: actions/checkout@main
- uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.rust }}
profile: minimal
override: true
- name: Check
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --tests --benches
test:
needs: check
runs-on: ubuntu-latest
strategy:
matrix:
rust: [1.46.0, stable, beta]
redis-version: [6]
steps:
- uses: actions/checkout@main
- uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.rust }}
profile: minimal
override: true
# Starts Redis server needed by hitbox-redis for integration tests.
- name: Start Redis
uses: supercharge/redis-github-action@1.2.0
with:
redis-version: ${{ matrix.redis-version }}
- name: Run tests
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --all-features
- name: Generate coverage file
if: >
matrix.rust == 'stable'
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml --verbose --workspace --all-features --ignore-tests
- name: Upload to Codecov
if: >
matrix.rust == 'stable'
uses: codecov/codecov-action@v1
with:
file: cobertura.xml
clippy:
# Check for any warnings. This is informational and thus is allowed to fail.
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@main
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
components: clippy
profile: minimal
- name: Clippy
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --all-features --bins --examples --tests --benches -- -D warnings
|
.github/workflows/CI.yml
|
# Website settings
title: Maxablancas
description: Max's blog.
keywords: Max,Maxablancas,blog,Jekyll,github,gh-pages
#baseurl: /Maxr # if you have suburl as homepage like '/homepage', please change it to '/homepage' and your repo's name should be 'homepage'
url: https://Max-Robertson.github.io/Maxablancas # url should be 'http://<github_username>.github.io/homepage' as before
# url: http://127.0.0.1:4000
img_path: /static/assets/img/blog # url should be '/homepage/static/assets/img/blog' as before
# author -- index page "about me" section
author: # change following line to your infomation.
name: GitHub # Be used at blog posts' author.
first_name: Max # Be used at index page about me section.
last_name: Robertson # Be used at index page about me section.
avatar: /static/assets/img/landing/smallblob.jpeg # upload your avatar at directory and edit it.
github_username: Max-Robertson # Leave blank if you don't have now
linkedin_id: max-robertson-47bbb8139 # Leave blank if you don't have now
facebook_username: # Leave blank if you don't have now
twitter_username: maxablancas # Leave blank if you don't have now
# Leave blank if you don't have now
playstation_username: ThePretender1251
skill_icon1:
- name: fa-gamepad
- name: fas fa-ticket-alt
skill_icon2:
- name: fa-music
- name: fa-pen
desc: I have a passion for ideas, video games and the potential they hold as a creative medium. This website showcases my current skill set and tracks current projects I'm working on. Check out my Dev Diary where I'm posting updates on my journey to learn Game Design + Development. # Describe yourself.
# comment
disqus: # if you wanna comment feature, go to https://disqus.com/ to get your configuration; if not, comment following two lines.
name: Maxablancas
# analytics
## google analytics
ga: # if you wanna this feature, go to https://www.google.com/analytics/ to get your configuration; if not, comment following line.
# id: UA-93473370-3
## google optimize
go:
# id:
## growingIO
gio: # if you wanna this feature, go to https://www.growingio.com/ to get your configuration; if not, comment following line.
# id:
## donation
donation: off # type 'on'/'off' to choice display donation button at post page or not
## carousels
index_carousel: on # index page carousel switch
blog_carousel: on # blog page carousel switch
# scss
sass:
style: compressed
# Build settings
encoding: utf-8
# other settings
highlighter: rouge
markdown: kramdown
kramdown:
input: GFM
syntax_highlighter: rouge
|
_config.yml
|
- title: Animal motion capture
category: MoCap
status: Not implemented
sources:
- https://youtu.be/vJoHlCc4Y7E?t=1606
quote: "I think we probably would do some animal motion capture. It's always fun."
- title: Capture Backers faces for ingame characters
category: MoCap
status: Completed
tags:
- Official Stretch Goal
sources:
- https://robertsspaceindustries.com/comm-link/transmission/13305-Letter-From-The-Chairman-21-Million
- http://starcitizen.wikia.com/wiki/Stretch_goals#cite_ref-ID13284_13-0
- https://www.youtube.com/watch?v=2zQCrvsmzzI&feature=youtu.be&t=804
- https://forums.robertsspaceindustries.com/discussion/33152/facial-tracking-a-possibility/p1
- https://swetrust.com/forum/topic/311-do-you-want-your-face-in-the-verse/?tab=comments#comment-996
- https://www.facebook.com/Foundry42Derby/videos/190883468141025/
- https://imgur.com/ffWLyhh
quote: the technology is mobile enough to allow us to take it on the road and capture
select fans during special events!
- title: Build own motion capture studio
category: MoCap
status: Completed
tags:
- Official Stretch Goal
sources:
- https://robertsspaceindustries.com/comm-link/transmission/13048-10-Million
- https://www.youtube.com/watch?v=y78PPfs1NXc#t=303s
quote: "$10 million would allow Cloud Imperium to build our own mocap studio rather
than hiring out time from someone else. "
- title: 'Squadron 42: Capture faces for characters'
category: MoCap
status: Completed
tags:
- Official Stretch Goal
sources:
- http://starcitizen.wikia.com/wiki/Stretch_goals#cite_ref-ID13284_13-0
- title: Real-time facial motion capture for ingame avatar annimation (Live Driver/FaceOverIP)
category: MoCap
status: Not implemented
sources:
- https://youtu.be/OBAb-lska_I?t=38
- https://youtu.be/ItuiYF_lRGc?t=976
- https://www.youtube.com/watch?v=2zQCrvsmzzI&t=804
- title: Professional motion capture shoot
category: MoCap
status: Completed
sources:
- https://youtu.be/ZrKQn8u3gGk?t=695
quote: "We're gonna do a full motion capture performance capture"
|
_data/mocap.yaml
|
version: v1.0
name: Docker
agent:
machine:
type: e1-standard-2
os_image: ubuntu1804
fail_fast:
stop:
when: 'true'
blocks:
- name: Code Quality
task:
jobs:
- name: hadolint
commands:
- docker run --rm -i ghcr.io/hadolint/hadolint < Dockerfile || true
- name: shellcheck
commands:
- shellcheck *.sh
prologue:
commands:
- checkout
dependencies: []
- name: Build
task:
jobs:
- name: docker build
commands:
- 'docker build --progress=plain --target builder-image --cache-from "${DOCKERHUB_USERNAME}/webvirtmgr-docker:cache-${GIT_BRANCH_SLUG}" --tag "${DOCKERHUB_USERNAME}/webvirtmgr-docker:cache-${GIT_BRANCH_SLUG}" --build-arg BUILDKIT_INLINE_CACHE=1 .'
- 'docker build --progress=plain --target runner-image --cache-from "${DOCKERHUB_USERNAME}/webvirtmgr-docker:cache-${GIT_BRANCH_SLUG}" --cache-from "${DOCKERHUB_USERNAME}/webvirtmgr-docker:commit-${GIT_SHA_SHORT}" --tag "${DOCKERHUB_USERNAME}/webvirtmgr-docker:commit-${GIT_SHA_SHORT}" --build-arg BUILDKIT_INLINE_CACHE=1 .'
- 'docker tag "${DOCKERHUB_USERNAME}/webvirtmgr-docker:commit-${GIT_SHA_SHORT}" "${DOCKERHUB_USERNAME}/webvirtmgr-docker:branch-${GIT_BRANCH_SLUG}"'
secrets:
- name: dockerhub-secrets
prologue:
commands:
- checkout
- 'echo "${DOCKERHUB_PASSWORD}" | docker login -u "${DOCKERHUB_USERNAME}" --password-stdin'
- artifact pull workflow gitinfo_docker.env && source gitinfo_docker.env
env_vars:
- name: DOCKER_BUILDKIT
value: '1'
epilogue:
on_pass:
commands:
- 'docker push "${DOCKERHUB_USERNAME}/webvirtmgr-docker:cache-${GIT_BRANCH_SLUG}"'
- 'docker push "${DOCKERHUB_USERNAME}/webvirtmgr-docker:commit-${GIT_SHA_SHORT}"'
- 'docker push "${DOCKERHUB_USERNAME}/webvirtmgr-docker:branch-${GIT_BRANCH_SLUG}"'
dependencies:
- Git
run:
when: pull_request =~ '.*' OR branch = 'master'
- name: Git
run:
when: pull_request =~ '.*' OR branch = 'master'
dependencies: []
task:
jobs:
- name: commit infos
commands:
- 'export GIT_SHA="${SEMAPHORE_GIT_PR_SHA:-$SEMAPHORE_GIT_SHA}"'
- 'echo "GIT_SHA_SHORT=${GIT_SHA:0:8}" > gitinfo_docker.env'
- echo -n "GIT_BRANCH_SLUG=" >> gitinfo_docker.env
- 'echo -n "${SEMAPHORE_GIT_PR_BRANCH:-$SEMAPHORE_GIT_BRANCH}" | ruby -ne ''puts $_.downcase.gsub(/[^a-z0-9]/, "-")[0..62].gsub(/(\A-+|-+\z)/, "")'' >> gitinfo_docker.env'
epilogue:
always:
commands:
- cat gitinfo_docker.env
on_pass:
commands:
- artifact push workflow --expire-in 1w gitinfo_docker.env
- name: Test
dependencies:
- Build
- Code Quality
run:
when: pull_request =~ '.*' OR branch = 'master'
task:
secrets:
- name: dockerhub-secrets
prologue:
commands:
- 'echo "${DOCKERHUB_PASSWORD}" | docker login -u "${DOCKERHUB_USERNAME}" --password-stdin'
- artifact pull workflow gitinfo_docker.env && source gitinfo_docker.env
epilogue:
always:
commands:
- docker rm --force --volumes webvirtmgr || true
- docker volume rm webvirtmgr-data
jobs:
- name: docker run
commands:
- docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m webvirtmgr-data
- 'docker run -d -p 8080:8080 -p 6080:6080 -v webvirtmgr-data:/data --name webvirtmgr "${DOCKERHUB_USERNAME}/webvirtmgr-docker:commit-${GIT_SHA_SHORT}"'
- docker exec -ti --user=webvirtmgr webvirtmgr /webvirtmgr/manage.py test
promotions:
- name: Publish on DockerHub
pipeline_file: pipeline_2.yml
auto_promote:
when: branch = 'master' AND result = 'passed'
- name: Debug
pipeline_file: pipeline_3.yml
auto_promote:
when: (pull_request =~ '.*' OR branch = 'master') AND result = 'passed'
|
.semaphore/semaphore.yml
|
--- !<MAP_PACK>
contentType: "MAP_PACK"
firstIndex: "2018-11-01 07:10"
game: "Unreal"
name: "Dm Recall"
author: "<NAME> ** Norman(KWAK) **"
description: "None"
releaseDate: "2000-04"
attachments:
- type: "IMAGE"
name: "Dm_Recall_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal/MapPacks/D/Dm_Recall_shot_2.png"
- type: "IMAGE"
name: "Dm_Recall_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal/MapPacks/D/Dm_Recall_shot_1.png"
originalFilename: "dm-recall.zip"
hash: "9651130d7203babcddfdbc6d18da384e9964720c"
fileSize: 4539163
files:
- name: "DM-Recall.unr"
fileSize: 1411209
hash: "07ee02cf214faa2e281348775d2367c888e45df2"
- name: "DOM-Recall][.unr"
fileSize: 1625648
hash: "a29c19ccd494cc96a955756caa9264217f691934"
- name: "recal2.utx"
fileSize: 899360
hash: "202b380b0d57767d9c32e06d134e0c4706aca0cb"
- name: "DM-Recall][.unr"
fileSize: 1625652
hash: "61d9a98228c4e04263de8986a7000878c0be8e31"
- name: "UTtech3.utx"
fileSize: 89074
hash: "c8ecae95449e2f803c6916a7c576376d06ea3e9f"
- name: "recal.utx"
fileSize: 2906064
hash: "e0e42853c85699f60ab7e5883851c2ebed34e0bf"
otherFiles: 1
dependencies:
DOM-Recall][.unr:
- status: "OK"
name: "UTtech3"
- status: "MISSING"
name: "Scripted"
- status: "OK"
name: "recal"
- status: "OK"
name: "recal2"
- status: "MISSING"
name: "Indus7"
- status: "MISSING"
name: "Botpack"
DM-Recall][.unr:
- status: "OK"
name: "UTtech3"
- status: "MISSING"
name: "Scripted"
- status: "OK"
name: "recal"
- status: "OK"
name: "recal2"
- status: "MISSING"
name: "Indus7"
- status: "MISSING"
name: "Botpack"
DM-Recall.unr:
- status: "OK"
name: "UTtech3"
- status: "OK"
name: "recal"
- status: "OK"
name: "recal2"
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal/MapPacks/D/dm-recall.zip"
main: true
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/DeathMatch&file=dm-recall.zip"
main: false
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/DeathMatch/MapsR/&file=dm-recall.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Maps/DeathMatch/MapsR/dm-recall.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal/MapPacks/D/9/6/51130d/dm-recall.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal/MapPacks/D/9/6/51130d/dm-recall.zip"
main: false
repack: false
state: "OK"
deleted: false
maps:
- name: "DOM-Recall]["
title: "DOM Recall ]["
author: "<NAME> ** Norman(KWAK) **"
- name: "DM-Recall]["
title: "DM Recall ]["
author: "<NAME> ** Norman(KWAK) **"
- name: "DM-Recall"
title: "DM-Recall"
author: "<NAME> ** Norman(KWAK) **"
gametype: "Mixed"
themes:
Tech: 0.2
Ancient: 0.3
Natural: 0.4
Skaarj Tech: 0.1
|
content/Unreal/MapPacks/D/9/6/51130d/dm-recall_[9651130d].yml
|
name: Build Ubuntu
on:
push:
branches:
- master
pull_request: {}
jobs:
build-ubuntu:
strategy:
matrix:
config: [ 'Debug', 'Release' ]
cxx: [ 'g++-11', 'clang++-12' ]
include:
- cxx: g++-11
cc: gcc-11
name: GCC
cxxflags: -fcoroutines -mssse3
warnings: -Wall -Werror -Wno-volatile -Wno-maybe-uninitialized -Wno-deprecated-enum-enum-conversion -Wno-unknown-attributes # GLM triggers volatile, Tracy triggers maybe-unitialized, imgui triggers deprecated-enum-enum-conversion
packages: g++-11 gcc-11 libstdc++-11-dev
- cxx: clang++-12
cc: clang-12
name: Clang
cxxflags: -stdlib=libc++ -fdiagnostics-absolute-paths -mssse3
warnings: -Wall -Werror -Wno-deprecated-volatile -Wno-deprecated-enum-enum-conversion -Wno-unknown-attributes # GLM triggers deprecated-volatile, imgui triggers deprecated-enum-enum-conversion
llvm_version: 12
name: 'Ubuntu ${{ matrix.name }} ${{ matrix.config }}'
runs-on: 'ubuntu-20.04'
steps:
- uses: actions/checkout@master
with:
lfs: true
- name: Install Clang
if: ${{ matrix.llvm_version }}
uses: ./.github/actions/setup-llvm
with:
version: ${{ matrix.llvm_version }}
components: clang libc++
- name: Apt update
run: sudo apt-get -yq update
- name: Install libraries
run: sudo apt-get -yq install uuid-dev libsdl2-dev libassimp-dev libuv1-dev libunwind-dev
- name: Install packages
run: sudo apt-get -yq install cmake ${{ matrix.packages }}
if: ${{ matrix.packages }}
- name: Install Problem Matchers
uses: Trass3r/setup-cpp@v1
if: matrix.config == 'Debug'
- name: Install Ninja
uses: seanmiddleditch/gha-setup-ninja@v3
- name: Cache source dependencies
uses: actions/cache@v2
with:
path: ${{ github.workspace }}/deps
key: ${{ runner.os }}-${{ hashFiles('depends.cmake') }}
- name: Configure
env:
CXXFLAGS: '${{ matrix.cxxflags }}'
CXX: '${{ matrix.cxx }}'
CC: '${{ matrix.cc }}'
run: |
mkdir -p build
cmake -S . -B build -G Ninja -DUP_CXXFLAGS:STRING="${{ matrix.warnings }}" -DBUILD_SHARED_LIBS=OFF "-DCMAKE_BUILD_TYPE:STRING=${{ matrix.config }}" "-DFETCHCONTENT_BASE_DIR:PATH=${{ github.workspace }}/deps" ..
- name: Build
run: cmake --build build --parallel -- -k 0 -v
- name: Test
run: |
cd build
ctest -T test -R potato --verbose
- name: Build Resources
run: cmake --build build --target potato_convert_all
|
.github/workflows/build-ubuntu.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-20 10:16"
game: "Unreal Tournament 2004"
name: "DM-FuelingStation"
author: "<NAME>"
description: "This station usually fuels up the cargo ships that come and go. Today,\
\ however, it is the bloodsport of the tournament that provides fuel for the soul."
releaseDate: "2003-07"
attachments:
- type: "IMAGE"
name: "DM-FuelingStation_shot_4.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/F/DM-FuelingStation_shot_4.png"
- type: "IMAGE"
name: "DM-FuelingStation_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/F/DM-FuelingStation_shot_1.png"
- type: "IMAGE"
name: "DM-FuelingStation_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/F/DM-FuelingStation_shot_2.png"
- type: "IMAGE"
name: "DM-FuelingStation_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/F/DM-FuelingStation_shot_3.png"
originalFilename: "dm-fuelingstation.zip"
hash: "96de2b776bbf7fcfe4ea3becd02e801e3b191354"
fileSize: 3612073
files:
- name: "DM-FuelingStation.ut2"
fileSize: 8735510
hash: "a19f85d786c905598c340c98ae2b599423d61893"
otherFiles: 4
dependencies: {}
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/DeathMatch/F/dm-fuelingstation.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/DeathMatch/F/9/6/de2b77/dm-fuelingstation.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/DeathMatch/F/9/6/de2b77/dm-fuelingstation.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DM-FuelingStation"
playerCount: "8-16"
themes:
Industrial: 0.9
Ancient: 0.1
bots: true
|
content/Unreal Tournament 2004/Maps/DeathMatch/F/9/6/de2b77/dm-fuelingstation_[96de2b77].yml
|
_id: 71e70780-97e2-11e9-a7e3-717941eb3c30
message: >-
Electrodes cix.jddr.jambonkapa.github.io.nay.ik paradigms splint grasp
[URL=http://quotes786.com/tadacip/]cheapest tadacip[/URL]
[URL=http://quotes786.com/rulide/]rulide[/URL]
[URL=http://heavenlyhappyhour.com/questran--online/]questran lowest
price[/URL] [URL=http://solartechnicians.net/renova--for-sale/]renova[/URL]
[URL=http://alwaseetgulf.com/discount-viagra/]buyviagraonline.com[/URL]
[URL=http://livetvchannels.org/brand-viagra/]brand drug generic name
viagra[/URL] [URL=http://cocasinclair.com/generic-cialis-lowest-price/]buy
cialis online canada[/URL]
[URL=http://bootstrapplusplus.com/cialis/]cialis[/URL] flexors form
hypohidrosis, <a href="http://quotes786.com/tadacip/">online tadacip</a> <a
href="http://quotes786.com/rulide/">caysticum fioricet tadalafil rulide
tablets 50mg</a> <a
href="http://heavenlyhappyhour.com/questran--online/">mucus stools with
questran</a> <a href="http://solartechnicians.net/renova--for-sale/">online
renova</a> <a href="http://alwaseetgulf.com/discount-viagra/">viagra
prices</a> <a href="http://livetvchannels.org/brand-viagra/">cheapest brand
viagra</a> brand viagra <a
href="http://cocasinclair.com/generic-cialis-lowest-price/">cialis canada</a>
<a href="http://bootstrapplusplus.com/cialis/">cialis no prescription</a>
cialis generic 20 mg blankets http://quotes786.com/tadacip/#price-of-tadacip
tadacip generic http://quotes786.com/rulide/#buy-rulide discount rulide
http://heavenlyhappyhour.com/questran--online/#questran-light-diclofenac-diclofenac-mobifen-luxury
mucus stools with questran
http://solartechnicians.net/renova--for-sale/#renova--for-sale price of renova
renova http://alwaseetgulf.com/discount-viagra/#viagra-pills viagra
http://livetvchannels.org/brand-viagra/#cheapest-brand-viagra brand viagra
http://cocasinclair.com/generic-cialis-lowest-price/#medical-cialis order
cialis on line http://bootstrapplusplus.com/cialis/#cialis-20mg-for-sale 5mg
cialis dyslipidaemia, group.
name: aayasenepet
email: <PASSWORD>aea<PASSWORD>
url: 'http://quotes786.com/tadacip/'
hidden: ''
date: '2019-06-26T07:17:24.345Z'
|
_data/comments/barvinok-1/comment-1561533444345.yml
|
en:
kit:
auth:
notifications:
sign_in:
success: "You are now signed-in, welcome back!"
required: "This page requires you to be signed-in."
link:
expired: 'This sign-in link has expired. Please request a new one'
revoked: 'This sign-in link has already been used. Please request a new one.'
sign_up:
success: "Successfull sign-up, welcome!"
sign_out:
success: "You've been signed-out, see you soon!"
already: "${kit.auth.notifications.sign_out.success}"
required: "This page requires you to be signed-out."
devices:
success: "This device has been signed-out."
email_confirmation:
success: "Your email address has been successfully confirmed."
errors:
already_confirmed: "This email address is already confirmed."
link:
expired: "This email confirmation link has expired."
revoked: "This email confirmation link has already been used."
email_confirmation_request:
success: "You will receive an email with instructions for how to confirm your email address in a few minutes."
password_reset_request:
success: "If <b>%{email}</b> exists in our database, you will receive an email with a password recovery link in a few minutes!"
password_reset:
success: "Your password has been changed successfully.<br>You are now signed in."
link:
expired: "This reset password link has expired. Please request a new one."
revoked: "This reset password link has already been used. Please request a new one."
scopes:
missing: "Missing scopes: %{scopes}"
oauth:
link:
succcess: "Your %{provider} account was successfully linked to your current account!"
already_linked: "This %{provider} identity is already associated with your account."
destroyed: ""
unlink:
success: "The %{provider} account was successfully un-linked from your account."
errors:
users_conflict: "This %{provider} account email <b>%{provider_email}</b> belongs to another local account."
users_oauth_identity_conflict: "You are currently signed in with <b>%{session_user_email}</b>, but this %{provider} account is associated with another local account: <b>%{user_oauth_identity_user_email}</b>."
provider_unsupported: "%{provider} is not a supported OAuth provider."
errors:
forbidden: "You do not have the authorization to perform this action."
unauthorized: "You need to authenticate in order to access this ressource."
|
domains/kit-auth/config/locales/notifications.en.yml
|
apiVersion: apps/v1
kind: Deployment
metadata:
{{- if .Values.mysql.deploymentAnnotations }}
annotations:
{{ toYaml .Values.mysql.deploymentAnnotations | nindent 4 }}
{{- end }}
labels:
{{- include "rekor.mysql.labels" . | nindent 4 }}
name: {{ template "rekor.mysql.fullname" . }}
{{ include "rekor.namespace" . | indent 2 }}
spec:
selector:
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
matchLabels:
{{- include "rekor.mysql.matchLabels" . | nindent 6 }}
replicas: {{ .Values.mysql.replicaCount }}
{{- if .Values.mysql.strategy }}
strategy:
{{ toYaml .Values.mysql.strategy | trim | indent 4 }}
{{ if eq .Values.mysql.strategy.type "Recreate" }}rollingUpdate: null{{ end }}
{{- end }}
template:
metadata:
{{- if .Values.mysql.podAnnotations }}
annotations:
{{ toYaml .Values.mysql.podAnnotations | nindent 8 }}
{{- end }}
labels:
{{- include "rekor.mysql.labels" . | nindent 8 }}
{{- if .Values.mysql.podLabels}}
{{ toYaml .Values.mysql.podLabels | nindent 8 }}
{{- end}}
spec:
serviceAccountName: {{ template "rekor.serviceAccountName.mysql" . }}
{{- if .Values.mysql.extraInitContainers }}
initContainers:
{{ toYaml .Values.mysql.extraInitContainers | indent 8 }}
{{- end }}
{{- if .Values.mysql.priorityClassName }}
priorityClassName: "{{ .Values.mysql.priorityClassName }}"
{{- end }}
containers:
- name: {{ template "rekor.name" . }}-{{ .Values.mysql.name }}
image: "{{ template "rekor.image" .Values.mysql.image }}"
imagePullPolicy: "{{ .Values.mysql.image.pullPolicy }}"
{{- if .Values.mysql.args }}
args:
{{ toYaml .Values.mysql.args | indent 12 }}
{{- end }}
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "mysql.secretName" . }}
key: mysql-root-password
- name: MYSQL_USER
value: {{ .Values.mysql.auth.username | quote }}
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "mysql.secretName" . }}
key: mysql-password
- name: MYSQL_DATABASE
value: {{ template "mysql.database" . }}
ports:
{{- include "rekor.containerPorts" .Values.mysql.service.ports | indent 12 }}
{{- if .Values.mysql.livenessProbe }}
livenessProbe:
{{ toYaml .Values.mysql.livenessProbe | indent 12 }}
{{- end }}
{{- if .Values.mysql.readinessProbe }}
readinessProbe:
{{ toYaml .Values.mysql.readinessProbe | indent 12 }}
{{- end }}
resources:
{{ toYaml .Values.mysql.resources | indent 12 }}
volumeMounts:
- name: storage
mountPath: {{ .Values.mysql.persistence.mountPath }}
subPath: {{ .Values.mysql.persistence.subPath }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
{{- if .Values.mysql.nodeSelector }}
nodeSelector:
{{ toYaml .Values.mysql.nodeSelector | indent 8 }}
{{- end }}
{{- with .Values.mysql.dnsConfig }}
dnsConfig:
{{ toYaml . | indent 8 }}
{{- end }}
{{- if .Values.mysql.securityContext }}
securityContext:
{{ toYaml .Values.mysql.securityContext | indent 8 }}
{{- end }}
{{- if .Values.mysql.tolerations }}
tolerations:
{{ toYaml .Values.mysql.tolerations | indent 8 }}
{{- end }}
{{- if .Values.mysql.affinity }}
affinity:
{{ toYaml .Values.mysql.affinity | indent 8 }}
{{- end }}
volumes:
{{- if not .Values.mysql.persistence.enabled }}
- name: storage
emptyDir: {}
{{- else if .Values.mysql.persistence.existingClaim }}
- name: storage
persistentVolumeClaim:
claimName: {{ .Values.mysql.persistence.existingClaim }}
{{- else }}
- name: storage
persistentVolumeClaim:
claimName: {{ template "rekor.mysql.fullname" . }}
{{- end }}
|
charts/rekor/templates/mysql/deployment.yaml
|
name: Go
on:
workflow_dispatch:
push:
branches: [ main ]
pull_request_target:
branches: [ main ]
types: [labeled]
jobs:
build_and_test:
name: Build and Test
runs-on: [self-hosted, x64]
if: github.event_name == 'workflow_dispatch' || github.event_name == 'push'
container:
image: ghcr.io/viamrobotics/test:latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
with:
fetch-depth: 2
- name: Configure git for private modules
env:
TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }}
run: |
sudo -u testbot bash -lc "git config --global --unset-all url.ssh://git@github.com/.insteadOf || true"
sudo -u testbot bash -lc "git config --global url.\"https://${TOKEN}@github.com\".insteadOf \"https://github.com\""
- name: Verify no uncommitted changes from build
run: |
git init
git add .
chown -R testbot .
sudo -u testbot bash -lc 'make build lint'
GEN_DIFF=$(git diff)
if [ -n "$GEN_DIFF" ]; then
echo '"make build lint" resulted in changes not in git' 1>&2
git status
echo $GEN_DIFF
exit 1
fi
- name: Test
env:
ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS: ${{ secrets.ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS }}
TEST_MONGODB_URI: ${{ secrets.TEST_MONGODB_URI }}
run: |
echo "$ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS" | base64 -d > artifact_google_creds.json
export ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS=`pwd`/artifact_google_creds.json
sudo -u testbot --preserve-env=ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS,TEST_MONGODB_URI bash -lc 'make cover'
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
build_and_test_from_pr: # TODO(GOUT-15): reuse above
name: Build and Test (PR)
runs-on: [self-hosted, x64]
if: contains(github.event.pull_request.labels.*.name, 'safe to test')
container:
image: ghcr.io/viamrobotics/test:latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 2
- name: Configure git for private modules
env:
TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }}
run: |
sudo -u testbot bash -lc "git config --global --unset-all url.ssh://git@github.com/.insteadOf"
sudo -u testbot bash -lc "git config --global url.\"https://${TOKEN}@github.com\".insteadOf \"https://github.com\""
- name: Verify no uncommitted changes from build
run: |
git init
git add .
chown -R testbot .
sudo -u testbot bash -lc 'make build lint'
GEN_DIFF=$(git diff)
if [ -n "$GEN_DIFF" ]; then
echo '"make build lint" resulted in changes not in git' 1>&2
git status
exit 1
fi
- name: Test
env:
ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS: ${{ secrets.ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS }}
TEST_MONGODB_URI: ${{ secrets.TEST_MONGODB_URI }}
run: |
echo "$ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS" | base64 -d > artifact_google_creds.json
export ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS=`pwd`/artifact_google_creds.json
sudo -u testbot --preserve-env=ARTIFACT_GOOGLE_APPLICATION_CREDENTIALS,TEST_MONGODB_URI bash -lc 'make cover'
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
|
.github/workflows/test.yml
|
name: CI
on:
push:
branches:
- main
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_number || github.ref }}
cancel-in-progress: true # cancel jobs from previous push
jobs:
formatting:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.head_ref }}
# Run this before any other commands so that git diff does not pick up formatting changes.
- run: |
find . -type f -not -path '.git/*' -exec sed -ri "s/[0-9]{4} (Scipp contributors)/$(date +%Y) \1/g" {} +
git diff --exit-code
- run: sudo apt-get install --yes clang-format-10
- run: find lib -type f -regex '.*\.\(cpp\|h\|tcc\)' -exec clang-format-10 -i {} +
- run: pip install cmake_format==0.6.9 flake8 nb-clean==2.1.0 yapf==0.30.0
- run: find . -type f -name CMakeLists.txt -or -name '*.cmake' -not -path "./lib/.tbb/*" | xargs cmake-format -i
- run: find . -type f -regex '.*\.ipynb' | xargs nb-clean clean --remove-empty-cells --preserve-cell-metadata
- run: yapf --recursive --in-place .
- run: flake8 .
- uses: stefanzweifel/git-auto-commit-action@v4
with:
commit_message: Apply automatic formatting
build_and_test:
needs: formatting
name: build and test ${{ matrix.os }}, py${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
defaults:
run:
shell: bash -l {0} # required for conda env
env:
PYTHONPATH: ${{ github.workspace }}/install
strategy:
fail-fast: false
matrix:
os: ["ubuntu-latest", "macos-latest", "windows-latest"]
python-version: ["3.8"]
include:
- os: ubuntu-latest
cmake-preset: ci-linux
- os: macos-latest
cmake-preset: ci-macos
- os: windows-latest
cmake-preset: ci-windows
steps:
- uses: actions/checkout@v2
with:
submodules: true
fetch-depth: 0 # history required so cmake can determine version
ref: ${{ github.head_ref }}
- uses: ilammy/msvc-dev-cmd@v1 # Required to set up MSVC dev environment for Ninja builds.
- run: python tools/metatoenv.py --dir=conda --env-file=buildenv.yml --channels=conda-forge --merge-with=developer-extra.yml
- name: Setup Mambaforge and dev env
uses: conda-incubator/setup-miniconda@v2
with:
miniforge-version: latest
miniforge-variant: Mambaforge
python-version: ${{ matrix.python-version }}
environment-file: buildenv.yml
activate-environment: buildenv
auto-activate-base: false
- run: echo "$CONDA/envs/buildenv/Library/bin" >> $GITHUB_PATH
if: ${{ contains(matrix.os, 'windows-latest') }}
- run: ccache --version
- name: ccache
uses: hendrikmuhs/ccache-action@9abc88f1e5416ef236a67b736bb781d6b134a6ea
with:
key: ${{ matrix.os }}
- run: cmake --preset ${{ matrix.cmake-preset }}
- run: cmake --build --preset build
- run: ctest --preset test
- run: python -m pytest -v tests
- run: |
pip install sphinx-autodoc-typehints>=1.17.0
cmake --build --preset docs
if: ${{ contains(matrix.os, 'ubuntu') }}
- uses: actions/upload-artifact@v2
if: ${{ contains(matrix.os, 'ubuntu') }}
with:
name: DocumentationHTML
path: build/html/
|
.github/workflows/pr_and_main.yml
|
homepage: http://www.kfish.org/software/hogg/
changelog-type: ''
hash: 9fbf1323b7f1cef98afff343cdbc74f6f099e660a71839601207cefd0465603b
test-bench-deps: {}
maintainer: <NAME> <<EMAIL>>
synopsis: Library and tools to manipulate the Ogg container format
changelog: ''
basic-deps:
bytestring: -any
base: ! '>=3 && <5'
time: -any
HUnit: -any
array: -any
containers: -any
old-locale: -any
mtl: -any
random: -any
all-versions:
- 0.2.0
- 0.3.0
- 0.4.1
- 0.4.1.1
- 0.4.1.2
author: <NAME>
latest: 0.4.1.2
description-type: text
description: ! "------------------------------------------------------------\nHOgg
-- Ogg encapsulation stuffz\n------------------------------------------------------------\n\nRequirements:\n
\ - Cabal (tested with various version from 1.1.3 to 1.6.0)\n\n - Data.ByteString,
included with GHC >= 6.6, and available separately\n for GHC 6.4 at:\n
\ http://www.cse.unsw.edu.au/~dons/fps.html (version 0.7 or greater)\n\n
\ If building on Debian GNU/Linux:\n $ apt-get install ghc6 libghc6-mtl-dev
libghc6-hunit-dev\n $ apt-get install libghc6-cabal-dev # not needed for
Debian unstable\n\nOptional:\n - HTTP1 with lazy bytestring support, from:\n
\ http://www.dtek.chalmers.se/~tox/site/http.php4\n To configure
with HTTP support, first install the above library, then\n go install through
Cabal, but run configure like\n\n $ runhaskell Setup configure --flags=\"http\"\n\n\nBuilding:\n\n
\ This package is configured by default to build with GHC 6.8.\n\n If
you are building with GHC 6.6, you need to follow the usual Cabal\n setup
but run configure like:\n\n $ runhaskell Setup configure --flags=\"-splitBase\"\n\n
\ If you are building with GHC 6.4, you will need to edit hogg.cabal\n by
adding 'fps' to the Build-Depends line and probably remove the Cabal\n conditionals.
You should consider upgrading to a more recent GHC.\n\n Once configured,
the following procedure will build hogg:\n\n $ chmod +x Setup.hs\n $
./Setup.hs configure\n $ ./Setup.hs build\n $ ./Setup.hs install\n\nUsage:\n
\ General help (lists subcommands):\n\n hogg help\n\n Help
on a subcommand:\n\n hogg help <subcommand>\n\n Info about
the various tracks of one or more files:\n\n hogg info file1.ogg
file2.ogg ...\n\n Dump packets of an Ogg file:\n\n hogg dump
file1.ogg file2.ogg ...\n\n Dump pages of an Ogg file\n\n hogg
pagedump file1.ogg file2.ogg ...\n\n Dump vorbis (theora, speex) packets\n\n
\ hogg dump -c vorbis file.ogg ...\n hogg pagedump
-c vorbis file.ogg ...\n hogg dumpraw -c vorbis file.ogg ...\n\n
\ Chop out part of a file\n\n hogg chop --start 01:10 --end
2:30 -o output.ogg file.ogg\n\n Merge\n\n merge pages from many
input files and produce a single output file:\n\n hogg merge -o output.ogg
file1.ogg file2.ogg file3.ogg ...\n\n\nChecking stuff works:\n\n Rewrite
a file (parse pages, rewrite page data):\n\n hogg rip file.ogg >
newfile.ogg\n diff file.ogg newfile.ogg\n\n Repacket a file
(parse to packets, rewrite with original segmentation):\n\n hogg
reconstruct file.ogg > newfile.ogg\n diff file.ogg newfile.ogg\n"
license-name: BSD-3-Clause
|
packages/ho/hogg.yaml
|
---
name: credhub-ha
releases:
- {name: credhub, version: latest}
- {name: uaa, version: latest}
- {name: postgres, version: latest}
- {name: haproxy, version: latest}
stemcells:
- alias: default
os: ubuntu-bionic
version: latest
instance_groups:
- name: credhub-proxy
instances: 1
vm_type: 1cpu-2g
stemcell: default
azs: [z1]
networks:
- name: net-bosh-2
static_ips: [192.168.116.107]
jobs:
- name: haproxy
release: haproxy
properties:
ha_proxy:
disable_http: true
log_level: debug
tcp:
          - name: credhub-proxy
port: 8844
backend_port: 8844
backend_servers:
- 192.168.116.104
- 192.168.116.105
- name: credhub-uaa-proxy
instances: 1
vm_type: 1cpu-2g
stemcell: default
azs: [z1]
networks:
- name: net-bosh-2
static_ips: [192.168.116.108]
jobs:
- name: haproxy
release: haproxy
properties:
ha_proxy:
disable_http: true
log_level: debug
tcp:
- name: uaa-proxy
port: 8443
backend_port: 8443
backend_servers:
- 192.168.116.102
- 192.168.116.103
- name: credhub-db
instances: 1
vm_type: 4cpu-16g
persistent_disk_type: medium
stemcell: default
azs: [z1]
networks:
- name: net-bosh-2
default: [gateway,dns]
static_ips:
- 192.168.116.106
jobs:
- name: postgres
release: postgres
properties:
databases:
address: 127.0.0.1
port: 5432
databases:
- name: credhub
- name: uaa
roles:
- name: admin
password: ((database_<PASSWORD>))
- name: uaa-backend
instances: 2
vm_type: 1cpu-4g
stemcell: default
azs: [z1]
networks:
- name: net-bosh-2
default: [gateway,dns]
static_ips:
- 192.168.116.102
- 192.168.116.103
jobs:
- name: uaa
release: uaa
properties:
encryption:
encryption_keys:
- label: uaa-encryption-key-1
passphrase: ((<PASSWORD>_1))
active_key_label: uaa-encryption-key-1
uaa:
url: https://uaa-credhub.internal.paas:8443
catalina_opts: -Xms1024m -Xmx1024m -XX:MaxMetaspaceSize=256m -Djava.security.egd=file:///dev/urandom
scim:
users:
- name: admin
password: ((<PASSWORD>client_<PASSWORD>))
groups:
- scim.write
- scim.read
- bosh.admin
- credhub.read
- credhub.write
clients:
credhub_cli:
override: true
authorized-grant-types: password,refresh_token
scope: credhub.read,credhub.write
authorities: uaa.resource
                access-token-validity: 1200  # was 30 s; raised because the credhub CLI doesn't refresh the token
refresh-token-validity: 3600
secret: ""
director_to_credhub:
override: true
authorized-grant-types: client_credentials
scope: uaa.none
authorities: credhub.read,credhub.write
access-token-validity: 43200
secret: ((director_to_credhub_secret))
admin: {client_secret: ((uaa_admin_client_secret))}
login: {client_secret: ((uaa_login_client_secret))}
zones: {internal: {hostnames: []}}
sslCertificate: ((uaa_cert))
sslPrivateKey: ((uaa_key))
jwt:
revocable: true
policy:
active_key_id: key-1
keys:
key-1:
signingKey: ((uaa_signing_key))
ldap:
enabled: true
profile_type: search-and-bind
url: 'ldap://elpaaso-ldap.internal.paas:389/'
userDN: 'cn=manager,dc=orange,dc=com'
userPassword: ((ldap_root_password))
searchBase: 'ou=users,dc=orange,dc=com'
searchFilter: 'uid={0}'
mailAttributeName: mail
groups:
profile_type: groups-as-scopes
searchBase: 'ou=paas-groups,ou=groups,dc=orange,dc=com'
groupRoleAttribute: 'description'
groupSearchFilter: 'uniqueMember={0}'
searchSubtree: true
maxSearchDepth: 10
autoAdd: true
uaadb:
address: 192.168.116.106 #db ip
port: 5432
db_scheme: postgresql
tls: disabled
databases:
- tag: uaa
name: uaa
roles:
- tag: admin
name: admin
password: ((database_admin))
login:
saml:
serviceProviderCertificate: ((uaa_saml_cert))
serviceProviderKey: ((uaa_saml_key))
serviceProviderKeyPassword: ""
update:
canaries: 1
max_in_flight: 1
serial: true
canary_watch_time: 1000-60000
update_watch_time: 1000-60000
|
micro-depls/credhub-ha/template/credhub-ha.yml
|
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
filebeat.inputs:
# Each - is a prospector. Most options can be set at the prospector level, so
# you can use different prospectors for various configurations.
# Below are the prospector specific configurations.
- type: log
paths:
- /logs/log1.log
tags: ["prog1", "log"]
fields:
log_index: 'prog1'
json.message_key:
- type: log
paths:
- /logs/log2.log
tags: ["prog2", "log"]
fields:
log_index: 'prog2'
json.message_key:
#============================= Filebeat modules ===============================
filebeat.config.modules:
# Glob pattern for configuration loading
path: ${path.config}/modules.d/*.yml
# Set to true to enable config reloading
reload.enabled: false
# Period on which files under path should be checked for changes
#reload.period: 10s
#==================== Elasticsearch template setting ==========================
setup.template.settings:
index.number_of_shards: 3
#index.codec: best_compression
#_source.enabled: false
setup.template.name: "trace"
setup.template.pattern: "trace-*"
setup.ilm.enabled: false
setup.ilm.rollover_alias: "filebeat"
setup.ilm.pattern: "{now/d}-000001"
#================================ Outputs =====================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
# Array of hosts to connect to.
hosts: ["172.17.0.1:9200"]
index: "%{[fields.log_index]}-%{+yyyy.MM.dd}"
# Optional protocol and basic auth credentials.
#protocol: "https"
worker: 5
bulk_max_size: 200
#================================ Logging =====================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug
logging.level: info
logging.to_files: true
logging.files:
path: logs
name: filebeat
keepfiles: 7
permissions: 0644
multiline.pattern: '^[[:space:]]+(at|\.{3})\b|^Caused by:'
multiline.negate: false
multiline.match: after
|
logging/filebeat/filebeat-es.yml
|
# Site Settings
title: <NAME> | Dev Ops Projects Manager and Cloud Expert
url: 'http://sergio.afanou.com'
name: Sergio's blog
logo: assets/images/salogo.png
#baseurl: '/' #change it according to your repository name
# Style will be applied only after restarting the build or serve. Just choose one of the options.
theme_skin: ceramic # blue turquoise green berry orange ceramic
chrome_mobile_color: #use hex colors (ex:#1976d2) or leave empty if you don't want a color for chrome mobile searchbar
include: ["_pages"]
# Contact page
contact_api: "https://script.google.com/macros/s/AKfycbwIRo-LvVIkMZEum8jopBu9CRpELUkO2d1hxBS5/exec"
# Tracker
analytics: UA-20627889-4
# Plugins
plugins:
- jekyll-paginate
- jekyll-sitemap
- jekyll-feed
- jekyll-seo-tag
- jekyll-archives
# Archives
jekyll-archives:
enabled:
- categories
layout: archive
permalinks:
category: '/category/:name/'
# Pagination
paginate: 6
paginate_path: /page:num/
collections:
articles:
output: true
permalink: /articles/:path
services:
output: true
posts:
permalink: /blog/:categories/:slug
# Authors
authors:
full:
name: Sergio
display_name: <NAME>
gravatar: e56154546cf4be74e393c62d1ae9f9d4
email: <EMAIL>
web: https://www.sergio.afanou.com
twitter: https://twitter.com/t0o1
description: "Author of Mediumish, a Bootstrap Medium styled template available for WordPress, HTML, Ghost and Jekyll. You are currently previewing Jekyll template demo."
john:
name: John
display_name: John
avatar: 'assets/images/avatar.png'
gravatar: b1cc14991db7a456fcd761680bbc8f81
email: <EMAIL>
web: https://www.wowthemes.net
twitter: https://twitter.com/wowthemesnet
description: "This is the author box. Write a short description of the author here. You are currently previewing Mediumish demo, a Jekyll template compatible with Github pages."
# Team names, titles and social links
people:
- name: <NAME>
pic: 1
position: AI solutions designer
social:
- title: twitter
url: #
- title: facebook
url: #
- title: stack-overflow
url: #
- name: <NAME>
pic: 2
position: Cloud Nerd
social:
- title: twitter
url: #
- title: facebook
url: #
- title: linkedin
url: #
- name: <NAME>
pic: 3
position: Lead Developer
social:
- title: twitter
url: #
- title: facebook
url: #
- title: google-plus
url: #
# Sass/SCSS
sass:
sass_dir: _sass
style: compressed # http://sass-lang.com/documentation/file.SASS_REFERENCE.html#output_style
# Build settings
compress-site: yes
encoding: "utf-8"
compress_html: # - http://jch.penibelst.de/
clippings: all
ignore:
envs: development
# Development Settings
port: 4000
host: 0.0.0.0
safe: false
# Build settings
markdown: kramdown
permalink: pretty
exclude: [changelog.md, LICENSE.txt, README.md, Gemfile, Gemfile.lock]
|
_config.yml
|
items:
- uid: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName
id: WithServiceName
artifact: com.microsoft.azure.appplatform.v2020_07_01:azure-mgmt-appplatform:1.0.0-beta
parent: com.microsoft.azure.management.appplatform.v2020_07_01
children:
- com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName.withServiceName(java.lang.String)
langs:
- java
name: AppResource.DefinitionStages.WithServiceName
nameWithType: AppResource.DefinitionStages.WithServiceName
fullName: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName
type: Interface
package: com.microsoft.azure.management.appplatform.v2020_07_01
summary: The stage of the appresource definition allowing to specify ServiceName.
syntax:
content: public static interface AppResource.DefinitionStages.WithServiceName
- uid: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName.withServiceName(java.lang.String)
id: withServiceName(java.lang.String)
artifact: com.microsoft.azure.appplatform.v2020_07_01:azure-mgmt-appplatform:1.0.0-beta
parent: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName
langs:
- java
name: withServiceName(String serviceName)
nameWithType: AppResource.DefinitionStages.WithServiceName.withServiceName(String serviceName)
fullName: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName.withServiceName(String serviceName)
overload: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName.withServiceName*
type: Method
package: com.microsoft.azure.management.appplatform.v2020_07_01
summary: Specifies serviceName.
syntax:
content: public abstract AppResource.DefinitionStages.WithCreate withServiceName(String serviceName)
parameters:
- id: serviceName
type: java.lang.String
description: The name of the Service resource
return:
type: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithCreate
description: the next definition stage
references:
- uid: java.lang.String
spec.java:
- uid: java.lang.String
name: String
fullName: java.lang.String
- uid: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithCreate
name: AppResource.DefinitionStages.WithCreate
nameWithType: AppResource.DefinitionStages.WithCreate
fullName: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithCreate
- uid: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName.withServiceName*
name: withServiceName
nameWithType: AppResource.DefinitionStages.WithServiceName.withServiceName
fullName: com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName.withServiceName
package: com.microsoft.azure.management.appplatform.v2020_07_01
|
preview/docs-ref-autogen/com.microsoft.azure.management.appplatform.v2020_07_01.AppResource.DefinitionStages.WithServiceName.yml
|
---
- name: broker group
group:
name: "{{ kafka.broker.group }}"
- name: broker user
user:
name: "{{ kafka.broker.user }}"
comment: "Kafka User"
system: yes
group: "{{ kafka.broker.group }}"
- name: create kafka log directory
file:
path: /var/log/kafka
state: directory
mode: 0755
owner: cp-kafka
group: confluent
- name: update the krb5.conf config file
template:
src: templates/krb5.conf.j2
dest: /etc/krb5.conf
owner: root
group: root
mode: 0644
- name: copy systemd service file
template:
src: kafka.service.j2
dest: "{{ kafka.broker.systemd_config_file }}"
owner: root
group: root
mode: 0644
- name: copy modified start script
copy: src=kafka-server-start dest="{{ kafka.broker.start_class }}"
- name: broker plaintext config
template:
src: server.properties.j2
dest: "{{ kafka.broker.config_file }}"
mode: 0640
owner: "{{ kafka.broker.user }}"
group: "{{ kafka.broker.group }}"
when: security_mode == "plaintext"
notify:
- restart kafka
- name: broker ssl config
template:
src: server_ssl.properties.j2
dest: "{{ kafka.broker.config_file }}"
mode: 0640
owner: "{{ kafka.broker.user }}"
group: "{{ kafka.broker.group }}"
when: security_mode == "ssl"
notify:
- restart kafka
- name: broker sasl_ssl config
template:
src: server_sasl_ssl.properties.j2
dest: "{{ kafka.broker.config_file }}"
mode: 0640
owner: "{{ kafka.broker.user }}"
group: "{{ kafka.broker.group }}"
when: security_mode == "sasl_ssl"
notify:
- restart kafka
- name: broker sasl_ssl JAAS config
template:
src: kafka_server_jaas.j2
dest: "{{ kafka.broker.jaas_config_file }}"
mode: 0640
owner: "{{ kafka.broker.user }}"
group: "{{ kafka.broker.group }}"
when: security_mode == "sasl_ssl"
- name: copy log4j config
template:
src: kafka-log4j.properties.j2
dest: "{{ kafka.broker.log4j_config_file }}"
mode: 0640
owner: "{{ kafka.broker.user }}"
group: "{{ kafka.broker.group }}"
- name: copy systemd environment file
template:
src: kafka-env.j2
dest: "{{ kafka.broker.systemd_env_file }}"
mode: 0640
owner: "{{ kafka.broker.user }}"
group: "{{ kafka.broker.group }}"
- name: broker configure service
systemd:
name: "{{ kafka.broker.service_name }}"
enabled: "{{ kafka.broker.systemd.enabled }}"
state: "{{ kafka.broker.systemd.state }}"
- name: Reload the systemd daemon with changes
systemd:
daemon_reload: yes
notify:
- restart kafka
|
provisioners/ansible/roles/confluent.kafka-broker/tasks/configure.yml
|
- name: check deployed stack
shell: . ~/stackrc && heat stack-list | grep overcloud
failed_when: false
changed_when: false
register: stack
- block:
- name: check IPA is available
wait_for: host={{ inventory_hostname }} port=22 timeout=1
register: ipa_available
- name: copy certificate of IPA
command: "{{ item }}"
delegate_to: ipa
register: certificate
changed_when: false
failed_when: certificate.stdout == ''
when: ipa_available|success
with_items:
- cat /etc/ipa/ca.crt
- openssl x509 -in /etc/ipa/ca.crt -outform PEM
when: stack.stdout == ""
- block:
- name: Create bin directory
file: path=~/bin state=directory
- name: create command binary
template: src={{ item }}.j2 dest=~/bin/{{ item }} mode=0755
with_items:
- deploy-overcloud
- virsh-names
- scale-down
- name: Create templates directory
file: path=~/templates state=directory
- name: Create nic-configs directory
file: path=~/templates/nic-configs state=directory
- name: create custom install configuration
template: src={{ item }}.j2 dest=~/templates/{{ item }} mode=0644
with_items: "{{ templates|select|list }}"
- name: create nic configs
template: src={{ item }}.j2 dest=~/templates/nic-configs/{{ item }} mode=0644
with_items:
- "compute.yaml"
- "controller.yaml"
- name: deploy overcloud (even more coffee!)
shell: >
. ~/stackrc &&
{{ lookup('template','deploy-overcloud.j2') }} 2>&1 >~/deploy_overcloud.log
tags: ['deploy']
# async: 7200
# poll: 0
# register: deploy_overcloud
#
# - name: check on overcloud deploy completed
# async_status: jid={{ deploy_overcloud.ansible_job_id }}
# register: job_result
# until: job_result.finished
# retries: 480
# delay: 15
when: stack.stdout == ""
become: yes
become_user: "{{ rhosp_stack_user }}"
- name: wait for stack creation to complete
shell: . ~/stackrc && openstack stack list -f json
register: stacklist
until: "'IN_PROGRESS' not in ( stacklist.stdout | from_json )[0]['Stack Status']"
retries: 720
delay: 60
become: yes
become_user: "{{ rhosp_stack_user }}"
failed_when: "'FAILED' in ( stacklist.stdout | from_json )[0]['Stack Status']"
changed_when: false
|
ansible/roles/layer2_rhosp_overcloud_deploy/tasks/create_overcloud.yml
|
# All %parameter% placeholders are quoted below: a YAML plain scalar may not
# begin with the indicator character `%`, so the unquoted form is invalid YAML
# (tolerated by old Symfony 2 parsers, rejected by Symfony >= 3.x). Quoting is
# transparent to Symfony — a value that is exactly "%param%" still resolves to
# the parameter's native type.
imports:
    - { resource: parameters.yml }
    - { resource: security.yml }

framework:
    #esi: ~
    translator: { fallback: "%locale%" }
    secret: "%secret%"
    router:
        resource: "%kernel.root_dir%/config/routing.yml"
        strict_requirements: ~
    form: ~
    csrf_protection: ~
    validation: { enable_annotations: true }
    templating:
        engines: ['twig']
        #assets_version: SomeVersionScheme
    default_locale: "%locale%"
    trusted_proxies: ~
    session: ~
    fragments: ~
    http_method_override: true

# Twig Configuration
twig:
    debug: "%kernel.debug%"
    strict_variables: "%kernel.debug%"
    globals:
        navbar_js_url: "%navbar_js_url%"
    form:
        resources:
            - LexikFormFilterBundle:Form:form_div_layout.html.twig
            - IfenslPadManagerBundle:Form:fields.html.twig

# Assetic Configuration
assetic:
    debug: "%kernel.debug%"
    use_controller: false
    bundles: [ ]
    #java: /usr/bin/java
    filters:
        cssrewrite: ~
        #closure:
        #    jar: "%kernel.root_dir%/Resources/java/compiler.jar"
        #yui_css:
        #    jar: "%kernel.root_dir%/Resources/java/yuicompressor-2.4.7.jar"

# Doctrine Configuration
doctrine:
    dbal:
        driver: "%database_driver%"
        host: "%database_host%"
        port: "%database_port%"
        dbname: "%database_name%"
        user: "%database_user%"
        password: "%database_password%"
        charset: UTF8
        # if using pdo_sqlite as your database driver, add the path in parameters.yml
        # e.g. database_path: "%kernel.root_dir%/data/data.db3"
        # path: "%database_path%"

    orm:
        auto_generate_proxy_classes: "%kernel.debug%"
        auto_mapping: true

# Swiftmailer Configuration
swiftmailer:
    transport: "%mailer_transport%"
    host: "%mailer_host%"
    username: "%mailer_user%"
    # NOTE(review): placeholder appears redacted — presumably "%mailer_password%"; confirm.
    password: "%<PASSWORD>%"
    auth_mode: "%mailer_auth_mode%"
    port: "%mailer_port%"
    encryption: "%mailer_encryption%"
    # type: file
    # path: "%kernel.root_dir%/spool"

da_api_client:
    api:
        etherpad:
            endpoint_root: "%etherpad_url%/api"
            security_token: "%etherpad_security_token%"
            # NOTE(review): nesting of `client` reconstructed (indentation was
            # lost in the source) — verify against the DaApiClientBundle schema.
            client:
                service: ifensl_pad_mananger.etherpad_api_client

# Pad Configuration
ifensl_pad_manager:
    etherpad:
        url: "%etherpad_url%"
    mailer:
        from: "%mailer_from%"
    max_pads_per_page: "%max_pads_per_page%"
|
app/config/config.yml
|
items:
- uid: '@azure/arm-logic.AS2EnvelopeSettings'
name: AS2EnvelopeSettings
fullName: AS2EnvelopeSettings
children:
- '@azure/arm-logic.AS2EnvelopeSettings.autogenerateFileName'
- '@azure/arm-logic.AS2EnvelopeSettings.fileNameTemplate'
- '@azure/arm-logic.AS2EnvelopeSettings.messageContentType'
- '@azure/arm-logic.AS2EnvelopeSettings.suspendMessageOnFileNameGenerationError'
- '@azure/arm-logic.AS2EnvelopeSettings.transmitFileNameInMimeHeader'
langs:
- typeScript
type: interface
summary: As configurações de envelope do contrato AS2.
package: '@azure/arm-logic'
- uid: '@azure/arm-logic.AS2EnvelopeSettings.autogenerateFileName'
name: autogenerateFileName
fullName: autogenerateFileName
children: []
langs:
- typeScript
type: property
    summary: O valor que indica se o nome de arquivo deve ser gerado automaticamente.
syntax:
content: 'autogenerateFileName: boolean'
return:
type:
- boolean
package: '@azure/arm-logic'
- uid: '@azure/arm-logic.AS2EnvelopeSettings.fileNameTemplate'
name: fileNameTemplate
fullName: fileNameTemplate
children: []
langs:
- typeScript
type: property
summary: O modelo de nome de arquivo.
syntax:
content: 'fileNameTemplate: string'
return:
type:
- string
package: '@azure/arm-logic'
- uid: '@azure/arm-logic.AS2EnvelopeSettings.messageContentType'
name: messageContentType
fullName: messageContentType
children: []
langs:
- typeScript
type: property
summary: O tipo de conteúdo de mensagem.
syntax:
content: 'messageContentType: string'
return:
type:
- string
package: '@azure/arm-logic'
- uid: '@azure/arm-logic.AS2EnvelopeSettings.suspendMessageOnFileNameGenerationError'
name: suspendMessageOnFileNameGenerationError
fullName: suspendMessageOnFileNameGenerationError
children: []
langs:
- typeScript
type: property
    summary: O valor que indica se a mensagem deve ser suspensa em caso de erro na geração do nome de arquivo.
syntax:
content: 'suspendMessageOnFileNameGenerationError: boolean'
return:
type:
- boolean
package: '@azure/arm-logic'
- uid: '@azure/arm-logic.AS2EnvelopeSettings.transmitFileNameInMimeHeader'
name: transmitFileNameInMimeHeader
fullName: transmitFileNameInMimeHeader
children: []
langs:
- typeScript
type: property
summary: O valor que indica se deve transmitir o nome do arquivo no cabeçalho mime.
syntax:
content: 'transmitFileNameInMimeHeader: boolean'
return:
type:
- boolean
package: '@azure/arm-logic'
|
docs-ref-autogen/@azure/arm-logic/AS2EnvelopeSettings.yml
|