| code (stringlengths 38–801k) | repo_path (stringlengths 6–263) |
|---|---|
---
driver:
name: docker
dockerfile: test/platforms/Dockerfile
cc_pattern: "<%= File.basename(Dir.pwd) %>"
cc_token: "sFlR<PASSWORD>F<PASSWORD>Axbh/w=="
provisioner:
name: chef_solo
attributes:
rails_part:
db:
adapter: sqlite3
nginx_part:
maintenance: '<!DOCTYPE html><html><head></head><body>dummy</body></html>'
cloudconductor:
servers:
ap_01:
private_ip: '127.0.0.1'
roles: 'ap'
weight: '0'
db_01:
private_ip: '127.0.0.1'
roles: 'db'
applications:
cloudconductor:
type: 'dynamic'
protocol: 'git'
url: 'https://github.com/cloudconductor/cloud_conductor.git'
revision: 'v1.0.0'
version: '1.0.0'
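# pre-deploy prepares the checkout before deployment: it copies the sample config,
# appends the mysql2/rspec/puma gems to the Gemfile, enables Devise's secret_key,
# and swaps the SECRET_KEY_BASE lookup in secrets.yml for a dummy key.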
pre-deploy: 'cp config/config.rb.smp config/config.rb; echo "gem ''mysql2''" >> Gemfile; echo "gem ''rspec''" >> Gemfile; echo "gem ''puma''" >> Gemfile; sed -i -e ''s/# config.secret_key = / config.secret_key = /'' config/initializers/devise.rb; sed -i -e ''s/<%%= ENV\[\"SECRET_KEY_BASE\"\] %>/dummy_key/'' config/secrets.yml'
parameters:
default_server: true
port: 8080
platforms:
- name: centos-6.6
driver:
image: centos:centos6.6
run_command: /sbin/init
- name: centos-7
driver:
image: centos:centos7
privileged: true
run_command: /sbin/init
suites:
- name: web_setup
driver:
cc_role: 'web'
run_list:
- role[web_setup]
- name: web_configure
driver:
cc_role: 'web'
run_list:
- role[web_setup]
- role[web_configure]
- name: web_deploy
driver:
cc_role: 'web'
run_list:
- role[web_setup]
- role[web_configure]
- role[web_deploy]
- name: ap_setup
driver:
cc_role: 'ap'
run_list:
- role[ap_setup]
- name: db_setup
driver:
cc_role: 'db'
run_list:
- role[db_setup]
- name: db_configure
driver:
cc_role: 'db'
run_list:
- role[db_setup]
- role[db_configure]
|
.kitchen.yml
|
uuid: 3f1082fc-d8f1-4e3a-82d4-df9f19916f86
langcode: en
status: true
dependencies:
config:
- core.entity_view_display.comment.recipe_review.default
- field.field.node.recipe.field_category
- field.field.node.recipe.field_difficulty
- field.field.node.recipe.field_image
- field.field.node.recipe.field_ingredients
- field.field.node.recipe.field_number_of_services
- field.field.node.recipe.field_preparation_time
- field.field.node.recipe.field_recipe_instruction
- field.field.node.recipe.field_recipe_reviews_and_testimo
- field.field.node.recipe.field_tags
- field.field.node.recipe.field_total_time
- node.type.recipe
module:
- comment
- image
- options
- text
- user
_core:
default_config_hash: 5XHTf4E2w-CdiqEtfWlabaLUmSVj5FB8ph1omPa1Lek
id: node.recipe.default
targetEntityType: node
bundle: recipe
mode: default
content:
field_category:
weight: 105
label: above
settings:
link: true
third_party_settings: { }
type: entity_reference_label
region: content
field_difficulty:
weight: 108
label: above
settings: { }
third_party_settings: { }
type: list_default
region: content
field_image:
weight: 102
label: above
settings:
image_style: ''
image_link: ''
third_party_settings: { }
type: image
region: content
field_ingredients:
weight: 109
label: above
settings:
link: true
third_party_settings: { }
type: entity_reference_label
region: content
field_number_of_services:
weight: 111
label: above
settings:
thousand_separator: ''
prefix_suffix: true
third_party_settings: { }
type: number_integer
region: content
field_preparation_time:
weight: 106
label: above
settings:
thousand_separator: ''
prefix_suffix: true
third_party_settings: { }
type: number_integer
region: content
field_recipe_instruction:
weight: 110
label: above
settings: { }
third_party_settings: { }
type: text_default
region: content
field_recipe_reviews_and_testimo:
weight: 113
label: above
settings:
view_mode: default
pager_id: 0
third_party_settings: { }
type: comment_default
region: content
field_tags:
weight: 112
label: above
settings:
link: true
third_party_settings: { }
type: entity_reference_label
region: content
field_total_time:
weight: 107
label: above
settings:
thousand_separator: ''
prefix_suffix: true
third_party_settings: { }
type: number_integer
region: content
links:
weight: 100
region: content
title:
label: hidden
type: string
weight: -5
region: content
settings:
link_to_entity: false
third_party_settings: { }
hidden:
langcode: true
|
config/core.entity_view_display.node.recipe.default.yml
|
# to test locally, run (windows):
# bundle exec jekyll serve --baseurl
# on ubuntu:
# jekyll serve
title: Dr. G
description: Nate G's personal site
disqus_shortname: wordsforthewise
reading_time: true
words_per_minute: 200
# Your site's domain goes here (eg: //mmistakes.github.io, http://mademistakes.com, etc)
# When testing locally, set url to blank or http://localhost:8888; for production use //nateGeorge.github.io
url: //nateGeorge.github.io
baseurl: /nateGeorge.github.io # make this blank for local testing; otherwise /nateGeorge.github.io
# Owner/author information
owner:
name: <NAME>
avatar: avatar.jpg
bio: "I'm a PhD chemical engineer by training, who does a lot of coding, electrical engineering, and chemistry."
email: <EMAIL>
# Social networking links used in footer. Update and remove as you like.
twitter:
facebook:
github: nateGeorge
stackexchange: http://stackexchange.com/users/5764261/wordsforthewise
linkedin: nateG-ai
instagram:
flickr:
tumblr:
google_plus:
# Background image to be tiled on all pages
background: bg2.png
# Analytics and webmaster tools stuff goes here
google_analytics: UA-53388616-2
google_verify:
# https://ssl.bing.com/webmaster/configure/verify/ownership Option 2 content= goes here
bing_verify:
# http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
timezone: America/Denver
future: true
highlighter: rouge #pygments
markdown: kramdown
plugins:
- jekyll-sitemap
- jekyll-paginate
#- jekyll_inline_highlight
- jekyll-include-cache
- jekyll-gist
- jekyll-feed
sass:
sass_dir: _sass
style: compressed
# https://github.com/mojombo/jekyll/wiki/Permalinks
permalink: /:categories/:title/
# Number of posts to show on home page
paginate: 5
kramdown:
input: GFM
auto_ids: true
footnote_nr: 1
entity_output: as_char
toc_levels: 1..6
enable_coderay: false
include:
- .htaccess
exclude:
- "*.less"
- "*.sublime-project"
- "*.sublime-workspace"
- .asset-cache
- .bundle
- .jekyll-assets-cache
- .sass-cache
- CHANGELOG
- Capfile
- Gemfile
- Gruntfile.js
- LICENSE
- README
- Rakefile
- config
- gulpfile.js
- lib
- log
- node_modules
- package.json
- spec
- tmp
excerpt_separator: <!--more-->
|
_config.yml
|
slider:
enable : true
bg_image : "images/banner/banner-1.jpg"
slider_item:
# slider item loop
- title : "Il tuo futuro luminoso è la nostra missione"
content : "Sviati dal labiritnto del Sistema (Babilonia), con i nostri corsi per il benessere psicofisico"
animation_in : "left"
animation_out : "right"
# available animation : left, right, up, down
button:
enable : true
label : "Candidati ora"
link : "contact"
# slider item loop
- title : "One Yoga"
content : "Un programma psicofisico divertente, per elevare la propria coscienza fino alla cima, sperimentando l’esatsi zen e l’unione con il creato e le energie cosmiche."
animation_in : "left"
animation_out : "right"
# available animation : left, right, up, down
button:
enable : true
label : "Candidati ora"
link : "contact"
# slider item loop
- title : "Corso Pratico di <NAME>"
content : "Usare e scoprire il proprio corpo come veicolo di esperienze ed espressioni. Le lezioni saranno concentrate sull’apprendimento dell’antica pratica taoista “I MOVIMENTI DEI CINQUE ANIMALI” in dodici sequenze."
animation_in : "left"
animation_out : "right"
# available animation : left, right, up, down
button:
enable : true
label : "Candidati ora"
link : "contact"
############################### Banner feature ###############################
banner_feature:
enable : true
image : "images/banner/banner-feature.png"
feature_item:
# feature item loop
- name : "Notizie sulla borsa di studio "
icon : "ti-book" # icon pack : https://themify.me/themify-icons
content : "Attualmente non ci sono borse di studio."
# feature item loop
- name : "La nostra bacheca "
icon : "ti-blackboard" # icon pack : https://themify.me/themify-icons
content : "Corsi per il benessere psicofisico e spirituale. Le antiche saggezze del mondo a portata di mouse."
# feature item loop
- name : "I nostri successi"
icon : "ti-agenda" # icon pack : https://themify.me/themify-icons
content : "Per noi il successo è vedere i fratelli e le sorelle uniti nel nome del Signore."
# feature item loop
- name : "Ammissione ora "
icon : "ti-write" # icon pack : https://themify.me/themify-icons
content : "Fai domanda per i nostri corsi attraverso il modulo dei contatti."
############################## about ##################################
about:
enable : true
title : "Circa RasEduCenter"
image : "images/about/about-us.jpg"
content : "La spiritualità insegnata dai Rasta, secondo un approcio che va oltre la religione, unendo la sapienza di vari popoli ed epoche. Grazie alla semplicità d'isegnamento, le sapienze millenaire sono finalmente a portata di mano."
button:
enable : true
label : "Ulteriori informazioni"
link : "about"
############################# Course ################################
course:
enable : true
title : "I nostri corsi"
# course item comes from "content/**/course" folder.
########################## Call to action ############################
cta:
enable : true
subtitle : "Clicca per partecipare al nostro corso avanzato"
title : "Formazione in spiritualità avanzata"
button:
enable : true
label : "Unisciti ora"
link : "contact"
######################## Success story #############################
success_story:
enable : true
bg_image : "images/backgrounds/success-story.jpg"
title : "Storie di successo"
content : "Sono innumerevoli le storie di successo tra i Rasta. Spesso si tratta di un successo artistico, a permettere la divulgazione dei concetti rasta attraverso, per esempio, la musica."
# video link
video_link : "https://www.youtube.com/watch?v=ResipmZmpDU"
############################# event ################################
event:
enable : true
title : "Prossimi eventi"
# event item comes from "content/**/event" folder.
########################### funfacts ############################
funfacts:
enable : true
funfact_item:
# funfact item loop
- name : "INSEGNANTI"
count : "1"
# funfact item loop
- name : "CORSI"
count : "3"
# funfact item loop
- name : "STUDENTI"
count : "0"
# funfact item loop
- name : "CLIENT SODDISFATTI"
count : "0"
# funfact is for about page
############################# blog ################################
blog:
enable : true
title : "Ultime notizie"
# blog item comes from "content/**/blog" folder.
|
exampleSite/data/it/homepage.yml
|
title: Diagnose Functional Requirements (T-1 and T-2)
tests:
#T-1:
################################ testcase1a
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase1a.txt
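# testcase1a carries no expected exit code, so it presumably must succeed (exit 0);
# every remaining case below expects exit 1.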
################################ testcase2a
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase2a.txt
exit: 1 # Permission denied
################################ testcase2b
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase2b.txt
exit: 1 # Permission denied
################################ testcase3a
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase3a.txt
exit: 1 # Permission denied
################################ testcase3b
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase3b.txt
exit: 1 # Permission denied
################################ testcase4a
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase4a.txt
exit: 1 # Permission denied
################################ testcase5a
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase5a.txt
exit: 1 # Permission denied
################################ testcase5b
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase5b.txt
exit: 1 # Permission denied
################################ testcase6a
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase6a.txt
exit: 1 # Permission denied
################################ testcase6b
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase6b.txt
exit: 1 # Permission denied
################################ testcase7a
- sh: python3 /home/bodhi/Repos/Drasil/code/build/Diagnose/src/python/Control.py /home/bodhi/Repos/Drasil/code/drasil-example/Drasil/Diagnose/inputfiles/testcase7a.txt
exit: 1 # Permission denied
|
code/drasil-example/Drasil/Diagnose/test/input.yaml
|
---
# Copyright 2017, IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Remove all the inventory files
- hosts: all:!opsmgr_containers
become: no
gather_facts: no
serial: 1
vars_files:
- defaults.yml
tasks:
- name: clean target inventory files
local_action: file dest={{ opsmgr_dat }}/inventory-{{ item }} state=absent
with_items: "{{ group_names }}"
- name: Ensure the opsmgr_dat directory exists
local_action: file dest={{ opsmgr_dat }} state=directory
- name: create target inventory files
local_action: copy dest={{ opsmgr_dat }}/inventory-{{ item }} content='[targets]' force=no
with_items: "{{ group_names }}"
#Add each host to the inventory file
- hosts: all:!opsmgr_containers
become: no
gather_facts: no
serial: 1
vars_files:
- defaults.yml
tasks:
- name: update target inventory files
local_action: lineinfile dest={{ opsmgr_dat }}/inventory-{{ item }} line='{{ inventory_hostname }}'
with_items: "{{ group_names }}"
when: ansible_ssh_host is not defined
- name: update target inventory files
local_action: lineinfile dest={{ opsmgr_dat }}/inventory-{{ item }} line='{{ inventory_hostname }} ansible_ssh_host={{ ansible_ssh_host }}'
with_items: "{{ group_names }}"
when: ansible_ssh_host is defined
- hosts: localhost
become: no
connection: local
gather_facts: no
vars_files:
- defaults.yml
vars:
- runbase: true
- runtgts: true
tasks:
- name: create base inventory file
copy: dest={{ opsmgr_dat }}/inventory-all content='[targets]' force=yes
- name: update base inventory file
lineinfile: dest={{ opsmgr_dat }}/inventory-all line='{{ item }}'
with_items: "{{groups['all']}}"
when: hostvars[item].ansible_ssh_host is not defined
- name: update base inventory file
lineinfile: dest={{ opsmgr_dat }}/inventory-all line='{{ item }} ansible_ssh_host={{ hostvars[item].ansible_ssh_host }}'
with_items: "{{groups['all']}}"
when: hostvars[item].ansible_ssh_host is defined
- name: clean up facts and logs
file:
dest: "{{ opsmgr_dir }}/plugins/provisioning/base/{{ item }}"
state: absent
with_items:
- ".facts"
- "ansible.log"
when: runbase == true
- name: run base ansible playbook
shell: >
ansible-playbook -i "{{ opsmgr_dat }}/inventory-all"
-e "opsmgr_dir={{ opsmgr_dir }}"
site.yml
args:
chdir: "{{ opsmgr_dir }}/plugins/provisioning/base/"
when: runbase == true
- name: Determine the provisioning plugins that exist
stat:
path: "{{ opsmgr_dir }}/plugins/provisioning/{{ item }}"
with_items: "{{ groups.keys() }}"
register: directory_stat
- name: clean up facts and logs
file:
dest: "{{ opsmgr_dir }}/plugins/provisioning/{{ item[0].item}}/{{ item[1] }}"
state: absent
with_nested:
- "{{ directory_stat.results }}"
- [ ".facts", "ansible.log", "run.log" ]
when: runtgts == true and item[0].stat.exists
- name: run target ansible playbooks
shell: >
ansible-playbook -i "{{ opsmgr_dat }}/inventory-{{ item.item }}"
-e "opsmgr_dir={{ opsmgr_dir }}"
site.yml
args:
chdir: "{{ opsmgr_dir }}/plugins/provisioning/{{ item.item }}/"
with_items: "{{ directory_stat.results }}"
when: runtgts == true and item.stat.exists
|
playbooks/targets.yml
|
- slug: test-min
name: Minimalistic Processor
version: 1.0.0
type: "data:test:min"
run:
bash: |
echo 'Test Minimalistic Processor'
- slug: test-min
name: Minimalistic Processor
version: 1.0.1
type: "data:test:min"
run:
bash: |
echo 'Test Minimalistic Processor'
- slug: test-min
name: Minimalistic Processor
version: 0.0.9
type: "data:test:min"
run:
bash: |
echo 'Test Minimalistic Processor'
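# The same slug is registered above at versions 1.0.0, 1.0.1 and 0.0.9,
# presumably to exercise latest-version resolution for processes.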
- slug: test-bloated
name: Bloated Processor
version: 0.0.0
type: "data:test:bloated:"
category: tests
persistence: CACHED
description: A very bloated processor.
input:
- name: boolean
label: Boolean
type: basic:boolean
default: true
- name: date
label: Date
type: basic:date
default: "2013-12-31"
- name: datetime
label: Date and time
type: basic:datetime
default: "2013-12-31 23:59:59"
- name: decimal
label: Decimal
type: basic:decimal
default: -123.456
- name: integer
label: Integer
type: basic:integer
default: -123
- name: string
label: String
type: basic:string
default: "Foo b-a-r.gz 1.23"
- name: string-list
label: String List
type: list:basic:string
- name: text
label: Text
type: basic:text
default: |
Foo bar
in 3
lines.
- name: url_download
label: URL download
type: basic:url:download
default:
url: "http://www.w3.org/TR/1998/REC-html40-19980424/html40.pdf"
- name: url_view
label: URL view
type: basic:url:view
default:
name: Google
url: "http://www.google.com/"
- name: group
label: Group
group:
- name: string2
label: String 2 required
type: basic:string
description: "String 2 description."
required: true
placeholder: "Enter string"
- name: string3
label: String 3 disabled
type: basic:string
description: "String 3 description."
disabled: true
default: "disabled"
- name: string4
label: String 4 hidden
type: basic:string
description: "String 4 description."
hidden: true
default: "hidden"
- name: string5
label: String 5 choices
type: basic:string
description: "String 5 description."
default: "choice_2"
choices:
- label: Choice 1
value: choice_1
- label: Choice 2
value: choice_2
- label: Choice 3
value: choice_3
- name: string6
label: String 6 regex only "Aa"
type: basic:string
default: "AAaAaaa"
validate_regex: /^[aA]*$/
- name: tricky
label: Tricky
group:
- name: tricky1
label: Tricky 1
group:
- name: tricky2
label: Tricky 2
type: basic:string
default: "true"
output:
- name: output
label: Result
type: basic:url:view
- name: out_boolean
label: Boolean
type: basic:boolean
- name: out_date
label: Date
type: basic:date
- name: out_datetime
label: Date and time
type: basic:datetime
- name: out_decimal
label: Decimal
type: basic:decimal
- name: out_integer
label: Integer
type: basic:integer
- name: out_string
label: String
type: basic:string
- name: out_text
label: Text
type: basic:text
- name: out_url_download
label: URL download
type: basic:url:download
- name: out_url_view
label: URL view
type: basic:url:view
- name: out_group
label: Group
group:
- name: string2
label: String 2 required
type: basic:string
description: "String 2 description."
- name: string3
label: String 3 disabled
type: basic:string
description: "String 3 description."
- name: string4
label: String 4 hidden
type: basic:string
description: "String 4 description."
- name: string5
label: String 5 choices
type: basic:string
description: "String 5 description."
- name: string6
label: String 6 regex only "Aa"
type: basic:string
- name: out_tricky
label: Tricky
group:
- name: tricky1
label: Tricky 1
group:
- name: tricky2
label: Tricky 2
type: basic:string
run:
bash: |
echo 'Test Bloated Processor'
|
resolwe/flow/tests/processes/tests.yml
|
params:
- name: classes
type: string
required: false
description: Classes to add to the registration for event.
- name: heading
type: string
required: true
description: The title of event or registration
- name: formItems
type: array
params:
- name: label
type: string
required: true
description: The title of field
- name: hint
type: string
required: false
description: additional description of specific field
- name: error
type: string
required: false
description: Error message - required if 'params.required' == true
- name: tag
type: string
required: true
description: type of field [input/select]
- name: name
type: string
required: true
description: the name attribute
- name: type
type: string
required: false
description: type of field, e.g. text/email/number/url, etc.; required if 'params.tag' == input
- name: required
type: string
required: false
description: if field is required, set it to true
- name: options
type: string
required: false
description: these options will be in select dropdown list - required if 'params.tag' == select
- name: buttonLabel
type: string
required: true
description: The title of submit button
- name: thankYouMsg
type: string
required: true
description: this message will be shown after form submission
examples:
- name: Ukážka 1
data:
heading: Registrácia na podujatie
formItems:
- label: Meno
error: Zadajte meno.
tag: input
name: name
type: text
required: true
- label: Priezvisko
error: Zadajte priezvisko.
tag: input
name: surname
type: text
required: true
- label: Emailová adresa
hint: Sem zadajte emailovú adresu v tvare <EMAIL>
error: Zadajte správnu emailovú adresu.
tag: input
name: email
type: email
required: true
- label: Organizácia
tag: input
name: organization
type: text
- label: Miesto konania podujatia
tag: select
name: city
options:
- value: Bardejov
name: Bardejov
- value: Kosice
name: Košice
- value: Bratislava
name: Bratislava
- label: Súhlasím so spracovaním osobných údajov za účelom účasti na podujatí.
error: Prosím, akceptujte súhlas so spracovaním osobných údajov.
tag: input
name: terms
type: checkbox
required: true
buttonLabel: Registrovať na podujatie
thankYouMsg: Ďakujeme za registráciu na podujatie.
|
src/idsk/components/registration-for-event/registration-for-event.yaml
|
courseName: Shiruba Springs
skinned: false
holes:
- id: 891abee8-03ba-474a-8f41-fb866b65d3cd
course: Shiruba Springs
number: 1
par: 4
replayName: "4"
description: >
Help Wanted
clubRecommendations:
clubSuggestions:
resourceCreators:
- id: b95a21e2-93df-4feb-9e4c-4d71cd158fe9
course: Shiruba Springs
number: 2
par: 3
replayName: "3"
description: >
Help Wanted
clubRecommendations:
clubSuggestions:
resourceCreators:
- id: 20f0243c-7cc6-4813-8748-5f519b62ecf4
course: Shiruba Springs
number: 3
par: 5
replayName: "5"
description: >
Help Wanted
clubRecommendations:
clubSuggestions:
resourceCreators:
- id: 1d9848d3-bb2b-45b5-bc8c-23d16a09786b
course: Shiruba Springs
number: 4
par: 3
replayName: "3B"
description: >
Help Wanted
clubRecommendations:
clubSuggestions:
resourceCreators:
- id: 800da01f-e698-42a5-90fe-8ec5a46ce0ed
course: Shiruba Springs
number: 5
par: 4
replayName: "4B"
description: >
Help Wanted
clubRecommendations:
clubSuggestions:
resourceCreators:
- id: 484fce1e-4653-4d66-896d-95a83ce26586
course: Shiruba Springs
number: 6
par: 5
replayName: "5B"
description: >
Help Wanted
clubRecommendations:
clubSuggestions:
resourceCreators:
- id: c1341293-bf24-4a03-ad83-0562c20e46bd
course: Shiruba Springs
number: 7
par: 4
replayName: "4C"
description: >
Help Wanted
clubRecommendations:
clubSuggestions:
resourceCreators:
- id: be228551-67e6-40d7-a599-8736193af98e
course: Shiruba Springs
number: 8
par: 3
replayName: "3C"
description: >
Help Wanted
clubRecommendations:
clubSuggestions:
resourceCreators:
- id: 483a64ef-89d1-4171-b454-905cc0ec1436
course: Shiruba Springs
number: 9
par: 5
replayName: "5C"
description: >
Help Wanted
clubRecommendations:
clubSuggestions:
resourceCreators:
|
modules/site/src/main/resources/microsite/data/courses/ShirubaSprings.yaml
|
name: Retrieve Maven artifact versions
description: Retrieve Maven artifact versions from Maven repository
inputs:
group:
required: true
description: Maven artifact group
name:
required: true
description: Maven artifact name
repository:
default: central
required: true
description: |-
Maven repository URL.
Aliases can be used:
* central - https://repo1.maven.org/maven2/
* oss-snapshots - https://oss.sonatype.org/content/repositories/snapshots/
user:
required: false
description: Maven repository user
password:
required: false
description: Maven repository password
min:
required: false
description: Minimum version
max:
required: false
description: Maximum version
exclude:
required: false
description: Version to exclude
outputs:
latestStable:
description: Latest matched stable version
latestUnstable:
description: Latest matched version among stable and unstable versions
stable:
description: JSON array of all matched stable versions
stableAndLatestUnstable:
description: JSON array of all matched stable versions and latest unstable if it's greater than latest stable
unstable:
description: JSON array of all matched stable and unstable versions
stableMajors:
description: JSON array of last minor version of all matched stable versions
stableMajorsAndLatestUnstable:
description: JSON array of last minor version of all matched stable versions and latest unstable if it's greater than latest stable
unstableMajors:
description: JSON array of last minor version of all matched stable and unstable versions
stableMinor:
description: JSON array of last patch version of all matched stable versions
stableMinorAndLatestUnstable:
description: JSON array of last patch version of all matched stable versions and latest unstable if it's greater than latest stable
unstableMinor:
description: JSON array of last patch version of all matched stable and unstable versions
runs:
using: node12
main: 'dist/index.js'
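# A minimal usage sketch for this action; the `uses:` reference and the
# group/name values are assumptions for illustration, not part of this repository:
#
#   - id: versions
#     uses: owner/maven-artifact-versions@v1
#     with:
#       group: org.example
#       name: example-artifact
#   - run: echo "Latest stable: ${{ steps.versions.outputs.latestStable }}"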
|
action.yml
|
name: Build and deploy to Netlify
on: [push]
env:
ASPNETCORE_ENVIRONMENT: 'Action'
jobs:
deployCommitDraft:
name: Deploy draft to Netlify
runs-on: ubuntu-latest
if: github.event_name == 'push' && github.ref != 'refs/heads/main'
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Uncomment if you want to run default instead of simplecss
# - name: Setup npm
# uses: actions/setup-node@v2
# with:
# node-version: 17.1.0
# - run: npm ci
# - run: npm run prodbuild
- name: Setup .NET Core
uses: actions/setup-dotnet@v1
with:
dotnet-version: 6.0.100
- name: Add robots.txt disallow
shell: pwsh
run: |
Set-Content "./src/Krompaco.RecordCollector.Web/wwwroot/robots.txt" "User-agent: *`r`nDisallow: /"
- name: Generate static site
run: dotnet test ./src/Krompaco.RecordCollector.Generator/Krompaco.RecordCollector.Generator.csproj --logger "console;verbosity=detailed"
- name: Publish draft to Netlify
uses: nwtgck/actions-netlify@v1.2
with:
publish-dir: './artifacts/static-site'
enable-commit-comment: true
production-deploy: false
github-token: ${{ secrets.GITHUB_TOKEN }}
env:
NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}
publishMasterCommit:
name: Publish to Netlify
runs-on: ubuntu-latest
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Uncomment if you want to run default instead of simplecss
# - name: Setup npm
# uses: actions/setup-node@v2
# with:
# node-version: 17.1.0
# - run: npm ci
# - run: npm run prodbuild
- name: Setup .NET Core
uses: actions/setup-dotnet@v1
with:
dotnet-version: 6.0.100
- name: Add robots.txt disallow
shell: pwsh
run: |
Set-Content "./src/Krompaco.RecordCollector.Web/wwwroot/robots.txt" "User-agent: *`r`nDisallow: /"
- name: Generate static site
run: dotnet test ./src/Krompaco.RecordCollector.Generator/Krompaco.RecordCollector.Generator.csproj --logger "console;verbosity=detailed"
- name: Publish to Netlify production
uses: nwtgck/actions-netlify@v1.2
with:
publish-dir: './artifacts/static-site'
enable-commit-comment: true
production-deploy: true
github-token: ${{ secrets.GITHUB_TOKEN }}
env:
NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}
|
.github/workflows/build-and-deploy-to-netlify.yml
|
name: Build
on: [push, pull_request]
jobs:
buildserver:
name: "Build Server"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build and push
uses: docker/build-push-action@v2
with:
context: server
platforms: linux/amd64
push: false
tags: lavalleeale/remoteftc:latest
buildelectron:
name: "Build Electron"
defaults:
run:
shell: bash
working-directory: web
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [macos-latest, windows-latest, ubuntu-latest]
steps:
- uses: actions/checkout@master
- name: Cache pnpm modules
uses: actions/cache@v2
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-
- uses: actions/setup-node@master
with:
node-version: 16.13
- uses: pnpm/action-setup@master
with:
version: latest
- name: Install dependencies
run: pnpm i
- name: Build
run: pnpm electron:build
- name: Archive macOS production artifacts
if: ${{matrix.os == 'macos-latest'}}
uses: actions/upload-artifact@v2
with:
name: RemoteFTC-mac-x64
path: web/dist/RemoteFTC-mac-x64.dmg
- name: Archive Linux production artifacts
if: ${{matrix.os == 'ubuntu-latest'}}
uses: actions/upload-artifact@v2
with:
name: RemoteFTC-linux-x86_64
path: web/dist/RemoteFTC-linux-x86_64.AppImage
- name: Archive Windows production artifacts
if: ${{matrix.os == 'windows-latest'}}
uses: actions/upload-artifact@v2
with:
name: RemoteFTC-win-x64
path: web/dist/RemoteFTC-win-x64.exe
test:
name: "Test with Cypress"
runs-on: ubuntu-latest
needs:
- buildelectron
- buildserver
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Cache pnpm modules
uses: actions/cache@v2
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-
- name: Setup dependency caching
uses: pnpm/action-setup@v2.0.1
with:
version: 6.27.1
- name: Install dependencies
run: pnpm --dir web i
- name: Cypress run
uses: cypress-io/github-action@v2
with:
install: false
build: pnpm build
start: pnpm start
working-directory: web
|
.github/workflows/build.yml
|
version: '2.4'
services:
identity:
image: "avivasolutionsnl.azurecr.io/${IMAGE_PREFIX}identity:${TAG}"
command: -commerceHostname ${COMMERCE_HOST_NAME} -sitecoreHostname ${SITECORE_HOST_NAME} -identityHostname ${IDENTITY_HOST_NAME}
networks:
exampleNetwork:
aliases:
- ${IDENTITY_HOST_NAME}
volumes:
- .\logs\identity:C:\inetpub\wwwroot\identity\app_data\logs
- .\src:C:\license
depends_on:
- mssql
commerce:
image: "avivasolutionsnl.azurecr.io/${IMAGE_PREFIX}commerce:${TAG}"
mem_limit: 4096m
cpu_count: 6
command: -commerceHostname ${COMMERCE_HOST_NAME} -sitecoreHostname ${SITECORE_HOST_NAME} -identityHostname ${IDENTITY_HOST_NAME}
networks:
exampleNetwork:
aliases:
- ${COMMERCE_HOST_NAME}
volumes:
- .\logs\commerce\CommerceAuthoring_Sc9:C:\inetpub\wwwroot\CommerceAuthoring_Sc9\wwwroot\logs
- .\logs\commerce\CommerceMinions_Sc9:C:\inetpub\wwwroot\CommerceMinions_Sc9\wwwroot\logs
- .\logs\commerce\CommerceOps_Sc9:C:\inetpub\wwwroot\CommerceOps_Sc9\wwwroot\logs
- .\logs\commerce\CommerceShops_Sc9:C:\inetpub\wwwroot\CommerceShops_Sc9\wwwroot\logs
# - .\wwwroot\commerce:C:\Workspace
depends_on:
- xconnect
- mssql
- solr
- sitecore
- identity
mssql:
image: "avivasolutionsnl.azurecr.io/${IMAGE_PREFIX}mssql-jss:${TAG}"
mem_limit: 4096m
cpu_count: 4
networks:
- exampleNetwork
environment:
ACCEPT_EULA: "Y"
sa_password: ${<PASSWORD>}
volumes:
- .\data\mssql:C:\Data
sitecore:
image: "avivasolutionsnl.azurecr.io/${IMAGE_PREFIX}sitecore-jss:${TAG}"
mem_limit: 8192m
cpu_count: 6
command: -commerceHostname ${COMMERCE_HOST_NAME} -identityHostname ${IDENTITY_HOST_NAME}
networks:
exampleNetwork:
aliases:
- ${SITECORE_HOST_NAME}
- wooli.local
volumes:
- .\logs\sitecore:c:\inetpub\wwwroot\${SITECORE_SITE_NAME}\App_Data\logs
- .\wwwroot\sitecore:C:\Workspace
- .\src:C:\license
- .\src:C:\inetpub\wwwroot\sitecore\App_Data\unicorn-wooli
depends_on:
- xconnect
- mssql
- solr
- identity
solr:
image: "avivasolutionsnl.azurecr.io/${IMAGE_PREFIX}solr:${TAG}"
mem_limit: 4096m
cpu_count: 4
networks:
- exampleNetwork
volumes:
- .\data\solr:C:\Data
xconnect:
image: "avivasolutionsnl.azurecr.io/${IMAGE_PREFIX}xconnect:${TAG}"
networks:
- exampleNetwork
volumes:
- .\logs\xconnect:C:\inetpub\wwwroot\xconnect\App_data\Logs
- .\src:C:\license
depends_on:
- mssql
- solr
networks:
exampleNetwork:
external:
name: nat
|
docker-compose.yml
|
name: Build (Linux, Mac OS, Windows)
on:
push:
branches: [main]
pull_request:
branches: [main]
jobs:
build-linux:
name: "Build Linux"
runs-on: ubuntu-latest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v2.3.4
- name: Setup Flutter SDK
uses: subosito/flutter-action@v1.4.0
- name: Install Linux build tools
run: sudo apt-get update && sudo apt-get install clang cmake ninja-build pkg-config libgtk-3-dev
- name: Enable desktop
run: flutter config --enable-linux-desktop
- name: Get packages
run: flutter pub get
- name: Analyze
run: flutter analyze
- name: Flutter build app
run: flutter build linux
- name: Compress artifacts
uses: TheDoctor0/zip-release@0.4.1
with:
filename: linux-dev.zip
- name: Build AppImage
uses: AppImageCrafters/build-appimage@master
with:
recipe: "AppImageBuilder.yml"
- name: Upload Build AppImage
uses: actions/upload-artifact@v2.3.1
with:
name: sidekick-linux-dev.AppImage
path: Sidekick-latest-x86_64.AppImage
- name: Upload Build Zip
uses: actions/upload-artifact@v2.3.1
with:
name: sidekick-linux-dev.zip
path: linux-dev.zip
build-macos:
name: "Build MacOS"
runs-on: macos-latest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v2.3.4
- name: Setup Flutter SDK
uses: subosito/flutter-action@v1.4.0
- name: Set Up XCode
uses: devbotsxyz/xcode-select@v1.1.0
- name: Enable desktop
run: flutter config --enable-macos-desktop
- name: Get packages
run: flutter pub get
- name: Analyze
run: flutter analyze
- name: Flutter build app
run: flutter build macos
- name: Install create-dmg
run: brew install create-dmg
- name: Create dmg
run: |
./scripts/create_mac_dmg.sh
- name: Compress artifacts
run: zip -r macos-dev.zip build/macos/Build/Products/Release
- name: Upload Build DMG
uses: actions/upload-artifact@v2.3.1
with:
name: sidekick-macos-dev.dmg
path: build/macos/Build/Products/Release/Sidekick.dmg
- name: Upload Build Zip
uses: actions/upload-artifact@v2.3.1
with:
name: sidekick-macos-dev.zip
path: macos-dev.zip
build-windows:
name: "Build Windows"
runs-on: windows-latest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v2.3.4
- name: Setup Flutter SDK
uses: subosito/flutter-action@v1.4.0
- name: Enable desktop
run: flutter config --enable-windows-desktop
- name: Get packages
run: flutter pub get
- name: Analyze
run: flutter analyze
- name: Write MSIX
uses: DamianReeves/write-file-action@v1.0
with:
path: pubspec.yaml
contents: |
msix_config:
display_name: Sidekick DEV Build
publisher_display_name: <NAME>.
identity_name: 44484EduardoM.SidekickFlutter
publisher: CN=1E781C91-227E-4505-B5B8-7E5EFE39D3A6
msix_version: 1.0.0.0
logo_path: assets\promo-windows\icon.png
architecture: x64
capabilities: "internetClient,removableStorage"
certificate_path: windows\SIDEKICK-CERT.pfx
certificate_password: ${{ secrets.WIN_CERT_PASS }}
store: false
write-mode: append
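      # The msix_config block above is appended to pubspec.yaml so the msix pub
      # package can pick it up when `flutter pub run msix:create` runs below.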
- name: Write MS Store
uses: DamianReeves/write-file-action@v1.0
with:
path: lib/modifiers.dart
contents: |
// Generated file. Do not modify
const isMSStore = false;
write-mode: overwrite
- name: Flutter build app
run: flutter build windows
- name: Create MSIX
run: flutter pub run msix:create
- name: Compress artifacts
run: tar.exe -a -c -f windows-dev.zip build/windows/Runner/release
- name: Upload Build MSIX
uses: actions/upload-artifact@v2.3.1
with:
name: sidekick-windows-dev.msix
path: build/windows/Runner/release/sidekick.msix
- name: Upload Build Zip
uses: actions/upload-artifact@v2.3.1
with:
name: sidekick-windows-dev.zip
path: windows-dev.zip
|
.github/workflows/main.yaml
|
---
# This script performs different parameters validation
######################################
# SAP RFC server - specific parameters validation
######################################
- name: Check sap_install_sapjco3_jar param
fail:
msg: "Wrong value for parameter sap_install_sapjco3_jar - allowed values are 'yes' and 'no'"
when: sap_install_sapjco3_jar != "yes" and sap_install_sapjco3_jar != "no"
- name: Get sapjco3.jar file status
stat:
path: "{{ sap_sapjco3_jar_path }}/sapjco3.jar"
register: sap_jar_file
when: sap_install_sapjco3_jar == 'yes'
- name: Check sap_sapjco3_jar_path param
fail:
msg: "Wrong value for for sap_sapjco3_jar_path param - file sapjco3.jar does not exist at that location"
when: sap_install_sapjco3_jar == 'yes' and not sap_jar_file.stat.exists
# Check port-type parameter: sap_rfc_server_shutdown_port
- name: Check port-type parameters
fail:
msg: "Parameter {{ item.name }} has value {{ item.value }}, which is not a valid TCP/IP port number !"
when: item.value != '' and ( not ( item.value | string | regex_search( '^[0-9]+$' ) ) or ( item.value | int < 0 ) or ( item.value | int > 65535 ) )
with_items:
- { name: "sap_rfc_server_shutdown_port", value: "{{ sap_rfc_server_shutdown_port }}" }
- name: Check mandatory params (must not be empty)
fail:
msg: "Parameter {{ item.name }} must not be empty !"
when: item.value == ''
with_items:
- { name: "sap_jco_server_gwhost", value: "{{ sap_jco_server_gwhost }}" }
- { name: "sap_jco_server_gwserv", value: "{{ sap_jco_server_gwserv }}" }
# Check for positive integers (>0):
- name: Check positive integer parameter above zero
fail:
msg: "Parameter {{ item.name }} has value {{ item.value }}, which is not a valid positive integer number !"
when: item.value != '' and ( item.value | int < 1 )
with_items:
- { name: "sap_jco_server_connection_count", value: "{{ sap_jco_server_connection_count }}" }
- { name: "sap_jco_server_worker_thread_count", value: "{{ sap_jco_server_worker_thread_count }}" }
- { name: "sap_jco_server_worker_thread_min_count", value: "{{ sap_jco_server_worker_thread_min_count }}" }
- { name: "sap_rfc_server_thread_pool_size", value: "{{ sap_rfc_server_thread_pool_size }}" }
- { name: "sap_rfc_server_shutdown_timout", value: "{{ sap_rfc_server_shutdown_timout }}" }
- { name: "sap_api_idoc_transaction_wait_timout", value: "{{ sap_api_idoc_transaction_wait_timout }}" }
- name: Check sap_jco_server_trace param
fail:
msg: "Parameter sap_jco_server_trace must be either 0 or 1 !"
when: sap_jco_server_trace != '' and sap_jco_server_trace != '0' and sap_jco_server_trace != '1'
- name: Check boolean params
fail:
msg: "Parameter {{ item.name }} must be either true or false (if not empty) !"
when: item.value != '' and item.value != 'true' and item.value != 'false'
with_items:
- { name: "sap_receiver_transactional", value: "{{ sap_receiver_transactional }}" }
- { name: "sap_rfc_server_jms_replicate_in_durable_queues", value: "{{ sap_rfc_server_jms_replicate_in_durable_queues }}" }
- { name: "sap_rfc_server_jms_persistence", value: "{{ sap_rfc_server_jms_persistence }}" }
- { name: "sap_rfc_server_jms_jmx", value: "{{ sap_rfc_server_jms_jmx }}" }
|
ansible/roles/sap-rfc-server/tasks/params_validation.yml
|
version: '3.7'
services:
os01:
restart: always
image: opensearchproject/opensearch:1.2.0
environment:
OPENSEARCH_JAVA_OPTS: "-Xms1024m -Xmx1024m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
node.name: os01
discovery.seed_hosts: os01,os02,os03
cluster.initial_master_nodes: os01,os02,os03
plugins.security.ssl.transport.pemkey_filepath: certificates/os01/os01.key # relative path
plugins.security.ssl.transport.pemcert_filepath: certificates/os01/os01.pem
plugins.security.ssl.http.pemkey_filepath: certificates/os01/os01.key
plugins.security.ssl.http.pemcert_filepath: certificates/os01/os01.pem
DISABLE_INSTALL_DEMO_CONFIG: "true"
JAVA_HOME: /usr/share/opensearch/jdk
bootstrap.memory_lock: "true" # along with the memlock settings below, disables swapping
network.host: "0.0.0.0"
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- "./opensearch.yml:/usr/share/opensearch/config/opensearch.yml"
- "os-data1:/usr/share/opensearch/data"
- "./certs:/usr/share/opensearch/config/certificates:ro"
ports:
- 9200:9200
- 9600:9600 # required for Performance Analyzer
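    # Only os01 publishes 9200/9600 to the host; os02 and os03 join the same
    # cluster but are reachable only inside the compose network.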
os02:
restart: always
image: opensearchproject/opensearch:1.2.0
environment:
OPENSEARCH_JAVA_OPTS: "-Xms1024m -Xmx1024m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
node.name: os02
discovery.seed_hosts: os01,os02,os03
cluster.initial_master_nodes: os01,os02,os03
plugins.security.ssl.transport.pemkey_filepath: certificates/os02/os02.key # relative path
plugins.security.ssl.transport.pemcert_filepath: certificates/os02/os02.pem
plugins.security.ssl.http.pemkey_filepath: certificates/os02/os02.key
plugins.security.ssl.http.pemcert_filepath: certificates/os02/os02.pem
DISABLE_INSTALL_DEMO_CONFIG: "true"
JAVA_HOME: /usr/share/opensearch/jdk
bootstrap.memory_lock: "true" # along with the memlock settings below, disables swapping
network.host: "0.0.0.0"
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- "./opensearch.yml:/usr/share/opensearch/config/opensearch.yml"
- "os-data2:/usr/share/opensearch/data"
- "./certs:/usr/share/opensearch/config/certificates:ro"
os03:
restart: always
image: opensearchproject/opensearch:1.2.0
environment:
OPENSEARCH_JAVA_OPTS: "-Xms1024m -Xmx1024m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
node.name: os03
discovery.seed_hosts: os01,os02,os03
cluster.initial_master_nodes: os01,os02,os03
plugins.security.ssl.transport.pemkey_filepath: certificates/os03/os03.key # relative path
plugins.security.ssl.transport.pemcert_filepath: certificates/os03/os03.pem
plugins.security.ssl.http.pemkey_filepath: certificates/os03/os03.key
plugins.security.ssl.http.pemcert_filepath: certificates/os03/os03.pem
DISABLE_INSTALL_DEMO_CONFIG: "true"
JAVA_HOME: /usr/share/opensearch/jdk
bootstrap.memory_lock: "true" # along with the memlock settings below, disables swapping
network.host: "0.0.0.0"
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- "./opensearch.yml:/usr/share/opensearch/config/opensearch.yml"
- "os-data3:/usr/share/opensearch/data"
- "./certs:/usr/share/opensearch/config/certificates:ro"
kibana:
restart: always
image: opensearchproject/opensearch-dashboards:1.2.0
ports:
- 5601:5601
volumes:
- "./certs:/usr/share/opensearch-dashboards/config/certificates:ro"
- "./opensearch-dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml"
environment:
OPENSEARCH_HOSTS: '["https://os01:9200","https://os02:9200","https://os03:9200"]' # must be a string with no spaces when specified as an environment variable
DISABLE_INSTALL_DEMO_CONFIG: "true"
volumes:
os-data1:
os-data2:
os-data3:
|
docker-compose.yml
|
---
- include_vars: munge_vars.yml
#Create credentials
- name: MUNGE group
group: name=munge state=present system=yes
- name: MUNGE user
user: name=munge createhome=yes comment="MUNGE Uid 'N' Gid Emporium" group=munge home=/var/lib/munge shell=/sbin/nologin state=present system=yes
#Install munge dependencies on the system
- name: Install munge and/or dependencies on RHEL/CentOS/SL systems
yum: name={{item}} state=present
with_items: "{{munge_packages_redhat}}"
when: ansible_distribution == 'CentOS' or ansible_distribution == 'Scientific' or ansible_distribution == 'RedHat'
- name: Install munge and/or dependencies on Fedora systems
dnf: name={{item}} state=present
with_items: "{{munge_packages_redhat}}"
when: ansible_distribution == 'Fedora'
- name: Install munge dependencies on Ubuntu systems
apt: name={{item}} state=present
with_items: "{{munge_packages_ubuntu}}"
when: ansible_distribution == 'Ubuntu'
#Create directories if not exist with correct ownership and permissions
- name: Make Directory /etc/munge if not exists
file: path=/etc/munge state=directory owner=munge group=munge mode=0700
- name: Make Directory /var/lib/munge if not exists
file: path=/var/lib/munge state=directory owner=munge group=munge mode=0711
- name: Set permissions on /var/log
file: path=/var/log state=directory mode=0755
- name: Make Directory /var/log/munge if not exists
file: path=/var/log/munge state=directory owner=munge group=munge mode=0700
- name: Make Directory /var/run/munge if not exists
file: path=/var/run/munge state=directory owner=munge group=munge mode=0755
#Copy the MUNGE key
#Or create new one with dd if=/dev/urandom bs=1 count=2048 > $CLUSTER_MUNGE_KEY
#Or via dd if=/dev/random bs=1 count=2048 > $CLUSTER_MUNGE_KEY
- name: Copy MUNGE key
copy: src="{{ lookup('env','CLUSTER_MUNGE_KEY') }}" dest=/etc/munge/munge.key mode=0400 owner=munge backup=yes
notify: Restart munge
- name: Start and enable munge
service: name=munge state=started enabled=yes
...
|
roles/software/tasks/munge.yml
|
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kube-prometheus-stack
namespace: monitoring
spec:
interval: 5m
chart:
spec:
# renovate: registryUrl=https://prometheus-community.github.io/helm-charts
chart: kube-prometheus-stack
version: 25.1.0
sourceRef:
kind: HelmRepository
name: prometheus-community-charts
namespace: flux-system
valuesFrom:
- kind: Secret
name: kube-prometheus-stack
valuesKey: discord-webhook
targetPath: alertmanager.config.global.slack_api_url
optional: false
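    # The discord-webhook secret value lands in Alertmanager's slack_api_url;
    # this presumably relies on Discord's Slack-compatible webhook endpoint
    # (the /slack-suffixed URL), letting the Slack receiver post to Discord.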
- kind: ConfigMap
name: prometheus-values
- kind: ConfigMap
name: alertmanager-values
values:
fullnameOverride: prom
kubeEtcd:
enabled: true
service:
enabled: true
port: 2381
targetPort: 2381
kubeControllerManager:
enabled: true
service:
enabled: true
port: 10257
targetPort: 10257
selector:
k8s-app: kube-controller-manager
serviceMonitor:
https: true
insecureSkipVerify: true
kubeScheduler:
enabled: true
service:
enabled: true
port: 10259
targetPort: 10259
selector:
k8s-app: kube-scheduler
serviceMonitor:
https: true
insecureSkipVerify: true
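    # controller-manager and scheduler expose metrics only over HTTPS (ports
    # 10257/10259) on current Kubernetes, so the monitors scrape over TLS and
    # skip verification of the components' self-signed serving certificates.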
kubeProxy:
enabled: false
defaultRules:
rules:
time: false
prometheusOperator:
# Setting this option to 0 to disable cpu limits
# see https://github.com/prometheus-operator/prometheus-operator/blob/master/cmd/operator/main.go#L175
configReloaderCpu: 0
grafana:
enabled: false
forceDeployDashboards: true
sidecar:
dashboards:
multicluster:
global:
enabled: true
etcd:
enabled: true
prometheus-node-exporter:
fullnameOverride: node-exporter
kubeStateMetrics:
enabled: true
# This is broken and an issue needs to be found/opened
# kube-state-metrics:
# fullnameOverride: kube-state-metrics
nodeExporter:
serviceMonitor:
relabelings:
- action: replace
regex: (.*)
replacement: $1
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: kubernetes_node
|
gitops/base/monitoring/kube-prometheus-stack/helm-release.yaml
|
data:
num_points: 2048
datadir_root: ./dataset
dataset_name: b-it-bots_atwork_dataset
num_classes: 15
augment_rotation: False
augment_scale: False
augment_translation: False
augment_jitter: False
augment_outlier: False
train:
batch_size: 16
max_epoch: 100
gpu_selection: 0
limit_gpu: True #limit gpu to selection
fine_tune: True
logdir_root: "/scratch/mwasil2s/log_train/robocup_pointcloud_classification/"
models:
DGCNNC:
pointcloud_color: True
base_learning_rate: 0.001
momentum: 0.9
optimizer: 'adam'
decay_step: 200000
decay_rate: 0.7
bn_init_decay: 0.5
bn_decay_decay_rate: 0.5
bn_decay_decay_step: 200000 #float (decay_step)
bn_decay_clip: 0.99
DGCNN:
pointcloud_color: False
base_learning_rate: 0.001
momentum: 0.9
optimizer: 'adam'
decay_step: 200000
decay_rate: 0.7
bn_init_decay: 0.5
bn_decay_decay_rate: 0.5
bn_decay_decay_step: 200000 #float (decay_step)
bn_decay_clip: 0.99
3DmFV:
pointcloud_color: False
n_gaussians: 5
gmm_type: 'grid'
gmm_variance: 0.04
add_gaussian_noise: False
base_learning_rate: 0.001
momentum: 0.9
optimizer: 'adam'
decay_step: 200000
decay_rate: 0.7
weight_decay: 0.0
bn_init_decay: 0.5
bn_decay_decay_rate: 0.5
bn_decay_decay_step: 200000 #float (decay_step)
bn_decay_clip: 0.99
SpiderCNN:
pointcloud_color: False
base_learning_rate: 0.001
momentum: 0.9
optimizer: 'adam'
decay_step: 200000
decay_rate: 0.7
bn_init_decay: 0.5
bn_decay_decay_rate: 0.5
bn_decay_decay_step: 200000 #float (decay_step)
bn_decay_clip: 0.99
PointNet:
pointcloud_color: False
base_learning_rate: 0.001
momentum: 0.9
optimizer: 'adam'
decay_step: 200000
decay_rate: 0.7
bn_init_decay: 0.5
bn_decay_decay_rate: 0.5
bn_decay_decay_step: 200000 #float (decay_step)
bn_decay_clip: 0.99
PointNet2:
pointcloud_color: False
base_learning_rate: 0.001
momentum: 0.9
optimizer: 'adam'
decay_step: 200000
decay_rate: 0.7
bn_init_decay: 0.5
bn_decay_decay_rate: 0.5
bn_decay_decay_step: 200000 #float (decay_step)
bn_decay_clip: 0.99
PointCNN:
pointcloud_color: False
base_learning_rate: 0.001
momentum: 0.9
optimizer: 'adam'
decay_step: 200000
decay_rate: 0.7
bn_init_decay: 0.5
bn_decay_decay_rate: 0.5
bn_decay_decay_step: 200000 #float (decay_step)
bn_decay_clip: 0.99
|
config/config.yaml
|
title: demandes-dasile-en-baisse-en-2010
date: '2011-03-28T00:00:00.000Z'
image: 'https://www.populationdata.net/wp-content/uploads/unhcr-tentes.jpg'
fr:
title: Demandes d'asile en baisse en 2010
body: >-
Le nombre de demandeurs d'asile a baissé dans le monde en 2010. 358 800
personnes ont en effet déposé une demande d'asile dans les pays développés
l'année dernière, soit une baisse de 5% par rapport à 2009. Depuis 10 ans,
la baisse atteint -42%.<!--more-->En 2001, environ 620 000 personnes avaient
demandé l'asile dans la liste des 44 pays de l'étude publiée par le Haut
commissariat des Nations Unies pour les réfugiés.<br /><br />Cette tendance
globale sur une décennie révèle des différences notables selon les pays.
Dans un petit nombre d'entre eux, les demandes d'asile ont augmenté
fortement d'une année sur l'autre, tandis que dans la plupart des pays de
l'étude, on observe une baisse.<br /><br /><img border="0" alt=""
src="/images/articles/unhcr-demandeurs-d-asile-2010-1.png" /><br /><br />Le
premier pays de destination est les Etats-Unis avec 55 530 demandeurs
d'asile, devant la France (47 800) et l'Allemagne (41 330). Ces trois pays,
avec la Suède, voient le nombre de demandeurs d'asile augmenter fortement en
2010 par rapport à 2009.<br />Le Canada et le Royaume-Uni voient au
contraire ce nombre diminuer.<br /><br /><img border="0" alt=""
src="/images/articles/unhcr-demandeurs-d-asile-2010-2.png" /><br /><br />Les
demandeurs d'asile dans le monde en 2010 viennent principalement de Serbie
avec 28 900 personnes (8%), comprenant le Kosovo, soit +54% par rapport à
2009. Viennent ensuite l'Afghanistan (7%), Chine et Irak (6%), Russie et
Somalie (5%), Iran (4%).<br />L'Irak ne figure plus, pour la première fois
depuis 2005, dans les deux premiers pays d'origine des demandeurs
d'asile.<br /><br /><img border="0" alt=""
src="/images/articles/unhcr-demandeurs-d-asile-2010-3.png" /><br /><br
/>Depuis le début de 2011, d'importantes crises ont éclaté, notamment en
Afrique (Tunisie, Côte d'Ivoire, Libye, Egypte) et en Asie de l'ouest. Le
nombre de réfugiés dans ces pays et leurs voisins risque donc d'augmenter
fortement pour cette année. <br /><br />Le Haut Commissaire pour les
réfugiés <NAME> observe que la majeure partie des réfugiés sont
toujours accueillis par les pays sous-développés, malgré les difficultés
qu'ils rencontrent déjà. Il a demandé aux pays riches d'aider ceux qui ont
le plus de besoins.<br /><br />Source : UNHCR
en:
title: Demandes d'asile en baisse en 2010
body: ''
|
data/posts/2011-03-27_demandes-dasile-en-baisse-en-2010.yml
|
noctis-grey:
# Fonts
primary-font-family: 'Raleway,sans-serif'
paper-font-common-base_-_font-family: 'var(--primary-font-family)'
paper-font-common-code_-_font-family: 'var(--primary-font-family)'
paper-font-body1_-_font-family: 'var(--primary-font-family)'
paper-font-subhead_-_font-family: 'var(--primary-font-family)'
paper-font-headline_-_font-family: 'var(--primary-font-family)'
paper-font-caption_-_font-family: 'var(--primary-font-family)'
paper-font-title_-_font-family: 'var(--primary-font-family)'
ha-card-header-font-family: 'var(--primary-font-family)'
# Text
text-color: '#ffffff'
primary-text-color: 'var(--text-color)'
text-primary-color: 'var(--text-color)'
rgb-primary-text-color: 'var(--text-color)'
rgb-text-primary-color: 'var(--text-color)'
secondary-text-color: '#BAC0C6'
text-medium-light-color: '#A0A2A8'
text-medium-color: '#80828A'
disabled-text-color: '#626569'
# Main Colors
app-header-background-color: 'var(--background-color)'
accent-color: '#1A89F5'
accent-medium-color: 'var(--accent-color)'
primary-color: 'var(--accent-color)'
light-primary-color: '#7b7d80'
# Background
background-color: '#212121'
primary-background-color: 'var(--background-color)'
background-color-2: '#606060'
secondary-background-color: 'none'
markdown-code-background-color: '#424242'
# Card
ha-card-background: '#2E2E2E'
card-background-color: 'var(--ha-card-background)'
paper-card-background-color: 'var(--ha-card-background)'
ha-card-box-shadow: '3px 3px 13px -6px rgba(17,35,52,1)'
ha-card-border-radius: '5px'
border-color: 'none'
# Icons
paper-item-icon-color: '#EBEBEB'
paper-item-icon-active-color: 'var(--accent-color)'
# Sidebar
sidebar-background-color: 'var(--background-color)'
sidebar-icon-color: '#7b7d80'
sidebar-selected-icon-color: 'var(--accent-color)'
sidebar-selected-text-color: 'var(--text-color)'
divider-color: 'var(--background-color)'
# Sliders
paper-slider-knob-color: 'var(--accent-color)'
paper-slider-pin-color: 'var(--background-color-2)'
paper-slider-active-color: 'var(--accent-color)'
paper-slider-container-color: 'var(--background-color-2)'
# Toggle:
paper-toggle-button-checked-bar-color: 'var(--accent-color)'
mdc-theme-primary: 'var(--accent-color)'
# Switch
switch-unchecked-color: '#8C8C8C'
switch-checked-button-color: 'var(--accent-color)'
switch-unchecked-track-color: 'var(--background-color-2)'
switch-checked-track-color: 'var(--background-color-2)'
# ListBox
paper-listbox-background-color: 'transparent' # Also changes partial sidebar color
# Radio Button
paper-radio-button-checked-color: 'var(--accent-color)'
# Checkboxes
mdc-checkbox-unchecked-color: 'var(--text-medium-color)'
# Popups
more-info-header-background: 'var(--secondary-background-color)'
paper-dialog-background-color: 'var(--background-color)'
# Tables
table-row-background-color: 'var(--background-color)'
table-row-alternative-background-color: 'var(--paper-card-background-color)'
# Badges and gauges
label-badge-background-color: 'var(--background-color)'
label-badge-text-color: 'var(--text-primary-color)'
label-badge-red: '#B54949'
label-badge-blue: '#539BE1'
label-badge-green: '#44ba83'
label-badge-yellow: '#E0A957'
paper-input-container-focus-color: 'var(--accent-color)'
# Toast
paper-toast-background-color: '#353535'
# Custom Header
ch-background: 'var(--background-color)'
ch-active-tab-color: 'var(--accent-color)'
ch-notification-dot-color: 'var(--accent-color)'
ch-all-tabs-color: 'var(--sidebar-icon-color)'
ch-tab-indicator-color: 'var(--accent-color)'
|
themes/noctis-grey/noctis-grey.yaml
|
---
# EXAMPLE usage from letsencrypt.yml playbook outside this role:
#- hosts: letsencrypt
# vars:
# le_cron_inventory_groups:
# - "your_inventory_group"
# roles:
# - letsencrypt
# post_tasks:
# - name: "letsencrypt : SETUP : config letsencrypt cronjobs (including mail_error) for inventory groups"
# include_tasks: "roles/letsencrypt/tasks/setup_cron.yml"
# with_items:
# - "{{ le_cron_inventory_groups }}"
# loop_control:
# loop_var: le_setup_cron_loop
# tags:
# - setup
- block:
- name: "letsencrypt : SETUP cron : [{{ le_setup_cron_loop }}] - create directory {{ playbook_dir }}/.cron"
file:
path: "{{ playbook_dir }}/.cron"
state: directory
delegate_to: 127.0.0.1
run_once: true
- name: "letsencrypt : SETUP cron : [{{ le_setup_cron_loop }}] - config dynamic cron environment"
template:
src: "roles/letsencrypt/templates/setup/localhost/cron_env.j2"
dest: "{{ playbook_dir }}/.cron/letsencrypt_{{ le_setup_cron_loop }}"
owner: root
group: root
mode: 0744
delegate_to: 127.0.0.1
- name: "letsencrypt : SETUP cron : [{{ le_setup_cron_loop }}] - config static cron environment"
cron:
name: PATH
env: yes
user: root
cron_file: letsencrypt
job: /usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/sbin
delegate_to: 127.0.0.1
- name: "letsencrypt : SETUP cron : [{{ le_setup_cron_loop }}] - config cronjob"
cron:
name: "letsencrypt [{{ le_setup_cron_loop }}]"
minute: "0"
hour: "22"
user: root
cron_file: letsencrypt
job: ". {{ playbook_dir }}/.cron/letsencrypt_{{ le_setup_cron_loop }}; ansible-playbook {{ playbook_dir }}/{{ le_cron_playbook_filename }} -i {{ inventory_file }} --vault-password-file {{ le_cron_vault_password_file }} -l {{ le_setup_cron_loop }} -t certificate,deploy > /dev/null 2>&1"
delegate_to: 127.0.0.1
when: le_setup_cron_loop in group_names
tags:
- always
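# For reference, a sketch of the crontab entry the tasks above produce in
# the local /etc/cron.d/letsencrypt file (group name and playbook path are
# illustrative):
#   PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/sbin
#   0 22 * * * root . /path/to/playbook/.cron/letsencrypt_mygroup; ansible-playbook ... -l mygroup -t certificate,deploy > /dev/null 2>&1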
|
tasks/setup_cron.yml
|
id: ocd-person/601f7bba-aac4-5010-9553-d6a214d03abb
name: Liz Cheney
given_name: Liz
family_name: Cheney
gender: F
birth_date: 1966-07-28
image: https://theunitedstates.io/images/congress/450x550/C001109.jpg
party:
- start_date: '2017-01-03'
end_date: '2023-01-03'
name: Republican
roles:
- start_date: '2017-01-03'
end_date: '2019-01-03'
type: lower
jurisdiction: ocd-jurisdiction/country:us/government
district: WY-AL
- start_date: '2019-01-03'
end_date: '2021-01-03'
type: lower
jurisdiction: ocd-jurisdiction/country:us/government
district: WY-AL
- start_date: '2021-01-03'
end_date: '2023-01-03'
type: lower
jurisdiction: ocd-jurisdiction/country:us/government
district: WY-AL
contact_details:
- note: Capitol Office
address: 416 Cannon House Office Building Washington DC 20515-5000
voice: 202-225-2311
- note: District Office
address: 100 East B Street Room 4003; Casper, WY 82602
voice: 307-261-6595
fax: 307-261-6597
- note: District Office
address: 2120 Capitol Ave. Suite 8005; Cheyenne, WY 82001
voice: 307-772-2595
fax: 307-772-2597
- note: District Office
address: 300 S. Gillette Ave. Suite 2001; Gillette, WY 82716
voice: 307-414-1677
fax: 307-414-1711
- note: District Office
address: 45 E. Loucks St. Suite 300 F; Sheridan, WY 82801
voice: 307-673-4608
fax: 307-261-6597
links:
- url: https://cheney.house.gov
note: website
ids:
twitter: RepLizCheney
youtube: UCvL57Zp99QdDllF-KGziUAA
facebook: replizcheney
other_identifiers:
- scheme: bioguide
identifier: C001109
- scheme: govtrack
identifier: '412732'
- scheme: wikipedia
  identifier: Liz Cheney
- scheme: wikidata
identifier: Q5362573
- scheme: google_entity_id
identifier: kg:/m/04slt0
- scheme: votesmart
identifier: '171319'
- scheme: fec
identifier: H6WY00159
- scheme: opensecrets
identifier: N00035504
- scheme: maplight
identifier: '2244'
- scheme: cspan
identifier: '86147'
- scheme: icpsr
identifier: '21710'
- scheme: ballotpedia
  identifier: Liz Cheney
sources:
- url: https://theunitedstates.io/
|
data/us/legislature/Liz-Cheney-601f7bba-aac4-5010-9553-d6a214d03abb.yml
|
name: Terraform
on:
push:
branches:
- main
pull_request:
branches:
- main
workflow_dispatch: # manual trigger
env:
TF_BACKEND_GCS_BUCKET: ${{ secrets.TF_BACKEND_GCS_BUCKET }}
TF_VAR_project: ${{ secrets.TF_VAR_PROJECT }}
TF_VAR_region: europe-west4
TF_VAR_zone: europe-west4-a
jobs:
terraform:
name: Terraform
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
pull-requests: write
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Authenticate to Google Cloud
id: auth
uses: google-github-actions/auth@v0.3.1
with:
token_format: access_token
access_token_lifetime: 300s
workload_identity_provider: ${{ secrets.WORKLOAD_IDENTITY_PROVIDER }}
service_account: ${{ secrets.SERVICE_ACCOUNT }}
- name: Set Terraform vars
run: echo "TF_VAR_access_token=${{ steps.auth.outputs.access_token }}" >> $GITHUB_ENV
- name: Setup Terraform
uses: hashicorp/setup-terraform@v1
- name: Terraform Format
id: fmt
run: terraform fmt -check
- name: Terraform Init
id: init
run: |
terraform init \
-backend-config="bucket=${TF_BACKEND_GCS_BUCKET}" \
-backend-config="access_token=${TF_VAR_access_token}"
- name: Terraform Validate
id: validate
run: terraform validate -no-color
- name: Terraform Plan
id: plan
if: github.event_name == 'pull_request'
run: terraform plan -no-color
continue-on-error: true
- name: Update Pull Request
uses: actions/github-script@0.9.0
if: github.event_name == 'pull_request'
env:
PLAN: "terraform\n${{ steps.plan.outputs.stdout }}"
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\`
#### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\`
#### Terraform Plan 📖\`${{ steps.plan.outcome }}\`
#### Terraform Validation 🤖\`${{ steps.validate.outcome }}\`
<details><summary>Show Plan</summary>
\`\`\`\n
${process.env.PLAN}
\`\`\`
</details>
*Pusher: @${{ github.actor }}, Action: \`${{ github.event_name }}\`*`;
github.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: output
})
- name: Terraform Plan Status
if: steps.plan.outcome == 'failure'
run: exit 1
- name: Terraform Apply
if: github.ref == 'refs/heads/main' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch')
run: terraform apply -auto-approve
|
.github/workflows/terraform.yml
|
title: Azrrrrr
name: Azr's Blog | Blog
SEOTitle: Azr's Blog | Blog
header-img: img/post-bg-desk.jpg
email: <EMAIL>
description: "Azr's blog is an independent personal blog for sharing technical tutorials and resources, where front-end developers can exchange ideas."
keyword: "Blog, Azrrrrr, Azr's blog, blog, personal blog, front-end development, front-end blog, source code sharing, front-end classroom, front-end learning, programming basics"
# url: "https://amor9.cn/" # your host, for absolute URL
baseurl: "" # for example, '/blog' if your blog hosted on 'host/blog'
github_repo: "https://github.com/azrrrrr/azrrrrr.github.io.git" # your code repository
# Sidebar settings
sidebar: true # whether or not to use the sidebar.
sidebar-about-description: "Grateful to life, for what it takes away and what it gives."
sidebar-avatar: https://ws2.sinaimg.cn/large/006tNbRwly1fxej6om0zxj30jg0jgq3o.jpg # use an absolute URL, since it's used on both `/` and `/about/`
# SNS settings
RSS: true
# weibo_username: SunnyAmor
zhihu_username: azr7
github_username: azrrrrr
# facebook_username: Amor_dr7
jianshu_username: 93d333ac3afd
# twitter_username: Amor_dr7
# Build settings
# from 2016, 'pygments' is unsupported on GitHub Pages. Use 'rouge' for highlighting instead.
permalink: pretty
paginate: 10
exclude: ["less","node_modules","Gruntfile.js","package.json","README.md"]
anchorjs: true # if you want to customize anchor. check out line:181 of `post.html`
# Gems
# from PR#40, to support local preview for Jekyll 3.0
plugins: [jekyll-paginate]
# Markdown settings
# replaced redcarpet with kramdown:
# although redcarpet can auto-highlight code, its lack of header IDs makes the catalog impossible, so I switched to kramdown
# document: http://jekyllrb.com/docs/configuration/#kramdown
markdown: kramdown
highlighter: rouge
kramdown:
input: GFM # use Github Flavored Markdown !important
# Comment system
# Disqus(https://disqus.com/)
disqus_username: azrrrrr
# Gitalk
gitalk:
  enable: true # whether to enable Gitalk comments
  clientID: 859e47f4d4c09b159b29 # generated clientID
  clientSecret: <KEY> # generated clientSecret
  # repo: azrrrrr.github.io # repository name
  owner: azrrrrr # GitHub username
  admin: azrrrrr
  distractionFreeMode: true # whether to enable a Facebook-like shadow overlay
# Analytics
# Analytics settings
# Baidu Analytics
ba_track_id: 84f8c229e6242111dd9ad43d36c8075f
# Google Analytics
ga_track_id: 'UA-131190596-1' # Format: UA-xxxxxx-xx
ga_domain: auto # the default is auto; I set a custom domain here, but if you don't have your own domain, keep it as auto
# Featured Tags
featured-tags: true # whether to show featured tags on the home page
featured-condition-size: 1 # a tag must be used more than this many times to appear on the home page
# Progressive Web Apps
chrome-tab-theme-color: "#000000"
service-worker: true
# Friends
friends: [
{
title: "阮一峰",
href: "http://www.ruanyifeng.com/blog/"
},
{
title: "张鑫旭",
href: "https://www.zhangxinxu.com/"
},
{
title: "朴灵",
href: "http://html5ify.com/"
},
{
title: "廖雪峰",
href: "https://www.liaoxuefeng.com/"
},
{
title: "颜海镜",
href: "https://yanhaijing.com/"
},
# {
# title: "AmorR",
# href: "https://www.cnblogs.com/AmorR/"
# },
{
title: "大漠穷秋",
href: "http://www.ngfans.net/"
},
{
title: "游魂",
href: "https://www.iyouhun.com/"
},
{
title: "奇淼",
href: "http://www.henrongyi.top/"
},
{
title: "小翼",
href: "https://www.my-fe.pub/"
},
{
title: "前端大爆炸",
href: "https://alili.tech/"
},
{
title: "sumorfly",
href: "http://sumorfly.github.io"
},
{
title: "jartto",
href: "http://jartto.wang/"
},
{
title: "极客教程",
href: "https://www.geekjc.com/"
},
{
title: "小菜",
href: "https://github.com/xtx1130/blog"
},
{
title: "<IJSE BLOG />",
href: "https://ijser.cn/"
}
]
# theme: jekyll-theme-midnight
|
_config.yml
|
trigger:
branches:
include:
- master
paths:
# only trigger if version number has changed
include:
- _version.py
stages:
- stage: Build_Test
jobs:
- job: Build_Test
pool:
vmImage: 'ubuntu-latest'
strategy:
matrix:
Python37:
python.version: '3.7'
Python38:
python.version: '3.8'
Python39:
python.version: '3.9'
Python310:
python.version: '3.10'
variables:
projectTestResultDir: junit
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '$(python.version)'
addToPath: true
displayName: 'Use Python $(python.version)'
# - script: |
# sudo apt-get update && sudo apt-get install build-essential
# name: gcc
# displayName: 'Install core tools (gcc)'
- script: |
pip install --upgrade pip && \
pip install setuptools wheel && \
pip install -r requirements.txt && \
pip install -e . && \
pip install -e .[gradio]
name: install
displayName: 'Install packages'
- script: |
pip install pytest pytest-cov
python -m pytest --junitxml=junit/test-results.xml --cov=skippa --cov-report=xml
name: run_tests
displayName: 'Run tests'
condition: succeeded()
- task: PublishTestResults@2
condition: succeededOrFailed()
inputs:
testResultsFiles: '**/test-*.xml'
testRunTitle: 'Publish test results for Python $(python.version)'
displayName: 'Publish test results'
- task: PublishCodeCoverageResults@1
inputs:
codeCoverageTool: Cobertura
summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml'
displayName: 'Publish coverage results'
- stage: Publish
dependsOn: Build_Test
condition: succeeded()
jobs:
- job: Publish
pool:
vmImage: 'ubuntu-latest'
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.8'
addToPath: true
displayName: 'Use Python 3.8'
- bash: |
pip install -r requirements.txt
pip install -r requirements_dev.txt
pip install -e .
make docs
make dist
      displayName: 'Install dependencies, build docs and dist'
- bash: |
        # upload the distribution files built in the previous step to PyPI
twine upload -u datasciencelab -p $(PYPIPASSWORD) --non-interactive dist/*
env:
PYPIPASSWORD: $(<PASSWORD>)
      displayName: 'Upload release to PyPI'
|
azure-pipelines.yml
|
---
swagger: '2.0'
info:
version: '0.1.0'
title: reference analysis
parameters:
someParam:
name: some
in: query
type: string
bodyParam:
name: some
in: body
schema:
# expect this $ref to be kept
$ref: '#/definitions/myBody'
responses:
notFound:
description: 'Not Found'
schema:
$ref: '#/definitions/notFound'
paths:
/some/where:
parameters:
- $ref: '#/parameters/someParam'
get:
parameters:
- $ref: '#/parameters/bodyParam'
- name: other
in: query
type: string
responses:
default:
$ref: '#/responses/notFound'
404:
description: ok
schema:
$ref: '#/definitions/myResponse'
304:
description: ok
schema:
$ref: 'transitive-1.yaml#/definitions/transitive-1.1'
204:
description: ok
schema:
$ref: '#/definitions/uniqueName1'
200:
description: 'RecordHolder'
schema:
type: object
properties:
prop0:
$ref: '#/definitions/myBody'
206:
description: ok
schema:
$ref: 'transitive-1.yaml#/definitions/a'
205:
description: ok
schema:
$ref: 'transitive-1.yaml#/definitions/b'
# arbitrary json pointers
post:
responses:
200:
description: ok
schema:
# this one gets resolved
$ref: 'transitive-2.yaml#/definitions/a/properties/b'
204:
description: ok
schema:
# this one gets resolved
$ref: 'transitive-1.yaml#/definitions/c/properties/d'
default:
description: default
schema:
# this one remains (same file)
$ref: '#/definitions/myDefaultResponse/properties/zzz'
/some/where/else:
get:
responses:
default:
description: default
schema:
$ref: '#/definitions/notFound'
/yet/again/some/where:
get:
responses:
default:
description: default
schema:
$ref: 'transitive-1.yaml#/somewhere'
/with/slice/container:
get:
responses:
default:
description: default
schema:
allOf:
- $ref: '#/definitions/uniqueName3'
- $ref: 'transitive-1.yaml#/definitions/uniqueName3'
/with/tuple/container:
get:
responses:
default:
description: default
schema:
type: array
items:
- $ref: '#/definitions/uniqueName3'
- $ref: 'transitive-1.yaml#/definitions/uniqueName3'
/with/tuple/conflict:
get:
responses:
default:
description: default
schema:
type: array
items:
- $ref: 'transitive-1.yaml#/definitions/uniqueName4'
- $ref: 'transitive-2.yaml#/definitions/uniqueName4'
/with/boolable/container:
get:
responses:
default:
description: default
schema:
type: object
additionalProperties:
$ref: 'transitive-1.yaml#/definitions/uniqueName5'
definitions:
myDefaultResponse:
type: object
properties:
zzz:
type: integer
myBody:
type: object
properties:
prop1:
type: integer
aA:
$ref: '#/definitions/aA'
aA:
type: string
format: date
bB:
type: string
format: date-time
myResponse:
type: object
properties:
prop2:
type: integer
notFound:
type: array
items:
type: integer
uniqueName1:
# expect this to be expanded after OAIGen stripping
$ref: 'transitive-1.yaml#/definitions/uniqueName1'
notUniqueName2:
# this one prevents OAIGen stripping
$ref: 'transitive-1.yaml#/definitions/uniqueName2'
uniqueName2:
$ref: 'transitive-1.yaml#/definitions/uniqueName2'
uniqueName3:
type: object
properties:
prop7:
type: integer
uniqueName5:
type: object
properties:
prop10:
type: integer
|
vendor/github.com/go-openapi/analysis/fixtures/oaigen/fixture-oaigen.yaml
|
name: LootCrates
main: com.crazicrafter1.lootcrates.Main
version: 3.3.3 # 3.3.2 was previous
api-version: 1.13
author: crazicrafter1
softdepend: [CRUtils, Gapi, QualityArmory, Skript, PlaceholderAPI]
commands:
lootcrates:
permission: lootcrates.admin
aliases: [crates, lc]
permissions:
lootcrates.open:
description: Allow for users to open crates
default: true
lootcrates.admin:
description: Allows access to /crates command
default: op
# 3.3.3 (1/22/2022)
# fixed a major overlooked bug where a
# player attempting to place an item into a consumable
# inventory (crafting/furnace/smoker) could cause crate
# deletion
# placement is now cancelled regardless of the item
# 3.3.2 / 3.3.1 (1/18/2022)
# mainly direct download for SpigotMC
# some fixes
# 3.3.2 is basically a clone
# 3.3.0 (1/14/2022)
# 1.18.1 support + fixes
# 3.2.5 (11/22/2021)
# fixed 1.17 only item in editor
# 3.2.4 (11/21/2021)
# fixed item edit when not swapping with another item bug
# 3.2.3 (11/6/2021)
# fixed duplicating slot panel on close event bug
# skript get crate item and all crates by name
# 3.2.2 (10/17/2021)
# using better menu titles
# 3.2.1 (10/15/2021)
# hotfixes
# 3.2.0 (10/14/2021)
# 1.14.4 | 1.16.5 | 1.17.1 confirmed to work
# in game editor
# no touch config
# 3.1.4 (4/26/2021)
# rename permissions
# 3.1.3 (4/6/2021)
# fixed an uncaught error when clicking
# 3.1.2 (4/5/2021)
# 1.8 compatibility
# 3.1.1 (4/5/2021)
# added customModelData for texture packs
# 3.1.0 (4/4/2021)
# Highlights:
# - fixed seasonal crates after being broken for a few months
# - added config option for seasonal crates
# - seasonal crates are seasonal (multiple days) instead of on a one day basis
# - crates are now detected via nbt not name/lore...
# 3.0.2 (4/2/2021)
# fixed misspelled enchantment and added soul speed
# some performance fixes
# 3.0.1 (3/22/2021)
# crates can *almost* now be edited ingame via a cool looking gui (so close)
# autoupdater
# 3.0.0 (2/19/2021)
# loot types are now completely object oriented
# added custom potion support
# LootGroup chances are cumulatively weighted instead of adding up to 100
# modified format of config, take a gander
# snazzy
# 2.1.7 (11/13/2020)
# fixed update checker
# 2.1.6 (11/13/2020)
# fixed an incorrectly caught error in config parsing
# and yes works for 1.16.3 and maybe even 1.16.4
# 2.1.5 (8/16/2020)
# fixed some very unexpected bugs that broke mechanics
# 1.16 works!!!
# 2.1.4
# tidied up some loose ends regarding opened crates
# working on gui!
# i think that's it
# 2.1.3
# changed some things to work for 1.8 and maybe earlier
# 2.1.2
# added an * arg to command to target all players
|
plugin.yml
|
name: Feature Request
description: Request a new feature from Apache ECharts
title: "[Feature] "
labels: [new-feature]
body:
- type: markdown
attributes:
value: |
The issue list is reserved exclusively for bug reports and feature requests.
For usage questions, please use the following resources:
- Read the [docs](https://echarts.apache.org/option.html)
- Find in [examples](https://echarts.apache.org/examples/)
- Look for / ask questions on [Stack Overflow](https://stackoverflow.com/questions/tagged/echarts)
        For non-technical support or general questions, you can email [<EMAIL>](mailto:<EMAIL>). And don't forget to subscribe to our [mailing list](https://echarts.apache.org/maillist.html) to stay up to date with the project.
Also try to search for your issue - it may have already been answered or even fixed in the development branch. However, if you find that an old, closed issue still persists in the latest version, you should open a new issue using the form below instead of commenting on the old issue.
- type: textarea
attributes:
label: What problem does this feature solve?
description: |
Explain your use case, context, and rationale behind this feature request. More importantly, what is the end user experience you are trying to build that led to the need for this feature?
An important design goal of ECharts is keeping the API surface small and straightforward. In general, we only consider adding new features that solve a problem that cannot be easily dealt with using existing APIs (i.e. not just an alternative way of doing things that can already be done). The problem should also be common enough to justify the addition.
validations:
required: true
- type: textarea
attributes:
label: What does the proposed API look like?
description: Describe how you propose to solve the problem and provide code samples of how the API would work once implemented. Note that you can use [Markdown](https://docs.github.com/en/github/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) to format your code blocks.
validations:
required: true
|
.github/ISSUE_TEMPLATE/feature_request.yml
|
language: php
# The Travis CI container mode has random functional test failures, so we
# must use sudo here.
sudo: true
php:
- 7.1
- 7.3
services:
- mysql
env:
global:
- MODULE=devel
matrix:
- DRUPAL_CORE=8.8.x
- DRUPAL_CORE=8.9.x
- DRUPAL_CORE=9.0.x
matrix:
fast_finish: true
# PHP 7.1 is not supported from Core 9.0 onwards.
exclude:
- php: 7.1
env: DRUPAL_CORE=9.0.x
# Be sure to cache composer downloads.
cache:
directories:
- $HOME/.composer
before_script:
- echo $MODULE
# Remove Xdebug as we don't need it and it causes
# PHP Fatal error: Maximum function nesting level of '256' reached.
# We also don't care if that file exists or not on PHP 7.
- phpenv config-rm xdebug.ini || true
# Navigate out of module directory to prevent blown stack by recursive module
# lookup.
- cd ..
# Create database.
- mysql -e "create database $MODULE"
# Export database variable for kernel tests.
- export SIMPLETEST_DB=mysql://root:@127.0.0.1/$MODULE
# Download Drupal core from the Github mirror because it is faster.
- travis_retry git clone --branch $DRUPAL_CORE --depth 1 https://github.com/drupal/drupal.git
- cd drupal
# Store the path to Drupal root.
- DRUPAL_ROOT=$(pwd)
- echo $DRUPAL_ROOT
# Make a directory for our module and copy the built source into it.
- mkdir $DRUPAL_ROOT/modules/$MODULE
- cp -R $TRAVIS_BUILD_DIR/* $DRUPAL_ROOT/modules/$MODULE/
# Apply patch to reverse the addition of doctrine/debug in composer.json so that tests can run again.
- cd $DRUPAL_ROOT/modules/$MODULE;
- wget -q -O - https://www.drupal.org/files/issues/2020-04-17/3125678-9.remove-doctrine-common.patch | patch -p1 --verbose;
- cd $DRUPAL_ROOT;
# Run composer self-update and install.
- travis_retry composer self-update && travis_retry composer install
# Run composer update in the module directory in order to fetch dependencies.
- travis_retry composer update -d $DRUPAL_ROOT/modules/$MODULE
# Install drush
- travis_retry composer require drush/drush:"^9.0 || ^10.0"
# Coder is already installed as part of composer install. We just need to set
# the installed_paths to pick up the Drupal standards.
- $DRUPAL_ROOT/vendor/bin/phpcs --config-set installed_paths $DRUPAL_ROOT/vendor/drupal/coder/coder_sniffer
# Start a web server on port 8888, run in the background.
- php -S localhost:8888 &
# Export web server URL for browser tests.
- export SIMPLETEST_BASE_URL=http://localhost:8888
# Interim patch for 9.0 only to avoid the Webprofiler toolbar halting the test run with 'class not found'.
- if [ $DRUPAL_CORE == "9.0.x" ]; then
cd $DRUPAL_ROOT/modules/$MODULE;
wget -q -O - https://www.drupal.org/files/issues/2020-04-07/3097125-33.replace-JavascriptTestBase.patch | patch -p1 --verbose;
fi
script:
# Run the PHPUnit tests.
- cd $DRUPAL_ROOT
- ./vendor/bin/phpunit -c ./core/phpunit.xml.dist --verbose --group=devel,devel_generate,webprofiler ./modules/$MODULE
# Check for coding standards. First change directory to our module.
- cd $DRUPAL_ROOT/modules/$MODULE
# List all the sniffs that were used.
- $DRUPAL_ROOT/vendor/bin/phpcs --version
- $DRUPAL_ROOT/vendor/bin/phpcs -i
- $DRUPAL_ROOT/vendor/bin/phpcs -e
# Show the violations in detail and do not fail for any errors or warnings.
- $DRUPAL_ROOT/vendor/bin/phpcs -s --report-width=130 --colors --runtime-set ignore_warnings_on_exit 1 --runtime-set ignore_errors_on_exit 1 .
# Run again to give a summary and total count.
- $DRUPAL_ROOT/vendor/bin/phpcs --report-width=130 --colors --runtime-set ignore_warnings_on_exit 1 --runtime-set ignore_errors_on_exit 1 --report=summary .
|
web/modules/contrib/devel/.travis.yml
|
version: 2.1
orbs:
ansible: orbss/ansible-playbook@0.0.4
executors:
python:
docker:
- image: circleci/python
jobs:
ansible-playbook:
executor: python
parameters:
version:
description: |
Ansible version
type: string
default: ''
galaxy-options:
description: |
ansible-galaxy command options
type: string
default: ''
galaxy-requirements-file:
description: |
Ansible Galaxy requirements file path
type: string
default: ''
galaxy-roles-path:
description: |
ansible-galaxy command roles-path option
type: string
default: ''
inventory:
        description: |
          Name of the environment variable that holds the Ansible inventory.
          The default must stay empty, so do not store any value in that
          variable; register the inventory data in base64 format.
type: env_var_name
default: NONEXISTENT_ANSIBLE_INVENTORY
inventory-parameters:
description: |
Ansible inventory parameters
type: string
default: ''
playbook:
description: |
The path of Ansible playbook
type: string
playbook-options:
description: |
Ansible-playbook command options
type: string
default: ''
private-key:
        description: |
          Name of the environment variable that holds the SSH private key.
          The default must stay empty, so do not store any value in that
          variable; register the key data in base64 format.
type: env_var_name
default: NONEXISTENT_ANSIBLE_SSH_KEY
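      # Assumed setup for the two env_var_name parameters above: encode the
      # files and store the results as CircleCI project environment variables,
      # for example:
      #   base64 -w0 inventory.ini   # store as ANSIBLE_INVENTORY
      #   base64 -w0 ~/.ssh/id_rsa   # store as ANSIBLE_SSH_KEY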
steps:
- checkout
- ansible/install:
version: <<parameters.version>>
- ansible/galaxy:
galaxy-options: <<parameters.galaxy-options>>
galaxy-requirements-file: <<parameters.galaxy-requirements-file>>
galaxy-roles-path: <<parameters.galaxy-roles-path>>
- ansible/playbook:
inventory: <<parameters.inventory>>
inventory-parameters: <<parameters.inventory-parameters>>
playbook: <<parameters.playbook>>
playbook-options: <<parameters.playbook-options>>
private-key: <<parameters.private-key>>
workflows:
version: 2
test-checkout:
jobs:
- ansible-playbook:
galaxy-requirements-file: tests/requirements.yml
inventory: ANSIBLE_INVENTORY
inventory-parameters:
playbook: tests/test.yml
private-key: ANSIBLE_SSH_KEY
|
.circleci/config.yml
|
uid: "com.microsoft.store.partnercenter.customerdirectoryroles.UserMemberOperations.UserMemberOperations*"
fullName: "com.microsoft.store.partnercenter.customerdirectoryroles.UserMemberOperations.UserMemberOperations"
name: "UserMemberOperations"
nameWithType: "UserMemberOperations.UserMemberOperations"
members:
- uid: "com.microsoft.store.partnercenter.customerdirectoryroles.UserMemberOperations.UserMemberOperations(com.microsoft.store.partnercenter.IPartner,java.lang.String,java.lang.String,java.lang.String)"
fullName: "com.microsoft.store.partnercenter.customerdirectoryroles.UserMemberOperations.UserMemberOperations(IPartner rootPartnerOperations, String customerId, String roleId, String userId)"
name: "UserMemberOperations(IPartner rootPartnerOperations, String customerId, String roleId, String userId)"
nameWithType: "UserMemberOperations.UserMemberOperations(IPartner rootPartnerOperations, String customerId, String roleId, String userId)"
summary: "Initializes a new instance of the UserMemberOperations class."
parameters:
- description: "The partner operations instance."
name: "rootPartnerOperations"
type: "<xref href=\"com.microsoft.store.partnercenter.IPartner?alt=com.microsoft.store.partnercenter.IPartner&text=IPartner\" data-throw-if-not-resolved=\"False\" />"
- description: "The customer identifier."
name: "customerId"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
- description: "The directory role identifier."
name: "roleId"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
- description: "The user identifier."
name: "userId"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
syntax: "public UserMemberOperations(IPartner rootPartnerOperations, String customerId, String roleId, String userId)"
type: "constructor"
metadata: {}
package: "com.microsoft.store.partnercenter.customerdirectoryroles"
artifact: com.microsoft.store:partnercenter:1.15.3
|
docs-ref-autogen/com.microsoft.store.partnercenter.customerdirectoryroles.UserMemberOperations.UserMemberOperations.yml
|
- position: 1
driverNumber: 20
driverId: stirling-moss
constructorId: lotus
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:39.100"
gap:
interval:
laps:
- position: 2
driverNumber: 36
driverId: richie-ginther
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: dunlop
time: "1:39.200"
gap: "+0.100"
interval: "+0.100"
laps:
- position: 3
driverNumber: 28
driverId: jim-clark
constructorId: lotus
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:39.600"
gap: "+0.500"
interval: "+0.400"
laps:
- position: 4
driverNumber: 18
driverId: graham-hill
constructorId: brm
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:39.600"
gap: "+0.500"
interval: "+0.000"
laps:
- position: 5
driverNumber: 38
driverId: phil-hill
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: dunlop
time: "1:39.800"
gap: "+0.700"
interval: "+0.200"
laps:
- position: 6
driverNumber: 40
driverId: wolfgang-von-trips
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: dunlop
time: "1:39.800"
gap: "+0.700"
interval: "+0.000"
laps:
- position: 7
driverNumber: 26
driverId: bruce-mclaren
constructorId: cooper
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:39.800"
gap: "+0.700"
interval: "+0.000"
laps:
- position: 8
driverNumber: 16
driverId: tony-brooks
constructorId: brm
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:40.100"
gap: "+1.000"
interval: "+0.300"
laps:
- position: 9
driverNumber: 2
driverId: jo-bonnier
constructorId: porsche
engineManufacturerId: porsche
tyreManufacturerId: dunlop
time: "1:40.300"
gap: "+1.200"
interval: "+0.200"
laps:
- position: 10
driverNumber: 30
driverId: innes-ireland
constructorId: lotus
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:40.500"
gap: "+1.400"
interval: "+0.200"
laps:
- position: 11
driverNumber: 4
driverId: dan-gurney
constructorId: porsche
engineManufacturerId: porsche
tyreManufacturerId: dunlop
time: "1:40.600"
gap: "+1.500"
interval: "+0.100"
laps:
- position: 12
driverNumber: 22
driverId: john-surtees
constructorId: cooper
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:41.100"
gap: "+2.000"
interval: "+0.500"
laps:
- position: 13
driverNumber: 6
driverId: hans-herrmann
constructorId: porsche
engineManufacturerId: porsche
tyreManufacturerId: dunlop
time: "1:41.100"
gap: "+2.000"
interval: "+0.000"
laps:
- position: 14
driverNumber: 8
driverId: michael-may
constructorId: lotus
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:42.000"
gap: "+2.900"
interval: "+0.900"
laps:
- position: 15
driverNumber: 32
driverId: cliff-allison
constructorId: lotus
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:42.300"
gap: "+3.200"
interval: "+0.300"
laps:
- position: 16
driverNumber: 42
driverId: maurice-trintignant
constructorId: cooper
engineManufacturerId: maserati
tyreManufacturerId: dunlop
time: "1:42.400"
gap: "+3.300"
interval: "+0.100"
laps:
- position: 17
driverNumber: 34
driverId: henry-taylor
constructorId: lotus
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:42.600"
gap: "+3.500"
interval: "+0.200"
laps:
- position: 18
driverNumber: 14
driverId: masten-gregory
constructorId: cooper
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:42.700"
gap: "+3.600"
interval: "+0.100"
laps:
- position: 19
driverNumber: 10
driverId: lucien-bianchi
constructorId: emeryson
engineManufacturerId: maserati
tyreManufacturerId: dunlop
time: "1:42.900"
gap: "+3.800"
interval: "+0.200"
laps:
- position: 20
driverNumber: 12
driverId: olivier-gendebien
constructorId: emeryson
engineManufacturerId: maserati
tyreManufacturerId: dunlop
time: "1:43.700"
gap: "+4.600"
interval: "+0.800"
laps:
- position: 21
driverNumber: 24
driverId: jack-brabham
constructorId: cooper
engineManufacturerId: climax
tyreManufacturerId: dunlop
time: "1:44.000"
gap: "+4.900"
interval: "+0.300"
laps:
|
src/data/seasons/1961/races/01-monaco/qualifying-results.yml
|
- type: replace
path: /instance_groups/-
value:
name: broker
instances: 1
azs: [z1]
vm_type: default
stemcell: default
networks:
- name: default
jobs:
- name: broker
release: safe
properties:
username: vault
password: ((<PASSWORD>))
guid: ((vault-broker-guid))
service:
name: vault
description: Your Very Own Vault of Secrets
tags: [vault, credentials, secure, key-value]
backend:
token: ((vault-token))
skip_verify: true
# broker registration errand
- type: replace
path: /instance_groups/-
value:
lifecycle: errand
name: register-broker
instances: 1
azs: [z1]
vm_type: default
stemcell: default
networks:
- name: default
jobs:
- name: broker-registrar
release: broker-registrar
properties:
servicebroker:
name: vault
cf:
api_url: ((cf-api-url))
username: ((cf-username))
password: ((<PASSWORD>))
skip_ssl_validation: ((cf-skip-ssl-validation))
- type: replace
path: /instance_groups/-
value:
lifecycle: errand
name: unregister-broker
instances: 1
azs: [z1]
vm_type: default
stemcell: default
networks:
- name: default
jobs:
- name: broker-deregistrar
release: broker-registrar
properties:
servicebroker:
name: vault
cf:
api_url: ((cf-api-url))
username: ((cf-username))
password: ((<PASSWORD>))
skip_ssl_validation: ((cf-skip-ssl-validation))
- type: replace
path: /releases/-
value:
name: broker-registrar
version: 3.3.1
url: https://github.com/cloudfoundry-community/broker-registrar-boshrelease/releases/download/v3.3.1/broker-registrar-3.3.1.tgz
sha1: c95283460a4f962cee1cacabb7333774783a24e9
- type: replace
path: /variables/-
value:
name: cf-api-url
- type: replace
path: /variables/-
value:
name: cf-username
- type: replace
path: /variables/-
value:
name: cf-password
- type: replace
path: /variables/-
value:
name: cf-skip-ssl-validation
- type: replace
path: /variables/-
value:
name: vault-token
- type: replace
path: /variables/-
value:
name: vault-broker-guid
- type: replace
path: /variables/-
value:
name: vault-broker-password
type: password
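# A sketch of applying this ops file with the BOSH CLI (the base manifest
# path and all variable values below are assumptions):
#   bosh deploy manifests/vault.yml -o manifests/ops/broker.yml \
#     -v cf-api-url=https://api.sys.example.com -v cf-username=admin \
#     -v cf-password=secret -v cf-skip-ssl-validation=false \
#     -v vault-token=s.xxxxx -v vault-broker-guid=some-guid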
|
manifests/ops/broker.yml
|
ProgramName: fsimilar
PackageName: main
Name: fsimilar
Desc: "find/file similar"
Text: Find similar files
Global: true
NumOption: cli.AtLeast(1)
#NumArg: cli.ExactN(1)
#UsageLead: "Usage:\\n fsimilar [Options]\\n\\nExample:\\n find . \\\\( -type f -o -type l \\\\) -printf '%%7s %%p\\\\n' | fsimilar -i -S\\n mlocate -i soccer | fsimilar -i | fsimilar -i -Q"
Options:
- Name: SizeGiven
Type: bool
Flag: S,size-given
Usage: size of the files in input as first field
- Name: QuerySize
Type: bool
Flag: Q,query-size
Usage: query the file sizes from os
- Name: Filei
Type: '*clix.Reader'
Flag: '*i,input'
Usage: input from stdin or the given file (mandatory)
- Name: Phonetic
Type: bool
Flag: p,phonetic
  Usage: use phonetics as words for better error tolerance
- Name: Final
Type: bool
Flag: F,final
Usage: produce final output, the recommendations
- Name: Ext
Type: string
Flag: e,ext
Usage: "extension to override all files' to (for ffcvt)"
- Name: CfgPath
Type: string
Flag: c,cp
Usage: config path, path that hold all template files
Value: '$FSIM_CP'
- Name: Verbose
Type: cli.Counter
Flag: v,verbose
Usage: verbose mode (multiple -v increase the verbosity)
Command:
- Name: sim
Desc: "Filter the input using simhash similarity check"
Text: 'Usage:\n mlocate -i soccer | fsimilar sim -i'
#NumArg: cli.AtLeast(1)
NumOption: cli.AtLeast(1)
Options:
- Name: Distance
Type: uint8
Flag: d,dist
      Usage: the Hamming distance of hashes within which to deem files similar
Value: 3
- Name: vec
Desc: "Use Vector Space for similarity check"
Text: 'Usage:\n { mlocate -i soccer; mlocate -i football; } | fsimilar sim -i | fsimilar vec -i -S -Q -F'
#NumArg: cli.AtLeast(1)
NumOption: cli.AtLeast(1)
Options:
- Name: Threshold
Type: float64
Flag: t,thr
Usage: the threshold above which to deem similar (0.8 = 80%%)
Value: 0.86
|
fsimilar_cli.yaml
|
name: AtlasRelatedObjectIdBase
uid: '@azure-rest/purview-catalog.AtlasRelatedObjectIdBase'
package: '@azure-rest/purview-catalog'
summary: ''
fullName: AtlasRelatedObjectIdBase
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
- name: displayText
uid: '@azure-rest/purview-catalog.AtlasRelatedObjectIdBase.displayText'
package: '@azure-rest/purview-catalog'
summary: The display text.
fullName: displayText
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'displayText?: string'
return:
type: string
description: ''
- name: entityStatus
uid: '@azure-rest/purview-catalog.AtlasRelatedObjectIdBase.entityStatus'
package: '@azure-rest/purview-catalog'
summary: >-
Status of the entity - can be active or deleted. Deleted entities are not
removed from Atlas store.
fullName: entityStatus
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'entityStatus?: Status'
return:
type: <xref uid="@azure-rest/purview-catalog.Status" />
description: ''
- name: relationshipAttributes
uid: >-
@azure-rest/purview-catalog.AtlasRelatedObjectIdBase.relationshipAttributes
package: '@azure-rest/purview-catalog'
summary: >-
Captures details of struct contents. Not instantiated directly, used only
via AtlasEntity, AtlasClassification.
fullName: relationshipAttributes
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'relationshipAttributes?: AtlasStruct'
return:
type: <xref uid="@azure-rest/purview-catalog.AtlasStruct" />
description: ''
- name: relationshipGuid
uid: '@azure-rest/purview-catalog.AtlasRelatedObjectIdBase.relationshipGuid'
package: '@azure-rest/purview-catalog'
summary: The GUID of the relationship.
fullName: relationshipGuid
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'relationshipGuid?: string'
return:
type: string
description: ''
- name: relationshipStatus
uid: '@azure-rest/purview-catalog.AtlasRelatedObjectIdBase.relationshipStatus'
package: '@azure-rest/purview-catalog'
summary: The enum of relationship status.
fullName: relationshipStatus
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'relationshipStatus?: StatusAtlasRelationship'
return:
type: <xref uid="@azure-rest/purview-catalog.StatusAtlasRelationship" />
description: ''
- name: relationshipType
uid: '@azure-rest/purview-catalog.AtlasRelatedObjectIdBase.relationshipType'
package: '@azure-rest/purview-catalog'
summary: ''
fullName: relationshipType
remarks: ''
isPreview: false
isDeprecated: false
syntax:
content: 'relationshipType?: string'
return:
type: string
description: ''
|
preview-packages/docs-ref-autogen/@azure-rest/purview-catalog/AtlasRelatedObjectIdBase.yml
|
name: Dev
on: [push]
defaults:
run:
shell: bash
jobs:
test:
name: Test
runs-on: [self-hosted, nix]
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579
- name: Test
shell: nix develop -v -c bash {0}
run: |-
set -euo pipefail
./cli.sh code_quality
./cli.sh test
./cli.sh test_helm_chart
build:
name: Build image
runs-on: [self-hosted, nix]
needs: [test]
env:
IMAGE_REPOSITORY: public.ecr.aws/shopstic
strategy:
matrix:
image: [app, init]
arch: [x86_64, aarch64]
include:
- image: app
imageName: k8s-env-injector
- image: init
imageName: k8s-env-injector-init
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579
- name: Login to Amazon ECR
uses: ./.github/actions/login-to-public-ecr
with:
imageRepo: ${{ env.IMAGE_REPOSITORY }}
- name: Build container images
run: |-
nix build -L -v '.#packages.${{ matrix.arch }}-linux.${{ matrix.image }}Image'
- name: Push
env:
GITHUB_SHA: ${{ github.sha }}
NIX_ARCH: ${{ matrix.arch }}
IMAGE_NAME: ${{ matrix.imageName }}
shell: nix develop -v -c bash {0}
run: |-
IMAGE_ARCH="amd64"
if [[ "${NIX_ARCH}" == "aarch64" ]]; then
IMAGE_ARCH="arm64"
fi
IMAGE_TAG="dev-${IMAGE_ARCH}-${GITHUB_SHA}"
skopeo --insecure-policy copy \
docker-archive:./result \
docker://"${IMAGE_REPOSITORY}"/"${IMAGE_NAME}":"${IMAGE_TAG}"
push-multi-arch:
name: Push multi-arch image manifest
runs-on: [self-hosted, nix]
needs: [build]
env:
IMAGE_REPOSITORY: public.ecr.aws/shopstic
strategy:
matrix:
image: [app, init]
include:
- image: app
imageName: k8s-env-injector
- image: init
imageName: k8s-env-injector-init
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579
- name: Login to Amazon ECR
uses: ./.github/actions/login-to-public-ecr
with:
imageRepo: ${{ env.IMAGE_REPOSITORY }}
- name: Push multi-arch manifest
shell: nix develop -v -c bash {0}
env:
GITHUB_SHA: ${{ github.sha }}
IMAGE_NAME: ${{ matrix.imageName }}
run: |-
manifest-tool push from-args \
--platforms linux/amd64,linux/arm64 \
--template "${IMAGE_REPOSITORY}"/"${IMAGE_NAME}":dev-ARCH-"${GITHUB_SHA}" \
--target "${IMAGE_REPOSITORY}"/"${IMAGE_NAME}":dev-"${GITHUB_SHA}"
push-helm-chart:
name: Push Helm chart
runs-on: [self-hosted, nix]
needs: [build]
env:
IMAGE_REPOSITORY: public.ecr.aws/shopstic
steps:
- uses: actions/checkout@ec3a7ce113134d7a93b817d10a8272cb61118579
- name: Login to Amazon ECR
uses: ./.github/actions/login-to-public-ecr
with:
imageRepo: ${{ env.IMAGE_REPOSITORY }}
- name: Push Helm chart
shell: nix develop -v -c bash {0}
env:
HELM_CHART_VERSION: 1.0.0-${{ github.sha }}
HELM_APP_VERSION: dev-${{ github.sha }}
run: |-
./cli.sh push_helm_chart \
"${HELM_CHART_VERSION}" \
"${HELM_APP_VERSION}" \
"oci://${IMAGE_REPOSITORY}/charts"
|
.github/workflows/dev.yaml
|
service: backend-api
# Create an optimized package for our functions
package:
individually: true
plugins:
- serverless-bundle # Package our functions with Webpack
- serverless-offline
- ./plugins/offline-invalidate.js
- serverless-dotenv-plugin # Load .env as environment variables
- serverless-pseudo-parameters
provider:
name: aws
runtime: nodejs12.x
stage: dev
region: eu-west-1
  # To load environment variables externally,
  # rename env.example to .env and fill in the
  # values referenced in the environment block
  # below. Also, make sure to not commit your .env.
  #
environment:
STAGE: ${self:custom.stage}
GEOCODE_API_KEY: ${env:GEOCODE_API_KEY}
GEOCODE_ENDPOINT: ${env:GEOCODE_ENDPOINT}
GEOCODE_REGION: ${env:GEOCODE_REGION}
COMPANIES_TABLE_NAME: ${self:custom.stage}-companies
USERS_TABLE_NAME: ${self:custom.stage}-users
COUPONS_TABLE_NAME: ${self:custom.stage}-coupons
STRIPE_CONNECT_URL: ${env:STRIPE_CONNECT_URL}
APPLICATION_URL: ${env:APPLICATION_URL}
WEBFRONTEND_URL: ${env:WEBFRONTEND_URL}
SEND_COUPON_EMAIL: ${env:COUPON_EMAIL}
STRIPE_CHECKOUT_REDIRECT_SUCCESS: ${env:STRIPE_CHECKOUT_REDIRECT_SUCCESS}
STRIPE_CHECKOUT_REDIRECT_CANCEL: ${env:STRIPE_CHECKOUT_REDIRECT_CANCEL}
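  # A minimal .env sketch for the ${env:...} references above (placeholder
  # values; serverless-dotenv-plugin loads this file at deploy time):
  #   GEOCODE_API_KEY=replace-me
  #   GEOCODE_ENDPOINT=https://geocode.example.com
  #   STRIPE_CONNECT_URL=https://connect.stripe.com/oauth/authorize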
# 'iamRoleStatements' defines the permission policy for the Lambda function.
# In this case Lambda functions are granted with permissions to access DynamoDB.
iamRoleStatements:
- Effect: Allow
Action:
- dynamodb:Query
- dynamodb:Scan
- dynamodb:GetItem
Resource: 'arn:aws:dynamodb:eu-west-1:*:*'
custom:
stage: ${opt:stage, self:provider.stage}
functions:
# Defines an HTTP API endpoint that calls the main function in address/get.js
# - path: url path is /address/{postalCode}/{houseNumber}
# - method: GET request
addressGet:
handler: address/get.main
events:
- http:
path: address/{postalCode}/{houseNumber}
method: get
cors: true
authorizer:
type: COGNITO_USER_POOLS
authorizerId:
Ref: apiGatewayAuthorizer
companyGet:
handler: company/get.main
events:
- http:
path: company/{id}
method: get
companiesGet:
handler: company/getAll.main
events:
- http:
path: companies
method: get
cors: true
companyCreate:
role: readAndWriteToDynamoDbWithoutSecret
handler: company/create.main
companyUpdate:
role: readAndWriteToDynamoDbWithoutSecret
handler: company/update.main
events:
- http:
path: company/{id}
method: put
cors: true
authorizer:
type: COGNITO_USER_POOLS
authorizerId:
Ref: apiGatewayAuthorizer
companyDelete:
role: readAndWriteToDynamoDbWithoutSecret
handler: company/delete.main
events:
- http:
path: company/{id}
method: delete
cors: true
authorizer:
type: COGNITO_USER_POOLS
authorizerId:
Ref: apiGatewayAuthorizer
testCognito:
handler: handler.test1
events:
- http:
path: test1
method: get
cors: true
authorizer:
type: COGNITO_USER_POOLS
authorizerId:
Ref: apiGatewayAuthorizer
preSignupLambdaFunction:
role: readAndWriteToDynamoDbWithoutSecret
handler: cognito/autoConfirmUser.main
stripeConnectCallback:
role: readAndWriteToDynamoDbWithSecretAndEmail
handler: stripe/connectCallback.main
events:
- http:
path: stripe/connect/callback
method: get
stripeConnect:
role: readAndWriteToDynamoDbWithSecretAndEmail
handler: stripe/connect.main
events:
- http:
path: stripe/connect
method: get
cors: true
authorizer:
type: COGNITO_USER_POOLS
authorizerId:
Ref: apiGatewayAuthorizer
stripeCheckoutSession:
role: readAndWriteToDynamoDbWithSecretAndEmail
handler: stripe/createCheckoutSession.main
events:
- http:
path: stripe/checkout-session
method: get
stripeCheckoutCompleted:
role: readAndWriteToDynamoDbWithSecretAndEmail
handler: stripe/checkoutCompleted.main
events:
- http:
path: stripe/checkout-completed
method: post
stripeApplicationAccountDeauthorized:
role: readAndWriteToDynamoDbWithSecretAndEmail
handler: stripe/accountApplicationDeauthorized.main
events:
- http:
path: stripe/account-application-deauthorized
method: post
# For quick testing only
# sendCouponEmail:
# handler: email/send-coupon.lambda.main
# events:
# - http:
# path: coupon
# method: get
resources:
- ${file(resources/cognito.yml)}
- ${file(resources/roles.yml)}
|
serverless.yml
|
---
- name: One-Time Setup
hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Get Timestamp
command: date +%Y-%m-%d_%H%M%S
register: timestamp
always_run: yes
- name: Get Git Commit Short Hash
command: git rev-parse --short HEAD
register: git_rev
always_run: yes
- name: Base Directory Setup
hosts: all
connection: local
gather_facts: no
tasks:
- include: tasks/common.yml
- name: Create Log Directory
file:
path={{ log_dir }}
state=directory
- name: Create Diff Directory
file:
path={{ diff_dir }}
state=directory
- name: Clean Build Directory
file:
path={{ build_dir }}
state=absent
- name: Clean Config Directory
file:
path={{ config_dir }}
state=absent
- name: Create Build Directory
file:
path={{ build_dir }}
state=directory
- name: Create Config Directory
file:
path={{ config_dir }}
state=directory
- name: Fact Collection
hosts: all
connection: local
gather_facts: no
tasks:
- name: Gather Junos Facts
junos_get_facts:
host={{ inventory_hostname }}
user={{ user }}
passwd=<PASSWORD>
register: junos
when: vendor == 'juniper'
- name: Build Config
hosts: all
connection: local
gather_facts: no
roles: [ common ]
- name: Compile Config
hosts: all
connection: local
gather_facts: no
tasks:
- name: Assemble Fragments
assemble:
src={{ build_dir }}
dest={{ config_file }}
- name: Prepend Root XML Tag
lineinfile:
dest={{ config_file }}
line="<configuration>"
insertbefore=BOF
when: vendor == 'juniper'
- name: Append Root XML Tag
lineinfile:
dest={{ config_file }}
line="</configuration>"
when: vendor == 'juniper'
- name: Deploy Config
hosts: all
gather_facts: no
tasks:
- name: Deploy Juniper Networks
junos_install_config:
host={{ inventory_hostname }}
user={{ user }}
passwd=<PASSWORD>
file={{ config_file }}
logfile={{ log_dir }}/{{ hostvars.localhost.timestamp.stdout }}.log
diffs_file={{ diff_dir }}/{{ hostvars.localhost.timestamp.stdout }}.log
comment={{ hostvars.localhost.git_rev.stdout }}
when: vendor == 'juniper'
- name: Enable Quagga
copy:
src=files/quagga_daemons
dest=/etc/quagga/daemons
owner=quagga
mode=0644
when: vendor == 'cumulus'
- name: Deploy Cumulus Networks
copy:
src={{ config_file }}
dest=/etc/quagga/Quagga.conf
owner=quagga
mode=0644
when: vendor == 'cumulus'
- name: Deploy Cisco Systems
netmiko_install_config:
host={{ inventory_hostname }}
user=vagrant
passwd=<PASSWORD>
file={{ config_file }}
log_file={{ log_dir }}/{{ hostvars.localhost.timestamp.stdout }}.log
diff_file={{ diff_dir }}/{{ hostvars.localhost.timestamp.stdout }}.log
when: vendor == 'ios'
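# Typical invocation of this playbook (sketch; the inventory file name and
# host limit are illustrative):
#   ansible-playbook deploy.yml -i hosts --limit leaf01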
|
deploy.yml
|
title: Immersive Reader documentation
summary: Immersive Reader is a tool that implements proven techniques to improve reading comprehension for emerging readers, language learners, and people with learning differences. You can use Immersive Reader in your web application with the Immersive Reader SDK.
metadata:
  title: Immersive Reader documentation
  description: Immersive Reader is a tool that implements proven techniques to improve reading comprehension for emerging readers, language learners, and people with learning differences. You can use Immersive Reader in your web application with the Immersive Reader SDK.
services: service
ms.service: cognitive-services
ms.subservice: immersive-reader
ms.topic: landing-page
ms.collection: collection
author: IEvangelist
ms.author: dapine
ms.date: 01/23/2020
ms.openlocfilehash: 4cc05d3b99530e5ba923bfcbf7f749fbcd1c0f20
ms.sourcegitcommit: d135e9a267fe26fbb5be98d2b5fd4327d355fe97
ms.translationtype: MT
ms.contentlocale: hu-HU
ms.lasthandoff: 03/10/2021
ms.locfileid: "102616638"
landingContent:
  - title: About Immersive Reader
    linkLists:
      - linkListType: overview
        links:
          - text: What is Immersive Reader?
            url: overview.md
      - linkListType: video
        links:
          - text: Get started - short engineering videos
            url: https://aka.ms/ImmersiveReaderSDKVideos
  - title: Get started
    linkLists:
      - linkListType: quickstart
        links:
          - text: Using C# (ASP.NET Core)
            url: ./quickstarts/client-libraries.md?pivots=programming-language-csharp
          - text: Using Node.js
            url: ./quickstarts/client-libraries.md?pivots=programming-language-nodejs
          - text: Using Java (Android)
            url: ./quickstarts/client-libraries.md?pivots=programming-language-java-android
          - text: Using Kotlin (Android)
            url: ./quickstarts/client-libraries.md?pivots=programming-language-kotlin
          - text: Using Swift (iOS)
            url: ./quickstarts/client-libraries.md?pivots=programming-language-swift
  - title: Using Immersive Reader
    linkLists:
      - linkListType: how-to-guide
        links:
          - text: Create an Immersive Reader resource
            url: how-to-create-immersive-reader.md
          - text: Use multiple Immersive Reader resources
            url: how-to-multiple-resources.md
          - text: Launch the Immersive Reader
            url: how-to-launch-immersive-reader.md
          - text: Set the cookie policy
            url: how-to/set-cookie-policy.md
          - text: Display math
            url: how-to/display-math.md
          - text: Customize the Immersive Reader button
            url: how-to-customize-launch-button.md
          - text: Cache the authentication token
            url: how-to-cache-token.md
          - text: Configure read aloud
            url: how-to-configure-read-aloud.md
          - text: Configure translation
            url: how-to-configure-translation.md
          - text: Store user preferences
            url: how-to-store-user-preferences.md
      - linkListType: reference
        links:
          - text: JavaScript SDK
            url: reference.md
          - text: Release notes
            url: release-notes.md
  - title: Help and feedback
    linkLists:
      - linkListType: reference
        links:
          - text: Support and help options
            url: ../cognitive-services-support-options.md?context=/azure/cognitive-services/immersive-reader/context/context
|
articles/cognitive-services/immersive-reader/index.yml
|
version: '2'
services:
##################################################################
app:
image: shotgun-app:%VERSION%
ports:
- "<network_interface_ip>:80:80"
environment:
SG_SITE_URL: shotgun.mystudio.test
POSTGRES_HOST: db # Hostname or DB container name
POSTGRES_DB: shotgrid
POSTGRES_PORT: 5432
POSTGRES_USER: shotgrid
#POSTGRES_PASSWORD: <PASSWORD>
MEMCACHED_HOST: memcached # Hostname or memcached container name
MEMCACHED_PORT: 11211
labels:
com.shotgunsoftware.component: app
volumes:
- ./media:/media
depends_on:
- db
- memcached
logging:
driver: "json-file"
options:
max-size: "2g"
max-file: "20"
networks:
default:
aliases:
- shotgun.mystudio.test
restart: always
##################################################################
# emailnotifier:
# image: shotgun-app:%VERSION%
# command: sg_run_email_notifier
# environment:
# SG_SITE_URL: shotgun.mystudio.test
# POSTGRES_HOST: db # Hostname or DB container name
# POSTGRES_DB: shotgrid
# POSTGRES_PORT: 5432
# POSTGRES_USER: shotgrid
# #POSTGRES_PASSWORD: <PASSWORD>
# labels:
# com.shotgunsoftware.component: emailnotifier
# depends_on:
# - db
# - memcached
# restart: always
##################################################################
memcached:
image: memcached:1.4
restart: always
##################################################################
db:
image: postgres:11
environment:
POSTGRES_HOST_AUTH_METHOD: trust
POSTGRES_USER: shotgrid
PGDATA: /var/lib/postgresql/data
restart: always
volumes:
- ./pgdata:/var/lib/postgresql/data
##################################################################
dbops:
image: postgres:11
command: ["/bin/bash"]
stdin_open: true
tty: true
environment:
PGHOST: db
PGPORT: 5432
PGUSER: shotgrid
PGDATABASE: shotgrid
PGOPTIONS: "-c statement_timeout=0"
#PGPASSWORD: <PASSWORD>
labels:
com.shotgunsoftware.component: dbops
volumes:
- ./db_backup:/db_backup
restart: always
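    # Example (assumed) backup/restore usage via this container; pg_dump and
    # pg_restore pick up the PG* environment variables set above:
    #   docker-compose exec dbops pg_dump -Fc -f /db_backup/shotgrid.dump shotgrid
    #   docker-compose exec dbops pg_restore -d shotgrid /db_backup/shotgrid.dump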
# ##################################################################
# transcoderserver:
# image: shotgrid-transcoder-server:%TCSERVER_VERSION%
# environment:
# POSTGRES_HOST: db # Hostname or DB container name
# POSTGRES_DB: shotgrid_transcoding
# POSTGRES_PORT: 5432
# POSTGRES_USER: shotgrid
# #POSTGRES_PASSWORD: <PASSWORD>
# DASHBOARD_USERNAME: admin
# DASHBOARD_PASSWORD: <password>
# labels:
# com.shotgunsoftware.component: transcoderserver
# ports:
# - "<network_interface_ip>:8008:80"
# restart: always
# ##################################################################
# transcoderworker:
# image: shotgrid-transcoder-worker:%TCWORKER_VERSION%
# environment:
# POSTGRES_HOST: db # Hostname or DB container name
# POSTGRES_DB: shotgrid_transcoding
# POSTGRES_PORT: 5432
# POSTGRES_USER: shotgrid
# #POSTGRES_PASSWORD: <PASSWORD>
# labels:
# com.shotgunsoftware.component: transcoderworker
# volumes:
# - ./media:/media
# restart: always
##################################################################
# proxy:
# build: ./proxy/setup
# ports:
# - "<network_interface_ip>:80:80"
# volumes:
# - ./proxy/config:/usr/local/etc/haproxy/config
# - ./proxy/certs:/usr/local/etc/haproxy/certs
# - ./proxy/custom_errors:/usr/local/etc/haproxy/custom_errors
# depends_on:
# - app
# restart: always
# logging:
# driver: "json-file"
# options:
# max-size: "2g"
# max-file: "20"
|
docker-compose.yml
|
title: <NAME>
email: <EMAIL>
url: https://aidevia.github.io
description: "Write an awesome description for your new site here. You can edit this line in _config.yml. It will appear in your document head meta (for Google search results) and in your feed.xml site description."
skills: "Hi I'm <NAME>
I'm part of Information System at Faculty of Science and Technology , State Islamic University of Sultan Syarif Kasim Riau"
meta_author: <NAME>
# Google webmaster tools
google_verify:
# https://ssl.bing.com/webmaster/configure/verify/ownership Option 2 content= goes here
bing_verify:
# Contact form:
# - static : pass through formspree.io to validate email sending
# - disqus : replace contact form by disqus thread
# - comment the line below if you want to stick with the default PHP contact form
contact: static
# If you use disqus you need disqus shortname
# https://help.disqus.com/customer/portal/articles/466208
disqus_shortname: myshortname
# Color settings (hex-codes without the leading hash-tag)
color:
primary: 18bc9c #80B3FF
primary-rgb: "24,288,156" #"128,179,255"
secondary: 2c3e50 #FD6E8A
secondary-dark: 233140 #A2122F
# Footer settings
footer:
copyright: aidevia
location: Location
social: Around the Web
credits: Credits
# Social networks usernames (many more available: google-plus, flickr, dribbble, pinterest, instagram, tumblr, linkedin, etc.)
social:
- title: twitter
url: http://twitter.com/ai_devia
- title: facebook
url: https://www.facebook.com/ex.sari
- title: stack-overflow
url: http://stackoverflow.com/questions/tagged/jekyll
- title: bitbucket
url: http://bitbucket.org/jekyll
- title: github
url: http://github.com/aidevia
# Postal address (add as many lines as necessary)
address:
- line: Jl. <NAME> Gg. Asrama No.117A rt/rw 001/002
- line: <NAME>.<NAME>, Pekanbaru
# Credits content
credits: 'Related Accounts </br>
<a href="http://sif.uin-suska.ac.id/">Sistem Informasi</a></br>
<a href="http://uin-suska.ac.id//">UIN Suska Riau</a></br>'
# Build settings
markdown: kramdown
permalink: pretty
|
_config.yml
|
uid: "com.microsoft.azure.servicebus.primitives"
fullName: "com.microsoft.azure.servicebus.primitives"
name: "com.microsoft.azure.servicebus.primitives"
classes:
- "com.microsoft.azure.servicebus.primitives.AuthorizationFailedException"
- "com.microsoft.azure.servicebus.primitives.ClientConstants"
- "com.microsoft.azure.servicebus.primitives.ClientEntity"
- "com.microsoft.azure.servicebus.primitives.CommunicationException"
- "com.microsoft.azure.servicebus.primitives.ConnectionStringBuilder"
- "com.microsoft.azure.servicebus.primitives.CoreMessageReceiver"
- "com.microsoft.azure.servicebus.primitives.CoreMessageSender"
- "com.microsoft.azure.servicebus.primitives.ExceptionUtil"
- "com.microsoft.azure.servicebus.primitives.IllegalConnectionStringFormatException"
- "com.microsoft.azure.servicebus.primitives.MessageLockLostException"
- "com.microsoft.azure.servicebus.primitives.MessageNotFoundException"
- "com.microsoft.azure.servicebus.primitives.MessageWithDeliveryTag"
- "com.microsoft.azure.servicebus.primitives.MessageWithLockToken"
- "com.microsoft.azure.servicebus.primitives.MessagingEntityAlreadyExistsException"
- "com.microsoft.azure.servicebus.primitives.MessagingEntityDisabledException"
- "com.microsoft.azure.servicebus.primitives.MessagingEntityNotFoundException"
- "com.microsoft.azure.servicebus.primitives.MessagingFactory"
- "com.microsoft.azure.servicebus.primitives.MiscRequestResponseOperationHandler"
- "com.microsoft.azure.servicebus.primitives.OperationCancelledException"
- "com.microsoft.azure.servicebus.primitives.Pair"
- "com.microsoft.azure.servicebus.primitives.PayloadSizeExceededException"
- "com.microsoft.azure.servicebus.primitives.QuotaExceededException"
- "com.microsoft.azure.servicebus.primitives.ReceiverDisconnectedException"
- "com.microsoft.azure.servicebus.primitives.ReceiverErrorContext"
- "com.microsoft.azure.servicebus.primitives.RequestResponseUtils"
- "com.microsoft.azure.servicebus.primitives.RequestResponseWorkItem"
- "com.microsoft.azure.servicebus.primitives.RetryExponential"
- "com.microsoft.azure.servicebus.primitives.RetryPolicy"
- "com.microsoft.azure.servicebus.primitives.SASUtil"
- "com.microsoft.azure.servicebus.primitives.SenderErrorContext"
- "com.microsoft.azure.servicebus.primitives.ServerBusyException"
- "com.microsoft.azure.servicebus.primitives.ServiceBusException"
- "com.microsoft.azure.servicebus.primitives.SessionCannotBeLockedException"
- "com.microsoft.azure.servicebus.primitives.SessionLockLostException"
- "com.microsoft.azure.servicebus.primitives.SettleModePair"
- "com.microsoft.azure.servicebus.primitives.StringUtil"
- "com.microsoft.azure.servicebus.primitives.TimeoutException"
- "com.microsoft.azure.servicebus.primitives.TimeoutTracker"
- "com.microsoft.azure.servicebus.primitives.Timer"
- "com.microsoft.azure.servicebus.primitives.TrackingUtil"
- "com.microsoft.azure.servicebus.primitives.Util"
enums:
- "com.microsoft.azure.servicebus.primitives.MessagingEntityType"
- "com.microsoft.azure.servicebus.primitives.TimerType"
- "com.microsoft.azure.servicebus.primitives.TransportType"
metadata: {}
package: "com.microsoft.azure.servicebus.primitives"
artifact: com.microsoft.azure:azure-servicebus:3.6.1
|
docs-ref-autogen/com.microsoft.azure.servicebus.primitives.yml
|
# Read about fixtures at http://ar.rubyonrails.org/classes/Fixtures.html
Guest:
id: 1
login: Guest
email: <EMAIL>
password_hash: <%= <PASSWORD>('<PASSWORD>') %>
posts_count: 0
created_at: <%= 5.days.ago.to_s :db %>
profile_updated_at: <%= 5.hours.ago.to_s :db %>
online_at: <%= 5.hours.ago.to_s :db %>
all_viewed_at: <%= 5.hours.ago.to_s :db %>
time_zone: 'UTC'
Administrator:
id: 2
login: Administrator
email: <EMAIL>
password_hash: <%= <PASSWORD>('<PASSWORD>') %>
admin: 1
posts_count: 1
created_at: <%= 5.days.ago.to_s :db %>
profile_updated_at: <%= 5.hours.ago.to_s :db %>
online_at: <%= 5.hours.ago.to_s :db %>
all_viewed_at: <%= 5.hours.ago.to_s :db %>
bio: 'admin'
auth_token: <PASSWORD>
auth_token_exp: <%= (Time.now.utc - 1.week).to_s :db %> # expired
time_zone: 'UTC'
Timothy:
id: 3
login: Timothy
email: <EMAIL>
posts_count: 3450
password_hash: <%= User.encrypt('<PASSWORD>') %>
created_at: <%= 5.days.ago.to_s :db %>
profile_updated_at: <%= 5.hours.ago.to_s :db %>
online_at: <%= 5.hours.ago.to_s :db %>
all_viewed_at: <%= 5.hours.ago.to_s :db %>
auth_token: <PASSWORD>
auth_token_exp: <%= (Time.now.utc + 1.week).to_s :db %> # not expired
time_zone: 'UTC'
trevor:
id: 4
login: trevor
email: <EMAIL>
posts_count: 1000
password_hash: <%= User.encrypt('<PASSWORD>') %>
created_at: <%= 5.days.ago.to_s :db %>
profile_updated_at: <%= 5.hours.ago.to_s :db %>
online_at: <%= 5.hours.ago.to_s :db %>
all_viewed_at: <%= 5.hours.ago.to_s :db %>
bio: 'my bio'
avatar: 4.gif
time_zone: 'UTC'
banned:
id: 5
login: banned
email: <EMAIL>
posts_count: 0
password_hash: <%= User.encrypt('<PASSWORD>') %>
created_at: <%= 5.days.ago.to_s :db %>
profile_updated_at: <%= 5.hours.ago.to_s :db %>
online_at: <%= 5.hours.ago.to_s :db %>
all_viewed_at: <%= 5.hours.ago.to_s :db %>
time_zone: 'UTC'
ban_message: 'get banned!'
banned_until: <%= (Time.now.utc + 5.days).to_s :db %>
post_test:
id: 6
login: post
email: <EMAIL>
posts_count: 0
password_hash: <%= <PASSWORD>('<PASSWORD>') %>
created_at: <%= 5.days.ago.to_s :db %>
profile_updated_at: <%= 5.hours.ago.to_s :db %>
online_at: <%= 2.weeks.ago.to_s :db %>
all_viewed_at: <%= 5.hours.ago.to_s :db %>
time_zone: 'UTC'
noposts:
id: 7
login: noposts
email: <EMAIL>
posts_count: 0
password_hash: <%= <PASSWORD>('<PASSWORD>') %>
created_at: <%= 5.days.ago.to_s :db %>
profile_updated_at: <%= 5.hours.ago.to_s :db %>
online_at: <%= 2.weeks.ago.to_s :db %>
all_viewed_at: <%= 5.hours.ago.to_s :db %>
time_zone: 'UTC'
|
test/fixtures/users.yml
|
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "mesh-namespaces-migration.fullname" . }}-hooks
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "mesh-namespaces-migration.fullname" . }}-view-namespaces
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: hooks-view-namespaces
namespace: {{ .Release.Namespace }}
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "mesh-namespaces-migration.fullname" . }}-view-namespaces
subjects:
- kind: ServiceAccount
name: {{ template "mesh-namespaces-migration.fullname" . }}-hooks
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "mesh-namespaces-migration.fullname" . }}-hooks
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyma-admin
subjects:
- kind: ServiceAccount
name: {{ template "mesh-namespaces-migration.fullname" . }}-hooks
namespace: {{ .Release.Namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ template "mesh-namespaces-migration.fullname" . }}-mesh-namespaces-migration
labels:
{{ include "mesh-namespaces-migration.labels.standard" . | indent 4 }}
annotations:
helm.sh/hook: pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
template:
metadata:
annotations:
sidecar.istio.io/inject: 'false'
labels:
{{ include "mesh-namespaces-migration.labels.standard" . | indent 8 }}
spec:
restartPolicy: Never
serviceAccountName: {{ template "mesh-namespaces-migration.fullname" . }}-hooks
containers:
- name: mesh-namespaces-migration
image: "{{ .Values.global.containerRegistry.path }}/{{ .Values.global.event_bus.dir }}{{ .Values.global.event_bus.meshNamespacesMigrationImage }}:{{ .Values.global.event_bus.version }}"
imagePullPolicy: Always
|
resources/event-bus/charts/mesh-namespaces-migration/templates/migration-jobs.yaml
|
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
name: {{ .Release.Name }}
labels:
app: {{ template "psmdb-db.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
forceUpgrade: true
helmVersion: v3
chart:
git: https://github.com/codejamninja/percona-helm-charts.git
ref: codejamninja/psmdb-db-0.1.0
path: charts/psmdb-db
values:
platform: kubernetes
DNSsuffix: .svc.cluster.local
pause: {{ .Values.config.psmdb.pause }}
allowUnsafeConfigurations: true
updateStrategy: {{ .Values.config.updateStrategy }}
upgradeOptions:
versionServiceEndpoint: https://check.percona.com/versions
apply: recommended
schedule: {{ .Values.config.psmdb.upgrade.schedule }}
image:
repository: {{ .Values.images.psmdb.repository | quote }}
tag: {{ .Values.images.psmdb.tag | quote }}
imagePullSecrets: []
# runUid: 1001
secrets: {}
pmm:
enabled: {{ .Values.config.pmm.enabled }}
image:
repository: {{ .Values.images.pmm.repository }}
tag: {{ .Values.images.pmm.tag }}
serverHost: {{ .Values.config.pmm.hostname | quote }}
serverUser: {{ .Values.config.pmm.username | quote }}
replset:
name: rs0
size: {{ .Values.config.psmdb.size }}
antiAffinityTopologyKey: "kubernetes.io/hostname"
{{- if .Values.persistence.velero.enabled }}
annotations:
backup.velero.io/backup-volumes: datadir
{{- end }}
# priorityClass: ""
# labels: {}
nodeSelector: {}
livenessProbe:
failureThreshold: 4
initialDelaySeconds: 60
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
startupDelaySeconds: 7200
podDisruptionBudget:
maxUnavailable: 1
expose:
enabled: false
exposeType: LoadBalancer
arbiter:
enabled: false
size: {{ .Values.config.arbiter.size }}
antiAffinityTopologyKey: "kubernetes.io/hostname"
# priorityClass: ""
# annotations: {}
# labels: {}
# nodeSelector: {}
# livenessProbe: {}
# schedulerName: ""
resources:
{{- if .Values.config.psmdb.resources.enabled }}
requests:
{{ toYaml .Values.config.psmdb.resources.requests | indent 10 }}
limits:
{{ toYaml .Values.config.psmdb.resources.limits | indent 10 }}
{{- else }}
requests: {}
limits: {}
{{- end }}
volumeSpec:
{{- if .Values.persistence.enabled }}
pvc:
storageClassName: {{ .Values.persistence.storageClass }}
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size.psmdb }}
{{- else }}
emptyDir: {}
{{- end }}
backup:
enabled: {{ .Values.config.backup.enabled }}
restartOnFailure: true
image:
repository: {{ .Values.images.backup.repository }}
tag: {{ .Values.images.backup.tag }}
serviceAccountName: percona-server-mongodb-operator
# resources:
# limits:
# cpu: "300m"
# memory: "0.5G"
# requests:
# cpu: "300m"
# memory: "0.5G"
storages:
s3:
type: s3
s3:
bucket: {{ .Values.config.backup.s3.bucket }}
credentialsSecret: {{ template "psmdb-db.fullname" . }}-backup
region: {{ .Values.config.backup.s3.region }}
endpointUrl: {{ .Values.config.backup.s3.endpoint }}
schedule:
- name: backup
schedule: {{ .Values.config.backup.enabled | ternary .Values.config.backup.schedule "0 0 31 2 *" }}
# keep: {{ .Values.config.backup.keep }}
storageName: s3
compressionType: gzip
users:
MONGODB_BACKUP_USER: {{ "backup" | b64enc }}
MONGODB_BACKUP_PASSWORD: {{ "backup" | b64enc }}
MONGODB_CLUSTER_ADMIN_USER: {{ "clusterAdmin" | b64enc }}
MONGODB_CLUSTER_ADMIN_PASSWORD: {{ "clusterAdmin" | b64enc }}
MONGODB_CLUSTER_MONITOR_USER: {{ "clusterMonitor" | b64enc }}
MONGODB_CLUSTER_MONITOR_PASSWORD: {{ "clusterMonitor" | b64enc }}
MONGODB_USER_ADMIN_USER: {{ "userAdmin" | b64enc }}
MONGODB_USER_ADMIN_PASSWORD: {{ "<PASSWORD>" | b64enc }}
PMM_SERVER_USER: {{ "admin" | b64enc }}
PMM_SERVER_PASSWORD: {{ "<PASSWORD>" | b64enc }}
|
depricated/psmdb-db/templates/releases/psmdb-db.yaml
|
# Site
title: Jim's Home
subtitle: '技术分享平台'
description: '专注于个人原创的技术分享平台, 主要内容分为数据结构算法, 设计模式, Java 开发, javascript 开发, 测试, 部署, 运维等各方面的技能分享以及开源内容归档整理'
keywords: 数据结构算法,设计模式,Java 开发,javascript 开发,测试,部署,运维
author: Jim
language: zh-CN
timezone: Asia/Shanghai
# URL
## If your site is put in a subdirectory, set url as 'http://yoursite.com/child' and root as '/child/'
url: https://www.yzer.club
root: /
permalink: :title/
permalink_defaults:
pretty_urls:
trailing_index: true # Set to false to remove trailing 'index.html' from permalinks
trailing_html: true # Set to false to remove trailing '.html' from permalinks
# Directory
source_dir: source
public_dir: public
tag_dir: tags
archive_dir: archives
category_dir: categories
code_dir: downloads/code
i18n_dir: :lang
skip_render:
# Writing
new_post_name: :title.md # File name of new posts
default_layout: post
titlecase: false # Transform title into titlecase
external_link:
enable: true # Open external links in new tab
field: site # Apply to the whole site
exclude: ''
filename_case: 0
render_drafts: false
post_asset_folder: true
relative_link: false
future: true
highlight:
enable: true
line_number: true
auto_detect: false
tab_replace: ''
wrap: true
hljs: false
# Home page setting
# path: Root path for your blogs index page. (default = '')
# per_page: Posts displayed per page. (0 = disable pagination)
# order_by: Posts order. (Order by date descending by default)
index_generator:
path: ''
per_page: 10
order_by: -date
# Category & Tag
default_category: uncategorized
category_map:
tag_map:
# Metadata elements
## https://developer.mozilla.org/en-US/docs/Web/HTML/Element/meta
meta_generator: true
# Date / Time format
## Hexo uses Moment.js to parse and display date
## You can customize the date format as defined in
## http://momentjs.com/docs/#/displaying/format/
date_format: YYYY-MM-DD
time_format: HH:mm:ss
## Use post's date for updated date unless set in front-matter
use_date_for_updated: false
# Pagination
## Set per_page to 0 to disable pagination
per_page: 10
pagination_dir: page
# Include / Exclude file(s)
## include:/exclude: options only apply to the 'source/' folder
include:
exclude:
ignore:
# Extensions
## Plugins: https://hexo.io/plugins/
## Themes: https://hexo.io/themes/
theme: next
# Deployment
## Docs: https://hexo.io/docs/deployment.html
deploy:
type: ''
# External link optimization
nofollow:
enable: true
  # Exempted links; friend links can be placed here
exclude:
- 'www.yzer.club'
# hexo-filter-emoji
emoji:
enable: false
className: github-emoji
styles:
background-image: none
customEmojis:
# hexo-filter-github-emojis
githubEmojis:
enable: true
className: github-emoji
inject: true
styles:
customEmojis:
mermaid:
enable: true
version: 8.5.0
# markdown_it_plus:
# highlight: true
# html: true
# xhtmlOut: true
# breaks: true
# langPrefix:
# linkify: true
# typographer:
# quotes: “”‘’
# pre_class: highlight
# markdown:
# render:
# html: true
# xhtmlOut: false
# breaks: true
# linkify: true
# typographer: true
# quotes: '“”‘’'
# plugins:
# - markdown-it-katex
# - markdown-it-abbr
# - markdown-it-footnote
# - markdown-it-ins
# - markdown-it-sub
# - markdown-it-sup
# anchors:
# level: 2
# collisionSuffix: ''
# permalink: false
# permalinkClass: 'header-anchor'
# permalinkSide: 'left'
# permalinkSymbol: '¶'
# case: 0
# separator: ''
# MathJax
math:
engine: 'mathjax'
mathjax:
src: https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js?config=TeX-AMS-MML_HTMLorMML
search:
path: search.xml
field: post
format: html
limit: 10000
|
_config.yml
|
jobs:
# Create a new tag in order to trigger this
- name: test-and-ship-tag
plan:
- in_parallel:
- get: tagged-commits
trigger: true
- get: tasks
- task: build-image
privileged: true
file: tasks/oci/build-image.yml
input_mapping: { source: tagged-commits }
- load_var: image-version
file: tagged-commits/.git/describe_ref # https://github.com/concourse/git-resource#additional-files-populated
- put: docker-hub
params:
image: image/image.tar
version: ((.:image-version))
bump_aliases: true
- task: compile-release-notes
file: tasks/git/release-notes.yml
input_mapping: { repo: tagged-commits }
output_mapping: { release-notes: release-notes }
- put: gh-release
params:
name: tagged-commits/.git/ref
body: release-notes/commits-since-last-tag.txt
tag: tagged-commits/.git/ref
on_success:
do:
- task: create-publication-message
file: tasks/git/create-release-message.yml
input_mapping: { source: tagged-commits }
params: { project_name: concourse-resource-proxy }
output_mapping: { message: telegram-message }
- put: telegram-notification
params:
chat_id: ((telegram.chat_id))
text_file: telegram-message/new-release.txt
resources:
- name: tagged-commits
type: git
icon: github
source:
uri: <EMAIL>:suhlig/concourse-resource-proxy.git
private_key: ((github.ssh_key))
fetch_tags: true
tag_filter: v*
- name: tasks
type: git
icon: github
source:
uri: <EMAIL>:suhlig/concourse-task-store.git
private_key: ((github.ssh_key))
- name: docker-hub
type: registry-image
icon: docker
source:
repository: suhlig/concourse-resource-proxy
username: suhlig
password: ((<PASSWORD>))
- name: gh-release
type: github-release
icon: github-circle
source:
owner: suhlig
repository: concourse-resource-proxy
access_token: ((github.token))
pre_release: true
drafts: true
- name: telegram-notification
type: telegram-notification
icon: telegram
source:
bot_token: ((telegram.bot_token))
resource_types:
- name: telegram-notification
type: docker-image
source:
repository: w32blaster/concourse-telegram-notifier
|
ci/pipeline.yml
|
title: |-
Test Reports
summary: |
Reports test
description: |
Reports test
website: https://github.com/bitrise-steplib/bitrise-step-test-reports
source_code_url: https://github.com/bitrise-steplib/bitrise-step-test-reports
support_url: https://github.com/bitrise-steplib/bitrise-step-test-reports/issues
host_os_tags:
- osx-10.10
- ubuntu-16.04
project_type_tags:
- android
- react-native
type_tags:
- test
is_requires_admin_user: true
is_always_run: true
is_skippable: false
toolkit:
go:
package_name: github.com/bitrise-steplib/bitrise-step-test-reports
# inputs:
# - project_location: $BITRISE_SOURCE_DIR
# opts:
# title: Project Location
# summary: A project in Android Studio contains everything that defines your workspace for an app (gradlew, settings.gradle), from source code and assets, to test code and build configurations.
# description: A project in Android Studio contains everything that defines your workspace for an app (gradlew, settings.gradle), from source code and assets, to test code and build configurations.
# is_required: true
# - variant: ""
# opts:
# title: Variant
# summary: |
# Set the variant that you want to lint. To see your available variants please open your project in Android Studio and go in [Project Structure] -> variants section.
# description: |
# Set the variant that you want to lint. To see your available variants please open your project in Android Studio and go in [Project Structure] -> variants section.
# is_required: false
# # - build_type: ""
# # opts:
# # title: Build Type
# # summary: |
# # Set the build type(by default: release/debug) that you want to lint. To see your available build types please open your project in Android Studio and go in [Project Structure] -> Build Types section.
# # description: |
# # Set the build type(by default: release/debug) that you want to lint. To see your available build types please open your project in Android Studio and go in [Project Structure] -> Build Types section.
# # is_required: false
# # - module: ""
# # opts:
# # title: Module
# # summary: |
# # Set the module(by default: app) that you want to lint. To see your available modules please open your project in Android Studio and go in [Project Structure] then see the list on the left.
# # description: |
# # Set the module(by default: app) that you want to lint. To see your available modules please open your project in Android Studio and go in [Project Structure] then see the list on the left.
# # is_required: false
# - report_include_pattern: "*/build/reports/lint-results.*"
# opts:
# category: Report Export Options
# title: Include pattern
# summary: |
# Will find the report file with the given pattern. If you need the html file only then you can use: "*/build/reports/lint-results.html"
# description: |
# Will find the report file with the given pattern. If you need the html file only then you can use: "*/build/reports/lint-results.html"
# is_required: true
|
step.yml
|
service: debug
frameworkVersion: "3"
plugins:
- serverless-plugin-scripts
- serverless-s3-bucket-helper
- serverless-iam-helper
provider:
name: aws
runtime: nodejs14.x
region: us-east-1
custom:
stage: ${opt:stage, self:provider.stage}
region: ${opt:region, self:provider.region}
iamPath: ${ssm:/configuration/${self:custom.stage}/iam/path~true, ssm:/configuration/default/iam/path~true, "/"}
iamPermissionsBoundaryPolicy: ${ssm:/configuration/${self:custom.stage}/iam/permissionsBoundaryPolicy~true, ssm:/configuration/default/iam/permissionsBoundaryPolicy~true, ""}
vpcId: ${ssm:/configuration/${self:custom.stage}/vpc/id~true, ssm:/configuration/default/vpc/id~true}
dataSubnets:
- ${ssm:/configuration/${self:custom.stage}/vpc/subnets/private/a/id~true, ssm:/configuration/default/vpc/subnets/private/a/id~true}
- ${ssm:/configuration/${self:custom.stage}/vpc/subnets/private/b/id~true, ssm:/configuration/default/vpc/subnets/private/b/id~true}
- ${ssm:/configuration/${self:custom.stage}/vpc/subnets/private/c/id~true, ssm:/configuration/default/vpc/subnets/private/c/id~true}
SsmPathPrefix: /macpro-platform-${self:custom.stage}
scripts:
commands:
connect: |
instanceId=${cf:debug-${self:custom.stage}.Ec2Id, ""}
if [ -z "$instanceId" ]; then
echo "No debug instance found for stage ${self:custom.stage}" && exit 1
else
echo """
To connect to the debug instance, run the following command:
aws ssm start-session --target $instanceId
"""
fi
resources:
Conditions:
CreatePermissionsBoundary:
Fn::Not:
- Fn::Equals:
- ""
- ${self:custom.iamPermissionsBoundaryPolicy}
Parameters:
pLatestAmiId:
Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>
Default: /aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2
Resources:
Ec2InstanceRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Principal:
Service:
- ec2.amazonaws.com
Action:
- sts:AssumeRole
Path: ${self:custom.iamPath}
PermissionsBoundary: ${self:custom.iamPermissionsBoundaryPolicy}
ManagedPolicyArns:
- !Sub arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
Policies:
- PolicyName: Ec2RolePolicy
PolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Action:
- ssm:PutParameter
- ssm:GetParameter
Resource: !Sub arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter${self:custom.SsmPathPrefix}/*
- Effect: Allow
Action:
- execute-api:*
Resource: "*"
Ec2InstanceProfile:
Type: AWS::IAM::InstanceProfile
Properties:
Path: ${self:custom.iamPath}
Roles:
- !Ref Ec2InstanceRole
Ec2SecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: Security group for the debug instance.
VpcId: ${self:custom.vpcId}
Ec2:
Type: AWS::EC2::Instance
Properties:
BlockDeviceMappings:
- DeviceName: /dev/xvda
Ebs:
VolumeType: gp2
VolumeSize: 10
DeleteOnTermination: "false"
Encrypted: "true"
HibernationOptions:
Configured: true
IamInstanceProfile: !Ref Ec2InstanceProfile
ImageId: !Ref pLatestAmiId
InstanceType: t3.micro
SecurityGroupIds:
- !Ref Ec2SecurityGroup
SubnetId: ${self:custom.dataSubnets.0}
UserData:
Fn::Base64: |
#!/bin/bash
amazon-linux-extras enable python3.8
yum clean metadata
          yum install -y python38
curl -O https://bootstrap.pypa.io/get-pip.py
yum install git -y
sudo su - ssm-user
cd ~
git clone https://github.com/CMSgov/macpro-platform-doc-conversion
DebugEc2SecurityGroupIngressVPN:
Type: AWS::EC2::SecurityGroupIngress
Properties:
GroupId: !Sub "${Ec2SecurityGroup}"
IpProtocol: tcp
CidrIp: 10.0.0.0/8
ToPort: 22
FromPort: 22
Outputs:
Ec2Id:
Description: The Id of the EC2 debug instance
Value: !Ref Ec2
|
services/.debug/serverless.yml
|
---
# YAML template
_rust_base: &rust_base
language: rust
cache: cargo
script:
- cargo clean
- cargo build
- cargo test
_coverage: &coverage
before_cache: |
if [[ "$TRAVIS_RUST_VERSION" == stable && "$TRAVIS_OS_NAME" == linux ]]; then
cargo install cargo-tarpaulin -f
fi
after_success: |
if [[ "$TRAVIS_RUST_VERSION" == stable && "$TRAVIS_OS_NAME" == linux ]]; then
cargo tarpaulin --out Xml
bash <(curl -s https://codecov.io/bash)
fi
_linux_x11: &linux_x11
os: linux
services:
- xvfb
addons:
apt:
packages:
- libxcb-shape0-dev # Because we use clipboard crate
- libxcb-xfixes0-dev # Because we use clipboard crate
- libssl-dev
matrix:
allow_failures:
- os: windows
jobs:
include:
- stage: "Linux Tests"
name: "Rust stable"
<<: *rust_base
<<: *linux_x11
<<: *coverage
rust: stable
- stage: "Linux Tests"
name: "Rust beta"
if: branch =~ /^(master|develop)$/
<<: *rust_base
<<: *linux_x11
rust: beta
- stage: "Linux Tests"
name: "Rust nightly"
if: type = cron
<<: *rust_base
<<: *linux_x11
rust: nightly
- stage: "MacOS Tests"
name: "Rust stable"
<<: *rust_base
os: osx
rust: stable
- stage: "MacOS Tests"
name: "Rust beta"
if: branch =~ /^(master|develop)$/
<<: *rust_base
os: osx
rust: beta
- stage: "MacOS Tests"
name: "Rust nightly"
if: type = cron
<<: *rust_base
os: osx
rust: nightly
- stage: "Windows Tests"
name: "Rust stable"
<<: *rust_base
os: windows
rust: stable
- stage: "Windows Tests"
name: "Rust beta"
if: branch =~ /^(master|develop)$/
<<: *rust_base
os: windows
rust: beta
- stage: "Windows Tests"
name: "Rust nightly"
if: type = cron
<<: *rust_base
os: windows
rust: nightly
- stage: "Deployments"
name: "Crates.io"
if: branch = master
<<: *rust_base
<<: *linux_x11
deploy:
provider: cargo
token:
secure: <KEY>
on:
branch: master
|
.travis.yml
|
schema:
schema_id: zhengma_trad
name: 郑码简入繁出
version: "1.0"
author:
- 发明人 郑易里先生
description:
郑码简入繁出
按 ` 键进入拼音反查
dependencies:
- pinyin_simp
switches:
- name: ascii_mode
reset: 0
states: [ 中文, 英文 ]
- name: ascii_punct
states: [ ,。, ,. ]
- name: full_shape
states: [ 半角, 全角 ]
- name: zh_trad
reset: 1
states: [ 简体, 繁体 ]
engine:
processors:
- ascii_composer
- recognizer
- key_binder
- speller
- punctuator
- selector
- navigator
- express_editor
segmentors:
- ascii_segmentor
- matcher
- abc_segmentor
- punct_segmentor
- fallback_segmentor
translators:
- punct_translator
- reverse_lookup_translator
    # translator with fixed word weights
    - table_translator@fixed
    # default translator
    - table_translator
filters:
- simplifier
- uniquifier
# translator with fixed word weights
fixed:
  # derived from the default translator
  __include: translator
  # disable the user dictionary
  enable_user_dict: false
  enable_completion: false
  # highest priority for its candidates
  initial_quality: 1000
speller:
  # delimiter between syllables
  delimiter: "'"
  # auto-commit
  auto_select: false
  # commit the top candidate once the maximum code length is exceeded
  # max_code_length: 4
translator:
dictionary: zhengma
prism: zhengma_trad
  # charset filtering
  enable_charset_filter: false
  # automatic sentence building
  enable_sentence: true
  # automatic word encoding
  enable_encoder: true
  # automatically encode words from the commit history
  encode_commit_history: false
  # maximum length of automatically encoded words
  # max_phrase_length: 4
  # incremental completion hints
  enable_completion: true
  # user dictionary
  enable_user_dict: true
disable_user_dict_for_patterns:
- "^`.*$"
reverse_lookup:
dictionary: pinyin_simp
prefix: "`"
suffix: "'"
tips: [ 拼音 ]
preedit_format:
- xform/([nl])v/$1ü/
- xform/([nl])ue/$1üe/
- xform/([jqxy])v/$1u/
simplifier:
opencc_config: s2t.json
option_name: zh_trad
key_binder:
import_preset: default
bindings:
    # Shift+Space toggles full-width/half-width
    - { accept: Shift+space, toggle: full_shape, when: always }
    # Control+comma toggles Chinese/English input
    - { accept: Control+comma, toggle: ascii_mode, when: always }
    # Control+Space toggles Chinese/English input
    - { accept: Control+space, toggle: ascii_mode, when: always }
    # Control+period toggles Chinese/English punctuation
    - { accept: Control+period, toggle: ascii_punct, when: always }
recognizer:
import_preset: default
patterns:
    # reverse lookup by code
    reverse_lookup: "^`[a-z]*'?$"
    # special characters from symbols.yaml
    punct: "/[a-z]*$"
ascii_composer:
import_preset: default
good_old_caps_lock: false
switch_key:
    # the Shift keys are not used on their own
    Shift_L: noop
    Shift_R: noop
    # the Control keys are not used on their own
    Control_L: noop
    Control_R: noop
Caps_Lock: clear
Eisu_toggle: clear
punctuator:
import_preset: symbols
  # Chinese punctuation marks
half_shape:
',' : { commit: , }
'.' : { commit: 。 }
'<' : 《
'>' : 》
'/' : '/'
'?' : { commit: ? }
';' : { commit: ; }
':' : { commit: : }
'''' : { pair: [ '‘', '’' ] }
'"' : { pair: [ '“', '”' ] }
'\' : 、
'|' : '|'
'`' : '`'
'~' : '~'
'!' : { commit: ! }
'@' : '@'
'#' : '#'
'%' : '%'
'$' : ¥
'^' : { commit: …… }
'&' : '&'
'*' : '*'
'(' : (
')' : )
'-' : '-'
'_' : '_'
'+' : '+'
'=' : '='
'[' : '['
']' : ']'
'{' : '{'
'}' : '}'
|
zhengma_trad.schema.yaml
|
sudo: false
language: python
python:
- 3.3
- 3.4
addons:
apt:
packages:
- texlive-xetex
- texlive-latex-recommended
- lmodern
- texlive-latex-extra
- texlive-lang-greek
- fonts-linuxlibertine
- poppler-utils
install:
- pip install flake8
- pip install -r requirements.txt
before_script:
- describe_pdf_file() { pdftotext "${1:?}" - | tail; } # http://stackoverflow.com/questions/6187250/pdf-text-extraction/6189489#6189489
- build_reader_LaTeX() { local D="${1:?}"; local T="${2:?}.tex"; cat - > "${D}/$T"; cd "$D"; xelatex "$T" && xelatex "$T" || exit 1; }
- reader_filename_LaTeX() { echo "${1:?}.pdf"; }
- describe_reader_file_LaTeX() { describe_pdf_file "${1:?}"; }
- build_reader_SILE() { local D="${1:?}"; local T="${2:?}.sil"; cat - > "${D}/$T"; cd "$D"; sile "$T" || exit 1; }
- reader_filename_SILE() { echo "${1:?}.pdf"; }
- describe_reader_file_SILE() { describe_pdf_file "${1:?}"; }
- flake8 .
- fc-list
- mkdir ci.out
script:
- ./frequency_exclusion.py $LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE > ci.out/exclude.txt
- wc -l ci.out/exclude.txt
- ./make_glosses.py --exclude ci.out/exclude.txt "$VERSES" > ci.out/glosses.yaml
- ./make_headwords.py --exclude ci.out/exclude.txt "$VERSES" > ci.out/headwords.yaml
- ./reader.py --headwords ci.out/headwords.yaml --glosses ci.out/glosses.yaml --language eng --exclude ci.out/exclude.txt --typeface "Linux Libertine O" "$VERSES" --backend backends.$BACKEND | build_reader_$BACKEND ci.out reader
- RF=$(reader_filename_$BACKEND reader)
- ls -l ci.out/$RF
after_success:
- file ci.out/$RF
- describe_reader_file_$BACKEND ci.out/$RF
env:
- BACKEND=LaTeX VERSES="John 18:1-11" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=31
- BACKEND=LaTeX VERSES="John 18:1-11" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=48
- BACKEND=LaTeX VERSES="John 18:1-11" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=662
- BACKEND=LaTeX VERSES="John 18:1-11" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=1000000
- BACKEND=SILE VERSES="John 18:1-11" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=31
- BACKEND=LaTeX VERSES="Matthew 1:1-Revelation 22:21" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=31
- BACKEND=LaTeX VERSES="Matthew 1:1-Revelation 22:21" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=48
- BACKEND=LaTeX VERSES="Matthew 1:1-Revelation 22:21" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=190
matrix:
allow_failures:
- env: BACKEND=SILE VERSES="John 18:1-11" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=31
- env: BACKEND=LaTeX VERSES="Matthew 1:1-Revelation 22:21" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=31
- env: BACKEND=LaTeX VERSES="Matthew 1:1-Revelation 22:21" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=48
- env: BACKEND=LaTeX VERSES="Matthew 1:1-Revelation 22:21" LOWER_OCCURRENCE_LIMIT_TO_EXCLUDE=190
fast_finish: true
|
.travis.yml
|
--- !ruby/object:RI::ClassDescription
attributes:
- !ruby/object:RI::Attribute
comment:
name: audit
rw: RW
- !ruby/object:RI::Attribute
comment:
name: debug
rw: RW
- !ruby/object:RI::Attribute
comment:
name: host
rw: R
- !ruby/object:RI::Attribute
comment:
name: maxConnections
rw: R
- !ruby/object:RI::Attribute
comment:
name: port
rw: R
- !ruby/object:RI::Attribute
comment:
name: stdlog
rw: RW
class_methods:
- !ruby/object:RI::MethodSummary
name: in_service?
- !ruby/object:RI::MethodSummary
name: new
- !ruby/object:RI::MethodSummary
name: stop
comment:
- !ruby/struct:SM::Flow::P
body: GServer implements a generic server, featuring thread pool management, simple logging, and multi-server management. See HttpServer in <tt>xmlrpc/httpserver.rb</tt> in the Ruby standard library for an example of GServer in action.
- !ruby/struct:SM::Flow::P
body: Any kind of application-level server can be implemented using this class. It accepts multiple simultaneous connections from clients, up to an optional maximum number. Several <em>services</em> (i.e. one service per TCP port) can be run simultaneously, and stopped at any time through the class method <tt>GServer.stop(port)</tt>. All the threading issues are handled, saving you the effort. All events are optionally logged, but you can provide your own event handlers if you wish.
- !ruby/struct:SM::Flow::H
level: 3
text: Example
- !ruby/struct:SM::Flow::P
body: "Using GServer is simple. Below we implement a simple time server, run it, query it, and shut it down. Try this code in <tt>irb</tt>:"
- !ruby/struct:SM::Flow::VERB
body: " require 'gserver'\n\n #\n # A server that returns the time in seconds since 1970.\n #\n class TimeServer < GServer\n def initialize(port=10001, *args)\n super(port, *args)\n end\n def serve(io)\n io.puts(Time.now.to_i)\n end\n end\n\n # Run the server with logging enabled (it's a separate thread).\n server = TimeServer.new\n server.audit = true # Turn logging on.\n server.start\n\n # *** Now point your browser to http://localhost:10001 to see it working ***\n\n # See if it's still running.\n GServer.in_service?(10001) # -> true\n server.stopped? # -> false\n\n # Shut the server down gracefully.\n server.shutdown\n\n # Alternatively, stop it immediately.\n GServer.stop(10001)\n # or, of course, "server.stop".\n"
- !ruby/struct:SM::Flow::P
body: All the business of accepting connections and exception handling is taken care of. All we have to do is implement the method that actually serves the client.
- !ruby/struct:SM::Flow::H
level: 3
text: Advanced
- !ruby/struct:SM::Flow::P
body: As the example above shows, the way to use GServer is to subclass it to create a specific server, overriding the <tt>serve</tt> method. You can override other methods as well if you wish, perhaps to collect statistics, or emit more detailed logging.
- !ruby/struct:SM::Flow::VERB
body: " connecting\n disconnecting\n starting\n stopping\n"
- !ruby/struct:SM::Flow::P
body: The above methods are only called if auditing is enabled.
- !ruby/struct:SM::Flow::P
body: You can also override <tt>log</tt> and <tt>error</tt> if, for example, you wish to use a more sophisticated logging system.
constants:
- !ruby/object:RI::Constant
comment:
name: DEFAULT_HOST
value: "\"127.0.0.1\""
full_name: GServer
includes: []
instance_methods:
- !ruby/object:RI::MethodSummary
name: connecting
- !ruby/object:RI::MethodSummary
name: connections
- !ruby/object:RI::MethodSummary
name: disconnecting
- !ruby/object:RI::MethodSummary
name: error
- !ruby/object:RI::MethodSummary
name: join
- !ruby/object:RI::MethodSummary
name: log
- !ruby/object:RI::MethodSummary
name: serve
- !ruby/object:RI::MethodSummary
name: shutdown
- !ruby/object:RI::MethodSummary
name: start
- !ruby/object:RI::MethodSummary
name: starting
- !ruby/object:RI::MethodSummary
name: stop
- !ruby/object:RI::MethodSummary
name: stopped?
- !ruby/object:RI::MethodSummary
name: stopping
name: GServer
superclass: Object
|
tools/jruby-1.5.1/share/ri/1.8/system/GServer/cdesc-GServer.yaml
|
Resource types in k8s: classified by scope of applicability
  Namespaced resources: fetched by specifying the namespace
    Workload resources (workload): Pod, ReplicaSet, Deployment, StatefulSet, DaemonSet, Job, CronJob
    Service discovery and load-balancing resources (Service Discovery, LoadBalance): Service, Ingress
    Configuration and storage resources: Volume, CSI
    Special volume types: ConfigMap, Secret, DownwardAPI (exposes surrounding environment information to the containers)
  Cluster-level resources: Namespace, Node, Role, ClusterRole, RoleBinding, ClusterRoleBinding
  Metadata resources (operations defined through metrics): HPA, PodTemplate, LimitRange
yaml:
  Data types: objects (mappings, hashes, dictionaries), sequences (lists), scalars
  Scalars: String, Boolean(false, true), Integer, Float, Null(~), Datetime, Date
DNS service: two pods under the kube-system namespace act as the built-in k8s DNS service; while a pod is being created, its name is written to this DNS service,
  and the DNS service in those two pods is configured as the DNS server resolving the various names in the k8s cluster.
Pod lifecycle:
  kubectl calls the APIServer to issue the instruction, the APIServer syncs the information into etcd, and the kubelet syncs from etcd and prepares to create the pod
  1 - Initialize the pause container
  2 - Start the init containers (initC) and run them; they set up preconditions for running the pod's application containers ahead of time.
    They all start in sequence, each one "running to completion and exiting" before the next,
    and if any of them fails to start or run along the way, the pod is restarted according to its restartPolicy or simply goes down.
  3 - Until all init containers have run successfully, the Service does not gather the main container's (mainC) port information;
    a pod that is still initializing is in the Pending state, but its initializing field is set to true
  4 - If the pod restarts, every initC must run again, so the result of each initC must be idempotent across pod restarts
  5 - For a pod that is already running, changes to an initC's spec are limited to the image field; modifying any other field has no effect,
    and modifying its image is equivalent to restarting the pod.
  6 - initC has all the fields of an application container except readinessProbe and livenessProbe, because an init container
    cannot define a readiness state distinct from completion (completion),
    and this is enforced during validation.
  7 - Every container name within a pod must be unique; duplicates raise an error. A group of initCs may expose identical ports, though,
    because they execute serially.
  What init containers are for:
    (1) They carry tools used to build the runtime environment for the application containers and release those resources once the environment is ready,
      instead of running the tools inside the application containers and bloating them
    (2) They split privileges for resource access: an initC can have permission to read a Secret and can also reach the application container's mount directory,
      so when the application container needs a high-privilege resource, the initC can write it into the application container.
    (3) They block or delay the startup of an application container that would otherwise run in parallel, until a set of preconditions is met.
      For example, with Apache accessing MySQL, Apache must start only after MySQL is up, so
      Apache's initC can probe for the MySQL container being started and block Apache's startup;
      once MySQL is up, the initC completes and exits, and then Apache starts
    (4) initC is generally not used for probing, though, because it exits as soon as its probe succeeds and cannot probe continuously the way liveness does
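    A minimal sketch of pattern (3): a hypothetical pod spec whose init container blocks until the in-cluster name "mysql" resolves
    (the pod name, images, and the mysql Service name are illustrative assumptions, not taken from these notes):
    # apiVersion: v1
    # kind: Pod
    # metadata:
    #   name: apache-with-init
    # spec:
    #   initContainers:
    #     - name: wait-for-mysql
    #       image: busybox:1.36
    #       # loop until the "mysql" Service name resolves, then exit 0 so the main container may start
    #       command: ['sh', '-c', 'until nslookup mysql; do echo waiting for mysql; sleep 2; done']
    #   containers:
    #     - name: apache
    #       image: httpd:2.4
    #       ports:
    #         - containerPort: 80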
Probes:
  A probe is a diagnostic performed periodically by the kubelet on a container. To perform a diagnostic, the kubelet calls a Handler implemented by the container.
  ExecAction: run the specified command inside the container. The diagnostic is considered successful if the command exits with return code 0.
  TcpSocketAction: perform a TCP check against the container's IP address on the specified port. The diagnostic is considered successful if the port is open.
  HTTPGetAction: perform an HTTP GET request against the container's IP address on the specified port and path; the response code decides the result:
    success, failure, or unknown
  livenessProbe: indicates whether the container is running. If the liveness probe fails, the kubelet kills the container, and the container (pod) is
    subject to its restart policy. If the container does not provide a liveness probe, the default state is Success
  readinessProbe: indicates whether the container is ready to serve requests. If the readiness probe fails, the endpoints controller removes the pod's
    IP address from the endpoints of all Services matching the pod. The readiness state before the initial delay defaults to Failure.
    If the container does not provide a readiness probe, the default state is Success
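  A minimal container-spec sketch combining the handler types above (image, path, ports, and timings are illustrative assumptions):
  # containers:
  #   - name: app
  #     image: nginx:1.25
  #     livenessProbe:
  #       httpGet:              # HTTPGetAction: judged by the HTTP response code
  #         path: /healthz
  #         port: 80
  #       initialDelaySeconds: 5
  #       periodSeconds: 10
  #     readinessProbe:
  #       tcpSocket:            # TcpSocketAction: succeeds if the port accepts a connection
  #         port: 80
  #       initialDelaySeconds: 3
  #     # an ExecAction variant would look like:
  #     #   exec:
  #     #     command: ['cat', '/tmp/healthy']   # exit code 0 counts as success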
The pod's status field is a PodStatus object, and PodStatus contains a phase field.
A pod's phase is a simple, high-level summary of where the pod is in its lifecycle.
The phase is neither a comprehensive rollup of container or pod state nor meant as a general state machine; the number and meanings of pod phases are strictly specified.
Apart from the values listed here, no other phase values should be assumed
Pod status: contains a spec part and an actual-state part; this information is recorded in the Conditions of the pod object.
  Custom readiness information can also be injected if the application needs it.
container: waiting running terminated
pod: pending running succeeded failed unknown
1 - The request instruction reaches the API endpoint, and the instruction is handed down to the kubelet
2 - The scheduler assigns the new pod to some node; a pod has no self-healing ability and
  is managed by an abstract controller policy, while the kubelet builds and registers the pod, and so on.
3 - Once a pod is scheduled onto a node, that pod instance stays on that node until the pod instance is stopped or deleted.
4 - A pod with multiple containers might hold a file-pulling program and a web server, both using a persistent volume as storage shared between the containers,
  plus a pause container used for sharing the network stack.
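A minimal sketch of the pod in point 4 (names and images are illustrative assumptions; an emptyDir stands in for the persistent volume for brevity):
# apiVersion: v1
# kind: Pod
# metadata:
#   name: puller-and-web
# spec:
#   volumes:
#     - name: shared-data
#       emptyDir: {}
#   containers:
#     - name: puller
#       image: busybox:1.36
#       command: ['sh', '-c', 'while true; do wget -q -O /data/index.html http://example.com; sleep 60; done']
#       volumeMounts:
#         - name: shared-data
#           mountPath: /data
#     - name: web
#       image: nginx:1.25
#       volumeMounts:
#         - name: shared-data
#           mountPath: /usr/share/nginx/html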
-- -------------------------------------------------------------------------------------------------
1 - imperative programming (developers)
2 - declarative programming (actual users); idempotent, like SQL with an engine behind it
-- -------------------------------------------------------------------------------------------------
pods
ingress
services
namespaces
deployments
kubectl
get -n -A --show-labels -o yaml -w --selector
  set image deployment/name containerName=imageName:tag
edit
scale deployment deploymentName --replicas 10
  rollout undo --to-revision |status|history --revision=2|pause deployment/deploymentName
label pod --overwrite=true
apply
  # declarative: Deployment
create --record
  # imperative: ReplicaSet
delete --all -f
  # -f identifies resources from a file
expose
cordon
explain
describe
logs -f -c
exec pod-name -c -it -- /bin/sh
watch kubectl get pods -l app=nginx
kubectl set image deployment/a-deployment a-container=harbor.atompai.com/k8s-test/testa:v2 --record
port : the port the Service exposes for access from inside the cluster
hostPort : the port on the pod's node that is mapped to the pod
nodePort : the way a Service exposes itself to the outside, which also fixes the exposed port
targetPort : the container's port (the final destination of the traffic). targetPort is the port on the pod; traffic arriving at port and nodePort
  passes through kube-proxy into the backend pod's targetPort and finally enters the container.
containerPort : the port used inside the container
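A minimal NodePort Service sketch tying the terms above together (selector, names, and port numbers are illustrative assumptions):
# apiVersion: v1
# kind: Service
# metadata:
#   name: web
# spec:
#   type: NodePort
#   selector:
#     app: nginx
#   ports:
#     - port: 8080        # in-cluster clients reach the Service on this port
#       targetPort: 80    # kube-proxy forwards to this port on the backend pod (its containerPort)
#       nodePort: 30080   # external clients reach any node's IP on this port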
|
xtalpi/src/doc/yamls/note2.yaml
|
tools:
github:
- name: local
use_shell: true
shell_name: bash
helm:
- name: in-docker
cmd@env:
- docker
- run
- -i
- --rm
- -w
- $(pwd)
- -v
- $(pwd):$(pwd)
# source(docker): ghcr.io/arhat-dev/helm
- ghcr.io/arhat-dev/helm:3.6
- helm
workflow:
- name: local
shells:
- name: bash
workflow:run:
- name: test-gen
matrix:
chart@shell: |-
cd charts
find . -mindepth 1 -maxdepth 1 -type d -exec echo '- {}' \;
jobs:
- shell@template: |-
{{ $chart := (.Env.MATRIX_CHART | trimPrefix "./") }}
mkdir -p build
helm template {{ $chart }} \
./charts/{{ $chart }} \
--include-crds \
--namespace test-gen \
--debug > build/{{ $chart }}.yaml
- name: release-chart-to-github
hooks:
before:
- name: Fetch history
shell: git fetch --prune --unshallow
continue_on_error: true
- name: Lint charts
shell: make lint.all
- shell: |-
git config --global user.email '7279572<EMAIL>'
git config --global user.name '<NAME>'
jobs:
- task: helm:in-docker:package(charts)
- task: github:local:release(charts)
- task: helm:in-docker:index(charts)
github:release:
- name: charts
hooks:
before:
- shell:bash: |-
tag_name="chart-release-${GIT_COMMIT:0:8}"
git tag -m "${tag_name}" "${tag_name}"
git push origin "${tag_name}"
title@template: chart-release-{{ .Env.GIT_COMMIT | substr 0 8 }}
tag@template: chart-release-{{ .Env.GIT_COMMIT | substr 0 8 }}
notes@env: |-
Automated chart release
for commit ${GIT_COMMIT}
draft: false
files:
- path: ./.packages/*.tgz
helm:package:
- name: charts
chart: charts/*
packages_dir: &pkg_dir .packages
signing:
enabled: false
helm:index:
- name: charts
hooks:
before:
- shell: git switch gh-pages
after:success:
- shell: |-
cat .packages/index.yaml
sudo mv .packages/index.yaml ./index.yaml
git add ./index.yaml
git commit -m "chore: Update chart index"
git push
after:
- shell@env: |-
echo "going back to ${GIT_BRANCH}"
git switch ${GIT_BRANCH}
packages_dir: *pkg_dir
repo_url@template: |-
https://github.com/arhat-dev/helm-charts/releases/
{{- /* just a comment for multi-line formatting */ -}}
download/chart-release-{{ .Env.GIT_COMMIT | substr 0 8 }}/
merge: ./index.yaml
|
.dukkha.yaml
|
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "transactionqueryservice.fullname" . }}-event-api
labels:
app: {{ template "transactionqueryservice.name" . }}
chart: {{ template "transactionqueryservice.chart" . }}
draft: {{ .Values.draft | default "draft-app" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
revisionHistoryLimit: 0
replicas: {{ .Values.transactionqueryservice.replicaCount }}
selector:
matchLabels:
app: {{ template "transactionqueryservice.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "transactionqueryservice.name" . }}
draft: {{ .Values.draft | default "draft-app" }}
release: {{ .Release.Name }}
annotations:
buildID: {{ .Values.buildID | default "" | quote }}
prometheus.io/scrape: "true"
prometheus.io/path: {{ .Values.prometheus.metricspath }}
prometheus.io/port: "{{ .Values.prometheus.metricsport }}"
spec:
volumes:
- name: {{ .Values.transactionstore.shareName }}
persistentVolumeClaim:
claimName: pvc-transaction-query-service
containers:
- name: {{ .Chart.Name }}
volumeMounts:
- name: {{ .Values.transactionstore.shareName }}
mountPath: /mnt/stores/{{ .Values.transactionstore.shareName }}
readOnly: true
image: "{{ .Values.imagestore.transactionqueryservice.registry }}{{ .Values.imagestore.transactionqueryservice.repository }}:{{ .Values.imagestore.transactionqueryservice.tag }}"
ports:
- name: http
containerPort: {{ .Values.transactionqueryservice.deployment.containerPort }}
protocol: TCP
- name: metrics
containerPort: {{ .Values.prometheus.metricsport }}
protocol: TCP
{{- if .Values.transactionqueryservice.probes.enabled }}
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
{{- end }}
env:
- name: ASPNETCORE_URLS
value: "https://+:443;http://+:{{ .Values.prometheus.metricsport }}"
- name: username
valueFrom:
secretKeyRef:
name: transactionqueryservicesecret
key: username
- name: password
valueFrom:
secretKeyRef:
name: transactionqueryservicesecret
key: password
resources:
{{ toYaml .Values.transactionqueryservice.resources | indent 12 }}
{{- with .Values.transactionqueryservice.imagePullSecrets }}
imagePullSecrets:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.transactionqueryservice.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.transactionqueryservice.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.transactionqueryservice.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
|
adaptation/templates/transaction-query-service-deployment.yaml
|
items:
- uid: SpiceSharpParser.Parsers.Netlist
commentId: N:SpiceSharpParser.Parsers.Netlist
id: SpiceSharpParser.Parsers.Netlist
children:
- SpiceSharpParser.Parsers.Netlist.ParseException
- SpiceSharpParser.Parsers.Netlist.ParseTreeEvaluationException
- SpiceSharpParser.Parsers.Netlist.ParseTreeNode
- SpiceSharpParser.Parsers.Netlist.ParseTreeNonTerminalNode
- SpiceSharpParser.Parsers.Netlist.ParseTreeTerminalNode
- SpiceSharpParser.Parsers.Netlist.ParseTreeTraversal
langs:
- csharp
- vb
name: SpiceSharpParser.Parsers.Netlist
nameWithType: SpiceSharpParser.Parsers.Netlist
fullName: SpiceSharpParser.Parsers.Netlist
type: Namespace
assemblies:
- SpiceSharpParser
references:
- uid: SpiceSharpParser.Parsers.Netlist.ParseException
commentId: T:SpiceSharpParser.Parsers.Netlist.ParseException
name: ParseException
nameWithType: ParseException
fullName: SpiceSharpParser.Parsers.Netlist.ParseException
- uid: SpiceSharpParser.Parsers.Netlist.ParseTreeEvaluationException
commentId: T:SpiceSharpParser.Parsers.Netlist.ParseTreeEvaluationException
name: ParseTreeEvaluationException
nameWithType: ParseTreeEvaluationException
fullName: SpiceSharpParser.Parsers.Netlist.ParseTreeEvaluationException
- uid: SpiceSharpParser.Parsers.Netlist.ParseTreeNode
commentId: T:SpiceSharpParser.Parsers.Netlist.ParseTreeNode
parent: SpiceSharpParser.Parsers.Netlist
name: ParseTreeNode
nameWithType: ParseTreeNode
fullName: SpiceSharpParser.Parsers.Netlist.ParseTreeNode
- uid: SpiceSharpParser.Parsers.Netlist.ParseTreeNonTerminalNode
commentId: T:SpiceSharpParser.Parsers.Netlist.ParseTreeNonTerminalNode
parent: SpiceSharpParser.Parsers.Netlist
name: ParseTreeNonTerminalNode
nameWithType: ParseTreeNonTerminalNode
fullName: SpiceSharpParser.Parsers.Netlist.ParseTreeNonTerminalNode
- uid: SpiceSharpParser.Parsers.Netlist.ParseTreeTerminalNode
commentId: T:SpiceSharpParser.Parsers.Netlist.ParseTreeTerminalNode
name: ParseTreeTerminalNode
nameWithType: ParseTreeTerminalNode
fullName: SpiceSharpParser.Parsers.Netlist.ParseTreeTerminalNode
- uid: SpiceSharpParser.Parsers.Netlist.ParseTreeTraversal
commentId: T:SpiceSharpParser.Parsers.Netlist.ParseTreeTraversal
name: ParseTreeTraversal
nameWithType: ParseTreeTraversal
fullName: SpiceSharpParser.Parsers.Netlist.ParseTreeTraversal
- uid: SpiceSharpParser.Parsers.Netlist
commentId: N:SpiceSharpParser.Parsers.Netlist
name: SpiceSharpParser.Parsers.Netlist
nameWithType: SpiceSharpParser.Parsers.Netlist
fullName: SpiceSharpParser.Parsers.Netlist
|
src/docs/api/SpiceSharpParser.Parsers.Netlist.yml
|
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: customersapi
spec:
replicas: 1
# The template of a deployment specification is a pod specification
template:
metadata:
# This label identifies the pod so that it can be referenced by other
# components (like services)
labels:
app: aspnetcoredemo
component: customersapi
spec:
# A pod contains one or more containers which are deployed together
containers:
- name: customersapi
# Kubernetes doesn't allow parameterized manifests, so this is a placeholder
# that our deploy.ps1 script will replace at runtime
# (since the image will depend on which container registry is being used)
image: <CustomersApiImage>
imagePullPolicy: Always
ports:
- containerPort: 80
# Environment variables are the most common way of specifying configuration
# for Kubernetes-hosted services. As shown here, the config values can come from
# Kubernetes' secret store or (as shown below) can be hardcoded
env:
- name: ApplicationInsights__InstrumentationKey
valueFrom:
secretKeyRef:
name: aspnetcoredemo-secrets
key: AppInsightsKey
# An image pull secret is the secret used to connect with a private Docker registry
imagePullSecrets:
- name: registry-key
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: customersmvc
spec:
replicas: 1
template:
metadata:
labels:
app: aspnetcoredemo
component: customersmvc
spec:
imagePullSecrets:
- name: registry-key
containers:
- name: customersmvc
image: <CustomersMvcImage>
imagePullPolicy: Always
ports:
- containerPort: 80
env:
# Kubernetes DNS will expose services based on their name, so we set
# CustomersAPIService__Url to use the CustomersApi service's name
- name: CustomersAPIService__Url
value: http://customersapiservice:5000
- name: ApplicationInsights__InstrumentationKey
valueFrom:
secretKeyRef:
name: aspnetcoredemo-secrets
key: AppInsightsKey
|
k8s/deployments.yml
|
homepage: "https://www.leanplum.com"
documentation: "https://docs.leanplum.com/docs/google-tag-manager-setup"
versions:
- sha: a2ac4c9214727aa7fda6d0404ce62d7ab98cc622
changeNotes: |2
What's new:
- Add the `defineAction` method that enables development of custom message templates. (#138)
      - Add the `setLocale` method to allow setting a new locale (#140, thanks to @wil93)
What's fixed:
- Dev mode connection to dev server is not always correctly established (#142)
- The `downloadMessages` callback is sometimes called with a null value (#139, thanks to @alberto911)
- sha: 5606c2e642afb87e7b5f025ea87e012c6735fbfe
changeNotes: |2
What's new:
- The "Register for push" app function can now be used (#123, #124)
What's fixed:
- Support for chained messages created in Messaging (#128)
- sha: 61d8a336faf3ddd67eb0bdd21a7fca80c16f9257
changeNotes: |2
What's new:
- Additional data for App Inbox messages can be accessed through the `.data()` method. (#122)
What's fixed:
- Resolve deprecated API warning when rendering In-App messages. (#121)
- sha: 2a0e088704a76bbc137fdcfcb8d162241ef979fe
changeNotes: |2
What's new:
- When triggering an `Open URL` action, there's a preventable `openUrl` event that allows integration code to handle it, instead of using `window.location` directly. (#118)
What's fixed:
- Lifetime occurrences were not counted correctly when starting new sessions. (#119)
- sha: 96f9c856e207d8687b3a98ae358c8dddd62c4c3a
changeNotes: |2
What's new:
- Rich In-App message rendering.
- sha: 9af670fba52317659d52aaaaec2d0511aa044604
changeNotes: |2
Update tag name and description
- sha: 0b79f6aeddcb607d47fd3ad675a9d5b0e8b4f590
changeNotes: |2
What's new:
- Call `useSessionLength()` when loading SDK to ensure sessions are deduplicated.
- sha: 0f505dc80a436ce92ed51d36fb222c6f8bceffb4
changeNotes: |2
What's new:
- In-App Messages: subscribe to the `showMessage` event for custom rendering of in-app messages.
See the [Web In-App messages help topic](https://docs.leanplum.com/v1/reference#web-in-app-messages) for more information.
- `getFileUrl` method that resolves a filename to link to the [downloadFile API](https://docs.leanplum.com/reference#get_api-action-downloadfile).
- `on` and `off` methods for subscribing to events
- `getUserId` method for getting the ID of the current user
What's fixed:
- JS error when the development socket closes due to an error (#95)
|
metadata.yaml
|
---
result: SUCCESS
timestamp: 2016-06-06 17:14:37 UTC
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/68/
trigger_url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-trigger-ad_hoc/30/
duration: 1h3m19s
runs:
debian-6-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=i386,platform=debian-6,project=angry-omnibus-toolchain,role=builder/68/
duration: 15m6s
el-5-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=i386,platform=el-5,project=angry-omnibus-toolchain,role=builder/68/
duration: 15m15s
el-6-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=i386,platform=el-6,project=angry-omnibus-toolchain,role=builder/68/
duration: 18m
freebsd-10-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=i386,platform=freebsd-10,project=angry-omnibus-toolchain,role=builder/68/
duration: 17m23s
freebsd-9-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=i386,platform=freebsd-9,project=angry-omnibus-toolchain,role=builder/68/
duration: 23m31s
ubuntu-12.04-i386:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=i386,platform=ubuntu-12.04,project=angry-omnibus-toolchain,role=builder/68/
duration: 28m27s
solaris-10-i86pc:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=i86pc,platform=solaris-10,project=angry-omnibus-toolchain,role=builder/68/
duration: 11m41s
aix-6.1-powerpc:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=powerpc,platform=aix-6.1,project=angry-omnibus-toolchain,role=builder/68/
duration: 27m44s
el-7-ppc64:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=ppc64,platform=el-7,project=angry-omnibus-toolchain,role=builder/68/
duration: 14m27s
el-7-ppc64le:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=ppc64le,platform=el-7,project=angry-omnibus-toolchain,role=builder/68/
duration: 14m3s
ubuntu-14.04-ppc64le:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=ppc64le,platform=ubuntu-14.04,project=angry-omnibus-toolchain,role=builder/68/
duration: 13m52s
solaris-10-sun4v:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=sun4v,platform=solaris-10,project=angry-omnibus-toolchain,role=builder/68/
duration: 1h3m13s
debian-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=debian-6,project=angry-omnibus-toolchain,role=builder/68/
duration: 24m42s
el-5:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=el-5,project=angry-omnibus-toolchain,role=builder/68/
duration: 23m40s
el-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=el-6,project=angry-omnibus-toolchain,role=builder/68/
duration: 31m55s
el-7:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=el-7,project=angry-omnibus-toolchain,role=builder/68/
duration: 13m58s
freebsd-10:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=freebsd-10,project=angry-omnibus-toolchain,role=builder/68/
duration: 33m48s
freebsd-9:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=freebsd-9,project=angry-omnibus-toolchain,role=builder/68/
duration: 48m52s
ios_xr-6:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=ios_xr-6,project=angry-omnibus-toolchain,role=builder/68/
duration: 22m29s
mac_os_x-10.9:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=mac_os_x-10.9,project=angry-omnibus-toolchain,role=builder/68/
duration: 9m9s
nexus-7:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=nexus-7,project=angry-omnibus-toolchain,role=builder/68/
duration: 22m8s
ubuntu-12.04:
result: SUCCESS
url: http://manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/architecture=x86_64,platform=ubuntu-12.04,project=angry-omnibus-toolchain,role=builder/68/
duration: 36m33s
|
reports/stages/manhattan.ci.chef.co/job/angry-omnibus-toolchain-build/68.yaml
|
---
ID: "e6b36111-8411-414b-a7e9-58e75b365ea7"
Parent: "c83cc1ee-a860-4e64-9715-e9c3bbb885f0"
Template: "dd22f1b3-bd87-4db2-9e7d-f7a496888d43"
Path: /sitecore/system/Modules/PowerShell/Script Library/Package Generator/Content Editor/Context Menu/Packaging/Add Tree to Package
DB: master
SharedFields:
- ID: "06d5295c-ed2f-4a54-9bf2-26228d113318"
Hint: __Icon
Value: Software/16x16/branch_add.png
- ID: "1c76313e-3c8c-4807-a826-135d10c39299"
Hint: ShowRule
Type: Rules
Value: <ruleset />
- ID: "7fa141b7-7473-44a9-9bd9-2739c51ff8da"
Hint: PersistentSessionId
Value: packageBuilder
- ID: "b1a94ff0-6897-47c0-9c51-aa6acb80b1f0"
Hint: Script
Value: |
Import-Function Resolve-Error
Function Add-ItemToPackage {
param (
$Package,
$Item,
$IncludeDescendants
)
$path = $Item.ProviderPath
if ($IncludeDescendants) {
$source = New-ItemSource -Item $Item -Name "$path" -InstallMode $InstallMode -MergeMode $MergeMode
} else {
$source = New-ExplicitItemSource -Item $Item -Name "$path" -InstallMode $InstallMode -MergeMode $MergeMode
}
$Package.Sources.Add($source);
return $Package
}
$rootOptions = [ordered]@{
"Root and descendants" = "RootAndDescendants";
"Root and children" = "RootAndChildren";
"Descendants only" = "DescendantsOnly";
"Children only" = "ChildrenOnly";
};
$installModeOptions = [ordered]@{
"Merge" = "Merge";
"Skip" = "Skip";
"Side By Side" = "SideBySide"
};
$mergeModeOptions = [ordered]@{
"Merge" = "Merge"
"Clear" = "Clear";
"Append" = "Append";
};
$result = Read-Variable -Parameters `
@{ Name = "IncludeItems"; Title="Items to include in package"; Options=$rootOptions; Value="RootAndDescendants"; Editor="radio" }, `
@{ Name = "InstallMode"; Title="Installation Behaviour"; Options = $installModeOptions ; Hint = "How should the installer behave if the package contains items that already exist"; Placeholder = "Overwrite"}, `
@{ Name = "MergeMode"; Title="Merge Behaviour"; Options = $mergeModeOptions ; Hint = "This only applies if install behaviour is Merge"}, `
@{ Name = "info"; Title="Click OK to add branch to your package or Cancel to abort."; Value="Root: '$((Get-Item .).ProviderPath)'" ; Editor="info"} `
-Description "Set installation options for this package source." `
-Title "Installation options" -ShowHints
Resolve-Error
if($result -ne "ok")
{
Close-Window
Exit
}
$InstallMode = if ($InstallMode) { $InstallMode } else { "Overwrite" }
if ($IncludeItems -eq "RootAndDescendants") {
$package = Add-ItemToPackage -Package $package -Item (Get-Item .) -IncludeDescendants $True
}
else
{
if ($IncludeItems -eq "RootAndChildren") {
$package = Add-ItemToPackage -Package $package -Item (Get-Item .) -IncludeDescendants $False
}
# Iterate over the children and either add to include descendants or just themselves
Get-ChildItem . | % {
$package = Add-ItemToPackage -Package $package -Item $_ -IncludeDescendants ($IncludeItems -eq "DescendantsOnly")
}
}
Close-Window
- ID: "f62c67b4-28b6-4a6f-91cb-db78cf054f4e"
Hint: EnableRule
Type: Rules
Value: |
<ruleset>
<rule
uid="{53E0EE5B-77EB-4FE5-AFC3-C676DE31736E}">
<conditions>
<condition
id="{3578FABA-08EF-4FDE-BEEC-1EC8F5F47530}"
uid="5E3F148A40024D15A7E2F6E789A78A08"
persistentsessionid="packageBuilder" />
</conditions>
</rule>
</ruleset>
Languages:
- Language: en
Versions:
- Version: 1
Fields:
- ID: "25bed78c-4957-4165-998a-ca1b52f67497"
Hint: __Created
Value: 20130919T124500
|
Cognifide.PowerShell/serialization/Scripts/Script Library/Script Library/Package Generator/Content Editor/Context Menu/Packaging/Add Tree to Package.yml
|
# The port to listen for HTTPS requests on
listenport: 31337
# Show or hide eldim information from HTTP headers
servertokens: true
# Maximum uploaded file size to keep in RAM before using disk
maxuploadram: 100
#####
# TLS Configuration
#####
# The TLS Certificate Chain File
tlschain: "/etc/letsencrypt/live/eldim.example.com/fullchain.pem"
# The TLS Private Key File
tlskey: "/etc/letsencrypt/live/eldim.example.com/privkey.pem"
#####
# Clients
#####
# The file to load the clients from
clientfile: "/etc/eldim/clients.yml"
#####
# Encryption
#####
# The encryption password to use for all file uploads
# encryptionkey: "Insecure" (DEPRECATED)
# Public Keys that will be able to decrypt uploaded files
encryption:
age-id:
- "age17w2a6ad6cccx5kenanamr72e9qzgt5d7vqhvq8rrxx8pc3qt53vq70un5p"
age-ssh:
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMAvJvMq0gej+uXqINFrrbqAElw2h32qyxGEBG8ef7vn a-comment"
- "ssh-rsa AAAAB3RSAKeysAreTooLongSoISnippedThisSorrytlCx+PAaDpsD3yQ== another-comment"
#####
# Prometheus
#####
# Enable or disable Prometheus metrics
prometheusenabled: true
# Prometheus Endpoint (/metrics) HTTP Basic Auth Username
prometheusauthuser: "username"
# Prometheus Endpoint (/metrics) HTTP Basic Auth Password
prometheusauthpass: "password"
#####
# Backends
#####
# All the OpenStack Swift Backends
swiftbackends:
-
name: "OVH-Cold"
username: "openStackUser"
apikey: "openStackPassword"
authurl: "https://auth.cloud.ovh.net/v3"
region: "GRA3"
container: "my-logs-cold"
expireseconds: 63072000
-
name: "OVH-Hot"
username: "openStackUser"
apikey: "openStackPassword"
authurl: "https://auth.cloud.ovh.net/v3"
region: "SBG3"
container: "my-logs-hot"
expireseconds: 2592000
# All the Google Cloud Storage Backends
gcsbackends:
-
name: "Example Google Cloud Storage Backend"
bucketname: "eldim-is-awesome"
credsfile: "/etc/eldim/gcs-credentials.json"
# All S3 Storage Backends
s3backends:
-
name: "Minio Play"
endpoint: "play.minio.io"
region: "does-not-matter"
bucketname: "0000eldim"
accesskey: "Q3AM3UQ867SPQQA43P2F"
secretkey: "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
-
name: "Scaleway Example"
endpoint: "s3.fr-par.scw.cloud"
region: "fr-par"
bucketname: "eldim-bucket"
accesskey: "keyGoesHere"
secretkey: "secretGoesHere"
-
name: "Amazon S3 Example"
endpoint: "s3.amazonaws.com"
region: "eu-central-1"
bucketname: "eldim-in-amazon"
accesskey: "AccessKey"
secretkey: "SecretKey"
-
name: "Backblaze B2 Example"
endpoint: "s3.eu-central-003.backblazeb2.com"
region: "eu-central-003"
bucketname: "b2-bucket-name"
accesskey: "AppKey-keyID"
secretkey: "AppKey-applicationKey"
sendcontentmd5: true # If you use Object Lock (>=1d), this is mandatory
|
eldim.yml
|
id: islandora_basic_image_media
label: Basic Image Objects OBJ Media
migration_group: islandora_7x
migration_dependencies:
required:
- islandora_basic_image_files
- islandora_basic_image
source:
plugin: islandora
# Base url of your Solr instance
solr_base_url: http://10.0.2.2:9080/solr
# Base url of your Fedora 3 instance
fedora_base_url: &fedora_base_url http://10.0.2.2:9080/fedora
  # Define an HTTP data fetcher to access the remote sites
data_fetcher_plugin: http
# Add authentication information to access Fedora
authentication: &fedora_auth
plugin: basic
username: fedoraAdmin
password: <PASSWORD>
  # We search Solr for PIDs; this is your Solr field for content models
content_model_field: RELS_EXT_hasModel_uri_ms
# This is the content model to search for (with or without "info:fedora/")
content_model: islandora:sp_basic_image
# Fedora requires the authentication you defined above to access the objectXML, so use this plugin instead of xml
data_parser_plugin: authenticated_xml
  # This grabs the root of the object, since we only get one object per XML document.
item_selector: /foxml:digitalObject
constants:
# Tag for this media.
preservation_master: 'Preservation Master'
# UID of the author.
creator_uid: 1
fields:
-
name: PID
label: 'PID'
selector: '@PID'
-
name: mimetype
label: "MimeType"
selector: 'foxml:datastream[@ID = "OBJ" and @CONTROL_GROUP = "M"]/foxml:datastreamVersion[position() = last()]/@MIMETYPE'
-
name: filesize
label: 'File size'
selector: 'foxml:datastream[@ID = "OBJ" and @CONTROL_GROUP = "M"]/foxml:datastreamVersion[position() = last()]/@SIZE'
ids:
PID:
type: string
process:
# Add mimetype if available.
field_mime_type:
plugin: skip_on_empty
method: process
source: mimetype
# Add file size if available.
field_file_size:
plugin: skip_on_empty
method: process
source: filesize
  # Look up the image file we just migrated
field_media_image/target_id:
plugin: migration_lookup
migration: islandora_basic_image_files
source: PID
no_stub: true
# Set the display profile.
field_media_image/display:
plugin: default_value
default_value: 1
# Set the description.
field_media_image/description:
plugin: default_value
default_value: ''
  # Look up the repository object we just created
field_media_of:
plugin: migration_lookup
migration: islandora_basic_image
source: PID
no_stub: true
# Set as Preservation Master
field_tags:
plugin: entity_lookup
source: constants/preservation_master
value_key: name
bundle_key: vid
bundle: tags
entity_type: taxonomy_term
ignore_case: true
# Set author.
uid: constants/creator_uid
destination:
plugin: 'entity:media'
default_bundle: image
# Enforced dependencies mean this migration is removed if any of these modules are uninstalled.
dependencies:
enforced:
module:
- migrate_7x_claw
- migrate_plus
- islandora
|
config/install/migrate_plus.migration.islandora_basic_image_media.yml
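The migration above hangs off `migration_group: islandora_7x`. For context, a migrate_plus group is itself a small piece of configuration whose `shared_configuration` is merged into every member migration; a hypothetical group definition (label and shared keys are illustrative only, not part of this repo) could look like:

```yaml
# Hypothetical migrate_plus.migration_group.islandora_7x.yml -- a sketch only.
id: islandora_7x
label: 'Islandora 7.x migrations'
# Anything under shared_configuration is merged into each member migration,
# so common source settings can live here instead of in every file.
shared_configuration:
  source:
    solr_base_url: http://10.0.2.2:9080/solr
    fedora_base_url: http://10.0.2.2:9080/fedora
```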
|
---
features:
- |
    In this release the OVS plugin was extended to always plug VIFs, even when
    libvirt could plug the VIF itself. This enables faster live migration by
    leveraging the multiple port bindings work completed in the Rocky release.
security:
- |
In this release an edgecase where libvirt plugged the VIF instead of os-vif
was addressed. Previously if ``ovs_hybrid_plug`` was set to ``False`` in
the port binding details, os-vif would only ensure the ovs bridge existed
and the plugging would be done by libvirt. As a result during live
migration, there was a short interval where a guest could receive tagged
broadcast, multicast, or flooded traffic to/from another tenant.
This vulnerability is described in `bug 1734320`_. By ensuring that
os-vif always creates the OVS port as part of vif plugging we enable
neutron to isolate the port prior to nova resuming the VM on the
destination node. Note that as Nova cannot rely on Neutron to send
``network-vif-plugged`` events on completion of wiring up an interface
it cannot wait to receive a notification before proceeding with the
migration. As a result this is a partial mitigation and additional changes
will be required to fully address this bug.
.. _bug 1734320: https://bugs.launchpad.net/neutron/+bug/1734320
- |
A new config option was introduced for the OVS VIF plugin.
The ``isolate_vif`` option was added as a partial mitigation of
`bug 1734320`_. The ``isolate_vif`` option defaults to ``False`` for
backwards compatibility with SDN controller based OpenStack deployments.
For all deployments using the reference implementation of ML2/OVS with
the neutron L2 agents, ``isolate_vif`` should be set to ``True``.
This option instructs the OVS plugin to assign the VIF to the
Neutron dead VLAN (4095) when attaching the interface to OVS. By setting
    the VIF's VLAN to this dead VLAN number, we eliminate the small attack
    vector that would otherwise let other tenants read packets during the
    VIF's bring-up.
|
releasenotes/notes/always-plug-vifs-for-ovs-1d033fc49a9c6c4e.yaml
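For deployments matching the note above, the mitigation is a one-line change on each compute node. A minimal sketch, assuming the option lives in the `[os_vif_ovs]` group of the consuming service's configuration file (nova.conf on compute nodes):

```ini
# Sketch: enable the dead-VLAN isolation described above. Appropriate only
# for ML2/OVS with the neutron L2 agents; leave False for SDN-controller
# based deployments, per the note.
[os_vif_ovs]
isolate_vif = True
```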
|
- name: Reset kubernetes token file
file:
path: /opt/config/token.txt
state: absent
tags:
- first_masters
- kube_masters
- other_masters
  when: inventory_hostname == hostvars[groups['kube_masters'][0]]['inventory_hostname']
- name: Create tokens file
file:
path: /opt/config/token.txt
state: touch
force: yes
  when: inventory_hostname == hostvars[groups['kube_masters'][0]]['inventory_hostname']
tags:
- first_masters
- kube_masters
- other_masters
- name: Add export KUBECONFIG=/etc/kubernetes/admin.conf to bashrc
lineinfile:
path: /root/.bashrc
line: "{{ item }}"
loop:
- 'export KUBECONFIG=/etc/kubernetes/admin.conf'
- 'source <(kubectl completion bash)'
tags:
- first_masters
- kube_masters
- other_masters
- name: Apply bashrc changes
shell: |
source /root/.bashrc
args:
executable: /bin/bash
tags:
- first_masters
- kube_masters
- other_masters
- name: create kubernetes config directory
file:
path: ~/.kube/
state: directory
become: false
tags:
- first_masters
- kube_masters
- other_masters
- name: create kubernetes root config directory
file:
path: /root/.kube/
state: directory
tags:
- first_masters
- kube_masters
- other_masters
- name: permissions for admin.conf
file:
path: /etc/kubernetes/admin.conf
mode: 0775
tags:
- first_masters
- kube_masters
- other_masters
- name: copy admin.conf to home directory
  copy:
    src: /etc/kubernetes/admin.conf
    dest: "~/.kube/config"
    mode: 0755
    remote_src: true
  become: false
  tags:
    - first_masters
    - kube_masters
    - other_masters
- name: copy admin.conf to root home directory
copy:
src: /etc/kubernetes/admin.conf
dest: "/root/.kube/config"
remote_src: true
tags:
- first_masters
- kube_masters
- other_masters
- name: Get kubeadm token
shell: |
kubeadm token create --print-join-command --ttl=0
register: kubeadmToken
  when: inventory_hostname == hostvars[groups['kube_masters'][0]]['inventory_hostname']
tags:
- first_masters
- kube_masters
- other_masters
- name: Put kubernetes token to file
lineinfile:
path: /opt/config/token.txt
line: "{{ kubeadmToken.stdout }}"
  when: inventory_hostname == hostvars[groups['kube_masters'][0]]['inventory_hostname']
tags:
- first_masters
- kube_masters
- other_masters
- name: Copy init token to ansible host
fetch:
src: /opt/config/token.txt
dest: /tmp/k8s/
  when: inventory_hostname == hostvars[groups['kube_masters'][0]]['inventory_hostname']
tags:
- first_masters
- kube_masters
- other_masters
- name: Copy kubeadm token to the Ansible host
  fetch:
    src: "/opt/config/token.txt"
    dest: "/tmp/k8s/token.txt"
    flat: yes
  when: inventory_hostname == hostvars[groups['kube_masters'][0]]['inventory_hostname']
  tags:
    - first_masters
    - kube_masters
    - other_masters
- name: Install Calico
shell: |
kubectl apply -f https://docs.projectcalico.org/v{{ calico_version }}/manifests/calico.yaml
tags:
- first_masters
- kube_masters
- other_masters
  when: inventory_hostname == hostvars[groups['kube_masters'][0]]['inventory_hostname']
|
roles/kube_masters/tasks/after_install.yml
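Since `/tmp/k8s/token.txt` ends up holding the full join command printed by `kubeadm token create --print-join-command`, a worker play can consume it directly. A minimal sketch, assuming a hypothetical `kube_workers` group and root privileges:

```yaml
# Sketch only: the group name is an assumption. lookup() reads the file on
# the Ansible control node; shell runs the join command on the worker.
- name: Join workers using the token fetched from the first master
  shell: "{{ lookup('file', '/tmp/k8s/token.txt') }}"
  when: inventory_hostname in groups['kube_workers']
```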
|
name: hrafnar
version: 0.1.0.0
github: "realglobe-Inc/hrafnar"
license: BSD3
author: "REALGLOBE INC"
maintainer: "<EMAIL>"
copyright: "2018 REALGLOBE INC"
extra-source-files:
- README.md
- ChangeLog.md
- dist/main.js
- dist/index.html
description: Please see the README on GitHub at <https://github.com/realglobe-Inc/hrafnar#readme>
library:
source-dirs: server
internal-libraries:
hrafnar-util:
source-dirs: util
dependencies:
- base >= 4.7 && < 5
- aeson
- array
- async
- directory
- file-embed
- filepath
- extensible
- http-client
- http-client-tls
- http-media
- http-types
- lens
- megaparsec
- mtl
- network
- network-simple
- network-simple-tls
- path
- path-io
- reflection
- safe-exceptions
- text
- scientific
- scotty
- servant
- servant-server
- stm
- template-haskell
- time
- tls
- transformers
- containers
- ulid
- unix-time
- utf8-string
- x509-store
- wai
- wai-cors
- warp
- yaml
  # Temporary dependencies (chore: remove later)
- process
- bytestring
- unordered-containers
- vector
default-extensions:
- DataKinds
- ConstraintKinds
- ExistentialQuantification
- FlexibleContexts
- FlexibleInstances
- KindSignatures
- LambdaCase
- MultiParamTypeClasses
- OverloadedLabels
- OverloadedStrings
- QuasiQuotes
- RankNTypes
- RecordWildCards
- PolyKinds
- ScopedTypeVariables
- TemplateHaskell
- TupleSections
- TypeApplications
- TypeFamilies
- TypeOperators
- TypeSynonymInstances
executables:
hrafnar:
main: Main.hs
source-dirs: server-exec
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
- -optP-Wno-nonportable-include-path
dependencies:
- hrafnar
- optparse-applicative
hli:
main: Main.hs
source-dirs: repl
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
- -optP-Wno-nonportable-include-path
dependencies:
- hrafnar
- haskeline
- optparse-applicative
tests:
hrafnar-test:
main: Main.hs
source-dirs: server-test
ghc-options:
- -threaded
- -rtsopts
- -with-rtsopts=-N
dependencies:
- hrafnar
- hrafnar-util
- hspec
- hspec-megaparsec
- hspec-wai
- smallcheck-series
- case-insensitive
- wai-extra
util-test:
main: Main.hs
source-dirs: util-test
dependencies:
- hrafnar-util
- hspec
- th-test-utils
|
package.yaml
|
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
labels:
role: alert-rules
app: {{ .Values.prometheus_rule_selector_app }}
release: {{ .Values.prometheus_rule_selector_release }}
name: {{ .Values.fullnameOverride }}-custom-rule-k8s
namespace: {{ .Values.namespace }}
spec:
groups:
- name: alertrules.custom.k8s
rules:
- alert: TargetDown
annotations:
message: '{{`{{`}} printf "%.4g" $value {{`}}`}}% of the {{`{{`}} $labels.job {{`}}`}}/{{`{{`}} $labels.service {{`}}`}} targets in {{`{{`}} $labels.namespace {{`}}`}} namespace are down.'
expr: 100 * (count(up == 0) BY (job, namespace, service) / count(up) BY (job, namespace, service)) > 10
for: 10m
labels:
severity: critical
notify: inhibit
- alert: KubeHpaMaxedOut
annotations:
message: HPA {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.hpa {{`}}`}} has been running at max replicas for longer than 15 minutes.
expr: |-
kube_hpa_status_current_replicas{job="kube-state-metrics", namespace=~".*"}
==
kube_hpa_spec_max_replicas{job="kube-state-metrics", namespace=~".*"}
for: 15m
labels:
severity: critical
notify: inhibit
- alert: KubeJobFailed
annotations:
message: Job {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.job_name {{`}}`}} failed to complete.
expr: kube_job_failed{job="kube-state-metrics", namespace=~".*"} > 0
for: 15m
labels:
severity: critical
notify: inhibit
- alert: KubeNodeNotReady
annotations:
message: '{{`{{`}} $labels.node {{`}}`}} has been unready for more than 15 minutes.'
expr: kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0
for: 15m
labels:
severity: critical
notify: inhibit
- alert: KubeNodeUnreachable
annotations:
message: '{{`{{`}} $labels.node {{`}}`}} is unreachable and some workloads may be rescheduled.'
expr: kube_node_spec_taint{job="kube-state-metrics",key="node.kubernetes.io/unreachable",effect="NoSchedule"} == 1
for: 2m
labels:
severity: critical
notify: inhibit
- alert: KubeletTooManyPods
annotations:
message: Kubelet '{{`{{`}} $labels.node {{`}}`}}' is running at {{`{{`}} $value | humanizePercentage {{`}}`}} of its Pod capacity.
expr: max(max(kubelet_running_pod_count{job="kubelet", metrics_path="/metrics"}) by(instance) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) by(node) / max(kube_node_status_capacity_pods{job="kube-state-metrics"} != 1) by(node) > 0.95
for: 15m
labels:
severity: critical
notify: inhibit
|
kubernetes/helm_charts/monitoring/alertrules/templates/custom_promrules_k8s.yml
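The backtick-quoted `{{`...`}}` sequences are Helm's standard trick for emitting literal braces, so the Prometheus/Alertmanager templating inside the messages survives chart rendering. After `helm template`, the TargetDown annotation, for example, comes out as:

```yaml
message: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.'
```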
|
---
fixtures:
- SecureRBACPolicyFixture
vars:
- &project_id $ENVIRON['PROJECT_ID']
- &project_id_alt $ENVIRON['PROJECT_ID_ALT']
- &system_admin_headers
x-auth-token: user
x-roles: admin,member,reader
accept: application/json
content-type: application/json
openstack-api-version: placement latest
openstack-system-scope: all
- &system_reader_headers
x-auth-token: user
x-roles: reader
accept: application/json
content-type: application/json
openstack-api-version: placement latest
openstack-system-scope: all
- &project_admin_headers
x-auth-token: user
x-roles: admin,member,reader
x-project-id: *project_id
accept: application/json
content-type: application/json
openstack-api-version: placement latest
- &project_member_headers
x-auth-token: user
x-roles: member,reader
x-project-id: *project_id
accept: application/json
content-type: application/json
openstack-api-version: placement latest
- &project_reader_headers
x-auth-token: user
x-roles: reader
x-project-id: *project_id
accept: application/json
content-type: application/json
openstack-api-version: placement latest
- &alt_project_admin_headers
x-auth-token: user
x-roles: admin,member,reader
x-project-id: *project_id_alt
accept: application/json
content-type: application/json
openstack-api-version: placement latest
- &alt_project_member_headers
x-auth-token: user
x-roles: member,reader
x-project-id: *project_id_alt
accept: application/json
content-type: application/json
openstack-api-version: placement latest
- &alt_project_reader_headers
x-auth-token: user
x-roles: reader
x-project-id: *project_id_alt
accept: application/json
content-type: application/json
openstack-api-version: placement latest
tests:
- name: system admin can create resource provider
POST: /resource_providers
request_headers: *system_admin_headers
data:
name: $ENVIRON['RP_NAME']
uuid: $ENVIRON['RP_UUID']
status: 200
- name: project admin cannot list provider usage
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
request_headers: *project_admin_headers
status: 403
- name: project member cannot list provider usage
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
request_headers: *project_member_headers
status: 403
- name: project reader cannot list provider usage
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
request_headers: *project_reader_headers
status: 403
- name: system reader can list provider usage
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
request_headers: *system_reader_headers
status: 200
response_json_paths:
usages: {}
- name: system admin can list provider usage
GET: /resource_providers/$ENVIRON['RP_UUID']/usages
request_headers: *system_admin_headers
status: 200
response_json_paths:
usages: {}
- name: project admin can get total usage for project
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers: *project_admin_headers
status: 200
response_json_paths:
usages: {}
- name: project member can get total usage for project
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers: *project_member_headers
status: 200
response_json_paths:
usages: {}
- name: project reader can get total usage for project
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers: *project_reader_headers
status: 200
response_json_paths:
usages: {}
# Make sure users from other projects can't snoop around for usage on projects
# they have no business knowing about.
- name: project admin cannot get total usage for unauthorized project
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers: *alt_project_admin_headers
status: 403
- name: project member cannot get total usage for unauthorized project
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers: *alt_project_member_headers
status: 403
- name: project reader cannot get total usage for unauthorized project
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers: *alt_project_reader_headers
status: 403
- name: system reader can get total usage for project
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers: *system_reader_headers
status: 200
response_json_paths:
usages: {}
- name: system admin can get total usage for project
GET: /usages?project_id=$ENVIRON['PROJECT_ID']
request_headers: *system_admin_headers
status: 200
response_json_paths:
usages: {}
|
placement/tests/functional/gabbits/usage-secure-rbac.yaml
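The anchored header maps make additional cases cheap to write. As a sketch (not part of the original suite, and assuming the secure-RBAC policy keeps provider creation admin-only), a negative test for a system reader would reuse the same anchors:

```yaml
# Sketch: the expected status assumes create remains admin-only under secure RBAC.
- name: system reader cannot create resource provider
  POST: /resource_providers
  request_headers: *system_reader_headers
  data:
    name: $ENVIRON['RP_NAME']
  status: 403
```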
|
name: gh-action-buildnum
author: EBekker
description: |
  Generates build numbers for Workflow runs, across a number
  of _scopes_, useful for build tracking and versioning.
inputs:
gist_token:
required: true
description: |
      GitHub OAuth/PAT token to be used for accessing Gist to store build number state.
The integrated GITHUB_TOKEN that is normally accessible during a Workflow does not
include read/write permissions to associated Gists, therefore a separate token is
needed. You can control which account is used to actually store the state by
generating a token associated with the target account.
repo_full_name:
required: false
description: |
The name of the current repository, in `<OWNER>/<REPO-NAME>` format.
This input is optional and is only used to override the default value
which is pulled in from the running Workflow context. This value is
used to compute a unique identifier for the Gist that will be used to
store state for current and subsequent build numbers.
Default is `$env:GITHUB_REPOSITORY`.
workflow_name:
required: false
description: |
The name of the workflow to identify the build number with.
This input is optional and is only used to override the default value
which is pulled in from the running Workflow context. This value is
used to compute a unique identifier for the Gist that will be used to
store state for current and subsequent build numbers.
Default is `$env:GITHUB_WORKFLOW`.
version_key:
required: false
description: |
      A unique identifier used to calculate a version-specific build number.
skip_bump:
required: false
description: |
      If true, this will skip bumping up the build numbers and only pull
      in the last values stored in the state Gist.
set_env:
required: false
description: |
If true, this will export the resolved version numbers as environment
variables for the current and future steps.
outputs:
global_build_number:
description: |
Resolved build number for the repository or `global` scope.
workflow_build_number:
description: |
Resolved build number for the Workflow scope.
version_build_number:
description: |
Resolved build number for the Version scope.
branding:
color: purple
icon: terminal
## Even though the Action logic may be implemented
## in PWSH, we still need a NodeJS entry point
runs:
using: node12
main: _init/index.js
|
action.yml
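A consuming workflow step might look like the sketch below; the `@v1` ref and the `GIST_TOKEN` secret name are assumptions, not part of the action definition above:

```yaml
steps:
  - id: buildnum
    uses: ebekker/gh-action-buildnum@v1   # version ref is an assumption
    with:
      gist_token: ${{ secrets.GIST_TOKEN }}   # secret name is an assumption
  # workflow_build_number is one of the outputs declared above
  - run: echo "Workflow build number is ${{ steps.buildnum.outputs.workflow_build_number }}"
```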
|
id: ocd-organization/b84942cc-12c0-418e-ba55-4d4374447597
jurisdiction: ocd-jurisdiction/country:us/state:fl/government
classification: committee
name: Higher Education Appropriations Subcommittee
chamber: lower
sources:
- url: https://www.myfloridahouse.gov/Sections/Committees/committeesdetail.aspx?CommitteeId=3089
members:
- name: Rene "Coach P" Plasencia
role: Chair
person_id: ocd-person/32888a93-b636-4c26-963b-2467072826e4
- name: <NAME>
role: Vice Chair
person_id: ocd-person/ed56c597-3652-45a5-9c01-de0bf3eb8ab9
- name: <NAME>. "Geri" Thompson
role: Democratic Ranking Member
person_id: ocd-person/a26db915-ec42-4abf-9462-3a6bbb633b2e
- name: Rene "Coach P" Plasencia
role: member
person_id: ocd-person/32888a93-b636-4c26-963b-2467072826e4
- name: <NAME>
role: member
person_id: ocd-person/ed56c597-3652-45a5-9c01-de0bf3eb8ab9
- name: <NAME>. "Geri" Thompson
role: member
person_id: ocd-person/a26db915-ec42-4abf-9462-3a6bbb633b2e
- name: <NAME>. "Joe" Casello
role: member
person_id: ocd-person/de6ef298-c7dd-4a60-866e-763e2c5bf607
- name: <NAME>
role: member
person_id: ocd-person/7613c23d-5734-498a-9f69-8037010cba4a
- name: <NAME>
role: member
person_id: ocd-person/5fc9e9fc-1d3e-44f0-8769-71c1413d5370
- name: <NAME> "Randy" Maggard
role: member
person_id: ocd-person/2a415c08-da66-402b-ada6-93390a5df820
- name: <NAME>
role: member
person_id: ocd-person/596401ac-e1f1-4ccf-a4e4-93e1fe05b5d6
- name: <NAME>. "Tray" McCurdy
role: member
person_id: ocd-person/f7e97bcf-0bde-494d-983b-28cfe35a9944
- name: Angela "Angie" Nixon
role: member
person_id: ocd-person/66c3a112-3008-4332-a6b0-8990a7d992d5
- name: <NAME>
role: member
person_id: ocd-person/b2fce1a8-4ba6-483b-a534-fa48cc43ee7f
- name: <NAME>
role: member
person_id: ocd-person/846cffa2-d750-4e03-95c9-61fae85481db
- name: <NAME>
role: member
person_id: ocd-person/2bae38cc-f703-446b-93ec-576c0e388760
- name: <NAME>
role: member
person_id: ocd-person/7bf7d958-fabd-430b-9326-97586b0c0880
- name: <NAME>
role: member
person_id: ocd-person/67e17458-97f7-41c2-a239-e7f19d3bc720
|
data/fl/committees/lower-Higher-Education-Appropriations-Subcommittee-b84942cc-12c0-418e-ba55-4d4374447597.yml
|
run:
build-tags:
- live
skip-dirs:
- assets
skip-files:
- "assets/assets.go"
modules-download-mode: readonly
output:
sort-results: true
# all available settings of specific linters
linters-settings:
dogsled:
max-blank-identifiers: 2
dupl:
threshold: 150
errcheck:
check-type-assertions: true
check-blank: true
exhaustive:
check-generated: false
default-signifies-exhaustive: false
funlen:
lines: 60
statements: 40
gci:
local-prefixes: github.com/chabad360/covey
gocognit:
min-complexity: 15
nestif:
min-complexity: 4
goconst:
min-len: 3
min-occurrences: 3
gocritic:
disabled-tags:
- experimental
gocyclo:
min-complexity: 10
godot:
scope: all
capital: True
godox:
keywords:
- NOTE
- OPTIMIZE
- TODO
- BUG
- FIXME
  gofmt:
    simplify: true
  goheader:
    template-path: LICENSE
golint:
min-confidence: 0.8
gomnd:
settings:
mnd:
checks:
- argument
- case
- condition
- operation
- return
- assign
govet:
check-shadowing: true
# enable or disable analyzers by name
enable:
- atomic
enable-all: false
disable-all: false
lll:
line-length: 140
maligned:
suggest-new: true
misspell:
locale: US
nakedret:
max-func-lines: 30
nolintlint:
allow-leading-space: false
allow-no-explanation: [ ]
require-explanation: true
require-specific: true
unused:
check-exported: true
errorlint:
errorf: true
linters:
# please, do not use `enable-all`: it's deprecated and will be removed soon.
# inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
disable-all: true
enable:
- bodyclose
- deadcode
- dogsled
- dupl
- errcheck
- errorlint
- exhaustive
- funlen
- gci
- gocognit
- goconst
- gocritic
- gocyclo
- godot
- godox
- gofmt
- golint
- gomnd
- goprintffuncname
- gosec
- gosimple
- govet
- ineffassign
- interfacer
- lll
- maligned
- misspell
- nakedret
- nestif
- noctx
- nolintlint
- prealloc
- rowserrcheck
- scopelint
- staticcheck
- structcheck
- stylecheck
- typecheck
- unconvert
- unparam
# - unused
- varcheck
- whitespace
issues:
exclude-rules:
- path: _test\.go
linters:
- gocyclo
- errcheck
- dupl
- gosec
- path: internal/hmac/
text: "weak cryptographic primitive"
linters:
- gosec
- linters:
- lll
source: "^//go:generate "
# # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
# max-issues-per-linter: 0
#
# # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
# max-same-issues: 0
severity:
default-severity: warning
rules:
- linters:
- prealloc
severity: info
|
.golangci.yml
|
product-name: harbor-container-registry
product-properties:
.properties.server_cert_key:
value:
cert_pem: |
{{ indent 10 (include "fullchain") }}
private_key_pem: |
{{ indent 10 (include "privkey") }}
.properties.auth_mode:
selected_option: uaa_auth_pks
value: uaa_auth_pks
.properties.admin_password:
value:
secret: {{env.Getenv "HARBOR_PASSWORD" }}
.properties.admin_password_for_smoketest:
value:
secret: {{env.Getenv "HARBOR_PASSWORD" }}
.properties.clair_updater_interval:
value: 0
.properties.customize_container_network:
selected_option: default
value: default
.properties.hostname:
value: {{ (ds "data").harbor_endpoint }}
.properties.no_proxy:
value: 127.0.0.1,localhost,ui,registry
.properties.registry_storage:
selected_option: s3
value: s3
.properties.registry_storage.s3.bucket:
value: {{ (ds "data").harbor_bucket }}
.properties.registry_storage.s3.chunksize:
value: 5242880
.properties.registry_storage.s3.region:
value: {{ (ds "data").region }}
.properties.registry_storage.s3.secure:
value: true
.properties.registry_storage.s3.v4auth:
value: true
.properties.vm_monitor:
selected_option: no_monitor
value: no_monitor
.properties.with_clair:
value: true
.properties.with_notary:
value: true
network-properties:
network:
name: infrastructure
other_availability_zones:
{{range (ds "data").azs}}
- name: {{.}}
{{end}}
singleton_availability_zone:
name: {{index (ds "data").azs 0}}
resource-config:
harbor-app:
instances: automatic
persistent_disk:
size_mb: automatic
instance_type:
id: automatic
internet_connected: false
elb_names:
{{range (ds "data").harbor_api_target_groups}}
- alb:{{.}}
{{end}}
additional_vm_extensions:
- harbor-api-lb-security-groups
max_in_flight: 1
smoke-testing:
instances: automatic
instance_type:
id: automatic
internet_connected: false
max_in_flight: 1
errand-config:
smoke-testing:
post-deploy-state: false
uaa-deregistration:
pre-delete-state: false
|
templates/config/harbor.yml
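The `{{ (ds "data") ... }}` expressions are gomplate datasource lookups, so rendering this template requires a `data` document exposing at least the keys referenced above. A hypothetical shape, with placeholder values:

```yaml
# Hypothetical "data" datasource; keys inferred from the template, values made up.
harbor_endpoint: harbor.example.com
harbor_bucket: my-harbor-registry-bucket
region: us-west-2
azs:
  - us-west-2a
  - us-west-2b
harbor_api_target_groups:
  - harbor-api-tg-1
```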
|
version: '2.2'
services:
app:
build:
context: .
dockerfile: Dockerfile-python-client
container_name: app
restart: unless-stopped
networks:
- kafka-backend
depends_on:
- kafka
- splunk-siem
cpus: 1
    mem_limit: 50M
splunk-siem:
image: splunk/splunk:7.3.2
container_name: 'splunk-siem'
restart: unless-stopped
networks:
- logging-backend
environment:
- 'SPLUNK_START_ARGS=--accept-license'
- 'SPLUNK_PASSWORD=<PASSWORD>'
ports:
- 8000:8000
depends_on:
- rsyslog
cpus: 2
mem_limit: 1G
rsyslog:
container_name: 'rsyslog'
restart: unless-stopped
build:
context: .
dockerfile: Dockerfile-rsyslog
ports:
- 1514:1514/udp
networks:
- default
- kafka-backend
- logging-backend
depends_on:
- kafka
cpus: 1
mem_limit: 150M
zookeeper:
image: confluentinc/cp-zookeeper:5.3.1
container_name: zookeeper
restart: unless-stopped
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
networks:
- kafka-backend
cpus: 1
mem_limit: 150M
kafka:
image: confluentinc/cp-kafka:5.3.1
container_name: kafka
restart: unless-stopped
depends_on:
- zookeeper
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
#KAFKA_ADVERTISED_LISTENERS: INTERNAL_DOCKER_NETWORK://kafka:29092,EXTERNAL_NETWORK://kafka.hackinglab.local:9092
#KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL_DOCKER_NETWORK:PLAINTEXT,EXTERNAL_NETWORK:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: INTERNAL_DOCKER_NETWORK://kafka:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL_DOCKER_NETWORK:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL_DOCKER_NETWORK
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
networks:
- default
- kafka-backend
#ports:
# - 9092:9092
cpus: 2
mem_limit: 1G
networks:
kafka-backend:
logging-backend:
|
osquery-url-monitor/docker-compose.yml
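The commented-out listener lines show how the broker could also be reached from outside the compose networks. Restoring them (plus the port mapping) gives the dual-listener variant below; the external hostname comes straight from those comments:

```yaml
# Dual-listener sketch for the kafka service, per the commented hints above.
environment:
  KAFKA_ADVERTISED_LISTENERS: INTERNAL_DOCKER_NETWORK://kafka:29092,EXTERNAL_NETWORK://kafka.hackinglab.local:9092
  KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL_DOCKER_NETWORK:PLAINTEXT,EXTERNAL_NETWORK:PLAINTEXT
  KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL_DOCKER_NETWORK
ports:
  - 9092:9092
```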
|
name: Basic Test
on: [push, pull_request]
jobs:
testactions_job:
runs-on: ubuntu-latest
name: Test
strategy:
matrix:
include:
- arch: aarch64
distro: bullseye
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Build and run container
id: build
uses: ./
with:
arch: ${{ matrix.arch }}
distro: ${{ matrix.distro }}
env: |
env_arch: ${{ matrix.arch }}
env_distro: ${{ matrix.distro }}
# Test multiple argument formats
dockerRunArgs: |
-v "${PWD}/volume_1:/volume_1"
--volume=${PWD}/volume_2:/volume_2
"-v${PWD}/volume_3:/volume_3"
-v "${PWD}/volume_4:/volume_4" -v "${PWD}/volume_5:/volume_5"
# Sourced on host, after container build, before container run
setup: |
distro_info=$(cat /etc/*-release | tr '[:upper:]' '[:lower:]' | tr '"' ' ' | tr '\n' ' ')
echo ::set-output name=host_arch::"$(uname -m)"
echo ::set-output name=host_distro_info::"$distro_info"
echo ::set-output name=host_env_arch::"$env_arch"
echo ::set-output name=host_env_distro::"$env_distro"
echo ::set-output name=host_shell_options::"$-"
# List all qemu packages, including qemu-user-static platforms
ls -al /usr/bin/qemu-*
install: |
apt-get update -q -y
apt-get install -q -y git
# Run on container
run: |
distro_info=$(cat /etc/*-release | tr '[:upper:]' '[:lower:]' | sed 's/"//g' | tr '\n' ';')
echo ::set-output name=arch::"$(uname -m)"
echo ::set-output name=distro_info::"$distro_info"
echo ::set-output name=shebang::"$(head -n 1 "$0")"
echo ::set-output name=env_arch::"$env_arch"
echo ::set-output name=env_distro::"$env_distro"
echo ::set-output name=shell_options::"$-"
- name: Assert container receives environment variables
run: |
arch="${{ steps.build.outputs.env_arch }}"
distro="${{ steps.build.outputs.env_distro }}"
echo "Assert env_arch: '$arch' == '${{ matrix.arch }}'"
test "$arch" == "${{ matrix.arch }}"
echo "Assert env_distro: '$distro' == '${{ matrix.distro }}'"
test "$distro" == "${{ matrix.distro }}"
|
.github/workflows/simple.yml
|
tests:
- description: Newsgraphics GCS bucket responds with 200 over http
request:
scheme: http
path: /newsgraphics/2014/01/05/poverty-map/index.html
headers:
x-nyt-miss: "1"
conditions:
env:
TEST_ENV: dev|stg|prd
response:
headers:
patterns:
x-nyt-route: ^newsgraphics-gcs$
x-nyt-backend: ^gcs_origin$
x-goog-hash: .+
x-goog-storage-class: .+
x-guploader-uploadid: .+
statusCodes:
- 200
- description: Projects path in newsgraphics GCS bucket responds with 200
request:
scheme: http
path: /projects/healthcheck.txt
headers:
x-nyt-miss: "1"
conditions:
env:
TEST_ENV: dev|stg|prd
response:
headers:
patterns:
x-nyt-route: ^newsgraphics-gcs$
x-nyt-backend: ^gcs_origin$
x-goog-hash: .+
x-goog-storage-class: .+
x-guploader-uploadid: .+
statusCodes:
- 200
- description: Test newsgraphics; obey redirects configured by
x-amz-meta-website-redirect-location header
request:
scheme: http
path: /newsgraphics/2012/1220-snow-fall-preview/
headers:
x-nyt-miss: "1"
conditions:
env:
TEST_ENV: dev|stg
response:
headers:
patterns:
x-nyt-route: ^newsgraphics-gcs$
x-nyt-backend: ^gcs_origin$
location: https?:\/\/www(\.dev|\.stg)?\.nytimes\.com\/newsgraphics\/2012\/1220-snow-fall-preview\/index\.html
statusCodes:
- 301
- description: Test newsgraphics; 2018 should work on https
request:
path: /newsgraphics/2018/healthcheck.txt
headers:
x-nyt-miss: "1"
conditions:
env:
TEST_ENV: dev|stg|prd
response:
headers:
patterns:
x-nyt-route: ^newsgraphics-gcs$
x-nyt-backend: ^gcs_origin$
statusCodes:
- 200
- description: Test newsgraphics; 2017 should work on https
request:
path: /newsgraphics/2017/healthcheck.txt
headers:
x-nyt-miss: "1"
conditions:
env:
TEST_ENV: dev|stg|prd
response:
headers:
patterns:
x-nyt-route: ^newsgraphics-gcs$
x-nyt-backend: ^gcs_origin$
statusCodes:
- 200
|
tests/www.nytimes.com/route-newsgraphics-gcs.yml
|
server:
port: 8080
# Upload configuration
upload:
config:
ysUrls: /image/**,/template/**,/dowload/**
localUrls: /Users/zhuxiang/work/upload/image/,/Users/zhuxiang/work/upload/template/,/Users/zhuxiang/work/upload/dowload/
#Swagger账号配置
swagger:
production: false
basic:
enable: true
username: admin
password: <PASSWORD>
spring:
thymeleaf:
prefix: classpath:/templates/views/
resources:
static-locations: classpath:/static/
mvc:
dispatch-options-request: true
  # Upload size settings
  servlet:
    multipart:
      # Maximum upload file size; -1 means unlimited
      maxFileSize: -1
      # Maximum upload request size; -1 means unlimited
      maxRequestSize: -1
  # Redis configuration
  redis:
    database: 0
    host: 192.168.127.12
    lettuce:
      pool:
        max-active: 8 # Maximum number of connections; 0 means unlimited
        max-idle: 8 # Maximum number of idle connections; 0 means unlimited
        max-wait: -1ms # Maximum wait to establish a connection; an exception is raised past this. -1 means unlimited.
        min-idle: 0 # Minimum number of idle connections; 0 means unlimited
      shutdown-timeout: 100ms
    password: '<PASSWORD>'
    port: 6379
autoconfigure:
exclude: com.alibaba.druid.spring.boot.autoconfigure.DruidDataSourceAutoConfigure
  # Database access configuration
  datasource:
    url: jdbc:mysql://127.0.0.1:3306/zgb_code?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull&autoReconnect=true&useSSL=false
    username: root
    password: <PASSWORD>
    driverClassName: com.mysql.jdbc.Driver
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      stat-view-servlet:
        login-username: admin
        login-password: <PASSWORD>
      # Initial connection pool size
      initial-size: 8
      # Minimum number of idle connections
      min-idle: 5
      # Maximum number of connections
      max-active: 10
      # Query timeout
      query-timeout: 6000
      # Transaction query timeout
      transaction-query-timeout: 6000
      # Timeout before an abandoned idle connection is closed
      remove-abandoned-timeout: 1800
      filters: stat,config
      # Merge monitoring data from multiple DruidDataSource instances
      use-global-data-source-stat: true
      filter:
        stat:
          slow-sql-millis: 5000
          # Enable the mergeSql feature via connectProperties; record slow SQL
          connection-properties: druid.stat.mergeSql=true
# MyBatis configuration
mybatis-plus:
  # Reload Mappers automatically without a restart
  refresh: true
  db-config:
    # Field strategy. IGNORED: skip the check; NOT_NULL: non-NULL check; NOT_EMPTY: non-empty check
    field-strategy: not_empty
    # Logical delete configuration
    logic-delete-value: 1
    logic-not-delete-value: 0
mapper-locations: classpath:mapper/*/*.xml
configuration:
log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
map-underscore-to-camel-case: true
cache-enabled: false
# APIs that require authorization
auth:
  url:
    # System API paths that require authorization
    sysUrls: /common/api/*,/api/sys/*
# Swagger documentation configuration
swaggerui:
info:
title: zgb_code
version: 1.0
    description: zgb_code API documentation
contact: 朱翔
email: <EMAIL>
basePackage: com.zgb.test
tokenName: Authorization
# Logging configuration
logging:
level:
root: info
file: ./log/zgb_code.log
|
src/main/resources/application.yml
|
items:
- uid: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint"
id: "WithServiceEndpoint"
parent: "com.microsoft.azure.management.sql"
children:
- "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint.ignoreMissingSqlServiceEndpoint()"
langs:
- "java"
name: "SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint<ParentT>"
nameWithType: "SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint<ParentT>"
fullName: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint<ParentT>"
type: "Interface"
package: "com.microsoft.azure.management.sql"
summary: "The SQL Virtual Network Rule definition to set ignore flag for the missing subnet's SQL service endpoint entry."
syntax:
content: "public static interface SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint<ParentT> extends SqlVirtualNetworkRule.DefinitionStages.WithAttach<ParentT>"
typeParameters:
- id: "ParentT"
implements:
- "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithAttach<ParentT>"
- uid: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint.ignoreMissingSqlServiceEndpoint()"
id: "ignoreMissingSqlServiceEndpoint()"
parent: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint"
langs:
- "java"
name: "ignoreMissingSqlServiceEndpoint()"
nameWithType: "SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint<ParentT>.ignoreMissingSqlServiceEndpoint()"
fullName: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint<ParentT>.ignoreMissingSqlServiceEndpoint()"
overload: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint.ignoreMissingSqlServiceEndpoint*"
type: "Method"
package: "com.microsoft.azure.management.sql"
summary: "Sets the flag to ignore the missing subnet's SQL service endpoint entry.\n\nVirtual Machines in the subnet will not be able to connect to the SQL server until Microsoft.Sql service endpoint is added to the subnet"
syntax:
content: "public abstract SqlVirtualNetworkRule.DefinitionStages.WithAttach<ParentT> ignoreMissingSqlServiceEndpoint()"
return:
type: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithAttach<ParentT>"
description: "The next stage of the definition."
references:
- uid: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithAttach<ParentT>"
spec.java:
- uid: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithAttach"
name: "WithAttach"
fullName: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithAttach"
- name: "<"
fullName: "<"
- uid: "ParentT"
name: "ParentT"
fullName: "ParentT"
- name: ">"
fullName: ">"
- uid: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint.ignoreMissingSqlServiceEndpoint*"
name: "ignoreMissingSqlServiceEndpoint"
nameWithType: "SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint<ParentT>.ignoreMissingSqlServiceEndpoint"
fullName: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint<ParentT>.ignoreMissingSqlServiceEndpoint"
package: "com.microsoft.azure.management.sql"
- uid: "ParentT"
name: "ParentT"
nameWithType: "ParentT"
fullName: "ParentT"
- uid: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithAttach"
name: "SqlVirtualNetworkRule.DefinitionStages.WithAttach"
nameWithType: "SqlVirtualNetworkRule.DefinitionStages.WithAttach"
fullName: "com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithAttach"
|
docs-ref-autogen/com.microsoft.azure.management.sql.SqlVirtualNetworkRule.DefinitionStages.WithServiceEndpoint.yml
|
version: "3"
services:
# ---------- application services ---------------
cep:
depends_on:
- mqtt
image: cproinger/sample-bridge
deploy:
resources:
limits:
memory: 180M
cpus: "0.1"
ports:
- "8082:8080"
networks:
- app_overlay
- cloudstack_app_overlay
environment:
- JAVA_OPTS=-server -Xmx100m -XX:MaxMetaspaceSize=64m -Dspring.profiles.active=inbound-mqtt,esper-cep-randomcount,outbound-rabbitmq -Dinbound-mqtt.brokerURL=tcp://nfvstack_mqtt:1883 -Dinbound-mqtt.subscriptions=sensor/+/randomcount -Dspring.rabbitmq.host=cloudstack_rabbitmq -Dspring.rabbitmq.username=guest -Dspring.rabbitmq.password=<PASSWORD>
mqtt:
image: toke/mosquitto:release-1.4.10-2
deploy:
resources:
limits:
memory: 10M
cpus: "0.5"
ports:
- "1883:1883"
networks:
- app_overlay
# ---------- monitoring services ---------------
prometheus:
image: prom/prometheus:v1.5.3
ports:
- "9092:9090"
volumes:
- /mastergit/sanalytics/sampleapps/prom/:/etc/prometheus/
# strange error https://github.com/prometheus/prometheus/issues/1600
#- /c/Users/cproinger/Documents/Docker/prom/nfv:/prometheus
#- /mastergit/sanalytics/sampleapps/prom/sample.rules:/etc/prometheus/sample.rules
networks:
- app_overlay
- monitoring_overlay
- cloudstack_monitoring_overlay
- platform_overlay
depends_on:
      # these services don't need to be listed here for DNS resolution of their names (just if you were wondering)
- cep
command:
- -config.file=/etc/prometheus/prometheus-nfv.yml
- -storage.local.path=/prometheus
- -web.console.libraries=/etc/prometheus/console_libraries
- -web.console.templates=/etc/prometheus/consoles
      # TODO: reduce memory usage.
#- -storage.local.target-heap-size=104857600 # 100 MB.
#- -storage.remote.influxdb-url=http://influx:8086 -> invalid URL ??? WTF!
# *banging head against table* -> https://github.com/prometheus/prometheus/issues/2022
- -storage.remote.influxdb-url=http://influx:8086
- -storage.remote.influxdb.database=mytestdb
- -storage.remote.influxdb.retention-policy=autogen
- -storage.remote.influxdb.username=username
environment:
- INFLUXDB_PW=password
networks:
app_overlay:
monitoring_overlay:
platform_overlay:
external: true
cloudstack_monitoring_overlay:
external: true
cloudstack_app_overlay:
external: true
|
sampleapps/dc-nfv-slice-part-with-monitoring.yml
|
version: 2.1
orbs:
apptest-ai: apptestai/android-app-test@volatile
jobs:
build-android:
environment:
JVM_OPTS: -Xmx1536m
GRADLE_OPTS: "-XX:+HeapDumpOnOutOfMemoryError -Dorg.gradle.jvmargs=-Xmx1536m -Dorg.gradle.caching=true -Dorg.gradle.configureondemand=true -Dkotlin.compiler.execution.strategy=in-process -Dkotlin.incremental=false"
docker:
- image: circleci/android:api-29-node
steps:
- checkout
- run:
name: npm install
command: npm install
- run:
name: create keystore file, fill key
command: |
CURPWD=$(pwd)
echo "$SIGNING_KEY" | base64 --decode > keystore.jks
echo "" >> android/gradle.properties
echo "MYAPP_UPLOAD_STORE_FILE=${CURPWD}/keystore.jks" >> android/gradle.properties
echo "MYAPP_UPLOAD_STORE_PASSWORD=$KEY_STORE_PASSWORD" >> android/gradle.properties
echo "MYAPP_UPLOAD_KEY_ALIAS=$ALIAS" >> android/gradle.properties
echo "MYAPP_UPLOAD_KEY_PASSWORD=$KEY_PASSWORD" >> android/gradle.properties
- run:
name: Run build
working_directory: ./android
command: ./gradlew assembleRelease
- apptest-ai/run-test:
binary_path: "android/app/build/outputs/apk/release/app-release.apk"
project_id: "1103"
- store_artifacts:
path: test-results
- store_test_results:
path: test-results
build-ios:
macos:
xcode: 11.3.1
steps:
- checkout
- run:
name: npm install
command: npm install
- run:
name: patch react-native xcode 11 build bug
command: patch -u node_modules/react-native/React/Base/RCTModuleMethod.mm -i fix-xcode-11-build.patch
- run:
name: build
working_directory: ./ios
command: |
fastlane beta
- apptest-ai/run-test:
binary_path: "ios/anime_jisho.ipa"
project_id: "1102"
- store_artifacts:
path: test-results
- store_test_results:
path: test-results
workflows:
build:
jobs:
- build-android
- build-ios
|
.circleci/config.yml
|
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: kube-system
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Deployment
apiVersion: apps/v1beta1
metadata:
name: nfs-provisioner-01
namespace: kube-system
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-provisioner-01
template:
metadata:
labels:
app: nfs-provisioner-01
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
#image: quay.io/external_storage/nfs-client-provisioner:latest
image: jmgao1983/nfs-client-provisioner:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
            # The provisioner name set here is what the StorageClass references
value: nfs-provisioner-01
- name: NFS_SERVER
value: "192.168.174.134"
- name: NFS_PATH
value: "/nfs"
volumes:
- name: nfs-client-root
nfs:
server: "192.168.174.134"
path: "/nfs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-dynamic-class
provisioner: nfs-provisioner-01
|
deploy-ansible/kubeadm/add_on/efk/nfs-provileges.yaml
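Once the provisioner and class are applied, dynamic provisioning is exercised with an ordinary claim. A minimal sketch (claim name and size are arbitrary):

```yaml
# PVC against the class above; NFS-backed volumes support ReadWriteMany.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-dynamic-class
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```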
|
title: Networking Fundamentals
metadata:
title: 'Exam 98-366: Networking Fundamentals'
description: 'Exam 98-366: Networking Fundamentals'
uid: exam.98-366
displayName: 98-366
  summary: |-
    <div><font color='red'><strong>This MTA exam will retire on June 30, 2022. Voucher sales for this exam ended on June 30, 2021. Use the "Schedule with Pearson VUE" or "Schedule with Certiport" buttons below to redeem a purchased voucher and schedule your exam. See <a href="/learn/certifications/mta-retirement-faqs">the FAQs</a> for details.</strong></font></br></br></div>
    Candidates for this exam are familiar with general networking concepts and technologies. Candidates should have some hands-on experience with Windows Server, Windows-based networking, network management tools, DNS, TCP/IP, the name resolution process, and network protocols and topologies.
    <div style='font-weight: bold;'><br/>Exam updates are in effect as of Wednesday, January 31, 2018. To learn more about these changes and how they affect the skills measured, download and review the Exam 98-366 change document.</div>
  subTitle: Candidates for this exam are familiar with general networking concepts and technologies. Candidates should have some hands-on experience with Windows Server, Windows-based networking, network management tools, DNS, TCP/IP, the name resolution process, and network protocols and topologies.
iconUrl: /media/learn/certification/badges/certification-exam.svg
pdfDownloadUrl: https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE4tnJr
practiceTestUrl: https://www.mindhub.com/MTA-98-366-Networking-Fundamentals-p/mu-98-366_p.htm?utm_source=microsoft&utm_medium=certpage&utm_campaign=msofficialpractice
providers:
- providerType: pearson-vue
examUrl: https://examregistration.microsoft.com/?action=1&locale=es-es&examcode=98-366&examname=Networking%20Fundamentals&returnToLearningUrl=https://docs.microsoft.com/learn/certifications/exams/98-366
- providerType: certiport
examUrl: http://www.certiport.com/locator
locales:
- en
- zh-cn
- zh-tw
- fr
- de
- it
- ja
- ko
- pt-br
- ru
- es
- es-mx
levels:
- beginner
skills:
- Understanding network infrastructures (30-35%)
- Understanding network hardware (20-25%)
- Understanding protocols and services (45-50%)
retirementDate: 06/30/2022
roles:
- administrator
products:
- windows-server
relatedCertifications:
- related:
uid: certification.mta-networking-fundamentals
resources:
- title: Exam Replay
  description: See two great offers to help boost your chances of success.
  href: ../deals.md
- title: Certification dashboard
  description: Review and manage your scheduled appointments, certificates, and transcripts.
  href: https://www.microsoft.com/learning/dashboard.aspx
- title: Request accommodations
  description: Learn more about requesting an accommodation for your exam.
  href: ../request-accommodations.md
- title: Exam policies and FAQ
  description: Review our exam policies and frequently asked questions.
  href: ../certification-exam-policies.md
|
learn-certs-pr/exams/98-366.yml
|
apiVersion: v1
kind: ConfigMap
metadata:
name: aws-auth
namespace: kube-system
annotations:
mu/version: {{ .MuVersion }}
data:
mapRoles: |
- rolearn: {{.EC2RoleArn}}
username: system:node:{{`{{EC2PrivateDNSName}}`}}
groups:
- system:bootstrappers
- system:nodes
{{range .RBACServices}}
- rolearn: arn:{{$.AWSPartition}}:iam::{{$.AWSAccountId}}:role/{{$.MuNamespace}}-pipeline-{{.Name}}-mu-acpt-{{$.AWSRegion}}
username: mu-service-{{.Name}}
groups:
- mu-view
- rolearn: arn:{{$.AWSPartition}}:iam::{{$.AWSAccountId}}:role/{{$.MuNamespace}}-pipeline-{{.Name}}-mu-prod-{{$.AWSRegion}}
username: mu-service-{{.Name}}
groups:
- mu-view
{{end}}
mapUsers: |
{{range .RBACUsers}}
- userarn: arn:{{$.AWSPartition}}:iam::{{$.AWSAccountId}}:user/{{.Name}}
groups:
- mu-{{.Role}}
{{end}}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: mu-admin-role-binding
annotations:
mu/version: {{ .MuVersion }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: Group
name: mu-admin
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: mu-view-role-binding
annotations:
mu/version: {{ .MuVersion }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: view
subjects:
- kind: Group
name: mu-view
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: mu-deploy-role-binding
annotations:
mu/version: {{ .MuVersion }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: edit
subjects:
- kind: Group
name: mu-deploy
{{range .RBACServices}}
{{if eq .Role "deploy"}}
---
apiVersion: v1
kind: Namespace
metadata:
name: mu-service-{{.Name}}
annotations:
mu/type: service
mu/service: {{ .Name }}
mu/version: {{ $.MuVersion }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: mu-service-{{.Name}}-{{.Role}}-role-binding
namespace: mu-service-{{.Name}}
annotations:
mu/version: {{ $.MuVersion }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: User
name: mu-service-{{.Name}}
{{end}}
{{end}}
|
templates/assets/kubernetes/cluster.yml
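For orientation, one rendered `mapRoles` entry from the `RBACServices` range might look like the sketch below (partition, account ID, namespace, service name, and region are all made-up values):

```yaml
- rolearn: arn:aws:iam::123456789012:role/mu-pipeline-myapp-mu-acpt-us-east-1
  username: mu-service-myapp
  groups:
    - mu-view
```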
|
- position: 1
driverNumber: 9
driverId: kimi-raikkonen
constructorId: mclaren
engineManufacturerId: mercedes
tyreManufacturerId: michelin
lap: 23
time: "1:35.483"
gap:
interval:
- position: 2
driverNumber: 8
driverId: nick-heidfeld
constructorId: williams
engineManufacturerId: bmw
tyreManufacturerId: michelin
lap: 40
time: "1:35.712"
gap: "+0.229"
interval: "+0.229"
- position: 3
driverNumber: 16
driverId: jarno-trulli
constructorId: toyota
engineManufacturerId: toyota
tyreManufacturerId: michelin
lap: 18
time: "1:35.816"
gap: "+0.333"
interval: "+0.104"
- position: 4
driverNumber: 5
driverId: fernando-alonso
constructorId: renault
engineManufacturerId: renault
tyreManufacturerId: michelin
lap: 18
time: "1:35.899"
gap: "+0.416"
interval: "+0.083"
- position: 5
driverNumber: 7
driverId: mark-webber
constructorId: williams
engineManufacturerId: bmw
tyreManufacturerId: michelin
lap: 20
time: "1:36.026"
gap: "+0.543"
interval: "+0.127"
- position: 6
driverNumber: 6
driverId: giancarlo-fisichella
constructorId: renault
engineManufacturerId: renault
tyreManufacturerId: michelin
lap: 21
time: "1:36.182"
gap: "+0.699"
interval: "+0.156"
- position: 7
driverNumber: 17
driverId: ralf-schumacher
constructorId: toyota
engineManufacturerId: toyota
tyreManufacturerId: michelin
lap: 15
time: "1:36.321"
gap: "+0.838"
interval: "+0.139"
- position: 8
driverNumber: 10
driverId: juan-pablo-montoya
constructorId: mclaren
engineManufacturerId: mercedes
tyreManufacturerId: michelin
lap: 42
time: "1:36.585"
gap: "+1.102"
interval: "+0.264"
- position: 9
driverNumber: 14
driverId: david-coulthard
constructorId: red-bull
engineManufacturerId: cosworth
tyreManufacturerId: michelin
lap: 18
time: "1:36.790"
gap: "+1.307"
interval: "+0.205"
- position: 10
driverNumber: 2
driverId: rubens-barrichello
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: bridgestone
lap: 22
time: "1:36.878"
gap: "+1.395"
interval: "+0.088"
- position: 11
driverNumber: 15
driverId: christian-klien
constructorId: red-bull
engineManufacturerId: cosworth
tyreManufacturerId: michelin
lap: 17
time: "1:36.902"
gap: "+1.419"
interval: "+0.024"
- position: 12
driverNumber: 1
driverId: michael-schumacher
constructorId: ferrari
engineManufacturerId: ferrari
tyreManufacturerId: bridgestone
lap: 41
time: "1:36.982"
gap: "+1.499"
interval: "+0.080"
- position: 13
driverNumber: 12
driverId: felipe-massa
constructorId: sauber
engineManufacturerId: petronas
tyreManufacturerId: michelin
lap: 18
time: "1:37.212"
gap: "+1.729"
interval: "+0.230"
- position: 14
driverNumber: 3
driverId: jenson-button
constructorId: bar
engineManufacturerId: honda
tyreManufacturerId: michelin
lap: 2
time: "1:37.912"
gap: "+2.429"
interval: "+0.700"
- position: 15
driverNumber: 11
driverId: jacques-villeneuve
constructorId: sauber
engineManufacturerId: petronas
tyreManufacturerId: michelin
lap: 18
time: "1:38.058"
gap: "+2.575"
interval: "+0.146"
- position: 16
driverNumber: 19
driverId: narain-karthikeyan
constructorId: jordan
engineManufacturerId: toyota
tyreManufacturerId: bridgestone
lap: 18
time: "1:39.833"
gap: "+4.350"
interval: "+1.775"
- position: 17
driverNumber: 18
driverId: tiago-monteiro
constructorId: jordan
engineManufacturerId: toyota
tyreManufacturerId: bridgestone
lap: 36
time: "1:40.432"
gap: "+4.949"
interval: "+0.599"
- position: 18
driverNumber: 4
driverId: anthony-davidson
constructorId: bar
engineManufacturerId: honda
tyreManufacturerId: michelin
lap: 2
time: "1:41.470"
gap: "+5.987"
interval: "+1.038"
- position: 19
driverNumber: 21
driverId: christijan-albers
constructorId: minardi
engineManufacturerId: cosworth
tyreManufacturerId: bridgestone
lap: 12
time: "1:42.465"
gap: "+6.982"
interval: "+0.995"
- position: 20
driverNumber: 20
driverId: patrick-friesacher
constructorId: minardi
engineManufacturerId: cosworth
tyreManufacturerId: bridgestone
lap: 2
time: "1:43.558"
gap: "+8.075"
interval: "+1.093"
|
src/data/seasons/2005/races/02-malaysia/fastest-laps.yml
|
- industry: all
items:
- title: Diversity, Equity, Inclusion, and Accessibility Training
agency: LABOR, DEPT OF
- title: Ink Toner Boxes
agency: VETERANS AFFAIRS, DEPT OF
- title: Hotel Accommodations/Lodging
agency: JUSTICE, DEPT OF
- industry: architecture
items:
- title: Supplemental Architecture and Engineering Services for OK
agency: PUBLIC BUILDINGS SERVICE
    - title: Design-Build Construction of Plant Bioscience Research Facility, WA
agency: DEPT OF DEFENSE
- industry: construction
items:
- title: Fort Hood Renovate and Repair Architectural Features IDIQ
agency: DEPT OF DEFENSE
- title: Harbor Refuge Dwelling Rehabilitation
agency: NATIONAL PARK SERVICE
- title: Blackfeet Reservation Apartment Complex
agency: INDIAN HEALTH SERVICE
- industry: facilities
items:
- title: Mount Rushmore Custodial Services
agency: NATIONAL PARK SERVICE
- title: Vault Pumping Contract
agency: BUREAU OF LAND MANAGEMENT
- title: Grounds Maintenance Services, Ithaca NY
agency: DEPT OF DEFENSE
- industry: furniture
items:
- title: Cardio Tilt Table
agency: VETERANS AFFAIRS, DEPT OF
- title: JFB, GSA D&C 5th to 18th Floor Furniture
agency: GENERAL SERVICES ADMINISTRATION
- industry: human
items:
- title: AF Band Diversity and Inclusion Training
agency: DEPT OF DEFENSE
- title: Diversity, Equity, Inclusion, and Accessibility Training
agency: LABOR, DEPT OF
- title: Human Resources (HR) Support Services
agency: NUCLEAR REGULATORY COMMISSION
- industry: industrial
items:
- title: Radio Frequency (RF) Welder
agency: DEPT OF DEFENSE
- title: 300 MhZ Solid-State NMR Spectrometer Console
agency: COMMERCE, DEPT OF
- industry: interior
items:
- title: Port of Entry Project - A/E Services
agency: GENERAL SERVICES ADMINISTRATION
    - title: Relocate Physical Medicine and Rehabilitation to 4th Floor
agency: VETERANS AFFAIRS, DEPT OF
- industry: it
items:
- title: PM/CM Engineering Support for VACCHCS IAW
agency: VETERANS AFFAIRS, DEPT OF
- title: Purchase Software as a Service (SAAS) code and platforms
agency: SMALL BUSINESS ADMINISTRATION
- title: Web Site Hosting
agency: Congressional Budget Office
- industry: itsatcom
items:
- title: Cellular telephone services
agency: STATE, DEPT OF
- title: Notice of Intent to Solicit and Award Contract for Beeper Services
agency: VETERANS AFFAIRS, DEPT OF
- title: Tactical Network Routers
agency: DEPT OF DEFENSE
- industry: office
items:
- title: Ink Toner Boxes
agency: VETERANS AFFAIRS, DEPT OF
- title: Packing, packaging, preservation, and marking services
agency: GENERAL SERVICES ADMINISTRATION
- title: Large Format Production Printer
agency: DEPT OF DEFENSE
- industry: professional
items:
- title: FY22 Full Service Contract for SPS Getinge Equipment
agency: VETERANS AFFAIRS, DEPT OF
- title: Surgical Instruments for Northport VA Medical System
agency: VETERANS AFFAIRS, DEPT OF
- industry: realestate
items:
- title: Marketing, Brokerage Services, & Lease Management of US GSA Lamar Building
agency: GENERAL SERVICES ADMINISTRATION
- title: "Iowa Land Appraisals: Agricultural Conservation Easement Program"
agency: AGRICULTURE, DEPT OF
- industry: science
items:
- title: Surgical Instruments for Northport VA Medical System
agency: VETERANS AFFAIRS, DEPT OF
    - title: 300 MHz Solid-State NMR Spectrometer Console
agency: COMMERCE, DEPT OF
- industry: security
items:
- title: NP Aerospace Bomb Suits
agency: JUSTICE, DEPT OF
- title: Smithsonian Tropical Research Institute Guard Force and Control Room Operator Services
agency: SMITHSONIAN INSTITUTION
- industry: transport
items:
- title: New 55 Ton Lowboy Trailer
agency: INTERIOR, DEPT OF THE
- title: Boat Trailer
agency: DEPT OF DEFENSE
- title: Hood River Ranger District Dirt Bike Purchase
agency: AGRICULTURE, DEPT OF
- industry: travel
items:
- title: Hotel Accommodations/Lodging
agency: JUSTICE, DEPT OF
- title: DTMO Travel Management Company Services for Travel Area-3
agency: DEPT OF DEFENSE
- industry: other
items:
- title: Horse Mountain Wildlife Water Catchment
agency: INTERIOR, DEPT OF THE
- title: 2027 Gershwin Prize IDIQ
agency: LIBRARY OF CONGRESS
|
_data/opportunity-explorer-answers-contracts.yml
|
version: '2.4'
networks:
server1:
server2:
server3:
server4:
common:
services:
broadcast: &broadcast
hostname: broadcast
extends:
file: docker-compose-common-components.yml
service: python
command: broadcast_loop
depends_on:
- postgres
- redis
networks:
- server1
- common
#stats: &stats
# hostname: stats
# extends:
# file: docker-compose-common-components.yml
# service: python
# command: stats_loop
# depends_on:
# - postgres
# - redis
logs_event_loop: &logs_event_loop
hostname: logs_event_loop
extends:
file: docker-compose-common-components.yml
service: python
command: log_loop
depends_on:
- postgres
- redis
networks:
- server1
- common
backend_1: &backend
hostname: api
extends:
file: docker-compose-common-components.yml
service: python
command: web
depends_on:
- postgres
- redis
networks:
server1:
aliases:
- backend
common:
frontend: &frontend
image: maresh/hll_rcon_frontend:latest
ports:
- ${RCONWEB_PORT}:80
restart: always
environment:
HLL_HOST: ${HLL_HOST}
      RCONWEB_PASSWORD: ${RCONWEB_PASSWORD}
RCONWEB_USERNAME: ${RCONWEB_USERNAME}
depends_on:
- backend_1
networks:
- server1
- common
redis:
image: redislabs/redistimeseries:1.2.6
restart: always
volumes:
- ./redis_data:/data
networks:
- common
postgres:
image: postgres:12
restart: always
environment:
      # If a password is not defined, this container will fail to start
POSTGRES_PASSWORD: ${HLL_DB_PASSWORD}
POSTGRES_USER: rcon
POSTGRES_DB: rcon
PGDATA: /data
volumes:
- ./db_data:/data
networks:
- common
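  # The per-server services below reuse the definitions above via YAML
  # anchors ("&name") and merge keys ("<<: *name"). A minimal sketch of
  # the pattern, with hypothetical names for illustration only:
  #
  #   base: &base        # "&base" anchors this mapping
  #     restart: always
  #   derived:
  #     <<: *base        # merge the anchored keys into this mapping
  #     restart: "no"    # locally defined keys override merged ones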
############ SERVER 2 #############
broadcast_2:
<<: *broadcast
hostname: broadcast_2
environment: &env2
HLL_HOST: ${HLL_HOST_2}
HLL_PORT: ${HLL_PORT_2}
HLL_PASSWORD: ${HLL_PASSWORD_2}
LOGGING_LEVEL: 'INFO'
LOGGING_PATH: /logs/
REDIS_URL: redis://redis:6379/1
DB_URL: 'postgres://rcon:${HLL_DB_PASSWORD}@postgres:5432'
DISCORD_WEBHOOK_AUDIT_LOG: ${DISCORD_WEBHOOK_AUDIT_LOG_2}
      RCONWEB_PASSWORD: ${RCONWEB_PASSWORD_2}
RCONWEB_USERNAME: ${RCONWEB_USERNAME_2}
RCONWEB_API_SECRET: ${RCONWEB_API_SECRET}
SERVER_SHORT_NAME: ${SERVER_SHORT_NAME_2}
DISCORD_CHAT_WEBHOOK: ${DISCORD_CHAT_WEBHOOK_2}
DISCORD_CHAT_WEBHOOK_ALLOW_MENTIONS: ${DISCORD_CHAT_WEBHOOK_ALLOW_MENTIONS}
RCONWEB_PORT: ${RCONWEB_PORT_2}
DISCORD_PING_TRIGGER_WORDS: ${DISCORD_PING_TRIGGER_WORDS_2}
DISCORD_PING_TRIGGER_ROLES: ${DISCORD_PING_TRIGGER_ROLES_2}
DISCORD_PING_TRIGGER_WEBHOOK: ${DISCORD_PING_TRIGGER_WEBHOOK_2}
DISCORD_KILLS_WEBHOOK: ${DISCORD_KILLS_WEBHOOK_2}
DISCORD_SEND_KILL_UPDATES: ${DISCORD_SEND_KILL_UPDATES_2}
DISCORD_SEND_TEAM_KILL_UPDATES: ${DISCORD_SEND_TEAM_KILL_UPDATES_2}
restart: on-failure
networks:
- common
- server2
depends_on:
- frontend
#stats_2:
# <<: *stats
# hostname: stats_2
# environment:
# <<: *env2
# restart: on-failure
# depends_on:
# - frontend
logs_event_loop_2:
<<: *logs_event_loop
hostname: logs_event_loop_2
environment:
<<: *env2
restart: on-failure
networks:
- common
- server2
depends_on:
- frontend
backend_2:
<<: *backend
hostname: api_2
environment:
<<: *env2
restart: on-failure
networks:
common:
server2:
aliases:
- backend
depends_on:
- frontend
frontend_2:
<<: *frontend
ports:
- ${RCONWEB_PORT_2}:80
environment:
<<: *env2
restart: on-failure
networks:
- common
- server2
depends_on:
- backend_2
|
docker-compose.yml
|
description: |
Install tooling and build project dependencies
parameters:
stack-yaml:
description: "Path to stack.yaml file to use"
type: string
default: stack.yaml
stack-arguments:
description: "Additional Stack arguments"
type: string
default: ""
install-stack:
description: "Install Stack? Necessary with default executor"
type: boolean
default: true
upgrade-stack:
description: "Upgrade Stack? Only useful with custom executor"
type: boolean
default: false
before-dependencies:
description: "Steps to run before installing dependencies"
type: steps
default: []
after-dependencies:
description: "Steps to run after installing dependencies"
type: steps
default: []
hlint:
description: "Install HLint?"
type: boolean
default: true
weeder:
description: "Install Weeder?"
type: boolean
default: true
cache-prefix:
description: "Prefix used for cache key"
type: string
default: v1-dependencies
steps:
- when:
condition: <<parameters.install-stack>>
steps:
- run:
name: Install Stack
command: curl -sSL https://get.haskellstack.org/ | sh
- when:
condition: <<parameters.upgrade-stack>>
steps:
- run:
name: Upgrade Stack
command: stack upgrade
- run:
name: Digest
command: |
# TODO: once committing the cabal file catches on, we can just
# assume it exists and md5sum *.cabal here.
        find . -maxdepth 1 -type f \
          \( -name package.yaml -o -name '*.cabal' \) \
          -exec md5sum {} + > project.digest
        git ls-files -z | xargs -0 md5sum > source.digest
- restore_cache:
keys:
- <<parameters.cache-prefix>>-{{ checksum "<<parameters.stack-yaml>>" }}-{{ checksum "project.digest" }}
- <<parameters.cache-prefix>>-{{ checksum "<<parameters.stack-yaml>>" }}-
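  # Key lookup falls back from the exact stack.yaml + project.digest match
  # to the newest cache whose key matches the stack.yaml checksum prefix,
  # so a near-miss cache still seeds the dependency build below.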
- steps: <<parameters.before-dependencies>>
- run:
name: Build dependencies
command: stack build
--stack-yaml "<<parameters.stack-yaml>>"
--no-terminal
<<parameters.stack-arguments>>
--dependencies-only
--bench
--no-run-benchmarks
--test
--no-run-tests
- steps: <<parameters.after-dependencies>>
- when:
condition: <<parameters.hlint>>
steps:
- run:
name: Install HLint
command: stack install
--stack-yaml "<<parameters.stack-yaml>>"
--no-terminal
<<parameters.stack-arguments>>
--copy-compiler-tool hlint
- when:
condition: <<parameters.weeder>>
steps:
- run:
name: Install Weeder
command: stack install
--stack-yaml "<<parameters.stack-yaml>>"
--no-terminal
<<parameters.stack-arguments>>
--copy-compiler-tool weeder
- save_cache:
key: <<parameters.cache-prefix>>-{{ checksum "<<parameters.stack-yaml>>" }}-{{ checksum "project.digest" }}
paths:
- ~/.stack
- ./.stack-work
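# A hypothetical usage sketch for this command from a consuming config,
# assuming the orb is imported under the alias "haskell" (the alias and the
# stack-lts.yaml path are illustrative, not part of this orb):
#
#   jobs:
#     build:
#       steps:
#         - checkout
#         - haskell/setup:
#             stack-yaml: stack-lts.yaml
#             hlint: false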
|
src/commands/setup.yml
|
services:
acme_event_manager.edition_handler:
class: Acme\Bundle\EventManagerBundle\Model\EditionHandler
arguments: [@security.token_storage]
acme_event_manager.creation_handler:
class: Acme\Bundle\EventManagerBundle\Model\CreationHandler
arguments: [@security.token_storage]
acme_event_manager.csv_parser:
class: Acme\Bundle\EventManagerBundle\Util\CSVParser
acme_event_manager.csv_import_handler:
class: Acme\Bundle\EventManagerBundle\Util\CSVImportHandler
arguments: [@doctrine.orm.default_entity_manager, @acme_event_manager.creation_handler]
acme_event_manager.csv_export_handler:
class: Acme\Bundle\EventManagerBundle\Util\CSVExportHandler
arguments: [@doctrine.orm.default_entity_manager, @acme_event_manager.event_participants_provider]
acme_event_manager.api_request_handler:
class: Acme\Bundle\EventManagerBundle\Model\ApiRequestHandler
arguments: [@doctrine.orm.default_entity_manager]
acme_event_manager.event_participation_handler:
class: Acme\Bundle\EventManagerBundle\Model\ParticipationInEventHandler
arguments: [@doctrine.orm.default_entity_manager, @security.token_storage, @acme_event_manager.creation_handler]
acme_event_manager.event_statistic_provider:
class: Acme\Bundle\EventManagerBundle\Model\EventStatisticProvider
arguments: [@doctrine.orm.default_entity_manager]
acme_event_manager.event_participants_provider:
class: Acme\Bundle\EventManagerBundle\Model\ParticipantsProvider
arguments: [@doctrine.orm.default_entity_manager]
acme_event_manager.paper_addition_handler:
class: Acme\Bundle\EventManagerBundle\Model\PaperAdditionHandler
arguments: [@doctrine.orm.default_entity_manager, @security.token_storage, @acme_event_manager.creation_handler, @acme_event_manager.event_participation_handler]
acme_event_manager.event_papers_provider:
class: Acme\Bundle\EventManagerBundle\Model\PapersProvider
arguments: [@doctrine.orm.default_entity_manager]
acme_event_manager.pdf_export_handler:
class: Acme\Bundle\EventManagerBundle\Util\PdfExportHandler
arguments: [@spraed.pdf.generator]
# acme_event_manager.example:
# class: Acme\Bundle\EventManagerBundle\Example
# arguments: [@service_id, "plain_value", %parameter%]
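    # Each "arguments" list above is injected positionally into the service
    # class constructor; a hedged sketch (hypothetical signature) for
    # acme_event_manager.edition_handler:
    #   public function __construct(TokenStorageInterface $tokenStorage) { ... }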
|
src/Acme/Bundle/EventManagerBundle/Resources/config/services.yml
|
name: CI
on:
push:
branches:
- '*'
tags:
- '[0-9]*'
pull_request:
branches:
- '*'
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
# test against
# - Java 1.8 (minimum requirement)
# - Java 9 (first version with JPMS)
# - Java LTS versions (11, 17, ...)
        # - latest Java version(s)
java:
- 1.8
- 9
- 11 # LTS
- 15
steps:
- uses: actions/checkout@v2
- name: Setup Java ${{ matrix.java }}
uses: actions/setup-java@v1
with:
java-version: ${{ matrix.java }}
- name: Cache Maven packages
uses: actions/cache@v2
with:
path: ~/.m2/repository
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
restore-keys: ${{ runner.os }}-m2
- name: Build with Maven
run: mvn --batch-mode test
working-directory: svg-core
deploy:
runs-on: ubuntu-latest
needs: build
if: |
github.event_name == 'push' &&
(startsWith( github.ref, 'refs/tags/' ) || github.ref == 'refs/heads/master') &&
github.repository == 'JFormDesigner/svgSalamander'
steps:
- uses: actions/checkout@v2
- name: Setup Java 1.8
uses: actions/setup-java@v1
with:
java-version: 1.8
server-id: ossrh
server-username: OSSRH_USERNAME
          server-password: OSSRH_PASSWORD
gpg-private-key: ${{ secrets.SIGNING_KEY }}
          gpg-passphrase: SIGNING_PASSWORD
- name: Cache Maven packages
uses: actions/cache@v2
with:
path: ~/.m2/repository
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
restore-keys: ${{ runner.os }}-m2
- name: Publish to oss.sonatype.org
run: mvn --batch-mode deploy -P deploy
working-directory: svg-core
env:
OSSRH_USERNAME: ${{ secrets.OSSRH_USERNAME }}
OSSRH_PASSWORD: ${{ secrets.OSSRH_PASSWORD }}
SIGNING_PASSWORD: ${{ secrets.SIGNING_PASSWORD }}
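        # Note: with actions/setup-java@v1, server-username, server-password and
        # gpg-passphrase name environment variables rather than literal secrets;
        # the generated Maven settings.xml references those variables, and the
        # "env" block above supplies their values when this step runs.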
|
.github/workflows/ci.yml
|
documentType: LandingData
title: Azure FXT Edge Filer documentation
metadata:
  title: Azure FXT Edge Filer documentation
  description: Use the Azure FXT Edge Filer as a caching layer for read-intensive high-performance computing (HPC) tasks
author: ekpgh
ms.service: fxt-edge-filer
ms.topic: landing-page
ms.date: 07/10/2019
ms.author: v-erkell
abstract:
  description: 'The Azure FXT Edge Filer hybrid storage cache is a hybrid hardware/software system that accelerates data access for file-based, read-intensive high-performance computing (HPC) workflows. A cluster of three or more Azure FXT Edge Filer devices can accelerate storage performance for on-premises NAS systems (NetApp or Dell EMC Isilon), cloud storage, or a combination of solutions.'
sections:
- title: Step-by-step tutorials
  items:
  - type: list
    style: unordered
    items:
    - html: <a href="fxt-install">Install the Azure FXT Edge Filer hardware</a>
    - html: <a href="fxt-network-power">Make network connections for the Azure FXT Edge Filer</a>
    - html: <a href="fxt-node-password">Set root passwords on the hardware nodes</a>
    - html: <a href="fxt-cluster-create">Create the hybrid storage cache cluster</a>
    - html: <a href="fxt-add-nodes">Add cluster nodes</a>
    - html: <a href="fxt-add-storage">Add back-end storage and configure the virtual namespace</a>
    - html: <a href="fxt-configure-network">Configure cluster network settings</a>
- title: How-to guides
  items:
  - type: list
    style: unordered
    items:
    - html: <a href="fxt-mount-clients">Connect clients to the Azure FXT Edge Filer cluster</a>
    - html: <a href="fxt-monitor">Monitor the Azure FXT Edge Filer</a>
    - html: <a href="fxt-power-off">Safely power off a hardware node</a>
- title: Resources
  items:
  - type: list
    style: unordered
    items:
    - html: <a href="fxt-specs">Hardware specifications</a>
    - html: <a href="fxt-additional-doc">Additional documentation</a>
    - html: <a href="fxt-support-ticket">Open a support ticket</a>
|
articles/fxt-edge-filer/index.yml
|
version: '2'
services:
  # Our frontstage machine, which is responsible for delivering the correct Electronic Questionnaire (EQ)
ras-frontstage:
build:
#context: <EMAIL>:ONSdigital/ras-frontstage.git
context: ras-frontstage
ports:
- "5101:5001"
environment:
- OAUTHLIB_INSECURE_TRANSPORT=1 # Used for development. Allows OAuth2 protocol to work over non TLS protocol
- APP_SETTINGS=config.Config # Allows Flask to load config files
- ONS_OAUTH_SERVER=django-oauth2-test:8040 # Tells ras-frontstage how to find the django OAuth2 server
- RAS_FRONTSTAGE_CLIENT_ID=<EMAIL> # Configures a Client ID for ras-frontstage see: https://www.oauth.com/oauth2-servers/client-registration/client-id-secret/
- RAS_FRONTSTAGE_CLIENT_SECRET=password # Configures a Client secret for ras-frontstage see above URL
- ONS_AUTHORIZATION_ENDPOINT=/web/authorize/ # Configures an authorisation endpoint see: https://www.oauth.com/oauth2-servers/authorization/
- ONS_TOKEN_ENDPOINT=/api/v1/tokens/ # Configures a token endpoint see: https://www.oauth.com/oauth2-servers/access-tokens/
- ONS_ADMIN_ENDPOINT=/api/account/create # The admin interface to our OAuth2 server.
      - ONS_OAUTH_PROTOCOL=http:// # This could be overkill, but if we ever change the protocol we can do it here
- DEBUG=False
- TESTING=False
- CSRF_ENABLED=True
- SECRET_KEY=this-really-needs-to-be-changed
- dbname=ras_frontstage_backup
- SQLALCHEMY_DATABASE_URI=postgresql://ras_party:password@postgres:5432/postgres
- PARTYSERVICE_PROTOCOL=http://
- PARTYSERVICE_SERVER=ras-party:5062
- PARTYSERVICE_REGISTER_ENDPOINT=/respondents/
restart: always
depends_on:
- postgres
networks:
- ras
ras-party:
build:
#context: <EMAIL>:ONSdigital/ras-party.git
context: ras-party
ports:
- "5162:5062"
environment:
- DEBUG=False
- TESTING=False
- CSRF_ENABLED=True
- SECRET_KEY=this-really-needs-to-be-changed
- dbname=ras_party
- SQLALCHEMY_DATABASE_URI=postgresql://ras_party:password@postgres:5432/postgres
networks:
- ras
django-oauth2-test:
build:
context: django-oauth2-test
ports:
- "8140:8040"
networks:
- ras
ras-collection-instrument:
build:
context: ras-collection-instrument
ports:
- "5152:5052"
environment:
- OAUTHLIB_INSECURE_TRANSPORT=1 # Used for development. Allows OAuth2 protocol to work over non TLS protocol
- APP_SETTINGS=config.Config # Allows Flask to load config files
- DEBUG=False
- TESTING=False
- CSRF_ENABLED=True
- SECRET_KEY=this-really-needs-to-be-changed
- dbname=ras_collection_instrument
- SQLALCHEMY_DATABASE_URI=postgresql://ras_collection_instrument:password@postgres:5432/postgres
networks:
- ras
secure-message:
build:
context: ras-secure-message
environment:
SECURE_MESSAGING_DATABASE_URL: postgres://ras_secure_message:password@postgres:5432/postgres
RAS_SM_PATH: ./
restart: always
depends_on:
- postgres
networks:
- ras
ports:
- "5055:5050"
# Spring config server - not currently used.
#ras-config:
# build:
# context: ras-config
# environment:
# - server.port=8080
# networks:
# - ras
  # Database container:
  # The default port for the postgres DB is 5432. However, some people have postgres running on their
  # local machine, so we map a different external port to the docker instance port. We are using 5431
  # until we hear otherwise.
postgres:
image: postgres:9.6
ports:
- "5431:5432"
networks:
- ras
networks:
ras:
|
docker-compose.yml
|
- type: copyright
  description: all rights reserved
- type: CC0
  description: in the public domain
  url: https://creativecommons.org/share-your-work/public-domain/cc0/
- type: CCBY40
  description: licensed under CC BY 4.0
  url: https://creativecommons.org/licenses/by/4.0/
- type: CCBYSA40
  description: licensed under CC BY-SA 4.0
  url: https://creativecommons.org/licenses/by-sa/4.0/
- type: CCBYNC40
  description: licensed under CC BY-NC 4.0
  url: https://creativecommons.org/licenses/by-nc/4.0/
- type: CCBYND40
  description: licensed under CC BY-ND 4.0
  url: https://creativecommons.org/licenses/by-nd/4.0/
- type: CCBYNCND40
  description: licensed under CC BY-NC-ND 4.0
  url: https://creativecommons.org/licenses/by-nc-nd/4.0/
- type: CCBYNCSA40
  description: licensed under CC BY-NC-SA 4.0
  url: https://creativecommons.org/licenses/by-nc-sa/4.0/
- type: CCBY30
  description: licensed under CC BY 3.0
  url: https://creativecommons.org/licenses/by/3.0/
- type: CCBYSA30
  description: licensed under CC BY-SA 3.0
  url: https://creativecommons.org/licenses/by-sa/3.0/
- type: CCBYNC30
  description: licensed under CC BY-NC 3.0
  url: https://creativecommons.org/licenses/by-nc/3.0/
- type: CCBYND30
  description: licensed under CC BY-ND 3.0
  url: https://creativecommons.org/licenses/by-nd/3.0/
- type: CCBYNCND30
  description: licensed under CC BY-NC-ND 3.0
  url: https://creativecommons.org/licenses/by-nc-nd/3.0/
- type: CCBYNCSA30
  description: licensed under CC BY-NC-SA 3.0
  url: https://creativecommons.org/licenses/by-nc-sa/3.0/
- type: CCBY25
  description: licensed under CC BY 2.5
  url: https://creativecommons.org/licenses/by/2.5/
- type: CCBYSA25
  description: licensed under CC BY-SA 2.5
  url: https://creativecommons.org/licenses/by-sa/2.5/
- type: CCBYNC25
  description: licensed under CC BY-NC 2.5
  url: https://creativecommons.org/licenses/by-nc/2.5/
- type: CCBYND25
  description: licensed under CC BY-ND 2.5
  url: https://creativecommons.org/licenses/by-nd/2.5/
- type: CCBYNCND25
  description: licensed under CC BY-NC-ND 2.5
  url: https://creativecommons.org/licenses/by-nc-nd/2.5/
- type: CCBYNCSA25
  description: licensed under CC BY-NC-SA 2.5
  url: https://creativecommons.org/licenses/by-nc-sa/2.5/
- type: CCBY20
  description: licensed under CC BY 2.0
  url: https://creativecommons.org/licenses/by/2.0/
- type: CCBYSA20
  description: licensed under CC BY-SA 2.0
  url: https://creativecommons.org/licenses/by-sa/2.0/
- type: CCBYNC20
  description: licensed under CC BY-NC 2.0
  url: https://creativecommons.org/licenses/by-nc/2.0/
- type: CCBYND20
  description: licensed under CC BY-ND 2.0
  url: https://creativecommons.org/licenses/by-nd/2.0/
- type: CCBYNCND20
  description: licensed under CC BY-NC-ND 2.0
  url: https://creativecommons.org/licenses/by-nc-nd/2.0/
- type: CCBYNCSA20
  description: licensed under CC BY-NC-SA 2.0
  url: https://creativecommons.org/licenses/by-nc-sa/2.0/
- type: CCBY10
  description: licensed under CC BY 1.0
  url: https://creativecommons.org/licenses/by/1.0/
- type: CCBYSA10
  description: licensed under CC BY-SA 1.0
  url: https://creativecommons.org/licenses/by-sa/1.0/
- type: CCBYNC10
  description: licensed under CC BY-NC 1.0
  url: https://creativecommons.org/licenses/by-nc/1.0/
- type: CCBYND10
  description: licensed under CC BY-ND 1.0
  url: https://creativecommons.org/licenses/by-nd/1.0/
- type: CCBYNCND10
  description: licensed under CC BY-NC-ND 1.0
  url: https://creativecommons.org/licenses/by-nc-nd/1.0/
- type: CCBYNCSA10
  description: licensed under CC BY-NC-SA 1.0
  url: https://creativecommons.org/licenses/by-nc-sa/1.0/
|
_data/licenses.yml
|
title: Azure Kinect DK documentation
summary: Azure Kinect DK is a developer kit with advanced AI sensors that provide sophisticated computer vision and speech models. The Kinect contains an advanced depth sensor, spatial microphone array, video camera, and orientation sensor, all in one compact device with multiple modes, options, and software development kits (SDKs).
metadata:
  title: Azure Kinect DK documentation
  description: Azure Kinect DK is a developer kit with advanced AI sensors that provide sophisticated computer vision and speech models. The Kinect contains an advanced depth sensor, spatial microphone array, video camera, and orientation sensor, all in one compact device with multiple modes, options, and SDKs.
ms.prod: kinect-dk
ms.topic: landing-page
author: qm13
ms.author: quentinm
ms.date: 03/18/2021
ms.openlocfilehash: 74f320461327d4835e07a12d122f131977edc91a
ms.sourcegitcommit: ac035293291c3d2962cee270b33fca3628432fac
ms.translationtype: MT
ms.contentlocale: hu-HU
ms.lasthandoff: 03/24/2021
ms.locfileid: "104954904"
landingContent:
- title: About Azure Kinect DK
  linkLists:
  - linkListType: overview
    links:
    - text: What is Azure Kinect DK?
      url: about-azure-kinect-dk.md
  - linkListType: reference
    links:
    - text: Hardware specification
      url: hardware-specification.md
    - text: System requirements
      url: system-requirements.md
- title: Get started
  linkLists:
  - linkListType: download
    links:
    - text: Azure Kinect Sensor SDK
      url: sensor-sdk-download.md
    - text: Azure Kinect Body Tracking SDK
      url: body-sdk-download.md
  - linkListType: quickstart
    links:
    - text: Set up Azure Kinect DK
      url: set-up-azure-kinect-dk.md
    - text: Record sensor streams to a file
      url: record-sensor-streams-file.md
    - text: Build your first Azure Kinect application
      url: build-first-app.md
    - text: Set up the Body Tracking SDK
      url: body-sdk-setup.md
    - text: Build your first body tracking application
      url: build-first-body-app.md
- title: Use the Sensor SDK
  linkLists:
  - linkListType: how-to-guide
    links:
    - text: Find and open the device
      url: find-then-open-device.md
    - text: Retrieve image data
      url: about-sensor-sdk.md
    - text: Access the microphones
      url: access-mics.md
    - text: Record and play back
      url: record-playback-api.md
- title: Use the Body Tracking SDK
  linkLists:
  - linkListType: how-to-guide
    links:
    - text: Get body tracking results
      url: get-body-tracking-results.md
    - text: Access data in the body frame
      url: access-data-body-frame.md
- title: Reference
  linkLists:
  - linkListType: reference
    links:
    - text: Sensor SDK API
      url: https://microsoft.github.io/Azure-Kinect-Sensor-SDK/master/index.html
    - text: Body Tracking SDK API
      url: https://microsoft.github.io/Azure-Kinect-Body-Tracking/release/1.1.x/index.html
    - text: Vision services
      url: https://azure.microsoft.com/services/cognitive-services/directory/vision/
    - text: Speech services
      url: https://docs.microsoft.com/azure/cognitive-services/speech-service/
|
articles/kinect-dk/index.yml
|
---
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-trigger-nightly-master/68/
timestamp: 2016-02-14 09:09:00 UTC
duration: 1h40m29s
active_duration: 1h40m5s
parameters: {}
change:
git_remote: <EMAIL>:chef/chef-analytics.git
git_commit: <PASSWORD>
project: opscode-analytics
version: 1.3.0+20160214090925
stages:
chef-analytics-promote:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-promote/68/
duration: 7s
chef-analytics-test:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-test/125/
duration: 27m57s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-test/architecture=x86_64,platform=el-5,project=opscode-analytics,role=tester/125/
duration: 17m31s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-test/architecture=x86_64,platform=el-6,project=opscode-analytics,role=tester/125/
duration: 15m52s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-test/architecture=x86_64,platform=el-7,project=opscode-analytics,role=tester/125/
duration: 27m56s
ubuntu-10.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-test/architecture=x86_64,platform=ubuntu-10.04,project=opscode-analytics,role=tester/125/
duration: 22m24s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-test/architecture=x86_64,platform=ubuntu-12.04,project=opscode-analytics,role=tester/125/
duration: 14m39s
ubuntu-14.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-test/architecture=x86_64,platform=ubuntu-14.04,project=opscode-analytics,role=tester/125/
duration: 26m8s
chef-analytics-build:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-build/141/
duration: 1h11m45s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-build/architecture=x86_64,platform=el-5,project=opscode-analytics,role=builder/141/
duration: 1h11m29s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-build/architecture=x86_64,platform=el-6,project=opscode-analytics,role=builder/141/
duration: 1h5m29s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-build/architecture=x86_64,platform=el-7,project=opscode-analytics,role=builder/141/
duration: 57m28s
ubuntu-10.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-build/architecture=x86_64,platform=ubuntu-10.04,project=opscode-analytics,role=builder/141/
duration: 58m15s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-build/architecture=x86_64,platform=ubuntu-12.04,project=opscode-analytics,role=builder/141/
duration: 56m5s
chef-analytics-trigger-nightly-master:
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-analytics-trigger-nightly-master/68/
duration: 15s
|
reports/wilson.ci.chef.co/job/chef-analytics-trigger-nightly-master/68.yaml
|
documentType: LandingData
title: Form Recognizer documentation
metadata:
  title: 'API documentation (Form Recognizer): Azure Cognitive Services | Microsoft Docs'
  description: Learn how to use Form Recognizer to intelligently extract key-value pair data from form documents.
services: cognitive-services
author: PatrickFarley
manager: cgronlun
ms.service: cognitive-services
ms.subservice: form-recognizer
ms.tgt_pltfrm: na
ms.devlang: na
ms.topic: landing-page
ms.date: 04/16/2019
ms.author: pafarley
ms.custom: seodec18
abstract:
  description: 'Azure Form Recognizer is a cognitive service that uses machine learning technology to identify and extract key-value pairs and table data from form documents. It then outputs structured data that includes the relationships in the original file. Unsupervised learning allows the model to understand the layout and relationships between fields and entries without manual data labeling or intensive coding and maintenance.'
sections:
- items:
  - type: list
    style: cards
    className: cardsM
    columns: 3
    items:
    - href: /azure/cognitive-services/form-recognizer/overview
      html: <p>Learn about Form Recognizer.</p>
      image:
        src: ../media/index/i_overview.svg
      title: What is Form Recognizer?
    - href: /azure/cognitive-services/form-recognizer/form-recognizer-container-howto
      html: <p>Learn how to use the Form Recognizer container to parse form and table data.</p>
      image:
        src: ../media/index/i_get-started.svg
      title: Install and run containers
- title: 5-minute quickstarts
  items:
  - type: paragraph
    text: 'Train a Form Recognizer model and extract form data by using:'
  - type: list
    style: icon48
    items:
    - image:
        src: ./media/logos/logo-curl.svg
      text: cURL
      href: quickstarts/curl-train-extract.md
    - image:
        src: ./media/logos/logo-python.svg
      text: Python
      href: quickstarts/python-train-extract.md
- title: Reference
  items:
  - type: list
    style: cards
    className: cardsD
    items:
    - title: Existing APIs
      html: '<p><a href="https://aka.ms/form-recognizer/api">Form Recognizer APIs</a></p>'
    - title: PowerShell
      html: '<p><a href="https://docs.microsoft.com/powershell/module/az.cognitiveservices/#cognitive_services">Azure PowerShell</a></p>'
    - title: CLI
      html: '<p><a href="https://docs.microsoft.com/cli/azure/cognitiveservices?view=azure-cli-latest#az-cognitiveservices-list">Azure CLI</a></p>'
|
articles/cognitive-services/form-recognizer/index.yml
|
backends:
- group: google-cloud-dns-v1
url_pattern: /dns/v1/projects/{project}
method: GET
host:
- https://www.googleapis.com
whitelist:
- id
- kind
- number
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: Configures and serves authoritative DNS records.
version: v1
slug: google-cloud-dns-v1-dns-v1-projects-project
schemes:
- https
mapping:
id: id
kind: kind
number: number
hosts:
- https://www.googleapis.com
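  # A hedged note on the repeated fields in each backend entry, per common
  # KrakenD usage: "whitelist" keeps only the listed keys from the upstream
  # response, and "mapping" renames response keys; every mapping here is an
  # identity (same source and target), so field names pass through unchanged.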
- group: google-cloud-dns-v1
url_pattern: /dns/v1/projects/{project}/managedZones
method: GET
host:
- https://www.googleapis.com
whitelist:
- kind
- managedZones
- nextPageToken
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: Configures and serves authoritative DNS records.
version: v1
slug: google-cloud-dns-v1-dns-v1-projects-project-managedzones
schemes:
- https
mapping:
kind: kind
managedZones: managedZones
nextPageToken: nextPageToken
hosts:
- https://www.googleapis.com
- group: google-cloud-dns-v1
url_pattern: /dns/v1/projects/{project}/managedZones/{managedZone}
method: GET
host:
- https://www.googleapis.com
whitelist:
- creationTime
- description
- dnsName
- id
- kind
- name
- nameServerSet
- nameServers
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: Configures and serves authoritative DNS records.
version: v1
slug: google-cloud-dns-v1-dns-v1-projects-project-managedzones-managedzone
schemes:
- https
mapping:
creationTime: creationTime
description: description
dnsName: dnsName
id: id
kind: kind
name: name
nameServerSet: nameServerSet
nameServers: nameServers
hosts:
- https://www.googleapis.com
- group: google-cloud-dns-v1
url_pattern: /dns/v1/projects/{project}/managedZones/{managedZone}/changes
method: GET
host:
- https://www.googleapis.com
whitelist:
- changes
- kind
- nextPageToken
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: Configures and serves authoritative DNS records.
version: v1
slug: google-cloud-dns-v1-dns-v1-projects-project-managedzones-managedzone-changes
schemes:
- https
mapping:
changes: changes
kind: kind
nextPageToken: nextPageToken
hosts:
- https://www.googleapis.com
- group: google-cloud-dns-v1
url_pattern: /dns/v1/projects/{project}/managedZones/{managedZone}/changes/{changeId}
method: GET
host:
- https://www.googleapis.com
whitelist:
- additions
- deletions
- id
- kind
- startTime
- status
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: Configures and serves authoritative DNS records.
version: v1
slug: google-cloud-dns-v1-dns-v1-projects-project-managedzones-managedzone-changes-changeid
schemes:
- https
mapping:
additions: additions
deletions: deletions
id: id
kind: kind
startTime: startTime
status: status
hosts:
- https://www.googleapis.com
- group: google-cloud-dns-v1
url_pattern: /dns/v1/projects/{project}/managedZones/{managedZone}/rrsets
method: GET
host:
- https://www.googleapis.com
whitelist:
- kind
- nextPageToken
- rrsets
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: Configures and serves authoritative DNS records.
version: v1
slug: google-cloud-dns-v1-dns-v1-projects-project-managedzones-managedzone-rrsets
schemes:
- https
mapping:
kind: kind
nextPageToken: nextPageToken
rrsets: rrsets
hosts:
      - https://www.googleapis.com
|
shared/data/swagger/specs/googleapis.com/dns/v1/krakend/backends.yaml
|
title: 01.01.09-Rheingold-Tools-for-Thought-0283
note: |
The potential of computers as tools to be used by individuals, and the communications possibilities opened by linking computers, were what motivated the PARC team.
It was time to demonstrate that the theories about using personal computers to manage personal communications could work in an office like theirs.
  If they could demonstrate that such devices could speed their own work, they would be on the way to selling the rest of the world on the vision they held from the time-sharing days.
The first thing they needed in order to retool the world of information work was a computer designed for one person to use, something that went far beyond previous attempts.
Because they knew that vision was the human sense capable of the most sophisticated informational input, the PARC computerists knew they wanted a sophisticated graphic screen to bring the computer's power to the user.
Complex, dynamic, visual models required a large amount of computer power, so the decision to emphasize the visual display meant that the hardware would have a great deal more memory and speed than anyone else in the computer world had heretofore put at any one individual's command.
  "We wanted for our own use what we thought other information workers would eventually want.
  We needed the computing power and the research environment to build something expensive but very flexible and growable that would someday be much less expensive but even more capable.
  We all understood when we planned the Alto that the main memory of what we wanted might cost $7000 by the time it was produced, in 1974, but would drop to about $35 ten years later."
The hardware shop at PARC was only set up to produce small batches for the PARC software designers, but eventually 1500 Altos were built for Xerox executives and researchers, for associates at SAIL and SRI, as well as for the U.S. Senate, House of Representatives, certain other government agencies, and even the White House Staff.
tags:
- Core Text
- Computing History
- Rheingold 0283
- 1500 year
- 1500 century
- 1500s
- 1974 year
- 1970s
- 1970 decade
- 1900 century
- 1900s
- 1500 century early
- 1900 century late
cite:
bibkey: Rheingold_ToolsThoughtHistory_2000
page: PDF eBook
|
zettels/rheingold-examples/Rheingold-Tools-for-Thought-0283.yaml
|
name: ci
# Check out https://help.github.com/en/articles/workflow-syntax-for-github-actions for documentation on Actions.
on: [push]
jobs:
# Run our release build with optimizations.
ghcjs:
runs-on: ubuntu-16.04
steps:
- uses: actions/checkout@v1
- uses: cachix/install-nix-action@v8
- uses: cachix/cachix-action@v5
with:
skipNixBuild: true
name: hercules-ci # Add general cachix
- uses: cachix/cachix-action@v5
with:
skipNixBuild: true
name: miso-haskell # Add Miso's cachix
- name: Bootstrap nix environment
run: nix-shell shell-ghcjs.nix --max-jobs auto --run 'echo "Ready"'
- name: Update hackage packages # If things don't work, change to cabal v2-update 'hackage.haskell.org,2020-04-10T21:50:21Z'
run: nix-shell shell-ghcjs.nix --run 'cabal --project-file=ghcjs.project v2-update'
- name: Build GHCJS Version
run: nix-shell shell-ghcjs.nix --run build # or: nix-build -A release
- name: Optimize Build
run: nix-shell shell-ghcjs.nix --run optimize
# Run our development build along with doc and unit tests.
ghc:
runs-on: ubuntu-16.04
steps:
- uses: actions/checkout@v1
- uses: cachix/install-nix-action@v8
- uses: cachix/cachix-action@v5
with:
skipNixBuild: true
name: hercules-ci # Add general cachix
- uses: cachix/cachix-action@v5
with:
skipNixBuild: true
name: miso-haskell # Add Miso's cachix
- name: Bootstrap nix environment
run: nix-shell --max-jobs auto --run 'echo "Ready"'
- name: Update hackage packages # If things don't work, change to cabal v2-update 'hackage.haskell.org,2020-04-10T21:50:21Z'
run: nix-shell --run 'cabal v2-update'
- name: Build GHC Version
run: nix-shell --run 'cabal v2-build --enable-tests'
- name: Test Application
run: nix-shell --run 'cabal v2-test test:app-test'
|
.github/workflows/ci.yml
|
homepage: https://owickstrom.github.io/gi-gtk-declarative/
changelog-type: markdown
hash: bb3095d875b788b53daa77a23af646782bc3420cf48e702440e74e780e027e44
test-bench-deps:
haskell-gi-base: -any
stm: -any
base: '>=4 && <5'
unordered-containers: -any
text: -any
safe-exceptions: -any
gi-glib: -any
gi-gtk: <4
gi-gdk: -any
hedgehog: '>=1 && <2'
async: -any
gi-gobject: -any
containers: -any
gi-gtk-declarative: -any
mtl: -any
vector: -any
maintainer: <EMAIL>
synopsis: Declarative GTK+ programming in Haskell
changelog: |
* 0.7.1
- Bump gi-gtk version
* 0.7.0
- Version bounds compatibility with Stack resolver lts-17.0
- Replace Travis badge with a Github workflow one.
- Replace .travis.yml with a Github Actions Workflow.
- Improved exception handling and async handling in app-simple
- Fix race condition in app-simple
- Fix patching of grid child properties.
* 0.6.3
- Add `Grid` container widget
- Fix bugs in patching properties for all types of widgets
* 0.6.2
- Add `Notebook` container widget
* 0.6.1
- Fix Nix build issue
* 0.6.0
- Allow dependency haskell-gi-0.23
- Remove redundant code
* 0.5.0
- New `CustomWidget` API:
- easier-to-use internal state
- pass-through attributes to top widget
* 0.4.0
- Use `Vector` instead of `[]` for child widgets
* 0.3.0
- Add user documentation
- Use record for `BoxChild` properties (breaking change!)
- Use lists for child widgets instead of `MarkupOf` monad (breaking change!)
- Add support for `Paned` widget
* 0.2.0
- Introduce shadow state (breaking change!)
- Optimized patching (2x-7x faster!)
- Many bug fixes in patching
- Reimplement callback conversions
- Return pairs in declarative event handlers, for non-`()` GTK+ callback return values
* 0.1.0
- First version of `gi-gtk-declarative`!
- Basic widget without event handling
- Support for `Box` and `ScrolledWindow` containers
- Declarative CSS classes
basic-deps:
haskell-gi-base: '>=0.24 && <0.26'
haskell-gi-overloading: '>=1.0 && <1.1'
base: '>=4.10 && <5'
data-default-class: '>=0.1 && <0.2'
unordered-containers: '>=0.2 && <0.3'
text: -any
gi-glib: '>=2 && <3'
gi-gtk: '>=3 && <4'
gi-gobject: '>=2 && <3'
containers: '>=0.6 && <0.7'
haskell-gi: '>=0.24 && <0.26'
mtl: -any
vector: -any
all-versions:
- 0.1.0
- 0.2.0
- 0.3.0
- 0.4.0
- 0.4.1
- 0.4.2
- 0.4.3
- 0.5.0
- 0.6.0
- 0.6.1
- 0.6.2
- 0.6.3
- 0.7.0
- 0.7.1
author: <NAME>
latest: 0.7.1
description-type: haddock
description: |-
A declarative programming model for GTK+ user
interfaces, implementing support for various widgets
and generalized patching. This library aims to extend the
gi-gtk library as transparently as possible, and to be
a reusable library for multiple application architectures
and styles.
See [the project website](https://owickstrom.github.io/gi-gtk-declarative/)
for user guides and more information.
license-name: MPL-2.0
|
packages/gi/gi-gtk-declarative.yaml
|
image: onedata/onezone:17.06.0-rc1
imagePullPolicy: IfNotPresent
serviceType: ClusterIP
# onezone name, if empty defaults to a chart name
name: ''
# Resources requested by onezone
cpu: 1.5
memory: 4Gi
# Log level of the processes in the container
log_level: "info"
# Enable loading onezone configuration from ONEZONE_CONFIG env variable
onepanel_batch_mode_enabled: true
# If enabled, a new web cert will be generated with CN matching the
# ONEPANEL_GENERATED_CERT_DOMAIN and signed by OnedataTestWebServerCa
# NOTE: The generation will be performed upon every startup, any
# existing certs will be backed up and placed in the same directory.
# WARNING: This functionality is devised for test purposes and must not
# be used in production.
onepanel_generate_test_web_cert: false
# The generated test web cert will be issued for below domain.
onepanel_generated_cert_domain: "xx"
# If enabled, onepanel will trust any server that has a cert signed by
# the OnedataTestWebServerCa.
# WARNING: This functionality is devised for test purposes and must not
# be used in production.
onepanel_trust_test_ca: false
# Number of nodes (pod replicas) of this onezone;
# their indexes will be assigned FROM 0 (ZERO!)
# up to, but not including, the count value
onezone_nodes_count: 1
# you can use values from the range <0, onezone_nodes_count)
# by default the node with the highest index (onezone_nodes_count-1)
# is configured as a mainNode
# If a service list is empty, all available nodes are assigned that
# service (see the example sketched after cluster_config below).
cluster_config:
managers: [ ]
workers: [ ]
databases: [ ]
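# A hypothetical example: with onezone_nodes_count: 3 the nodes get indexes
# 0, 1 and 2, node 2 acts as the main node by default, and a config such as
#   cluster_config:
#     managers: [ 0, 1, 2 ]
#     workers: [ 0, 1, 2 ]
#     databases: [ 2 ]
# would pin the database service to the main node only.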
# The generalization of nodeSelector.
# Allows for more fine-grained control over which
# nodes are selected by the kubernetes scheduler
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# List of taints which are tolerated by the pods
# when nodes are selected by a kubernetes scheduler
# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: {}
# Specify a map of key-value pairs. For the pod
# to be eligible to run on a node, the node
# must have each of the indicated key-value pairs as labels
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
# Generate certificates so this onezone can be accessed over a trusted https connection
# You need to add the cluster root certificate to your system for this to work
generate-certificates:
enabled: false
image: onedata/certificate-init-container:8434eb0
# If set to true, onezone will not be deployed on the same node as other onezones
# or oneproviders that are part of this deployment
onedata_anti_affinity_enabled: false
# List of onezone onepanel users with administrative privileges
onepanel_admin_users:
- login: admin
password: password
# List of onezone onepanel regular users
onepanel_users:
- login: user
password: password
saml-idp:
enabled: false
|
charts/onezone/values.yaml
|