code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
--- !<SKIN>
contentType: "SKIN"
firstIndex: "2018-12-25 23:16"
game: "Unreal Tournament"
name: "Crow"
author: "Tomgilki"
description: "None"
releaseDate: "2000-01"
attachments:
- type: "IMAGE"
name: "Crow_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/C/Crow_shot_1.png"
- type: "IMAGE"
name: "Crow_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/C/Crow_shot_3.png"
- type: "IMAGE"
name: "Crow_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Skins/C/Crow_shot_2.png"
originalFilename: "TheCrow.zip"
hash: "8751f471c3bb6a373ce23f9190a472fc77f0c739"
fileSize: 533528
files:
- name: "SoldierSkins_Crow.utx"
fileSize: 1256428
hash: "1b4606b1d058f2b82cebe1792b060f0a3d84729b"
otherFiles: 3
dependencies: {}
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Skins/C/TheCrow.zip"
main: true
repack: false
state: "OK"
- url: "https://gamefront.online/files2/service/thankyou?id=1419925"
main: false
repack: false
state: "MISSING"
- url: "http://www.ut-files.com/index.php?dir=Skins/SkinsT/&file=thecrow.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Skins/Male/TheCrow.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Skins/Misc/SkinsT/thecrow.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Skins/&file=thecrow.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Skins/C/8/7/51f471/TheCrow.zip"
main: false
repack: false
state: "OK"
- url: "http://ut-files.com/index.php?dir=Skins/SkinsT/&file=thecrow.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Skins/C/8/7/51f471/TheCrow.zip"
main: false
repack: false
state: "OK"
deleted: false
skins:
- "Crow"
faces:
- "Eric"
- "Lee"
model: "Unknown"
teamSkins: true
|
content/Unreal Tournament/Skins/C/8/7/51f471/crow_[8751f471].yml
|
---
- name: Ensure NGINX main directory exists
file:
path: "{{ nginx_app_protect_conf_template.out_file_location }}"
state: directory
mode: 0755
when: nginx_app_protect_security_policy_template_enable | bool
or nginx_app_protect_log_policy_template_enable | bool
or nginx_app_protect_conf_template_enable | bool
- name: Copy NGINX App Protect security policy file
copy:
src: "{{ nginx_app_protect_security_policy_file_src }}"
dest: "{{ nginx_app_protect_security_policy_file_dst }}"
mode: 0644
when: nginx_app_protect_security_policy_file_enable | bool
- name: Copy NGINX App Protect log policy file
copy:
src: "{{ nginx_app_protect_log_policy_file_src }}"
dest: "{{ nginx_app_protect_log_policy_file_dst }}"
mode: 0644
when: nginx_app_protect_log_policy_file_enable | bool
- name: Dynamically generate NGINX App Protect security policy file
template:
src: "{{ nginx_app_protect_security_policy_template.template_file }}"
dest: "{{ nginx_app_protect_security_policy_template.out_file_location }}{{ nginx_app_protect_security_policy_template.out_file_name }}"
mode: 0644
backup: true
when: nginx_app_protect_security_policy_template_enable | bool
notify: (Handler - NGINX App Protect) Run NGINX
- name: Dynamically generate NGINX App Protect log policy file
template:
src: "{{ nginx_app_protect_log_policy_template.template_file }}"
dest: "{{ nginx_app_protect_log_policy_template.out_file_location }}{{ nginx_app_protect_log_policy_template.out_file_name }}"
mode: 0644
backup: true
when: nginx_app_protect_log_policy_template_enable | bool
notify: (Handler - NGINX App Protect) Run NGINX
- name: (DEPRECATED) Backup existing nginx.conf
copy:
src: "{{ nginx_app_protect_conf_template.out_file_location }}{{ nginx_app_protect_conf_template.out_file_name }}"
dest: "{{ nginx_app_protect_conf_template.out_file_location }}{{ nginx_app_protect_conf_template.out_file_name }}.orig"
remote_src: true
mode: 0644
when: nginx_app_protect_conf_template_enable | bool
changed_when: false
- name: (DEPRECATED) Dynamically generate nginx.conf file
template:
src: "{{ nginx_app_protect_conf_template.template_file }}"
dest: "{{ nginx_app_protect_conf_template.out_file_location }}{{ nginx_app_protect_conf_template.out_file_name }}"
mode: 0644
when:
- nginx_app_protect_conf_template_enable | bool
- nginx_app_protect_state != "absent"
notify: (Handler - NGINX App Protect) Run NGINX
- name: (DEPRECATED) Remove NGINX App Protect
block:
- name: (DEPRECATED) Comment out NGINX App Protect module reference in nginx.conf
replace:
path: /etc/nginx/nginx.conf
regexp: '^([ \t]*load_module.*ngx_http_app_protect_module.so;)'
replace: '# \1'
- name: (DEPRECATED) Comment out NGINX App Protect directives in nginx.conf
replace:
path: /etc/nginx/nginx.conf
regexp: '^([ \t]*app_protect_)'
replace: '# \1'
notify: (Handler - NGINX App Protect) Run NGINX
when: nginx_app_protect_state == "absent"
|
tasks/config/configure-app-protect.yml
|
title: Javascript functional programming
tagline: An adventure into functional javascript and functional methods
description: A longer description for your Learning Lab course, displayed on the course's landing page
# Repository setup
# The following section template information for the repository
# This will be used when the repository is crated for each individual learner
template:
name: javascript-functional-programming-course
repo: github-course-template-repo
# `before` block
# Actions to take place after the learner registers, but before they start interacting with their repo
before:
- type: respond
with: 01_first-response.md
# some actions require additional options, those should appear below `type:`
# you can execute multiple actions in the `before` block
# Steps for the course
# A list of steps that take the learner through the course
# Create one step for each action that a learner will complete
steps:
# Step 1: EDIT HERE - INTERNAL COMMENT DESCRIBING STEP
- title: The `map` method
description: Understand how map works on arrays
event: pull_request.synchronize
actions: # a list of the actions that will occur when the step is triggered by an event
- type: gate
left: '%payload.pull_request.title%'
operator: ===
right: "Greet world"
else:
- type: respond
with: 01_try-again.md
- type: respond
with: 01_nice-work.md
# Step 2: EDIT HERE -INTERNAL COMMENT FOR STEP 2
- title: The `reduce` method
description: How does reduce work on collections
event: pull_request.synchronize
# actions: # a list of the actions that will occur when the step is triggered by an event
# - type: # the type of action, full list at https://lab.github.com/docs/actions
# Step 3: EDIT HERE - INTERNAL COMMENT FOR STEP 3
- title: The `sort` method
description: How to sort on collections
event: commit_comment
# actions: # a list of the actions that will occur when the step is triggered by an event
# - type: # the type of action, full list at https://lab.github.com/docs/actions
# Step 4: EDIT HERE - INTERNAL COMMENT FOR STEP 4
- title: The `find` and `findIndex` method
description: Search for elements in an array
event: commit_comment
# actions: # a list of the actions that will occur when the step is triggered by an event
# - type: # the type of action, full list at https://lab.github.com/docs/actions
# Step 5: EDIT HERE - INTERNAL COMMENT FOR STEP 5
- title: The `some` and `every` methods
description: How to test for certain conditions in an array
event: commit_comment
# actions: # a list of the actions that will occur when the step is triggered by an event
# - type: # the type of action, full list at https://lab.github.com/docs/actions
|
config.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-16 18:26"
variationOf: "7514f3f3910f7760c2131cc4f268a9167af04129"
game: "Unreal Tournament"
name: "CTF-JellyGoose"
author: "Chad \"GreyTiger\" Barnett"
description: "None"
releaseDate: "2000-08"
attachments:
- type: "IMAGE"
name: "ctf-jellygoose_shot_5c09fd61_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/Capture%20The%20Flag/J/5/c/09fd61/ctf-jellygoose_shot_5c09fd61_1.png"
originalFilename: "ctf-jellygoose.zip"
hash: "5c09fd614c748c460a2d2c78abd27d1cbd049800"
fileSize: 801196
files:
- name: "heroes.umx"
fileSize: 452143
hash: "faad1e4b23e8a100ccf7647f37e6a112f8eb6b04"
- name: "CTF-JellyGoose.unr"
fileSize: 1927958
hash: "fd0669c015b811cae2ba064b2de843f2c0ea4817"
otherFiles: 1
dependencies:
CTF-JellyGoose.unr:
- status: "OK"
name: "heroes"
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/Capture%20The%20Flag/J/ctf-jellygoose.zip"
main: true
repack: false
state: "OK"
- url: "https://gamefront.online/files2/service/thankyou?id=1404099"
main: false
repack: false
state: "MISSING"
- url: "http://www.ut-files.com/index.php?dir=Maps/CTF/MapsJ/&file=ctf-jellygoose.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/CTF&file=ctf-jellygoose.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Maps/CTF/MapsJ/ctf-jellygoose.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/Capture%20The%20Flag/J/5/c/09fd61/ctf-jellygoose.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/Capture%20The%20Flag/J/5/c/09fd61/ctf-jellygoose.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Capture The Flag"
title: "CTF-JellyGoose"
playerCount: "6-16"
themes:
Industrial: 0.9
Natural: 0.1
bots: true
|
content/Unreal Tournament/Maps/Capture The Flag/J/5/c/09fd61/ctf-jellygoose_[5c09fd61].yml
|
---
- hosts: all
vars:
- sift_docker: false
- sift_include_volplugins: false
- sift_do_x11: false
## travis: 'No space left on device' with ffate_testing
- ffate_testing: false
- ffate_nist_nsrl_archive: ''
- jenkins_admin_username: adminffate
- jenkins_admin_password: <PASSWORD>
- jenkins_numExecutors: 3
- jenkins_plugins:
# - parameterized-trigger
- conditional-buildstep
## https://wiki.jenkins-ci.org/display/JENKINS/Rebuild+Plugin
- rebuild
- jenkins_java_options: "-Djenkins.install.runSetupWizard=false -Dmail.smtp.starttls.enable=true -Dhudson.DNSMultiCast.disabled=true -Dhudson.udp=-1 -Dhudson.model.DirectoryBrowserSupport.CSP=\\\"sandbox; default-src 'self'; img-src 'self'; style-src 'self' nvd3.org cdn.rawgit.com; script-src 'self' d3js.org nvd3.org cdn.rawgit.com\\\" -Dhudson.diyChunking=false"
- jenkins_init_changes:
- { option: "JENKINS_ARGS",
value: "--prefix={{ jenkins_url_prefix }} --httpListenAddress=127.0.0.1 --ajp13Port=-1 --httpsPort=-1 --sessionTimeout=180 {{ jenkins_java_options }}"
}
- { option: "JENKINS_JAVA_OPTIONS",
value: "{{ jenkins_java_options }}"
}
- ffate_butler_job_local: []
- ffate_view_local: []
# - jenkins_ufw_if: eth0
- jenkins_ufw_if: "{{ ansible_default_ipv4.interface }}"
- postgresql_databases:
- name: webpy
postgresql_users:
- name: webpy
priv: ALL
db: webpy
## nginx-light miss uwsgi
- hardenwebserver_nginx_debian_pkg: 'nginx-full'
- nginx_pidfile: /run/nginx/nginx.pid
pre_tasks:
- name: Redhat | set extra permissions for systemd hardening
set_fact:
hardenwebserver_systemd_files_acl:
- { p: "/etc/pki/tls/private", perm: rx }
- { p: "/etc/pki/tls/private/{{ ansible_fqdn }}.key", perm: r }
- { p: "/etc/pki/tls/certs/{{ ansible_fqdn }}.crt", perm: r }
- { p: "/var/log/nginx", perm: rwx }
when: ansible_os_family == "RedHat"
- name: Debian | set extra permissions for systemd hardening
set_fact:
hardenwebserver_systemd_files_acl:
- { p: "/etc/ssl/private", perm: rx }
- { p: "/etc/ssl/private/{{ ansible_fqdn }}.key", perm: r }
- { p: "/etc/ssl/{{ ansible_fqdn }}.crt", perm: r }
- { p: "/var/log/nginx", perm: rwx }
when: ansible_os_family == "Debian"
roles:
- juju4.forensicatorfate
|
test/integration/default/default.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-16 17:50"
game: "Unreal"
name: "dm_enterthematrix"
author: "dik<KoR> * special thanks to J[IP] *"
description: "We're In ... Watch out for Agent Smith"
releaseDate: "2004-01"
attachments: []
originalFilename: "dm_enterthematrix[IP].zip"
hash: "4861c41cb3c75ffd83b993161276a11aa72eb0ed"
fileSize: 8998663
files:
- name: "Urban.utx"
fileSize: 679765
hash: "b2f3f7ddc4f12b20b0c06041404aff71dafdfbcf"
- name: "urbansky1.utx"
fileSize: 200438
hash: "fdb17106fb94ba170a2de93d54df20e73fbf3e88"
- name: "urbansky6.utx"
fileSize: 2327168
hash: "7c36b1e1881f4dc3717f5af67cf1a5aecb081f3f"
- name: "dmurbsoundfx.uax"
fileSize: 60832
hash: "cfe4391c026108738665a6338b5b797177c0ed9f"
- name: "urbansky3.utx"
fileSize: 1118270
hash: "a718709eb12001bed3fb53c96c34217d9870a2dc"
- name: "dm_enterthematrix.unr"
fileSize: 5624372
hash: "4fb9bd393f10decc09b4ee0da3afb85e52d8d5c7"
- name: "dinosound111199.uax"
fileSize: 1324535
hash: "e27fbe4cde7cc107fbdf2bc308fc7c71c977c2f3"
- name: "urbansky5.utx"
fileSize: 1530514
hash: "c1adcdd803fee28e0516abfce06e05b424a91ed3"
- name: "urbansky2.utx"
fileSize: 2068743
hash: "81a074aab84063b063c12f0a4be2916d7a8a3276"
- name: "JumpPad.u"
fileSize: 12138
hash: "e6df5a06b2b081d3a4d429d95be1d21dff09c36b"
- name: "urbansky4.utx"
fileSize: 679769
hash: "96f34b2fe2674e73d5b5b3ef070d7dcfcfe5bed9"
- name: "Blade.utx"
fileSize: 2327164
hash: "8c2667ad67176f0135e322dd7d6ecbeae9442f13"
- name: "MaleThreeBotNPC.u"
fileSize: 1048
hash: "d1974f15e388c0b4607d31a71407ec1b04590961"
otherFiles: 0
dependencies:
dm_enterthematrix.unr:
- status: "OK"
name: "JumpPad"
- status: "OK"
name: "Urban"
- status: "OK"
name: "dmurbsoundfx"
- status: "OK"
name: "dinosound111199"
- status: "OK"
name: "Blade"
- status: "OK"
name: "MaleThreeBotNPC"
- status: "OK"
name: "urbansky2"
- status: "OK"
name: "urbansky1"
- status: "OK"
name: "urbansky4"
- status: "OK"
name: "urbansky3"
- status: "OK"
name: "urbansky6"
- status: "OK"
name: "urbansky5"
downloads:
- url: "http://unreal.ut-files.com/index.php?dir=Maps/DeathMatch/MapsE/&file=dm_enterthematrix%5BIP%5D.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal/Maps/DeathMatch/D/dm_enterthematrix%5BIP%5D.zip"
main: true
repack: false
state: "OK"
- url: "http://unrealtexture.com/Unreal/Downloads/Maps/DeathMatch/MapsE/dm_enterthematrix%5bIP%5d.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal/Maps/DeathMatch/E/4/8/61c41c/dm_enterthematrix%255BIP%255D.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal/Maps/DeathMatch/E/4/8/61c41c/dm_enterthematrix%255BIP%255D.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DmEnterTheMatrix"
playerCount: "2 to 12"
themes:
Ancient: 0.2
City: 0.4
Skaarj Tech: 0.2
Nali Castle: 0.2
bots: true
|
content/Unreal/Maps/DeathMatch/E/4/8/61c41c/dm_enterthematrix_[4861c41c].yml
|
uid: management.azure.com.iot-dps.iotdpsresource.checkprovisioningservicenameavailability
name: Check Provisioning Service Name Availability
service: IoT Hub Device Provisioning Service
groupName: Iot Dps Resource
apiVersion: 2017-11-15
summary: "Check if a provisioning service name is available. \nCheck if a provisioning service name is available. This will validate if the name is syntactically valid and if the name is usable"
consumes:
- application/json
produces:
- application/json
paths:
- content: POST https://management.azure.com/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkProvisioningServiceNameAvailability?api-version=2017-11-15
uriParameters:
- name: subscriptionId
in: path
isRequired: true
description: The subscription identifier.
types:
- uid: string
- name: api-version
in: query
isRequired: true
description: The version of the API.
types:
- uid: string
responses:
- name: 200 OK
description: This is a synchronous operation. The body contains a JSON-serialized response that specifies whether the provisioning service name is available. If the name is not available, the body contains the reason.
types:
- uid: NameAvailabilityInfo
- name: Other Status Codes
description: Default error response.
types:
- uid: ErrorDetails
requestBody:
- name: default
parameters:
- name: name
in: body
isRequired: true
description: The name of the Provisioning Service to check.
types:
- uid: string
requestHeader: []
definitions:
- name: NameAvailabilityInfo
description: Description of name availability.
kind: object
properties:
- name: nameAvailable
description: specifies if a name is available or not
types:
- uid: boolean
- name: reason
description: specifies the reason a name is unavailable
types:
- uid: NameUnavailabilityReason
- name: message
description: message containing a etailed reason name is unavailable
types:
- uid: string
- name: ErrorDetails
description: Error details.
kind: object
properties:
- name: Code
isReadyOnly: true
description: The error code.
types:
- uid: string
- name: HttpStatusCode
isReadyOnly: true
description: The HTTP status code.
types:
- uid: string
- name: Message
isReadyOnly: true
description: The error message.
types:
- uid: string
- name: Details
isReadyOnly: true
description: The error details.
types:
- uid: string
- name: OperationInputs
description: Input values for operation results call.
kind: object
properties:
- name: name
description: The name of the Provisioning Service to check.
types:
- uid: string
- name: NameUnavailabilityReason
description: specifies the reason a name is unavailable
kind: enum
properties:
- name: Invalid
types:
- uid: string
- name: AlreadyExists
types:
- uid: string
examples:
- name: DPSCheckName
request:
uri: POST https://management.azure.com/subscriptions/91d12660-3dec-467a-be2a-213b5544ddc0/providers/Microsoft.Devices/checkProvisioningServiceNameAvailability?api-version=2017-11-15
body: >-
{
"name": "test213123"
}
responses:
- statusCode: "200"
body: >-
{
"nameAvailable": true,
"reason": "Invalid",
"message": "name is valid"
}
security:
- type: oauth2
description: Azure Active Directory OAuth2 Flow.
flow: implicit
authorizationUrl: https://login.microsoftonline.com/common/oauth2/authorize
scopes:
- name: user_impersonation
description: impersonate your user account
|
docs-ref-autogen/iot-dps/IotDpsResource/CheckProvisioningServiceNameAvailability.yml
|
paths:
/affiliations:
get:
summary: Retrieves multiple Affiliations.
tags:
- Affiliations
produces:
- application/json
description: Return all Affiliations for the given Organization or User.
parameters:
- name: organization
in: query
description: Optional Organization id to retrieve affiliations for.
type: uuid
responses:
200:
description: Receive the array of Affiliations
post:
summary: Creates a new Affiliation.
tags:
- Affiliations
produces:
- application/json
description: Creates a new Affiliation for the given User email and Organization.
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/CreateAffiliationRequestBody'
examples:
createAffiliationRequestBody:
$ref: '#/components/examples/createAffiliationRequestBody'
responses:
201:
descriptions: Receive the created Affiliation
/affiliations/{id}:
post:
summary: Updates an existing Affiliation.
tags:
- Affiliations
produces:
- application/json
description: Updates a pending Affiliation to be active.
parameters:
- name: id
in: path
required: true
description: ID of the Affiliation to update
type: uuid
responses:
200:
description: Receive the updated Affiliation
delete:
summary: Deactivates an existing Affiliation.
tags:
- Affiliations
produces:
- application/json
description: Deactivates an existing Affiliation.
parameters:
- name: id
in: path
required: true
description: ID of the Affiliation to deactivate
type: uuid
responses:
200:
description: Receive the deactivated Affiliation.
components:
schemas:
CreateAffiliationRequestBody:
type: object
properties:
userEmail:
type: string
organization:
type: uuid
membershipType:
type: string
enum: [MEMBER, OWNER]
examples:
createAffiliationRequestBody:
summary: Request body for creating an Affiliation
value:
userEmail: <EMAIL>
organization: 89fe0644-50e1-4939-a488-aa6cf5977d7b
membershipType: 'MEMBER'
|
src/back-end/docs/affiliation.yaml
|
---
- name: '[OCP4-AUTO-INSTALL] Deploy an Openshift4 cluster'
hosts: localhost
connection: local
vars_files:
- vars/vars.yml
- vault/vault.yml
tasks:
- name: '[OCP4-AUTO-INSTALL][PREREQS] Prerequisites for the installation'
include_role:
name: "{{ item }}"
loop:
- common
- awscli
when:
- only_post_install != True
- name: '[OCP4-AUTO-INSTALL][INSTALL] Installation of the OCP4 Cluster'
include_role:
name: "ocp4-install"
when:
- only_post_install != True
- name: '[OCP4-AUTO-INSTALL][POST-INSTALL][CERT-MANAGER] Configure Cert-Manager'
include_role:
name: "ocp4-post-cert-manager"
when:
- cmanager is defined
- cmanager | bool
- name: '[OCP4-AUTO-INSTALL][POST-INSTALL][WORKER-NODES] Configure Worker Nodes'
include_role:
name: "ocp4-post-worker-nodes"
when:
- worker_nodes is defined
- worker_nodes | bool
- name: '[OCP4-AUTO-INSTALL][POST-INSTALL][INFRA-NODES] Deploy and configure Infra Nodes'
include_role:
name: "ocp4-post-infra-nodes"
when:
- infra_nodes is defined
- infra_nodes | bool
- name: '[OCP4-AUTO-INSTALL][POST-INSTALL][OAUTH] Configure OAuth'
include_role:
name: "ocp4-post-oauth"
when:
- oauth is defined
- oauth != False
- name: '[OCP4-AUTO-INSTALL][POST-INSTALL][LOGGING] Install and configure Logging'
include_role:
name: "ocp4-post-logging"
when:
- logging is defined
- logging | bool
- name: '[OCP4-AUTO-INSTALL][POST-INSTALL][MONITORING] Configure Monitoring'
include_role:
name: "ocp4-post-monitoring"
when:
- monitoring is defined
- monitoring | bool
- name: '[OCP4-AUTO-INSTALL][POST-INSTALL][REGISTRY] Install and configure Registry'
include_role:
name: "ocp4-post-registry"
when:
- registry is defined
- registry | bool
- name: '[OCP4-AUTO-INSTALL][POST-INSTALL][SERVICE-MESH] Install and configure Service Mesh'
include_role:
name: "ocp4-post-service-mesh"
when:
- service_mesh is defined
- service_mesh | bool
- name: '[OCP4-AUTO-INSTALL][POST-INSTALL] Install and configure Argocd'
include_role:
name: "ocp4-post-argocd"
when:
- argocd is defined
- argocd | bool
|
deploy_all.yml
|
---
- name: "Getting VPC id... "
ec2_vpc_net_facts:
region: "{{ region }}"
filters:
"tag:Name": "{{ vpc_name }}"
register: vpc
- name: "Getting subnets id"
ec2_vpc_subnet_facts:
region: "{{ region }}"
filters:
vpc-id: "{{ vpc.vpcs[0].vpc_id }}"
register: subnet
- name: "Getting SGroup id... "
ec2_group_facts:
region: "{{ region }}"
filters:
vpc-id: "{{ vpc.vpcs[0].vpc_id }}"
group-name: "rds_{{ aws_sgroup }}"
register: group
- name: "Getting staging instance subnet id..."
ec2_instance_facts:
region: "{{ region }}"
filters:
"tag:Name": staging.project
instance-state-name: running
register: ec2_staging_state
- name: "Getting production instance subnet id..."
ec2_instance_facts:
region: "{{ region }}"
filters:
"tag:Name": production.project
instance-state-name: running
register: ec2_production_state
- name: Setting RDS facts
set_fact:
staging_subnet: "{{ ec2_staging_state['instances'][0].network_interfaces[0].subnet_id }}"
production_subnet: "{{ ec2_production_state['instances'][0].network_interfaces[0].subnet_id }}"
rds_subnet: "project"
- name: "Creating RDS Security group"
rds_subnet_group:
state: present
region: "{{ region }}"
name: "{{ rds_subnet }}"
description: "DB sgroup for project"
subnets:
- "{{ staging_subnet }}"
- "{{ production_subnet }}"
- name: "Staging RDS instance..."
rds:
command: create
instance_name: staging-rds
db_engine: "{{ aws_rds_dbtype }}"
username: "{{ project_staging_rds_username }}"
password: "{{ <PASSWORD> }}"
region: "{{ region }}"
zone: "{{ region }}b"
subnet: "{{ rds_subnet }}"
instance_type: "{{ aws_rds_instance_type }}"
size: "10"
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: staging-project
register: rds_staging_project
- name: "Production RDS instance..."
rds:
command: create
instance_name: production-rds
db_engine: "{{ aws_rds_dbtype }}"
username: "{{ project_production_rds_username }}"
password: "{{ <PASSWORD> }}"
region: "{{ region }}"
subnet: "{{ rds_subnet }}"
zone: "{{ region }}c"
instance_type: "{{ aws_rds_instance_type }}"
size: "10"
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: production-project
register: rds_production_project
- name: "Updating RDS vpc security groups... "
rds:
command: modify
instance_name: "{{ item }}"
region: "{{ region }}"
subnet:
vpc_security_groups: "{{ group['security_groups'][0].group_id }}"
loop:
- staging-project-rds
- production-project-rds
|
tasks/aws-rds.yml
|
sylius_backend_product_index:
pattern: /
methods: [GET]
defaults:
_controller: sylius.controller.product:indexAction
_sylius:
template: SyliusWebBundle:Backend/Product:index.html.twig
method: createFilterPaginator
arguments: [$criteria, $sorting, $deleted]
sylius_backend_product_index_order:
pattern: /order
methods: [GET]
defaults:
_controller: sylius.controller.product:indexAction
_sylius:
template: SyliusWebBundle:Backend/Product:indexOrder.html.twig
method: createFilterPaginator
arguments: [$criteria, $sorting, $deleted]
sylius_backend_product_children_active:
pattern: /children/active
methods: [POST]
defaults:
_controller: sylius.controller.product:childrenActiveAction
sylius_backend_product_by_taxon:
pattern: /children/taxon/{taxon}/{parent}
methods: [GET, POST]
defaults:
_controller: sylius.controller.product:productsTaxonAction
sylius_backend_product_children:
pattern: /children/{parent}
methods: [GET, POST]
defaults:
_controller: sylius.controller.product:childrenAction
sylius_backend_product_create:
pattern: /new
methods: [GET, POST]
defaults:
_controller: sylius.controller.product:createAction
_sylius:
template: SyliusWebBundle:Backend/Product:create.html.twig
redirect: sylius_backend_product_show
sylius_backend_product_update:
pattern: /{id}/edit
methods: [GET, PUT, POST]
defaults:
_controller: sylius.controller.product:updateAction
_sylius:
template: SyliusWebBundle:Backend/Product:update.html.twig
redirect: sylius_backend_product_show
sylius_backend_product_delete_all:
pattern: /delete/all
methods: [POST]
defaults:
_controller: sylius.controller.product:deleteAllAction
sylius_backend_product_edit_group:
pattern: /edit/group
methods: [POST]
defaults:
_controller: sylius.controller.product:editGroupAction
sylius_backend_product_edit_products:
pattern: /edit/products
methods: [GET, POST]
defaults:
_controller: sylius.controller.product:editProductsAction
sylius_backend_product_delete:
pattern: /{id}
methods: [DELETE]
defaults:
_controller: sylius.controller.product:deleteAction
_sylius:
template: SyliusWebBundle:Backend/Misc:delete.html.twig
redirect: sylius_backend_product_index
sylius_backend_product_history:
pattern: /{id}/history
methods: [GET]
defaults:
_controller: sylius.controller.product:historyAction
_sylius:
template: SyliusWebBundle:Backend/Product:history.html.twig
method: findForDetailsPage
arguments: [$id]
sylius_backend_product_show:
pattern: /{id}
methods: [GET]
defaults:
_controller: sylius.controller.product:showAction
_sylius:
template: SyliusWebBundle:Backend/Product:show.html.twig
method: findForDetailsPage
arguments: [$id]
sylius_backend_product_change_order:
pattern: /order_change/{drag}/{drop}/{type}
defaults:
_controller: sylius.controller.backend.dashboard:productOrderChangeAction
sylius_backend_product_reset_position:
pattern: /reset_position/products/
defaults:
_controller: sylius.controller.backend.dashboard:productPositionResetAction
|
src/Sylius/Bundle/WebBundle/Resources/config/routing/backend/product.yml
|
models:
- name: face-reidentification-retail-0095
launchers:
- framework: dlsdk
tags:
- FP32
device: CPU
model: Retail/object_reidentification/face/mobilenet_based/dldt/FP32/face-reidentification-retail-0095.xml
weights: Retail/object_reidentification/face/mobilenet_based/dldt/FP32/face-reidentification-retail-0095.bin
adapter: reid
- framework: dlsdk
tags:
- FP16
device: CPU
model: Retail/object_reidentification/face/mobilenet_based/dldt/FP16/face-reidentification-retail-0095.xml
weights: Retail/object_reidentification/face/mobilenet_based/dldt/FP16/face-reidentification-retail-0095.bin
adapter: reid
- framework: dlsdk
tags:
- INT8
device: CPU
model: Retail/object_reidentification/face/mobilenet_based/dldt/INT8/face-reidentification-retail-0095.xml
weights: Retail/object_reidentification/face/mobilenet_based/dldt/INT8/face-reidentification-retail-0095.bin
adapter: reid
- framework: dlsdk
tags:
- FP32
device: GPU
model: Retail/object_reidentification/face/mobilenet_based/dldt/FP32/face-reidentification-retail-0095.xml
weights: Retail/object_reidentification/face/mobilenet_based/dldt/FP32/face-reidentification-retail-0095.bin
adapter: reid
- framework: dlsdk
tags:
- FP16
device: GPU
model: Retail/object_reidentification/face/mobilenet_based/dldt/FP16/face-reidentification-retail-0095.xml
weights: Retail/object_reidentification/face/mobilenet_based/dldt/FP16/face-reidentification-retail-0095.bin
adapter: reid
datasets:
- name: lfw
data_source: LFW/lfw
annotation_conversion:
converter: face_reid_pairwise
pairs_file: LFW/annotation/pairs.txt
landmarks_file: LFW/annotation/lfw_landmark.txt
preprocessing:
- type: point_alignment
size: 400
- type: resize
size: 128
metrics:
- type: pairwise_accuracy_subsets
|
tools/accuracy_checker/configs/face-reidentification-retail-0095.yml
|
replicaCount: 1
image:
repository: registry.cn-hangzhou.aliyuncs.com/choerodon-agile/knowledgebase-service
pullPolicy: Always
preJob:
timeout: 300
image: registry.cn-hangzhou.aliyuncs.com/choerodon-tools/dbtool:0.6.4
preConfig:
enabled: true
configFile: application.yml
configType: k8s
registerHost: http://register-server.c7n-system:8000
datasource:
url: jdbc:mysql://localhost:3306/manager_service?useUnicode=true&characterEncoding=utf-8&useSSL=false
username: choerodon
password: <PASSWORD>
preInitDB:
enabled: true
datasource:
url: jdbc:mysql://localhost:3306/knowledgebase_service?useUnicode=true&characterEncoding=utf-8&useSSL=false
username: choerodon
password: <PASSWORD>
deployment:
managementPort: 8281
env:
open:
## register-server
EUREKA_CLIENT_SERVICEURL_DEFAULTZONE: http://register-server.io-choerodon:8000/eureka/
## config-server
SPRING_CLOUD_CONFIG_ENABLED: true
SPRING_CLOUD_CONFIG_URI: http://config-server.framework:8010/
## mysql
SPRING_DATASOURCE_URL: jdbc:mysql://localhost:3306/knowledgebase_service?useUnicode=true&characterEncoding=utf-8&useSSL=false
SPRING_DATASOURCE_USERNAME: choerodon
SPRING_DATASOURCE_PASSWORD: <PASSWORD>
SERVICES_ATTACHMENT_URL: http://example.com/knowledgebase-service/
WIKI_URL: http://example.com
WIKI_TOKEN: <PASSWORD>
ELASTICSEARCH_IP: 127.0.0.1:9200
metrics:
path: /actuator/prometheus
group: spring-boot
logs:
parser: spring-boot
persistence:
enabled: false
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
# subPath:
service:
enabled: false
type: ClusterIP
port: 8280
ingress:
enabled: false
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
# cpu: 100m
memory: 3Gi
requests:
# cpu: 100m
memory: 2Gi
|
charts/knowledgebase-service/values.yaml
|
- name: Linux s Visual Studio C++ (dokumentace)
href: ../linux/index.yml
- name: 'Stažení, instalace a nastavení úloh pro vývoj pro Linux'
href: ../linux/download-install-and-setup-the-linux-development-workload.md
- name: Připojení ke vzdálenému počítači s Linuxem
expanded: true
items:
- name: Připojení ke vzdálenému počítači s Linuxem
href: ../linux/connect-to-your-remote-linux-computer.md
- name: Nastavení zabezpečeného vzdáleného vývoje pro Linux kompatibilního se standardem FIPS
href: ../linux/set-up-fips-compliant-secure-remote-linux-development.md
- name: ConnectionManager – referenční dokumentace
href: ../linux/connectionmanager-reference.md
- name: Vytvoření nového projektu Linux
href: ../linux/create-a-new-linux-project.md
- name: Konfigurace projektu Linux
href: ../linux/configure-a-linux-project.md
- name: Vytvoření a konfigurace projektu Linux CMake
href: ../linux/cmake-linux-project.md
- name: Konfigurace linuxových projektů pro použití sanitizéru adres
href: ../linux/linux-asan-configuration.md
- name: 'Nasazení, spuštění a ladění projektu Linux'
href: ../linux/deploy-run-and-debug-your-linux-project.md
- name: 'Kurz: Vytváření projektů v jazyce C++ pro různé platformy v sadě Visual Studio'
href: ../build/get-started-linux-cmake.md
- name: Odkaz na stránku vlastností projektu Linux
expanded: false
items:
- name: Odkaz na stránku vlastností projektu Linux
href: ../linux/prop-pages-linux.md
- name: Obecné vlastnosti (Linux)
href: ../linux/prop-pages/general-linux.md
- name: Vlastnosti ladění (Linux)
href: ../linux/prop-pages/debugging-linux.md
- name: Vlastnosti adresářů VC++ (Linux)
href: ../linux/prop-pages/directories-linux.md
- name: Kopírování vlastností zdrojů (Linux)
href: ../linux/prop-pages/copy-sources-project.md
- name: Vlastnosti C/C++ (Linux)
href: ../linux/prop-pages/c-cpp-linux.md
- name: Vlastnosti linkeru (Linux)
href: ../linux/prop-pages/linker-linux.md
- name: Vlastnosti události sestavení (Linux)
href: ../linux/prop-pages/build-events-linux.md
- name: Vlastnosti vlastního kroku sestavení (Linux)
href: ../linux/prop-pages/custom-build-step-linux.md
- name: Vlastnosti projektu souboru pravidel (Linux)
href: ../linux/prop-pages/makefile-linux.md
- name: Vlastnosti vzdáleného archivu (Linux)
href: ../linux/prop-pages/remote-ar-linux.md
|
docs/linux/toc.yml
|
on:
push:
branches:
- master
pull_request:
branches:
- master
name: master
jobs:
linux:
name: Build Linux
container:
image: ubuntu:xenial
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
with:
fetch-depth: 1
- name: Install dependencies
run: ./install.dependencies.sh
working-directory: build/linux
- name: Checkout libde265
run: ./checkout.sh
working-directory: src/libde265
- name: Checkout aom
run: ./checkout.sh
working-directory: src/aom
- name: Checkout libheif
run: ./checkout.sh
working-directory: src/libheif
- name: Build Native
run: ../build/linux/build.Native.sh
working-directory: src
- name: Copy Native
run: ../../build/linux/copy.Native.sh ../../artifacts
working-directory: src/Carbon.Codecs.Heif.Native
- name: Upload artifacts
uses: actions/upload-artifact@v1
with:
name: linux
path: artifacts
test_linux:
name: Test Linux
needs: linux
container:
image: mcr.microsoft.com/dotnet/sdk:5.0
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
with:
fetch-depth: 1
- name: Download artifacts
uses: actions/download-artifact@v1
with:
name: linux
- name: Copy linux library
run: |
mkdir -p tests/Carbon.Codecs.Heif.Tests/bin/Release/net5.0
cp linux/* tests/Carbon.Codecs.Heif.Tests/bin/Release/net5.0
- name: Build and test Heif
run: ../../build/linux/test.Heif.sh
working-directory: tests/Carbon.Codecs.Heif.Tests
macos:
name: Build and Test MacOS
runs-on: macos-latest
steps:
- uses: actions/checkout@v1
with:
fetch-depth: 1
- name: Install dependencies
run: ./install.dependencies.sh
working-directory: build/macos
- name: Checkout libde265
run: ./checkout.sh
working-directory: src/libde265
- name: Checkout aom
run: ./checkout.sh
working-directory: src/aom
- name: Checkout libheif
run: ./checkout.sh
working-directory: src/libheif
- name: Build Native
run: ../build/macos/build.Native.sh
working-directory: src
- name: Copy Native
run: ../../build/macos/copy.Native.sh ../../artifacts
working-directory: src/Carbon.Codecs.Heif.Native
- name: Build and test Heif
run: ../../build/macos/test.Heif.sh
working-directory: tests/Carbon.Codecs.Heif.Tests
- name: Upload artifacts
uses: actions/upload-artifact@v1
with:
name: macos
path: artifacts
windows:
name: Build and Test Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
with:
fetch-depth: 1
- name: Install dependencies
run: ./install.dependencies.cmd
working-directory: build/windows
- name: Checkout libde265
run: ./checkout.cmd
working-directory: src/libde265
- name: Checkout aom
run: ./checkout.cmd
working-directory: src/aom
- name: Checkout libheif
run: ./checkout.cmd
working-directory: src/libheif
- name: Build Heif
run: ./build.Heif.cmd
working-directory: build/windows
- name: Test Heif
run: ./test.Heif.cmd
working-directory: build/windows
- name: Copy Heif
run: ./copy.Heif.cmd ../../artifacts
working-directory: build/windows
- name: Upload artifacts
uses: actions/upload-artifact@v1
with:
name: windows
path: artifacts
publish:
name: Publish NuGet package
needs:
- test_linux
- macos
- windows
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
with:
fetch-depth: 1
- name: Install dependencies
run: ./install.dependencies.cmd
working-directory: build/windows
- name: Download linux artifacts
uses: actions/download-artifact@v1
with:
name: linux
path: publish\linux
- name: Download macos artifacts
uses: actions/download-artifact@v1
with:
name: macos
path: publish\macos
- name: Download windows artifacts
uses: actions/download-artifact@v1
with:
name: windows
path: publish\windows
- name: Create NuGet package
working-directory: publish
run: |
mkdir output
..\build\windows\nuget.exe pack Carbon.Codecs.Heif.nuspec -OutputDirectory output
- name: Upload artifacts
uses: actions/upload-artifact@v1
with:
name: package
path: publish/output
|
.github/workflows/master.yml
|
- name: Check if custom password is specified for root user
debug:
msg: >
WARNING DETECTED !!! the password for mysql root user is empty.
Please set a password with a variable mysql_root_password on defaults/main.yml
      to secure the root user.
when: not mysql_root_password
- name: Check if mysql root password was not set
shell: >
mysql -u root
-h localhost
-e "quit"
changed_when: false
ignore_errors: true
register: check_passwd_root
- name: Check if unix_socket plugin is enabled
shell: >
mysql -u root
-h localhost
-e "SELECT plugin from mysql.user WHERE user = 'root'"
ignore_errors: true
register: check_unix_socket_plugin
changed_when: check_unix_socket_plugin.stdout is search('socket')
when: check_passwd_root.rc == 0
- name: Disable unix_socket plugin if set
shell: >
mysql -u root
-h localhost
-e "UPDATE mysql.user SET plugin = '' WHERE user = 'root'; FLUSH PRIVILEGES;"
when:
- check_passwd_root.rc == 0
- check_unix_socket_plugin.stdout is search('socket')
- name: Delete remote login connection
shell: >
mysql -u root
-h localhost
-e "delete from mysql.user where User='root' and Host NOT IN ('localhost', '127.0.0.1');"
when:
- allow_remote_connections
- check_passwd_root.rc == 0
- name: Set MariaDB root password for 127.0.0.1, localhost
mysql_user:
name: root
password: "{{ <PASSWORD> }}"
host: "{{ item }}"
login_user: root
#login_password: "{{ <PASSWORD> }}"
login_unix_socket: "{{ mariadb_socket }}"
state: present
with_items:
- 127.0.0.1
- localhost
when: check_passwd_root.rc == 0
notify: Flush Priviliges
- name: Remove all anonymous user
mysql_user:
login_user: root
login_password: "{{ <PASSWORD> }}"
    name: ''  # the empty user name matches MySQL anonymous accounts
host_all: yes
state: absent
notify: Flush Priviliges
- name: Remove test database
mysql_db:
login_user: "root"
login_password: "{{ <PASSWORD> }}"
db: "test"
state: absent
register: remove_test_db
notify:
- Remove test database priviledges
- Flush Priviliges
|
roles/mariadb/tasks/mysql_secure.yml
|
---
#- hosts: collector
# sudo: yes
# tasks:
- name: "enable supervisor analytics"
file:
path: "/etc/init/supervisor-analytics.override"
state: "absent"
- name: "redis-setup"
  include: redis-setup.yml
- name: "node-common"
  include: node-common.yml
- name: "fix up contrail collector config"
template:
src: "../../templates/provision/contrail-collector-conf.j2"
dest: "/etc/contrail/contrail-collector.conf"
- name: "fix up contrail query engine config"
template:
src: "../../templates/provision/contrail-query-engine-conf.j2"
dest: "/etc/contrail/contrail-query-engine.conf"
- name: "fix up contrail analytics api config"
template:
src: "../../templates/provision/contrail-analytics-api-conf.j2"
dest: "/etc/contrail/contrail-analytics-api.conf"
- name: "modify contrail analytics nodemgr config"
ini_file:
dest: "/etc/contrail/contrail-analytics-nodemgr.conf"
section: "DISCOVERY"
option: "server"
value: "{{ contrail_haproxy_address }}"
- name: "fix up contrail keystone auth config"
template:
src: "../../templates/provision/contrail-keystone-auth-conf.j2"
dest: "/etc/contrail/contrail-keystone-auth.conf"
force: no
- name: "delete contrail alarm gen supervisord config file"
file:
dest: "/etc/contrail/supervisord_analytics_files/contrail-alarm-gen.ini"
state: "absent"
- name: "modify contrail snmp collector config file"
ini_file:
dest: "/etc/contrail/contrail-snmp-collector.conf"
section: "{{ item.section }}"
option: "{{ item.option }}"
value: "{{ item.value }}"
with_items:
- { section: "DEFAULTS", option: "zookeeper", value: "{{ contrail_address }}:2181" }
- { section: "DISCOVERY", option: "disc_server_ip", value: "{{ contrail_haproxy_address }}" }
- { section: "DISCOVERY", option: "disc_server_port", value: "5998" }
- name: "modify contrail snmp collector ini file"
ini_file:
dest: "/etc/contrail/supervisord_analytics_files/contrail-snmp-collector.ini"
section: "program:contrail-snmp-collector"
option: "command"
value: "/usr/bin/contrail-snmp-collector --conf_file /etc/contrail/contrail-snmp-collector.conf --conf_file /etc/contrail/contrail-keystone-auth.conf"
- name: "modify contrail topology config file"
ini_file:
dest: "/etc/contrail/contrail-topology.conf"
section: "DEFAULTS"
option: "zookeeper"
value: "{{ contrail_address }}"
- name: "modify contrail topology ini file"
ini_file:
dest: "/etc/contrail/supervisord_analytics_files/contrail-topology.ini"
section: "program:contrail-topology"
option: "command"
value: "/usr/bin/contrail-topology --conf_file /etc/contrail/contrail-topology.conf"
- name: "restart supervisor analytics"
service:
name: "supervisor-analytics"
state: "restarted"
|
ansible/roles/open-contrail/tasks/provision/provision-collector.yml
|
server:
port: 8097
spring:
application:
name: javayh-nacos-zuul
profiles:
active: zuul
cloud:
nacos:
discovery:
server-addr: 127.0.0.1:8848
cluster-name: javayh-nacos
config:
group: javayh
server-addr: 127.0.0.1:8848
prefix: javayh-nacos-provider
file-extension: yml
security:
user:
name: admin
password: <PASSWORD>
zipkin:
base-url: http://localhost:8096
# 关闭服务发现,否则Spring Cloud会把zipkin的url当做服务名称
discoveryClientEnabled: false
sender:
type: web
sleuth:
sampler:
probability: 1 # 设置抽样采集率为100%,默认为0.1,即10%
#--------------------Zuul-----------------------
zuul:
routes:
provider:
path: /provider/**
serviceId: javayh-nacos-provider
sensitiveHeaders: "*"
consumer:
path: /consumer/**
serviceId: javayh-nacos-consumer
sensitiveHeaders: "*"
auth:
path: /auth/**
serviceId: javayh-nacos-oauth
sensitiveHeaders: "*"
retryable: false
ignored-services: "*"
ribbon:
eager-load:
enabled: true
host:
connect-timeout-millis: 60000
socket-timeout-millis: 60000
add-proxy-headers: true
javayh-nacos-oauth:
ribbon:
ConnectTimeout: 500
ReadTimeout: 500
#---------------------OAuth2---------------------
security:
oauth2:
client:
access-token-uri: http://localhost:${server.port}/auth/oauth/token
user-authorization-uri: http://localhost:${server.port}/auth/oauth/authorize
client-id: web
resource:
user-info-uri: http://localhost:${server.port}/auth/javayh/member
prefer-token-info: false
#----------------------超时配置-------------------
ribbon:
ReadTimeout: 60000
ConnectTimeout: 60000
MaxAutoRetries: 2
MaxAutoRetriesNextServer: 2
eureka:
enabled: true
hystrix:
command:
default:
execution:
timeout:
enabled: true
isolation:
thread:
timeoutInMilliseconds: 60000
|
javayh-nacos-zuul/src/main/resources/bootstrap.yml
|
# Set up to run the Java build script per the Travis CI documentation
language: java
# Configure the build to using the minimum and the latest stable Oracle JDKs and Open JDKs
# Consider adding openjdk-ea and/or oraclejdk-ea to test early access JDKs
jdk:
- openjdk11
- oraclejdk11
before_cache:
- rm -f $HOME/.gradle/caches/modules-2/modules-2.lock
- rm -fr $HOME/.gradle/caches/*/plugin-resolution/
cache:
directories:
- $HOME/.gradle/caches/
- $HOME/.gradle/wrapper/
# Don't fail the build on the early access JDK, or wait for it to finish before marking it done.
matrix:
fast_finish: true
allow_failures:
- jdk: openjdk-ea
# Install gradle wrapper based on version defined in wrapper.gradle
before_install:
# Use Java 8 to install the wrapper as this command will fail in Java 11.
- JAVA_HOME=`jdk_switcher home openjdk8` gradle -b wrapper.gradle wrapper
# Clean up project before running build script.
install:
# Using clean as dummy target; could install dependencies here if needed.
- ./gradlew clean
# Use xvfb to run tests that require a GUI.
services:
- xvfb
before_script:
- "export DISPLAY=:99.0"
# Build the project
script:
- ./gradlew build javadoc
# Deploy develop branch javadocs to gh_pages, and create and deploy CHANGELOG.md on tags
deploy:
# Publish SNAPSHOT artifacts to oss.jfrog.org
- provider: script
script: ./gradlew artifactoryPublish -Psnapshot -Pbuild.number=$TRAVIS_BUILD_NUMBER --stacktrace
skip_cleanup: true
on:
branch: develop
      jdk: oraclejdk11
# Publish API documentation to GitHub Pages
- provider: pages
github_token: $GITHUB_API_KEY
local_dir: build/docs/javadoc
skip_cleanup: true
on:
branch: develop
      jdk: oraclejdk11
# Publish release artifacts to Bintray/JCenter
- provider: script
script: ./gradlew bintrayUpload -Prelease --stacktrace
skip_cleanup: true
on:
tags: true
      jdk: oraclejdk11
# Create CHANGELOG.md in the current directory
- provider: script
script: ./travis/changelog.sh >> CHANGELOG.md
skip_cleanup: true
on:
tags: true
      jdk: oraclejdk11
# Create a GitHub release and publish CHANGELOG.md to the release assets
- provider: releases
api_key: $GITHUB_API_KEY
file: CHANGELOG.md
skip_cleanup: true
on:
tags: true
      jdk: oraclejdk11
|
.travis.yml
|
{{- if .Values.nautobot.autoscaling.enabled }}
---
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "common.names.fullname" . }}
labels:
app.kubernetes.io/component: nautobot
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "common.names.fullname" . }}
minReplicas: {{ .Values.nautobot.autoscaling.minReplicas }}
maxReplicas: {{ .Values.nautobot.autoscaling.maxReplicas }}
metrics:
{{- toYaml .Values.nautobot.autoscaling.metrics | nindent 4 }}
behavior:
{{- toYaml .Values.nautobot.autoscaling.behavior | nindent 4 }}
{{- end }}
{{- if .Values.celeryWorker.autoscaling.enabled }}
---
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "common.names.fullname" . }}-celery-worker
labels:
app.kubernetes.io/component: nautobot-celery-worker
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "common.names.fullname" . }}-celery-worker
minReplicas: {{ .Values.celeryWorker.autoscaling.minReplicas }}
maxReplicas: {{ .Values.celeryWorker.autoscaling.maxReplicas }}
metrics:
{{- toYaml .Values.celeryWorker.autoscaling.metrics | nindent 4 }}
behavior:
{{- toYaml .Values.celeryWorker.autoscaling.behavior | nindent 4 }}
{{- end }}
{{- if and .Values.rqWorker.enabled .Values.rqWorker.autoscaling.enabled }}
---
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "common.names.fullname" . }}-rq-worker
labels:
app.kubernetes.io/component: nautobot-rq-worker
{{- if .Values.commonLabels }}
{{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "common.names.fullname" . }}-rq-worker
minReplicas: {{ .Values.rqWorker.autoscaling.minReplicas }}
maxReplicas: {{ .Values.rqWorker.autoscaling.maxReplicas }}
metrics:
{{- toYaml .Values.rqWorker.autoscaling.metrics | nindent 4 }}
behavior:
{{- toYaml .Values.rqWorker.autoscaling.behavior | nindent 4 }}
{{- end }}
|
charts/nautobot/templates/hpa.yaml
|
---
- name: Update apt cache if needed.
apt:
update_cache: yes
cache_valid_time: 3600
- name: "Install Apache."
apt:
name: "{{ item }}"
state: present
with_items:
- apache2
- apache2-bin
- apache2-data
- apache2-utils
- apachetop
- name: Enable Apache headers module.
apache2_module:
state: present
name: headers
- name: Copy Apache ServerName to prevent notices.
template:
src: httpd.conf.j2
dest: /etc/apache2/conf-available/httpd.conf
mode: 0644
- name: Copy Apache ports configuration.
template:
src: ports.conf.j2
dest: /etc/apache2/ports.conf
mode: 0644
- name: Check for existing httpd.conf.
stat:
path: /etc/apache2/conf-enabled/httpd.conf
register: apache_httpd_conf
- name: Enable Apache httpd configuration file.
command: /usr/sbin/a2enconf httpd
when: apache_httpd_conf.stat.islnk is not defined
- name: "Install PHP."
apt:
name: "{{ item }}"
state: present
with_items:
- php-pear
- php-xml-parser
- php5
- php5-apcu
- php5-cgi
- php5-cli
- php5-common
- php5-curl
- php5-dev
- php5-gd
- php5-imagick
- php5-intl
- php5-json
- php5-mcrypt
- php5-mysqlnd
- php5-pgsql
- php5-readline
- php5-recode
- php5-sqlite
- php5-xmlrpc
- libapache2-mod-php5
notify: restart apache2
- name: Increase default php5-apcu shm size.
lineinfile:
dest: /etc/php5/apache2/conf.d/20-apcu.ini
regexp: "^apc.shm"
line: "apc.shm_size=128M"
state: present
- name: Install Python software properties.
apt:
name: python-software-properties
state: present
- name: Check if MariaDB is already installed.
stat:
path: /etc/init.d/mysql
register: mariadb_installed
- name: Update apt cache if MariaDB is not yet installed.
apt:
update_cache: yes
  when: not mariadb_installed.stat.exists
- name: Install MariaDB Server.
apt:
name: "{{ item }}"
state: present
with_items:
- mariadb-server
- mariadb-client
- mariadb-common
- python-mysqldb
register: mariadb_install_packages
when: ansible_distribution == "Debian"
- name: Install MySQL Server.
apt:
name: "{{ item }}"
state: present
with_items:
- mysql-common-5.6
- mysql-client-core-5.6
- mysql-client-5.6
- mysql-server-core-5.6
- mysql-server-5.6
- python-mysqldb
register: mariadb_install_packages
when: ansible_distribution == "Ubuntu" and ansible_distribution_release == "trusty"
- include: secure-apache.yml
- include: secure-mariadb.yml
- include: hosts.yml
- include: owncloud.yml
|
roles/lamp/tasks/main.yml
|
documentType: LandingData
title: Azure VPN Gateway Belgeleri
metadata:
document_id: null
title: 'VPN Gateway Belgeleri - Öğreticiler, API başvuruları | Microsoft Docs'
meta.description: 'Learn how to configure, create, and manage an Azure VPN gateway. Create encrypted cross-premises connections to your virtual network from on-premises locations, or create encrypted connections between VNets.'
services: vpn-gateway
author: cherylmc
manager: jeconnoc
ms.service: vpn-gateway
ms.tgt_pltfrm: na
ms.devlang: na
ms.topic: landing-page
ms.date: 04/26/2018
ms.author: cherylmc
abstract:
description: 'Bir Azure VPN ağ geçidi yapılandırmayı, oluşturmayı ve yönetmeyi öğrenin. Şirket içi konumlardan sanal ağınıza şifrelenmiş şirket içi ve dışı bağlantılar veya VNet’ler arasında şifrelenmiş bağlantılar oluşturun.'
sections:
- title: Adım Adım Öğreticiler
items:
- type: paragraph
text: 'Azure VPN ağ geçitlerini oluşturmayı, yapılandırmayı ve yönetmeyi öğrenin.'
- type: list
style: unordered
className: spaced noBullet
items:
- html: <a href="/azure/vpn-gateway/vpn-gateway-tutorial-create-gateway-powershell">VPN ağ geçidi oluşturma ve yönetme</a>
- title: Ücretsiz Pluralsight Video Eğitimi
items:
- type: list
style: cards
className: cardsFTitle
items:
- title: Azure Yöneticisi
href: 'https://go.microsoft.com/fwlink/?linkid=2012827'
image:
src: 'https://docs.microsoft.com/media/logos/logo_pluralsight.svg'
href: 'https://go.microsoft.com/fwlink/?linkid=2012827'
- title: Azure Çözüm Mimarı
href: 'https://go.microsoft.com/fwlink/?linkid=2012824'
image:
src: 'https://docs.microsoft.com/media/logos/logo_pluralsight.svg'
href: 'https://go.microsoft.com/fwlink/?linkid=2012824'
- title: Örnekler
items:
- type: paragraph
text: Sık kullanılan görevler için betik örnekleri.
- type: list
style: unordered
className: spaced noBullet
items:
- html: <a href="/azure/vpn-gateway/powershell-samples">PowerShell</a>
- title: Başvuru
items:
- type: list
style: cards
className: cardsD
items:
- title: Komut Satırı
html: '<p><a href="/powershell/module/az.network#vpn">PowerShell</a></p><p><a href="/cli/azure/network/vnet-gateway">Azure CLI</a></p>'
- title: Diller
html: <p><a href="/dotnet/api/overview/azure/virtual-network">.NET</a></p><p><a href="/java/api/overview/azure/network">Java</a></p><p><a href="/javascript/api/overview/azure/virtual-network">Node.js</a></p><p><a href="/python/api/overview/azure/network">Python</a></p>
- title: REST
html: <p><a href="/rest/api/network-gateway/">REST API başvurusu</a></p>
|
articles/vpn-gateway/index.yml
|
name: Python Performance
on:
workflow_dispatch:
push:
branches: [ main ]
paths:
- 'binding/python/test_cobra_perf.py'
- 'lib/beaglebone/**'
- 'lib/jetson/**'
- 'lib/linux/**'
- 'lib/mac/**'
- 'lib/raspberry-pi/**'
- 'lib/windows/**'
- 'res/**'
pull_request:
branches: [ main ]
paths:
- 'binding/python/test_cobra_perf.py'
- 'lib/beaglebone/**'
- 'lib/jetson/**'
- 'lib/linux/**'
- 'lib/mac/**'
- 'lib/raspberry-pi/**'
- 'lib/windows/**'
- 'res/**'
defaults:
run:
working-directory: binding/python
jobs:
perf-github-hosted:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
include:
- os: ubuntu-latest
performance_threshold_sec: 0.006
- os: windows-latest
performance_threshold_sec: 0.006
- os: macos-latest
performance_threshold_sec: 0.006
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- name: Set up Python '3.10'
uses: actions/setup-python@v2
with:
python-version: '3.10'
- name: Pre-build dependencies
run: python -m pip install --upgrade pip
- name: Install dependencies
run: pip install -r requirements.txt
- name: Test
run: python test_cobra_perf.py ${{secrets.PV_VALID_ACCESS_KEY}} 100 ${{matrix.performance_threshold_sec}}
perf-self-hosted:
runs-on: ${{ matrix.machine }}
strategy:
fail-fast: false
matrix:
machine: [rpi2, rpi3-32, rpi3-64, rpi4-32, rpi4-64, jetson, beaglebone]
include:
- machine: rpi2
num_test_iterations: 50
performance_threshold_sec: 0.08
- machine: rpi3-32
num_test_iterations: 100
performance_threshold_sec: 0.05
- machine: rpi3-64
num_test_iterations: 100
performance_threshold_sec: 0.05
- machine: rpi4-32
num_test_iterations: 100
performance_threshold_sec: 0.023
- machine: rpi4-64
num_test_iterations: 100
performance_threshold_sec: 0.022
- machine: jetson
num_test_iterations: 100
performance_threshold_sec: 0.025
- machine: beaglebone
num_test_iterations: 50
performance_threshold_sec: 0.25
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- name: Pre-build dependencies
run: python3 -m pip install --upgrade pip
- name: Install dependencies
run: pip3 install -r requirements.txt
- name: Machine state before
working-directory: res/scripts
run: bash machine-state.sh
- name: Test
run: python3 test_cobra_perf.py ${{secrets.PV_VALID_ACCESS_KEY}} ${{matrix.num_test_iterations}} ${{matrix.performance_threshold_sec}}
- name: Machine state after
working-directory: res/scripts
run: bash machine-state.sh
|
.github/workflows/python-perf.yml
|
name: production deployment
on:
push:
branches:
- master
env:
WEB_IMAGE: ghcr.io/$GITHUB_REPOSITORY/web
NGINX_IMAGE: ghcr.io/$GITHUB_REPOSITORY/nginx
jobs:
deploy:
name: Deploy to Prod server
runs-on: ubuntu-latest
steps:
- name: Checkout master
uses: actions/checkout@v1
- name: Set environment variables for container names
run: |
echo "WEB_IMAGE=${{ env.WEB_IMAGE }}" >> $GITHUB_ENV
echo "NGINX_IMAGE=${{ env.NGINX_IMAGE }}" >> $GITHUB_ENV
- name: Add environment variables to .env.prod and .env.prod.db
run: |
echo DEBUG=0 >> .env.prod
echo SECRET_KEY=${{ secrets.PROD_SECRET_KEY }} >> .env.prod
echo DJANGO_ALLOWED_HOSTS=${{ secrets.PROD_ALLOWED_HOSTS }} >> .env.prod
echo SQL_ENGINE=django.db.backends.postgresql >> .env.prod
echo SQL_DATABASE=${{ secrets.PROD_SQL_DATABASE }} >> .env.prod
echo SQL_USER=${{ secrets.PROD_SQL_USER }} >> .env.prod
echo SQL_PASSWORD=${{ secrets.PROD_SQL_PASSWORD }} >> .env.prod
echo SQL_HOST=${{ secrets.PROD_SQL_HOST }} >> .env.prod
echo SQL_PORT=${{ secrets.PROD_SQL_PORT }} >> .env.prod
echo AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} >> .env.prod
echo AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} >> .env.prod
echo AWS_STORAGE_BUCKET_NAME=${{ secrets.AWS_STORAGE_BUCKET_NAME }} >> .env.prod
echo EMAIL_USER=${{ secrets.EMAIL_USER }} >> .env.prod
echo EMAIL_PASS=${{ secrets.EMAIL_PASS }} >> .env.prod
echo DATABASE=postgres >> .env.prod
echo WEB_IMAGE=${{ env.WEB_IMAGE }} >> .env.prod
echo NGINX_IMAGE=${{ env.NGINX_IMAGE }} >> .env.prod
echo POSTGRES_USER=${{ secrets.PROD_SQL_USER }} >> .env.prod.db
echo POSTGRES_PASSWORD=${{ secrets.PROD_SQL_PASSWORD }} >> .env.prod.db
echo POSTGRES_DB=${{ secrets.PROD_SQL_DATABASE }} >> .env.prod.db
- name: Install SSH Client 🔑
uses: webfactory/ssh-agent@v0.4.1 # This step installs the ssh client into the workflow run. There's many options available for this on the action marketplace.
with:
ssh-private-key: ${{ secrets.PROD_PRIVATE_KEY }}
- name: Build and deploy images on Prod server
run: |
scp -o StrictHostKeyChecking=no -r ./.env.prod ./.env.prod.db ./docker-compose.prod.yml ${{ secrets.PROD_SERVER_USER }}@${{ secrets.PROD_SERVER_IP }}:~/cmuxovik
ssh -o StrictHostKeyChecking=no ${{ secrets.PROD_SERVER_USER }}@${{ secrets.PROD_SERVER_IP }} << 'ENDSSH'
cd ~/cmuxovik
source .env.prod
source .env.prod.db
docker login -u $GITHUB_ACTOR -p ${{ secrets.CR_PAT }} ghcr.io
docker pull $WEB_IMAGE
docker pull $NGINX_IMAGE
env $(cat .env.prod) docker-compose -f docker-compose.prod.yml up -d
ENDSSH
|
.github/workflows/prod.yml
|
# Symfony routing: backend screens for payments pending ownership approval.
# Every route maps to the mycpBundle BackendPendingPayOwn controller.
# filter_* placeholders default to '' so trailing URL segments may be omitted.
mycp_list_payments_pending_ownership:
  pattern: /list/{items_per_page}/{filter_number}/{filter_code}/{filter_method}/{filter_payment_date_from}/{filter_payment_date_to}
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:list, items_per_page: 100,filter_number:'',filter_code:'',filter_method:'', filter_payment_date_from:'',filter_payment_date_to:''}

mycp_details_payment_pending_ownership:
  pattern: /detail/{id}
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:detail }

# NOTE(review): "paymen" is a typo for "payment", but route names are
# referenced by generateUrl()/templates — rename only with all call sites.
mycp_details_paymen_agency_pending_ownership:
  pattern: /agency/detail/{id}
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:detailAgency }

mycp_edit_payment_pending_ownership:
  pattern: /edit/{id}
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:edit }

mycp_save_payment_pending_ownership:
  pattern: /save
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:save }

mycp_export_list_payment_pending_ownership:
  pattern: /export/{filter_number}/{filter_code}/{filter_method}/{filter_payment_date_from}/{filter_payment_date_to}
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:export,filter_number:'',filter_code:'',filter_method:'', filter_payment_date_from:'',filter_payment_date_to:''}

# Agency-side variants: same controller, extra booking/destination filters.
mycp_list_payments_agency_pending_ownership:
  pattern: /agency/list/{items_per_page}/{filter_number}/{filter_code}/{filter_method}/{filter_payment_date_from}/{filter_payment_date_to}/{filter_agency}/{filter_booking}/{filter_destination}/{filter_type}/{filter_reservation_date_from}/{filter_reservation_date_to}
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:listFromAgency, items_per_page: 100,filter_number:'',filter_code:'',filter_method:'', filter_payment_date_from:'',filter_payment_date_to:'', filter_agency:'', filter_booking:'', filter_destination:'', filter_type:'', filter_reservation_date_from:'', filter_reservation_date_to:''}

mycp_edit_payment_agency_pending_ownership:
  pattern: /agency/edit/{id}
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:editAgency }

mycp_save_payment_agency_pending_ownership:
  pattern: /agency/save
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:saveAgency }

mycp_export_payments_agency_pending_ownership:
  pattern: /agency/export/{items_per_page}/{filter_number}/{filter_code}/{filter_method}/{filter_payment_date_from}/{filter_payment_date_to}/{filter_agency}/{filter_booking}/{filter_destination}/{filter_type}/{filter_reservation_date_from}/{filter_reservation_date_to}
  defaults: { _controller: mycpBundle:BackendPendingPayOwn:exportAgency, items_per_page: 100,filter_number:'',filter_code:'',filter_method:'', filter_payment_date_from:'',filter_payment_date_to:'', filter_agency:'', filter_booking:'', filter_destination:'', filter_type:'', filter_reservation_date_from:'', filter_reservation_date_to:''}
|
src/MyCp/mycpBundle/Resources/config/routingBackendPaymentPendingOwnership.yml
|
# Career history rendered on the personal site.
# Each entry: company (display name), website (URL), time (free-form string
# range — plain scalars like 2013-2015 parse as strings, not dates),
# description (Markdown block, kept verbatim in Russian — it is site content).
career:
  - company: Simtech Group
    website: https://simtech.ru/
    time: 2013-2015
    description: |
      Работал офисным сисадмином: заказывал оборудование, устанавливал ОС и конфигурировал ПО, ремонтировал оргтехнику и отвечал на вопросы пользователей.
      Поддерживал и развивал серверный парк для коммерческих веб-проектов, следил за стабильностью их работы.
      За два года я установил кластер для коммерческих проектов компании и автоматизировал его развёртку. Настроил системный мониторинг в компании, улучшив качество работы сервисов.
      Запустил IP-телефонию для отдела продаж, а также упростил документооборот для бухгалтерии в офисе. Задокументировал инфраструктуру и запустил первую базу знаний компании.
  - company: Simtech Development
    website: https://simtechdev.com/
    time: 2016-2017
    description: |
      Основал [проект хостинг-платформы для интернет-магазинов](https://simtechdev.com/services/servers-infrastructure/cloud-hosting/) в составе обособленного подразделения компании-интегратора.
      Управлял небольшой командой эксплуатации, автоматизировал процессы и инфраструктуру, вёл деловую переписку с заказчиками и занимался продвижением услуг.
      Организовал работу с отделом продаж, разработки и технической поддержки. Построил финансовую модель, разработал тарифные планы услуги и
      разработал стратегии продвижения среди клиентов.
      Установил, настроил и автоматизировал ядро услуги, запустил мониторинг и сервис резервного копирования.
  # "2015-..." marks an ongoing position.
  - company: FunBox
    website: https://funbox.ru/
    time: 2015-...
    description: |
      Администрирую внутреннюю и внешнюю инфраструктуры для коммерческих B2B и B2C проектов. Запустил более 10 проектов:
      от проектирования архитектуры до передачи в эксплуатацию команде поддержки и, реже, вывода из эксплуатации.
      Организовал техническую площадку для митапов, помогая сотрудникам обмениваться знаниями. Выпускаю [видеоролики на YouTube](https://youtube.com/c/FunBoxTeam)
      и развиваю DevRel: помогаю компании строить HR-бренд и привлекать соискателей.
      [Разработал мониторинг резервного копирования](https://github.com/funbox/bacula_exporter), ускорив время реакции на проблемы при сохранении данных.
      Запустил георезервированный, отказоустойчивый и масштабируемый Kafka-кластер для обслуживания трафика свыше 1Gbps. Настроил мониторинг
      брокеров Kafka для интеграции в командах разработки.
      Запустил единую систему визуализации метрик, улучшив качество мониторинга и поддержки проектов.
      Запустил георезервированный и масштабируемый ClickHouse-кластер для мониторинга и решения бизнес-задач коммерческих проектов.
      Построил [централизованную систему сбора логов с балансировщиков нагрузки](/blog/nginx-log-processing-with-clickhouse/), ускорив
      до 100 раз время поиска событий инженерами техподдержки.
      Запустил офисную [систему контроля доступа и видеонаблюдения](/blog/how-to-maintain-pacs/), а также улучшил качество работы с охранным агентством.
      Благодаря изменениям избавились от инцидентов безопасности.
      Запустил хранилище артефактов для релизов приложений, что позволило консолидировать общие зависимости в проектах.
      Это изменение оказало существенное влияние на развитие разработки, CI и качества тестирования проектов.
      Запустил корпоративный Kubernetes-кластер для проектов компании.
|
data/career.yaml
|
# GitHub Actions CI for an Elixir/Phoenix app ("turret"): runs the mix test
# suite against a Postgres service, then builds the Docker image.
name: Continuous Integration
on: push
jobs:
  tests:
    name: Tests
    runs-on: ubuntu-latest
    env:
      MIX_ENV: "test"
    services:
      postgres:
        image: postgres:13
        ports:
          - 5432:5432
        env:
          POSTGRES_USER: postgres
          # NOTE(review): "<PASSWORD>" is a redaction placeholder — the value
          # must match config/test.exs; confirm before relying on this file.
          POSTGRES_PASSWORD: <PASSWORD>
          POSTGRES_DB: turret_test
        # Wait until Postgres answers before the steps run.
        options: >-
          --health-cmd pg_isready
          --health-interval 2s
          --health-timeout 2s
          --health-retries 20
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Setup Elixir
        uses: erlef/setup-beam@v1
        with:
          elixir-version: "1.11.3"
          otp-version: "23.2"
      - name: Setup Node.js
        uses: actions/setup-node@v2
        with:
          node-version: 14.x
      # deps and _build are cached together, keyed on mix.lock.
      - name: Cache mix dependencies and _build directory
        id: cache-mix-dependencies
        uses: actions/cache@v2.1.6
        with:
          path: |
            deps
            _build
          key: ${{ runner.os }}-mix-${{ hashFiles('**/mix.lock') }}
      - name: Install mix dependencies
        if: steps.cache-mix-dependencies.outputs.cache-hit != 'true'
        run: mix deps.get
      - name: Cache npm dependencies
        id: cache-npm-dependencies
        uses: actions/cache@v2.1.6
        with:
          path: assets/node_modules
          key: ${{ runner.os }}-npm-${{ hashFiles('**/assets/yarn.lock') }}
      - name: Install npm dependencies
        if: steps.cache-npm-dependencies.outputs.cache-hit != 'true'
        run: yarn install --frozen-lockfile
        working-directory: ./assets
      - name: Build static assets
        run: yarn deploy
        working-directory: ./assets
      - name: Compile
        run: mix compile
      - name: Run Elixir tests
        run: mix test
  # Independent job: verifies the Dockerfile still builds on every push.
  docker:
    name: Docker
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Build the Docker image
        run: docker build -t turret:latest .
|
.github/workflows/ci.yml
|
---
# Scaffold an Ansible project skeleton on localhost: role/plugin directories,
# per-role task/handler/var files, Azure dynamic-inventory scripts and an
# empty main playbook.
- hosts: localhost
  connection: local
  gather_facts: true
  tasks:
    - name: Install needed pip dependencies
      pip:
        name: "{{ item.name }}"
        version: "{{ item.version }}"
      loop:
        - { name: 'azure', version: '3.0.0' }
        - { name: 'ansible[azure]', version: '2.5.3' }
      become: true
    - name: set ansible root folder
      set_fact:
        project: project_name
    - name: Create 1st level of folders
      file:
        state: directory
        recurse: yes
        path: "{{ playbook_dir }}/{{ project }}/{{ item }}"
      with_items:
        - [ 'library', 'module_utils', 'filter_plugins', 'roles', 'group_vars', 'host_vars' ]
    # with_nested yields every role/subdir combination.
    - name: Create 2nd level and more folders
      file:
        state: directory
        recurse: yes
        path: "{{ playbook_dir }}/{{ project }}/roles/{{ item[0] }}/{{ item[1] }}"
      with_nested:
        - [ 'common', 'web', 'updates']
        - [ 'tasks', 'handlers', 'templates', 'files', 'vars', 'defaults', 'meta', 'library', 'module_utils', 'lookup_plugins' ]
    - name: Create defaults files
      file:
        state: touch
        path: "{{ playbook_dir }}/{{ project }}/roles/{{ item[0] }}/{{ item[1] }}/{{ item[2] }}"
      with_nested:
        - [ 'common', 'web', 'updates']
        - [ 'tasks', 'handlers', 'vars', 'defaults', 'meta']
        - [ 'main.yml', 'Windows.yml', 'Linux.yml', 'Mac.yml']
    # NOTE(review): contrib/inventory no longer exists on ansible's devel
    # branch — pin a stable release tag and confirm the URL still resolves.
    - name: Download Azure inventory modules
      get_url:
        url: "https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/{{ item.name }}"
        dest: "{{ playbook_dir }}/{{ project }}/{{ item.dest }}"
      loop:
        - { name: 'azure_rm.py', dest: 'production' }
        - { name: 'azure_rm.ini', dest: 'production.ini' }
        - { name: 'azure_rm.py', dest: 'stagging' }
        - { name: 'azure_rm.ini', dest: 'stagging.ini' }
    # NOTE(review): "stagging" is a misspelling of "staging" but names the
    # generated inventory files — rename only together with every reference.
    - name: Set executable on azure_rm.py
      file:
        dest: "{{ playbook_dir }}/{{ project }}/{{ item }}"
        mode: "+x"
      loop:
        - production
        - stagging
    - name: Create playbooks
      file:
        path: "{{ playbook_dir }}/{{ project }}/{{ item }}"
        state: touch
      with_items:
        - [ 'main.yml']
|
Ansible/inventory/init.yml
|
---
# Deploy Grafana in Docker: dedicated system user/group, persistent
# config/data directories, a templated grafana.ini, and a reverse-proxy
# path published via the caddy_path role.
- name: Create group for Grafana
  group:
    name: "{{ grafana_user_group }}"
    system: true
    state: present
  register: grafana_group_info
- name: Create user for Grafana
  user:
    name: "{{ grafana_user_name }}"
    group: "{{ grafana_user_group }}"
    system: true
    state: present
  register: grafana_user_info
- name: Create config/data directories
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ grafana_user_name }}"
    group: "{{ grafana_user_group }}"
    mode: "0755"
  loop:
    - "{{ grafana_data_path }}"
    - "{{ grafana_data_path }}/config"
    - "{{ grafana_data_path }}/data"
    - "{{ grafana_data_path }}/plugins"
    - "{{ grafana_data_path }}/provisioning"
# Recursive pass fixes ownership of anything already inside the tree.
- name: Set config/data directory ownership/group
  file:
    path: "{{ grafana_data_path }}"
    state: directory
    owner: "{{ grafana_user_name }}"
    group: "{{ grafana_user_group }}"
    recurse: true
- name: Configure grafana via grafana.ini
  template:
    src: grafana.ini.j2
    dest: "{{ grafana_data_path }}/config/grafana.ini"
    owner: "{{ grafana_user_name }}"
    group: "{{ grafana_user_group }}"
    mode: "0644"
  notify: restart grafana
- name: Create and start grafana container
  docker_container:
    name: grafana
    image: "{{ grafana_docker_image }}:{{ grafana_docker_tag }}"
    pull: true
    # Only force-recreate when tracking the mutable "latest" tag.
    recreate: "{{ grafana_docker_tag == 'latest' }}"
    restart_policy: always
    state: started
    container_default_behavior: no_defaults
    # Run as the host-side user so the bind-mounted data stays owned correctly.
    user: "{{ grafana_user_info.uid }}:{{ grafana_group_info.gid }}"
    networks_cli_compatible: true
    network_mode: default
    networks:
      - name: "{{ grafana_docker_network }}"
    volumes:
      - "{{ grafana_data_path }}/config/grafana.ini:/etc/grafana/grafana.ini"
      - "{{ grafana_data_path }}/data:/var/lib/grafana"
      - "{{ grafana_data_path }}/plugins:/var/lib/grafana/plugins"
      - "{{ grafana_data_path }}/provisioning:/etc/grafana/provisioning"
    env:
      GF_INSTALL_PLUGINS: "{{ grafana_plugins | join(',') }}"
    published_ports:
      # Loopback only — Caddy fronts the UI publicly.
      - "127.0.0.1:{{ grafana_host_port }}:3000"
- name: Configure caddy
  import_role:
    name: caddy_path
  vars:
    caddy_path_name: grafana
    caddy_path_host_path: "{{ grafana_server_host_path }}"
    caddy_path_upstream: "127.0.0.1:{{ grafana_host_port }}"
  notify: reload caddy
|
infrastructure/modules/ansible/roles/grafana/tasks/main.yml
|
# Auto-generated API-reference metadata (docs-ref-autogen) for the Azure SDK
# for Java. Do not edit by hand — presumably regenerated from the Java
# sources on each release; confirm with the docs pipeline before changing.
uid: "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource"
fullName: "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource"
name: "SqlDatabaseGetPropertiesResource"
nameWithType: "SqlDatabaseGetPropertiesResource"
summary: "The SqlDatabaseGetPropertiesResource model."
inheritances:
  - "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
  - "<xref href=\"com.azure.resourcemanager.cosmos.models.SqlDatabaseResource\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseResource.id()"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseResource.validate()"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseResource.withId(java.lang.String)"
  - "java.lang.Object.clone()"
  - "java.lang.Object.equals(java.lang.Object)"
  - "java.lang.Object.finalize()"
  - "java.lang.Object.getClass()"
  - "java.lang.Object.hashCode()"
  - "java.lang.Object.notify()"
  - "java.lang.Object.notifyAll()"
  - "java.lang.Object.toString()"
  - "java.lang.Object.wait()"
  - "java.lang.Object.wait(long)"
  - "java.lang.Object.wait(long,int)"
syntax: "public final class SqlDatabaseGetPropertiesResource extends SqlDatabaseResource"
constructors:
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.SqlDatabaseGetPropertiesResource()"
methods:
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.colls()"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.etag()"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.rid()"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.ts()"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.users()"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.validate()"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.withColls(java.lang.String)"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.withId(java.lang.String)"
  - "com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.withUsers(java.lang.String)"
type: "class"
metadata: {}
package: "com.azure.resourcemanager.cosmos.models"
artifact: com.azure.resourcemanager:azure-resourcemanager-cosmos:2.2.0
|
docs-ref-autogen/com.azure.resourcemanager.cosmos.models.SqlDatabaseGetPropertiesResource.yml
|
# Azure Pipelines step template: pulls a Helm chart from an OCI registry,
# exports it, renders the manifest to a build artifact, then upgrades the
# release on AKS.
parameters:
  - name: azureSubscription
    type: string
    default: $(azureSubscription)
  - name: spUsername
    type: string
  - name: spPassword
    type: string
  - name: registry
    type: string
    default: $(registry)
  - name: repository
    type: string
    default: $(repository)
  - name: repositoryPrefix
    type: string
    default: charts/
  - name: tag
    type: string
    default: $(tag)
  - name: azureResourceGroup
    type: string
    default: $(azureResourceGroup)
  - name: kubernetesCluster
    type: string
    default: $(kubernetesCluster)
steps:
  - task: AzureCLI@2
    displayName: helm pull+export+upgrade
    inputs:
      azureSubscription: ${{ parameters.azureSubscription }}
      scriptType: pscore
      scriptLocation: inlineScript
      # NOTE(review): "helm chart pull/export" are the experimental OCI
      # commands that were removed in Helm 3.7+ — confirm the agent's Helm
      # version before reusing this template.
      inlineScript: |
        #set-up kubectl credentials on the agent for the helm upgrade
        az aks get-credentials -n ${{ parameters.kubernetesCluster }} -g ${{ parameters.azureResourceGroup }} --overwrite-existing
        #authenticate with the registry using a service principle
        echo ${{ parameters.spPassword }} | helm registry login "${{ parameters.registry }}" --username ${{ parameters.spUsername }} --password-stdin
        #construct the chart reference
        $chartRef = "${{ parameters.registry }}/${{ parameters.repositoryPrefix }}${{ parameters.repository }}:${{ parameters.tag }}"
        Write-Host "chartRef=$chartRef"
        helm chart pull $chartRef
        helm chart list
        helm chart export $chartRef --destination $(Pipeline.Workspace)/install
        Write-Host "Starting upgrade..."
        $chartFolder = "$(Pipeline.Workspace)/install/${{ parameters.repository }}"
        $chartName = "${{ parameters.repository }}"
        $buildID = "${{ parameters.tag }}"
        $tag = $buildID
        $branch = "$(Build.SourceBranch)"
        $commit = git rev-parse HEAD
        $repo = Split-Path -Leaf (git remote get-url origin)
        $registry = "${{ parameters.registry }}"
        $exportFolder = "$(Build.ArtifactStagingDirectory)/$chartName.yaml"
        #dump template to YAML file
        helm template $chartName $chartFolder `
          --version $tag `
          --set image.tag=$tag `
          --set image.repository=$chartName `
          --set image.registry=$registry `
          --set buildID=$buildID `
          --set branch=$branch `
          --set commit=$commit `
          --set repo=$repo `
          > $exportFolder
        #now lets do the upgrade...
        helm upgrade $chartName $chartFolder `
          --install `
          --wait `
          --version $tag `
          --set image.tag=$tag `
          --set image.repository=$chartName `
          --set image.registry=$registry `
          --set buildID=$buildID `
          --set branch=$branch `
          --set commit=$commit `
          --set repo=$repo
      failOnStandardError: true
|
templates/steps-helm-upgrade-v2.yml
|
---
# This is an example playbook to execute Ansible tests.
# Verifies pam_google_authenticator behaviour for su: a TOTP verification
# code is requested, and a scratch code cannot be reused (DISALLOW_REUSE).
- name: Verify
  hosts: all
  gather_facts: false
  vars:
    # NOTE(review): "<PASSWORD>" values are secret-redaction placeholders.
    # Any self-consistent test values work (they are set and consumed within
    # this play only); confirm against the original fixture.
    root_password: <PASSWORD>
    auth_token: <PASSWORD>
    # /root/.google_authenticator: secret, "-prefixed options, then one
    # scratch code (auth_token).
    auth_config_content: |
      UZTNWKEH5M3QTBMUBI34VWUB2Y
      " RATE_LIMIT 3 30
      " WINDOW_SIZE 17
      " DISALLOW_REUSE
      " TOTP_AUTH
      {{ auth_token }}
    auth_config_checksum: "{{ auth_config_content | hash('sha1') }}"
  tasks:
    - name: install dependencies
      ansible.builtin.package:
        name:
          - 'python*-pexpect'
    - name: add tmp user
      ansible.builtin.user:
        name: tmp
    - name: set root password
      ansible.builtin.user:
        name: root
        # password_hash requires a crypt scheme; sha512 is the portable choice.
        password: "{{ root_password | password_hash('sha512') }}"
    - name: ensure dummy authenticator data is present
      ansible.builtin.copy:
        dest: /root/.google_authenticator
        owner: root
        group: root
        # Quoted so YAML does not parse the mode as an octal integer.
        mode: "0400"
        checksum: "{{ auth_config_checksum }}"
        content: "{{ auth_config_content }}"
    - name: ensure correct python is used
      ansible.builtin.set_fact:
        ansible_python_interpreter: /usr/bin/python3
    - name: check for su prompts
      ansible.builtin.expect:
        command: /bin/bash -c "su - -c 'su - -c whoami' tmp"
        responses:
          Password: "{{ root_password }}"
          Verification: "{{ auth_token }}"
      register: su_good_output
    - name: verify that 2fa code was requested
      ansible.builtin.assert:
        that:
          - "'Password: ' in su_good_output.stdout_lines"
          - "'Verification code: ' in su_good_output.stdout_lines"
          - "'root' in su_good_output.stdout_lines"
    # Second attempt must fail: DISALLOW_REUSE rejects the spent scratch code.
    - name: check for su prompts again reusing auth_token
      ansible.builtin.expect:
        command: /bin/bash -c "su - -c 'su - -c whoami' tmp"
        responses:
          Password: "{{ root_password }}"
          Verification: "{{ auth_token }}"
      ignore_errors: true
      register: su_failed_output
    - name: verify that 2fa code failed
      ansible.builtin.assert:
        that:
          - "'Password: ' in su_failed_output.stdout_lines"
          - "'Verification code: ' in su_failed_output.stdout_lines"
          - "'su: Authentication failure' in su_failed_output.stdout_lines"
          - "'root' not in su_failed_output.stdout_lines"
|
molecule/default/verify.yml
|
# helm-unittest suite: verifies that user-supplied initContainers are passed
# through — and templated — into the ksm Deployment and the controlplane /
# kubelet DaemonSets of the newrelic-infrastructure chart.
suite: test initContainers
templates:
  - templates/controlplane/daemonset.yaml
  - templates/controlplane/configmap.yaml
  - templates/kubelet/daemonset.yaml
  - templates/kubelet/configmap.yaml
  - templates/kubelet/integrations-configmap.yaml
  - templates/ksm/deployment.yaml
  - templates/ksm/configmap.yaml
  - templates/agent-configmap.yaml
  - templates/secret.yaml
tests:
  # Baseline: with only the mandatory values set, no workload gets initContainers.
  - it: initContainers are not populated if nothing is set
    set:
      licenseKey: test
      cluster: test
    asserts:
      - equal:
          path: spec.template.spec.initContainers
          value: null
        template: templates/ksm/deployment.yaml
      - equal:
          path: spec.template.spec.initContainers
          value: null
        template: templates/controlplane/daemonset.yaml
      - equal:
          path: spec.template.spec.initContainers
          value: null
        template: templates/kubelet/daemonset.yaml
  # Each workload takes its initContainers from its own values subtree
  # (securityContext.mode is used here only as a marker to tell them apart).
  - it: init containers are populated
    set:
      licenseKey: test
      cluster: test
      kubelet:
        initContainers:
          - name: controlplane
            image: test
            securityContext:
              mode: kubelet
      controlPlane:
        initContainers:
          - name: controlplane
            image: test
            securityContext:
              mode: controlPlane
      ksm:
        initContainers:
          - name: controlplane
            image: test
            securityContext:
              mode: ksm
    asserts:
      - equal:
          path: spec.template.spec.initContainers
          value:
            - name: controlplane
              image: test
              securityContext:
                mode: ksm
        template: templates/ksm/deployment.yaml
      - equal:
          path: spec.template.spec.initContainers
          value:
            - name: controlplane
              image: test
              securityContext:
                mode: controlPlane
        template: templates/controlplane/daemonset.yaml
      - equal:
          path: spec.template.spec.initContainers
          value:
            - name: controlplane
              image: test
              securityContext:
                mode: kubelet
        template: templates/kubelet/daemonset.yaml
  # Template expressions inside initContainers must be rendered (tpl).
  - it: init containers are populated and templated
    set:
      licenseKey: test
      cluster: test
      myAwesomeValue: testInjecting
      ksm:
        initContainers:
          - name: controlplane
            image: test
            securityContext:
              mode: "{{ .Values.myAwesomeValue }}"
    asserts:
      - equal:
          path: spec.template.spec.initContainers
          value:
            - name: controlplane
              image: test
              securityContext:
                mode: testInjecting
        template: templates/ksm/deployment.yaml
|
charts/newrelic-infrastructure-v3/tests/initContainers_test.yaml
|
# Jekyll site configuration (github/personal-website style theme).
layout: sidebar
style: light
plugins:
  - jekyll-octicons
  - jekyll-github-metadata
  - jemoji
permalink: /:year/:month/:day/:title/
defaults:
  - scope:
      path: "" # an empty string here means all files in the project
      type: "posts"
    values:
      layout: "post"
# Repository cards shown on the site; sourced via jekyll-github-metadata.
projects:
  sort_by: pushed
  # sort_by: stars
  # - pushed
  limit: 20
  include:
    - name: now-deployment
      full_name: amondnet/now-deployment
    - name: openapi-generator
      full_name: OpenAPITools/openapi-generator
    - name: flutter_kakao_login
      full_name: JosephNK/flutter_kakao_login
    - name: mockk
      full_name: mockk/mockk
    - name: maxwell
      full_name: zendesk/maxwell
  exclude:
    archived: false
    forks: false
    # Repositories hidden from the projects list by name.
    projects:
      - now-deployment
      - openapi-generator
      - flutter_kakao_login
      - mockk
      - test-a
      - amondnet.github.io
      - dart-json-mapper
      - kiwi
      - firestore_ui
      - pusher-websocket-swift
      # - repo-name
social_media:
  # behance: your_username
  # dribbble: your_username
  # facebook: your_username
  # hackerrank: your_username
  # instagram: your_username
  # keybase: your_username
  linkedin: 민수-이-301a8b82
  # mastodon: your_username
  # medium: your_username
  # stackoverflow: your_user_id
  # telegram: your_username
  # twitter: your_username
  # unsplash: your_username
  # vk: your_username
  website: https://amond.net
  # youtube: your_username
topics:
  - name: Flutter
    web_url: https://github.com/topics/flutter
    image_url: https://raw.githubusercontent.com/github/explore/cebd63002168a05a6a642f309227eefeccd92950/topics/flutter/flutter.png
  - name: Kotlin
    web_url: https://github.com/topics/kotlin
    image_url: https://raw.githubusercontent.com/github/explore/80688e429a7d4ef2fca1e82350fe8e3517d3494d/topics/kotlin/kotlin.png
  - name: Spring Boot
    web_url: https://github.com/topics/spring-boot
    image_url: https://raw.githubusercontent.com/github/explore/80688e429a7d4ef2fca1e82350fe8e3517d3494d/topics/spring-boot/spring-boot.png
_config.yml
|
# Auto-generated, localized (pt-BR) API-reference metadata for the
# ExcelScript.PivotLabelFilter interface. Do not edit by hand — presumably
# regenerated from the TypeScript declarations; fix text in the localization
# source, not here.
name: ExcelScript.PivotLabelFilter
uid: 'ExcelScript!ExcelScript.PivotLabelFilter:interface'
package: ExcelScript!
fullName: ExcelScript.PivotLabelFilter
summary: Modelo configurável para um filtro de rótulo a ser aplicado a um PivotField. O `condition` define quais critérios precisam ser definidos para que o filtro funcione.
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
  - name: condition
    uid: 'ExcelScript!ExcelScript.PivotLabelFilter#condition:member'
    package: ExcelScript!
    fullName: condition
    summary: 'Especifica a condição do filtro, que define os critérios de filtragem necessários.'
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'condition: LabelFilterCondition;'
      return:
        type: '<xref uid="ExcelScript!ExcelScript.LabelFilterCondition:enum" />'
  - name: exclusive
    uid: 'ExcelScript!ExcelScript.PivotLabelFilter#exclusive:member'
    package: ExcelScript!
    fullName: exclusive
    summary: 'Se `true`<!-- -->, filter *exclui itens* que atendem aos critérios. O padrão é `false` (filtrar para incluir itens que atendem aos critérios).'
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'exclusive?: boolean;'
      return:
        type: boolean
  - name: lowerBound
    uid: 'ExcelScript!ExcelScript.PivotLabelFilter#lowerBound:member'
    package: ExcelScript!
    fullName: lowerBound
    summary: 'O limite inferior do intervalo para a condição `between` de filtro. Observação: uma cadeia de caracteres numérica é tratada como um número ao ser comparada com outras cadeias de caracteres numéricas.'
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'lowerBound?: string;'
      return:
        type: string
  # NOTE(review): "subdistragem" below looks like a machine mistranslation of
  # "substring" ("subcadeia") — correct it upstream in the localization files.
  - name: substring
    uid: 'ExcelScript!ExcelScript.PivotLabelFilter#substring:member'
    package: ExcelScript!
    fullName: substring
    summary: 'A subdistragem usada para `beginsWith`<!-- -->, `endsWith`<!-- -->e condições `contains` de filtro.'
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'substring?: string;'
      return:
        type: string
  - name: upperBound
    uid: 'ExcelScript!ExcelScript.PivotLabelFilter#upperBound:member'
    package: ExcelScript!
    fullName: upperBound
    summary: 'O limite superior do intervalo para a condição `between` de filtro. Observação: uma cadeia de caracteres numérica é tratada como um número ao ser comparada com outras cadeias de caracteres numéricas.'
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'upperBound?: string;'
      return:
        type: string
|
docs/docs-ref-autogen/excel/excelscript/excelscript.pivotlabelfilter.yml
|
# Simple Stack plugin configuration: which item types stack to 64 and the
# user-facing messages for the /simplestack commands.
# List Mode: Blacklist = "Item Types" lists items NOT stacked to 64;
# Whitelist would invert that.
List Mode: Blacklist
# Item Types: Item types that will or will not be stacked to 64 based on the list mode above
Item Types:
  - Example Item
# Item Amounts: Set the amount of items that an item of that type can hold.
Item Amounts:
  Example Item: 64
# Default Max Amount: Set the default max amount for ALL items in Minecraft
# This setting could be dangerous, do proper testing before changing this value.
Default Max Amount: 64
# Hopper Movement Checks: Hoppers will attempt to stack unstackable items together.
# Setting this to false will stop hopper checks which could increase performance but will
# Stop stacking of unstackables when moving through hoppers.
Hopper Movement Checks: true
# Ground Stacking Checks: Simple Stack will check whether unstackable items on the ground
# will stack together or not.
# Setting this to false will stop unstackables stacking when on the ground and could
# increase performance at the cost of unstackables not stacking when on the ground.
Ground Stacking Checks: true
# Creative Item Dragging: When this is set to true, items that are dragged in a creative
# inventory will always become a full stack. This is a substitute for middle click dragging
# since middle click dragging can't be detected through the Spigot API.
Creative Item Dragging: false
# Localizable message strings; {MAT}/{MODE} are runtime placeholders.
simplestack:
  commands:
    reload:
      success: "The config has been reloaded."
    reset:
      success: "The config has been reset to the default values."
    setamount:
      info: "Set the amount of a held item"
      success: "Amount set successfully."
      format: "Command format: /simplestack setamount <number>"
    additem:
      success: "The held item has been added to the config."
    removeitem:
      success: "The held item has been removed from the config."
  warnings:
    no_permission: "You do not have permission to use this command."
    must_be_player: "You must be a player to use this command."
    number_outside_of_range: "The amount specified for {MAT} is outside of the valid range. Defaulting to 64."
    invalid_material: "The material {MAT} in the config does not exist."
    invalid_unique_item: "An item listed in the unique items list could not be loaded."
    invalid_list_mode: "The list mode {MODE} is not a valid list mode. Defaulting to blacklist mode."
    invalid_max_amount: "The default max amount specified in the config is outside of the required range (1 - 64)"
    material_already_exists: "That material already exists in the config."
    custom_amount_does_not_exist: "A customized amount for that item type does not exist in the config."
    held_item_required: "You must hold an item to run this command."
  list_type:
    blacklist: "Blacklist"
    whitelist: "Whitelist"
|
src/main/resources/config.yml
|
---
# Create the application database on the project's PostgreSQL pod and, when
# params.reset_db is set, copy and execute the drop/liquibase DDL scripts.
- debug:
    var: params
    verbosity: 0
# An explicit params.db_project overrides the current project as the
# namespace that hosts the database pod.
- set_fact:
    db_project: "{{ params.db_project | default(project) }}"
  # Tagged like the tasks below: a run limited to --tags postgresql still
  # needs db_project/db_pod resolved, otherwise later tasks fail undefined.
  tags:
    - openshift
    - postgresql
- name: Read the database pod list
  shell: "oc get pods --selector name=postgresql -o json -n {{ db_project }}"
  register: list_of_db_pods
  tags:
    - openshift
    - postgresql
- name: Get the name of the PostgreSQL Pod
  set_fact: db_pod="{{list_of_db_pods.stdout|metadata_name}}"
  tags:
    - openshift
    - postgresql
# "exists" in stderr means the database is already there — treated as
# unchanged success for idempotence (same pattern on every shell task below).
- name: Create the {{ params.app }} database on PostgreSQL Pod
  shell: 'oc exec {{db_pod}} -n {{ db_project }} -- /bin/sh -i -c "createdb {{ project }}-{{ params.app }}"'
  register: command_result
  failed_when: "'exists' not in command_result.stderr and command_result.rc != 0"
  changed_when: "'exists' not in command_result.stderr"
  tags:
    - openshift
    - postgresql
- name: Copy ddl to PostgreSQL Pod
  shell: 'oc rsync --no-perms=true --include="*.ddl" --exclude="*" {{ workdir }}/{{ params.repo }}/src/ddl/ {{db_pod}}:/tmp -n {{ db_project }}'
  register: command_result
  when: params.reset_db
  failed_when: "'exists' not in command_result.stderr and command_result.rc != 0"
  changed_when: "'exists' not in command_result.stderr"
  tags:
    - openshift
    - postgresql
- name: Copy liquibase ddl to PostgreSQL Pod
  shell: 'oc rsync --no-perms=true --include="*.ddl" --exclude="*" {{ boost_ddl }}/ {{db_pod}}:/tmp -n {{ db_project }}'
  register: command_result
  when: params.reset_db
  failed_when: "'exists' not in command_result.stderr and command_result.rc != 0"
  changed_when: "'exists' not in command_result.stderr"
  tags:
    - openshift
    - postgresql
- name: Execute Drop ddl on PostgreSQL Pod
  shell: 'oc exec {{db_pod}} -n {{ db_project }} -- /bin/sh -i -c "psql -d {{ project }}-{{ params.app }} -U postgres -f /tmp/drop-db.ddl"'
  register: command_result
  when: params.reset_db
  failed_when: "'exists' not in command_result.stderr and command_result.rc != 0"
  changed_when: "'exists' not in command_result.stderr"
  tags:
    - openshift
    - postgresql
- name: Drop liquibase on PostgreSQL Pod
  shell: 'oc exec {{db_pod}} -n {{ db_project }} -- /bin/sh -i -c "psql -d {{ project }}-{{ params.app }} -U postgres -f /tmp/drop-liquibase-db.ddl"'
  register: command_result
  when: params.reset_db
  failed_when: "'exists' not in command_result.stderr and command_result.rc != 0"
  changed_when: "'exists' not in command_result.stderr"
  tags:
    - openshift
    - postgresql
|
src/boost/openshift/actions/boostcd.create_postgres_db/tasks/main.yml
|
# GitHub Actions release pipeline for a UWP app (ParquetViewer): on a pushed
# tag, builds a signed store package and attaches it to a GitHub release.
name: Pipeline
on:
  push:
    # NOTE(review): GitHub documents on.push.tags as a list of patterns —
    # confirm a bare scalar is accepted here, otherwise use ['*'].
    tags: '*'
jobs:
  release:
    runs-on: windows-latest
    env:
      SigningCertificate: ParquetViewer_StoreKey.pfx
      Solution_Path: src\ParquetViewer.sln
      UWP_Project_Path: src\ParquetViewer\ParquetViewer.csproj
      UWP_Project_Directory: .\src\ParquetViewer
    steps:
      # UWP builds can exhaust the default pagefile on hosted runners.
      - name: Configure Pagefile
        uses: al-cheb/configure-pagefile-action@v1.2
        with:
          minimum-size: 32GB
          maximum-size: 32GB
          disk-root: "C:"
      - name: Get tag
        id: tag
        uses: dawidd6/action-get-tag@v1
      - name: Use tag
        run: echo ${{steps.tag.outputs.tag}}
      # NOTE(review): this returns the tag string as-is — it does not strip a
      # leading "v"; confirm tags are pushed as bare version numbers.
      - name: Extract version from tag
        uses: Amadevus/pwsh-script@v2
        id: getVersion
        with:
          script: '("${{steps.tag.outputs.tag}}")'
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Setup MSBuild.exe
        uses: microsoft/setup-msbuild@v1.1
      # Stamp the appxmanifest with the tag-derived version before building.
      - name: Update manifest version
        run: |
          [xml]$manifest = get-content ".\$env:UWP_Project_Directory\Package.appxmanifest"
          $manifest.Package.Identity.Version = "${{steps.getVersion.outputs.result}}.0"
          $manifest.save(".\$env:UWP_Project_Directory\Package.appxmanifest")
      # Materialize the base64-encoded signing certificate next to the project.
      - name: Decode the Pfx
        run: |
          $pfx_cert_byte = [System.Convert]::FromBase64String("${{ secrets.APP_CODE_SIGN_PFX_BASE64 }}")
          $currentDirectory = Get-Location
          $certificatePath = Join-Path -Path $currentDirectory -ChildPath $env:UWP_Project_Directory -AdditionalChildPath $env:SigningCertificate
          [IO.File]::WriteAllBytes("$certificatePath", $pfx_cert_byte)
      - name: Build the sideload solution
        run: msbuild $env:Solution_Path /p:Platform=x86 /p:AppxBundle=$env:AppxBundle /p:AppxBundlePlatforms="x86|x64" /p:AppxPackageDir=C:\DeployOutput /restore
        env:
          AppxBundle: Always
          BuildMode: StoreUpload
          Configuration: Release
      # Remove the decoded certificate as soon as the build no longer needs it.
      - name: Remove the .pfx
        run: Remove-Item -path $env:UWP_Project_Directory/$env:SigningCertificate
      - name: Create archive
        run: Compress-Archive -Path C:\DeployOutput\* -DestinationPath C:\DeployOutput\StorePackage_${{steps.getVersion.outputs.result}}.zip
      - name: Create release
        id: create_release
        uses: ncipollo/release-action@v1
        # NOTE(review): ncipollo/release-action takes "token" as a with: input
        # (defaulting to github.token) — this env var is likely ignored; confirm.
        env:
          token: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag: "${{ github.ref }}"
          artifacts: "C:\\DeployOutput\\StorePackage_${{steps.getVersion.outputs.result}}.zip"
          allowUpdates: true
          body: "Full Changelog: https://github.com/anchoraorg/parquet-viewer-uwp/commits/${{ github.ref }}"
.github/workflows/pipeline.yml
|
---
- name: Ensure clone target exists
file:
recurse: true
state: directory
dest: "{{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}"
- name: Clone CADD-scripts
git:
repo: 'https://github.com/kircherlab/CADD-scripts.git'
dest: "{{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}"
version: '{{ cadd_rest_api_server_cadd_scripts_version }}'
- name: Download & unpack miniconda3
get_url:
url: https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
dest: "{{ cadd_rest_api_server_install_path }}/../cadd-scripts/Miniconda3-latest-Linux-x86_64.sh"
- name: Install miniconda3
shell: >
trap "rm -rf {{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}-conda" ERR;
bash {{ cadd_rest_api_server_install_path }}/../cadd-scripts/Miniconda3-latest-Linux-x86_64.sh
-b -p {{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}-conda
args:
executable: /bin/bash
creates: "{{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}-conda/bin/activate"
- name: Fix miniconda3
replace:
path: "{{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}-conda/etc/profile.d/conda.sh"
regexp: '\$PS1'
replace: '${PS1:-}'
- name: Setup CADD-scripts environment
shell: |
source {{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}-conda/bin/activate
conda env create -f {{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}/src/environment.yml
args:
executable: /bin/bash
creates: "{{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}-conda/envs/cadd-env"
- name: Install precomputed scores.
  # get_url always downloads from a remote URL; it has no remote_src
  # parameter (that option belongs to copy/unarchive), so the original
  # task failed module argument validation ("Unsupported parameters").
  get_url:
    url: https://krishna.gs.washington.edu/download/CADD/{{ item.src }}
    dest: "{{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}/data/{{ item.dest }}"
  with_items:
    - src: "{{ cadd_rest_api_server_cadd_score_version }}/GRCh37/whole_genome_SNVs_inclAnno.tsv.gz"
      dest: prescored/GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/incl_anno/whole_genome_SNVs_inclAnno.tsv.gz
    - src: "{{ cadd_rest_api_server_cadd_score_version }}/GRCh37/whole_genome_SNVs_inclAnno.tsv.gz.tbi"
      dest: prescored/GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/incl_anno/whole_genome_SNVs_inclAnno.tsv.gz.tbi
    - src: "{{ cadd_rest_api_server_cadd_score_version }}/GRCh37/InDels_inclAnno.tsv.gz"
      dest: prescored/GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/incl_anno/InDels_inclAnno.tsv.gz
    - src: "{{ cadd_rest_api_server_cadd_score_version }}/GRCh37/InDels_inclAnno.tsv.gz.tbi"
      dest: prescored/GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/incl_anno/InDels_inclAnno.tsv.gz.tbi
    - src: "{{ cadd_rest_api_server_cadd_score_version }}/GRCh37/whole_genome_SNVs.tsv.gz"
      dest: prescored/GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/no_anno/whole_genome_SNVs.tsv.gz
    - src: "{{ cadd_rest_api_server_cadd_score_version }}/GRCh37/whole_genome_SNVs.tsv.gz.tbi"
      dest: prescored/GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/no_anno/whole_genome_SNVs.tsv.gz.tbi
    - src: "{{ cadd_rest_api_server_cadd_score_version }}/GRCh37/InDels.tsv.gz"
      dest: prescored/GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/no_anno/InDels.tsv.gz
    - src: "{{ cadd_rest_api_server_cadd_score_version }}/GRCh37/InDels.tsv.gz.tbi"
      dest: prescored/GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/no_anno/InDels.tsv.gz.tbi
  when: not cadd_rest_api_server_skip_download
- name: Download and extract data files
unarchive:
src: https://krishna.gs.washington.edu/download/CADD/{{ item.src }}
dest: "{{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}/data/{{ item.dest }}"
creates: "{{ cadd_rest_api_server_install_path }}/../cadd-scripts/{{ cadd_rest_api_server_cadd_scripts_version }}/data/{{ item.dest }}/{{ item.creates }}"
remote_src: true
extra_opts:
- --transform
- "{{ item.transform }}"
with_items:
- src: "{{ cadd_rest_api_server_cadd_score_version }}/GRCh37/annotationsGRCh37.tar.gz"
dest: annotations
creates: GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/vep/homo_sapiens/92_GRCh37/HG19_PATCH/2000001-3000000.gz
transform: s/^GRCh37/GRCh37_{{ cadd_rest_api_server_cadd_score_version }}/
when: not cadd_rest_api_server_skip_download
|
tasks/cadd_scripts.yml
|
---
- name: install ansible openssl bindings
apt:
name: "{{ lin_use_python2 |bool |ternary(packages_pythons, packages_python3) }}"
state: present
vars:
packages_pythons: python3-openssl,python3-pip,python-openssl,python-pip
packages_python3: python3-openssl,python3-pip
- block:
- name: verify validity of pyOpenSSL (ansible openssl bindings)
command:
argv:
- "{{ ansible_python.executable }}"
- -c
- "from OpenSSL import SSL"
changed_when: false
rescue:
- name: upgrade ansible openssl bindings
command:
argv:
- "{{ ansible_python.executable }}"
- -m
- pip
- install
- --upgrade
- pyOpenSSL
- name: check whether squid ssl files need to be generated
stat:
path: "{{ ss_cert }}.{{ item }}"
loop: [crt, key, csr, bundle.pem]
register: _squid_ss_cert
changed_when: not _squid_ss_cert.stat.exists
notify: restart squid service
- name: generate squid self-signed ssl files
  block:
    - name: generate ssl private key (4096 bits, rsa) for squid
      openssl_privatekey:
        path: "{{ ss_cert }}.key"
        owner: root
        group: "{{ squid_group }}"
        # octal modes are quoted so the YAML parser cannot reinterpret the
        # leading-zero literal as an integer (ansible-lint risky-octal)
        mode: "0640"
    - name: generate certificate signing request for squid
      openssl_csr:
        path: "{{ ss_cert }}.csr"
        privatekey_path: "{{ ss_cert }}.key"
        common_name: "{{ squid_host }}"
        owner: root
        group: "{{ squid_group }}"
        mode: "0640"
    - name: generate self-signed ssl certificate for squid
      openssl_certificate:
        path: "{{ ss_cert }}.crt"
        privatekey_path: "{{ ss_cert }}.key"
        csr_path: "{{ ss_cert }}.csr"
        provider: selfsigned
        owner: root
        group: "{{ squid_group }}"
        mode: "0640"
    - name: make self-signed certificate bundle
      shell: cat "{{ ss_cert }}.crt" "{{ ss_cert }}.key" > "{{ ss_cert }}.bundle.pem"
      when: not squid_from_ppa |bool
    - name: ensure self-signed bundle permissions
      file:
        path: "{{ ss_cert }}.bundle.pem"
        state: file
        owner: root
        group: "{{ squid_group }}"
        mode: "0640"
      when: not squid_from_ppa |bool
  when: _squid_ss_cert is changed
    and not ansible_check_mode
...
|
tasks/ssl-selfsigned.yml
|
---
- CreateTable: t(c1 INT, c2 BIGINT, c3 CHAR(32), c4 VARCHAR(10) FOR BIT DATA, c5 DECIMAL(10,5), c6 BLOB)
# 3-arg
---
- Statement: SELECT COLUMN_TYPE_STRING(CURRENT_SCHEMA, 't', 'c1')
- output: [ ['INT'] ]
---
- Statement: SELECT COLUMN_TYPE_STRING(CURRENT_SCHEMA, 't', 'c2')
- output: [ ['BIGINT'] ]
---
- Statement: SELECT COLUMN_TYPE_STRING(CURRENT_SCHEMA, 't', 'c3')
- output: [ ['CHAR(32)'] ]
---
- Statement: SELECT COLUMN_TYPE_STRING(CURRENT_SCHEMA, 't', 'c4')
- output: [ ['VARBINARY(10)'] ]
---
- Statement: SELECT COLUMN_TYPE_STRING(CURRENT_SCHEMA, 't', 'c5')
- output: [ ['DECIMAL(10, 5)'] ]
---
- Statement: SELECT COLUMN_TYPE_STRING(CURRENT_SCHEMA, 't', 'c6')
- output: [ ['BLOB'] ]
# 2-arg, default schema
---
- Statement: SELECT COLUMN_TYPE_STRING('t', 'c1')
- output: [ ['INT'] ]
# 1-arg, fully specified and default schema
---
- Statement: SELECT COLUMN_TYPE_STRING(CONCAT(CURRENT_SCHEMA, '.t.c1'))
- output: [ ['INT'] ]
---
- Statement: SELECT COLUMN_TYPE_STRING('t.c1')
- output: [ ['INT'] ]
# Specified but missing schema still defaulted
---
- Statement: SELECT COLUMN_TYPE_STRING('', 't', 'c1')
- output: [ ['INT'] ]
---
- Statement: SELECT COLUMN_TYPE_STRING('.t.c1')
- output: [ ['INT'] ]
# NULL contaminates
---
- Statement: SELECT COLUMN_TYPE_STRING(NULL, 't', 'c1')
- output: [ [NULL] ]
# No such column
---
- Statement: SELECT COLUMN_TYPE_STRING(CURRENT_SCHEMA, 't', 'foo')
- error: ['50018']
---
- Statement: SELECT COLUMN_TYPE_STRING('t', 'foo')
- error: ['50018']
---
- Statement: SELECT COLUMN_TYPE_STRING('t.foo')
- error: ['50018']
# No such table
---
- Statement: SELECT COLUMN_TYPE_STRING(CURRENT_SCHEMA, 'x', 'foo')
- error: ['42501']
---
- Statement: SELECT COLUMN_TYPE_STRING('x', 'foo')
- error: ['42501']
---
- Statement: SELECT COLUMN_TYPE_STRING('x.foo')
- error: ['42501']
# And view support
---
- Statement: CREATE VIEW v AS SELECT c2 AS v1 FROM t
---
- Statement: SELECT COLUMN_TYPE_STRING('v', 'v1')
- output: [ ['BIGINT'] ]
---
- Statement: DROP VIEW v
---
- Statement: explain VERBOSE select COLUMN_TYPE_STRING( CAST(? AS CHAR(1)), 'abc' )
- output: [['Project_Default(COLUMN_TYPE_STRING(CAST($1 AS VARCHAR(3)), ''abc''))'],
[' ValuesScan_Default([])']]
...
|
fdb-sql-layer-test-yaml/src/test/resources/com/foundationdb/sql/test/yaml/functional/test-column-type-string.yaml
|
---
banner:
title: 'Trouvez votre formation,<br> selon vos besoins,<br> selon vos envies '
about:
enable: true
content: Trouvez votre façon de vous former. A votre rythme, selon vos disponibilités,
devant votre mobile ou votre ordinateur, seul ou accompagné, en formation initiale
et continue.
button:
enable: true
label: A propos
URL: about
skill:
enable: false
title: Skills
item:
- title: Web Design
progress: 90%
color: "#fdb157"
- title: Logo Design
progress: 60%
color: "#9473e6"
- title: After Effects
progress: 80%
color: "#bdecf6"
- title: Web App
progress: 70%
color: "#ffbcaa"
experience:
enable: false
title: Experience
item:
- logo: images/experience/icon-1.png
title: Junior UX Designer
company: WEBEX
duration: Jan 2007 - Feb 2009
- logo: images/experience/icon-2.png
title: UX & UI Designer
company: AUGMEDIX
duration: Mar 2009 - Aug 2014
- logo: images/experience/icon-1.png
title: Senior UI Designer
company: THEMEFISHER
duration: Sep 2014 - Present
education:
enable: false
title: Education
item:
  - title: Masters in UX Design
    year: "2006"
    academy: Massachusetts Institute of Technology
- title: Honours in Fine Arts
year: "2004"
academy: Harvard University
  - title: Higher Secondary Certificate
year: "2000"
academy: Cardiff School
- title: Secondary School Certificate
year: "1998"
academy: Cardiff School
service:
enable: true
title: L’offre de formations
item:
- title: Une autonomie de formation
icon: " ti-wand"
content: Trouvez la formation qui vous convient. Avec d’autres enseignants, seul
avec votre mobile, en 10 minutes ou sur 1 an.
highlighted: false
- title: Suivez votre progression
icon: " ti-dashboard"
content: Découvrez votre parcours, suivez le pas à pas... et changez-le selon
vos envies.
highlighted: true
- title: De quoi faire progresser vos élèves
icon: " ti-stats-up "
content: Apprenez-leur à apprendre, personnalisez vos cours.
highlighted: false
portfolio:
enable: false
title: Portfolio
item_show: "5"
testimonial:
enable: false
title: Testimonials
item:
- name: <NAME>
image: images/testimonial/client-1.png
designation: CEO, Funder
content: Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
<strong>quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat.</strong> Duis aute irure dolor in reprehenderit in voluptate velit
esse cillum dolore eu fugiat nulla pariatur.
- name: <NAME>
image: images/testimonial/client-1.png
designation: CEO, Funder
content: Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
<strong>quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat.</strong> Duis aute irure dolor in reprehenderit in voluptate velit
esse cillum dolore eu fugiat nulla pariatur.
- name: <NAME>
image: images/testimonial/client-1.png
designation: CEO, Funder
content: Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
<strong>quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat.</strong> Duis aute irure dolor in reprehenderit in voluptate velit
esse cillum dolore eu fugiat nulla pariatur.
clients_logo_slider:
enable: true
item:
- logo: "/images/clients-logo/canope.png"
URL: ''
- logo: "/images/clients-logo/cnam.png"
URL: ''
- logo: images/clients-logo/onisep.png
URL: ''
- logo: images/clients-logo/cned.png
URL: ''
- logo: images/clients-logo/etreprof.png
URL: ''
- logo: images/clients-logo/clemi.png
URL: ''
- logo: images/clients-logo/magistere.png
URL: ''
- logo: images/clients-logo/FEI.png
URL: ''
blog:
enable: true
title: "Des premières formations à tester"
# blog post comes from "content/blog" folder
|
exampleSite/data/homepage.yml
|
openapi: 3.0.0
info:
version: 1.0.0
title: OAuth2 Refresh Token Management
description: OAuth2 refresh token management microservices endpoints.
contact:
email: <EMAIL>
license:
name: Apache 2.0
url: 'http://www.apache.org/licenses/LICENSE-2.0.html'
paths:
/oauth2/refresh_token:
get:
description: Return all refresh tokens
operationId: getAllRefreshToken
parameters:
- name: page
in: query
description: Page number
required: true
schema:
type: integer
format: int32
- name: pageSize
in: query
          description: Page size
required: false
schema:
type: integer
format: int32
- name: userId
in: query
description: Partial userId for filter
required: false
schema:
type: string
responses:
'200':
description: successful operation
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/RefreshToken'
security:
- refresh_token_auth:
- oauth.refresh_token.r
'/oauth2/refresh_token/{refreshToken}':
delete:
description: Delete a refresh token
operationId: deleteRefreshToken
parameters:
- name: refreshToken
in: path
description: Refresh Token
required: true
schema:
type: string
responses:
'400':
description: Invalid refresh token supplied
'404':
description: Refresh token not found
security:
- refresh_token_auth:
- oauth.refresh_token.w
get:
description: Get a refresh token
operationId: getRefreshToken
parameters:
- name: refreshToken
in: path
description: Refresh token
required: true
schema:
type: string
responses:
'200':
description: Successful response
content:
application/json:
schema:
$ref: '#/components/schemas/RefreshToken'
'400':
description: Invalid refresh token supplied
'404':
description: Refresh token not found
security:
- refresh_token_auth:
- oauth.refresh_token.r
- oauth.refresh_token.w
servers:
- url: 'https://oauth2.networknt.com'
components:
securitySchemes:
refresh_token_auth:
type: oauth2
flows:
implicit:
authorizationUrl: 'http://localhost:8888/oauth2/code'
scopes:
oauth.refresh_token.w: write oauth refresh token
oauth.refresh_token.r: read oauth refresh token
schemas:
RefreshToken:
type: object
required:
- refreshToken
- userId
- clientId
properties:
refreshToken:
type: string
description: refresh token
userId:
type: string
description: user id
clientId:
type: string
description: client id
scope:
type: string
description: service scopes separated by space
|
light-oauth2/test-cloud/light-oauth2/mysql/config/oauth2-refresh-token/openapi.yaml
|
location: Indonesia, West Borneo
menu:
About: /about #you need to create "about" page first
Friends: /friends
# Default post title
default_post_title: Untitled #default post title will automatically show if you forgot to write the title of your post
# Default post cover index page
default_cover_index: "/img/seira.jpg" #default cover index will automatically show if you forgot to set up the cover index on your post.
favicon: /img/favicon.png #favicon is for your logo on the tab's browser. use transparent BG, more small more good.
logo: img/logo2.svg #logo show in the menu and header
cover_page: img/cover.jpg #better use 16:9 ratio
profile_pic: /img/450x450/seira.jpg #profil pic will show on the left side of the post's title
author: <NAME> #name author in post (default)
comments:
#disqus_shortname: aflasio
# Facebook comments
#facebook:
#appid: 558386947954045
#comment_count: 5
#comment_colorscheme: light
# Valine comments https://valine.js.org
valine:
appid: 6jbQfaaH3EJezmsqw43NCcNa-gzGzoHsz
appkey: 5AsBAiusJURpbNDHTP4LHwA6
notify: false # true/false:mail notify !!!Test,Caution. https://github.com/xCss/Valine/wiki/Valine-%E8%AF%84%E8%AE%BA%E7%B3%BB%E7%BB%9F%E4%B8%AD%E7%9A%84%E9%82%AE%E4%BB%B6%E6%8F%90%E9%86%92%E8%AE%BE%E7%BD%AE
verify: false # true/false:verify code
avatar: mm # avatar style https://github.com/xCss/Valine/wiki/avatar-setting-for-valine
placeholder: Leave comment # comment box placeholder
google_analytics: UA-128493937-1 #get your google analytic on https://analytics.google.com/analytics
# if u want add more profile media social (icon same as Fontawesome 5)
# just add name ex : Instagram: https://www.instagram.com/you/
social:
Gitlab: https://gitlab.com/sira313
Twitter: https://twitter.com/aflasio
Facebook: https://facebook.com/aflasio.art
#Google-plus: https://plus.google.com/104751165505597913805
email: mailto:<EMAIL>
#Instagram: https://www.instagram.com/you/
Telegram: https://t.me/aflasio
# Code Highlight theme
# Available value:
# default | normal | night | night eighties | night blue | night bright
# https://github.com/chriskempson/tomorrow-theme
highlight_theme: night eighties
|
_config.yml
|
homeassistant:
name: Home
latitude: !secret homeassistant_latitude
longitude: !secret homeassistant_longitude
elevation: !secret homeassistant_elevation
unit_system: metric
time_zone: Europe/London
customize: !include_dir_merge_named customize
#customize_domain:
# automation:
# initial_state: 'on'
#customize_glob:
# automation.*watchdog*:
# icon: mdi:timer
# sensor.dark_sky_*:
# homebridge_hidden: true
# scene.month_*_colors:
# hidden: true
# homebridge_hidden: true
packages: !include_dir_named packages
auth_providers:
- type: homeassistant
- type: legacy_api_password
api_password: !secret http_password
- type: trusted_networks
trusted_networks:
- 172.16.0.0/24
- !secret external_ip
default_config:
system_health:
map:
logbook:
history:
config:
http:
base_url: !secret http_base_url
server_port: !secret server_port
#ssl_certificate: !secret ssl_certificate
#ssl_key: !secret ssl_key
ip_ban_enabled: true
#use_x_forwarded_for: true
#trusted_proxies:
# - 10.0.0.200
login_attempts_threshold: 3
cors_allowed_origins:
- https://google.com
- https://www.home-assistant.io
frontend:
#panel_custom:
# - name: floorplan
# sidebar_title: Alarm Panel
# sidebar_icon: mdi:security-home
# url_path: floorplan
# config:
# hide_app_toolbar:
# config: /local/custom_ui/floorplan/floorplan.yaml
discovery:
#ignore:
# - samsung_tv
updater:
include_used_components: true
sun:
logger: !include logger.yaml
recorder: !include recorder.yaml
group: !include_dir_merge_named group
device_tracker: !include_dir_merge_list device_tracker
sensor: !include_dir_merge_list sensor
automation: !include_dir_merge_list automation
#scene: !include_dir_merge_list scene
switch: !include_dir_merge_list switch
script: !include_dir_merge_named script
#input_boolean: !include_dir_merge_named input_boolean
#shell_command: !include_dir_merge_named shell_command
|
config/configuration.yaml
|
name: tsbt
version: '1.3.8'
base: core18
summary: nodejs application to access bluetooth tsbt tags
description: |
tsbt reader and usbutils
grade: devel
confinement: strict
parts:
tsbt:
plugin: nodejs
build-packages: [npm, python]
source: .
stage-snaps:
- bluez
- pi-bluetooth
- grafana-ijohnson
- influxdb-configurable
- network-manager
stage-packages:
- libbluetooth-dev
- libudev-dev
- build-essential
- libreadline-dev
prime:
- -usr/share/doc
- -usr/share/man
- -usr/bin/npm
espruino-tools:
plugin: dump
build-packages: [npm]
source: https://github.com/espruino/EspruinoTools.git
source-type: git
organize:
'*': EspruinoTools/
prime:
- -usr/share/doc
- -usr/share/man
- -usr/bin/npm
usbutils:
plugin: autotools
source: https://github.com/gregkh/usbutils.git
source-type: git
stage-packages:
- libusb-1.0-0
build-packages:
- npm
- libusb-1.0-0-dev
- libglib2.0-dev
- libudev-dev
- zlib1g-dev
configflags:
- --prefix=/usr
- --libdir=/usr/lib
- --localstatedir=/var/lib
prime:
- -usr/share/doc
- -usr/share/man
- -usr/bin/npm
architectures:
- build-on: armhf
run-on: armhf
- build-on: arm64
run-on: arm64
plugs:
client:
interface: bluez
apps:
lsusb:
command: usr/bin/lsusb
plugs: [hardware-observe, network]
init:
command: usr/bin/hciconfig hci0 up
daemon: simple
plugs:
- serial-port
hciattach:
command: usr/bin/hciattach
plugs:
- serial-port
tsbt:
command: nice -15 node $SNAP/index.js
daemon: simple
start-timeout: 30s
plugs:
- client
- home
- network
- network-bind
- bluetooth-control
- network-control
node:
command: node
plugs:
- home
- network
- network-bind
- bluetooth-control
- network-control
espr:
command: node $SNAP/EspruinoTools/bin/espruino-cli.js
plugs:
- client
- home
- network
- network-bind
- bluetooth-control
- network-control
|
snapcraft.yaml
|
name: 'Test Azure Spring Cloud'
description: 'Deploy applications to Azure Spring Cloud and manage deployments.'
inputs:
azure-subscription:
description: 'Select the Azure Resource Manager subscription for the deployment.'
required: true
resource-group-name:
description: 'Select the Azure Resource Group for the deployment.'
required: false
action:
description: 'Action to be performed on Azure Spring Cloud.'
required: true
default: 'deploy'
service-name:
description: 'Select the Azure Spring Cloud service to which to deploy.'
required: true
app-name:
description: 'Select the Azure Spring Cloud app to deploy.'
required: true
use-staging-deployment:
description: "Automatically select the deployment that's set as Staging at the time the task runs."
required: true
default: true
create-new-deployment:
description: "Whether to target the deployment that's set as Staging at the time of execution. If unchecked, the 'Deployment Name' setting must be set."
required: false
default: false
deployment-name:
description: 'The deployment to which this task will apply. Lowercase letters, - and numbers only; must start with a letter.'
required: false
package:
description: "File path to the package or a folder containing the Spring Cloud app contents."
required: false
default: '${{ github.workspace }}/**/*.jar'
environment-variables:
description: "Edit the app's environment variables."
required: false
jvm-options:
description: "Edit the app's JVM options. A String containing JVM Options. Example: `-Xms1024m -Xmx2048m`"
required: false
runtime-version:
description: 'The runtime on which the app will run.'
required: false
dotnetcore-mainentry-path:
description: 'The path to the .NET executable relative to zip root.'
required: false
version:
description: 'The runtime on which the app will run.'
required: false
cpu:
description: 'Required CPU. 1 core can be represented by 1 or 1000m. This should be 500m or 1 for Basic tier, and {500m, 1, 2, 3, 4} for Standard tier.'
required: false
memory:
description: 'Required memory. 1 GB can be represented by 1Gi or 1024Mi. This should be {512Mi, 1Gi, 2Gi} for Basic tier, and {512Mi, 1Gi, 2Gi, ..., 8Gi} for Standard tier.'
required: false
branding:
icon: 'icon.svg'
runs:
  # NOTE(review): the node12 action runtime is deprecated and no longer
  # available on GitHub-hosted runners — migrate to node16/node20 after
  # confirming dist/index.js is rebuilt for that target.
  using: 'node12'
  main: 'dist/index.js'
|
action.yml
|
jobs:
- template: '../steps/system_test_general.yaml'
parameters:
name: 'feature_gates_regression_kafka'
display_name: 'feature-gates-regression-bundle I. - kafka + oauth'
test_case: 'kafka/**/*ST,!kafka/dynamicconfiguration/**/*ST,security/oauth/**/*ST'
groups: 'regression'
cluster_operator_install_type: 'bundle'
timeout: 360
- template: '../steps/system_test_general.yaml'
parameters:
name: 'feature_gates_regression_security'
display_name: 'feature-gates-regression-bundle II. - security'
test_case: 'security/**/*ST,!security/oauth/**/*ST'
groups: 'regression'
cluster_operator_install_type: 'bundle'
timeout: 360
- template: '../steps/system_test_general.yaml'
parameters:
name: 'feature_gates_regression_connect_tracing_watcher'
display_name: 'feature-gates-regression-bundle III. - connect + tracing + watcher'
test_case: 'connect/**/*ST,tracing/**/*ST,watcher/**/*ST'
groups: 'regression'
cluster_operator_install_type: 'bundle'
timeout: 360
- template: '../steps/system_test_general.yaml'
parameters:
name: 'feature_gates_regression_operators'
display_name: 'feature-gates-regression-bundle IV. - operators'
test_case: 'operators/**/*ST'
groups: 'regression'
cluster_operator_install_type: 'bundle'
timeout: 360
- template: '../steps/system_test_general.yaml'
parameters:
name: 'feature_gates_regression_rollingupdate_watcher'
display_name: 'feature-gates-regression-bundle V. - rollingupdate'
test_case: 'rollingupdate/**/*ST'
groups: 'regression'
cluster_operator_install_type: 'bundle'
timeout: 360
- template: '../steps/system_test_general.yaml'
parameters:
name: 'feature_gates_regression_mirrormaker'
display_name: 'feature-gates-regression-bundle VI. - mirrormaker + dynamicconfiguration'
test_case: 'mirrormaker/**/*ST,kafka/dynamicconfiguration/**/*ST'
groups: 'regression'
cluster_operator_install_type: 'bundle'
timeout: 360
- template: '../steps/system_test_general.yaml'
parameters:
name: 'feature_gates_regression_all_remaining'
display_name: 'feature-gates-regression-bundle VII. - remaining system tests'
test_case: '!kafka/**/*ST,!mirrormaker/**/*ST,!connect/**/*ST,!security/**/*ST,!operators/**/*ST,!rollingupdate/**/*ST,!watcher/**/*ST,!tracing/**/*ST'
groups: 'regression'
cluster_operator_install_type: 'bundle'
timeout: 360
|
.azure/templates/jobs/feature_gates_regression_jobs.yaml
|
name: Nightly CI/CD
on:
schedule:
# minute 0, hour 2 UTC which is 6pm in PST (5pm PDT), any day of month, any month, any day of the week
# if we want to support only Mon - Fri we need to change the check how we look for new changes. Currently we
# check for any new changes in the last 24 hours regardless of day)
- cron: '0 2 * * *'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
checkForChanges:
name: Check for new changes
outputs:
hasChanged: ${{ steps.checkChange.outputs.hasChanged }}
runs-on: ubuntu-latest
steps:
# Check-out repo
- uses: actions/checkout@v2
# Check if any changes have been pushed to main since last release
- name: Check latest commit age
id: checkChange
# Here we query the github rest api for the commits, use jq (json parser) to grab the first commit and put contents into $HOME/commit.json
# Note: we ignore commits made by the bot account that commits the auto version bump changes.
# Then get the timestamp date for the first commit and check if it was over a day old.
run: |
curl -H 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' -sL https://api.github.com/repos/${{ github.repository }}/commits | jq -r '[.[] | select(.author.login != "github-action-bot")][0]' > $HOME/commit.json
commitdate=$(jq -r '.commit.author.date' $HOME/commit.json)
commiturl=$(jq -r '.url' $HOME/commit.json)
echo Last commit: $commiturl
echo Last commit date \(UTC\): $commitdate
timestamp=$(date --utc -d "$commitdate" +%s)
echo Commit timestamp: $timestamp
currentTimestamp=$(date --utc +%s)
echo Current timestamp: $currentTimestamp
days=$(( ($currentTimestamp - $timestamp ) / 86400 ))
echo Days since last commit: $days
if [ $days = "0" ]; then
echo "::set-output name=hasChanged::true"
else
echo "::set-output name=hasChanged::false"
fi
release:
name: Create alpha releases
needs: checkForChanges
if: needs.checkForChanges.outputs.hasChanged == 'true'
runs-on: ubuntu-latest
steps:
# Check-out repo
- uses: actions/checkout@v2
# Ensure node version is great enough
- name: Use Node.js v12.x
uses: actions/setup-node@v1
with:
node-version: '12.x'
# Install dependencies
- name: Install rush
run: npm install -g @microsoft/rush
- name: Install dependencies
run: rush install
# Get datetime
- name: Get datetime for alpha release name
id: datetime
run: |
echo "::set-output name=datetime::$(date +'%Y%m%d.%H%M')"
# Bump alpha package versions
- name: Bump alpha package versions
run: node common/config/node_modules/beachball/bin/beachball canary --canary-name alpha+${{ steps.datetime.outputs.datetime }} --tag dev --no-publish
# Build packages
- name: Build @azure/communication-react package
run: rush build -t @azure/communication-react
# Test Packages
- name: Test @azure/communication-react package
run: rush test -t @azure/communication-react
# Publish package
- name: Publish alpha packages
run: echo "No endpoint to publish to yet..."
|
.github/workflows/nightly-ci.yml
|
admin:
access_log_path: /tmp/admin_access.log
address: {socket_address: {address: 0.0.0.0, port_value: 9901}}
static_resources:
listeners:
- name: frontend_proxy
address: {socket_address: {address: 0.0.0.0, port_value: 57314}}
filter_chains:
- filters:
- name: envoy.http_connection_manager
config:
stat_prefix: frontend
stream_idle_timeout: 0s # not setting this to 0s can break long gRPC streams
codec_type: AUTO
route_config:
name: frontend_route
virtual_hosts:
- name: frontend_service
domains: ["*"]
routes:
- match: { prefix: "/Health/Check" }
route:
cluster: frontend_cluster
- match: { prefix: "/Frontend/ScanFile" }
route:
cluster: frontend_cluster
max_grpc_timeout: 0s # clients set their deadline
http_filters:
- name: envoy.router
# tls_context:
# common_tls_context:
# alpn_protocols: "h2"
# tls_certificates:
# - certificate_chain: { filename: "/path/to/envoy.crt" }
# private_key: { filename: "/path/to/envoy.key" }
clusters:
- name: frontend_cluster
connect_timeout: 0.5s
type: STRICT_DNS
lb_policy: LEAST_REQUEST
load_assignment:
cluster_name: frontend_cluster
endpoints:
- lb_endpoints:
- endpoint: {address: {socket_address: {address: strelka_frontend_1, port_value: 57314}}}
- endpoint: {address: {socket_address: {address: strelka_frontend_2, port_value: 57314}}}
# required for gRPC clusters
http2_protocol_options: { }
# required to keep gRPC stream connections alive
upstream_connection_options:
tcp_keepalive:
keepalive_probes: 4
keepalive_time: 15
keepalive_interval: 15
# circuit breaker *_requests should be 10x the times of expected active requests
# this number should be estimated based on the overall potential concurrency of all clients
circuit_breakers:
thresholds:
max_pending_requests: 10240
max_requests: 10240
max_retries: 0 # gRPC streams cannot be retried, client-side retry is required
# ensures upstream servers are up and available
health_checks:
timeout: 0.5s
interval: 30s
interval_jitter: 1s
unhealthy_threshold: 1
healthy_threshold: 1
grpc_health_check: { }
|
misc/envoy/frontend/envoy.yaml
|
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
name: monitor-metallb-speaker
name: monitor-metallb-speaker
namespace: '{{.NameSpace}}'
spec:
endpoints:
- interval: 30s
port: speaker-metrics
bearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token"
scheme: "https"
tlsConfig:
caFile: "/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt"
serverName: "metallb-speaker-monitor-service.{{.NameSpace}}.svc"
certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt
keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key
- interval: 30s
port: frr-metrics
bearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token"
scheme: "https"
tlsConfig:
caFile: "/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt"
serverName: "metallb-speaker-monitor-service.{{.NameSpace}}.svc"
certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt
keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key
jobLabel: "app.kubernetes.io/component"
namespaceSelector:
matchNames:
- '{{.NameSpace}}'
selector:
matchLabels:
name: metallb-speaker-monitor-service
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/scrape: "true"
service.beta.openshift.io/serving-cert-secret-name: speaker-certs-secret
labels:
name: metallb-speaker-monitor-service
name: metallb-speaker-monitor-service
namespace: '{{.NameSpace}}'
spec:
selector:
app.kubernetes.io/component: speaker
clusterIP: None
ports:
- name: speaker-metrics
port: 7472
targetPort: 27472
- name: frr-metrics
port: 7473
targetPort: 27473
sessionAffinity: None
type: ClusterIP
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
name: monitor-metallb-controller
name: monitor-metallb-controller
namespace: '{{.NameSpace}}'
spec:
endpoints:
- interval: 30s
port: controller-metrics
bearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token"
scheme: "https"
tlsConfig:
caFile: "/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt"
serverName: "metallb-controller-monitor-service.{{.NameSpace}}.svc"
certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt
keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key
jobLabel: "app.kubernetes.io/component"
namespaceSelector:
matchNames:
- '{{.NameSpace}}'
selector:
matchLabels:
name: metallb-controller-monitor-service
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/scrape: "true"
service.beta.openshift.io/serving-cert-secret-name: controller-certs-secret
labels:
name: metallb-controller-monitor-service
name: metallb-controller-monitor-service
namespace: '{{.NameSpace}}'
spec:
selector:
app.kubernetes.io/component: controller
clusterIP: None
ports:
- name: controller-metrics
port: 7472
targetPort: 27472
sessionAffinity: None
type: ClusterIP
|
bindata/deployment/frr/service-monitors.yaml
|
trigger:
- master
# using ubuntu latest vmImage
pool:
vmImage: 'ubuntu-latest'
pr: 'none'
stages:
- stage:
jobs:
- job: 'build'
steps:
- task: SonarCloudPrepare@1
inputs:
SonarCloud: 'sulavAryalpipeLineDemo'
organization: '042020-dotnet-uta'
scannerMode: 'MSBuild'
projectKey: '042020-dotnet-uta_sulavAryal-repo0'
projectName: 'sulavAryal-repo0'
extraProperties: |
sonar.exclusions=**/obj/**,**/*.dll
sonar.branch.name=$(Build.SourceBranchName)
sonar.cs.opencover.reportsPaths=$(Build.SourcesDirectory)/**/coverage.opencover.xml
sonar.cs.vstest.reportsPaths=$(Agent.TempDirectory)/*.trx
- script: dotnet build 'Project 0_Working\ConsoleShopper.UI\ConsoleShopper.UI.csproj'
- task: DotNetCoreCLI@2
displayName: Run Tests
inputs:
command: test
arguments: '--configuration $(BuildConfiguration) /p:CollectCoverage=true /p:CoverletOutputFormat=opencover --logger trx'
projects: '**/*Tests.csproj'
nobuild: true
    - script: |
        dotnet tool install -g dotnet-reportgenerator-globaltool
      displayName: Install reportgenerator
    # NOTE(review): this bare DotNetCoreCLI@2 task has no inputs (it would run
    # with the task's defaults) and looks like an accidental leftover of the
    # fully-configured test task two steps below — confirm intent and remove.
    - task: DotNetCoreCLI@2
    - script: echo "##vso[task.prependpath]$HOME/.dotnet/tools"
- task: DotNetCoreCLI@2
inputs:
command: test
arguments: '--configuration $(BuildConfiguration) /p:CollectCoverage=true /p:CoverletOutputFormat=opencover --logger trx'
projects: '**/*Tests.csproj'
- script: reportgenerator "-reports:Project 0_Working\ConsoleShopper.Tests\coverage.opencover.xml" "-targetdir:Project 0_Working\ConsoleShopper.Tests\Coverage\" "-reporttypes:HtmlInline_AzurePipelines;Cobertura"
workingDirectory: 'Project 0_Working\ConsoleShopper.Tests\'
displayName: Create Code Coverage Report
- task: PublishCodeCoverageResults@1
displayName: 'Publish Code Coverage'
inputs:
codeCoverageTool: Cobertura
summaryFileLocation: '$(Build.SourcesDirectory)/Coverage/Cobertura.xml'
reportDirectory: '$(Build.SourcesDirectory)/Coverage'
- task: SonarCloudAnalyze@1
displayName: 'Run Code Analysis'
continueOnError: false
- task: SonarCloudPublish@1
inputs:
pollingTimeoutSec: '300'
|
azure-pipelines.yml
|
---
spec_name: 'template'
spec_version: '2022-03-17'
# -------------------------------------------------
# - Submission scripts -
# -------------------------------------------------
# All run script templates will be pre-formatted with these parameters.
script_global_settings: {
n_thr: 8,
mem_mb: 15000,
fd_thr: 0.3
}
# Header of script to be submitted to sbatch
# Very important! this is actually substiting based on what you gave us in the inputs...
# because sometimes you need to have memory on sbatch be higher than on your job!
header: |
#!/bin/bash -e
#SBATCH --job-name=${job_name}
#SBATCH --output=${log_path}
#SBATCH --partition=broadwl
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=${n_tasks}
#SBATCH --mem=${mem}
#SBATCH --time=${time}
${job_array}
# Preamble, might include necessary modules to be loaded
preamble: |
# load necessary modules
module load python
echo "~~~~~~~~~~~~~ BEGIN SLURM JOB ~~~~~~~~~~~~~~"
# Additional stuff to add to the end of an array
array_footer: |
# sanity checks
echo "SLURM_JOBID: " $SLURM_JOBID
echo "SLURM_ARRAY_TASK_ID: " $SLURM_ARRAY_TASK_ID
echo "SLURM_ARRAY_JOB_ID: " $SLURM_ARRAY_JOB_ID
bash ${path_to_array}
exit
run_script: |
#!/bin/bash -e
echo "Begin running job ${job_id}, corresponding to:"
echo "Subject ${subject}"
echo "-----------------------------"
echo "create output dir"
  if [ ! -d ${this_job_output_dir} ]; then
  mkdir -p ${this_job_output_dir}
  fi
# print the input file we generated earlier
cat ${this_job_inputs_dir}/test_file.txt
echo "doing work stuff" > ${this_job_work_dir}/${job_id}.txt
echo "Job complete!"
cp -r ${this_job_work_dir}/* ${this_job_output_dir}/
exit_status=$?
echo "exit status: $exit_status"
if [ $exit_status -eq 0 ]
then
echo "it appears things went well, go ahead and rm work and input dirs from scratch"
rm -rf ${this_job_work_dir}
echo "SUCCESS"
echo $exit_status
exit 0
else
echo "it appears things did not go well. we wont touch nothing"
echo "FAILURE"
echo $exit_status
exit 1
fi
clean_script: |
#!/bin/bash -e
echo "Cleaning up for job ${job_id}, corresponding to:"
echo "Subject ${subject}"
echo "-----------------------------"
if [ -d ${this_job_work_dir} ]; then
echo "Deleting working directory for job ${job_id}..."
rm -rf ${this_job_work_dir}
else
echo "No working directory detected for job ${job_id}. Skipping..."
fi
if [ -f ${this_job_log_file} ]; then
echo "Deleting log file for job ${job_id}..."
rm ${this_job_log_file}
else
echo "No log file detected for job ${job_id}. Skipping..."
fi
echo "Done."
exit
copy_script: |
#!/bin/bash -e
echo "Copying inputs for job ${job_id}, corresponding to:"
echo "Subject ${subject}"
echo "-----------------------------"
echo "Creating directory structure"
if [ ! -d ${this_job_inputs_dir} ]; then
mkdir -p ${this_job_inputs_dir}
fi
echo "Copying made up inputs..."
echo "howdy" > ~/test_file.txt
cp ~/test_file.txt ${this_job_inputs_dir}/
rm ~/test_file.txt
echo "Done"
echo "-----------------------------"
exit
# -------------------------------------------------
# - Inputs and outputs -
# -------------------------------------------------
# This contains information about the jobs to be run.
# See example for details.
database: '~/template.csv'
# Base directory where outputs are stored
output_path: "~/outputs"
# Subject-specific sub-directory (e.g., for BIDS)
output_path_subject: ['sub-{subject}']
base_directory_name: 'template_batch'
# -------------------------------------------------
# - Job specification -
# -------------------------------------------------
expected_n_files: 1 # for a given run
job_ramp_up_time: {
minutes: 0
}
job_time: {
minutes: 1
}
max_job_time: {
hours: 0,
minutes: 1
}
|
slurmhelper/specs/template_2022-03-17.yml
|
- name: Install support software
apt: name={{ item }}
with_items:
- git
- sqlite3
- imagemagick
- name: Install web server
apt: name={{item}}
with_items:
- apache2
- libapache2-mod-passenger
- libapache2-mod-xsendfile
- name: Stop web server
service: name=apache2 state=stopped
- name: Install httpd.conf files
template: src={{item}}.j2 dest=/etc/apache2/sites-available/{{item}}
with_items:
- 000-default.conf
- default-ssl.conf
- name: Install a mod_xsendfile config file
template: src=xsendfile.conf.j2 dest=/etc/apache2/mods-available/xsendfile.conf
- name: Install ruby runtime and dev requirements
apt: name={{item}}
with_items:
- ruby-bundler
- libsqlite3-dev
- libmysqlclient-dev
- libssl-dev
- libvirt-dev
- name: Install WCC companion software
apt: name={{ item }}
with_items:
- postfix
- proftpd
- name: Clone WCC
git: repo=git://wcc-git.wide.ad.jp/WCC dest={{wcc_path}}
- name: Fix owner and group of the WCC directory
file: path={{wcc_path}}
owner=www-data
group=www-data
recurse=yes
- name: Install WCC ruby bundles
sudo_user: www-data
script: install-bundle.sh {{wcc_path}}
- name: Configure WCC
sudo_user: www-data
template: src={{item}}.j2 dest={{wcc_path}}/config/initializers/{{item}}
with_items:
- secret_token.rb
- action_mailer.rb
- devise.rb
- envs.rb
- name: Configure vncproxy
sudo_user: www-data
template: src=config.yml.j2 dest={{wcc_path}}/config/config.yml
- name: Do rake
sudo_user: www-data
script: rake.sh {{wcc_path}}
- name: Copy the HV status check script
sudo_user: www-data
copy: src=check_hv.rb dest={{wcc_path}}/script/check_hv.rb
mode=755
- name: Install the HV status checking job
cron: name="Run check_hv.rb"
user=www-data
minute="*/7"
job="(export set RAILS_ENV=production; cd {{wcc_path}}; /usr/bin/bundle exec rails runner script/check_hv.rb > /dev/null 2>&1)"
- name: Install the IPv4 addr status checking job
cron: name="Run check_vmaddr.rb"
user=www-data
minute="*/5"
job="(export set RAILS_ENV=production; cd {{wcc_path}}; /usr/bin/bundle exec rails runner script/check_vmaddr.rb > /dev/null 2>&1)"
- name: Delete existing storage entries
sudo_user: www-data
command: sqlite3 {{wcc_path}}/db/production.sqlite3 'delete from storage_locations'
- name: Configure the default storage entry
sudo_user: www-data
command: sqlite3 {{wcc_path}}/db/production.sqlite3 'insert into storage_locations (id, name, hostname) values ({{wcc_default_storage_location}}, "{{wcc_storage_path}}", "{{wcc_storage_node}}")'
- name: Configure ProFTPd
copy: src=proftpd-wcc.conf dest=/etc/proftpd/conf.d/proftpd-wcc.conf
mode=644
- name: Fix the permission of the FTP directory
file: path={{wcc_upload_path}}
group=www-data
mode=775
- name: Restart ProFTPd
service: name=proftpd state=restarted
- name: Enable apache2 sites
command: a2ensite {{item}}
with_items:
- 000-default
- default-ssl
- name: Enable apache2 modules
command: a2enmod {{item}}
with_items:
- ssl
- passenger
- xsendfile
- rewrite
- name: Restart apache2
service: name=apache2 state=restarted
|
roles/wcc/tasks/main.yml
|
_id: 2d0291e0-9a38-11e9-96b3-ab4686ae2525
message: >-
Familial gdu.aydp.hashtafak.github.io.pun.kz phacoemulsion bileaflet
[URL=http://allwallsmn.com/amoxicillin-500-mg/]amoxicillin online[/URL]
[URL=http://heavenlyhappyhour.com/glucophage/]glucophage[/URL]
[URL=http://disclosenews.com/fildena/]fildena[/URL]
[URL=http://ezhandui.com/cialis-price/]generic cialis 20mg[/URL]
[URL=http://tattoosideasblog.com/ventolin/]ventolin[/URL] ventolin
[URL=http://disclosenews.com/female-cialis/]female cialis[/URL]
[URL=http://heavenlyhappyhour.com/zanaflex-online/]medicine zanaflex[/URL]
[URL=http://thegrizzlygrowler.com/generic-viagra/]viagra[/URL] post-coronary
signifying carers, <a href="http://allwallsmn.com/amoxicillin-500-mg/">buy
amoxicillin online without prescription</a> <a
href="http://heavenlyhappyhour.com/glucophage/">glucophage</a> <a
href="http://disclosenews.com/fildena/">fildena without a prescription</a> <a
href="http://ezhandui.com/cialis-price/">cialis canadian</a> <a
href="http://tattoosideasblog.com/ventolin/">ventolin</a> <a
href="http://disclosenews.com/female-cialis/">buy cheap female cialis in
uk</a> <a href="http://heavenlyhappyhour.com/zanaflex-online/">in which
disease devomine tablet use eormistin zanaflex</a> <a
href="http://thegrizzlygrowler.com/generic-viagra/">generic viagra</a>
viagra.com pre-actinic
http://allwallsmn.com/amoxicillin-500-mg/#generic-amoxicillin-500-mg generic
amoxicillin 500 mg
http://heavenlyhappyhour.com/glucophage/#glucophage-for-sale glucophage
http://disclosenews.com/fildena/#fildena fildena
http://ezhandui.com/cialis-price/#cialis cialis
http://tattoosideasblog.com/ventolin/#ventolin ventolin
http://disclosenews.com/female-cialis/#cheap-female-cialis fda approved cialis
female viagra
http://heavenlyhappyhour.com/zanaflex-online/#biorganic-vitamin-c-zanaflex-levkeran
buy zanaflex http://thegrizzlygrowler.com/generic-viagra/#online-viagra buy
viagra cheap confidential efficiency.
name: otusikipax
email: <PASSWORD>
url: 'http://allwallsmn.com/amoxicillin-500-mg/'
hidden: ''
date: '2019-06-29T06:36:07.802Z'
|
_data/comments/dear-diary/comment-1561790167802.yml
|
---
# The hostname of this host.
clean_hostname: "{{ clean_hostname | splitext | first | lower }}"
# The cluster name, e.g. "my-happy-cluster". Set in inventory.
k8s_cluster: null
# The cluster role ('master' or 'node'). Set in inventory.
k8s_role: null
# All K8s hosts, regardless of role or cluster.
k8s_hosts: []
# All K8s masters, regardless of cluster.
k8s_master_hosts: []
# All K8s nodes, regardless of cluster.
k8s_node_hosts: []
# All K8s hosts in this cluster.
k8s_cluster_hosts: []
# The IP address of this host.
ipv4_address: "{{ ansible_default_ipv4.address | default(ansible_all_ipv4_addresses[0]) }}"
# Account ID, access key, etc used to create policies, users, Route 53 records, etc.
aws_region: 'us-east-1'
aws_account_id: null
aws_default_access_key_id: null
aws_default_secret_access_key: null
# Name and password of IAM user for cluster.
k8s_aws_iam_cluster_user: null
k8s_aws_iam_cluster_password: null
# Pattern for which SSM parameters this cluster is permitted to access.
k8s_aws_ssm_policy_pattern: "{{ k8s_aws_iam_cluster_user }}.*"
# Prefix of SSM parameter store data for cluster.
k8s_aws_iam_cluster_ssm_parameter_store_data_prefix: null
# Name of SSM parameter store data for cluster.
k8s_aws_iam_cluster_ssm_parameter_store_data_name: "{{ k8s_aws_iam_cluster_ssm_parameter_store_data_prefix }}{{ k8s_cluster }}.cluster_data"
# A Route 53 "private" zone ID.
route53_private_zone_id: null
# A Route 53 "personal" zone ID.
route53_personal_zone_id: null
# A private domain name.
private_domain_name: 'example.net'
# A personal domain name.
personal_domain_name: 'example.com'
# A "private" FQDN.
host_private_fqdn: "{{ k8s_cluster }}.{{ private_domain_name }}"
# A "personal" FQDN.
host_personal_fqdn: "{{ k8s_cluster }}.{{ personal_domain_name }}"
# Prefix to NFS path exported to k8s clusters.
k8s_nfs_prefix: null
# Cluster groups are special groups that have a defined setup task.
k8s_cluster_groups:
- 'get_config' # Get Kubectl config.
- 'route53' # Add DNS records.
- 'helm' # Install Helm for package control.
- 'metrics_server' # Local metrics gathering and reporting.
- 'nfs_provisioner' # Allow provisioning persistent network storage.
- 'local_storage_provisioner' # Allow provisioning persistent local storage.
- 'traefik' # Install and setup Traefik as Ingress Controller.
- 'argocd' # Install ArgoCD as GitOps controller.
- 'whoami' # Install Whoami as test service.
- 'prometheus' # Install Kube-Prometheus-Stack for monitoring and reporting.
- 'aws_iam_user' # Create an AWS IAM user for the cluster.
- 'aws_s3_bucket' # Create an S3 bucket for the cluster.
- 'aws_iam_policy' # Allow retrieval of secrets from AWS SSM Parameter Store.
- 'external_secrets' # Allow secrets to persist past cluster lifetimes.
# Some cluster groups contain all hosts.
k8s_universal_cluster_groups:
- 'get_config'
- 'route53'
- 'helm'
- 'metrics_server'
- 'nfs_provisioner'
- 'local_storage_provisioner'
- 'traefik'
- 'argocd'
- 'whoami'
- 'prometheus'
- 'aws_iam_user'
- 'aws_s3_bucket'
- 'aws_iam_policy'
- 'external_secrets'
# The hostname of the K8s NFS server.
k8s_nfs_server: null
# An easy password for accessing some services.
easy_password: null
# S3 bucket for this cluster.
k8s_s3_bucket_name: "clusters.{{ clean_hostname }}"
# A GitHub token for this cluster.
k8s_github_token: null
# Kubernetes version.
k8s_version: '1.20'
# LetsEncrypt private domain resolver name.
letsencrypt_private_domain_resolver: null
# LetsEncrypt email address.
letsencrypt_email_address: null
# Traefik directory.
traefik_data_dir: '/mnt/host/traefik'
# Storage directory.
k8s_storage_dir: '/mnt/host/k8s'
|
roles/hellholt.pve_k8s/defaults/main.yaml
|
name: create-release
on:
workflow_dispatch:
inputs:
version:
description: 'The Version'
required: true
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set Version in Cargo
run: |
sed -i ':a;N;$!ba;s/name = "cqlsh"\nversion = "[^"]*"/name = "cqlsh"\nversion = "${{ github.event.inputs.version }}"/g' Cargo.toml
sed -i ':a;N;$!ba;s/name = "cqlsh"\nversion = "[^"]*"/name = "cqlsh"\nversion = "${{ github.event.inputs.version }}"/g' Cargo.lock
- name: Install targets
run: |
sudo apt-get install -y mingw-w64
rustup target add x86_64-pc-windows-gnu
rustup target add x86_64-unknown-linux-gnu
- uses: Swatinem/rust-cache@v1
- name: Run tests
run: cargo test --verbose
- name: Build Linux
run: cargo build --target x86_64-unknown-linux-gnu --verbose --release
- name: Create Linux artifacts
run: |
mkdir staging
upx -9 -o staging/cqlsh target/x86_64-unknown-linux-gnu/release/cqlsh
cd staging
zip cqlsh-linux.zip cqlsh
tar -cvzf cqlsh-linux.tar.gz cqlsh
rm cqlsh
- name: Build Windows
run: cargo build --target x86_64-pc-windows-gnu --verbose --release
- name: Create Windows artifacts
run: |
upx -9 -o staging/cqlsh.exe target/x86_64-pc-windows-gnu/release/cqlsh.exe
cd staging
zip cqlsh-windows.zip cqlsh.exe
tar -cvzf cqlsh-windows.tar.gz cqlsh.exe
rm cqlsh.exe
- name: Build-musl macOS x86
uses: Shogan/rust-musl-action@v1.0.2
with:
args:
cargo build --target x86_64-apple-darwin --verbose --release
- name: Create MacOS artifacts
run: |
upx -9 -o staging/cqlsh target/x86_64-apple-darwin/release/cqlsh
cd staging
zip cqlsh-macos.zip cqlsh
tar -cvzf cqlsh-macos.tar.gz cqlsh
rm cqlsh
- name: Commit to the repo
run: |
git config --global user.name "28Smiles"
git config --global user.email "<EMAIL>"
git add Cargo.toml Cargo.lock
git tag "v${{ github.event.inputs.version }}" $(git commit -m "chore: Bump Version" | sed -nE 's/^\[[^ ]+\s([0-9a-f]+)\].*$/\1/p')
git push
git push --tags
- uses: marvinpinto/action-automatic-releases@latest
with:
repo_token: "${{ secrets.GITHUB_TOKEN }}"
automatic_release_tag: "v${{ github.event.inputs.version }}"
prerelease: false
title: "v${{ github.event.inputs.version }}"
files: |
staging/*
LICENSE
README.md
- uses: actions/upload-artifact@v2
with:
name: package
path: staging
|
.github/workflows/create-release.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-17 03:08"
game: "Unreal Tournament"
name: "DM-]NBK[-ImmortalsTown"
author: "-]NBK[-^NiX*"
description: "None"
releaseDate: "2014-01"
attachments: []
originalFilename: "DM-]NBK[-ImmortalsTown.zip"
hash: "121ce4f29f5fa892a6793399d62a519c5207c624"
fileSize: 58458036
files:
- name: "Ribeira2.utx"
fileSize: 9275225
hash: "caa0d4b986807a46607e848457e8d496441a0c9e"
- name: "[BBST]-Hollywood.utx"
fileSize: 9155281
hash: "dacff927b8e047d2c2ec9eae033ff58b48e05c7f"
- name: "sliderTexturesV2.utx"
fileSize: 1507364
hash: "bfe13c91b91b8caa62166bb4c71f767c64237888"
- name: "Coret_FX.utx"
fileSize: 1024846
hash: "c5cc6701885ba74abc1f772fd6b8a6c95690bc31"
- name: "city_rain.utx"
fileSize: 6359116
hash: "42d53ab3564dc9a4b45f2c35ca687123f951a60b"
- name: "Citytex.utx"
fileSize: 1292670
hash: "58a03339982c078310cd46904695fc1e66c28c15"
- name: "!!![UT]Townplan.utx"
fileSize: 4669156
hash: "7f0b615be3edee2869fc3b769f8a2b0eb75de5b5"
- name: "citydocdoom.utx"
fileSize: 1487186
hash: "042cbaa54ea3fef0087f03909adb5507b2603511"
- name: "DM-]NBK[-ImmortalsTown.unr"
fileSize: 10996239
hash: "3ce202a90d52c93fe775cb7d066ccf212b499ca3"
- name: "Hope.utx"
fileSize: 12006832
hash: "6434c974569e40df433c361f83887c4d411c9bce"
- name: "!!SD!!-LittleThings.utx"
fileSize: 3488271
hash: "3473596172b4b74d236ae576557fd32e8aa538dc"
- name: "[BBST]-Airport.utx"
fileSize: 7541701
hash: "5ede6862a4b4f443c3311c33924925f24ee5768d"
- name: "NBKHasteTextures.utx"
fileSize: 620879
hash: "13ef3b862f7e074c9f65656c83bfb9b94c6325dc"
- name: "dino081199.utx"
fileSize: 4324206
hash: "493c722bb86b552eea2c19d88403b1c3cea27003"
- name: "Starship.utx"
fileSize: 7590442
hash: "b65f117faf61e374949d6b583bb96c101fbc954b"
- name: "zeitkind.utx"
fileSize: 1895633
hash: "46bf1e580f96cf9a47d9d94143e66731245f4d6b"
- name: "WesternWood.utx"
fileSize: 4466382
hash: "1b1fa7919a66168f2bf0eee06a308cd046e033fa"
- name: "BestICann.umx"
fileSize: 7236993
hash: "c12e9b98ea4b497b19e0066ebe763239b820b7e3"
- name: "city_cool.utx"
fileSize: 3422055
hash: "9f55bf0c8c5088b090dbdbcf8d8b0f1ab3484911"
- name: "nya_inf_tex.utx"
fileSize: 2710801
hash: "251c0334181d22cd5099fd81a4e1fd1cff5162bc"
- name: "ASC-m5.utx"
fileSize: 2453112
hash: "d4f424066bf91ef50f46655711572a92c4460bc2"
otherFiles: 1
dependencies:
DM-]NBK[-ImmortalsTown.unr:
- status: "OK"
name: "[BBST]-Airport"
- status: "OK"
name: "zeitkind"
- status: "OK"
name: "NBKHasteTextures"
- status: "OK"
name: "sliderTexturesV2"
- status: "OK"
name: "city_cool"
- status: "OK"
name: "!!SD!!-LittleThings"
- status: "OK"
name: "dino081199"
- status: "OK"
name: "nya_inf_tex"
- status: "OK"
name: "city_rain"
- status: "OK"
name: "!!![UT]Townplan"
- status: "OK"
name: "citydocdoom"
- status: "OK"
name: "WesternWood"
- status: "OK"
name: "Citytex"
- status: "OK"
name: "[BBST]-Hollywood"
- status: "OK"
name: "BestICann"
- status: "OK"
name: "Hope"
- status: "OK"
name: "Ribeira2"
- status: "OK"
name: "ASC-m5"
downloads:
- url: "http://ut99maps.gamezoo.org/maps.html"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/DeathMatch/N/DM-%5DNBK%5B-ImmortalsTown.zip"
main: true
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/DeathMatch&file=DM-%5DNBK%5B-ImmortalsTown.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/DeathMatch/N/1/2/1ce4f2/DM-%255DNBK%255B-ImmortalsTown.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/DeathMatch/N/1/2/1ce4f2/DM-%255DNBK%255B-ImmortalsTown.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DM-]NBK[-ImmortalsTown"
playerCount: "Unknown"
themes:
Industrial: 0.4
City: 0.4
Skaarj Tech: 0.2
bots: false
|
content/Unreal Tournament/Maps/DeathMatch/N/1/2/1ce4f2/dm-nbk-immortalstown_[121ce4f2].yml
|
version: 2.1
executors:
default-jvm:
working_directory: ~/repo/maybe
docker:
- image: circleci/openjdk:15-jdk-buster
environment:
JVM_OPTS: -Xmx4096m
TERM: dumb
commands:
setup:
description: "Setup the environment to run tasks on CI"
steps:
- checkout:
path: ~/repo/maybe
- run:
name: Provisioning
command: |
cp gradle.properties.sample gradle.properties
sed -i "s/<SIGNING_PASSWORD>/$SIGNING_PASSWORD/g" gradle.properties
sed -i "s/<OSSRH_PASSWORD>/$OSSRH_PASSWORD/g" gradle.properties
- restore_cache:
keys:
- v4-dependencies-{{ checksum "buildscript-gradle.lockfile" }}-{{ checksum "gradle.lockfile" }}
- v4-dependencies-{{ checksum "buildscript-gradle.lockfile" }}-
- v4-dependencies-
store-cache:
    description: "Store Gradle configuration and dependencies in cache"
steps:
- save_cache:
paths:
- ~/repo/maybe/.gradle
- ~/.gradle
key: v4-dependencies-{{ checksum "buildscript-gradle.lockfile" }}-{{ checksum "gradle.lockfile" }}
jobs:
build:
executor: default-jvm
steps:
- setup
- run:
name: Compile
command: ./gradlew compileJava compileTestJava
- run:
name: Lint Gradle
command: ./gradlew lintGradle
- run:
name: SonarLint
command: ./gradlew sonarlintMain sonarlintTest
- run:
name: Checkstyle
command: ./gradlew checkstyleMain checkstyleTest
- run:
name: Test
command: ./gradlew test
- run:
name: Build
command: ./gradlew build
- run:
name: Coverage
command: ./gradlew jacocoTestReport
- store-cache
- run:
name: Upload reports
command: bash <(curl -s https://codecov.io/bash)
- store_test_results:
path: build/test-results
publish-snapshot:
executor: default-jvm
steps:
- setup
- run:
name: Publish to Sonatype Snapshot OSSRH
command: ./gradlew publish
- store-cache
publish-release:
executor: default-jvm
steps:
- setup
- run:
name: Upload to Sonatype Release OSSRH
command: ./gradlew -PreleaseVersion publish
- run:
name: Close and Release to Maven Central
command: ./gradlew -PreleaseVersion closeAndReleaseRepository
- store-cache
workflows:
main:
jobs:
- build
- publish-snapshot:
requires:
- build
filters:
branches:
only: master
- publish-release:
requires:
- build
filters:
branches:
only: release
|
.circleci/config.yml
|
---
- type: replace
path: /releases/-
value:
name: jumpbox
version: latest
- type: replace
path: /releases/name=routing?
value:
name: routing
version: latest
- type: replace
path: /instance_groups/-
value:
name: kafka
instances: 3
jobs:
- name: jumpbox #just to mark instance group as k3s-agent target
release: jumpbox
properties:
jumpbox:
one_time_setup: false
vm_type: 1cpu-4g
stemcell: default
networks:
- name: ((network))
persistent_disk_type: ((disk_type))
azs: [z1,z2]
#install kafka manager helm chart
- type: replace
path: /addons/name=10-add-servers/jobs/name=action/properties/actions/-
value:
type: helm_chart
name: kafka-manager
chart: stable/kafka-manager
namespace: kafka
version: ((helm_kafka_manager))
properties:
- name: zkHosts
value: paas-templates-bi-zookeeper-client:2181 #dns of zookeeper service
#expose kafka manager ui through oauth2 + traefik +gorouters
- type: replace
path: /addons/name=10-add-servers/jobs/name=action/properties/actions/-
value:
type: helm_chart
name: oauth2-proxy-kafka-manager
chart: stable/oauth2-proxy
namespace: kafka
version: ((helm_oauth2))
properties:
- name: config.clientID
value: generic-ops
- name: config.clientSecret
value: ((/bosh-master/ops-routing/generic-ops-client-secret))
- name: config.cookieSecret
value: ((/bosh-master/ops-routing/cookie_secret_ops))
#create traefik 1 ingress for kafka manager access
- type: replace
path: /addons/name=10-add-servers/jobs/name=action/properties/actions/-
value:
type: kubectl
name: "kafka-manager-ui-ingress"
cmd: "apply"
options: ""
content:
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: kafka-manager-ui-ingress
namespace: kafka
spec:
rules:
- host: kafka-bi-manager.((/secrets/cloudfoundry_ops_domain))
http:
paths:
- path: /
backend:
serviceName: oauth2-proxy-kafka-manager
servicePort: 80
# register route in ops gorouter
- type: replace
path: /addons/name=20-add-agents/jobs/name=route_registrar/properties/route_registrar/routes/-
value:
name: kafka-bi-manager
uris:
- kafka-bi-manager.((/secrets/cloudfoundry_ops_domain))
port: 80
registration_interval: 20s
|
master-depls/metabase/template/70-enable-bi-cdc-event-infra/3-kafka-bi-operators.yml
|
name: Application Benchmarks Tests
on:
push:
branches:
- main
pull_request:
branches:
- main
schedule:
# run every day at 1AM
- cron: '0 1 * * *'
concurrency:
group: ${{ github.repository }}-${{ github.ref }}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
Checks:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest]
python-version: [3.8]
steps:
- name: Print Concurrency Group
env:
CONCURRENCY_GROUP: ${{ github.repository }}-${{ github.ref }}-${{ github.head_ref }}
run: |
echo -e "\033[31;1;4mConcurrency Group\033[0m"
echo -e "$CONCURRENCY_GROUP\n"
shell: bash
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Dependencies
run: |
pip install -U -r requirements-dev.txt
sudo apt-get -y install pandoc graphviz
sudo apt-get -y install python3-enchant
sudo apt-get -y install hunspell-en-us
pip install pyenchant
shell: bash
- uses: ./.github/actions/install-main-dependencies
- uses: ./.github/actions/install-applications
- run: pip check
if: ${{ !cancelled() }}
shell: bash
- run: make copyright
if: ${{ !cancelled() }}
shell: bash
- run: make spell
if: ${{ !cancelled() }}
shell: bash
- name: Style Check
run: |
make clean_sphinx
make style
if: ${{ !cancelled() }}
shell: bash
- run: make lint
if: ${{ !cancelled() }}
shell: bash
- run: make mypy
if: ${{ !cancelled() }}
shell: bash
- name: Run make html
run: |
make clean_sphinx
make html SPHINXOPTS=-W
cd docs/_build/html
mkdir artifacts
tar -zcvf artifacts/documentation.tar.gz --exclude=./artifacts .
if: ${{ !cancelled() }}
shell: bash
- name: Run upload documentation
uses: actions/upload-artifact@v2
with:
name: documentation
path: docs/_build/html/artifacts/documentation.tar.gz
if: ${{ !cancelled() }}
Benchmarks-Dev:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
python-version: [3.8]
domain: ["finance", "machine_learning", "nature", "optimization"]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install Dependencies
run: |
pushd ${{ matrix.domain }}
if [ -n "$(find benchmarks/* -not -name '__*' | head -1)" ]; then
pip install -U asv virtualenv
# for qiskit-aer build under asv
sudo apt-get -y install libopenblas-dev
fi
popd
shell: bash
- name: Benchmarks
run: |
pushd ${{ matrix.domain }}
if [ -n "$(find benchmarks/* -not -name '__*' | head -1)" ]; then
asv machine --yes
asv run --quick --show-stderr
fi
popd
shell: bash
|
.github/workflows/main.yml
|
name: Staging
on:
push:
branches: [ main ]
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
jobs:
build:
runs-on: ubuntu-latest
steps:
- name : Send Slack Notification (Start)
uses: act10ns/slack@v1
with:
status: ${{ job.status }}
channel: '#crime-map-app'
if: always()
- name : Checkout Repo
uses: actions/checkout@v2
- name: Setup Java
uses: actions/setup-java@v1
with:
java-version: '12.x'
- name : Setup Flutter
uses: subosito/flutter-action@v1
with:
flutter-version: '2.2.3'
- name: Flutter Pub get
run : |
echo flutter clean
echo flutter pub get
- name : Flutter Analyze
run : flutter analyze
- name : Run Test
run : flutter test
- name : Run Test Coverage
run : flutter test --coverage
- name : Install lcov
run : sudo apt-get install -y lcov
# - name : Remove Generated All Generated Files
# run : lcov --remove coverage/lcov.info 'lib/*/*.part.dart' 'lib/generated_plugin_registrant.dart' -o coverage/lcov.info
- name : Upload Coverage To Codecov
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: coverage/lcov.info
- name : Build Apk
run: flutter build apk --target-platform android-arm,android-arm64,android-x64 --split-per-abi
- name: Upload Artifact To Firebase App Distribution
uses: wzieba/Firebase-Distribution-Github-Action@v1
with:
appId: ${{secrets.FIREBASE_APP_ID}}
token: ${{secrets.FIREBASE_TOKEN}}
file: build/app/outputs/flutter-apk/app-armeabi-v7a-release.apk
# - name : Upload Apk To Slack
# uses: adrey/slack-file-upload-action@master
# with:
# token: ${{secrets.SLACK_TOKEN}}
# path: build/app/outputs/flutter-apk/app-armeabi-v7a-release.apk
# channel: crime-map-app
- name: Slack Notification(Complete)
uses: act10ns/slack@v1
with:
status: ${{ job.status }}
steps: ${{ toJson(steps) }}
channel: '#crime-map-app'
if: always()
|
.github/workflows/stagging.yml
|
l_german:
embassies_perk_modifier:0 "Botschaften"
embassies_perk_modifier_desc:0 "Dieser Charakter profitiert von Botschaften, die an fremden Höfen eingerichtet wurden."
confidants_perk_modifier:0 "Vertraute"
confidants_perk_modifier_desc:0 "Dieser Charakter hat immer jemanden, an den er sich wenden kann, wenn die Lage heikel wird."
sound_foundations_martial_gain:0 "Gute Grundlagen"
sound_foundations_martial_gain_desc:0 "Nichts lehrt einen so viel über das Leben wie das Großziehen eines Kindes."
sound_foundations_diplomacy_gain:0 "Gute Grundlagen"
sound_foundations_diplomacy_gain_desc:0 "Nichts lehrt einen so viel über das Leben wie das Großziehen eines Kindes."
sound_foundations_intrigue_gain:0 "Gute Grundlagen"
sound_foundations_intrigue_gain_desc:0 "Nichts lehrt einen so viel über das Leben wie das Großziehen eines Kindes."
sound_foundations_stewardship_gain:0 "Gute Grundlagen"
sound_foundations_stewardship_gain_desc:0 "Nichts lehrt einen so viel über das Leben wie das Großziehen eines Kindes."
sound_foundations_learning_gain:0 "Gute Grundlagen"
sound_foundations_learning_gain_desc:0 "Nichts lehrt einen so viel über das Leben wie das Großziehen eines Kindes."
commission_epic_modifier:0 "Geplantes Familienepos"
commission_epic_modifier_desc:0 "Dieser Charakter aalt sich im Ruhm der Geschichte, genauer gesagt in den Legenden über die Errungenschaften seiner Vorfahren."
commission_exceptional_epic_modifier:0 "Geplantes exzellentes Familienepos"
commission_exceptional_epic_modifier_desc:0 "Dieser Charakter aalt sich im Ruhm der Geschichte, genauer gesagt in den Legenden über die Errungenschaften seiner Vorfahren."
commission_epic_emphasized_intrigue_modifier:0 "Geschichten über Ränke"
commission_epic_emphasized_intrigue_modifier_desc:0 "Dieser Charakter entspringt einer langen Reihe bösartiger Manipulatoren. Angeblich."
commission_epic_military_innovation_modifier:0 "Innovationen aus Geschichten"
commission_epic_military_innovation_modifier_desc:0 "Von erdachten Geschichten beflügelt hat dieser Charakter einige originelle Militärtaktiken entdeckt."
commission_epic_shared_with_vassals_modifier:0 "Geteilte Geschichten"
commission_epic_shared_with_vassals_modifier_desc:0 "Dieser Charakter hat Geschichten über die guten alten Gepflogenheiten von Treue und Ruhm verbreitet."
commission_epic_honesty_modifier:0 "Ehrlichkeit"
commission_epic_honesty_modifier_desc:0 "Dieser Charakter hat bewiesen, dass seine Hingabe mehr der Ehrlichkeit und weniger dem Ruhm gilt."
commission_epic_clerical_guidance_modifier:0 "Geistige Führung"
commission_epic_clerical_guidance_modifier_desc:0 "Wie in den goldenen Zeiten seiner Dynastie lauscht dieser Charakter den Ratschlägen des Klerus sehr genau."
sell_titles_related_to_wisdom_modifier:0 "Verwandte Weisheit"
sell_titles_related_to_wisdom_modifier_desc:0 "Dieser Charakter hat jemanden Weises und Frommes zurück in den Schoß der Familie geholt."
sell_titles_clever_servant_modifier:0 "Cleverer Cousin"
sell_titles_clever_servant_modifier_desc:0 "Dieser Charakter hat einen treuen Verwandten, der außerordentlich geschickt ist, was die Kunst der Manipulation anbelangt."
sell_titles_respected_by_peers_modifier:0 "Von seinesgleichen respektiert"
sell_titles_respected_by_peers_modifier_desc:0 "Dieser Charakter genießt großen Respekt im Reich."
sell_titles_shameful_behavior_modifier:0 "Beschämendes Verhalten"
sell_titles_shameful_behavior_modifier_desc:0 "Dieser Charakter wurde der unehrenhaften Praktik des Titelverkaufs bezichtigt."
friendly_counsel_martial_gain:0 "$friendly_counsel_perk_name$"
friendly_counsel_martial_gain_desc:0 "Bei guten Freunden kann man sich immer auf einen guten Ratschlag verlassen."
friendly_counsel_diplomacy_gain:0 "$friendly_counsel_perk_name$"
friendly_counsel_diplomacy_gain_desc:0 "$friendly_counsel_martial_gain_desc$"
friendly_counsel_intrigue_gain:0 "$friendly_counsel_perk_name$"
friendly_counsel_intrigue_gain_desc:0 "$friendly_counsel_martial_gain_desc$"
friendly_counsel_stewardship_gain:0 "$friendly_counsel_perk_name$"
friendly_counsel_stewardship_gain_desc:0 "$friendly_counsel_martial_gain_desc$"
friendly_counsel_learning_gain:0 "$friendly_counsel_perk_name$"
friendly_counsel_learning_gain_desc:0 "$friendly_counsel_martial_gain_desc$"
decentralized_rule_perk_modifier:0 "Dezentralisierte Herrschaft"
decentralized_rule_perk_modifier_desc:0 "Treue Freunde sorgen für ein treues Reich!"
|
project/ck3/base_game/localization/german/modifiers/perk_modifiers_l_german.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-20 11:25"
game: "Unreal Tournament 2004"
name: "dm-verticalfactory2"
author: "Danilo 'Dan' <NAME>"
description: "Liandri Corp. has bought a strange factory in an unknown location. The\
\ central area has more than three floors so, be carefull to not fall, you could\
\ really hurt yourself."
releaseDate: "2003-07"
attachments:
- type: "IMAGE"
name: "dm-verticalfactory2_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/D/dm-verticalfactory2_shot_2.png"
- type: "IMAGE"
name: "dm-verticalfactory2_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/D/dm-verticalfactory2_shot_1.png"
originalFilename: "dm-verticalfactory2.zip"
hash: "35828bad6f26050127ceb035e39246decd7111ac"
fileSize: 3373628
files:
- name: "dm-verticalfactory2.ut2"
fileSize: 10765147
hash: "4e4f141599a69ba34b9af0984904d85d454acd28"
- name: "dan-factory.utx"
fileSize: 885958
hash: "ba12b8af7e99126ee4113aac28d126cc44b949e3"
otherFiles: 2
dependencies:
dm-verticalfactory2.ut2:
- status: "OK"
name: "dan-factory"
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/DeathMatch/D/dm-verticalfactory2.zip"
main: true
repack: false
state: "OK"
- url: "http://www.unrealplayground.com/forums/downloads.php?do=file&id=1823"
main: false
repack: false
state: "MISSING"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/DeathMatch/V/3/5/828bad/dm-verticalfactory2.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/DeathMatch/V/3/5/828bad/dm-verticalfactory2.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DM-VerticalFactory2"
playerCount: "4"
themes:
Tech: 0.2
Industrial: 0.7
Ancient: 0.1
bots: true
|
content/Unreal Tournament 2004/Maps/DeathMatch/V/3/5/828bad/dm-verticalfactory2_[35828bad].yml
|
items:
- uid: com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus
id: IBatchJobStatus
artifact: com.microsoft.store:partnercenter:1.15.3
parent: com.microsoft.store.partnercenter.devicesdeployment
children:
- com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus.get()
langs:
- java
name: IBatchJobStatus
nameWithType: IBatchJobStatus
fullName: com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus
type: Interface
package: com.microsoft.store.partnercenter.devicesdeployment
summary: Represents the operations that can be done on the partner's device.
syntax:
content: public interface IBatchJobStatus extends IPartnerComponent<Tuple<String,String>>, IEntityGetOperations<BatchUploadDetails>
implements:
- com.microsoft.store.partnercenter.IPartnerComponent<com.microsoft.store.partnercenter.models.utils.Tuple<java.lang.String,java.lang.String>>
- com.microsoft.store.partnercenter.genericoperations.IEntityGetOperations<com.microsoft.store.partnercenter.models.devicesdeployment.BatchUploadDetails>
- uid: com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus.get()
id: get()
artifact: com.microsoft.store:partnercenter:1.15.3
parent: com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus
langs:
- java
name: get()
nameWithType: IBatchJobStatus.get()
fullName: com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus.get()
overload: com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus.get*
type: Method
package: com.microsoft.store.partnercenter.devicesdeployment
summary: Retrieves a specific customer devices batch upload status details.
syntax:
content: public abstract BatchUploadDetails get()
return:
type: com.microsoft.store.partnercenter.models.devicesdeployment.BatchUploadDetails
description: The devices batch upload status details.
references:
- uid: com.microsoft.store.partnercenter.models.devicesdeployment.BatchUploadDetails
name: BatchUploadDetails
nameWithType: BatchUploadDetails
fullName: com.microsoft.store.partnercenter.models.devicesdeployment.BatchUploadDetails
- uid: com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus.get*
name: get
nameWithType: IBatchJobStatus.get
fullName: com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus.get
package: com.microsoft.store.partnercenter.devicesdeployment
- uid: com.microsoft.store.partnercenter.IPartnerComponent<com.microsoft.store.partnercenter.models.utils.Tuple<java.lang.String,java.lang.String>>
name: IPartnerComponent<Tuple<String,String>>
nameWithType: IPartnerComponent<Tuple<String,String>>
fullName: com.microsoft.store.partnercenter.IPartnerComponent<com.microsoft.store.partnercenter.models.utils.Tuple<java.lang.String,java.lang.String>>
- uid: com.microsoft.store.partnercenter.genericoperations.IEntityGetOperations<com.microsoft.store.partnercenter.models.devicesdeployment.BatchUploadDetails>
name: IEntityGetOperations<BatchUploadDetails>
nameWithType: IEntityGetOperations<BatchUploadDetails>
fullName: com.microsoft.store.partnercenter.genericoperations.IEntityGetOperations<com.microsoft.store.partnercenter.models.devicesdeployment.BatchUploadDetails>
- uid: com.microsoft.store.partnercenter.IPartnerComponent
name: IPartnerComponent
nameWithType: IPartnerComponent
fullName: com.microsoft.store.partnercenter.IPartnerComponent
- uid: com.microsoft.store.partnercenter.models.utils.Tuple
name: Tuple
nameWithType: Tuple
fullName: com.microsoft.store.partnercenter.models.utils.Tuple
- uid: java.lang.String,java.lang.String
name: String,String
nameWithType: String,String
fullName: java.lang.String,java.lang.String
- uid: com.microsoft.store.partnercenter.genericoperations.IEntityGetOperations
name: IEntityGetOperations
nameWithType: IEntityGetOperations
fullName: com.microsoft.store.partnercenter.genericoperations.IEntityGetOperations
|
docs-ref-autogen/com.microsoft.store.partnercenter.devicesdeployment.IBatchJobStatus.yml
|
pmid: '27386852'
categories:
- name: Symptoms
enabled: true
- name: Diagnosis
enabled: true
- name: Genetics
enabled: true
- name: Physiopathology
enabled: true
- name: Biochemistry
enabled: false
title: >-
Nonrecurrent PMP22-RAI1 contiguous gene deletions arise from replication-based
mechanisms and result in Smith-Magenis syndrome with evident peripheral
neuropathy.
abstract: >-
Hereditary neuropathy with liability to pressure palsies (HNPP) and
Smith-Magenis syndrome (SMS) are genomic disorders associated with deletion
copy number variants involving chromosome 17p12 and 17p11.2, respectively.
Nonallelic homologous recombination (NAHR)-mediated recurrent deletions are
responsible for the majority of HNPP and SMS cases; the rearrangement products
encompass the key dosage-sensitive genes PMP22 and RAI1, respectively, and
result in haploinsufficiency for these genes. Less frequently, nonrecurrent
genomic rearrangements occur at this locus. Contiguous gene duplications
encompassing both PMP22 and RAI1, i.e., PMP22-RAI1 duplications, have been
investigated, and replication-based mechanisms rather than NAHR have been
proposed for these rearrangements. In the current study, we report molecular
and clinical characterizations of six subjects with the reciprocal phenomenon
of deletions spanning both genes, i.e., PMP22-RAI1 deletions. Molecular
studies utilizing high-resolution array comparative genomic hybridization and
breakpoint junction sequencing identified mutational signatures that were
suggestive of replication-based mechanisms. Systematic clinical studies
revealed features consistent with SMS, including features of intellectual
disability, speech and gross motor delays, behavioral problems and ocular
abnormalities. Five out of six subjects presented clinical signs and/or
objective electrophysiologic studies of peripheral neuropathy. Clinical
profiling may improve the clinical management of this unique group of
subjects, as the peripheral neuropathy can be more severe or of earlier onset
as compared to SMS patients having the common recurrent deletion. Moreover,
the current study, in combination with the previous report of PMP22-RAI1
duplications, contributes to the understanding of rare complex phenotypes
involving multiple dosage-sensitive genes from a genetic mechanistic
standpoint.
abstractLink: 'https://www.ncbi.nlm.nih.gov/pubmed/27386852'
fullTextLink: ''
date: 2016/07
authors:
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
keywords:
- keyword: Adolescent
- keyword: Child
- keyword: 'Child, Preschool'
- keyword: 'Chromosomes, Human, Pair 17'
- keyword: DNA Copy Number Variations
- keyword: DNA Replication
- keyword: Gene Deletion
- keyword: Haploinsufficiency
- keyword: Homologous Recombination
- keyword: Intellectual Disability
- keyword: physiopathology
- keyword: Mutation
- keyword: Myelin Proteins
- keyword: Smith-Magenis Syndrome
- keyword: Transcription Factors
cites:
- pmid: '25087610'
- pmid: '9758599'
- pmid: '19543269'
- pmid: '24357149'
- pmid: '2602357'
- pmid: '27071622'
- pmid: '21659953'
- pmid: '9820031'
- pmid: '25886820'
- pmid: '9326934'
- pmid: '1322507'
- pmid: '4285163'
- pmid: '20059347'
- pmid: '23657883'
- pmid: '26189194'
- pmid: '8256814'
- pmid: '8931707'
- pmid: '21124890'
- pmid: '25887218'
- pmid: '9973284'
- pmid: '21248748'
- pmid: '16845274'
- pmid: '22654670'
- pmid: '26273056'
- pmid: '10330358'
- pmid: '17539903'
- pmid: '19023044'
- pmid: '17620487'
- pmid: '25892534'
- pmid: '8872467'
- pmid: '8882782'
- pmid: '16444292'
- pmid: '17273973'
- pmid: '19439022'
- pmid: '1303282'
- pmid: '12652298'
- pmid: '25959774'
- pmid: '1746552'
- pmid: '15526218'
- pmid: '20071523'
- pmid: '18160035'
- pmid: '8651284'
- pmid: '7894481'
- pmid: '26544804'
- pmid: '27617127'
- pmid: '11735029'
- pmid: '26641089'
- pmid: '8422677'
- pmid: '12649807'
- pmid: '22722545'
- pmid: '18809224'
- pmid: '25564734'
- pmid: '19180184'
- pmid: '25065914'
- pmid: '21981782'
- pmid: '11997338'
- pmid: '26924765'
- pmid: '17041942'
- pmid: '20271590'
citedBy: []
|
src/data/citations/27386852.yml
|
# The English language => Just for example all the keywords
"en":
name: English
native: English
describe:
it:
its:
pending:
subject:
shared_examples_for:
share_as:
it_should_behave_like:
should:
should_not:
before:
after:
hooks:
each:
all:
suite:
matchers:
be:
true_word:
false_word:
nil_word:
empty_word:
be_a_kind_of:
be_close:
be_an_instance_of:
eql:
equal:
exist:
have:
have_exactly:
have_at_least:
have_at_most:
include:
match:
raise_error:
satisfy:
"ar":
name: Arabic
native: العربية
"de":
name: German
native: Deutsch
describe: beschreibe|kontext
it: es
its:
pending:
subject: subjekt
share_as:
shared_examples_for:
it_should_behave_like:
should: sollte
should_not: sollte_nicht
before: vorher
after: nachher
hooks:
each: von_jeder
all: von_alle
suite: suite
matchers:
be: sein
true_word: wahr*
false_word: falsch*
nil_word: null*
empty_word: leer*
be_a_kind_of: ein_typ*sein
be_close: nahe_liegen*
be_an_instance_of:
eql: glc
equal: gleich
exist: wesen
have: haben
have_exactly: genau_haben*
have_at_least: mindestens_haben*
have_at_most: maximal_haben*
include: beinhalten*
match: passen*
raise_error:
satisfy: erfüllen*
# Please put the right words for these languages! =D
"es":
name: Spanish
native: Español
"en-au":
name: Australian
native: Australian
"fr":
name: French
native: Français
"ko":
name: Korean
native: 한국어
"pt":
name: Portuguese
native: Português
describe: descreva|contexto
it: isso|isto|especificar|exemplo
its: exemplo_do_assunto # I don't know about this word
pending: pendente
subject: assunto
shared_examples_for: exemplos_distribuidos|exemplo_distribuido
share_as: distribua_como
it_should_behave_like: deve_se_comportar_como
should: deve
should_not: nao_deve
before: antes
after: depois
hooks:
each: de_cada|de_cada_exemplo
all: de_todos|de_todos_exemplos
suite: suite
matchers:
be: ser|estar
true_word: verdadeiro
false_word: falso
nil_word: nulo
empty_word: vazio
be_a_kind_of: ser_do_tipo
be_close: estar_perto|estar_proximo
be_an_instance_of: ser_instancia_de
eql: igl
equal: igual|igual_a
exist: existir
have: ter
have_exactly: ter_exatamente
have_at_least: ter_no_minimo
have_at_most: ter_no_maximo
include: incluir
match: corresponder
raise_error: mostrar_erro|mostrar_excessao
satisfy: satisfazer
|
lib/spec-i18n/languages.yml
|
---
- name: "Check if {{ cluster_name }} already exists"
docker_container_info:
name: "{{ cluster_name + '-control-plane' }}"
register: cluster_check_result
- name: Create KinD Config
template:
dest: "{{ kind_home_dir + '/' + cluster_name + '/' + kind_cluster_config }}"
src: 'kind-cluster-config.yml.j2'
mode: "755"
when: kind_create_config
- name: "Set {{ cluster_name }} config file"
set_fact:
kind_cluster_config_file: "{{ kind_home_dir + '/' + cluster_name + '/' + kind_cluster_config }}"
when: kind_create_config
- name: "Start KinD Cluster: '{{ cluster_name }}'"
command:
argv:
- "{{ kind_binary }}"
- create
- cluster
- --name={{ cluster_name }}
- --config={{ kind_cluster_config_file }}
register: kind_cluster_result
when: (cluster_config is not defined and not cluster_check_result.exists)
- name: "Get KinD Cluster: '{{ cluster_name }}' Cluster Nodes"
command:
argv:
- "{{ kind_binary }}"
- get
- nodes
- --name={{ cluster_name }}
register: cluster_nodes_result
changed_when: False
- name: Set fact about cluster nodes
set_fact:
cluster_nodes: "{{ cluster_nodes_result.stdout.split('\n') }}"
- name: Create and start KinD Registry
docker_container:
state: started
image: 'registry:2'
restart_policy: 'always'
name: '{{ container_registry_name }}'
network_mode: kind
published_ports:
- '0.0.0.0:{{ container_registry_port }}:5000'
- name: "Label KinD Cluster: '{{ cluster_name }}' Cluster Nodes"
k8s:
state: present
resource_definition:
api_version: v1
kind: Node
metadata:
annotations:
tilt.dev/registry: 'localhost:{{ container_registry_port }}'
tilt.dev/registry-from-cluster: '{{ container_registry_name }}:{{ container_registry_port }}'
name: '{{ item }}'
loop: '{{ cluster_nodes }}'
when: kind_cluster_result is defined
- name: Get all worker nodes
k8s_info:
kind: Node
label_selectors:
- '!node-role.kubernetes.io/master'
register: worker_nodes
- name: "Label KinD Cluster '{{ cluster_name }}' Worker Nodes"
k8s:
state: present
resource_definition:
api_version: v1
kind: Node
metadata:
labels:
node-role.kubernetes.io/worker: ''
name: '{{ item }}'
loop: "{{ worker_nodes | json_query('resources[*].metadata.name') }}"
|
tasks/kind_create.yml
|
name: CI
on:
push:
pull_request:
schedule:
# Weekly, at 5:30 AM on Sunday (somewhat randomly chosen).
- cron: '30 5 * * 0'
workflow_dispatch:
jobs:
build:
strategy:
matrix:
platform: [x86_64, i386]
runs-on: windows-latest
name: 'Build: ${{ matrix.platform }}'
defaults:
run:
shell: pwsh
env:
platform: '${{ matrix.platform }}'
global_env: 'HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager\Environment'
user_env: 'HKCU:\Environment'
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Stack
uses: haskell/actions/setup@v1
with:
enable-stack: true
stack-no-global: true
# TODO: caching.
- name: Build dependencies
run: stack --no-terminal build --arch '${{ matrix.platform }}' --only-dependencies
- name: Build
run: stack --no-terminal build --arch '${{ matrix.platform }}' --copy-bins --local-bin-path ../install
- name: Upload binaries
uses: actions/upload-artifact@v2
with:
name: 'windows-env-${{ matrix.platform }}'
path: '${{ runner.workspace }}/install/'
if-no-files-found: error
- name: Run tests
run: |
$((Get-ItemProperty $env:global_env).PATH.Split(';') | Select-String -Pattern '^C:\\dumb$' -Quiet) -and $(throw "Global PATH shouldn't contain C:\dumb")
$((Get-ItemProperty $env:user_env).PATH.Split(';') | Select-String -Pattern '^C:\\dumb$' -Quiet) -and $(throw "User PATH shouldn't contain C:\dumb")
../install/addpath -y C:\dumb
$((Get-ItemProperty $env:global_env).PATH.Split(';') | Select-String -Pattern '^C:\\dumb$' -Quiet) -and $(throw "Global PATH shouldn't contain C:\dumb")
$((Get-ItemProperty $env:user_env).PATH.Split(';') | Select-String -Pattern '^C:\\dumb$' -Quiet) -or $(throw "User PATH should contain C:\dumb")
../install/addpath -g -y C:\dumb
$((Get-ItemProperty $env:global_env).PATH.Split(';') | Select-String -Pattern '^C:\\dumb$' -Quiet) -or $(throw "Global PATH should contain C:\dumb")
$((Get-ItemProperty $env:user_env).PATH.Split(';') | Select-String -Pattern '^C:\\dumb$' -Quiet) -or $(throw "User PATH should contain C:\dumb")
../install/delpath -g -y C:\dumb
$((Get-ItemProperty $env:global_env).PATH.Split(';') | Select-String -Pattern '^C:\\dumb$' -Quiet) -and $(throw "Global PATH shouldn't contain C:\dumb")
$((Get-ItemProperty $env:user_env).PATH.Split(';') | Select-String -Pattern '^C:\\dumb$' -Quiet) -and $(throw "User PATH shouldn't contain C:\dumb")
|
.github/workflows/ci.yml
|
gallery:
- url: https://500px.com/photo/1029425635/adelantos-de-primavera-by-cesar-ordonez
image_path: https://drscdn.500px.org/photo/1029425635/m%3D900/v2?sig=af0eaa062ae6a0a618368dd92fb2446a19a944c0a242dc5e0104e644b9cd954d
alt: "Adelantos de primavera by <NAME> on 500px.com"
title: "Adelantos de primavera"
- url: https://500px.com/photo/1029368280/mountain-by-cesar-ordonez
image_path: https://drscdn.500px.org/photo/1029368280/m%3D900/v2?sig=5d8cde2081b6151c79998600b1b1e482ab00014d97789dba7488f05affeb7d70
alt: "Mountain"
- url: https://500px.com/photo/1028716053/saleve-by-cesar-ordonez
image_path: https://drscdn.500px.org/photo/1028716053/m%3D900/v2?sig=71c246a315b4725f39ae23ba290bf8237237f8f189c98e05f83efc8ee6713c29
alt: "Saleve"
- url: https://500px.com/photo/1027547474/vallorbe-un-lugar-detenido-en-el-tiempo-by-cesar-ordonez
image_path: https://drscdn.500px.org/photo/1027547474/m%3D900/v2?sig=2cc61b1a5f0360c2d5bd2294d7242f563aefdeefecb6e772043dab7ea74dd2a4
alt: "Vallorbe, un lugar detenido en el tiempo by Cesar Ordóñez on 500px.com"
title: "Vallorbe, un lugar detenido en el tiempo"
- url: https://500px.com/photo/1027547314/place-to-live-by-cesar-ordonez
image_path: https://drscdn.500px.org/photo/1027547314/m%3D900/v2?sig=4cc6f5c4c5a1c4bfb1a94aee71e0cc4a0988f710d53198b6e684aa418f70a2de
alt: "Place to live by Cesar Ordóñez on 500px.com"
title: "Place to live"
- url: https://500px.com/photo/1016199300/sunset-by-cesar-ordonez
image_path: https://drscdn.500px.org/photo/1016199300/m%3D900/v2?sig=589aec03df9ce80ccac6b8383381d21a102394b36a9151176bd01a9ef01f2bff
alt: "Sunset by Cesar Ordóñez on 500px.com"
title: "Sunset"
- url: https://500px.com/photo/298515395/recuerdos-vivos-by-cesar-ordonez
image_path: https://drscdn.500px.org/photo/298515395/m%3D900/v2?sig=09056d5e7b52924cd033514c20f6b6e7e262f626c1e3dcd922798f92747c8363
alt: "Recuerdos vivos by <NAME> on 500px.com"
title: "Recuerdos vivos"
- url: https://500px.com/photo/295881659/en-camino-by-cesar-ordonez
image_path: https://drscdn.500px.org/photo/295881659/m%3D900/v2?sig=3659603fd4c19af4f07a004d48e23aa754eaa0e20497f789d1ad0a690b6266fb
alt: "En camino by <NAME> on 500px.com"
title: "En camino"
|
_data/gallery.yml
|
Token Env Var: SEU TOKEN AQUI
# Prefixo de comando para todos os comandos.
Prefix: '+'
# O ID do canal para o qual o bot enviará mensagens.
Log Channel: ID DO CANAL ONDE O BOT MANDA QUE ESTÁ QUEBRADO (INT)
# O ID do usuário do qual deseja fazer ping pelo comando Broken.
Broken User ID: log-server
# Define o status 'Reproduzindo' do bot.
Game Status:
# 'true' exibirá 'Reproduzindo ___' (___ definido abaixo), 'false' não exibirá nada.
Active: true
# A atividade do jogo a ser exibida.
# NOTE: Use '{prefix}' como um espaço reservado para o prefixo do comando bots.
# Padrão: '{prefix} help'
Game: NOME DO JOGO
# Configurações que serão aplicadas a quase todas as mensagens incorporadas, a menos que especificado de outra forma.
Embed Settings:
# A cor da barra lateral para mensagens incorporadas, no padrão RGB.
Color:
r: 0
g: 0
b: 0
# Esta opção excluirá a mensagem do usuário após o envio de um comando.
# NOTE: O bot precisa da permissão "gerenciar mensagens" para que isso funcione.
Delete Commands: false
# Se deve ou não mostrar o autor de cada comando na resposta dada.
# NOTE: Funciona bem com a opção "Excluir comandos" acima.
Show Author: false
Footer:
# O texto para o rodapé das mensagens
Text: TEXTO RODAPÉ
# O URL do ícone na parte inferior da imagem do rodapé.
Icon URL: https://example.com/sample.png
# O link para o convite do servidor
Server Invite: https://discordapp.com/abcabc
# O texto da mensagem de log online.
# NOTE: Use '{username}' como espaço reservado para o nome de usuário do bot
Online Message: '{username} Online!'
# O texto para a mensagem de log de reinicialização.
# NOTE: Use '{username}' como um espaço reservado para o nome de usuário do bot.
Restarting Message: '{username} Restarting...'
Stop Message: '{username} Stop...'
# O arquivo em que os dados são armazenados. Provavelmente não deveria mexer com isso.
Data File: ./Data/data_storage.json
# O Nome do BOT para chamadas de client event
Bot Name: NOME DO BOT PARA CHAMADAS CLIENT EVENT
|
Config.yml
|
amazon_cloudfront:
- description: >-
Le pourcentage de toutes les requêtes pour lesquelles le code d'état HTTP
est 4xx.
integration: amazon_cloudfront
interval: '60'
metric_name: aws.cloudfront.4xx_error_rate
metric_type: gauge
orientation: '-1'
per_unit_name: ''
short_name: 4xx err rate
unit_name: percent
- description: >-
Le pourcentage de toutes les requêtes pour lesquelles le code d'état HTTP
est 5xx.
integration: amazon_cloudfront
interval: '60'
metric_name: aws.cloudfront.5xx_error_rate
metric_type: gauge
orientation: '-1'
per_unit_name: ''
short_name: 5xx err rate
unit_name: percent
- description: >-
Nombre d'octets téléchargés par les utilisateurs pour les requêtes GET,
HEAD et OPTIONS.
integration: amazon_cloudfront
interval: '60'
metric_name: aws.cloudfront.bytes_downloaded
metric_type: count
orientation: '0'
per_unit_name: ''
short_name: B downlded
unit_name: byte
- description: >-
Le nombre d'octets téléchargés vers votre origine avec CloudFront à l'aide
des requêtes POST et PUT.
integration: amazon_cloudfront
interval: '60'
metric_name: aws.cloudfront.bytes_uploaded
metric_type: count
orientation: '0'
per_unit_name: ''
short_name: B uplded
unit_name: byte
- description: >-
Le nombre de requêtes pour toutes les méthodes HTTP et pour les requêtes
HTTP et HTTPS.
integration: amazon_cloudfront
interval: '60'
metric_name: aws.cloudfront.requests
metric_type: count
orientation: '0'
per_unit_name: ''
short_name: requests
unit_name: ''
- description: >-
Pourcentage de toutes les demandes pour lesquelles le code d'état HTTP est
4xx ou 5xx.
integration: amazon_cloudfront
interval: '60'
metric_name: aws.cloudfront.total_error_rate
metric_type: gauge
orientation: '-1'
per_unit_name: ''
short_name: total err rate
unit_name: percent
|
data/integrations/amazon_cloudfront.fr.yaml
|
SDG_GOAL: >-
Goal 16. Promote the emergence of peaceful and inclusive societies for sustainable development, ensure access to justice for all and build, at all levels, effective, accountable and inclusive institutions
SDG_TARGET: >-
16.1 Significantly reduce all forms of violence and associated death rates everywhere
SDG_INDICATOR: >-
16.1.1. Percentage of people who have been victims of gender-based violence
CONTACT_ORGANISATION: ISTEEBU
CONTACT_NAME: <NAME>
ORGANISATION_UNIT: Social studies and statistics and poverty monitoring (ESSSP)
CONTACT_FUNCT: ESSSP department manager
CONTACT_PHONE: '79401939'
CONTACT_MAIL: <NAME>
CONTACT_EMAIL: <EMAIL>
STAT_CONC_DEF: Percentage of people who have been victims of gender-based violence
UNIT_MEASURE: Percentage
SOURCE_TYPE: Household surveys
COLL_METHOD: >-
Data are collected from household surveys using the "household" questionnaire
FREQ_COLL: The probable date of the next collection will be 2021 (Next DHS)
REL_CAL_POLICY: '2023'
DATA_SOURCE: ISTEEBU
COMPILING_ORG: ISTEEBU
INST_MANDATE: >-
ISTEEBU is the body recognized by the Burundian government for the collection, processing and dissemination of official statistics helping in planning and decision-making.
RATIONALE: >-
This indicator is relevant because it highlights the percentage of women who have been victims of gender-based violence.
REC_USE_LIM: >-
Updating the indicator requires a household survey which is very resource intensive. The indicator cannot be calculated by gender.
DATA_COMP: >-
Ratio of people who have been victims of gender-based violence to the total population, expressed as a percentage
DATA_VALIDATION: >-
The results of the survey are validated at two levels: technical validation and official validation before publication
COVERAGE: Available by gender and Place of residence (Women; Men; Urban; Rural)
COMPARABILITY: >-
The indicator can be compared to other indicators of the same type at regional and international level
OTHER_DOC: EDSB II Household Survey Report 2016_2017
|
translations/en/16-1-1.yml
|
uid: "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphMessageRuleActions.withForwardAsAttachmentTo*"
fullName: "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphMessageRuleActions.withForwardAsAttachmentTo"
name: "withForwardAsAttachmentTo"
nameWithType: "MicrosoftGraphMessageRuleActions.withForwardAsAttachmentTo"
members:
- uid: "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphMessageRuleActions.withForwardAsAttachmentTo(java.util.List<com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphRecipient>)"
fullName: "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphMessageRuleActions.withForwardAsAttachmentTo(List<MicrosoftGraphRecipient> forwardAsAttachmentTo)"
name: "withForwardAsAttachmentTo(List<MicrosoftGraphRecipient> forwardAsAttachmentTo)"
nameWithType: "MicrosoftGraphMessageRuleActions.withForwardAsAttachmentTo(List<MicrosoftGraphRecipient> forwardAsAttachmentTo)"
summary: "Set the forwardAsAttachmentTo property: The email addresses of the recipients to which a message should be forwarded as an attachment."
parameters:
- description: "the forwardAsAttachmentTo value to set."
name: "forwardAsAttachmentTo"
type: "<xref href=\"java.util.List?alt=java.util.List&text=List\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphRecipient?alt=com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphRecipient&text=MicrosoftGraphRecipient\" data-throw-if-not-resolved=\"False\" />>"
syntax: "public MicrosoftGraphMessageRuleActions withForwardAsAttachmentTo(List<MicrosoftGraphRecipient> forwardAsAttachmentTo)"
returns:
description: "the MicrosoftGraphMessageRuleActions object itself."
type: "<xref href=\"com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphMessageRuleActions?alt=com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphMessageRuleActions&text=MicrosoftGraphMessageRuleActions\" data-throw-if-not-resolved=\"False\" />"
type: "method"
metadata: {}
package: "com.azure.resourcemanager.authorization.fluent.models"
artifact: com.azure.resourcemanager:azure-resourcemanager-authorization:2.2.0
|
docs-ref-autogen/com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphMessageRuleActions.withForwardAsAttachmentTo.yml
|
version: '3'
services:
redis:
restart: always
container_name: redis
hostname: 'redis'
image: redis:4.0-alpine
command:
- --loglevel warning
volumes:
- /srv/gitlab/redis:/var/lib/redis:Z
postgresql:
restart: always
container_name: postgresql
hostname: 'postgresql'
image: postgres:10.3-alpine
volumes:
- /srv/gitlab/postgresql/data:/var/lib/postgresql/data:rw
environment:
- POSTGRES_USER=gitlab
- POSTGRES_PASSWORD=<PASSWORD>
- POSTGRES_DB=gitlabhq_production
gitlab:
restart: always
container_name: gitlab
image: gitlab/gitlab-ce:10.5.5-ce.0
hostname: 'git.cloud.lab'
depends_on:
- redis
- postgresql
ports:
- "10080:10080"
- "10022:22"
- "10081:443"
volumes:
- /srv/gitlab/logs:/var/log/gitlab:rw
- /srv/gitlab/data:/var/opt/gitlab:rw
- /srv/gitlab/config:/etc/gitlab:rw
- /srv/gitlab/backups:/mnt/backups:rw
environment:
# http_proxy: "http://proxy_host:port"
# https_proxy: "https://proxy_host:port"
# HTTP_PROXY: "http://proxy_host:port"
# HTTPS_PROXY: "https://proxy_host:port"
GITLAB_OMNIBUS_CONFIG: |
external_url 'http://git.cloud.lab:10080'
gitlab_rails['gitlab_https'] = false
gitlab_rails['gitlab_shell_ssh_port'] = 10022
postgresql['enable'] = false
gitlab_rails['db_username'] = 'gitlab'
gitlab_rails['db_host'] = 'postgresql'
gitlab_rails['db_adapter'] = 'postgresql'
gitlab_rails['db_port'] = 5432
gitlab_rails['db_database'] = 'gitlabhq_production'
gitlab_rails['db_encoding'] = 'utf8'
gitlab_rails['db_password'] = '<PASSWORD>'
redis['enable'] = false
gitlab_rails['redis_host'] = 'redis'
gitlab_rails['redis_port'] = 6379
gitlab_rails['timezone'] = 'Asia/Ho_Chi_Minh'
gitlab_rails['initial_root_password'] = '<PASSWORD>'
gitlab_rails['env'] = { :http_proxy => 'http://proxy_host:port', :https_proxy => 'https://proxy_host:port' }
gitlab_rails['backup_upload_connection'] = {:provider => 'Local', :local_root => '/mnt/backups'}
gitlab_rails['backup_upload_remote_directory'] = 'gitlab_backups'
# gitlab_rails['smtp_enable'] = true
# gitlab_rails['smtp_address'] = 'smtp.server'
# gitlab_rails['smtp_port'] = 465
# gitlab_rails['smtp_user_name'] = 'admin@test'
# gitlab_rails['smtp_password'] = 'password'
# gitlab_rails['smtp_domain'] = 'smtp.domain'
# gitlab_rails['smtp_authentication'] = 'login'
# gitlab_rails['smtp_tls'] = true
# gitlab_rails['smtp_openssl_verify_mode'] = 'peer'
# gitlab_monitor['enable'] = false
unicorn['worker_processes'] = 3
unicorn['worker_timeout'] = 60
jenkins:
restart: always
container_name: jenkins
image: jenkinsci/blueocean:1.4.2
user: root
# environment:
# http_proxy: "http://proxy_host:port"
# https_proxy: "https://proxy_host:port"
# HTTP_PROXY: "http://proxy_host:port"
# HTTPS_PROXY: "https://proxy_host:port"
ports:
- "49001:8080"
volumes:
- /srv/jenkins_home:/var/jenkins_home
- /var/run/docker.sock:/var/run/docker.sock
|
docker-compose.yml
|
title: Azure IoT Hub Belgeleri
metadata:
title: Azure IoT Hub Belgeleri
description: Azure IoT Hub’ı kullanarak milyarlarca Nesnelerin İnterneti varlığına bağlanma, bunları izleme ve denetleme hakkında bilgi edinin.
services: iot-hub
ms.service: iot-hub
ms.topic: landing-page
author: robinsh
ms.author: robinsh
manager: eliotgra
ms.date: 03/23/2020
ms.openlocfilehash: 101c9623ed102fc8e4419df14e383e1df6afb010
ms.sourcegitcommit: 7581df526837b1484de136cf6ae1560c21bf7e73
ms.translationtype: HT
ms.contentlocale: tr-TR
ms.lasthandoff: 03/31/2020
ms.locfileid: "80420846"
landingContent:
- title: Başlarken
linkLists:
- linkListType: quickstart
links:
- text: Azure portalını kullanarak merkez oluşturma
url: iot-hub-create-through-portal.md
- text: CLI kullanarak bir cihazdan merkeze telemetri gönderme
url: quickstart-send-telemetry-cli.md
- text: Merkeze bağlı bir cihazı denetleme
url: quickstart-control-device-node.md
- linkListType: get-started
links:
- text: Cihaz yönetimi
url: iot-hub-node-node-device-management-get-started.md
- text: Cihazları yapılandırma
url: iot-hub-configuration-best-practices.md
- title: Cihaz Durumunu Eşitleme
linkLists:
- linkListType: how-to-guide
links:
- text: Node.js kullanarak Cihaz İkizleriyle çalışma
url: iot-hub-node-node-twin-getstarted.md
- text: IoT ölçümlerini anlama
url: iot-hub-metrics.md
- text: Tanılama Günlüğüne Kaydetme
url: iot-hub-monitor-resource-health.md
- text: IoT ile bağlantı kesilmelerini algılama, tanılama ve sorunları giderme
url: iot-hub-troubleshoot-connectivity.md
- title: Çözümünüzü ölçeklendirme
linkLists:
- linkListType: concept
links:
- text: Doğru katmanı seçme
url: iot-hub-scaling.md
- text: Kotaları ve bant genişliği azaltmayı anlama
url: iot-hub-devguide-quotas-throttling.md
- title: Protokolünüzü seçme
linkLists:
- linkListType: concept
links:
- text: Desteklenen Protokoller
url: iot-hub-devguide-protocols.md
- text: TLS Desteği
url: iot-hub-tls-support.md
- text: MQTT Desteği
url: iot-hub-mqtt-support.md
- text: IP Adresleme
url: iot-hub-understand-ip-address.md
- text: AMQP
url: iot-hub-amqp-support.md
- text: Protokol ağ geçidi
url: iot-hub-protocol-gateway.md
- title: Cihaz verilerini başka bir hizmete yönlendirme
linkLists:
- linkListType: concept
links:
- text: Azure IoT Hub ileti yönlendirmesini anlama
url: iot-hub-devguide-messages-d2c.md
- linkListType: tutorial
links:
- text: Azure CLI ile hazırlanma
url: tutorial-routing-config-message-routing-cli.md
- text: Azure Resource Manager şablonu ile hazırlanma
url: tutorial-routing-config-message-routing-rm-template.md
- text: Yönlendirme sonuçlarını çalıştırma ve görüntüleme
url: tutorial-routing-view-message-routing-results.md
- title: IoT Hub Araçları’nı kullanma
linkLists:
- linkListType: how-to-guide
links:
- text: VS Code için IoT Araçları’nı kullanma
url: iot-hub-create-use-iot-toolkit.md
- text: IoT Gezgini
url: ../iot-pnp/howto-install-iot-explorer.md
- title: IoT Güvenlik sertifikalarını kullanma
linkLists:
- linkListType: concept
links:
- text: IoT Hub’a erişimi denetleme
url: iot-hub-devguide-security.md
- text: X.509 CA sertifikalarını kullanarak Cihaz Kimlik Doğrulaması
url: iot-hub-x509ca-overview.md
- text: IoT Hub ile X.509 güvenliği
url: iot-hub-security-x509-get-started.md
- text: X.509 CA sertifikası güvenlik kavramları
url: iot-hub-x509ca-concept.md
|
articles/iot-hub/index.yml
|
service id: fff
endpoint id: ffff
endpoint name: fake
pre-processor configuration:
synchronicity: request-response
elements: +requestHeaders,messageId,packageKey
endpoint uri: https://api-unitttest.airfranceklm.com/
origin uri: https://docker.kml/backend/url
dropped headers:
- C
- D
added headers:
E: F
G: H
post-processor configuration:
---
name: base rejection scenario
point: PreProcessor
desc: Checking that the unit test will compile and will nominally pass.
api client request:
remote address: 127.0.0.1
http verb: GET
resource: /fff
query params:
myQuery: 123
headers:
A: B
C: C1
payload: AAAA
  # The request payload can be present even for GET-type requests.
application:
name: the-app
extended attributes:
a: b
package key:
key: dfgf
authorization context:
scope: "12345"
user context: "456"
grant type: CC
# This is what should be sent to lambda
sidecar pre-processor:
input:
masheryMessageId: unit-test-call-uuid
synchronicity: RequestResponse
packageKey: dfgf
serviceId: fff
endpointId: ffff
point: PreProcessor
request:
headers:
a: B
c: C1
e: F
g: H
x-mashery-message-id: unit-test-call-uuid
output:
terminate:
statusCode: 403
message: "Lambda 403"
mashery response:
complete: true
status code: 403
headers:
Content-Type: application/xml
payload: "<h1><![CDATA[Lambda 403]]></h1>"
---
name: rejection with single code
point: PreProcessor
desc: Checking that the unit test will compile and will nominally pass.
inherited:
- client-request
- sidecar pre-processor request
# This is what should be sent to lambda
sidecar pre-processor:
output:
terminate:
statusCode: 403
# Message is not provided
mashery response:
complete: true
status code: 403
headers:
Content-Type: application/xml
payload: "<h1><![CDATA[Service cannot be provided, code 0x000003BB]]></h1>"
---
name: rejection with payload
point: PreProcessor
desc: Checking that the unit test will compile and will nominally pass.
inherited:
- client-request
- sidecar pre-processor request
sidecar pre-processor:
output:
terminate:
statusCode: 403
passHeaders:
X-sidecar: Error
payload: "==PAYLOAD=="
# Message is not provided
mashery response:
complete: true
status code: 403
headers:
X-sidecar: Error
payload: "==PAYLOAD=="
---
name: rejection with json and content type
point: PreProcessor
desc: Checking that the unit test will compile and will nominally pass.
inherited:
- client-request
- sidecar pre-processor request
sidecar pre-processor:
output:
terminate:
statusCode: 403
passHeaders:
Content-Type: application/json+hal
json:
a: b
# Message is not provided
mashery response:
complete: true
status code: 403
headers:
Content-Type: application/json+hal
payload: "{\"a\":\"b\"}"
---
name: rejection with json and without content type
point: PreProcessor
desc: Checking that the unit test will compile and will nominally pass.
inherited:
- client-request
- sidecar pre-processor request
# This is what should be sent to lambda
sidecar pre-processor:
output:
terminate:
statusCode: 403
passHeaders:
x-sidecar: super
json:
a: b
# Message is not provided
mashery response:
complete: true
status code: 403
headers:
x-sidecar: super
Content-Type: application/json
payload: "{\"a\":\"b\"}"
---
name: error reporting on internal problem
point: PreProcessor
desc: Checking that the unit test will compile and will nominally pass.
inherited:
- client-request
- sidecar pre-processor request
# This is what should be sent to lambda
sidecar pre-processor:
throw exception: "Lambda has failed"
mashery response:
complete: true
status code: 550
headers:
Content-Type: application/xml
payload: "<h1><![CDATA[Internal server error before processing the call, code 0x000003BB]]></h1>"
|
sidecar/adapter-core/src/test/resources/com/airfranceklm/amt/sidecar/base-test.yml
|
sidebar:
about: false # set to false or comment this line out if you want to remove the "how to use?" section from the sidebar
education: true # set to False if you want education in main section instead of in sidebar
# Profile information
name: <NAME>
tagline: Full Stack Developer
avatar: profile.png #place a 100x100 picture inside /assets/images/ folder and provide the name of the file below
# Sidebar links
email: <EMAIL>
phone: +91 888 280 5280
website: pccofvns.github.io #do not add http://
linkedin: pccofvns
github: pccofvns
gitlab:
bitbucket:
twitter: '@pccofvns'
stack-overflow: pccofvns
codewars:
languages:
- idiom: English
level: Professional
- idiom: Hindi
level: Native
interests:
- item: Trekking
link: https://ghumakkad.github.io/blog
- item: Mountaineering
link: https://ghumakkad.github.io/blog
- item: Reading
link: http://goodreads.com/pccofvns
- item: Swimming
link:
career-profile:
title: Career Profile
summary: |
Software Engineer with a broad range of experience in developing the full stack of n-tier, client/server, and microservices applications.
Innovative free thinker interested in challenging projects. Specialties: Software Engineering, Design and Architecture, Java/JEE, JavaScript, Node.js, AngularJS
education:
- degree: B.Tech. in Electronics
university: I.E.T. Lucknow
time: 2005 - 2009
details: |
# Describe your study here lorem ipsum dolor sit amet, consectetuer
# adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum
# sociis natoque penatibus et magnis dis parturient montes, nascetur
# ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu,
# pretium quis, sem.
#
# - Bullet point
# - Bullet point
experiences:
- role: Manager Technology
time: Jun 2014 – Present
company: Optum, Noida
details: |
Low level design and development of State Health Exchanges as per Patient Protection and Affordable Care Act (ACA, known colloquially as "Obamacare") of USA.
- Producing technical design for developers to work on after discussion with business analysts.
- Improve quality code delivered by Development
- Streamline and improve technology stacks used in organization.
- Full stack development of the web application
- role: Senior Software Engineer
time: Sep 2012 - May 2014
company: 3Pillar Global, Noida
details: |
Design and Development of an Advanced Advertising System for platforms like iTV, IPTV, VOD (Video On Demand), Cable TV, etc. as per the standards defined by The Society of Cable Telecommunications Engineers (SCTE).
# - Bullet point
# - Bullet point
- role: Software Engineer
time: Feb 2005 - Aug 2009
company: GlobalLogic, Noida
details: |
Design and development of web applications/services and mobile applications in different projects.
# - Bullet point
# - Bullet point
projects:
title: Projects
intro: >
Other projects.
assignments:
- title: MyForm16
link: "http://www.myform16.com"
tagline: "Income Tax eFiling simplified. Webapp for filing Income Tax Return online in India"
- title: School Management System
link: "#"
tagline: "A web application along with REST API for mobile apps to manage day to day interactions between school administration, teachers, students and their parents."
#publications:
# title: Publications
# intro: |
# You can list your publications in this section. Lorem ipsum dolor sit
# amet, consectetur adipiscing elit. Vestibulum et ligula in nunc
# bibendum fringilla a eu lectus.
#
# papers:
# - title: The Art of Computer Programming
# authors: <NAME>
# conference: Addison-Wesley, 1968
#
# - title: "Genetic Programming III: Darwinian Invention & Problem Solving"
# authors: <NAME>., <NAME>., <NAME>., <NAME>.
# conference: "Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 1st edn. (1999)"
#
# - title: A syntax directed compiler for Algol 60
# authors: <NAME>
# conference: "Comm. ACM 4 (1961), 51–55"
skills:
title: Skills & Proficiency
toolset:
- name: Java/JEE
level: 90%
- name: Javascript & jQuery
level: 70%
- name: Web Development
level: 90%
- name: HTML5 & CSS
level: 50%
- name: Ruby on Rails
level: 30%
footer: >
|
_data/cv.yml
|
_id: 6804ae70-3e04-11ea-9c07-251cc9b0efee
message: >-
Trying lwr.mpge.hashtafak.github.io.vun.ba evaluation
[URL=http://onlineroyal-casino.top/#no-deposit-online-casinos]foxwoods
casino[/URL] [URL=http://casinousgambling.space/#gambling]gambling
system[/URL]
[URL=http://scoutcampreviews.com/extra-super-avana/#extra-super-avana-lowest-price]extra
super avana lowest price[/URL]
[URL=http://chesscoachcentral.com/product/viagra-extra-dosage/#cheap-viagra-extra-dosage-online]cheap
viagra extra dosage online[/URL]
[URL=http://online-casinoroyal.best/#free-casino-games]casino free slots[/URL]
[URL=http://casino-gamblingbonus.top/#top-gambling-bonus]top gambling
bonus[/URL]
[URL=http://loveandlightmusic.net/product/clindamycin/#buy-clindamycin-without-prescription]clindamycin[/URL]
cheap clindamycin online
[URL=http://redemptionbrewworks.com/xalatan/#xalatan]xalatan[/URL] xalatan
online
[URL=http://loveandlightmusic.net/product/serofloinhaler/#buying-serofloinhaler-online]buying
serofloinhaler online[/URL] incorrectly <a
href="http://onlineroyal-casino.top/#new-no-deposit-casinos">casino open
usa</a> <a href="http://casinousgambling.space/#online-gambling">online
gambling</a> <a
href="http://scoutcampreviews.com/extra-super-avana/#extra-super-avana-canada">discount
extra super avana</a> <a
href="http://chesscoachcentral.com/product/viagra-extra-dosage/#viagra-extra-dosage-lowest-price">viagra
extra dosage tablets</a> <a
href="http://online-casinoroyal.best/#atlantic-city-casinos">free online
casino</a> <a
href="http://casino-gamblingbonus.top/#top-gambling-bonus">internet gambling
guide</a> <a
href="http://loveandlightmusic.net/product/clindamycin/#order-clindamycin">clindamycin</a>
<a href="http://redemptionbrewworks.com/xalatan/#best-price-xalatan">xalatan
online</a> <a
href="http://loveandlightmusic.net/product/serofloinhaler/#serofloinhaler-for-sale-overnight">serofloinhaler
for sale overnight</a> differently data,
http://onlineroyal-casino.top/#pala-casino casinos
http://casinousgambling.space/#online-gambling gambling
http://scoutcampreviews.com/extra-super-avana/#extra-super-avana extra super
avana extra super avana lowest price
http://chesscoachcentral.com/product/viagra-extra-dosage/#pharmacy-prices-for-viagra-extra-dosage
viagra extra dosage tablets
http://online-casinoroyal.best/#online-casino-gambling best online casino free
online casino http://casino-gamblingbonus.top/#gambling top gambling resource
http://loveandlightmusic.net/product/clindamycin/#cheap-clindamycin-online buy
clindamycin online cheap
http://redemptionbrewworks.com/xalatan/#lowest-price-xalatan xalatan.com
lowest price
http://loveandlightmusic.net/product/serofloinhaler/#buying-serofloinhaler-online
buy serofloinhaler uk spironolactone winner lignocaine pylorus.
name: udagata
email: <PASSWORD>
url: 'http://onlineroyal-casino.top/'
hidden: ''
date: '2020-01-23T17:18:43.704Z'
|
_data/comments/dear-diary/comment-1579799923705.yml
|
nameWithType: RegistryDockerTaskStep.UpdateStages.OverridingArgumentUpdate.withOverridingArgument
type: method
members:
- fullName: com.microsoft.azure.management.containerregistry.RegistryDockerTaskStep.UpdateStages.OverridingArgumentUpdate.withOverridingArgument(String name, OverridingArgument overridingArgument)
name: withOverridingArgument(String name, OverridingArgument overridingArgument)
nameWithType: RegistryDockerTaskStep.UpdateStages.OverridingArgumentUpdate.withOverridingArgument(String name, OverridingArgument overridingArgument)
parameters:
- description: <p>the name of the value to be overridden. </p>
name: name
type: <xref href="String?alt=String&text=String" data-throw-if-not-resolved="False"/>
- description: <p>the content of the overriding argument. </p>
name: overridingArgument
type: <xref href="com.microsoft.azure.management.containerregistry.OverridingArgument?alt=com.microsoft.azure.management.containerregistry.OverridingArgument&text=OverridingArgument" data-throw-if-not-resolved="False"/>
returns:
description: <p>the next stage of the container Docker task step update. </p>
type: <xref href="com.microsoft.azure.management.containerregistry.RegistryDockerTaskStep.Update?alt=com.microsoft.azure.management.containerregistry.RegistryDockerTaskStep.Update&text=RegistryDockerTaskStep.Update" data-throw-if-not-resolved="False"/>
summary: >-
<p>The function that specifies the overriding argument and what it will override.</p>
<p></p>
syntax: public Update withOverridingArgument(String name, OverridingArgument overridingArgument)
uid: com.microsoft.azure.management.containerregistry.RegistryDockerTaskStep.UpdateStages.OverridingArgumentUpdate.withOverridingArgument(String,OverridingArgument)
uid: com.microsoft.azure.management.containerregistry.RegistryDockerTaskStep.UpdateStages.OverridingArgumentUpdate.withOverridingArgument*
fullName: com.microsoft.azure.management.containerregistry.RegistryDockerTaskStep.UpdateStages.OverridingArgumentUpdate.withOverridingArgument
name: withOverridingArgument(String name, OverridingArgument overridingArgument)
package: com.microsoft.azure.management.containerregistry
metadata: {}
|
legacy/docs-ref-autogen/com.microsoft.azure.management.containerregistry.RegistryDockerTaskStep.UpdateStages.OverridingArgumentUpdate.withOverridingArgument.yml
|
director_uuid: XXXXXXX #STACK_LEVEL
instance_groups:
- azs:
- z1
instances: 3
jobs:
- name: mms-automation-agent
properties:
mms-automation-agent:
indexed_consul_services:
mms-automation-agent:
name: {{.Guid}}
mmsApiKey: {{.BoshParam "mms-api-key"}} # SERVICE-INSTANCE LEVEL
mmsBaseUrl: {{.BoshParam "mms-base-url"}} # SERVICE-INSTANCE LEVEL
mmsGroupId: {{.BoshParam "mms-group-id"}} # SERVICE-INSTANCE LEVEL
service_id: {{.Guid}} #SERVICE-LEVEL same as service-id
release: mongoenterprise
- name: consul
properties:
consul:
acl_datacenter: some-datacenter #STACK_LEVEL
acl_token: <PASSWORD> #STACK_LEVEL
datacenter: some-datacenter #STACK_LEVEL
encrypt: XXXXXXXXXXXXXXX== #STACK_LEVEL
join_hosts:
- 192.168.0.50 #STACK_LEVEL
- 192.168.0.51 #STACK_LEVEL
- 192.168.0.52 #STACK_LEVEL
server: false
release: konsul
migrated_from:
- az: z1
name: mongoenterprise_{{.Guid}}_z1 #SERVICE-INSTANCE LEVEL e.g. mongoenterprise_<serviceid>
name: mongoenterprise_{{.Guid}} #SERVICE-INSTANCE LEVEL e.g. mongoenterprise_<serviceid>
networks:
- default:
- dns
- gateway
name: mongoenterprise #STACK_LEVEL
persistent_disk_type: {{.BoshParam "plan"}}
stemcell: bosh-openstack-kvm-ubuntu-trusty-go_agent
vm_type: {{.Guid}} #SERVICE-INSTANCE LEVEL
name: {{.Prefix}}-{{.Guid}} #SERVICE-INSTANCE LEVEL e.g. <serviceid>
properties: {}
releases:
- name: {{.BoshParam "release-name" }} #SERVICE-TEMPLATE LEVEL
version: {{.BoshParam "release-version" }} #SERVICE-TEMPLATE LEVEL
- name: konsul
version: {{.BoshParam "consul-version" }} #SERVICE_TEMPLATE LEVEL
stemcells:
- alias: bosh-openstack-kvm-ubuntu-trusty-go_agent
name: bosh-openstack-kvm-ubuntu-trusty-go_agent
version: 3262.4
update:
canaries: 1
canary_watch_time: 30000-180000
max_in_flight: 1
serial: true
update_watch_time: 5000-180000
|
broker/src/test/resources/bosh/template_mongodbent_v5.yml
|
cuda: 0
execution: 3
evolution:
max_generations: 100
train_best_generations: 50
elitism: 0.0
tournament_size: 2
max_layers: 5
sequential_layers: false
add_layer_prob: 0.3
rm_layer_prob: 0.1
gene_mutation_prob: 0.1
mutate_gan_type_prob: 0
crossover_rate: 0.0
freeze_when_change: false
control_number_layers: false
freeze_best: false
dynamic_shape: false
min_div_scale: 8
algorithm: NEAT
adjust_learning_rate: false
nslc:
archive_prob: 0.1
neighbors_size: all
evaluation:
type: all-vs-all
mix_adversaries: false
best_size: 2
same_batches: false
iterations: 1
initialize_all: true
reset_optimizer: false
clone_adversarial: false
speciation:
size: 2
keep_best: true
threshold: 1
distance: num_genes
fitness:
discriminator: loss_rasgan
generator: loss_rasgan
fid_true_sample_size: 10000
fid_sample_size: 10000
fid_dimension: 2048
fid_batch_size: 50
evaluation_batches: 1
skill_rating:
tau: 0.3
sigma: 0.06
gan:
dataset: FashionMNIST
dataset_resize: null
dataset_classes: null
batches_limit: 50
batch_size: 64
data_loader_workers: 5
critic_iterations: 1
generator_iterations: 1
possible_gan_types:
- gan
- wgan
- lsgan
- rsgan
- rasgan
- hinge
type: rasgan
label_smoothing: false
normalization:
- none
- batch
- spectral
use_wscale: false
use_minibatch_stddev: false
dropout: false
latent_dim: 128
discriminator:
population_size: 5
fixed: false
use_gradient_penalty: false
gradient_penalty_lambda: 10
possible_layers:
- Conv2d
optimizer:
type: RMSprop
copy_optimizer_state: false
learning_rate: 0.0002
weight_decay: 0
generator:
population_size: 5
fixed: false
possible_layers:
- Deconv2d
- Deconv2dUpsample
optimizer:
type: RMSprop
copy_optimizer_state: false
learning_rate: 0.0002
weight_decay: 0
layer:
keep_weights: true
resize_weights: true
resize_linear_weights: true
activation_functions:
- ReLU
linear:
min_features_power: 6
max_features_power: 13
conv2d:
min_channels_power: 6
max_channels_power: 8
random_out_channels: true
kernel_size: 3
force_double: false
enable_skip_conn: false
stride: 2
deconv2d:
use_upsample: false
enable_skip_conn: false
upsample:
limit_output_size: true
stats:
num_generated_samples: 36
print_interval: 1
calc_inception_score: false
calc_fid_score: false
calc_fid_score_best: true
calc_rmse_score: false
calc_skill_rating: false
save_best_model: true
save_best_interval: 5
notify: false
min_notification_interval: 30
|
conf/config.yaml
|
---
AWSTemplateFormatVersion: 2010-09-09
Description: Architecture to experiment with AWS Advanced Networking Certification Sample Question 2
Metadata:
Author:
Description: <NAME> (<EMAIL>)
License:
Use as you wish. No warranty offered - this is just being used to experiment with AWS.
Question:
Description:
Your application server instances reside in the private subnet of your VPC. These instances need to
access a Git repository on the Internet. You create a NAT gateway in the public subnet of your VPC. The
NAT gateway can reach the Git repository, but instances in the private subnet cannot. You confirm that a
default route in the private subnet route table points to the NAT gateway. The security group for your
application server instances permits all traffic to the NAT gateway.
What configuration change should you make to ensure that these instances can reach the Git repository?
Answers:
Description:
A) Assign public IP addresses to the instances and route 0.0.0.0/0 to the Internet gateway.
B) Configure an outbound rule on the application server instance security group for the Git repository.
C) Configure inbound network access control lists (network ACLs) to allow traffic from the Git repository to
the public subnet.
D) Configure an inbound rule on the application server instance security group for the Git repository.
AWS::CloudFormation::Interface:
ParameterGroups:
- Label:
default: Amazon VPC Parameters
Parameters:
- EC2KeyName
- BastionInstanceType
- BastionSecurityGroup
- Subnets
- Ami
ParameterLabels:
BastionSecurityGroup:
default: Bastion Security Group
BastionInstanceType:
default: Instance Type
EC2KeyName:
default: Existing Key Pair
Ami:
default: Amazon Machine Image (Ami)
Subnets:
default: Public Subnet
Parameters:
Ami:
Type: String
AllowedValues:
- ami-14c5486b
ConstraintDescription: Must be a valid Amazon Image
Default: ami-14c5486b
Description: Amazon Machine Image to use for bastion host
BastionSecurityGroup:
Type: AWS::EC2::SecurityGroup::Id
Description: Select the bastion security group
BastionInstanceType:
Type: String
AllowedValues:
- t2.nano
- t2.micro
- t2.small
ConstraintDescription: Must be a valid Amazon EC2 instance type
Default: t2.nano
Description: Bastion EC2 instance type
EC2KeyName:
Type: AWS::EC2::KeyPair::KeyName
Description: Name of an existing EC2 KeyPair. You will use this to connect to your bastion host.
Subnets:
Type: List<AWS::EC2::Subnet::Id>
Description: Select existing public subnet.
Resources:
BastionAutoScalingGroup:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
Cooldown: 60
HealthCheckGracePeriod: 120
HealthCheckType: EC2
LaunchConfigurationName: !Ref BastionLaunchConfiguration
MaxSize: 1
MinSize: 1
Tags:
- Key: Name
Value: !Join [ '', [ 'Bastion / ', !Ref 'AWS::StackName' ] ]
PropagateAtLaunch: true
VPCZoneIdentifier:
- !Select [ 0, !Ref Subnets ]
BastionLaunchConfiguration:
Type: AWS::AutoScaling::LaunchConfiguration
Properties:
IamInstanceProfile: !Ref BastionInstanceProfile
ImageId: !Ref Ami
InstanceMonitoring: true
InstanceType: !Ref BastionInstanceType
KeyName: !Ref EC2KeyName
SecurityGroups:
- !Ref BastionSecurityGroup
BastionInstanceProfile:
Type: AWS::IAM::InstanceProfile
Properties:
Path: '/'
Roles:
- !Ref BastionInstanceRole
BastionInstanceRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Version: 2012-10-17
Statement:
- Effect: Allow
Principal:
Service:
- ec2.amazonaws.com
Action:
- sts:AssumeRole
Path: '/'
Policies:
- PolicyName: logs
PolicyDocument:
Version: 2012-10-17
Statement:
- Effect: Allow
Action:
- logs:CreateLogGroup
- logs:CreateLogStream
- logs:PutLogEvents
- logs:DescribeLogStreams
Resource:
- arn:aws:logs:*:*:*
|
advanced-networking/aws-sample-questions/sample-question-2/aws-sample-question-2-03-bastion.yaml
|
uid: api.kaiza.la.v1.kaizala.microsoft.com.groupmanagementapis.getsubscribers
name: Get Subscribers
service: Kaizala
groupName: Group Management APIs
apiVersion: "1.0"
summary: "Obtenir les abonnés d’un groupe public \nCette API récupère la liste des abonnés d’un groupe public géré"
consumes:
- application/json
produces:
- application/json
paths:
- content: POST https://{endpoint-url}/v1/groups/{test-public-group-id}/subscribers
uriParameters:
- name: test-public-group-id
in: path
isRequired: true
description: ''
types:
- uid: string
responses:
- name: 200 OK
description: ''
requestBody:
- name: default
parameters:
- name: count
in: body
isRequired: true
description: Nombre d’abonnés dans le groupe public
types:
- uid: integer
- name: cursor
in: body
isRequired: true
description: Début du resultset. Pour la pagination. À utiliser dans le corps de la demande pour extraire le jeu de résultats suivant. Présent en réponse uniquement s’il existe un jeu de résultats suivant valide.
types:
- uid: string
requestHeader:
- name: accessToken
in: header
isRequired: true
description: Jeton d’accès reçu à partir du point de terminaison d’authentification
types:
- uid: string
- name: Content-Type
in: header
isRequired: true
description: application/json
types:
- uid: string
definitions:
- name: Request - Subscribers of a public group
description: Obtenir les abonnés d’un groupe public
kind: object
properties:
- name: count
description: Nombre d’abonnés dans le groupe public
types:
- uid: integer
- name: cursor
description: Début du resultset. Pour la pagination. À utiliser dans le corps de la demande pour extraire le jeu de résultats suivant. Présent en réponse uniquement s’il existe un jeu de résultats suivant valide.
types:
- uid: string
examples: []
security: []
metadata:
description: "En savoir plus sur Kaizala service : obtenir les abonnés d’un groupe public \nCette API récupère la liste des abonnés d’un groupe public géré"
ms.openlocfilehash: 64079d76fd23dcdbe98176698f0c5ba6b388cf8b
ms.sourcegitcommit: <PASSWORD>
ms.translationtype: MT
ms.contentlocale: fr-FR
ms.lasthandoff: 07/06/2021
ms.locfileid: "53302468"
errorCodes: []
|
docs-ref-autogen/kaizala.microsoft.com/Group-Management-APIs/Get-Subscribers.yml
|
version: '3.4'
# all common shared environment variables defined here:
x-common-env-variables: &common-variables
EDGEX_SECURITY_SECRET_STORE: "false"
Registry_Host: edgex-core-consul
Clients_CoreData_Host: edgex-core-data
Clients_Data_Host: edgex-core-data # For device Services
Clients_Notifications_Host: edgex-support-notifications
Clients_Metadata_Host: edgex-core-metadata
Clients_Command_Host: edgex-core-command
Clients_Scheduler_Host: edgex-support-scheduler
Clients_RulesEngine_Host: edgex-kuiper
Clients_VirtualDevice_Host: edgex-device-virtual
Databases_Primary_Host: edgex-redis
# Required in case old configuration from previous release used.
# Change to "true" if re-enabling logging service for remote logging
Logging_EnableRemote: "false"
networks:
loss-detection-app_edgex-network:
external: true
volumes:
db_vol:
driver: local
pos_vol:
driver: local
services:
event-reconciler:
image: rtsf-at-checkout/event-reconciler:dev
ports:
- "9083:9083"
- "48095:48095"
restart: "on-failure:5"
container_name: event-reconciler
hostname: event-reconciler
networks:
- loss-detection-app_edgex-network
environment:
<<: *common-variables
Service_Host: event-reconciler
MessageBus_SubscribeHost_Host: edgex-core-data
ApplicationSettings_ProductLookupEndpoint: product-lookup:8083
device-scale:
image: rtsf-at-checkout/device-scale:dev
restart: "on-failure:5"
container_name: device-scale
hostname: device-scale
networks:
- loss-detection-app_edgex-network
environment:
<<: *common-variables
Service_Host: device-scale
devices:
- /dev/:/dev/
privileged: true
product-lookup:
image: rtsf-at-checkout/product-lookup:dev
command: "/product-lookup -file /db_initialization/all-products.json"
ports:
- "8083:8083"
restart: "on-failure:5"
container_name: product-lookup
hostname: product-lookup
networks:
- loss-detection-app_edgex-network
loss-detector:
image: rtsf-at-checkout/loss-detector:dev
ports:
- "48096:48096"
restart: "on-failure:5"
container_name: loss-detector
hostname: loss-detector
networks:
- loss-detection-app_edgex-network
environment:
<<: *common-variables
Service_Host: loss-detector
MessageBus_SubscribeHost_Host: event-reconciler
ApplicationSettings_NotificationsURL: http://edgex-support-notifications:48060
rsp-event-handler:
image: rtsf-at-checkout/rsp-controller-event-handler:dev
restart: "on-failure:5"
container_name: rsp-event-handler
hostname: rsp-event-handler
ports:
- 42222:48096
networks:
- loss-detection-app_edgex-network
environment:
<<: *common-variables
Service_Host: rsp-event-handler
MessageBus_SubscribeHost_Host: edgex-core-data
|
loss-detection-app/docker-compose.loss-detection.yml
|
---
frAtmIwfConnAtm2FrLastChange: 1.3.6.1.2.1.86.1.2.1.10
frAtmIwfConnSarTimeOuts: 1.3.6.1.2.1.86.1.2.1.19
frAtmIwfConnectionDescriptor: 1.3.6.1.2.1.86.1.2.1.13
frAtmIwfConnOverSizedSDUs: 1.3.6.1.2.1.86.1.2.1.17
frAtmIwfConnEncapsulationMappingMode: 1.3.6.1.2.1.86.1.4.1.6
frAtmIwfVclCrossConnectIdentifier: 1.3.6.1.2.1.86.1.5.1.1
frAtmIwfConnStatusChange: 1.3.6.1.2.1.86.2.0.1
frAtmIwfConnFailedFrameTranslate: 1.3.6.1.2.1.86.1.2.1.14
frAtmIwfConnCongestionMappingMode: 1.3.6.1.2.1.86.1.4.1.5
frAtmIwfConnFragAndReassEnabled: 1.3.6.1.2.1.86.1.4.1.8
frAtmIwfConnVpi: 1.3.6.1.2.1.86.1.2.1.3
frAtmIwfConnFr2AtmOperStatus: 1.3.6.1.2.1.86.1.2.1.11
frAtmIwfConnEncapsulationMappings: 1.3.6.1.2.1.86.1.4.1.7
frAtmIwfConnArpTranslationEnabled: 1.3.6.1.2.1.86.1.4.1.9
frAtmIwfCompliances: 1.3.6.1.2.1.86.3.2
frAtmIwfConnIndexNext: 1.3.6.1.2.1.86.1.1
frAtmIwfConnDlci: 1.3.6.1.2.1.86.1.2.1.6
frAtmIwfConnRowStatus: 1.3.6.1.2.1.86.1.2.1.7
frAtmIwfConnAdminStatus: 1.3.6.1.2.1.86.1.2.1.8
frAtmIwfConnClpToDeMappingMode: 1.3.6.1.2.1.86.1.4.1.4
frAtmIwfConformance: 1.3.6.1.2.1.86.3
frAtmIwfConnAtmPort: 1.3.6.1.2.1.86.1.2.1.2
frAtmIwfConnFr2AtmLastChange: 1.3.6.1.2.1.86.1.2.1.12
frAtmIwfConnectionDescriptorTable: 1.3.6.1.2.1.86.1.4
frAtmIwfConnectionDescriptorEntry: 1.3.6.1.2.1.86.1.4.1
frAtmIwfConnectionDescriptorIndex: 1.3.6.1.2.1.86.1.4.1.1
frAtmIwfMIB: 1.3.6.1.2.1.86
frAtmIwfMIBObjects: 1.3.6.1.2.1.86.1
frAtmIwfConnIndex: 1.3.6.1.2.1.86.1.2.1.1
frAtmIwfConnVci: 1.3.6.1.2.1.86.1.2.1.4
frAtmIwfConnFrPort: 1.3.6.1.2.1.86.1.2.1.5
frAtmIwfConnectionDescriptorIndexNext: 1.3.6.1.2.1.86.1.3
frAtmIwfConnDescriptorRowStatus: 1.3.6.1.2.1.86.1.4.1.2
frAtmIwfVclTable: 1.3.6.1.2.1.86.1.5
frAtmIwfVclEntry: 1.3.6.1.2.1.86.1.5.1
frAtmIwfTrapsPrefix: 1.3.6.1.2.1.86.2.0
frAtmIwfConnAtm2FrOperStatus: 1.3.6.1.2.1.86.1.2.1.9
frAtmIwfGroups: 1.3.6.1.2.1.86.3.1
frAtmIwfConnOverSizedFrames: 1.3.6.1.2.1.86.1.2.1.15
frAtmIwfConnFailedAal5PduTranslate: 1.3.6.1.2.1.86.1.2.1.16
frAtmIwfConnCrcErrors: 1.3.6.1.2.1.86.1.2.1.18
frAtmIwfConnectionTable: 1.3.6.1.2.1.86.1.2
frAtmIwfConnectionEntry: 1.3.6.1.2.1.86.1.2.1
frAtmIwfConnDeToClpMappingMode: 1.3.6.1.2.1.86.1.4.1.3
frAtmIwfTraps: 1.3.6.1.2.1.86.2
|
data/snmp/mibs/FR-ATM-PVC-SERVICE-IWF-MIB.yaml
|
version: '3'
services:
client:
hostname: client
image: jiscmoonshot/client
cap_add:
- IPC_LOCK
volumes:
- ./config/user_credentials.xml:/config/user_credentials.xml
- /tmp/.X11-unix/X0:/tmp/.X11-unix/X0
build:
context: .
dockerfile: Dockerfile.client
sshserver:
hostname: sshserver
image: jiscmoonshot/sshserver
environment:
RPP: idp1
USERS: "moonshot"
build:
context: .
dockerfile: Dockerfile.sshserver
volumes:
- ./config/ca.pem:/etc/ca.pem
- ./config/client.pem:/etc/client.pem
- ./config/client.key:/etc/client.key
httpserver:
hostname: httpserver
image: jiscmoonshot/httpserver
environment:
RPP: idp1
build:
context: .
dockerfile: Dockerfile.httpserver
volumes:
- ./config/ca.pem:/etc/ca.pem
- ./config/client.pem:/etc/client.pem
- ./config/client.key:/etc/client.key
idp1:
hostname: idp1
image: jiscmoonshot/idp
environment:
IDP_REALM: test1.org
TR_HOSTNAME: trustrouter
TR_GSSNAME: <EMAIL>
APC_REALM: apc.org
build:
context: .
dockerfile: Dockerfile.idp
volumes:
- ./config/idp_users.txt:/etc/freeradius/mods-config/files/idp_users.txt
- ./config/ca.pem:/etc/freeradius/certs/ca.pem
- ./config/server.pem:/etc/freeradius/certs/server.pem
- ./config/server.key:/etc/freeradius/certs/server.key
- ./config/client.pem:/etc/freeradius/certs/client.pem
- ./config/client.key:/etc/freeradius/certs/client.key
- ./config/idp1_credential.xml:/credential.xml
idp2:
hostname: idp2
image: jiscmoonshot/idp
environment:
IDP_REALM: test2.org
TR_HOSTNAME: trustrouter
TR_GSSNAME: <EMAIL>
APC_REALM: apc.org
build:
context: .
dockerfile: Dockerfile.idp
volumes:
- ./config/idp_users.txt:/etc/freeradius/mods-config/files/idp_users.txt
- ./config/ca.pem:/etc/freeradius/certs/ca.pem
- ./config/server.pem:/etc/freeradius/certs/server.pem
- ./config/server.key:/etc/freeradius/certs/server.key
- ./config/client.pem:/etc/freeradius/certs/client.pem
- ./config/client.key:/etc/freeradius/certs/client.key
- ./config/idp2_credential.xml:/credential.xml
apc:
hostname: apc
image: jiscmoonshot/apc
build:
context: .
dockerfile: Dockerfile.apc
environment:
APC_REALM: apc.org
TR_GSSNAME: <EMAIL>
volumes:
- ./config/ca.pem:/etc/freeradius/certs/ca.pem
- ./config/server.pem:/etc/freeradius/certs/server.pem
- ./config/server.key:/etc/freeradius/certs/server.key
- ./config/client.pem:/etc/freeradius/certs/client.pem
- ./config/client.key:/etc/freeradius/certs/client.key
- ./config/apc_users.txt:/etc/freeradius/mods-config/files/apc_users.txt
trustrouter:
hostname: trustrouter
image: jiscmoonshot/trustrouter
build:
context: .
dockerfile: Dockerfile.tr
environment:
APC_HOSTNAME: apc
volumes:
- ./config/ca.pem:/etc/ca.pem
- ./config/client.pem:/etc/client.pem
- ./config/client.key:/etc/client.key
- ./config/tr_credential.xml:/credential.xml
- ./config/main.cfg:/etc/trust_router/conf.d/default/main.cfg
- ./config/orgs.cfg:/etc/trust_router/conf.d/default/orgs.cfg
|
docker-compose.yml
|
version: "3.8"
# environment variable PWD is assumed to be grapl root directory
services:
grapl-rust-node-identifier-integration-tests:
image: grapl/grapl-node-identifier-test:${TAG:-latest}
build:
context: ${PWD}/src/rust
target: build-test-integration
args:
- PROFILE=debug
command: bash -c "
wait-for-it grapl-provision:8126 --timeout=180 &&
cargo test --manifest-path node-identifier/Cargo.toml --features integration"
environment:
- GRAPL_LOG_LEVEL=${GRAPL_LOG_LEVEL:-INFO}
- RUST_LOG=INFO
- RUST_BACKTRACE=1
- BUCKET_PREFIX=local-grapl
- IS_LOCAL=True
- MG_ALPHAS=grapl-master-graph-db:9080
- SOURCE_QUEUE_URL=http://sqs.us-east-1.amazonaws.com:9324/queue/grapl-node-identifier-retry-queue
- GRAPH_MERGER_QUEUE_URL=http://sqs.us-east-1.amazonaws.com:9324/queue/grapl-graph-merger-queue
- STATIC_MAPPING_TABLE=local-grapl-static_mapping_table
- DYNAMIC_SESSION_TABLE=local-grapl-dynamic_session_table
- PROCESS_HISTORY_TABLE=local-grapl-process_history_table
- FILE_HISTORY_TABLE=local-grapl-file_history_table
- INBOUND_CONNECTION_HISTORY_TABLE=local-grapl-inbound_connection_history_table
- OUTBOUND_CONNECTION_HISTORY_TABLE=local-grapl-outbound_connection_history_table
- NETWORK_CONNECTION_HISTORY_TABLE=local-grapl-network_connection_history_table
- IP_CONNECTION_HISTORY_TABLE=local-grapl-ip_connection_history_table
- ASSET_ID_MAPPINGS=local-grapl-asset_id_mappings
grapl-analyzerlib-integration-tests:
image: grapl/grapl-analyzerlib-test:${TAG:-latest}
build:
context: ${PWD}/src
dockerfile: ./python/Dockerfile
target: grapl-analyzerlib-test
command: bash -c '
wait-for-it grapl-provision:8126 --timeout=180 &&
source venv/bin/activate &&
cd grapl_analyzerlib &&
py.test -n auto -m "integration_test"'
environment:
- GRAPL_LOG_LEVEL=${GRAPL_LOG_LEVEL:-INFO}
- BUCKET_PREFIX=local-grapl
- IS_LOCAL=True
- MG_ALPHAS=grapl-master-graph-db:9080
# grapl-analyzer-deployer-integration-tests:
analyzer-executor-integration-tests:
image: grapl/grapl-analyzer-executor-test:${TAG:-latest}
build:
context: ${PWD}/src
dockerfile: ./python/Dockerfile
target: analyzer-executor-test
command: bash -c '
source venv/bin/activate &&
cd analyzer_executor &&
export PYTHONPATH="$${PYTHONPATH}:$$(pwd)/src" &&
py.test -n auto -m "integration_test"'
environment:
- HITCACHE_ADDR=dev-shared-redis
- HITCACHE_PORT=6379
- MESSAGECACHE_ADDR=dev-shared-redis
- MESSAGECACHE_PORT=6379
- GRAPL_LOG_LEVEL=${GRAPL_LOG_LEVEL:-INFO}
- IS_LOCAL=True
- IS_RETRY=False
engagement-edge-integration-tests:
image: grapl/grapl-engagement-edge-test:${TAG:-latest}
build:
context: ${PWD}/src
dockerfile: ./python/Dockerfile
target: engagement-edge-test
command: bash -c '
wait-for-it grapl-engagement-edge:8900 --timeout=180 &&
source venv/bin/activate &&
cd engagement_edge &&
py.test -n auto -m "integration_test"'
environment:
- IS_LOCAL=True
- BUCKET_PREFIX=local-grapl
      - UX_BUCKET_URL=ux_bucket_url
engagement-view-integration-tests:
image: cypress/included:5.6.0
working_dir: /test
command: --browser chrome --headless # entrypoint is: cypress run
volumes:
- .:/test
networks:
default:
external:
name: grapl-network
|
test/docker-compose.integration-tests.yml
|
author: mporter
description: An electro-mechanical binary clock
version: 0.1.0
libs_version: ${mos.version}
modules_version: ${mos.version}
mongoose_os_version: ${mos.version}
# Optional. List of tags for online search.
tags:
- c
# List of files / directories with C sources. No slashes at the end of dir names.
sources:
- src
# List of dirs. Files from these dirs will be copied to the device filesystem
filesystem:
- fs
# Custom configuration entries, settable via "device configuration"
# Below is a custom firmware configuration example.
# Uncomment and modify according to your needs:
config_schema:
- ["binclock_app", "o", {title: "Binary clock app"}]
- ["i2c.enable", true]
# - ["my_app.bool_value", "b", false, {title: "Some boolean value"}]
# - ["my_app.string_value", "s", "", {title: "Some string value"}]
# - ["my_app.int_value", "i", 123, {title: "Some integer value"}]
# These settings get compiled into the C structure, and can be accessed
# from the C code this way:
#
# printf("Hello from %s!\n", mgos_sys_config_get_device_id());
#
# Settings are cool: can be modified remotely without full firmware upgrade!
#
# To see all available compiled settings, build the firmware and open
# build/gen/mgos_config.h file.
#
# Also, in this config_schema section, you can override existing
# settings that have been created by other libraries. For example, debug log
# level is 2 by default. For this firmware we can override it to 3:
#
# config_schema:
# - ["debug.level", 3]
libs:
- origin: https://github.com/mongoose-os-libs/boards
- origin: https://github.com/mongoose-os-libs/ca-bundle
- origin: https://github.com/mongoose-os-libs/rpc-service-config
- origin: https://github.com/mongoose-os-libs/rpc-service-fs
- origin: https://github.com/mongoose-os-libs/rpc-uart
- origin: https://github.com/mongoose-os-libs/arduino-adafruit-pwm-servo
- origin: https://github.com/mongoose-os-libs/rpc-service-ota
- origin: https://github.com/mongoose-os-libs/rpc-service-wifi
- origin: https://github.com/mongoose-os-libs/rpc-ws
- origin: https://github.com/mongoose-os-libs/wifi
- origin: https://github.com/mongoose-os-libs/wifi-setup
- origin: https://github.com/mongoose-os-libs/mjs
- origin: https://github.com/mongoose-os-libs/dash
- origin: https://github.com/mongoose-os-libs/dns-sd
- origin: https://github.com/mongoose-os-libs/shadow
- origin: https://github.com/mongoose-os-libs/ota-shadow
- origin: https://github.com/mongoose-os-libs/ota-http-client
- origin: https://github.com/mongoose-os-libs/ota-http-server
- origin: https://github.com/mongoose-os-libs/spi
- origin: https://github.com/mongoose-os-libs/vfs-dev-spi-flash
- origin: https://github.com/mongoose-os-libs/core
- origin: https://github.com/mongoose-os-libs/http-server
- origin: https://github.com/mongoose-os-libs/provision
- origin: https://github.com/mongoose-os-libs/sntp
# Used by the mos tool to catch mos binaries incompatible with this file format
manifest_version: 2017-09-29
|
mos.yml
|
security:
encoders:
AppBundle\Entity\User:
      algorithm: md5  # NOTE(review): md5 is broken for password hashing; migrate to bcrypt or argon2i
encode_as_base64: false
iterations: 0
# https://symfony.com/doc/current/security.html#b-configuring-how-users-are-loaded
providers:
in_memory:
memory: ~
medical_db:
entity:
class: AppBundle:User
property: username
firewalls:
# disables authentication for assets and the profiler, adapt it according to your needs
dev:
pattern: ^/(_(profiler|wdt)|css|images|js)/
security: false
main:
anonymous: ~
# activate different ways to authenticate
# https://symfony.com/doc/current/security.html#a-configuring-how-your-users-will-authenticate
http_basic: ~
# https://symfony.com/doc/current/security/form_login_setup.html
provider: medical_db
form_login:
login_path: medical_auth_login
check_path: medical_auth_login
logout:
path: /auth/logout
target: /
access_control:
- { path: ^/auth/login, roles: IS_AUTHENTICATED_ANONYMOUSLY }
- { path: ^/user, roles: ROLE_ADMIN }
- { path: ^/transaksi, roles: [ROLE_PELAYANAN, ROLE_ADMIN] }
- { path: ^/pelanggan, roles: [ROLE_PELAYANAN, ROLE_ADMIN] }
- { path: ^/sampel, roles: [ROLE_PELAYANAN, ROLE_ANALIS, ROLE_ADMIN]}
- { path: ^/hasil/list, roles: [ROLE_PELAYANAN, ROLE_MANAJER, ROLE_ADMIN] }
- { path: ^/hasil/pending, roles: [ROLE_PELAYANAN, ROLE_MANAJER, ROLE_ADMIN] }
- { path: ^/hasil/approved, roles: [ROLE_PELAYANAN, ROLE_MANAJER, ROLE_ANALIS, ROLE_ADMIN]}
- { path: ^/hasil/refusal, roles: [ROLE_PELAYANAN, ROLE_MANAJER, ROLE_ANALIS, ROLE_ADMIN]}
- { path: ^/lab, roles: [ROLE_ANALIS, ROLE_ADMIN]}
- { path: ^/stp, roles: [ROLE_MANAJER, ROLE_ADMIN]}
- { path: ^/, roles: [ROLE_ADMIN, ROLE_MANAJER, ROLE_ANALIS, ROLE_PELAYANAN] }
|
app/config/security.yml
|
name: eggnog
description: Functional annotation of proteins using orthologous groups and phylogenies
icon: false
tags:
- annotation
- fasta
- prokaryote
modules:
- eggnog_download
- eggnog_mapper
input:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- fasta:
type: file
description: FASTA assembly file
pattern: "*.{fasta,fasta.gz,fa,fa.gz,fna,fna.gz}"
- db:
type: folder
description: directory containing eggNOG databases
pattern: "*"
output:
- meta:
type: map
description: |
Groovy Map containing sample information
e.g. [ id:'test', single_end:false ]
- hits:
type: file
description: Results from the search phase, from HMMER, Diamond or MMseqs2
pattern: "*.emapper.hits"
- seed_orthologs:
type: file
description: Results from parsing the hits
pattern: "*.emapper.seed_orthologs"
- annotations:
type: file
description: Results from the annotation phase
pattern: "*.emapper.annotations"
- xlsx:
type: file
description: Annotations in .xlsx format
pattern: "*.emapper.annotations.xlsx"
- orthologs:
type: file
description: List of orthologs found for each query
pattern: "*.emapper.orthologs"
- genepred:
type: file
description: Sequences of predicted CDS
pattern: "*.emapper.genepred.fasta"
- gff:
type: file
description: GFF of predicted CDS
pattern: "*.emapper.gff"
- no_anno:
type: file
description: Sequences without annotation
pattern: "*.emapper.no_annotations.fasta"
- pfam:
type: file
description: Positions of the PFAM domains identified
pattern: "*.emapper.pfam"
- versions:
type: file
description: File containing software versions
pattern: "versions.yml"
docs:
introduction: |
The `eggnog` module uses [eggNOG-mapper](https://github.com/eggnogdb/eggnog-mapper) to assign
functional annotation to protein sequences. eggNOG-mapper uses orthologous groups and phylogenies
from the eggNOG database to more precisely functionally annotate than traditional homology methods.
output:
tree: |
eggnog/
├── <SAMPLE_NAME>
│ ├── <SAMPLE_NAME>.emapper.annotations
│ ├── <SAMPLE_NAME>.emapper.hits
│ ├── <SAMPLE_NAME>.emapper.seed_orthologs
│ └── logs
│ └── eggnog
│ ├── nf-eggnog.{begin,err,log,out,run,sh,trace}
│ └── versions.yml
├── logs
│ └── custom_dumpsoftwareversions
│ ├── nf-custom_dumpsoftwareversions.{begin,err,log,out,run,sh,trace}
│ └── versions.yml
├── nf-reports
│ └── eggnog-trace.txt
├── software_versions.yml
└── software_versions_mqc.yml
add_note: false
folders:
- name: eggNOG-mapper
description: |
Below is a description of the _per-sample_ results from [eggNOG-mapper](https://github.com/eggnogdb/eggnog-mapper).
For full details about each of the eggNOG output files, see
      [eggNOG-mapper - Outputs](https://github.com/eggnogdb/eggnog-mapper/wiki/eggNOG-mapper-v2.1.5-to-v2.1.7#Output_format).
table: |
| Filename | Description |
|--------------------------|-------------|
| <SAMPLE_NAME>.emapper.annotations | Results from the annotation phase |
| <SAMPLE_NAME>.emapper.hits | Results from the search phase, from HMMER, Diamond or MMseqs2 |
| <SAMPLE_NAME>.emapper.seed_orthologs | Results from parsing the hits |
| <SAMPLE_NAME>.emapper.annotations.xlsx | (Optional) Annotations in .xlsx format |
| <SAMPLE_NAME>.emapper.orthologs | (Optional) List of orthologs found for each query |
| <SAMPLE_NAME>.emapper.genepred.fasta | (Optional) Sequences of predicted CDS |
| <SAMPLE_NAME>.emapper.gff | (Optional) GFF of predicted CDS |
| <SAMPLE_NAME>.emapper.no_annotations.fasta | (Optional) Sequences without annotation |
| <SAMPLE_NAME>.emapper.pfam | (Optional) Positions of the PFAM domains identified |
citations:
- eggnog_mapper
- eggnog_db
|
subworkflows/local/eggnog/meta.yml
|
name: DevTools
on:
push:
branches: [master]
pull_request: # run on all PRs, not just PRs to a particular branch
jobs:
integration:
runs-on: macos-latest # while macbots are much slower, linux reliably crashes running this
steps:
- name: Set $DEPOT_TOOLS_PATH
run: echo "DEPOT_TOOLS_PATH=$GITHUB_WORKSPACE/depot-tools" >> $GITHUB_ENV
- name: Set $DEVTOOLS_PATH
run: echo "DEVTOOLS_PATH=$GITHUB_WORKSPACE/devtools-frontend" >> $GITHUB_ENV
- name: Set $BLINK_TOOLS_PATH
run: echo "BLINK_TOOLS_PATH=$GITHUB_WORKSPACE/blink_tools" >> $GITHUB_ENV
- name: Set $PATH
run: echo "$DEPOT_TOOLS_PATH" >> $GITHUB_PATH
- name: git clone
uses: actions/checkout@v2
with:
path: lighthouse
- name: Generate cache hash
run: bash $GITHUB_WORKSPACE/lighthouse/.github/scripts/generate-devtools-hash.sh > cdt-test-hash.txt
- name: Set week of the year
run: echo "WEEK_OF_THE_YEAR=$(date +%V)" >> $GITHUB_ENV
# Caches are invalidated at least once a week, so that's the max time between testing
# with the latest dependencies. Any commit to the DevTools repo touching Lighthouse
# code will invalidate the cache sooner.
- name: Cache depot tools, devtools, blink tools and content shell
uses: actions/cache@v2
with:
path: |
${{ env.DEPOT_TOOLS_PATH }}
${{ env.DEVTOOLS_PATH }}
${{ env.BLINK_TOOLS_PATH }}
${{ github.workspace }}/lighthouse/.tmp/chromium-web-tests/content-shells
# This hash key changes:
# 1) every monday (so invalidates once a week)
# 2) every commit to CDT touching files important to Lighthouse web tests
# 3) every change to file in Lighthouse repo important to running these tests.
#
# The number is how many times this hash key was manually updated to break the cache.
key: ${{ runner.os }}-2-${{ env.WEEK_OF_THE_YEAR }}-${{ hashFiles('cdt-test-hash.txt') }}
- name: Use Node.js 14.x
uses: actions/setup-node@v1
with:
node-version: 14.x
- run: yarn --frozen-lockfile
working-directory: ${{ github.workspace }}/lighthouse
- run: yarn build-report
working-directory: ${{ github.workspace }}/lighthouse
- run: yarn build-devtools
working-directory: ${{ github.workspace }}/lighthouse
- name: Download depot tools
run: bash $GITHUB_WORKSPACE/lighthouse/lighthouse-core/test/chromium-web-tests/download-depot-tools.sh
- name: Download DevTools Frontend
run: bash $GITHUB_WORKSPACE/lighthouse/lighthouse-core/test/chromium-web-tests/download-devtools.sh
- name: Download Blink Tools
run: bash $GITHUB_WORKSPACE/lighthouse/lighthouse-core/test/chromium-web-tests/download-blink-tools.sh
- name: Download Content Shell
run: bash $GITHUB_WORKSPACE/lighthouse/lighthouse-core/test/chromium-web-tests/download-content-shell.sh
- name: Install python deps
run: pip install six requests
# In addition to running the layout tests, this also rolls Lighthouse to DevTools and builds it.
- name: Run Web Tests
run: bash $GITHUB_WORKSPACE/lighthouse/lighthouse-core/test/chromium-web-tests/run-web-tests.sh
- name: Print diff
if: failure()
run: find "$GITHUB_WORKSPACE/lighthouse/.tmp/layout-test-results/retry_3" -name '*-diff.txt' -exec cat {} \;
- name: Upload results
uses: actions/upload-artifact@v2
if: failure()
with:
name: results
path: ${{ github.workspace }}/lighthouse/.tmp/layout-test-results
# Run smoke tests via DevTools
- name: Define ToT chrome path
run: echo "CHROME_PATH=/Users/runner/chrome-mac-tot/Chromium.app/Contents/MacOS/Chromium" >> $GITHUB_ENV
- name: Install Chrome ToT
working-directory: /Users/runner
run: bash $GITHUB_WORKSPACE/lighthouse/lighthouse-core/scripts/download-chrome.sh && mv chrome-mac chrome-mac-tot
- run: mkdir latest-run
working-directory: ${{ github.workspace }}/lighthouse
- name: yarn smoke --runner devtools
# TODO: run on all tests.
# - Current DevTools hangs on any page with a service worker.
# https://github.com/GoogleChrome/lighthouse/issues/13396
# - Various other issues that needed investigation.
run: yarn smoke --runner devtools --retries=2 --invert-match a11y byte-efficiency byte-gzip dbw errors-expired-ssl errors-infinite-loop lantern-idle-callback-short legacy-javascript metrics-tricky-tti metrics-tricky-tti-late-fcp offline-ready offline-sw-broken offline-sw-slow oopif-requests perf-budgets perf-diagnostics-third-party perf-fonts perf-frame-metrics perf-preload perf-trace-elements pwa redirects-client-paint-server redirects-history-push-state redirects-multiple-server redirects-single-client redirects-single-server screenshot seo-passing seo-tap-targets
working-directory: ${{ github.workspace }}/lighthouse
|
.github/workflows/devtools.yml
|
admin.order.index:
path: /admin/order/index
defaults:
_controller: 'order.controller.admin:indexAction'
options:
require_admin_permission: order.index
admin.order.grid:
path: /admin/order/grid
defaults:
_controller: 'order.controller.admin:gridAction'
options:
expose: true
require_admin_permission: order.index
admin.order.products.grid:
path: /admin/order/products/grid
defaults:
_controller: 'order.controller.admin:viewProductsGridAction'
options:
expose: true
require_admin_permission: order.edit
admin.order.add:
path: /admin/order/add
defaults:
_controller: 'order.controller.admin:addAction'
options:
require_admin_permission: order.add
admin.order.edit:
path: '/admin/order/edit/{id}'
defaults:
_controller: 'order.controller.admin:editAction'
options:
expose: true
require_admin_permission: order.edit
admin.order.delete:
path: '/admin/order/delete/{id}'
defaults:
_controller: 'order.controller.admin:deleteAction'
options:
expose: true
require_admin_permission: order.delete
front.order_cart.index:
path: /order/cart
defaults:
_controller: 'order_cart.controller.front:indexAction'
options:
breadcrumb:
label: order.breadcrumb.cart
route: front.order_cart.index
front.order_cart.add:
path: '/order/cart/add/{id},{variant},{quantity}'
defaults:
_controller: 'order_cart.controller.front:addAction'
variant: 'null'
quantity: '1'
options:
expose: true
front.order_cart.edit:
path: '/order/cart/edit/{id},{quantity}'
defaults:
_controller: 'order_cart.controller.front:editAction'
options:
expose: true
front.order_cart.delete:
path: '/order/cart/delete/{id}'
defaults:
_controller: 'order_cart.controller.front:deleteAction'
options:
expose: true
front.order_address.index:
path: /order/address
defaults:
_controller: 'order_address.controller.front:indexAction'
options: { }
front.order_confirm.index:
path: /order/confirm
defaults:
_controller: 'order_confirmation.controller.front:indexAction'
options:
breadcrumb:
label: order.breadcrumb.confirmation
route: front.order_confirm.index
admin.order_status.index:
path: /admin/order/status/index
defaults:
_controller: 'order_status.controller.admin:indexAction'
options:
require_admin_permission: order_status.index
admin.order_status.grid:
path: /admin/order/status/grid
defaults:
_controller: 'order_status.controller.admin:gridAction'
options:
expose: true
require_admin_permission: order_status.index
admin.order_status.add:
path: /admin/order/status/add
defaults:
_controller: 'order_status.controller.admin:addAction'
options:
require_admin_permission: order_status.add
admin.order_status.edit:
path: '/admin/order/status/edit/{id}'
defaults:
_controller: 'order_status.controller.admin:editAction'
options:
expose: true
require_admin_permission: order_status.edit
admin.order_status.delete:
path: '/admin/order/status/delete/{id}'
defaults:
_controller: 'order_status.controller.admin:deleteAction'
options:
expose: true
require_admin_permission: order_status.delete
admin.order_status_group.index:
path: /admin/order/status/group/index
defaults:
_controller: 'order_status_group.controller.admin:indexAction'
options:
require_admin_permission: order_status_group.index
admin.order_status_group.grid:
path: /admin/order/status/group/grid
defaults:
_controller: 'order_status_group.controller.admin:gridAction'
options:
expose: true
require_admin_permission: order_status_group.index
admin.order_status_group.add:
path: /admin/order/status/group/add
defaults:
_controller: 'order_status_group.controller.admin:addAction'
options:
require_admin_permission: order_status_group.add
admin.order_status_group.edit:
path: '/admin/order/status/group/edit/{id}'
defaults:
_controller: 'order_status_group.controller.admin:editAction'
options:
expose: true
require_admin_permission: order_status_group.edit
admin.order_status_group.delete:
path: '/admin/order/status/group/delete/{id}'
defaults:
_controller: 'order_status_group.controller.admin:deleteAction'
options:
expose: true
require_admin_permission: order_status_group.delete
|
src/WellCommerce/Bundle/OrderBundle/Resources/config/routing.yml
|
version: 2
jobs:
cypress/build:
resource_class: xlarge
docker:
- image: cypress/base:14.16.0
steps:
- checkout
- restore_cache:
keys:
- cache-{{ .Branch }}-{{ checksum "package.json" }}
- run:
name: Npm CI
command: npm ci && npm rebuild node-sass && npx browserslist@latest --update-db
- run:
command: npx cypress verify
- save_cache:
key: cache-{{ .Branch }}-{{ checksum "package.json" }}
paths:
- ~/.npm
- ~/.cache
- run: npm install -g npm@latest && npm run lint
- persist_to_workspace:
root: ~/
paths:
- project
- .cache/Cypress
- run: ls -la
cypress/e2e-tests:
resource_class: xlarge
docker:
- image: cypress/browsers:node14.16.0-chrome89-ff86
parallelism: 10
steps:
- attach_workspace:
at: ~/
- run: ls -la
- run:
name: before tests
command: circleci tests glob "tests/e2e/*/*.js" | circleci tests split > circleci.tests && sed -i 's/No timing found for "//g' circleci.tests && sed -i 's/"//g' circleci.tests && grep "\S" circleci.tests > uno.txt && mv uno.txt circleci.tests && cat circleci.tests
- run:
name: run tests
command: TZ=UTC npx vue-cli-service test:e2e --mode test --headless --browser chrome --reporter mocha-junit-reporter --reporter-options mochaFile=reports/mocha/test-results.xml --spec $(cat circleci.tests | tr "\n" ",")
- run:
          name: prepare coverage
command: mkdir coverage && mv .nyc_output/out.json coverage/coverage-$CIRCLE_NODE_INDEX-$(date +"%s%N").json
- save_cache:
paths:
- coverage/
key: pm-{{ .Environment.CIRCLE_NODE_INDEX }}-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- run: ls -la coverage
cypress/report:
docker:
- image: cypress/base:14.16.0
steps:
- attach_workspace:
at: ~/
- run:
name: tests unit
command: npm run test-unit
- run: mkdir coverage
- restore_cache:
keys:
- pm-0-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- restore_cache:
keys:
- pm-1-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- restore_cache:
keys:
- pm-2-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- restore_cache:
keys:
- pm-3-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- restore_cache:
keys:
- pm-4-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- restore_cache:
keys:
- pm-5-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- restore_cache:
keys:
- pm-6-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- restore_cache:
keys:
- pm-7-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- restore_cache:
keys:
- pm-8-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- restore_cache:
keys:
- pm-9-{{ .Branch }}-{{ .Environment.CIRCLE_WORKFLOW_ID }}-coverage.json
- run: ls -la coverage && cp jest-coverage/coverage-final.json coverage/from-jest.json
- run: npx nyc merge coverage && mkdir .nyc_output
- run: mv coverage.json .nyc_output/out.json
- run: npx nyc report --reporter html --reporter text --reporter json-summary --report-dir combined-coverage
- store_artifacts:
path: combined-coverage
- store_test_results:
path: test_summary_reports
workflows:
build:
jobs:
- cypress/build
- cypress/e2e-tests:
requires:
- cypress/build
- cypress/report:
requires:
- cypress/e2e-tests
version: 2
|
.circleci/config.yml
|
swagger: "2.0"
info:
version: "0.0.1"
title: Movie DB
# during dev, should point to your local machine
host: localhost:8081
# basePath prefixes all resource paths
basePath: /
#
schemes:
# tip: remove http to make production-grade
- http
- https
# format of bodies a client can send (Content-Type)
consumes:
- application/json
# format of the responses to the client (Accepts)
produces:
- application/json
- text/html
paths:
/movies:
# binds a127 app logic to a route
x-swagger-router-controller: movies
get:
description: Returns a list of stored movies data
# used as the method name of the controller
operationId: index
parameters:
- name: genre
in: query
description: Filter movies by genre
type: string
responses:
"200":
description: Success
schema:
# a pointer to a definition
$ref: "#/definitions/MovieListBody"
# responses may fall through to errors
default:
description: Error
schema:
$ref: "#/definitions/ErrorResponse"
post:
description: Creates a new movie entry
operationId: create
parameters:
- name: movie
required: true
in: body
          description: Details of the new movie to create
schema:
$ref: "#/definitions/MovieBody"
responses:
"200":
description: A successfully stored movie details
schema:
$ref: "#/definitions/MovieBody"
default:
description: Error
schema:
$ref: "#/definitions/ErrorResponse"
/movies/{movieId}:
# binds a127 app logic to a route
x-swagger-router-controller: movies
get:
description: Returns a single movie entry by given id
operationId: show
parameters:
- name: movieId
required: true
in: path
type: string
responses:
"200":
description: A movie details
schema:
$ref: "#/definitions/MovieBody"
default:
description: Error
schema:
$ref: "#/definitions/ErrorResponse"
put:
description: Updates a movie entry by given id
operationId: update
parameters:
- name: movieId
required: true
in: path
type: string
- name: movie
required: true
in: body
          description: Updated details of the movie
schema:
$ref: "#/definitions/MovieBody"
responses:
"200":
description: A successfully updated movie details
schema:
$ref: "#/definitions/MovieBody"
default:
description: Error
schema:
$ref: "#/definitions/ErrorResponse"
delete:
description: Deletes a single movie entry by given id
operationId: destroy
parameters:
- name: movieId
required: true
in: path
type: string
responses:
"200":
description: A deleted movie details
schema:
$ref: "#/definitions/MovieBody"
default:
description: Error
schema:
$ref: "#/definitions/ErrorResponse"
# complex objects have schema definitions
definitions:
MovieListBody:
required:
- movies
properties:
movies:
type: array
items:
$ref: "#/definitions/Movie"
Movie:
required:
- title
- genre
- year
properties:
title:
type: string
genre:
type: string
year:
type: integer
minimum: 1990
rank:
type: integer
default: 5
minimum: 1
maximum: 10
MovieBody:
required:
- movie
properties:
movie:
$ref: "#/definitions/Movie"
ErrorResponse:
required:
- message
properties:
message:
type: string
|
SWG_NodeJS02/swagger-integration/api/swagger/swagger.yaml
|
name: Build
# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
workflow_dispatch:
jobs:
image_job:
name: Create Docker Image
runs-on: ubuntu-latest
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- uses: dorny/paths-filter@v2
id: changes
with:
filters: |
dockerfile:
- 'Dockerfile'
- name: Docker Login
if: steps.changes.outputs.dockerfile == 'true'
uses: docker/login-action@v1.10.0
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Docker Build
if: steps.changes.outputs.dockerfile == 'true'
run: |
docker build . -t disroop/vscode-plugin-builder:latest
- name: Publish
if: steps.changes.outputs.dockerfile == 'true'
run: |
docker push disroop/vscode-plugin-builder:latest
vscode_build_job:
name: VSCode Build Plugin
needs: [image_job]
if: always()
runs-on: ubuntu-latest
container:
image: disroop/vscode-plugin-builder:latest
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- name: install dependencies
run: npm install
- name: lint
run: npm run lint
- name: compile
run: npm run compile
- name: run unit test
run: npm run test
- name: SonarCloud Scan
uses: sonarsource/sonarcloud-github-action@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
- name: create vscode package
run: vsce package
- name: Upload a Build Artifact
        if: github.ref == 'refs/heads/master'
uses: actions/upload-artifact@v2
with:
path: ./*.vsix
|
.github/workflows/vsce-build-publish.yml
|
version: "3"
# All available services
services:
  # Computation
  jupyter:
    container_name: "ds-flow_jupyter"
    restart: "always"
    build: services/jupyter
    env_file:
    - ./config/jupyter.env
    - ./config/minio.env
    - ./config/shared_database.env
    volumes:
    - ./shared/notebooks/:/home/jovyan/work/notebooks
    - ./shared/dags/:/home/jovyan/work/dags
    - ./services/apistar/api/:/home/jovyan/work/api
    ports:
    # Port mappings are quoted throughout: YAML 1.1 parses colon-separated
    # digit groups as base-60 integers when each part is < 60, so the string
    # form is the safe Compose convention.
    - "9888:8888"
    entrypoint: sh -c 'start-notebook.sh --NotebookApp.token=$$<PASSWORD>'
  # Visualization
  superset:
    container_name: "ds-flow_superset"
    restart: "always"
    image: tylerfowler/superset:0.24
    depends_on:
    - postgres
    env_file:
    - ./config/superset_container.env
    - ./config/superset_database.env
    ports:
    - "9555:8088"
  # Misc Storage
  postgres:
    container_name: "ds-flow_postgres"
    restart: "always"
    image: postgres
    env_file:
    - ./config/postgres.env
    - ./config/superset_database.env
    - ./config/airflow_database.env
    - ./config/shared_database.env
    volumes:
    - postgres_volume:/var/lib/postgresql/data/
    - ./services/postgres/:/docker-entrypoint-initdb.d/
    ports:
    - "5432:5432"
  # pgweb for db management
  pgweb:
    container_name: "pgweb"
    restart: "always"
    image: sosedoff/pgweb
    ports:
    - "9777:8081"
    # NOTE(review): `links` is legacy Compose syntax; the default network
    # already provides DNS for `postgres`. Kept to preserve behavior exactly.
    links:
    - postgres:postgres # my database container is called postgres, not db
    environment:
    - DATABASE_URL=postgres://postgres:postgres@postgres:5432/postgres?sslmode=disable
    depends_on:
    - postgres
  # Scheduling
  airflow:
    container_name: "ds-flow_airflow"
    restart: "always"
    image: puckel/docker-airflow:1.9.0-2
    depends_on:
    - postgres
    env_file:
    - ./config/airflow_container.env
    - ./config/minio.env
    - ./config/shared_database.env
    volumes:
    - ./shared/requirements.txt:/requirements.txt
    - ./shared/dags/:/usr/local/airflow/dags
    ports:
    - "9111:8080"
    command: webserver
  # Model Storage
  minio:
    container_name: "ds-flow_minio"
    restart: "always"
    image: minio/minio
    env_file:
    - ./config/minio.env
    volumes:
    - minio_volume:/data
    ports:
    - "9444:9000"
    command: server /data
  # API
  apistar:
    container_name: "ds-flow_apistar"
    restart: "always"
    build: services/apistar
    env_file:
    - ./config/minio.env
    volumes:
    - ./services/apistar/api:/usr/src/app
    ports:
    - "9999:8000"
    command: gunicorn app:app -b 0.0.0.0:8000
  # Admin Overview
  portainer:
    container_name: "ds-flow_portainer"
    restart: "always"
    image: portainer/portainer
    volumes:
    - /var/run/docker.sock:/var/run/docker.sock
    - portainer_volume:/data
    ports:
    - "9090:9000"
    command: -H unix:///var/run/docker.sock
# Volumes to persist data
volumes:
  postgres_volume:
  minio_volume:
  portainer_volume:
|
docker-compose.yml
|
---
# Installs the KubeGems local-cluster core plugins (agent, controller, basic
# gateway, Argo Rollouts) by applying templated Kubernetes manifests.
# NOTE(review): the `community.kubernetes` collection is deprecated upstream in
# favour of `kubernetes.core`; left unchanged here because switching requires
# the new collection to be installed — confirm before migrating.
- name: Core Plugins | KubeGems Agent
  community.kubernetes.k8s:
    state: present
    definition: "{{ lookup('template', item) | from_yaml }}"
  with_items:
    - kubegems/agent/issuer.yaml
    - kubegems/agent/certification.yaml
    - kubegems/agent/sa.yaml
    - kubegems/agent/clusterrole.yaml
    - kubegems/agent/clusterrolebinding.yaml
    - kubegems/agent/deploy.yaml
    - kubegems/agent/kubectl.yaml
    - kubegems/agent/service.yaml
- name: Core Plugins | KubeGems Controller | CRDs
  community.kubernetes.k8s:
    state: present
    definition: "{{ lookup('template', item) | from_yaml }}"
  with_fileglob:
    - "templates/kubegems/controller/crds/*.yaml"
- name: Core Plugins | KubeGems Controller
  community.kubernetes.k8s:
    state: present
    definition: "{{ lookup('template', item) | from_yaml }}"
  with_fileglob:
    - "templates/kubegems/controller/*.yaml"
- name: Core Plugins | Basic Gateway | CRDs
  community.kubernetes.k8s:
    state: present
    definition: "{{ lookup('template', item) | from_yaml }}"
  with_items:
    - gateway/crd/crd.yaml
- name: Core Plugins | Basic Gateway
  community.kubernetes.k8s:
    state: present
    definition: "{{ lookup('template', item) | from_yaml }}"
  with_items:
    - gateway/metrics/service.yaml
    - gateway/rbac/sa.yaml
    - gateway/rbac/nginx-ingress-operator-leader-election-role.yaml
    - gateway/rbac/nginx-ingress-operator-leader-election-rolebinding.yaml
    - gateway/rbac/nginx-ingress-operator-manager-role.yaml
    - gateway/rbac/nginx-ingress-operator-manager-rolebinding.yaml
    - gateway/rbac/nginx-ingress-operator-metrics-reader.yaml
    - gateway/rbac/nginx-ingress-operator-proxy-role.yaml
    - gateway/rbac/nginx-ingress-operator-proxy-rolebinding.yaml
    - gateway/configmap.yaml
    - gateway/operator.yaml
    - gateway/service.yaml
# NOTE(review): CRDs stay `present` even when the plugin is disabled below —
# presumably deliberate (removing CRDs deletes all their custom resources);
# confirm this is intended.
- name: Core Plugins | Argo Rollouts | CRDs
  community.kubernetes.k8s:
    state: present
    definition: "{{ lookup('template', item) | from_yaml }}"
  with_items:
    - argo-rollouts/v1.1.0/crds/analysisruns.yaml
    - argo-rollouts/v1.1.0/crds/analysistemplates.yaml
    - argo-rollouts/v1.1.0/crds/clusteranalysistemplates.yaml
    - argo-rollouts/v1.1.0/crds/experiments.yaml
    - argo-rollouts/v1.1.0/crds/rollouts.yaml
- name: Core Plugins | Argo Rollouts
  community.kubernetes.k8s:
    state: '{{ "present" if core_plugins.argo_rollouts.enabled else "absent" }}'
    definition: "{{ lookup('template', item) | from_yaml }}"
  with_items:
    - argo-rollouts/v1.1.0/sa.yaml
    - argo-rollouts/v1.1.0/secret.yaml
    - argo-rollouts/v1.1.0/clusterrole-rollouts.yaml
    - argo-rollouts/v1.1.0/clusterrole-admin.yaml
    - argo-rollouts/v1.1.0/clusterrole-edit.yaml
    - argo-rollouts/v1.1.0/clusterrole-view.yaml
    - argo-rollouts/v1.1.0/clusterrolebinding-rollouts.yaml
    - argo-rollouts/v1.1.0/deploy.yaml
    - argo-rollouts/v1.1.0/service.yaml
- name: Core Plugins | Argo Rollouts AnalysisTemplate
  community.kubernetes.k8s:
    state: '{{ "present" if core_plugins.argo_rollouts.enabled else "absent" }}'
    definition: "{{ lookup('file', item) | from_yaml }}"
  # Canonical boolean `true` instead of YAML-1.1 truthy `yes`
  # (ansible-lint/yamllint truthy rule).
  ignore_errors: true
  with_items:
    - argo-rollouts/analysis-template.yaml
|
roles/installer/tasks/core-plugins/kubegems-local.yaml
|
# Swagger 2.0 specification for the Stencil server — a registry for uploading,
# versioning and downloading protobuf descriptor-set files per namespace.
info:
  title: Stencil server
  version: 0.1.0
produces:
- application/json
consumes:
- application/json
paths:
  /ping:
    get:
      summary: service health check
      operationId: ping
      responses:
        "200":
          description: "returns pong message"
  /v1/namespaces/{namespace}/descriptors:
    post:
      summary: upload descriptors
      consumes:
      - multipart/form-data
      produces:
      - application/json
      tags:
      - descriptors
      parameters:
      - name: namespace
        in: path
        required: true
        type: string
      - name: name
        in: formData
        required: true
        type: string
      - name: version
        in: formData
        description: "version number for descriptor file. This should follow semantic version compatibility"
        required: true
        type: string
      - name: latest
        in: formData
        description: "mark this descriptor file as latest"
        required: false
        type: boolean
      - name: dryrun
        in: formData
        description: "flag for dryRun"
        required: false
        type: boolean
      - name: skiprules
        in: formData
        description: "list of rules to skip"
        required: false
        type: array
        items:
          type: string
          enum:
          - FILE_NO_BREAKING_CHANGE
          - MESSAGE_NO_DELETE
          - FIELD_NO_BREAKING_CHANGE
          - ENUM_NO_BREAKING_CHANGE
      - name: file
        in: formData
        description: "descriptorset file to upload"
        required: true
        type: file
      responses:
        "200":
          description: "Success response"
        "409":
          description: "Conflict"
    get:
      summary: list all available descriptor names under one namespace
      tags:
      - descriptors
      parameters:
      - name: namespace
        in: path
        required: true
        type: string
      responses:
        "200":
          description: "returns list of descriptor names"
          schema:
            type: array
            items:
              type: string
  /v1/namespaces/{namespace}/descriptors/{name}/versions:
    get:
      summary: list all available versions for specified descriptor
      tags:
      - descriptors
      parameters:
      - name: namespace
        in: path
        required: true
        type: string
      - name: name
        in: path
        required: true
        type: string
      responses:
        "200":
          description: "returns list of versions"
          schema:
            type: array
            items:
              type: string
  /v1/namespaces/{namespace}/descriptors/{name}/versions/{version}:
    get:
      summary: download specified descriptor file
      tags:
      - descriptors
      produces:
      - application/octet-stream
      parameters:
      - name: namespace
        in: path
        required: true
        type: string
      - name: name
        in: path
        required: true
        type: string
      - name: version
        in: path
        required: true
        type: string
      responses:
        "200":
          description: "download response"
  /v1/namespaces/{namespace}/metadata:
    post:
      summary: update metadata
      tags:
      - metadata
      parameters:
      - name: namespace
        in: path
        required: true
        type: string
      - name: body
        in: body
        description: "specify name and version in payload"
        required: true
        schema:
          $ref: "#/definitions/MetadataPayload"
      responses:
        "200":
          description: "Success response"
  /v1/namespaces/{namespace}/metadata/{name}:
    get:
      summary: get latest version for specified descriptor
      tags:
      - metadata
      parameters:
      - name: namespace
        in: path
        required: true
        type: string
      - name: name
        in: path
        required: true
        type: string
      responses:
        "200":
          description: "Success response"
          schema:
            $ref: "#/definitions/MetadataResponse"
tags:
- name: descriptors
  description: "Manage descriptors"
- name: metadata
  description: "manage latest versions for uploaded descriptor files"
definitions:
  MetadataResponse:
    properties:
      updated:
        type: string
      version:
        type: string
    type: object
  MetadataPayload:
    properties:
      name:
        type: string
      version:
        type: string
    type: object
schemes:
- http
# Quoted: a plain 2.0 would parse as a float.
swagger: "2.0"
|
server/swagger.yml
|
# Unreal Archive content record (application-specific !<MAP> tag) describing a
# UT2004 Capture The Flag map: metadata, screenshots, file hashes and mirrors.
# NOTE(review): the description text is the original author's quoted prose —
# archival data, intentionally left verbatim (including its typos).
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-20 13:38"
game: "Unreal Tournament 2004"
name: "CTF-Skydive"
author: "-=B0$s=-"
description: "Can you lead your team to victory? or trow them thousands of feet down\
  \ to there deaths? Welcome to the skydive arena!, At the top of these skyscrapers\
  \ liandri found an other arena for there championship, there is no way you can simply\
  \ walk to the other base and capture a flag, there is simply to mutch air between\
  \ them. so than you fly!, Liadri funded this arena, and placed state of the art\
  \ gravity deformers to shoot the contestants to there location. Sounds like fun\
  \ doesn't it? lets just see how much fun you think it is when you are shot out of\
  \ your orbit and find your self further.. and further away from the arena, and the\
  \ ground just a bit closer every passing second.Now get your gear, and watch your\
  \ step., Enter Skydive!"
releaseDate: "2006-05"
# Screenshot images hosted on the archive's image bucket.
attachments:
- type: "IMAGE"
  name: "CTF-Skydive_shot_4.png"
  url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/S/CTF-Skydive_shot_4.png"
- type: "IMAGE"
  name: "CTF-Skydive_shot_3.png"
  url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/S/CTF-Skydive_shot_3.png"
- type: "IMAGE"
  name: "CTF-Skydive_shot_2.png"
  url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/S/CTF-Skydive_shot_2.png"
- type: "IMAGE"
  name: "CTF-Skydive_shot_1.png"
  url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/S/CTF-Skydive_shot_1.png"
originalFilename: "skydive.zip"
hash: "a63ff0338ae1344a47810564b00d76eedec647ce"
fileSize: 2484080
# Notable files contained in the archive (beyond `otherFiles` uncounted extras).
files:
- name: "CTF-Skydive.ut2"
  fileSize: 7208719
  hash: "0c039ba690536d8f018e248d783cc9960c80156d"
otherFiles: 4
dependencies: {}
# Download mirrors; exactly one is marked `main`, dead links carry state MISSING.
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/S/skydive.zip"
  main: true
  repack: false
  state: "OK"
- url: "https://gamefront.online/files2/service/thankyou?id=5098647"
  main: false
  repack: false
  state: "MISSING"
- url: "https://gamebanana.com/maps/download/14162"
  main: false
  repack: false
  state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/S/a/6/3ff033/skydive.zip"
  main: false
  repack: false
  state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/Capture%20The%20Flag/S/a/6/3ff033/skydive.zip"
  main: false
  repack: false
  state: "OK"
deleted: false
gametype: "Capture The Flag"
title: "CTF-Skydive"
playerCount: "Unknown"
# Theme weights, presumably summing to 1.0 — TODO confirm against the schema.
themes:
  Tech: 0.1
  City: 0.9
bots: true
content/Unreal Tournament 2004/Maps/Capture The Flag/S/a/6/3ff033/ctf-skydive_[a63ff033].yml
|