code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
inscription:
path: /inscription.html
defaults: { _controller: MainFrontBundle:Client:inscription}
identification:
path: /connexion.html
defaults: { _controller: MainFrontBundle:Client:identification}
mon_compte:
path: /compte.html
defaults: { _controller: MainFrontBundle:Client:compte}
my_command:
path: /commande.html
defaults: { _controller: MainFrontBundle:Client:commande}
detail_commande:
path: /{id}/detail-commande.html
defaults: { _controller: MainFrontBundle:Client:detcommande}
requirements:
id: \d+
mes_adresse:
path: /mesadresses.html
defaults: { _controller: MainFrontBundle:Client:adresse}
coupon:
path: /{id}/coupon.html
defaults: { _controller: MainFrontBundle:Client:coupon}
mot_de_pass_oublie:
path: /mot_de_pass_oublie.html
defaults: { _controller: MainFrontBundle:Client:resetPassword}
reset:
path: /reset.html
defaults: { _controller: MainFrontBundle:Client:reset}
#logout_client:
# path: /deconnexion.html
# defaults: { _controller: MainFrontBundle:Client:deconnexion}
traking_coupon:
path: /traking/{ref}/{ship}
defaults: { _controller: BackCommandeBundle:Coupon:traking}
confirm_client:
path: /{token}/confirm.html
defaults: { _controller: MainFrontBundle:Client:confirm}
viewcp:
path: /viewcop
defaults: { _controller: MainFrontBundle:Client:codePostale }
listdelegation:
path: /delegationlist
defaults: { _controller: MainFrontBundle:Client:delegation }
listvilleajx:
path: /delegationlist
defaults: { _controller: BackPartnerBundle:SellingPoint:delegation }
ville:
path: /ville
defaults: { _controller: MainFrontBundle:Client:ville}
count_cmd:
path: /countcmd
defaults: { _controller: BackCommandeBundle:Command:countcmd}
login_check_client:
pattern: /login_check_client
logout_client:
pattern: /logout_client
#adresse
mes_adresse:
path: /mesadresses.html
defaults: { _controller: MainFrontBundle:Client:adresse}
delete_mes_adresse:
path: /adressedelete
defaults: { _controller: MainFrontBundle:Client:deleteadresse}
default_adresse:
path: /defaultdresse
defaults: { _controller: MainFrontBundle:Client:defaultdresse}
coupon_commande_pdf:
path: /printcommand/{id}
defaults: { _controller: MainFrontBundle:Client:printliv}
requirements:
id: \d+
update_mes_adresse:
path: /adresseupdate
defaults: { _controller: MainFrontBundle:Client:updateadresse}
update_mes_adresse_livraison:
path: /adresseupdatelivraison
defaults: { _controller: MainFrontBundle:Client:updateadresselivraison}
mes_bigfid:
path: /mes_bigfid.html
defaults: { _controller: MainFrontBundle:Client:bigfid}
list_villeajx:
path: /searchlocality
defaults: { _controller: MainFrontBundle:Client:searchlocality}
abonnement:
path: /abonnement.html
defaults: { _controller: MainFrontBundle:Client:abonnement}
confirmation_inscription:
path: /confirmation_inscription.html
defaults: { _controller: MainFrontBundle:Client:confirmation}
message_inscription:
path: /message_inscription.html
defaults: { _controller: MainFrontBundle:Client:message}
|
src/Main/FrontBundle/Resources/config/routing/client.yml
|
---
- name: Test juniper_junos_config module
hosts: all
connection: local
gather_facts: no
roles:
- Juniper.junos
tasks:
#################
- name: Retrieve the committed configuration
juniper_junos_config:
retrieve: 'committed'
diff: false
check: false
commit: false
register: test1
ignore_errors: True
tags: [ test1 ]
- name: Check TEST 1
assert:
that:
- test1.config
- "'host-name choc-qfx-a;' in test1.config"
#################
- name: Append .foo to the hostname using private config mode.
juniper_junos_config:
config_mode: 'private'
load: 'merge'
lines:
- "set system host-name {{ inventory_hostname }}.foo"
register: test2
ignore_errors: True
tags: [ test2 ]
- name: Check TEST 2
assert:
that:
- test2.diff_lines
- "'+ host-name choc-qfx-a.foo;' in test2.diff_lines"
#################
- name: Rollback to the previous config.
juniper_junos_config:
config_mode: 'private'
rollback: "1"
register: test3
ignore_errors: True
tags: [ test3 ]
- name: Check TEST 3
assert:
that:
- test3.diff_lines
- "'- host-name choc-qfx-a.foo;' in test3.diff_lines"
#################
- name: Creates directory
file:
path: out
state: directory
- name: Configure LLDP
juniper_junos_config:
load: 'merge'
lines:
- "set protocols lldp advertisement-interval 30"
- "set protocols lldp transmit-delay 2"
- "set protocols lldp hold-multiplier 4"
- "set protocols lldp ptopo-configuration-trap-interval 30"
- "set protocols lldp ptopo-configuration-maximum-hold-time 300"
- "set protocols lldp lldp-configuration-notification-interval 30"
- "set protocols lldp interface all disable"
- "set protocols lldp interface ge-1/1/1"
format: 'set'
comment: 'Start LLDP with given options'
dest_dir: './out'
register: test4
ignore_errors: True
tags: [ test4 ]
- name: Rollback to the rescue config.
juniper_junos_config:
rollback: 'rescue'
- name: Check out/choc-qfx-a.diff exists
stat:
path: out/choc-qfx-a.diff
register: stat_result_1
- name: Check TEST 4
assert:
that:
- stat_result_1.stat.exists == True
- test4.diff_lines
- "'+ interface ge-1/1/1;' in test4.diff_lines"
- name: Clean up TEST 4
file:
path: out
state: absent
#################
- name: Retrieve [edit system services] of current committed config.
juniper_junos_config:
retrieve: 'committed'
filter: 'system/services'
diff: true
check: false
commit: false
register: test5
ignore_errors: True
tags: [ test5 ]
- name: Check TEST 5
assert:
that:
- test5.failed == False
- "'system {' in test5.config_lines"
#################
#TODO: Add tests for commit check and commit confirmed workflows
|
tests/pb.juniper_junos_config.yml
|
uid: az_mariadb
name: az mariadb
summary: Gérer les serveurs de Azure Database for MariaDB.
description: ''
commands:
- az_mariadb_db
- az_mariadb_db_create
- az_mariadb_db_delete
- az_mariadb_db_list
- az_mariadb_db_show
- az_mariadb_server
- az_mariadb_server_configuration
- az_mariadb_server_configuration_list
- az_mariadb_server_configuration_set
- az_mariadb_server_configuration_show
- az_mariadb_server_create
- az_mariadb_server_delete
- az_mariadb_server_firewall-rule
- az_mariadb_server_firewall-rule_create
- az_mariadb_server_firewall-rule_delete
- az_mariadb_server_firewall-rule_list
- az_mariadb_server_firewall-rule_show
- az_mariadb_server_firewall-rule_update
- az_mariadb_server_georestore
- az_mariadb_server_list
- az_mariadb_server_list-skus
- az_mariadb_server_private-endpoint-connection
- az_mariadb_server_private-endpoint-connection_approve
- az_mariadb_server_private-endpoint-connection_delete
- az_mariadb_server_private-endpoint-connection_reject
- az_mariadb_server_private-endpoint-connection_show
- az_mariadb_server_private-link-resource
- az_mariadb_server_private-link-resource_list
- az_mariadb_server_replica
- az_mariadb_server_replica_create
- az_mariadb_server_replica_list
- az_mariadb_server_replica_stop
- az_mariadb_server_restart
- az_mariadb_server_restore
- az_mariadb_server_show
- az_mariadb_server_show-connection-string
- az_mariadb_server_start
- az_mariadb_server_stop
- az_mariadb_server_update
- az_mariadb_server_vnet-rule
- az_mariadb_server_vnet-rule_create
- az_mariadb_server_vnet-rule_delete
- az_mariadb_server_vnet-rule_list
- az_mariadb_server_vnet-rule_show
- az_mariadb_server_vnet-rule_update
- az_mariadb_server_wait
- az_mariadb_server-logs
- az_mariadb_server-logs_download
- az_mariadb_server-logs_list
globalParameters:
- name: --debug
summary: Augmentez le niveau de détail de la journalisation pour afficher tous les journaux de débogage.
- name: --help -h
summary: Affichez ce message d’aide et quittez.
- name: --only-show-errors
summary: Afficher uniquement les erreurs, en supprimant les avertissements.
- name: --output -o
defaultValue: json
parameterValueGroup: json, jsonc, table, tsv
summary: Format de sortie.
- name: --query
summary: Chaîne de requêtes JMESPath. Pour obtenir plus d’informations et des exemples, consultez <a href="http://jmespath.org/">http://jmespath.org/</a>.
- name: --verbose
summary: Augmentez le niveau de détail de la journalisation. Utilisez --debug pour des journaux de débogage complets.
metadata:
description: Gérer les serveurs de Azure Database for MariaDB.
ms.openlocfilehash: b67d2c4d2055be6a6b606f4d4d2a0e524a1b3638
ms.sourcegitcommit: 612554c029ba1f28d3a43fd22e743104a1a30ea0
ms.translationtype: MT
ms.contentlocale: fr-FR
ms.lasthandoff: 04/07/2021
ms.locfileid: "106816396"
|
latest/docs-ref-autogen/mariadb.yml
|
site_name: SchoolSquirrel Docs
site_url: https://schoolsquirrel.github.io
site_author: <NAME>
site_description: >-
SchoolSquirrel - Die Platform für digitalen Unterricht.
Mit Chat, Aufgaben, Video-Konferenzen und mehr!
copyright: Copyright © 2019 - 2020 <NAME>
### Repository ###
repo_name: SchoolSquirrel
repo_url: https://github.com/SchoolSquirrel/SchoolSquirrel
edit_uri: edit/master/docs/docs
### Navigation ###
nav:
- Home:
- index.md
- Über:
- about/index.md
- about/about.md
- about/vs.md
- about/features.md
- about/security.md
- about/help.md
- Schüler:
- Login: students/login.md
- Aufgaben: students/assignments.md
- Lehrer:
- Login: teachers/login.md
- Aufgaben: teachers/assignments.md
- Administratoren:
- Login: admins/login.md
- Aufgaben: admins/assignments.md
- Entwickler:
- API Dokumentation: developers/apiDocs.md
### Extra Configuration ###
extra:
social:
- icon: fontawesome/brands/github
link: https://github.com/SchoolSquirrel
- icon: fontawesome/brands/docker
link: https://hub.docker.com/r/schoolsquirrel/schoolsquirrel
### Theme Configuration ###
theme:
name: material
custom_dir: overrides
language: de
features:
- tabs
palette:
scheme: default
primary: blue
accent: orange
font:
text: Roboto
code: Roboto Mono
favicon: assets/logo/favicon.ico
logo: assets/logo/favicon.ico
extra_css:
- styles/extra.css
### Plugin Configuration ###
plugins:
- search
- exclude:
glob:
- _snippets/*
markdown_extensions:
- admonition
- footnotes
- pymdownx.details
- pymdownx.superfences
- pymdownx.snippets:
check_paths: true
- markdown.extensions.meta
- markdown.extensions.admonition
- markdown.extensions.attr_list
- markdown.extensions.def_list
- markdown.extensions.footnotes
- markdown.extensions.meta
- markdown.extensions.toc:
permalink: true
- pymdownx.arithmatex:
generic: true
- pymdownx.betterem:
smart_enable: all
- pymdownx.caret
- pymdownx.critic
- pymdownx.details
- pymdownx.emoji:
emoji_index: !!python/name:materialx.emoji.twemoji
emoji_generator: !!python/name:materialx.emoji.to_svg
- pymdownx.highlight
- pymdownx.inlinehilite
- pymdownx.keys
- pymdownx.magiclink:
repo_url_shorthand: true
user: squidfunk
repo: mkdocs-material
- pymdownx.mark
- pymdownx.smartsymbols
- pymdownx.snippets:
check_paths: true
- pymdownx.superfences
- pymdownx.tabbed
- pymdownx.tasklist:
custom_checkbox: true
- pymdownx.tilde
|
docs/mkdocs.yml
|
trigger:
branches:
include:
- develop
- release/*
- feature/*
- refs/tags/*
paths:
exclude:
- README.md
- azure-pipelines.yml
- documentation
resources:
repositories:
- repository: AzurePipelinesTemplates
type: github
endpoint: FaganSC
name: FaganSC/AzurePipelinesTemplates
ref: master
variables:
node_version: '10.x'
package_manager: 'npm'
name: $(Build.DefinitionName) $(SourceBranchName) $(Date:yyyyMMdd)$(Rev:.r)
stages:
- stage: Versioning
jobs:
- template: general/versioning.yml@AzurePipelinesTemplates
- stage: Build
jobs:
- template: spfx/build.yml@AzurePipelinesTemplates
dependsOn:
- Versioning
- stage: Test
dependsOn:
- Build
jobs:
- template: spfx/test.yml@AzurePipelinesTemplates
parameters:
working_directory:
package_manager: ${{ variables.package_manager }}
node_version: ${{ variables.node_version }}
- stage: DeployDevelopment
displayName: Deploy to Development Tenant SharePoint App Catalog
dependsOn:
- Test
jobs:
- template: spfx/deploy.yml@AzurePipelinesTemplates
parameters:
target_environment: Development
variables_group: DevelopmentTenant
o365cli_deploy_extra_arguments: '--skipFeatureDeployment'
condition: succeeded()
- stage: DeployProduction
displayName: Deploy to Production Tenant SharePoint App Catalog
dependsOn:
- Test
jobs:
- template: spfx/deploy.yml@AzurePipelinesTemplates
parameters:
target_environment: Release
isPreRelease: False
hasChangeLog: True
variables_group: ProductionTenant
o365cli_deploy_extra_arguments: '--skipFeatureDeployment'
condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags'))
- stage: GitHubRelease
displayName: GitHub Release
dependsOn:
- DeployProduction
jobs:
- template: spfx/release.yml@AzurePipelinesTemplates
parameters:
release_title: 'Release $(Build.BuildNumber)'
target_environment: Production
gitHubConnection: GitHub
condition: and(succeeded(), startsWith(variables['Build.SourceBranch'], 'refs/tags'))
isPreRelease: False
|
azure-pipelines.yml
|
- url: https://github.com/drjosephliu/author_classification_project
name: Author Classification with BERT
summary: |
Final project for CIS 530 Natural Language Processing. We used the
Reuters_50_50 dataset, which consists of 5000 news articles by the top 50
authors, and applied various models to predict author identification. The
best published accuracy was 69.1% by Qian et al (2017) using a GRU-based
model. We smashed that with a 92.9% accuracy using an end-to-end embedding
and classification model with BERT as a layer in a forward-feed neural
network and bag of words as an additional feature.
tags: [Python, scikit-learn, numpy]
image:
- url: https://github.com/drjosephliu/kickstarter_campaign_predictor
name: Predicting the Success or Failure of Kickstarter Campaigns
summary: |
Final project for CIS 520 Machine Learning. We examined over 380,000
kickstarter campaigns spanning a period of 2009 to 2018 and fitted the data
across a variety of models: logistic regression, XGBoost, Adaboost, decision
tree, random forest and neural network. XGBoost performed best with an
accuracy of 70.23% and a recall of 43.64%.
tags: [Python, scikit-learn, numpy]
image:
- url: https://github.com/drjosephliu/webcrawler_project
name: Web Crawler & Text Summarizer
summary: Final project for CIS 552 Advanced Programming in Haskell. We made a web crawler that crawls news sites and summarises articles using TF-IDF. I built the parser, crawler, logger and wrote QuickCheck tests.
tags: [Haskell, QuickCheck]
image:
- url: https://github.com/drjosephliu/CheckMyDigitalFootprint
name: Penn OS
summary: Final project for CIS 548 Operating Systems where we had to build an operating system complete with shell, filesystem and scheduler. I was repsonsible for the shell.
tags: [C]
image:
- url: https://github.com/drjosephliu/music_streaming_project
name: Music Streaming Protocol
summary: Final project for CIS 553 Network Systems where we designed our own music streaming protocol called Purrfect Meowsical Protocol (PMP).
tags: [Python]
image:
- url: https://github.com/drjosephliu/YelpRestaurantRecommendations
name: Yelp Restaurant Recommendations
summary: Final project CIT 594 Data Structures where we designed a restaurant recommendation algorithm based on co-reviewers' tastes using modified version of breadth-first search.
- url: https://github.com/drjosephliu/CheckMyDigitalFootprint
name: Check My Digital Footprint
summary: Final project for CIT 591 Intro to Software Development. We made a GUI app that scans your gmail inbox and lists which listservs you are subscribed to. I was responsible for the frontend.
tags: [java, javaFX]
image: images/digitalFootprint.jpg
|
src/data/classProjects.yml
|
openapi: 3.0.2
info:
version: 1.0.0
title: Service
description: Service Template
servers:
- url: /api/v1
security:
- BasicAuth: []
tags:
- name: Settings
description: Operations on settings objects
- name: Audit Logs
description: Operations on audit log entries
paths:
/settings:
get:
description: Returns the current settings
tags:
- Settings
operationId: getSettings
responses:
"200":
description: Successfully returned the current settings
content:
application/json:
schema:
$ref: "#/components/schemas/Settings"
"400":
$ref: "#/components/responses/400Error"
put:
description: Set new settings
tags:
- Settings
operationId: setSettings
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/Settings"
responses:
"200":
description: Successfully set new settings
content:
application/json:
schema:
$ref: "#/components/schemas/Settings"
"400":
$ref: "#/components/responses/400Error"
/auditlogs:
get:
description: Returns a list of all audit log entries
tags:
- Audit Logs
operationId: getAuditLogEntries
parameters:
- $ref: "#/components/parameters/PageLimit"
- $ref: "#/components/parameters/PageOffset"
responses:
"200":
description: Successfully returned a list of all audit log entries
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/AuditLogEntry"
"400":
$ref: "#/components/responses/400Error"
/auditlogs/{entryId}:
get:
description: Obtain information about a audit log entry
tags:
- Audit Logs
operationId: getAuditLogEntry
parameters:
- name: entryId
in: path
required: true
schema:
type: string
format: uuid
responses:
"200":
description: Successfully returned a audit log entry
content:
application/json:
schema:
$ref: "#/components/schemas/AuditLogEntry"
"400":
$ref: "#/components/responses/400Error"
components:
securitySchemes:
BasicAuth:
type: http
scheme: basic
schemas:
Settings:
type: object
required:
- applicationName
properties:
applicationName:
type: string
example: Application
AuditLogEntry:
type: object
required:
- category
- level
properties:
id:
type: string
format: uuid
category:
type: string
enum:
- GET_SETTINGS
- SET_SETTINGS
example: SET_STTINGS
level:
type: string
enum:
- ERROR
- INFO
- WARNING
message:
type: string
parameters:
PageLimit:
name: limit
in: query
description: Limits the number of items on a page
schema:
type: integer
PageOffset:
name: offset
in: query
description: Specifies the page number of the artists to be displayed
schema:
type: integer
responses:
400Error:
description: Invalid request
content:
application/json:
schema:
type: object
properties:
message:
type: string
|
backend/service/src/main/resources/openapi.yaml
|
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "bcs-k8s-custom-scheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "bcs-k8s-custom-scheduler.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "bcs-k8s-custom-scheduler.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "bcs-k8s-custom-scheduler.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "bcs-k8s-custom-scheduler.fullname" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- --insecure_address=0.0.0.0
- --insecure_port=8080
- --log_max_size=500
- --log_max_num=10
- --logtostderr=true
- --alsologtostderr=true
- --v=3
- --stderrthreshold=2
- --cluster={{ .Values.options.cluster }}
- --cni_annotation_key={{ .Values.options.cniAnnotationKey }}
- --cni_annotation_value={{ .Values.options.cniAnnotationValue }}
- --fixed_ip_annotation_key={{ .Values.options.fixedIPAnnotationKey }}
- --fixed_ip_annotation_value={{ .Values.options.fixedIPAnnotationValue }}
{{- if ne .Values.options.caFile "" }}
- --ca_file={{ .Values.options.caFile }}
{{- end }}
{{- if and (ne .Values.options.serverCertFile "") (ne .Values.options.serverKeyFile "") }}
- --server_cert_file={{ .Values.options.serverCertFile}}
- --server_key_file={{ .Values.options.serverKeyFile}}
{{- end }}
{{- if eq .Values.workMode "cloudnetservice" }}
- --custom_scheduler_type=IpSchedulerV2
- --cloud_netservice_endpoints={{ .Values.cloudNetserviceOptions.cloudNetserviceEndpoints }}
{{- if and (ne .Values.cloudNetserviceOptions.cloudNetserviceCertFile "") (ne .Values.cloudNetserviceOptions.cloudNetserviceKeyFile "") }}
- --cloud_netservice_client_ca_file={{ .Values.cloudNetserviceOptions.cloudNetserviceCaFile }}
- --cloud_netservice_client_cert_file={{ .Values.cloudNetserviceOptions.cloudNetserviceCertFile }}
- --cloud_netservice_client_key_file={{ .Values.cloudNetserviceOptions.cloudNetserviceKeyFile }}
{{- end }}
{{- end }}
{{- if eq .Values.workMode "netservice"}}
- --custom_scheduler_type=IpSchedulerV1
- --bcs_zookeeper={{ .Values.netserviceOptions.bcsZkHosts }}
{{- end }}
ports:
- name: http
containerPort: 8080
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
|
docs/features/bcs-k8s-custom-scheduler/deploy/bcs-k8s-custom-scheduler/templates/deployment.yaml
|
homepage: ''
changelog-type: markdown
hash: 65e88ed1c6390e823520b528661019d61c66539664097dd6db9a0fe891f2b4a3
test-bench-deps: {}
maintainer: <EMAIL>
synopsis: First class pattern matching
changelog: |
# Revision history for generic-match
## 0.1.0.0 -- 2020-08-23
* First release to hackage, as a minimally useful implementation.
basic-deps:
base: '>=4.12 && <4.16'
generics-sop: '>=0.5 && <0.6'
all-versions:
- 0.1.0.0
- 0.2.0.0
- 0.2.0.1
- 0.2.0.2
- 0.3.0.0
- 0.3.0.1
author: <NAME>
latest: 0.3.0.1
description-type: markdown
description: |
# generic-match
[](https://hackage.haskell.org/package/generic-match)
[](https://travis-ci.org/SamuelSchlesinger/generic-match)
## What?
An implementation of first-class pattern matches in Haskell, based on [generics-sop](https://hackage.haskell.org/package/generics-sop).
## Why?
Oftentimes, when writing Haskell code, we want to branch on multiple cases of
a sum type, such as
```haskell
data TravelMethod
= Airplane Airport UTCTime
| Train TrainStation UTCTime
| Driving
```
For instance, lets say that we want to grab out the time. In Haskell, we can
do this by writing:
```haskell
timeOfTravel :: TravelMethod -> Maybe UTCTime
timeOfTravel = \case
Airplane _airport time -> Just time
Train _trainStation time -> Just time
Driving -> Nothing
```
This is concise, and preferable to many other languages, but in this case we
can do even better using this library.
```haskell
timeOfTravel travelMethod = match travelMethod (Just . flip const) (Just . flip const) Nothing
```
In this example, perhaps we don't save much, but I hope the principle is clear.
The case for using this library is when you want to branch on the contents of
each different sum, and you _already_ have functions or concise combinators to
build functions that handle your inputs. For a Haskeller, this is already
rather familiar, I claim!
```haskell
either l r x == match x l r
maybe n j x == match x n j
```
## Examples
```haskell
data DatabaseAccess a =
ConnectionFailure String
| InvalidRowCount Int
| Successful a
deriving (GHC.Generic, Generic)
doThing :: m (DatabaseAccess Bool)
...
x <- doThing >>= \g -> match g error (error . show) pure
```
## Contribution
Contributions are very welcome! Feel free to create an issue or a PR or
ping me on any platform to chat about whatever, especially improvements to my
libraries.
## Compatibility
I support all of the GHC versions that I typically do in my code, from 8.6.1
to 8.10.2. I build locally on Mac, and my travis builds are on Linux, so
if you run into problems on Windows, let me know. If you want to be sure that
a build will pass, run the test script in this repository.
license-name: MIT
|
packages/ge/generic-match.yaml
|
language: java
jdk: oraclejdk7
env:
matrix:
# android-17 is always included
## note: we're using android-17 for building, so android-16 env is disabled here.
# - ANDROID_SDKS=sysimg-16 ANDROID_TARGET=android-16 ANDROID_ABI=armeabi-v7a
- ANDROID_SDKS=android-17,sysimg-17 ANDROID_TARGET=android-17 ANDROID_ABI=armeabi-v7a
- ANDROID_SDKS=android-18,sysimg-18 ANDROID_TARGET=android-18 ANDROID_ABI=armeabi-v7a
before_install:
# Install base Android SDK
- sudo apt-get update -qq
- if [ `uname -m` = x86_64 ]; then sudo apt-get install -qq --force-yes libgd2-xpm ia32-libs ia32-libs-multiarch > /dev/null; fi
- wget http://dl.google.com/android/android-sdk_r22.2.1-linux.tgz
- tar xzf android-sdk_r22.2.1-linux.tgz
- export ANDROID_HOME=$PWD/android-sdk-linux
- export PATH=${PATH}:${ANDROID_HOME}/tools:${ANDROID_HOME}/platform-tools
# Install required components.
# For a full list, run `android list sdk -a --extended`
# Note that sysimg-16 downloads the ARM, x86 and MIPS images (we should optimize this).
# Other relevant API's:
# addon-google_apis-google-16
# yes, we do agree SDK EULA
- echo 'y' | android update sdk --filter platform-tools,android-17,extra-android-support,$ANDROID_SDKS --no-ui --force
- BUILD_TOOLS_ID=`android list sdk --extended -a | grep "build-tools-18.1.0" |cut -d ' ' -f 2`
- echo 'y' | android update sdk -a --filter $BUILD_TOOLS_ID --no-ui --force
- export PATH=${PATH}:${ANDROID_HOME}/build-tools/18.1.0
## disable emulator related things for now
# # Create and start emulator
# - echo no | android create avd --force -n test -t $ANDROID_TARGET --abi $ANDROID_ABI
# - emulator -avd test -no-skin -no-audio -no-window &
## disable emulator related things for now
# before_script:
# - chmod +x wait_for_emulator
# - ./wait_for_emulator
# NDK
- wget http://dl.google.com/android/ndk/android-ndk-r9-linux-x86_64.tar.bz2
- tar jxf android-ndk-r9-linux-x86_64.tar.bz2
- export ANDROID_NDK_ROOT=$PWD/android-ndk-r9
- export PATH=$ANDROID_NDK_ROOT:$PATH
# Android NDK & SDK build
script: ./Engine/porting/Android/GameEngine-android/build.py --rebuild --assemble --project SampleProject
|
.travis.yml
|
v17development-flarum-badges:
admin:
auto_moderator:
action_drivers:
badge_id: 'ID odznaki'
give_badge: 'Przyznaj odznakę'
remove_badge: 'Odbierz odznakę'
badge_selector:
loading_badges: 'Ładowanie odznak'
placeholder: 'Wybierz odznakę'
metric_drivers:
badges_received: 'Przyznano odznaki'
not_installed:
content: 'To rozszerzenie oferuje większą funkcjonalność jeśli rozszerzenie <a>Auto Moderator</a> jest włączone. Użytkownicy mogą być wtedy automatycznie nagradzani odznakami bez udziału moderatorów lub administracji!'
dismiss: 'Nie pokazuj ponownie'
install: Zainstaluj
title: 'Automatycznie nagradzaj użytkowników odznakami'
requirement_drivers:
has_badge: 'Posiada odznakę'
badge:
description: 'Opis odznaki'
icon: 'Ikona odznaki'
name: 'Nazwa odznaki'
visible: Widoczna
visible_description: 'Jeśli odznaczysz tę opcję, odznaka nie będzie widoczna.'
badge_category:
blockview: 'Układ blokowy'
blockview_description: 'Wyświetl odznaki w tej kategorii w postaci bloków na liście odznak. Jeśli wyłączone, odznaki będą wyświetlane w postaci tabeli.'
description: 'Opis kategorii'
enabled: Włączona
enabled_description: 'Jeśli wyłączysz kategorię, wszystkie zawarte w niej odznaki zostaną ukryte, a kategoria nie będzie widoczna.'
name: 'Nazwa kategorii'
confirm_dialog:
'no': Nie
title: Potwierdź
'yes': Tak
confirm_messages:
delete_badge: 'Czy jesteś pewien, że chcesz usunąć tę odznakę? Zostanie ona odebrana wszystkim użytkownikom i jest to nieodwracalne.'
delete_category: 'Czy jesteś pewien, że chcesz usunąć tę kategorię? Jeżeli są w niej jakieś odznaki, zostaną one oznaczone jako "Bez kategorii".'
create_category: 'Utwórz kategorię'
edit_category: 'Edytuj kategorię'
new_badge: 'Nowa odznaka'
nothing_here_yet: 'Nie utworzono jeszcze żadnych odznak ani kategorii.'
permissions:
badge_detail_users: 'Przeglądanie listy użytkowników z daną odznaką'
give_badge: 'Przyznawanie odznak użytkownikom'
take_badge: 'Odbieranie odznak użytkownikom'
uncategorized: 'Bez kategorii'
update_badge: 'Edytuj odznakę'
update_category: 'Edytuj kategorię'
forum:
all_badges: 'Zobacz wszystkie odznaki'
all_users: 'Zobacz wszystkich użytkowników'
badge:
badge: Odznaka
badge_details: 'Szczegóły odznaki'
badges: Odznaki
category: 'Kategoria odznaki'
description: 'Opis odznaki'
earned_by_count: '{count, plural, one {<b># użytkownik uzyskał</b>} other {<b># użytkowników uzyskało</b>}} tę odznakę'
earned_count: '{count, plural, one {<b>{count} użytkownik</b> uzyskał} other {<b>{count} użytkowników</b> uzyskało}} tę odznakę'
earned_on: 'Odznakę przyznano'
earning_reason: 'Powód przyznania odznaki'
name: '<NAME>'
no_earning_reason: 'Nie podano powodu przyznania odznaki'
received_on: 'Uzyskano {date}'
update_earning_reason: 'Edytuj powód przyznania odznaki'
badge_information: 'Informacje o odznace'
give_badge: 'Przyznaj odznakę'
moderation:
give_badge: 'Przyznaj odznakę'
remove_badge: 'Odbierz odznakę użytkownikowi'
remove_badge_confirm: 'Czy jesteś pewien, że chcesz odebrać tę odznakę temu użytkownikowi?'
no_received: 'Żaden użytkownik nie uzyskał jeszcze tej odznaki.'
notification:
settings: 'Uzyskałem nową odznakę'
title: 'Uzyskałeś nową odznakę'
select_badge: 'Wybierz odznakę'
uncategorized: '=> v17development-flarum-badges.admin.uncategorized'
update_badge: 'Edytuj odznakę'
user_has_badge: 'Ten użytkownik już ma tę odznakę.'
user_no_badges: 'Ten użytkownik nie ma jeszcze żadnych odznak.'
|
locale/v17development-user-badges.yml
|
---
# name of the systemd service to install, default to directory basename
docker_compose_service_default_project: "{{docker_compose_service_path|copsf_basename}}"
docker_compose_service_project: "{{(docker_compose_service_working_directory or docker_compose_service_default_path)|copsf_basename}}"
# working directory for compose file
docker_compose_service_path: "{{ docker_compose_service_working_directory or docker_compose_service_default_path}}"
docker_compose_service_files: []
# files to use in compose command line
docker_compose_service_service_activated: "{{docker_compose_service_state not in ['stopped']}}"
# one of:
# no, on-success, on-failure, on-abnormal,
# on-watchdog, on-abort, or always.
docker_compose_service_args: ""
docker_compose_service_pull: false
docker_compose_service_pull_args: "{{docker_compose_service_args}}"
docker_compose_service_config_args: "{{docker_compose_service_args}}"
docker_compose_service_up_args: "{{docker_compose_service_args}}"
docker_compose_service_stop_args: "{{docker_compose_service_args}}"
docker_compose_service_down_args: "{{docker_compose_service_args}} {% if docker_compose_service_down_volumes %}-v{%endif %}"
#
docker_compose_service_restart: "no"
docker_compose_service_restart_sec: "0"
docker_compose_service_timeout_sec: "300"
# restart_sec: "10"
docker_compose_service_state: started
docker_compose_service_down_volumes: false
docker_compose_service_stdout: "journal"
docker_compose_service_stderr: "journal"
docker_compose_service_before: ""
docker_compose_service_after: ""
docker_compose_service_requires: ""
docker_compose_service_wantedby: "multi-user.target"
#
docker_compose_service_default_before: ""
docker_compose_service_default_after: "docker.service network.service"
docker_compose_service_default_requires: "docker.service"
#
docker_compose_service__before: "{{docker_compose_service_default_before}} {{docker_compose_service_before}}"
docker_compose_service__after: "{{docker_compose_service_default_after}} {{docker_compose_service_after}}"
docker_compose_service__requires: "{{docker_compose_service_default_requires}} {{docker_compose_service_requires}}"
# for backward compat !
docker_compose_service_working_directory: null
docker_compose_service_default_path: /tmp
docker_compose_service_docker_compose_file: null
docker_compose_service__files: |-
{%- set r = [] %}
{%- for i in [docker_compose_service_docker_compose_file] + docker_compose_service_files %}
{%- if i %}{% set _ = r.append(i) %}{% endif%}
{%- endfor%}
{{- r|copsf_uniquify|to_json }}
|
docker_compose_service/defaults/main.yml
|
-
fs_dir_name: rootfs
fs_dirs:
-
source_dir: bin
target_dir: bin
ignore_files:
- Test.bin
- TestSuite.bin
- query.bin
- cve
- checksum
is_strip: TRUE
-
source_dir: libs
target_dir: lib
ignore_files:
- .a
is_strip: TRUE
dir_mode: 755
file_mode: 644
-
source_dir: usr/lib
target_dir: usr/lib
ignore_files:
- .a
is_strip: TRUE
dir_mode: 755
file_mode: 644
-
source_dir: config
target_dir: etc
-
source_dir: system
target_dir: system
-
target_dir: dev # "/dev" directory is mandatory for Linux init.
-
source_dir: sbin
target_dir: sbin
-
source_dir: usr/bin
target_dir: usr/bin
-
source_dir: usr/sbin
target_dir: usr/sbin
-
target_dir: proc
-
target_dir: mnt
-
target_dir: opt
-
target_dir: tmp
-
target_dir: var
-
target_dir: userdata
-
target_dir: sys
-
source_dir: etc
target_dir: etc
-
source_dir: vendor
target_dir: vendor
-
target_dir: storage
-
source_dir: sdk_linux/src_tmp/ko
target_dir: ko
dir_mode: 755
file_mode: 755
fs_filemode:
-
file_dir: lib/ld-uClibc-0.9.33.2.so
file_mode: 555
-
file_dir: lib/ld-2.24.so
file_mode: 555
-
file_dir: etc/init.cfg
file_mode: 400
fs_symlink:
-
source: libc.so
link_name: ${fs_dir}/lib/ld-musl-arm.so.1
-
source: mksh
link_name: ${fs_dir}/bin/sh
-
source: ./
link_name: ${fs_dir}/usr/lib/a7_softfp_neon-vfpv4
-
source: mksh
link_name: ${fs_dir}/bin/shell
fs_make_cmd:
- ${root_path}/build/lite/make_rootfs/rootfsimg_linux.sh ${fs_dir} ext4
-
fs_dir_name: userfs
fs_dirs:
-
source_dir: storage/etc
target_dir: etc
-
source_dir: data
target_dir: data
fs_make_cmd:
- ${root_path}/build/lite/make_rootfs/rootfsimg_linux.sh ${fs_dir} ext4
-
fs_dir_name: userdata
fs_make_cmd:
- ${root_path}/build/lite/make_rootfs/rootfsimg_linux.sh ${fs_dir} ext4 1024
-
fs_attr:
dmverity_enable:
fs_dir_name: systemfs
fs_make_cmd:
- ${root_path}/build/lite/make_rootfs/rootfsimg_linux.sh ${fs_dir} ext4
-
fs_attr:
dmverity_enable:
fs_dir_name: systemhashfs
fs_make_cmd:
- ${root_path}/build/lite/make_rootfs/dmverity_linux.sh ${out_path} veritysetup
- ${root_path}/build/lite/make_rootfs/rootfsimg_linux.sh ${fs_dir} ext4 6
- ${root_path}/build/lite/make_rootfs/dmverity_linux.sh ${out_path} addS82ohos
|
hispark_taurus_linux/fs.yml
|
eu:
javascripts:
aspect_dropdown:
add_to_aspect: "Adiskidea gehitu"
all_aspects: "Alderdi guztiak"
error: "Ezin izan da <%= name %>(r)ekin harremanetan hasi. Isiltzen ari zara erabiltzaile hori?"
select_aspects: "Alderdiak aukeratu"
started_sharing_with: "<%= name %>(r)ekin harremanetan hasi zara!"
stopped_sharing_with: "Jada ez zaude <%= name %>(r)ekin harremanetan."
toggle:
few: "<%= count %> arlotan"
many: "<%= count %> arlotan"
one: "Arlo <%= count %>ean"
other: "<%= count %> arlotan"
two: "<%= count %> arlotan"
        zero: "Alderdiak hautatu"
aspect_navigation:
deselect_all: "Guztiak deshautatu"
no_aspects: "Ez da alderdirik hautatu"
select_all: "Guztiak hautatu"
comments:
hide: "iruzkinak ezkutatu"
show: "iruzkin guztiak erakutsi"
confirm_dialog: "Ziur al zaude?"
delete: "Ezabatu"
failed_to_like: "Huts gustoko egitean!"
failed_to_post_message: "Huts egin du mezuaren bidalketak!"
getting_started:
alright_ill_wait: "Beno, itxarongo dut."
hey: "Aizu, <%= name %>!"
no_tags: "Aizu, ez duzu etiketarik jarraitzen! Jarraitu hala ere?"
preparing_your_stream: "Zure kronologia pertsonalizatua prestatzen..."
header:
admin: "Kudeatu"
contacts: "Lagunak"
home: "Hasiera"
log_out: "Atera"
mark_all_as_read: "Guztiak irakurri gisa markatu"
messages: "Mezu pribatuak"
notifications: "Jakinarazpenak"
profile: "Profila"
recent_notifications: "Jakinarazpen Berrienak"
search: "Aurkitu jendea edo #etiketak"
settings: "Ezarpenak"
view_all: "Guztiak ikusi"
ignore: "Isildu"
infinite_scroll:
no_more: "Mezu gehiagorik ez dago."
my_activity: "Nire Jarduera"
my_stream: "Kronologia"
photo_uploader:
looking_good: "Ene bada, oso jatorra dirudizu!"
publisher:
at_least_one_aspect: "Gutxienez alderdi batean partekatu behar duzu"
limited: "Mugatua - zure mezua aukeratu duzun jendeak bakarrik ikusiko du"
public: "Publikoa - zure mezua edonork ikusi ahalko du, bai eta bilaketa zerbitzuetan agertu ere"
reshares:
duplicate: "Oso ona, e? Dagoeneko birpartekatu duzu mezu hori!"
search_for: "Bilatu <%= name %>"
show_more: "erakutsi gehiago"
stream:
comment: "Iruzkindu"
follow: "Jarraitu"
hide: "Ezkutatu"
hide_nsfw_posts: "Ezkutatu #nsfw mezuak"
like: "Gustuko dut"
likes:
few: "<%= count %>(e)k gustuko dute"
many: "<%= count %>(e)k gustuko dute"
one: "<%= count %>ek gustuko du"
other: "<%= count %>(e)k gustuko dute"
two: "<%= count %>(e)k gustuko dute"
zero: "<%= count %>(e)k gustuko dute"
limited: "Mugatua"
more_comments:
few: "Erakutsi <%= count %> iruzkin gehiago"
many: "Erakutsi <%= count %> iruzkin gehiago"
one: "Erakutsi iruzkin <%= count %> gehiago"
other: "Erakutsi <%= count %> iruzkin gehiago"
two: "Erakutsi <%= count %> iruzkin gehiago"
zero: "Erakutsi <%= count %> iruzkin gehiago"
original_post_deleted: "Jatorrizko mezua egileak ezabatu du."
public: "Publikoa"
reshare: "Birpartekatu"
reshares:
few: "<%= count %> Birpartekaketa"
many: "<%= count %> Birpartekaketa"
one: "Birpartekaketa <%= count %> "
        other: "<%= count %> Birpartekaketa"
        two: "<%= count %> Birpartekaketa"
        zero: "<%= count %> Birpartekaketa"
show_nsfw_post: "Mezua erakutsi"
show_nsfw_posts: "Guztiak erakutsi"
unfollow: "Ez jarraitu"
unlike: "Ez dut gustuko"
tags:
wasnt_that_interesting: "Beno, suposatzen dut #<%= tagName %> ez zela oso interesgarria..."
timeago:
day: "egun bat"
days: "%d egun"
hour: "ordu bat"
hours: "%d ordu"
minute: "minutu bat"
minutes: "%d minutu"
month: "hilabete bat"
months: "%d hilabete"
prefixAgo: "duela"
prefixFromNow: ""
seconds: "minutu bat baino gutxiago"
suffixAgo: "d(ir)ela"
suffixFromNow: "barru"
year: "urte bat"
years: "%d urte"
videos:
unknown: "Bideo mota ezezaguna"
watch: "Ikusi bideo hau <%= provider %>(e)n"
|
config/locales/javascript/javascript.eu.yml
|
founders:
- name: <NAME>
position: CEO
image: bruce.jpg
twitter: BrucePon
linkedin: ponbruce
bio: |
<NAME> is the CEO and co-founder of BigchainDB. Prior to BigchainDB, Bruce co-founded Avantalion International Consulting, a consulting firm specialized in building banks and industry startups in Europe and Asia for companies such as Mercedes-Benz, Volkswagen, Mitsubishi. Bruce is a board member, advisor and investor in six startups.
He has an Advanced Executive Certificate from MIT Sloan and a B.Sc in Engineering from the University of Saskatchewan.
- name: <NAME>
position: CTO
image: trent.jpg
twitter: trentmc0
facebook: trent.mcconaghy
github: trentmc
bio: |
<NAME> has 20 years of deep technology experience with a focus on machine learning, data visualization and user experience. He was a researcher at the Canadian Department of Defense and in 1999, he co-founded Analog Design Automation Inc. and was its CTO until its acquisition by Synopsys Inc. In 2004, he co-founded Solido Design Automation Inc., once again in the role of CTO.
Trent has written two critically acclaimed books on machine learning, creativity and circuit design and has authored or co-authored more than 40 papers and patents.
Trent has a PhD in Engineering from KU Leuven, Belgium and Bachelor’s degrees in Engineering and in Computer Science from the University of Saskatchewan where he won awards for the top PhD thesis and top undergraduate thesis.
- name: <NAME>
position: Strategic Partnerships
image: masha.jpg
linkedin: mashamc
bio: |
<NAME>, curator and researcher, is a co-founder and CMO at BigchainDB and ascribe.io, a service enabling immutable attribution for artists and clear provenance for digital and physical art.
She has a PhD in Arts from Pantheon-Sorbonne University, Paris and a Museology Degree from Louvre School, Paris. She has organized exhibitions throughout the world and has worked with curators at the Louvre Museum in Paris and directed a commercial gallery in Vancouver.
Her current pursuits are at the intersection of art, IP and applications of new technology.
team:
- name: <NAME>
position: Engineer
image: troy.jpg
twitter: TroyMc
github: ttmc
|
_src/_data/team.yml
|
---
# ansible/playbooks/configure.yml
- name: "Install and configure Smart Display"
hosts: "all"
handlers:
- name: "Reboot System"
ansible.builtin.reboot:
reboot_timeout: 3600
become: true
tags:
- "smartdisplay"
- "system"
- "reboot"
tasks:
- name: "Manage smartdisplay Packages"
ansible.builtin.package:
name:
- "cockpit"
- "gdm"
- "glances"
- "gnome-kiosk"
- "gnome-terminal"
- "nodejs"
state: "present"
become: true
tags:
- "smartdisplay"
- "package"
- name: "Manage cockpit Service"
ansible.builtin.service:
name: "cockpit.socket"
state: "started"
enabled: true
become: true
tags:
- "smartdisplay"
- "cockpit"
- "service"
- name: "Manage default Target"
ansible.builtin.file:
src: "/usr/lib/systemd/system/graphical.target"
dest: "/etc/systemd/system/default.target"
state: "link"
become: true
notify:
- "Reboot System"
tags:
- "smartdisplay"
- "target"
- name: "Reboot System"
ansible.builtin.reboot:
reboot_timeout: 3600
become: true
tags:
- "smartdisplay"
- "reboot"
# mirror specific tasks
- name: "Manage smartdisplay User"
ansible.builtin.user:
name: "mirror"
# create new password hash: `mkpasswd --method=sha-512`
# default: password
# yamllint disable-line rule:line-length
password: <PASSWORD>/"
home: "/var/lib/mirror/"
become: true
tags:
- "mirror"
- "user"
- name: "Manage mirror Source Code"
ansible.builtin.git:
repo: "https://github.com/MichMich/MagicMirror.git"
version: "v2.18.0"
        dest: "/var/lib/mirror/app/"
become: true
become_user: "mirror"
tags:
- "smartdisplay"
- "git"
- name: "Manage mirror NPM Packages"
community.general.npm:
path: "/var/lib/mirror/app/"
state: "present"
become: true
become_user: "mirror"
tags:
- "mirror"
- "npm"
post_tasks:
- name: "Guide"
ansible.builtin.debug:
msg: "The system is configured. You can log in via:
- ssh {{ ansible_ssh_host }}
- https://{{ ansible_ssh_host }}:9090
- a connected display"
...
|
ansible/playbooks/configure.yml
|
language: php
sudo: required
env:
global:
- GLPI_SOURCE="https://github.com/glpi-project/glpi"
- FI_SOURCE="https://github.com/fusioninventory/fusioninventory-for-glpi"
- CS=7.2
- APIDOCS_PHP=7.1
- DBNAME=glpitest
- OLDDBNAME=glpiupgradetest
- AFTER_SUCCESS_BRANCH=9.3/bugfixes
- secure: <KEY>
matrix:
- GLPI_BRANCH=9.3.0 FI_BRANCH=glpi9.3+1.1
- GLPI_BRANCH=9.3/bugfixes FI_BRANCH=glpi9.3
- GLPI_BRANCH=master FI_BRANCH=master
php:
- 5.6
- 7.0
- 7.1
- 7.2
- nightly
matrix:
allow_failures:
- php: nightly
- env: GLPI_BRANCH=master FI_BRANCH=master
before_install:
- "./tests/before_install.sh"
before_script:
- "./tests/before_script.sh"
script:
- if [ "${TRAVIS_PHP_VERSION:0:3}" = "$CS" ] && [ "$GLPI_BRANCH" = "$AFTER_SUCCESS_BRANCH" ]; then COVERAGE="--nccfc CommonTreeDropdown CommonDropdown CommonDBTM CommonGLPI CommonDBConnexity CommonDBRelation"; else COVERAGE="-ncc"; fi
- if [ -e ../../scripts/cliinstall.php ] ; then php ../../scripts/cliinstall.php --db=$OLDDBNAME --user=root --tests ; fi
- if [ -e ../../tools/cliinstall.php ] ; then php ../../tools/cliinstall.php --db=$OLDDBNAME --user=root --tests ; fi
- php ../fusioninventory/scripts/cli_install.php --tests --as-user glpi
- mysql -u root $OLDDBNAME < tests/plugin_flyvemdm_empty_2.0.0-rc.1.sql
- mysql -u root $OLDDBNAME < tests/plugin_flyvemdm_config_2.0.0-rc.1.sql
- php tools/cli_install.php --tests
- rm ../../tests/config_db.php
- if [ -e ../../scripts/cliinstall.php ] ; then php ../../scripts/cliinstall.php --db=$DBNAME --user=root --tests ; fi
- if [ -e ../../tools/cliinstall.php ] ; then php ../../tools/cliinstall.php --db=$DBNAME --user=root --tests ; fi
- php ../fusioninventory/scripts/cli_install.php --tests --as-user glpi
- vendor/bin/atoum -ft -bf tests/bootstrap.php -d tests/suite-install -ncc
- vendor/bin/atoum -ft -bf tests/bootstrap.php -d tests/suite-integration -mcn 1 $COVERAGE
- vendor/bin/atoum -ft -bf tests/bootstrap.php -d tests/suite-unit $COVERAGE
- vendor/bin/atoum -ft -bf tests/bootstrap.php -d tests/suite-uninstall -ncc
- if [ ${TRAVIS_PHP_VERSION:0:3} == "$CS" ] && [ "$GLPI_BRANCH" = "$AFTER_SUCCESS_BRANCH" ]; then vendor/bin/phpcs -p --standard=vendor/glpi-project/coding-standard/GlpiStandard/ *.php install/ inc/ front/ ajax/ tests/ RoboFile.php; fi
after_success:
# let's update the documentation and locales
- if [ ${TRAVIS_PHP_VERSION:0:3} == "$CS" ] && [ "$GLPI_BRANCH" = "$AFTER_SUCCESS_BRANCH" ]; then tests/after_success.sh; fi
- if [ ${TRAVIS_PHP_VERSION:0:3} == "$APIDOCS_PHP" ] && [ "$GLPI_BRANCH" = "$AFTER_SUCCESS_BRANCH" ]; then tests/apidocs.sh; fi
cache:
directories:
- "$HOME/.composer/cache"
notifications:
webhooks: https://hooks.aethonan.pro/travisci/-1001061475099/
addons:
apt:
update: true
packages:
- mosquitto
- mosquitto-clients
|
spec/fixtures/configs/di/DIOHz0r:flyve-mdm-glpi-plugin.yml
|
uid: "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives"
fullName: "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives"
name: "PerformanceTierServiceLevelObjectives"
nameWithType: "PerformanceTierServiceLevelObjectives"
summary: "Service level objectives for performance tier."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public final class PerformanceTierServiceLevelObjectives"
constructors:
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.PerformanceTierServiceLevelObjectives()"
methods:
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.edition()"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.hardwareGeneration()"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.id()"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.maxBackupRetentionDays()"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.maxStorageMB()"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.minBackupRetentionDays()"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.minStorageMB()"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.vCore()"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.validate()"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.withEdition(java.lang.String)"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.withHardwareGeneration(java.lang.String)"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.withId(java.lang.String)"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.withMaxBackupRetentionDays(java.lang.Integer)"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.withMaxStorageMB(java.lang.Integer)"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.withMinBackupRetentionDays(java.lang.Integer)"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.withMinStorageMB(java.lang.Integer)"
- "com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.withVCore(java.lang.Integer)"
type: "class"
metadata: {}
package: "com.azure.resourcemanager.mysql.models"
artifact: com.azure.resourcemanager:azure-resourcemanager-mysql:1.0.0-beta.1
|
preview/docs-ref-autogen/com.azure.resourcemanager.mysql.models.PerformanceTierServiceLevelObjectives.yml
|
- block:
- name: Create ECMP route with next hops on {{ dst_port_1 }} and {{ dst_port_2 }}.
shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}"
- pause:
seconds: 3
- include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml
vars:
dst_port: "{{ dst_port_1 }}, {{ dst_port_2 }}"
dst_port_ptf_id: "{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}"
- name: Add next hop to ECMP route.
shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}"
- pause:
seconds: 3
- include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml
vars:
dst_port: "{{ dst_port_1 }}, {{ dst_port_2 }}"
dst_port_ptf_id: "{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}"
- include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml
vars:
dst_port: "{{ dst_port_3 }}"
dst_port_ptf_id: "{{ dst_port_3_ptf_id }}"
expect_received: False
- name: Delete next hop from ECMP route.
shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}"
- pause:
seconds: 3
- include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml
vars:
dst_port: "{{ dst_port_1 }}, {{ dst_port_2 }}"
dst_port_ptf_id: "{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}"
- include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml
vars:
dst_port: "{{ dst_port_3 }}"
dst_port_ptf_id: "{{ dst_port_3_ptf_id }}"
expect_received: False
become: yes
always:
- name: Remove route
shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}"
ignore_errors: yes
become: yes
|
ansible/roles/test/tasks/everflow_testbed/testcase_3.yml
|
annonce_homepage:
path: /oussama
defaults: { _controller: AnnonceBundle:Default:index }
annonce_page1:
path: /homeAnnonce
defaults: { _controller: AnnonceBundle:Annonce:AjouterAnnonce}
annonce_Affichage:
path: /AffichageAnnonce
defaults: { _controller: AnnonceBundle:Annonce:read1}
delete_annonce:
path: /delete/{id}
defaults: { _controller: AnnonceBundle:Annonce:delete }
update_annonce:
path: /modifier/{id}
defaults: { _controller: AnnonceBundle:Annonce:modifier }
homeAdmin:
path: /homeAdmin
defaults: { _controller: AnnonceBundle:Annonce:homeAdmin }
AnnonceClient:
path: /AnnonceClient
defaults: { _controller: AnnonceBundle:Annonce:AnnonceClient }
Ajoutercategorie:
path: /Ajoutercategorie
defaults: { _controller: AnnonceBundle:Annonce:AjouterCategorie }
AfficherCategorie:
path: /Affichercategorie/{id}
defaults: { _controller: AnnonceBundle:Annonce:afficherCategorie }
participer:
path: /participer/{idCategorie}
defaults: { _controller: AnnonceBundle:Annonce:participer }
affichertous:
path: /affichertous
defaults: { _controller: AnnonceBundle:Annonce:affichertous }
show:
path: /show/{id}
defaults: { _controller: AnnonceBundle:Annonce:show }
deletecategorie:
path: /deletecategorie/{id}
defaults: { _controller: AnnonceBundle:Annonce:deletecategorie }
showUser:
path: /showUser/{id}
defaults: { _controller: AnnonceBundle:Annonce:showUser }
chart:
path: /chart
defaults: { _controller: AnnonceBundle:State:State }
pie:
path: /pie
defaults: { _controller: AnnonceBundle:State:pie }
Annonceparticiper:
path: /Annonceparticiper
defaults: { _controller: AnnonceBundle:Annonce:Annonceparticiper }
annuler:
path: /annuler/{id}
defaults: { _controller: AnnonceBundle:Annonce:annuler }
favoriser:
path: /favoriser
defaults: { _controller: AnnonceBundle:Annonce:favoriser }
adore:
path: /adore/{id}
defaults: { _controller: AnnonceBundle:Annonce:adore }
showlesjaime:
path: /showlesjaime/{id}
defaults: { _controller: AnnonceBundle:Annonce:showlesjaime }
|
src/AnnonceBundle/Resources/config/routing.yml
|
items:
- uid: com.azure.resourcemanager.compute.models.GalleryImageVersion.Update
id: Update
artifact: com.azure.resourcemanager:azure-resourcemanager-compute:2.0.0
parent: com.azure.resourcemanager.compute.models
langs:
- java
name: GalleryImageVersion.Update
nameWithType: GalleryImageVersion.Update
fullName: com.azure.resourcemanager.compute.models.GalleryImageVersion.Update
type: Interface
package: com.azure.resourcemanager.compute.models
summary: The template for a gallery image version update operation, containing all the settings that can be modified.
syntax:
content: public static interface GalleryImageVersion.Update extends Appliable<GalleryImageVersion>, GalleryImageVersion.UpdateStages.WithAvailableRegion, GalleryImageVersion.UpdateStages.WithEndOfLifeDate, GalleryImageVersion.UpdateStages.WithExcludeFromLatest, GalleryImageVersion.UpdateStages.WithTags
implements:
- com.azure.resourcemanager.resources.fluentcore.model.Appliable<com.azure.resourcemanager.compute.models.GalleryImageVersion>
- com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithAvailableRegion
- com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithEndOfLifeDate
- com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithExcludeFromLatest
- com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithTags
references:
- uid: com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithAvailableRegion
name: GalleryImageVersion.UpdateStages.WithAvailableRegion
nameWithType: GalleryImageVersion.UpdateStages.WithAvailableRegion
fullName: com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithAvailableRegion
- uid: com.azure.resourcemanager.resources.fluentcore.model.Appliable<com.azure.resourcemanager.compute.models.GalleryImageVersion>
name: Appliable<GalleryImageVersion>
nameWithType: Appliable<GalleryImageVersion>
fullName: com.azure.resourcemanager.resources.fluentcore.model.Appliable<com.azure.resourcemanager.compute.models.GalleryImageVersion>
- uid: com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithEndOfLifeDate
name: GalleryImageVersion.UpdateStages.WithEndOfLifeDate
nameWithType: GalleryImageVersion.UpdateStages.WithEndOfLifeDate
fullName: com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithEndOfLifeDate
- uid: com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithExcludeFromLatest
name: GalleryImageVersion.UpdateStages.WithExcludeFromLatest
nameWithType: GalleryImageVersion.UpdateStages.WithExcludeFromLatest
fullName: com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithExcludeFromLatest
- uid: com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithTags
name: GalleryImageVersion.UpdateStages.WithTags
nameWithType: GalleryImageVersion.UpdateStages.WithTags
fullName: com.azure.resourcemanager.compute.models.GalleryImageVersion.UpdateStages.WithTags
- uid: com.azure.resourcemanager.resources.fluentcore.model.Appliable
name: Appliable
nameWithType: Appliable
fullName: com.azure.resourcemanager.resources.fluentcore.model.Appliable
- uid: com.azure.resourcemanager.compute.models.GalleryImageVersion
name: GalleryImageVersion
nameWithType: GalleryImageVersion
fullName: com.azure.resourcemanager.compute.models.GalleryImageVersion
|
docs-ref-autogen/com.azure.resourcemanager.compute.models.GalleryImageVersion.Update.yml
|
name: release and push to central
on:
workflow_dispatch:
inputs:
releaseversion:
description: 'Release version'
required: true
jobs:
publish:
runs-on: ubuntu-latest
steps:
- run: echo "Will start a Maven Central upload with version ${{ github.event.inputs.releaseversion }}"
- uses: actions/checkout@v2
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Set up Maven Central Repository
uses: actions/setup-java@v1
with:
java-version: 1.8
server-id: ossrh # Value of the distributionManagement/repository/id field of the pom.xml
server-username: MAVEN_USERNAME # env variable for username in deploy
          server-password: MAVEN_CENTRAL_TOKEN # env variable for token in deploy
gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }} # Value of the GPG private key to import
          gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase
- name: Set projects Maven version to GitHub Action GUI set version
run: mvn versions:set "-DnewVersion=${{ github.event.inputs.releaseversion }}"
- name: Publish package
run: |
mvn --batch-mode clean deploy -DskipTests=true
rm ./pom.xml.versionsBackup
git config --global user.name '<NAME>'
git config --global user.email '<EMAIL>'
git commit -a -m "build: update pom version"
git push
git tag "v${{ github.event.inputs.releaseversion }}"
git push origin --tags
env:
MAVEN_USERNAME: ${{ secrets.OSS_SONATYPE_USERNAME }}
MAVEN_CENTRAL_TOKEN: ${{ secrets.OSS_SONATYPE_PASSWORD }}
MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
- name: Generate changelog
id: changelog
uses: metcalfc/changelog-generator@v3.0.0
with:
myToken: ${{ secrets.GITHUB_TOKEN }}
- name: Create GitHub Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.event.inputs.releaseversion }}
release_name: ${{ github.event.inputs.releaseversion }}
body: |
Grab the new version from Maven central:
```
<dependencies>
<dependency>
<groupId>io.github.stawirej</groupId>
<artifactId>fluent-api</artifactId>
<version>${{ github.event.inputs.releaseversion }}</version>
</dependency>
</dependencies>
```
### Things that changed in this release
${{ steps.changelog.outputs.changelog }}
draft: false
prerelease: false
|
.github/workflows/release.yml
|
thing-id: 4195865
name: "Triumph - Interlocking puzzle by <NAME> (STC #15)"
tags: [puzzle, 3D_puzzle]
images: [assembly-A.jpg, assembly-B.jpg, assembly-C.jpg, pieces.jpg]
targets: [bases, tips]
configurations:
- {name: '', code: '', targets: ''}
- {name: 'larger', code: '$burr_scale = 32;', targets: ''}
description: |
_Triumph_ is a clever design by <NAME>: six pieces in three mirror-image pairs assemble in various ways to form interesting polyhedral structures. In Coffin's words:
"Wouldn’t it be fun to have an interlocking puzzle that could be assembled different ways to form several different geometric shapes? With some effort and perhaps a little confusion, Triumph can be assembled into any one of three different polyhedral shapes, all having a three-fold axis of symmetry, as well as into many other nondescript shapes."
Two of those three shapes are shown in the photos (one of them in two distinct color configurations). I'll leave it to the reader to find the third! If you enjoy _Triumph_, be sure to check out two related puzzles as well. [Triumph Companion](https://www.thingiverse.com/thing:4195866) is a sort of sequel to _Triumph_, with six new pieces that also form numerous polyhedral constructions and can be combined with _Triumph_ for even greater variety. [Fusion-Confusion](https://www.thingiverse.com/thing:4195886) is a modification of _Triumph_ that converts it from a polyhedral recreation into more of a bona fide puzzle.
### Printing Instructions
Print three copies each of `coffin.triumph.bases.stl` and `coffin.triumph.tips.stl`. Use one filament color for the bases and a different filament color for the tips.
This puzzle uses "snap joints" so that it can be printed in multiple colors. After you print it, first connect all the joints before attempting to solve the puzzle. Snap each male connector into a corresponding female connector (for example, each male connector labeled "A" should connect to a female connector with a matching "A" label on the interior). If they come out too loose, they can be reinforced with a few drops of superglue, but this usually isn't necessary. The joints are designed to be printed without supports.
Optionally, you may print `coffin.triumph.bases-larger.stl` and `coffin.triumph.tips-larger.stl` for a grander version of the puzzle, which will use around 50% more material.
${ppp-boilerplate}
${stewart-coffin-bio}
Happy puzzling!
|
src/main/scad/stewart-coffin/triumph/coffin.triumph.yaml
|
name: microservice starters publish
on:
# push:
# branches: [ master ]
release:
types: [created]
env:
TAOTAO_CLOUD_OSSRH_USERNAME: ${{secrets.TAOTAO_CLOUD_OSSRH_USERNAME}}
TAOTAO_CLOUD_OSSRH_PASSWORD: ${{secrets.TAOTAO_CLOUD_OSSRH_PASSWORD}}
TAOTAO_CLOUD_MAVEN_USERNAME: ${{secrets.TAOTAO_CLOUD_MAVEN_USERNAME}}
TAOTAO_CLOUD_MAVEN_PASSWORD: ${{secrets.TAOTAO_CLOUD_MAVEN_PASSWORD}}
TAOTAO_CLOUD_GITHUB_USERNAME: ${{secrets.TAOTAO_CLOUD_GITHUB_USERNAME}}
TAOTAO_CLOUD_GITHUB_TOKEN: ${{secrets.TAOTAO_CLOUD_GITHUB_TOKEN}}
TAOTAO_CLOUD_REGISTRY_USERNAME: ${{secrets.TAOTAO_CLOUD_REGISTRY_USERNAME}}
TAOTAO_CLOUD_REGISTRY_PASSWORD: ${{secrets.TAOTAO_CLOUD_REGISTRY_PASSWORD}}
jobs:
publish:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v2
- name: set up jdk 17
uses: actions/setup-java@v2
with:
java-version: '17'
distribution: 'adopt'
server-id: github
- name: cache gradle packages
uses: actions/cache@v2
with:
path: ~/.gradle/caches
key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }}
restore-keys: ${{ runner.os }}-gradle
# base64 secring.gpg > secring.gpg.b64 SIGNING_SECRET_KEY_RING_FILE -> secring.gpg.b64
- name: decode signing secret key ring file
run: |
echo "${{secrets.TAOTAO_CLOUD_SIGNING_SECRET_KEY_RING_FILE}}" > ~/secring.gpg.b64 && base64 -d ~/secring.gpg.b64 > ~/secring.gpg
echo "signing.keyId=${{secrets.TAOTAO_CLOUD_SIGNING_KEY_ID}}" >> gradle.properties
echo "signing.password=${{secrets.TAOTAO_CLOUD_SIGNING_PASSWORD}}" >> gradle.properties
echo "signing.secretKeyRingFile=$HOME/secring.gpg" >> gradle.properties
- name: build publish dependencies with gradle
run: |
cd $PWD/taotao-cloud-dependencies
gradle publishToMavenLocal publishAllPublicationsToSonatypeRepository
#publishMavenJavaPublicationToGitHubRepository
- name: build publish starter with gradle
run: |
starter_dir=$PWD/taotao-cloud-microservice/taotao-cloud-starter
for starter in `ls $starter_dir`
do
if [ -d $starter_dir"/"$starter ];then
cd $starter_dir"/"$starter
gradle --continue clean build -x test -x bootJar publishAllPublicationsToSonatypeRepository
fi
done
#publishMavenJavaPublicationToGitHubRepository
#gradle publishAllPublicationsToSonatypeRepository -Psigning.keyId=${{secrets.TAOTAO_CLOUD_SIGNING_KEY_ID}} -Psigning.password=${{secrets.TAOTAO_CLOUD_SIGNING_PASSWORD}} -Psigning.secretKeyRingFile=$(echo ~/secring.gpg)
|
.github/workflows/taotao-cloud-microservice-starters-publish.yml
|
# Name of your site (displayed in the header)
name: marcoonroad's blog
title: marcoonroad's blog
# Short bio or description (displayed in the header)
description: Zzzzzz...
# URL of your avatar or profile pic (you could use your GitHub profile pic)
# avatar: https://scontent-gru2-1.xx.fbcdn.net/v/t1.0-9/fr/cp0/e15/q65/75941_684038581685637_5336390958017503811_n.jpg?efg=eyJpIjoiYiJ9&oh=86f594ae941da4319af9356b76552d14&oe=589DEB67
avatar: https://avatars3.githubusercontent.com/u/3820407?s=460&v=4
github-projects:
- modest
- families
- talents
- cuid
- hieroglyphs
- sporadic
- nocoiner
- spadille
- twostep
personal-resumes:
- path: assets/resume-en-us.pdf
label: "Resume EN-US"
- path: assets/curriculo-pt-br.pdf
label: "Currículo PT-BR"
- path: assets/cover-letter-en-us.pdf
label: "Cover Letter EN-US"
- path: assets/carta-de-apresentacao-pt-br.pdf
label: "Carta Apres. PT-BR"
#
# Flags below are optional
#
# Includes an icon in the footer for each username you enter
footer-links:
dribbble:
email: <EMAIL>
ethereum: "0x74Bd7803A3e3bA7d28baf5aD22287D0506A04b67"
bitcoin: "13jnVxMenKUWb1RA1pKsQkri5HY1u89o2D"
tezos: "tz1Y5yUPjqkc32EwpfGFmCbqNCXm7253Csu9"
facebook: # marcoonroad
flickr:
github: marcoonroad # barryclark/jekyll-now
instagram: marcoonroad
spotify: "https://open.spotify.com/user/31p6jaavqahpv6sloy2xdc7re6mi?si=iSQzkS9SShG9hAfFnFh51Q" # whole URL here
youtubemusic: "https://music.youtube.com/playlist?list=PLd79hgAAAf3hI8rXrX0t0_LVqw4fzjQbF" # whole URL here too
linkedin: marcoonroad
pinterest: marcoonroad
rss: feed # just type anything here for a working RSS icon
twitter: marcoonroad
stackoverflow: # your stackoverflow profile, e.g. "users/50476/bart-kiers"
youtube: user/marcoonroad7
googleplus: # anything in your profile username that comes after plus.google.com/
# Enter your Disqus shortname (not your username) to enable commenting on posts
# You can find your shortname on the Settings page of your Disqus account
disqus: marcoonroad
# Enter your Google Analytics web tracking code (e.g. UA-2110908-2) to activate tracking
google_analytics: UA-41209773-2
# Your website URL (e.g. http://barryclark.github.io or http://www.barryclark.co)
# Used for Sitemap.xml and your RSS feed
url: "https://marcoonroad.dev"
# If you're hosting your site at a Project repository on GitHub pages
# (http://yourusername.github.io/repository-name)
# and NOT your User repository (http://yourusername.github.io)
# then add in the baseurl here, like this: "/repository-name"
baseurl: ""
#
# !! You don't need to change any of the configuration flags below !!
#
permalink: /:title/
# The release of Jekyll Now that you're using
version: v3.7.4
# Jekyll 3 now only supports Kramdown for Markdown
kramdown:
# Use GitHub flavored markdown, including triple backtick fenced code blocks
input: GFM
# Jekyll 3 and GitHub Pages now only support rouge for syntax highlighting
syntax_highlighter: rouge
syntax_highlighter_opts:
# Use existing pygments syntax highlighting css
css_class: 'highlight'
# Set the Sass partials directory, as we're using @imports
sass:
style: :compressed # :expanded # You might prefer to minify using :compressed
# Use the following plug-ins
plugins:
- jekyll-sitemap # Create a sitemap using the official Jekyll sitemap gem
- jekyll-feed # Create an Atom feed using the official Jekyll feed gem
- jekyll-mentions
- jekyll-redirect-from
- jekyll-gist
- jekyll-seo-tag
# - jekyll-press
# Exclude these files from your production _site
exclude:
- Gemfile
- Gemfile.lock
- LICENSE
- README.md
- CNAME
- vendor
|
_config.yml
|
id: 1c408090-0320-44d5-aba8-e55500c2cfad
name: Rare/New Outbound connections from VMs by process and location
description: |
  This identifies if there are unusual connections to an unexpected remote location from a VM
when compared with the last 30 days.
This may indicate connection to a C2 or data exfiltration from the associated VM.
It is recommended that you understand what the associated process is and whether or not this is expected.
severity: Low
requiredDataConnectors:
  - connectorId: AzureMonitor(VMInsights)
    dataTypes:
    - VMConnection
queryFrequency: 1h
queryPeriod: 30d
triggerOperator: gt
triggerThreshold: 5
tactics:
- CommandAndControl
- InitialAccess
- Exfiltration
  - Discovery
- Collection
query: |
let CountryCount = VMConnection
| where TimeGenerated < ago(1h) and TimeGenerated >= ago(30d)
| where isnotempty(RemoteCountry)
| where Direction == "outbound"
| summarize count() by RemoteCountry
| where count_ > 5;
let Full = VMConnection
| where TimeGenerated >= ago(1h)
| where isnotempty(RemoteCountry)
| where Direction == "outbound"
| summarize min(TimeGenerated), max(TimeGenerated), sum(BytesSent), sum(BytesReceived)
by Computer, Direction, ProcessName, SourceIp, DestinationIp, DestinationPort, Protocol, RemoteCountry, MaliciousIp;
let Today = VMConnection
| where TimeGenerated >= ago(1h)
| where isnotempty(RemoteCountry)
| where Direction == "outbound"
| summarize count() by ProcessName, SourceIp, DestinationIp, RemoteCountry;
let lastweek = VMConnection
| where TimeGenerated < ago(1h) and TimeGenerated >= ago(7d)
| where isnotempty(RemoteCountry)
| where Direction == "outbound"
| summarize SourceIpCount = count() by ProcessName, SourceIp, DestinationIp, RemoteCountry
| where SourceIpCount > 7 ;
let New = Today | join kind= leftanti (
lastweek
) on ProcessName, SourceIp, DestinationIp, RemoteCountry;
let RemoveCommonCountry = New | join kind= leftanti (
CountryCount
) on RemoteCountry;
RemoveCommonCountry | join kind= innerunique (
Full
) on RemoteCountry
|
Detections/VMConnection/RareOutboundConnectionByProcess.yaml
|
name: cyberneom
description: Guided meditation with your own voice
# The following defines the version and build number for your application.
# A version number is three numbers separated by dots, like 1.2.43
# followed by an optional build number separated by a +.
# Both the version and the builder number may be overridden in flutter
# build by specifying --build-name and --build-number, respectively.
# In Android, build-name is used as versionName while build-number used as versionCode.
# Read more about Android versioning at https://developer.android.com/studio/publish/versioning
# In iOS, build-name is used as CFBundleShortVersionString while build-number used as CFBundleVersion.
# Read more about iOS versioning at
# https://developer.apple.com/library/archive/documentation/General/Reference/InfoPlistKeyReference/Articles/CoreFoundationKeys.html
version: 1.0.0+1
environment:
sdk: ">=2.12.0 <3.0.0"
dependencies:
flutter:
sdk: flutter
flutter_localizations:
sdk: flutter
rxdart: ^0.26.0
get: 4.1.4
google_fonts: 2.0.0
font_awesome_flutter: 9.1.0
transparent_image: 2.0.0
marquee: 2.1.0
google_mobile_ads: 0.12.1+1
firebase_core: 1.3.0
cloud_firestore: 2.3.0
firebase_storage: 9.0.0
firebase_auth: 2.0.0
firebase_analytics: 8.1.2
firebase_messaging: 10.0.3
flutter_login_facebook: 1.0.1
auth_buttons: ^1.0.1+3
flutter_facebook_auth: 3.3.3+1
http: 0.13.3
rflutter_alert: 2.0.2
logger: 1.0.0
enum_to_string: 2.0.1
geolocator: 7.6.0
geocoding: 2.0.0
flutter_rating_bar: 4.0.0
image: 3.0.2
animator: 3.0.0
image_picker: 0.7.4
path_provider: ^2.0.2
timeago: 3.0.2
cached_network_image: ^3.0.0
flutter_svg: 0.22.0
video_player: 2.1.1
video_compress: 3.0.0
catcher: 0.6.6
cupertino_icons: 1.0.3
url_launcher: 6.0.3
google_sign_in: 5.1.0
intl: 0.17.0
scroll_snap_list: 0.8.0
oauth2: 2.0.0
pedantic: 1.11.0
shared_preferences: ^2.0.6
#DATE PICKERS
syncfusion_flutter_datepicker: ^19.2.47
flutter_datetime_picker: ^1.5.1
share: ^2.0.4
carousel_slider: ^4.0.0
surround_sound: ^0.3.1
english_words: ^4.0.0
sleek_circular_slider: ^2.0.1
uuid: ^3.0.4
#First we need permission.RECORD_AUDIO and Privacy Policy
  #sound_stream: 0.3.0
#noise_meter: ^3.0.2
#Tonic is a Flutter-compatible Dart package that models components of music theory.
#Used for Neom Harmony
#tonic: ^0.2.4
#AudioProcessing
#flutter_fft: ^1.0.2+6
#Virtual Reality Experimental
panorama: ^0.4.1
video_360: ^0.0.3
split_view: ^3.1.0
flutter_custom_tabs: 1.0.3
dev_dependencies:
flutter_test:
sdk: flutter
flutter_launcher_icons: 0.9.1
flutter_icons:
image_path: assets/IconCyberneom.png
android: true
ios: true
# For information on the generic Dart part of this file, see the
# following page: https://www.dartlang.org/tools/pub/pubspec
# The following section is specific to Flutter.
flutter:
# The following line ensures that the Material Icons font is
# included with your application, so that you can use the icons in
# the material Icons class.
uses-material-design: true
# To add assets to your application, add an assets section, like this:
# assets:
# - images/a_dot_burr.jpeg
# - images/a_dot_ham.jpeg
# An image asset can refer to one or more resolution-specific "variants", see
# https://flutter.dev/assets-and-images/#resolution-aware.
# For details regarding adding assets from package dependencies, see
# https://flutter.dev/assets-and-images/#from-packages
# To add custom fonts to your application, add a fonts section here,
# in this "flutter" section. Each entry in this list should have a
# "family" key with the font family name, and a "fonts" key with a
# list giving the asset and other descriptors for the font. For
# example:
fonts:
- family: Open-Sans
fonts:
- asset: assets/font/OpenSans-Regular.ttf
weight: 400
- asset: assets/font/OpenSans-Light.ttf
weight: 300
- asset: assets/font/OpenSans-SemiBold.ttf
weight: 600
- asset: assets/font/OpenSans-Bold.ttf
weight: 700
- asset: assets/font/OpenSans-ExtraBold.ttf
weight: 800
# - asset: fonts/Schyler-Italic.ttf
# style: italic
# - family: Trajan Pro
# fonts:
# - asset: fonts/TrajanPro.ttf
# - asset: fonts/TrajanPro_Bold.ttf
# weight: 700
#
# For details regarding fonts from package dependencies,
# see https://flutter.dev/custom-fonts/#from-packages
assets:
- assets/
- assets/font/
- assets/logo/
- assets/third-party-logo/
- assets/image/
- assets/frequency-presets.json
|
pubspec.yaml
|
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: cloudflare-creds
namespace: networking
stringData:
api-token: ENC[AES256_GCM,data:S/E990efRk77EL1Opn8k1RAiYwT9wLRtNRVuQa7lRTnwg6y17MdhmQ==,iv:GgnmhkC2KCnpbjzBu0iywMTdGp3BA3t2D9TfPY+e3NY=,tag:Q7thPBiU/EB5Cu/FhZxsUQ==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age: []
lastmodified: "2021-09-20T22:44:32Z"
mac: ENC[AES256_GCM,data:ee1zmYKORfwGac561VFsXkuBxfNnmODUaUI0b/2KvYoWdlmLmAMETN/sVL/TOfgzIbW0dJY4oZSh1rm2Svpj7J8kxtHc/6pG5xClND5QLAZh+JpPs/oO8qVXnkCCE0NcLpZb6W64eUfzcJl4Ub9jvPt6RfKtX6hWtQqoGH0L9BA=,iv:IcgvYF9qlr/tXlv9F4qsCsR1c8msPvx5Zwb7+qR27bU=,tag:AWBIuojNeI9DcAlk3YoSQw==,type:str]
pgp:
- created_at: "2021-09-20T22:44:31Z"
enc: |
-----BEGIN PGP MESSAGE-----
hQIMA1GgX0oycVV1AQ//bQiHOYnOUQRuS0D7B2fitx3PoalEliSKjHRdF3WMCZfG
JtFLxv0mN5vRLTMwcYKeHdP+8dmxRxPnMGv6uDMh6W099kPBNBmT7/DXd/8UFviu
IuetChXjRlgWm3jSxQymQulgsY2qcsy6gNNDmnOBk6peuapDWu41jCJKeKHeL79n
F8WUcVRthLQzRYwzFcVpzOqwaeD8hMKzXHonTQQBdHYKOw7KUG9DmO/nmU1kVllZ
VEzBHPFu/5GL6SeqQwz6VjbvizyGdxjsrjkpv10CYcb2LUL8v+5LgImZx0ARvA7q
9d/1mkxap4c1LPXg/x3zWblLwtsTtImrlUwNqPF1FpUQOvTiMRvOHcSRQejTiu4D
kXDRtVRKeLl9fePBk0Z5xj6Ou4Tdy7VN+Y0uyXCpsrFZzC2A1IAzDFERxx2C9BTm
U9DLpmNyits/oSbGo5RaNIUHnZ69Z63H2ASvIQo/sX3KJA7IQWWaU3sMTZM5bbuT
+fIzMTNBrVakasJbOjhabF1+u1TJZ5dw7gZQ0JX09HBfxbC46EXgAfOCFYLy0Jo7
EvCRjM+HBUrJmtEYoBq94cShDzpyAx59WFKfkOrVtZQC5w1dulvhBCyrkYi0CwtS
tvZRig6C9L97VbH3lCpq9yLsGJkGBHxvO404v+h7e90hRhlEYSVkIth2FFiqJu/S
XAHa3mLiqmmnwIOHxWDcaAt+INFRiGKq3a3tOGh4JFgl41oXRDp/W01pPNEnst5J
dVvZR1oy64jienpPvdGnSb0DL8D17QOAK7Tn6vY4r6p28RqNkqnQUJTWSu2H
=cI9X
-----END PGP MESSAGE-----
fp: 17299B027FB6B1C68971411B5FF3C7583ADD1EFB
- created_at: "2021-09-20T22:44:31Z"
enc: |
-----BEGIN PGP MESSAGE-----
hQIMA5XaZDWXjonIAQ//X4uON/TY70qKJby86CA4NRseBWTK4Fg3JSLJxglkSgjX
fL+dTrQ4xseH/WRDcEUjEaNazoAjuEsQXiCd2DKePYiwbEq6BvdjUp5UlKiymfMm
vD4E40PjNBZSuN1AmCUYJcNsPiL1WH3FJCa+GI/LKHje8ZXB1mpl/fxGJTTS+ID/
dGAdsD+ZbU9nX1eonNC3D2smWB4wYXpzR/6YHIqtgVJ872hpd8ykQo+qYz5/OqC6
Z+DonPLw0gwu7Gdmtih3M66B7vzV8aWZSGtQeJQOrAzOv+hqUJAJnqEPoAaPTzyT
iXYpOOjOjvdgB4ETHCZglvj7bunyM0FSvdb94reFzlwDtRVUT7ssSBSn3VGWfck0
nOt0W2e+wIU3JnPNMUhVz14bci4c0Mpo3qESbFI0Eiw0RTbBOcjZ1g1if84lyChG
qkrhtHnONzYy90XJiN4ppOUVNe6cyvxip5HPXC+uq7hn9WQo6tUAm03HrGyzBZqg
H/bsv5LNQqyivMVg5UE17EXrVOr0jQZflcOn2ubVaWzJThMHWb+MgDASf+HVWQ43
m9HDS<KEY>BB
hvWGJsntLLMr76mNaENBNQHrNMg/jI9Yg7aL1xtYN3n4a4c6Zag4amQxCbcFGtfS
XAH2xCrlV/OHnhf1WUEGQOsxxRwde9kANjyy5o1FTMvQhMZ3MKrxXoQ1GFGNDF0j
/KtAgz97BsIBiU/DjxYH4c/cS4xu+Xp/zn+wFiU+0ZvC66tjbvWBqw2n65KU
=0OP4
-----END PGP MESSAGE-----
fp: 147C67A0D68CF30B3C7ED3AB43A189D025B879AC
encrypted_regex: ^(data|stringData)$
version: 3.7.1
|
cluster/apps/networking/external-dns/secret.sops.yaml
|
---
- description: Add detailed information to your Monzo transactions!
link: https://paper.dropbox.com/doc/Monzo-Receipts-API--AV72zCKRwCP0dRyzJVMyisBrAg-eZscuadhVo1QOF2N48JXe
name: Monzo Receipts API
- description: ''
link: https://api-docs.transferwise.com/#transferwise-api
name: 'TransferWise API'
- description: The Alexa Skills Kit (ASK) is a collection of self-service APIs, tools,
documentation, and code samples that makes it fast and easy for you to add skills
to Alexa.
link: https://developer.amazon.com/alexa-skills-kit/
name: Alexa Skill Kit
rows: '2.5'
- description: Arctic is a high performance datastore accessible from python. It uses
MongoDB to store pandas dataframes, numpy arrays and pickled objects, and can
query millions of rows per second per client and scale to hundreds of millions
of rows per second per MongoDB instance.
link: https://github.com/manahl/arctic
name: 'Man AHL: Arctic'
rows: '3.0'
- description: PyBloqs is a flexible framework for visualizing data and automated
creation of reports, and works with Pandas, matplotlib and highcharts. Reports
can be displayed as HTML in the browser or exported in a variety of formats (including
HTML, PDF, SVG, PNG).
link: https://github.com/manahl/PyBloqs
name: 'Man AHL: PyBloqs'
rows: '3.0'
- description: Pynorama is a tool for visualizing intricate datasets (e.g. text data,
documents) for which a simple table format is not suitable, and was created with
Natural Language Processing applications in mind.
link: https://github.com/manahl/pynorama
name: 'Man AHL: Pynorama'
rows: '2.5'
- description: 'An incredibly easy method for websites to receive bitcoin payments.
This service is completely free and secure. Perfect for business or personal use.'
link: https://www.blockchain.com/api/api_receive
name: 'Blockchain: Receive Payments'
rows: '2.0'
- description: Blockchain's APIs to send and receive payment from Blockchain Wallets.
link: https://www.blockchain.com/api/blockchain_wallet_api
name: 'Blockchain: Wallet'
rows: '2.0'
- description: Query JSON data on blocks and transactions. Almost all the data you
see on this website is available in JSON format.
link: https://www.blockchain.com/api/blockchain_api
name: 'Blockchain: Data API'
rows: '2.0'
- description: 'Simple plain text API for querying blockchain data.'
link: https://www.blockchain.com/api/q
name: 'Blockchain: Simple Query API'
- description: 'Low latency streaming socket channel providing data on new blocks
and transactions. Subscribe to notification on blocks, transactions or an address
and receive JSON objects describing a transaction or block when an event occurs.'
link: https://www.blockchain.com/api/api_websocket
name: 'Blockchain: Websockets'
rows: '2.5'
- description: 'Currency data from the major bitcoin exchanges.'
link: https://www.blockchain.com/api/exchange_rates_api
name: 'Blockchain: Exchange Rates'
- description: 'JSON feed containing the data from the charts and stats pages.'
link: https://www.blockchain.com/api/charts_api
name: 'Blockchain: Charts and Statistics'
rows: '2.0'
- description: ''
link: http://hackcambridge.featurespace.co.uk:20080/
name: Featurespace Javascript Collector (JSC)
- description: 'A demo version of the national GP Connect API which allows you to
retrieve patient records and manage GP appointments. Supporting files and documentation
will be posted to the #sponsor-tpp Slack channel.'
link: https://developer.nhs.uk/apis/gpconnect/
name: 'TPP: GP Connect'
rows: '2.5'
- description: |
    Keanu is a general-purpose probabilistic programming library. Probabilistic
    programming is programming that accounts for uncertainty. You can express what you don't
    know about a program by describing the unknowns as probability distributions.
link: https://improbable-research.github.io/keanu/
name: 'Improbable: Keanu'
rows: '2.5'
|
assets/resources/apis.yml
|
---
# enabled test cases sets
enable:
- "Blobber"
- "Invalid sharder response"
# sets of test cases
sets:
- name: "Blobber"
tests:
- "Send wrong or not send a response"
- name: "Invalid sharder response"
tests:
- "Sharder sends wrong transaction verification"
# test cases
tests:
- name: "Send wrong or not send a response"
flow:
- command:
name: 'build_test_blobbers'
- set_monitor: "sharder-1"
- cleanup_bc: {}
- command:
name: "cleanup_blobbers"
- command:
name: "cleanup_0dns"
- start: ['sharder-1', 'miner-1', 'miner-2', 'miner-3']
- wait_round:
round: 15 # just wait the BC starts
- start: ['0dns']
# wait the 0dns starts
- command:
name: "sleep_20s"
- start: ['blobber-1', 'blobber-2', 'blobber-3']
- wait_add:
blobbers: ['blobber-1', 'blobber-2', 'blobber-3']
timeout: '5m'
- command:
name: 'init_allocation'
timeout: '35m'
      # 3rd blobber becomes 'bad', sending 'bad' responses to zbox
- storage_tree:
bad: ['blobber-3']
# just wait some time the blobber-3 pools new 'bad' state
- command:
name: 'sleep_20s'
- command:
name: 'list'
- command:
name: 'download'
- command:
name: 'delete'
- command:
name: 'upload'
- name: "Sharder sends wrong transaction verification"
flow:
- set_monitor: "sharder-1"
- cleanup_bc: {}
- command:
name: "cleanup_0dns"
- start: ['sharder-1']
- start: ["sharder-2"]
- start: ['miner-1', 'miner-2', 'miner-3']
- wait_sharder_keep:
timeout: '5m'
sharders: ['sharder-2']
- wait_view_change:
expect_magic_block:
miners: ['miner-1', 'miner-2', 'miner-3']
sharders: ['sharder-1', 'sharder-2']
- verify_transaction:
by: ['sharder-2']
- start: ['0dns']
- command:
name: "sleep_20s" # wait the 0dns starts
# double and triple sleep may help for a big network latency
- command:
name: "sleep_20s" # wait the 0dns starts
- command:
name: "sleep_20s" # wait the 0dns starts
- command:
name: 'unverified_transaction'
...
|
quickstart/1s_2m_4b/config/reference/0Chain/docker.local/config/conductor.blobber-1.yaml
|
---
apiVersion: apiextensions.crossplane.io/v1
kind: Composition
metadata:
name: xbuckets.test.crossplane.io
spec:
writeConnectionSecretsToNamespace: crossplane-system
compositeTypeRef:
apiVersion: test.crossplane.io/v1alpha1
kind: XBucket
patchSets:
- name: external-name
patches:
- type: FromCompositeFieldPath
fromFieldPath: metadata.annotations[crossplane.io/external-name]
toFieldPath: metadata.annotations[crossplane.io/external-name]
resources:
- name: bucket
base:
apiVersion: storage.gcp.crossplane.io/v1alpha3
kind: Bucket
spec:
location: US
storageClass: MULTI_REGIONAL
patches:
- type: PatchSet
patchSetName: external-name
- type: FromCompositeFieldPath
fromFieldPath: spec.parameters.location
toFieldPath: spec.location
transforms:
- type: map
map:
USA: US
Europe: EU
- name: service-account
base:
apiVersion: iam.gcp.crossplane.io/v1alpha1
kind: ServiceAccount
spec:
forProvider:
displayName: "Scale Testing"
description: "A service account for scale testing"
patches:
- type: PatchSet
patchSetName: external-name
- type: ToCompositeFieldPath
fromFieldPath: status.atProvider.email
toFieldPath: status.serviceAccount.email
connectionDetails:
- name: serviceAccountEmail
type: FromFieldPath
fromFieldPath: status.atProvider.email
- name: service-account-key
base:
apiVersion: iam.gcp.crossplane.io/v1alpha1
kind: ServiceAccountKey
spec:
forProvider:
serviceAccountSelector:
matchControllerRef: true
writeConnectionSecretToRef:
namespace: crossplane-system
patches:
- type: FromCompositeFieldPath
fromFieldPath: metadata.uid
toFieldPath: spec.writeConnectionSecretToRef.name
transforms:
- type: string
string:
fmt: "%s-service-account-key"
connectionDetails:
- name: serviceAccountName
type: FromFieldPath
fromFieldPath: metadata.name
- name: serviceAccountPrivateKey
type: FromConnectionSecretKey
fromConnectionSecretKey: privateKey
- name: bucket-policy-member
base:
apiVersion: storage.gcp.crossplane.io/v1alpha1
kind: BucketPolicyMember
spec:
forProvider:
bucketSelector:
matchControllerRef: true
serviceAccountMemberSelector:
matchControllerRef: true
role: roles/storage.objectAdmin
# Workaround for the below issue. Pretty sure deleting the bucket will
# implicitly delete this policy member.
# https://github.com/crossplane/provider-gcp/issues/370
deletionPolicy: Orphan
|
package/xbuckets.test.crossplane.io/composition.yaml
|
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: components.backstage.io
spec:
group: backstage.io
names:
kind: Component
listKind: ComponentList
plural: components
singular: component
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: Component is the Schema for the components API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ComponentSpec defines the desired state of Component
properties:
ProvidesApis:
items:
type: string
type: array
consumesApis:
items:
type: string
type: array
dependsOn:
items:
type: string
type: array
lifecycle:
type: string
owner:
type: string
subcomponentOf:
type: string
system:
type: string
type:
description: Foo is an example field of Component. Edit component_types.go to remove/update
type: string
type: object
status:
description: ComponentStatus defines the observed state of Component
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
|
backstage-crds/apiextensions.k8s.io_v1_customresourcedefinition_components.backstage.io.yaml
|
tasks:
- id: handout01_00
markdown: |
Na hodině jsme se naučili pracovat s interaktivní Python konzolí. Zkus pomocí Pythonu vypočítat:
3+(4+6)×8÷2−1 =
(Závorky v Pythonu fungují jako v matematice)
- id: handout01_01
markdown: |
Jsou i jiné operátory než `+`, `-` a ty pro násobení a dělení.
Co dělá s čísly operátor `%` (procento)?
- id: handout01_02
markdown: |
A co dělá operátor `**` (dvě hvězdičky)?
- id: handout01_03
markdown: |
Jaký je v Pythonu rozdíl mezi dělením pomocí `/` a `//`? (Zkus si v Pythonu a odpověz slovně)
- id: handout01_04
markdown: |
Řetězce jdou spojovat sčítáním. Například:
```python
>>> 'A' + "B"
'AB'
```
Poznáš, co je tady špatně? Jak bys to spravila?
```python
>>> 'Ahoj' + 'PyLadies!'
```
- id: handout01_05
markdown: |
Řetězce se dají sčítat. Dají se i násobit? Dělit? Odečítat? (Odpověz slovně)
- id: handout01_06
markdown: |
Co se stane, když se pokusím sečíst číslo a řetězec? (Můžeš vložit výsledek z konzole, ale odpověz i slovně)
- id: handout01_07
markdown: |
A vynásobit? (Můžeš vložit výsledek z konzole, ale odpověz i slovně)
- section:
markdown: |
Poslední úkol se neodevzdává. Je pro tebe, aby sis před další hodinou ověřila, že vše funguje, jak má:
- id: handout01_08
markdown: |
V adresáři 01 ve složce pro kurz si vytvoř soubor `funguju.py` . Do souboru ulož:
```
print("Hurá, funguju!")
```
Pak se v příkazové řádce přepni do adresáře, kde jsi vytvořila soubor, aktivuj virtuální prostředí a do příkazové řádky napiš:
```
python funguju.py
```
Objevila se oslavná hláška? Gratulujeme, jsi připravena na další hodinu!
submit: false
- section:
markdown: |
**Co po této lekci umím:**
- numbered: false
submit: false
markdown: |
- Spustit příkazovou řádku a ukončit práci v ní
- Základní příkazy v příkazové řádce:
- zeptat se na adresář, ve kterém jsem
- zjistit, co v adresáři je
- přesunout se do daného adresáře
- vytvořit či smazat adresář
- Mám nainstalovaný a funkční Python 3.6 nebo 3.7 a vytvořené virtuální prostředí
- Mám adresář pro kurz PyLadies a v něm složky/adresáře pro jednotlivé hodiny
- Umím aktivovat virtuální prostředí (určitě si doma zkus, že funguje)
- Umím spustit a ukončit interaktivní Python
- Umím použít základní operátory (`+`,`-`,`/`,`//`,`*`,`%`) v Pythonu
- Mám nainstalovaný textový editor a umím si v něm vytvořit a uložit soubory
**Pokud ti něco z výše uvedeného nefunguje, prosím, kontaktuj organizátorky, rády ti pomohou.**
|
data/2020_pyladies_praha_spring/tasks/01_prvni_program.yaml
|
documentType: LandingData
title: Azure IoT Edge
metadata:
title: Azure IoT Edge - チュートリアル、API リファレンス
description: Azure IoT Edge を使用して、Azure サービスとカスタム ビジネス ロジックをエッジのデバイスにデプロイします。
author: kgremban
manager: timlt
ms.service: iot-edge
services: iot-edge
ms.topic: landing-page
ms.date: 06/21/2018
ms.author: kgremban
ms.openlocfilehash: 4c2ffde78b2a29601722f4233c69c99b37f332d5
ms.sourcegitcommit: 150a40d8ba2beaf9e22b6feff414f8298a8ef868
ms.translationtype: HT
ms.contentlocale: ja-JP
ms.lasthandoff: 06/27/2018
ms.locfileid: "37028542"
abstract:
description: Azure IoT Edge は、IoT Hub の上に構築されるモノのインターネット (IoT) サービスです。 このサービスは、クラウド内ではなく、デバイスで (つまり "エッジで") データを分析したいお客様を対象としています 。 ワークロードの一部をエッジに移動することで、デバイスがクラウドにメッセージを送る時間を減らし、状態の変化により迅速に反応できるようにすることができます。
sections:
- title: 5 分間のクイック スタート
items:
- type: paragraph
text: 初めての IoT Edge モジュールをデプロイし、シミュレートされたデバイスでデータを処理します。
- type: list
style: icon48
items:
- image:
src: https://docs.microsoft.com/media/logos/logo_Windows.svg
text: Windows
href: /azure/iot-edge/quickstart
- image:
src: https://docs.microsoft.com/media/logos/logo_linux-color.svg
text: Linux
href: /azure/iot-edge/quickstart-linux
- title: ステップバイステップのチュートリアル
items:
- type: paragraph
text: IoT Edge をインストールして構成する方法、モジュールをデプロイする方法、Stream Analytics などの Azure サービスをエッジで利用する方法について説明します。
- type: list
style: ordered
items:
- html: <a href="/azure/iot-edge/tutorial-deploy-function">Azure Functions を使用してメッセージをフィルター処理する</a>
- html: <a href="/azure/iot-edge/tutorial-deploy-stream-analytics">Azure Stream Analytics をモジュールとして使用して平均値を見つける</a>
- html: <a href="/azure/iot-edge/tutorial-deploy-machine-learning">Azure Machine Learning をモジュールとして使用してデータにスコアを付ける</a>
- html: <a href="/azure/iot-edge/tutorial-csharp-module">C# モジュールを使用してデータをフィルター処理する</a>
- html: <a href="/azure/iot-edge/tutorial-python-module">Python モジュールを使用してデータをフィルター処理する</a>
- html: <a href="/azure/iot-edge/tutorial-node-module">Node.js モジュールを使用してデータをフィルター処理する</a>
- html: <a href="/azure/iot-edge/tutorial-store-data-sql-server">SQL Server を使用してエッジでデータを格納する</a>
|
articles/iot-edge/index.yml
|
uid: "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight"
fullName: "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight"
name: "MicrosoftGraphSharedInsight"
nameWithType: "MicrosoftGraphSharedInsight"
summary: "sharedInsight."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
- "<xref href=\"com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphEntity\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphEntity.additionalProperties()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphEntity.id()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphEntity.validate()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphEntity.withAdditionalProperties(java.util.Map<java.lang.String,java.lang.Object>)"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphEntity.withId(java.lang.String)"
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public final class MicrosoftGraphSharedInsight extends MicrosoftGraphEntity"
constructors:
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.MicrosoftGraphSharedInsight()"
methods:
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.additionalProperties()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.lastShared()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.lastSharedMethod()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.resource()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.resourceReference()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.resourceVisualization()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.sharingHistory()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.validate()"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.withAdditionalProperties(java.util.Map<java.lang.String,java.lang.Object>)"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.withId(java.lang.String)"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.withLastShared(com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharingDetail)"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.withLastSharedMethod(com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphEntity)"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.withResource(com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphEntity)"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.withResourceReference(com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphResourceReference)"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.withResourceVisualization(com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphResourceVisualization)"
- "com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.withSharingHistory(java.util.List<com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharingDetail>)"
type: "class"
metadata: {}
package: "com.azure.resourcemanager.authorization.fluent.models"
artifact: com.azure.resourcemanager:azure-resourcemanager-authorization:2.2.0
|
docs-ref-autogen/com.azure.resourcemanager.authorization.fluent.models.MicrosoftGraphSharedInsight.yml
|
items:
- uid: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages
id: ComputerVisionTagImageDefinitionStages
artifact: com.microsoft.azure.cognitiveservices:azure-cognitiveservices-computervision:1.0.4-beta
parent: com.microsoft.azure.cognitiveservices.vision.computervision
children:
- com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.WithAllOptions
- com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.WithExecute
- com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.WithUrl
langs:
- java
name: ComputerVision.ComputerVisionTagImageDefinitionStages
nameWithType: ComputerVision.ComputerVisionTagImageDefinitionStages
fullName: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages
type: Interface
package: com.microsoft.azure.cognitiveservices.vision.computervision
summary: Grouping of tagImage definition stages.
syntax:
content: public static interface ComputerVision.ComputerVisionTagImageDefinitionStages
references:
- uid: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.WithAllOptions
name: ComputerVision.ComputerVisionTagImageDefinitionStages.WithAllOptions
nameWithType: ComputerVision.ComputerVisionTagImageDefinitionStages.WithAllOptions
fullName: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.WithAllOptions
- uid: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.WithExecute
name: ComputerVision.ComputerVisionTagImageDefinitionStages.WithExecute
nameWithType: ComputerVision.ComputerVisionTagImageDefinitionStages.WithExecute
fullName: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.WithExecute
- uid: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.WithUrl
name: ComputerVision.ComputerVisionTagImageDefinitionStages.WithUrl
nameWithType: ComputerVision.ComputerVisionTagImageDefinitionStages.WithUrl
fullName: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.WithUrl
|
docs-ref-autogen/com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVision.ComputerVisionTagImageDefinitionStages.yml
|
service: twitter-harvest
provider:
name: aws
runtime: python2.7
stage: dev
region: ap-southeast-2
iamRoleStatements:
- Effect: 'Allow'
Action:
- 'dynamodb:*'
Resource:
- Fn::Join:
- ''
- - 'arn:aws:dynamodb:*:*:table/'
- Ref: twitterConfigTable
# you can add statements to the Lambda function's IAM Role here
# iamRoleStatements:
# - Effect: "Allow"
# Action:
# - "s3:ListBucket"
# Resource: { "Fn::Join" : ["", ["arn:aws:s3:::", { "Ref" : "ServerlessDeploymentBucket" } ] ] }
# - Effect: "Allow"
# Action:
# - "s3:PutObject"
# Resource:
# Fn::Join:
# - ""
# - - "arn:aws:s3:::"
# - "Ref" : "ServerlessDeploymentBucket"
# - "/*"
# you can define service wide environment variables here
# environment:
# variable1: value1
# you can add packaging information here
package:
# include:
# - include-me.py
# - include-me-dir/**
exclude:
- .git/**
# - exclude-me.py
# - exclude-me-dir/**
functions:
harvest:
handler: handler.harvest
memorySize: 128
timeout: 5
events:
- schedule:
rate: rate(2 minutes)
enabled: true
user_tally:
handler: handler.user_tally
memorySize: 128
timeout: 5
# CloudFormation resources must be nested under `resources.Resources`,
# otherwise Serverless ignores them and `Ref: twitterConfigTable` fails.
resources:
  Resources:
    twitterConfigTable:
      Type: AWS::DynamoDB::Table
      Properties:
        AttributeDefinitions:
          - AttributeName: ConfigKey
            AttributeType: S
          - AttributeName: RangeKey
            AttributeType: S
        KeySchema:
          - AttributeName: ConfigKey
            KeyType: HASH
          - AttributeName: RangeKey
            KeyType: RANGE
        ProvisionedThroughput:
          ReadCapacityUnits: 5
          WriteCapacityUnits: 5
# The following are a few example events you can configure
# NOTE: Please make sure to change your handler code to work with those events
# Check the event documentation for details
# events:
# - http:
# path: users/create
# method: get
# - s3: ${env:BUCKET}
# - schedule: rate(10 minutes)
# - sns: greeter-topic
# - stream: arn:aws:dynamodb:region:XXXXXX:table/foo/stream/1970-01-01T00:00:00.000
# - alexaSkill
# - iot:
# sql: "SELECT * FROM 'some_topic'"
# - cloudwatchEvent:
# event:
# source:
# - "aws.ec2"
# detail-type:
# - "EC2 Instance State-change Notification"
# detail:
# state:
# - pending
# Define function environment variables here
# environment:
# variable2: value2
# you can add CloudFormation resource templates here
#resources:
# Resources:
# NewResource:
# Type: AWS::S3::Bucket
# Properties:
# BucketName: my-new-bucket
# Outputs:
# NewOutput:
# Description: "Description for the output"
# Value: "Some output value"
|
serverless.yml
|
AWSTemplateFormatVersion: '2010-09-09'
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html
Parameters:
SSESpecificationSSEEnabled:
Type: String
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-dax-cluster-ssespecification.html#cfn-dax-cluster-ssespecification-sseenabled
AllowedValues:
- 'true'
- 'false'
Default: null
Description:
Type: String
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-description
Default: null
ReplicationFactor:
Type: Number
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-replicationfactor
ParameterGroupName:
Type: String
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-parametergroupname
Default: null
IAMRoleARN:
Type: String
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-iamrolearn
SubnetGroupName:
Type: String
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-subnetgroupname
Default: null
PreferredMaintenanceWindow:
Type: String
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-preferredmaintenancewindow
Default: null
NotificationTopicARN:
Type: String
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-notificationtopicarn
Default: null
NodeType:
Type: String
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-nodetype
ClusterName:
Type: String
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-clustername
Default: null
Tags:
Type: Json
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html#cfn-dax-cluster-tags
Default: null
Resources:
Resource:
Type: AWS::DAX::Cluster
Description: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html
Properties:
SSESpecification:
SSEEnabled: !Ref 'SSESpecificationSSEEnabled'
Description: !Ref 'Description'
ReplicationFactor: !Ref 'ReplicationFactor'
ParameterGroupName: !Ref 'ParameterGroupName'
IAMRoleARN: !Ref 'IAMRoleARN'
SubnetGroupName: !Ref 'SubnetGroupName'
PreferredMaintenanceWindow: !Ref 'PreferredMaintenanceWindow'
NotificationTopicARN: !Ref 'NotificationTopicARN'
NodeType: !Ref 'NodeType'
ClusterName: !Ref 'ClusterName'
Tags: !Ref 'Tags'
Outputs:
  ClusterDiscoveryEndpoint:
    Value:
      Fn::GetAtt:
        - Resource
        - ClusterDiscoveryEndpoint
  Arn:
    Value:
      Fn::GetAtt:
        - Resource
        - Arn
|
aws-cloudformation-resources-all-fields/AWS-DAX-Cluster/16.1.0/product.template-us-west-2.yaml
|
apiVersion: apps/v1
# Source: cni/templates/daemonset.yaml
# This manifest installs the Istio install-cni container, as well
# as the Istio CNI plugin and config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
metadata:
name: istio-cni-node
spec:
  # apps/v1 DaemonSets require an explicit selector that matches the pod template labels.
  selector:
    matchLabels:
      k8s-app: istio-cni-node
  template:
    metadata:
      labels:
        k8s-app: istio-cni-node
        sidecar.istio.io/inject: "false"
annotations:
sidecar.istio.io/inject: "false"
spec:
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 5
serviceAccountName: istio-cni
priorityClassName: system-node-critical
nodeSelector:
kubernetes.io/os: linux
containers:
# This container installs the Istio CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: "docker.io/istio/install-cni:1.12.1"
command: ["install-cni"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: istio-cni-config
key: cni_network_config
- name: CNI_NET_DIR
value: /etc/cni/net.d
# Deploy as a standalone CNI plugin or as chained?
- name: CHAINED_CNI_PLUGIN
value: "true"
- name: REPAIR_ENABLED
value: "true"
- name: REPAIR_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: REPAIR_LABEL_PODS
value: "true"
# Set to true to enable pod deletion
- name: REPAIR_DELETE_PODS
value: "true"
- name: REPAIR_RUN_AS_DAEMON
value: "true"
- name: REPAIR_SIDECAR_ANNOTATION
value: "sidecar.istio.io/status"
- name: REPAIR_INIT_CONTAINER_NAME
value: "istio-validation"
- name: REPAIR_BROKEN_POD_LABEL_KEY
value: "cni.istio.io/uninitialized"
- name: REPAIR_BROKEN_POD_LABEL_VALUE
value: "true"
resources:
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-net-dir
mountPath: /host/etc/cni/net.d
- name: cni-log-dir
mountPath: /var/run/istio-cni
livenessProbe:
httpGet:
port: 8000
path: /healthz
initialDelaySeconds: 5
readinessProbe:
httpGet:
port: 8000
path: /readyz
securityContext:
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
volumes:
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used for UDS log
- name: cni-log-dir
hostPath:
path: /var/run/istio-cni
tolerations:
# Make sure istio-cni-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
|
deploy/base/istio/cni/daemonset.istio-cni-node.k8s.yaml
|
# Site settings
# These are used to personalize your new site. If you look in the HTML files,
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
# You can create any custom variable you would like, and they will be accessible
# in the templates via {{ site.myvariable }}.
title: Home
locale: "fi"
author:
name: <NAME>
email: <EMAIL>
description: >- # this means to ignore newlines until "baseurl:"
<NAME> kotisivut on tehty nettisivujen tekemisen tueksi
liittyen omaan oppimispolkuuni.
Julkaisu on GitHub Inc -palvelussa.
show_excerpts: false # set to true to show excerpts on the homepage
sass:
sass_dir: _sass
# Minima date format
# refer to https://shopify.github.io/liquid/filters/date/ if you want to customize this
minima:
date_format: "%d %b, %Y"
  skin: solarized-dark # classic (default), dark, solarized, solarized-dark
# generate social links in footer
social_links:
twitter: timohoo1
github: timohoo
# devto: jekyll
# dribbble: jekyll
# facebook: jekyll
# flickr: jekyll
instagram: ""
# linkedin: jekyll
# pinterest: jekyll
# youtube: jekyll
# youtube_channel: UC8CXR0-3I70i1tfPg1PAE1g
# youtube_channel_name: CloudCannon
telegram: ""
# googleplus: +jekyll
# microdotblog: jekyll
# keybase: jekyll
# Mastodon instances
# mastodon:
# - username: jekyll
# instance: example.com
# - username: jekyll2
# instance: example.com
# GitLab instances
# gitlab:
# - username: jekyll
# instance: example.com
# - username: jekyll2
# instance: example.com
# If you want to link only specific pages in your header, uncomment
# this and add the path to the pages in order as they should show up
header_pages:
- tokapuu.md
# - puu.md
- albumi.md
- kuvagalleria.md
# Fix into github Authentication error: "GitHub Metadata: No GitHub API authentication could be found."
github: [metadata]
# Build settings
theme: minima
plugins:
- jekyll-feed
- jekyll-seo-tag
# Outputting
permalink: /:categories/:title/
#paginate: 5 # amount of posts to show
#paginate_path: /page:num/
timezone: Europe/Helsinki # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# Exclude from processing.
# The following items will not be processed, by default. Create a custom list
# to override the default setting.
# exclude:
# - Gemfile
# - Gemfile.lock
# - node_modules
# - vendor/bundle/
# - vendor/cache/
# - vendor/gems/
# - vendor/ruby/
# Kuvagallerian hakemistot:
imagesurl: "/assets/images/gallery/"
thumbsurl: "/assets/images/thumbs/"
# defaults:
#   - scope:
#       path: "_pages"
#       type: default
# Archives — jekyll-archives settings must be top-level keys;
# nested under `defaults:` Jekyll would silently ignore them.
category_archive:
  type: liquid
  path: /categories/
tag_archive:
  type: liquid
  path: /tags/
|
_config.yml
|
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubedialer-{{ .Release.Name }}
labels:
app: kube-dialer
heritage: "{{ .Release.Service }}"
release: "{{ .Release.Name }}"
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
annotations:
kruz.io/component.role: kube-dialer
kruz.io/component.tier: edge-kube-dialer
kruz.io/info.vendor: Kruz IO Inc.
spec:
selector:
matchLabels:
app: kube-dialer
replicas: {{default 1 .Values.replicas}}
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app: kube-dialer
heritage: "{{ .Release.Service }}"
release: "{{ .Release.Name }}"
spec:
volumes:
# The static assets
- name: static
emptyDir: {}
# Map all keys to files.
- name: dialer-config
configMap:
name: dialer-config
- name: dialer-tiles
configMap:
name: dialer-tiles
# Pod Security
automountServiceAccountToken: false
securityContext:
runAsNonRoot: true
runAsUser: 10001
fsGroup: 1000
initContainers:
- name: dialer-staticdata
image: {{ .Values.image.name }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: [ "sh" ]
args: [ "-c", "cp -r /www/* /static && cp /config/config.toml /static/"]
volumeMounts:
- name: dialer-config
mountPath: /config
- name: static
mountPath: /static
containers:
- name: dialer
image: {{ .Values.image.name }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
#Container Security
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
command: [ "hugo" ]
args: [ "server", "--bind", "0.0.0.0", "-p", "{{ .Values.dialerPort }}","-s", "/static", "--log", "--verboseLog"]
volumeMounts:
- name: static
mountPath: /static
- name: dialer-tiles
mountPath: /static/content/tiles
ports:
- name: http
containerPort: {{ .Values.dialerPort }}
protocol: TCP
|
deploy/charts/dialer/templates/dialer-deployment.yaml
|
resources:
- bases/azure.microsoft.com_storages.yaml
- bases/azure.microsoft.com_cosmosdbs.yaml
- bases/azure.microsoft.com_rediscaches.yaml
- bases/azure.microsoft.com_eventhubs.yaml
- bases/azure.microsoft.com_resourcegroups.yaml
- bases/azure.microsoft.com_eventhubnamespaces.yaml
- bases/azure.microsoft.com_consumergroups.yaml
- bases/azure.microsoft.com_keyvaults.yaml
- bases/azure.microsoft.com_azuresqlservers.yaml
- bases/azure.microsoft.com_azuresqlusers.yaml
- bases/azure.microsoft.com_azuresqldatabases.yaml
- bases/azure.microsoft.com_azuresqlfirewallrules.yaml
- bases/azure.microsoft.com_azuresqlactions.yaml
- bases/azure.microsoft.com_azuresqlfailovergroups.yaml
- bases/azure.microsoft.com_blobcontainers.yaml
- bases/azure.microsoft.com_azuredatalakegen2filesystems.yaml
- bases/azure.microsoft.com_appinsights.yaml
- bases/azure.microsoft.com_postgresqlservers.yaml
- bases/azure.microsoft.com_postgresqldatabases.yaml
- bases/azure.microsoft.com_postgresqlfirewallrules.yaml
- bases/azure.microsoft.com_apimservices.yaml
- bases/azure.microsoft.com_apimgmtapis.yaml
- bases/azure.microsoft.com_virtualnetworks.yaml
- bases/azure.microsoft.com_keyvaultkeys.yaml
- bases/azure.microsoft.com_azuresqlvnetrules.yaml
- bases/azure.microsoft.com_azuremanageddisks.yaml
# +kubebuilder:scaffold:crdkustomizeresource
#patches:
# [WEBHOOK] patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_eventhubs.yaml
#- patches/webhook_in_resourcegroups.yaml
#- patches/webhook_in_eventhubnamespaces.yaml
#- patches/webhook_in_consumergroups.yaml
#- patches/webhook_in_keyvaults.yaml
#- patches/webhook_in_azuresqlservers.yaml
#- patches/webhook_in_azuresqlusers.yaml
#- patches/webhook_in_azuresqldatabases.yaml
#- patches/webhook_in_azuresqlfirewallrules.yaml
#- patches/webhook_in_azuresqlactions.yaml
#- patches/webhook_in_azuresqlfailovergroups.yaml
#- patches/webhook_in_blobcontainers.yaml
#- patches/webhook_in_azuredatalakegen2filesystems.yaml
#- patches/webhook_in_appinsights.yaml
#- patches/webhook_in_postgresqlservers.yaml
#- patches/webhook_in_postgresqldatabases.yaml
#- patches/webhook_in_postgresqlfirewallrules.yaml
#- patches/webhook_in_apimservices.yaml
#- patches/webhook_in_apimgmtapis.yaml
#- patches/webhook_in_virtualnetworks.yaml
#- patches/webhook_in_keyvaultkeys.yaml
#- patches/webhook_in_azuresqlvnetrules.yaml
#- patches/webhook_in_azuremanageddisks.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
# [CAINJECTION] patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_eventhubs.yaml
#- patches/cainjection_in_resourcegroups.yaml
#- patches/cainjection_in_eventhubnamespaces.yaml
#- patches/cainjection_in_consumergroups.yaml
#- patches/cainjection_in_keyvaults.yaml
#- patches/cainjection_in_azuresqlservers.yaml
#- patches/cainjection_in_azuresqlusers.yaml
#- patches/cainjection_in_azuresqldatabases.yaml
#- patches/cainjection_in_azuresqlfirewallrules.yaml
#- patches/cainjection_in_azuresqlactions.yaml
#- patches/cainjection_in_azuresqlfailovergroups.yaml
#- patches/cainjection_in_blobcontainers.yaml
#- patches/cainjection_in_azuredatalakegen2filesystems.yaml
#- patches/cainjection_in_appinsights.yaml
#- patches/cainjection_in_postgresqlservers.yaml
#- patches/cainjection_in_postgresqldatabases.yaml
#- patches/cainjection_in_postgresqlfirewallrules.yaml
#- patches/cainjection_in_apimservices.yaml
#- patches/cainjection_in_apimgmtapis.yaml
#- patches/cainjection_in_virtualnetworks.yaml
#- patches/cainjection_in_keyvaultkeys.yaml
#- patches/cainjection_in_azuresqlvnetrules.yaml
#- patches/cainjection_in_azuremanageddisks.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml
|
config/crd/kustomization.yaml
|
#By default is set to true
#if is set to true only the owner of the warp can delete itself.
#if is set to false everyone can delete a warp.
deletewarp:
onlyOwner: false
#By default is true
#If you turn off this you can use this plugin without the permissions
usePermission: false
#By default is set to true
warpRequest:
#If you set this to false you disable the request to warp to a player
sendRequest: true
requestLifetime: "60"
#By default you can't disable the request from a player to him
#Modify this to change the prefix of the messages in the chat
#Dont remove the space at the end of the line, or everything will be written attached
messagiesPrefix: "&5[EasyWarp] "
messagies:
#In this section you can change the messages for the warp
  #Change the number after the "&" to change the color
warp:
warped: "&b↑ &8» &7You warped yourself to {warp}"
notExist: "&b↑ &8» &7There is no warp with this name"
arg: "&b↑ &8» &cInsert a warp name"
permission: "&b↑ &8» &cYou not have this permission"
  #Change this string to change the messages when a player sets a warp
setwarp:
succesfullySet: "&b↑ &8» &7Warp successfully set"
alredyExist: "&b↑ &8» &7This warp already exist"
arg: "&b↑ &8» &cInsert a valid warp name"
permission: "&b↑ &8» &cYou not have this permission"
delwarp:
succesfullyDeleted: "&b↑ &8» &7Successfully deleted the warp"
permission: "&b↑ &8» &cYou not have this permission"
onlyOwner: "&b↑ &8» &cOnly the owner of this warp can delete it"
notExixst: "&b↑ &8» &7There is no warp with this name"
arg: "&b↑ &8» &cInsert a valid warp name"
warps:
permission: "&b↑ &8» &cYou not have this permission"
#Dont modify {warps} and {OwnerName} or will not work
warpList: "&b↑ &8» &7Name: &b{warp} &6Owner: &b{OwnerName}"
noWarpIist: "&b↑ &8» &cThere is no warp in the list"
warpreq:
permission: "&b↑ &8» &cYou not have this permission"
warpToYou: "&b↑ &8» &7WTF ARE YOU DOING"
succesfullyWarpedsender: "&b↑ &8» &7You have successfully sent a request to {player}"
succesfullyWarpedrecivier: "&b↑ &8» &7You have received a request from {player}"
offline: "&b↑ &8» &7This player is offline"
aredySent: "&b↑ &8» &cYou have already sent a request to {player}"
arg: "&b↑ &8» &cInsert a player namer"
expired: "&b↑ &8» &7The request to {player} expired"
expiredRecivier: "&b↑ &8» &7The request from {player} expired"
warphere:
permission: "&b↑ &8» &cYou not have this permission"
warpToYou: "&b↑ &8» &7WTF ARE YOU DOING"
succesfullyWarpedsender: "&b↑ &8» &7You have successfully sent a request to {player}"
succesfullyWarpedrecivier: "&b↑ &8» &7You have received a request from {player}"
offline: "&b↑ &8» &7This player is offline"
alredySent: "&b↑ &8» &7You have already sent a request to {player}"
expired: "&b↑ &8» &7The request to {player} expired"
expiredRecivier: "&b↑ &8» &7The request from {player} expired"
arg: "&b↑ &8» &cInsert a player name"
acceptWarp:
#for the warphere these 2 work in reverse
    acceptedSender: "&b↑ &8» &7You will be warped to {player}"
acceptedRecivier: "&b↑ &8» &7{player} is warping to you"
###############################################à
noRequest: "&b↑ &8» &7There is no request"
toYou: "&b↑ &8» &7WHY ARE YOU DOING THIS"
offline: "&b↑ &8» &7This player is offline"
arg: "&b↑ &8» &cInsert a player name"
warpno:
reject: "&b↑ &8» &7You removed the request to {player}"
    rejectRecivier: "&b↑ &8» &7{player} declined your request"
notResponder: "&b↑ &8» &7You cannot decline your request \nType /warpremove {player} to remove the request"
noRequest: "&b↑ &8» &7There is no request from this player"
toYou: "&b↑ &8» &7WHY ARE YOU DOING THIS"
offline: "&b↑ &8» &7This player is offline"
arg: "&b↑ &8» &cInsert a player name"
warpRemove:
removed: "&b↑ &8» &7Request destroyed"
removedRecivier: "&b↑ &8» &7The request from {player} was destroyed"
noRequest: "&b↑ &8» &7There is no request to this player"
    noRecivier: "&b↑ &8» &7You cannot remove your request \nType /warpremove {player} to remove the request"
toYou: "&b↑ &8» &7WHY ARE YOU DOING THIS"
offline: "&b↑ &8» &7This player is offline"
arg: "&b↑ &8» &cInsert a player name"
warpList:
noPending: "&b↑ &8» &7You have no pending requests"
pending: "&b↑ &8» &cYou have pending requests from {player}"
|
src/main/resources/config.yml
|
sudo: false
language: rust
env:
global:
- CRATE_NAME=cobalt
- COVERALLS=0.0.6
- WHEN=0.1.0
matrix:
include:
# Linux
- env: TARGET=x86_64-unknown-linux-gnu
# OSX
- env: TARGET=x86_64-apple-darwin
os: osx
# Testing other channels
- env: TARGET=x86_64-unknown-linux-gnu
rust: beta
- env: TARGET=x86_64-unknown-linux-gnu
rust: nightly
- rust: nightly-2017-10-09
env: CLIPPY_VERS="0.0.165"
install:
- if [[ `cargo +nightly-2017-10-09 clippy -- --version` != $CLIPPY_VERS* ]] ; then travis_wait cargo +nightly-2017-10-09 install clippy --vers "$CLIPPY_VERS" --force; fi
- export PATH=$HOME/.cargo/bin:$PATH
script:
- cargo +nightly-2017-10-09 clippy --features "syntax-highlight,sass" -- -D warnings
- rust: stable
env: RUSTFMT=0.8.6
install:
- if [[ `rustfmt --version` != $RUSTFMT* ]] ; then travis_wait cargo install rustfmt --force --vers $RUSTFMT; fi
- export PATH=$HOME/.cargo/bin:$PATH
script:
- cargo fmt -- --write-mode=diff
install:
- curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $TRAVIS_RUST_VERSION
- source ~/.cargo/env || true
- if [[ `cargo-when --version` != *$WHEN ]] ; then travis_wait cargo install cargo-when --force --vers $WHEN; fi
# - travis_wait cargo install cargo-travis --force --vers $COVERALLS
script:
- rustc -Vv
- cargo -V
- cargo check --verbose
- cargo check --verbose --features "syntax-highlight,sass"
- cargo test --verbose
- cargo test --verbose --features "syntax-highlight,sass"
before_deploy:
- sh ci/before_deploy.sh
deploy:
provider: releases
api_key:
    secure: "<KEY>"
file_glob: true
file: $CRATE_NAME-$TRAVIS_TAG-$TARGET.*
on:
condition: $TRAVIS_RUST_VERSION = stable
tags: true
skip_cleanup: true
after_success:
- cmake --version
- cargo when --channel stable -e TARGET=x86_64-unknown-linux-gnu coverage
addons:
apt:
packages:
# necessary for `cargo coveralls`
- libcurl4-openssl-dev
- libelf-dev
- libdw-dev
- binutils-dev
- cmake
cache:
apt: true
directories:
- $HOME/.cargo
- target/debug/deps
- target/debug/build
before_cache:
# Travis can't cache files that are not readable by "others"
- chmod -R a+r $HOME/.cargo
notifications:
email:
on_success: never
webhooks:
urls:
- https://webhooks.gitter.im/e/7e08f15b115326957c31
on_success: change # options: [always|never|change] default: always
on_failure: always # options: [always|never|change] default: always
on_start: never # options: [always|never|change] default: always
|
.travis.yml
|
apiVersion: batch/v1
kind: Job
metadata:
name: test
spec:
template:
spec:
restartPolicy: Never
initContainers:
- name: k8psh
image: 76eddge/k8psh
imagePullPolicy: Never
args: [ --install, /k8psh/bin/k8pshd ]
env:
- name: K8PSH_DEBUG # To debug, modify the Dockerfile to have cmake create a debug build
value: 'Process, Main, Configuration'
volumeMounts:
- mountPath: /k8psh/bin
name: k8psh-bin
containers:
- name: server
image: alpine
command: [ /bin/sh, -c, '/k8psh/bin/k8pshd --config=/workspace-server/test/config/k8s.conf --name=server --max-connections=3' ]
env:
- name: K8PSH_DEBUG # To debug, modify the Dockerfile to have cmake create a debug build
value: 'Process, Main, Configuration'
- name: K8PSH_TEST
value: 'ServerTest'
resources:
requests:
memory: 100Mi
cpu: 100m
volumeMounts:
- mountPath: /workspace-server
name: workspace
- mountPath: /k8psh/bin
name: k8psh-bin
readOnly: true
- name: client
image: ubuntu
tty: true
command: [ /bin/sh, -c, 'BINDIR=$(mktemp -d); cd /workspace/test && k8pshd --executable-directory=$BINDIR && uname -a && printenv K8PSH_TEST && pwd && $BINDIR/uname -a && $BINDIR/printenv K8PSH_TEST && $BINDIR/pwd' ]
env:
- name: K8PSH_CONFIG
value: /workspace/test/config/k8s.conf
- name: K8PSH_DEBUG # To debug, modify the Dockerfile to have cmake create a debug build
value: 'Process, Main, Configuration'
- name: K8PSH_TEST
value: 'ClientTest'
- name: PATH
value: '/bin:/usr/bin:/k8psh/bin'
resources:
requests:
memory: 100Mi
cpu: 100m
volumeMounts:
- mountPath: /workspace
name: workspace
- mountPath: /k8psh/bin
name: k8psh-bin
readOnly: true
volumes:
- name: k8psh-bin
emptyDir:
medium: Memory
- name: workspace
hostPath:
path: "${GITHUB_WORKSPACE}"
type: Directory
|
test/k8s/test.yaml
|
WebApi:
Host: localhost
Port: 20000
IsSecure: false
UseImpersonation: false
Authentication:
Scheme: IntegratedWindowsAuthentication
Config:
Authorization:
AllowAnonymous: true
Providers:
- Type: Synapse.Authorization:WindowsPrincipalProvider
AppliesTo:
ServerRole: Controller, Node
        Config:
          # specify an LdapRoot if using Groups
          # (quoted: a bare {...} placeholder would parse as a YAML flow mapping)
          LdapRoot: '{LDAP://...}'
          # a ListSource is a serialized list of Users/Groups which is checked for changes on every authorization call
          # see below for example
          ListSourcePath: '{file path}\myAuthorizationList.yaml'
# staticly declared Users/Groups, static lists and ListSource can be used together in an additive fashion
Users:
Allowed:
- domain/user0
- domain/user1
Denied:
- domain/user2
Groups:
Allowed:
- group0
Denied:
- group1
- Type: Synapse.Authorization.Suplex:SuplexProvider
AppliesTo:
ServerRole: Admin
        Config:
          Connection:
            Type: File
            # quoted: bare {...} placeholders would parse as YAML flow mappings
            Path: '{file path}\{suplexFile}.splx'
          # a ListSource is a serialized list of Users/Groups which is checked for changes on every authorization call
          # see below for example
          ListSourcePath: '{file path}\myAuthorizationList.yaml'
# staticly declared Users/Groups, static lists and ListSource can be used together in an additive fashion
Users:
Allowed:
- domain/user0
- domain/user1
Denied:
- domain/user2
Groups:
Allowed:
- group0
Denied:
- group1
- Type: Synapse.Authorization.Suplex:SuplexProvider
AppliesTo:
ServerRole: Admin
Topics:
- AutoUpdate
        Config:
          Connection:
            Type: File
            # quoted: bare {...} placeholders would parse as YAML flow mappings
            Path: '{file path}\{suplexFile}.splx'
          ListSourcePath: '{file path}\{fileName}.yaml'
# Example of ListSource: myAuthorizationList.yaml
# Static lists and ListSource can be used together in an additive fashion.
Users:
Allowed:
- domain/user0
- domain/user1
Denied:
- domain/user2
Groups:
Allowed:
- group0
Denied:
- group1
# The Authorization section may be omitted entirely if not required, or declared in a minimal setup
# (shown commented out — a second top-level `WebApi:` key would be a duplicate-key YAML error):
# WebApi:
#   Authorization:
#     AllowAnonymous: true
#     Providers:
|
Synapse.Server.config.yaml/WebApi.Authorization.yaml
|
l_traditional_chinese:
########################
# Lava Fortress
# Code by Malthus
# Text by ViolentBeetle
########################
#First Anomaly
mem_lava_fortress_category:0 "焰火之內"
mem_lava_fortress_category_desc:0 "深入的掃瞄顯示在星球的表面上有著不尋常的地勢起伏,很有可能是一座人造建築。"
#Second Anomaly (Loop)
mem_lava_fortress_loop_category:0 "熔岩要塞"
mem_lava_fortress_loop_category_desc:0 "這座古老的建築物對我們而言依然是充滿謎團,我們需要找出答案..."
#simple fail - thats it
mem_lava_fortress.1.name:0 "只是熱空氣而已"
mem_lava_fortress.1.desc:0 "看起來這些奇怪的形狀原來只不過是充斥著熱風的高地,再加上不平均密度的熔岩以致結果有所扭曲,沒有顯示任何人為的活動。"
mem_lava_fortress.1.a:0 "繼續吧。"
#hard fail - start the loop
mem_lava_fortress.2.name:0 "危險的房間"
mem_lava_fortress.2.desc:0 "我們的考古團隊成功利用耐熱性能極強的工具,鑽探這個外星的哨站。他們發現一些矮小而強壯的外星人,很有可能是某類哺乳動物。在[mem_lava_fortress_leader.GetName]更深入地調查這個地方時,恰巧發掘了被[mem_lava_fortress_leader.GetSheHe]所認為的訓練場所。很不智地,[mem_lava_fortress_leader.GetName]不小心拉了一下控制桿,啟動了某些尖刺物機關不斷伸縮,有可能這些機關的用意是訓練使用者的閃避技巧。\n\n這看起來[mem_lava_fortress_leader.GetName]以及大部分 [mem_lava_fortress_leader.GetHisHer]的人員不太擅於這一方面的活動..."
mem_lava_fortress.2.a:0 "派出另一隊人馬,我們不能於這處止步!"
mem_lava_fortress.2.b:0 "這座建築物簡直是一個死亡陷阱!我們要把它封起來!"
#hard fail - ends the anomaly
mem_lava_fortress.3.name:0 "火焰中的死亡"
mem_lava_fortress.3.desc:0 "研究團隊成功找到在星球表面上一座被棄置的建築物。可是,他們在前廳進行探索時,[mem_lava_fortress_leader.GetName]不小心踩中某種類似踏板的東西,更從外面引來了像河流般的熔岩由天花而降,灌注了整座建築物。整個要塞連帶我們的科學家團隊都埋葬於此。"
mem_lava_fortress.3.a:0 "太可怕了!"
#hard fail - ends the anomaly
mem_lava_fortress.4.name:0 "大型塌陷"
mem_lava_fortress.4.desc:0 "這個奇形怪狀的物體原來是某種外星人的哨站。可是,就在裡面探索的時候,[mem_lava_fortress_leader.GetName]不智地拉了一下位於其中一個房間內的控制桿,令到唯一一枝支撐著整座建築物的支柱突然折斷,導致整座建築物塌陷並完全被摧毀。"
mem_lava_fortress.4.a:0 "太可怕了!"
#hard fail - continues the loop
mem_lava_fortress.5.name:0 "更多陷阱"
mem_lava_fortress.5.desc:0 "另一支科研團隊成功進入該建築物,不過儘管他們最大的努力,不去碰任何有可能帶來危險的控制桿, [mem_lava_fortress_leader.GetName]不小心踩中某種類似踏板的東西,,更從外面引來了像河流般的熔岩由天花而降,灌注了[mem_lava_fortress_leader.GetSheHe]和[mem_lava_fortress_leader.GetHisHer]的所在地。"
mem_lava_fortress.5.a:0 "派出另一隊人馬,我們不能放棄!"
mem_lava_fortress.5.b:0 "這座建築物簡直是一個死亡陷阱!我們要把它封起來!"
#hard fail - ends the anomaly
mem_lava_fortress.6.name:0 "火焰中的死亡"
mem_lava_fortress.6.desc:0 "研究團隊成功進入這座要塞,可是,他們在前廳進行探索時,[mem_lava_fortress_leader.GetName] 不小心踩中某種類似踏板的東西,更從外面引來了像河流般的熔岩由天花而降,灌注了整座建築物。整個要塞連帶我們的科學家團隊都埋葬於此。"
mem_lava_fortress.6.a:0 "太可怕了!"
#hard fail - ends the anomaly
mem_lava_fortress.7.name:0 "大型塌陷"
mem_lava_fortress.7.desc:0 "這個奇形怪狀的物體原來是某種外星人的哨站。可是,就在裡面探索的時候,[mem_lava_fortress_leader.GetName] 不智地拉了一下位於其中一個房間內的控制桿,令到唯一一枝支撐著整座建築物的支柱突然折斷,導致整座建築物塌陷並完全被摧毀。"
mem_lava_fortress.7.a:0 "太可怕了!"
#Success in first try
mem_lava_fortress.10.name:0 "熔岩要塞"
mem_lava_fortress.10.desc:0 "我們的考古團隊成功利用耐熱性能極強的工具,鑽探這個外星的哨站。他們發現一些矮小而強壯的外星人,很有可能是某類哺乳動物。我們可以透過牆壁上、地面上以及外星人日常所使用的物件當中的浮雕,可以將以前大部分的歷史重組於眼前。這座設施在這種極端環境下被設立,是希望在地底深處可以找到一種非常珍貴的礦脈 ─ 對他們而言是價值連城。\n\n透過將熔岩泵送至地面,他們能夠提取到近乎不會耗盡的能源,而且在多層熔岩會流經的位置設立防禦系統,更找出一種方法排除掉「不合資格」的族人─ 從他們詳盡的背景得知。\n\n最初,這個哨站穩定地發展,更吸引了不少的人慕名而來。很多居住者不只在礦場內工作,他們亦轉向藝術發展,製作一些以他們自己樣貌為藍本、具有錯綜複雜圖案的物品,上面刻著不規則碎片形狀的形象,美麗動人的同時卻令人不寒而慄。\n\n儘管經常被附近星系的敵人所佔據,這個殖民地依然捱過了幾個世代。不過,當原住民鑽探工程差不多來到尾聲時,當時肯定發生了某些災難性事件。從傷亡情況可以讓我們判斷出,這裡曾經發生過一場打擊極大的內戰,但是從他們遺留下來的儲藏室來看,各式各樣的食物供應都一應俱全,所以排除了物資短缺作為開戰的原因。其中一位科學家得出一個有趣的發現:他們沒有配備保護足部的護具,可是在他們的浮雕上卻發現不少有關護具的圖案,很可能是一種社會地位的象徵。不幸地,我們的隊伍未能找到更詳細的資料。"
mem_lava_fortress.10.a:0 "這些外星人真的如傳言中那麼瘋狂..."
mem_lava_fortress.10.b:0 "這個品種既獨特又引人入勝..."
mem_lava_fortress.10.c:0 "我們有沒有足夠的鞋子!? "
#Success in later try
mem_lava_fortress.11.name:0 "血火之間"
mem_lava_fortress.11.desc:0 "我們的考古團隊成功利用耐熱性能極強的工具,鑽探這個外星的哨站。他們發現一些矮小而強壯的外星人,很有可能是某類哺乳動物。我們可以透過牆壁上、地面上以及外星人日常所使用的物件當中的浮雕,可以將以前大部分的歷史重組於眼前。這座設施在這種極端環境下被設立,是希望在地底深處可以找到一種非常珍貴的礦脈 ─ 對他們而言是價值連城。\n\n透過將熔岩泵送至地面,他們能夠提取到近乎不會耗盡的能源,而且在多層熔岩會流經的位置設立防禦系統,更找出一種方法排除掉「不合資格」的族人─ 從他們詳盡的背景得知。\n\n最初,這個哨站穩定地發展,更吸引了不少的人慕名而來。很多居住者不只在礦場內工作,他們亦轉向藝術發展,製作一些以他們自己樣貌為藍本、具有錯綜複雜圖案的物品,上面刻著不規則碎片形狀的形象,美麗動人的同時卻令人不寒而慄。\n\n儘管經常被附近星系的敵人所佔據,這個殖民地依然捱過了幾個世代。不過,當原住民鑽探工程差不多來到尾聲時,當時肯定發生了某些災難性事件。從傷亡情況可以讓我們判斷出,這裡曾經發生過一場打擊極大的內戰,但是從他們遺留下來的儲藏室來看,各式各樣的食物供應都一應俱全,所以排除了物資短缺作為開戰的原因。其中一位科學家得出一個有趣的發現:他們沒有配備保護足部的護具,可是在他們的浮雕上卻發現不少有關護具的圖案,很可能是一種社會地位的象徵。不幸地,我們的隊伍未能找到更詳細的資料。"
|
mem_sulfuric_transplant/reference_files/mem_stable_20190322/localisation/mem_lava_fortress_l_traditional_chinese.yml
|
# GitLab CI pipeline: deploys the docker-compose stack in ./app to an
# intranet or online host over SSH. Build happens elsewhere; this file
# only contains the deploy stage.
services:
  - docker:dind # https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#use-docker-in-docker-workflow-with-docker-executor

variables:
  # NOTE(review): assigning a variable to itself is a no-op; these lines only
  # document that the values are injected by GitLab (project/group settings).
  DOCKER_REGISTRY_ADDR: $DOCKER_REGISTRY_ADDR # wird von Gitlab hereingereicht
  DOCKER_REGISTRY_USER: $DOCKER_REGISTRY_USER # wird von Gitlab hereingereicht
  DOCKER_REGISTRY_TOKEN: $DOCKER_REGISTRY_TOKEN # wird von Gitlab hereingereicht
  DOCKER_HOST: 'tcp://docker:2375'
  DOCKER_DRIVER: overlay2
  DOCKER_TLS_CERTDIR: ""

before_script:
  - cd app # Gitlab startet Jobs in Root-Pfad. Die meisten folgenden Aufrufe sind jedoch auf den Sub-Pfad bezogen.

stages:
  - deploy

# Hidden job (leading dot): not executed by the pipeline as written.
.deploy2intranet:
  image: jaromirpufler/docker-openssh-client
  stage: deploy
  variables:
    DEPLOY_HOST: ${SSH_DEPLOY_HOST_INTRANET}
    DEPLOY_USER: ${SSH_DEPLOY_USER_INTRANET}
  script:
    # FIX: ~/.ssh must exist and the deploy key must be installed before
    # ssh-keyscan/ssh/scp can work. deploy2online already did this; this job
    # would have failed on its first ssh call once enabled.
    - mkdir -p ~/.ssh
    - echo "${SSH_DEPLOY_KEY}" > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - ssh-keyscan -t rsa ${DEPLOY_HOST} > ~/.ssh/known_hosts 2> /dev/null
    - ssh ${DEPLOY_USER}@${DEPLOY_HOST} "mkdir -p ${CI_PROJECT_PATH} && ln -s \"\$(pwd)/.env\" ${CI_PROJECT_PATH}/.env || true"
    - scp docker-compose.yml ${DEPLOY_USER}@${DEPLOY_HOST}:${CI_PROJECT_PATH}/
    - scp -r scripts ${DEPLOY_USER}@${DEPLOY_HOST}:${CI_PROJECT_PATH}/
    - ssh ${DEPLOY_USER}@${DEPLOY_HOST} "echo '${DOCKER_REGISTRY_TOKEN}' | docker login -u ${DOCKER_REGISTRY_USER} --password-stdin ${DOCKER_REGISTRY_ADDR}"
    - ssh ${DEPLOY_USER}@${DEPLOY_HOST} "cd ${CI_PROJECT_PATH} && docker-compose pull --quiet && docker-compose up --detach --remove-orphans"
  only:
    - master

deploy2online:
  image: docker:git
  stage: deploy
  variables:
    DEPLOY_HOST: ${SSH_DEPLOY_HOST_ONLINE}
    DEPLOY_USER: ${SSH_DEPLOY_USER_ONLINE}
  script:
    # -p keeps the step idempotent if the image ever ships with ~/.ssh.
    - mkdir -p ~/.ssh
    - echo "${SSH_DEPLOY_KEY}" > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - ssh-keyscan -t rsa ${DEPLOY_HOST} > ~/.ssh/known_hosts 2> /dev/null
    - ssh ${DEPLOY_USER}@${DEPLOY_HOST} "mkdir -p ${CI_PROJECT_PATH} && ln -s \"\$(pwd)/.env\" ${CI_PROJECT_PATH}/.env || true"
    # The target host cannot reach the registry: pull images locally, save
    # them as tarballs, ship them over scp, and docker-load them remotely.
    - echo "${DOCKER_REGISTRY_TOKEN}" | docker login -u ${DOCKER_REGISTRY_USER} --password-stdin ${DOCKER_REGISTRY_ADDR}
    - for i in `sed -n -e "s/.*\(${DOCKER_REGISTRY_ADDR}\/[^[:space:]]*\).*/\1/p" docker-compose.yml`;
        do docker pull --quiet $i && docker save $i --output `echo $i | sed -e "s/.*\/\([^:]*\).*/\1.tar/g"`;
      done
    - scp *.tar ${DEPLOY_USER}@${DEPLOY_HOST}:${CI_PROJECT_PATH}/
    - ssh ${DEPLOY_USER}@${DEPLOY_HOST} "cd ${CI_PROJECT_PATH}"' && (for curfile in *.tar ; do docker load --input "$curfile" ; done) && rm -rf *.tar'
    - scp docker-compose.yml ${DEPLOY_USER}@${DEPLOY_HOST}:${CI_PROJECT_PATH}/
    - scp -r scripts ${DEPLOY_USER}@${DEPLOY_HOST}:${CI_PROJECT_PATH}/
    - ssh ${DEPLOY_USER}@${DEPLOY_HOST} "cd ${CI_PROJECT_PATH} && (docker-compose pull --ignore-pull-failures --quiet || true) && docker-compose up --detach --remove-orphans"
  only:
    - master
|
app/.gitlab-ci.yml
|
# Server Connections
# -------------------------------------------------
# This is list of labels with their connections to bind to. SWith this you
# have the ability to listen on multiple connections. You may connect your apps
# with specific labels, vhost and context paths.
# ### host
#
# Hostname or IP address to bind server to.
# ### port
#
# Set the port the server is listening on.
#
# To use a privileged port below 1024 you have to bind it using root privileges.
# This may be critical.
# The following three methods are possible to do this securely:
#
# 1. Use sudo to start the server and change back to the normal user
# after port binding.
#
# 2. Use ip-tables to forward to an unprivileged port:
# `sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 3000`
#
# 3. Use Nginx reverse-proxy on port 80 this helps also if you may run other
# things on port 80, too.
#
# 4. Give Normal user capability of using sockets as root
# sudo apt-get install libcap2-bin
# sudo setcap cap_net_bind_service=+ep `readlink -f \`which node\``
#
# Keep in mind to use different ports for different services like HTTP and
# HTTPS. Defaults to port 23174 if not set.
# ### tls
#
# The settings to use an SSL encryption on this connections:
#
# - pfx - PKCS#12 certificate containing certificate, private key and CA
# certificates to use for SSL
# - cert - public x509 certificate to use
# - key - private key to use for SSL
# - passphrase - passphrase for the private key or pfx if necessary
# - ca - authority certificate or array of authority certificates to check
# the remote host against"
# - ciphers - string describing the ciphers to use or exclude
# (http://www.openssl.org/docs/apps/ciphers.html#CIPHER_LIST_FORMAT)"
# - rejectUnauthorized - lag to check server certificate against the list of
# supplied CAs
# - secureProtocol - SSL method to use, e.g. TLSv1_method to force TLS version 1
# (the possible values depend on your installation of OpenSSL and
# are defined in the constant SSL_METHODS)"
# ### load
#
# The load settings will result in rejection of incoming requests with an
# "HTTP Server Timeout (503)" response if they are reached:
#
# - maxHeap - maximum V8 heap size
# - maxRss - maximum process RSS size
# - eventLoopDelay - maximum event loop delay duration in milliseconds
listener:
default:
#host: localhost
port: <<<env://PORT | 23174>>>
# Log Settings
# -------------------------------------------------
# The log settings are defined as array of possible loggers.
# ### bind
#
# Like described above this will define the area of requests to log.
# ### data
#
# This is a specific format selected of the possible formats: 'error', 'event'
# 'common', 'commonvhost', 'combined', 'referrer', 'extended', 'object'.
# ### file
#
# ### http
#
# ### mail
#
log:
- data: combined
file:
filename: http-access.log
datePattern: .yyyy-MM-dd
compress: true
- data: error
file:
filename: http-error.log
maxSize: 100MB
compress: true
# ### HEAP dump
#
# Should a HEAP dump be written in var/log? But keep in mind they take a lot of
# space and you should remove them later.
heapdump: false
|
var/src/config/server/http/index.yml
|
# Three master-eligible Elasticsearch 7.7.0 nodes plus one Kibana instance.
# NOTE(review): all three ES nodes are master-only (node.data=false); the
# data nodes of this "5node-cluster" are presumably defined elsewhere — confirm.
version: '3.7'
services:
  elasticsearch-one:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.7.0
    container_name: elasticsearch-one
    environment:
      - node.name=elasticsearch-one
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=elasticsearch-one,elasticsearch-two,elasticsearch-three
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m" # Set Xmx and Xms to no more than 50% of your physical RAM. Elasticsearch requires memory for purposes other than the JVM heap and it is important to leave space for this.
      - network.host=0.0.0.0
      - node.master=true
      - node.data=false
      # NOTE(review): discovery.zen.minimum_master_nodes is ignored by ES 7.x;
      # cluster.initial_master_nodes above is what bootstraps the cluster.
      - discovery.zen.minimum_master_nodes=2
      - discovery.seed_hosts=elasticsearch-two,elasticsearch-three
      # - discovery.zen.ping.unicast.hosts="elasticsearch-one","elasticsearch-two","elasticsearch-three" # Address has the format host:port or host. The host is either a host name to be resolved by DNS, an IPv4 address, or an IPv6 address
      - network.bind_host=0.0.0.0
      # - network.publish_host=192.168.3.11 IP of the host machine
      # - discovery.zen.ping.unicast.hosts=192.168.3.11,192.168.127.12,172.16.58.3 IP example
      # - discovery.zen.ping_timeout=3s (default) # use this when the elasticsearch nodes start with a gap of more than 3s; when you start all the services together you don't need to set this
      # path.data=/var/lib/elasticsearch(custom path) - default path is /usr/share/elasticsearch/data
      # path.logs=/var/log/elasticsearch(custom path) - default path is /usr/share/elasticsearch/logs
      # - xpack.license.self_generated.type=trial # Default 'basic'
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - elasticsearch-one-data:/usr/share/elasticsearch/data
    ports:
      # Port mappings quoted so YAML never mis-types digits-and-colon scalars.
      - "9200:9200"
    networks:
      - elastic
  elasticsearch-two:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.7.0
    container_name: elasticsearch-two
    environment:
      - node.name=elasticsearch-two
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=elasticsearch-one,elasticsearch-two,elasticsearch-three
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - network.host=0.0.0.0
      - node.master=true
      - node.data=false
      - discovery.zen.minimum_master_nodes=2
      - discovery.seed_hosts=elasticsearch-two,elasticsearch-three
      # - discovery.zen.ping.unicast.hosts="elasticsearch-one","elasticsearch-two","elasticsearch-three"
      - network.bind_host=0.0.0.0
      # - xpack.license.self_generated.type=trial
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - elasticsearch-two-data:/usr/share/elasticsearch/data
    ports:
      # FIX: the container still listens on 9200 (http.port is unchanged),
      # so "9201:9201" exposed a port nothing listens on. Map host 9201 -> 9200.
      - "9201:9200"
    networks:
      - elastic
  elasticsearch-three:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.7.0
    container_name: elasticsearch-three
    environment:
      - node.name=elasticsearch-three
      - cluster.name=es-docker-cluster
      - cluster.initial_master_nodes=elasticsearch-one,elasticsearch-two,elasticsearch-three
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - network.host=0.0.0.0
      - node.master=true
      - node.data=false
      - discovery.zen.minimum_master_nodes=2
      - discovery.seed_hosts=elasticsearch-two,elasticsearch-three
      #- discovery.zen.ping.unicast.hosts="elasticsearch-one","elasticsearch-two","elasticsearch-three"
      - network.bind_host=0.0.0.0
      # - xpack.license.self_generated.type=trial
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - elasticsearch-three-data:/usr/share/elasticsearch/data
    ports:
      # FIX: same as elasticsearch-two — map host 9202 to container port 9200.
      - "9202:9200"
    networks:
      - elastic
  kibana-one:
    image: docker.elastic.co/kibana/kibana:7.7.0
    container_name: kibana-one
    ports:
      - "5601:5601"
    environment:
      ELASTICSEARCH_URL: http://elasticsearch-one:9200
      ELASTICSEARCH_HOSTS: http://elasticsearch-one:9200
    networks:
      - elastic
volumes:
  elasticsearch-one-data:
    driver: local
  elasticsearch-two-data:
    driver: local
  elasticsearch-three-data:
    driver: local
    # driver_opts:
    #   o: bind
    #   type: none
    #   device: /var/enc/db/elasticsearch1/data
networks:
  elastic:
    driver: bridge
|
elasticsearch/5node-cluster/docker-compose.yml
|
items:
- uid: '@azure/servicefabric.HealthEvaluation'
name: HealthEvaluation
fullName: HealthEvaluation
children:
- '@azure/servicefabric.HealthEvaluation.aggregatedHealthState'
- '@azure/servicefabric.HealthEvaluation.description'
- '@azure/servicefabric.HealthEvaluation.kind'
langs:
- typeScript
type: interface
summary: Représente une évaluation d’intégrité qui décrit les données et l’algorithme utilisé par le Gestionnaire d’état pour évaluer l’intégrité d’une entité.
package: '@azure/servicefabric'
- uid: '@azure/servicefabric.HealthEvaluation.aggregatedHealthState'
name: aggregatedHealthState
fullName: aggregatedHealthState
children: []
langs:
- typeScript
type: property
summary: "L’état d’intégrité d’une entité de Service Fabric telles que le Cluster, nœud, Application, Service, Partition, etc. de réplica. Les valeurs possibles incluent\_: «\_Invalid\_», «\_Ok\_», «\_Avertissement\_», «\_Error\_», «\_Inconnu\_»"
optional: true
syntax:
content: 'aggregatedHealthState?: HealthState'
return:
type:
- '@azure/servicefabric.HealthState'
package: '@azure/servicefabric'
- uid: '@azure/servicefabric.HealthEvaluation.description'
name: description
fullName: description
children: []
langs:
- typeScript
type: property
summary: 'Description de l’évaluation d’intégrité, qui représente un résumé du processus d’évaluation.'
optional: true
syntax:
content: 'description?: undefined | string'
return:
type:
- undefined | string
package: '@azure/servicefabric'
- uid: '@azure/servicefabric.HealthEvaluation.kind'
name: kind
fullName: kind
children: []
langs:
- typeScript
type: property
summary: Discriminateur polymorphe
syntax:
content: 'kind: "HealthEvaluation"'
return:
type:
- '"HealthEvaluation"'
package: '@azure/servicefabric'
references:
- uid: '@azure/servicefabric.HealthState'
name: HealthState
spec.typeScript:
- name: HealthState
fullName: HealthState
uid: '@azure/servicefabric.HealthState'
|
docs-ref-autogen/@azure/servicefabric/HealthEvaluation.yml
|
uid: "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch"
fullName: "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch"
name: "LogProfileResourcePatch"
nameWithType: "LogProfileResourcePatch"
summary: "The log profile resource for patch operations."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public class LogProfileResourcePatch"
constructors:
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.LogProfileResourcePatch()"
methods:
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.categories()"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.locations()"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.retentionPolicy()"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.serviceBusRuleId()"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.storageAccountId()"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.tags()"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.validate()"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.withCategories(java.util.List<java.lang.String>)"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.withLocations(java.util.List<java.lang.String>)"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.withRetentionPolicy(com.azure.resourcemanager.monitor.models.RetentionPolicy)"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.withServiceBusRuleId(java.lang.String)"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.withStorageAccountId(java.lang.String)"
- "com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.withTags(java.util.Map<java.lang.String,java.lang.String>)"
type: "class"
metadata: {}
package: "com.azure.resourcemanager.monitor.models"
artifact: com.azure.resourcemanager:azure-resourcemanager-monitor:2.2.0
|
docs-ref-autogen/com.azure.resourcemanager.monitor.models.LogProfileResourcePatch.yml
|
- name: Bulut sağlama
href: index.yml
- name: Genel Bakış
items:
- name: Kimlik sağlama nedir?
href: what-is-provisioning.md
- name: Azure AD Connect bulut sağlama nedir?
href: what-is-cloud-provisioning.md
maintainContext: true
- name: Öğreticiler
expanded: true
items:
- name: Tek bir AD ormanını tek bir Azure AD kiracısı ile tümleştirme
href: tutorial-single-forest.md
- name: Mevcut bir ormanı ve yeni bir ormanı tek bir Azure AD kiracısı ile tümleştirme
href: tutorial-existing-forest.md
- name: Mevcut bir eşitlenmiş AD ormanı için pilot bulut sağlama
href: tutorial-pilot-aadc-aadccp.md
- name: Kavramlar
items:
- name: Parola karması eşitleme nedir?
href: /azure/active-directory/hybrid/whatis-phs?context=azure/active-directory/cloud-provisioning/context/cloud-provisioning-context
- name: 'Azure AD şemasını, özniteliklerini ve ifadelerini anlama'
href: concept-attributes.md
- name: Azure Active Directory’de Öznitelik Eşlemeleri için İfadeler Yazma
href: reference-expressions.md
- name: Nasıl yapılır kılavuzları
items:
- name: Yükleme ve yükseltme
items:
- name: Yükleme Önkoşulları
href: how-to-prerequisites.md
- name: Azure AD Connect bulut sağlama aracısını yükleme
href: how-to-install.md
- name: Bulut sağlama yapılandırması
href: how-to-configure.md
- name: Çoklu Oturum Açma Kullanma
href: how-to-sso.md
- name: Planlama ve tasarım
items:
- name: Azure AD Connect bulut sağlama için topolojiler ve senaryolar
href: plan-cloud-provisioning-topologies.md
- name: Yönet
items:
- name: Aracıyı otomatik yükseltme
href: how-to-automatic-upgrade.md
- name: Geliştirme
items:
- name: Dönüşümler
href: how-to-transformation.md
- name: Azure AD eşitleme API’si
href: 'https://docs.microsoft.com/graph/api/resources/synchronization-overview'
- name: Sorun giderme
items:
- name: Bulut sağlama sorunlarını giderme
href: how-to-troubleshoot.md
- name: Yinelenen öznitelikler
href: 'https://docs.microsoft.com/office365/troubleshoot/administration/duplicate-attributes-prevent-dirsync'
- name: Başvuru
items:
- name: Azure AD Connect bulut sağlama aracısı sürüm geçmişi
href: /azure/active-directory/manage-apps/provisioning-agent-release-version-history?context=azure/active-directory/cloud-provisioning/context/cp-context
- name: Azure AD Connect bulut sağlama hakkında SSS
href: reference-cloud-provisioning-faq.md
- name: Eşitlenen öznitelikler
href: /azure/active-directory/hybrid/reference-connect-sync-attributes-synchronized?context=azure/active-directory/cloud-provisioning/context/cp-context
- name: Temel Active Directory ve Azure AD ortamı
href: tutorial-basic-ad-azure.md
|
articles/active-directory/cloud-provisioning/TOC.yml
|
# pre-commit configuration: generic hygiene hooks plus Python formatting
# (isort, black, autoflake), linting (flake8, yesqa) and shellcheck.
default_language_version:
  python: python3
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.0.1
    hooks:
      - id: trailing-whitespace
      - id: check-added-large-files
        args: ['--maxkb=2000']
      - id: end-of-file-fixer
      - id: check-case-conflict
      - id: check-docstring-first
      - id: check-executables-have-shebangs
      - id: check-merge-conflict
      - id: check-toml
      - id: check-xml
      - id: check-yaml
      - id: debug-statements
      - id: mixed-line-ending
        args: ['--fix=lf']
      - id: requirements-txt-fixer
  # Changes tabs to spaces
  - repo: https://github.com/Lucas-C/pre-commit-hooks
    rev: v1.1.10
    hooks:
      - id: remove-tabs
  - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
    rev: v2.2.0
    hooks:
      - id: pretty-format-ini
        args: [--autofix]
      - id: pretty-format-toml
        args: [--autofix]
  - repo: https://github.com/pre-commit/pygrep-hooks
    rev: v1.9.0 # Use the ref you want to point at
    hooks:
      - id: python-check-blanket-noqa
      - id: python-check-blanket-type-ignore
  # NOTE(review): timothycrosley/isort and ambv/black are legacy URLs that
  # redirect to pycqa/isort and psf/black — consider updating them.
  - repo: https://github.com/timothycrosley/isort
    rev: 5.10.1
    hooks:
      - id: isort
  - repo: https://github.com/ambv/black
    rev: 21.11b1
    hooks:
      - id: black
  - repo: https://github.com/myint/autoflake
    rev: v1.4
    hooks:
      - id: autoflake
        args: ['--expand-star-imports', '--ignore-init-module-imports', '--in-place']
  - repo: https://github.com/pycqa/flake8
    rev: 4.0.1
    hooks:
      - id: flake8
        # Anchor shared with yesqa below so both hooks see the same plugins.
        additional_dependencies: &flake8_dependencies
          - flake8-bugbear==21.11.29
          - flake8-builtins==1.5.3
          - flake8-comprehensions==3.7.0
          - flake8-return==1.1.3
          - flake8-simplify==0.14.2
  - repo: https://github.com/asottile/yesqa
    rev: v1.3.0
    hooks:
      - id: yesqa
        additional_dependencies: *flake8_dependencies
  # TODO setup mypy
  #- repo: https://github.com/pre-commit/mirrors-mypy
  #  rev: 'v0.910'
  #  hooks:
  #  - id: mypy
  #    pass_filenames: false
  #    additional_dependencies:
  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.8.0.1
    hooks:
      - id: shellcheck
        exclude: "^pointnav_comparison"
|
.pre-commit-config.yaml
|
uid: "com.azure.mixedreality.remoterendering.implementation.RemoteRenderingsImpl.stopSessionWithResponseAsync*"
fullName: "com.azure.mixedreality.remoterendering.implementation.RemoteRenderingsImpl.stopSessionWithResponseAsync"
name: "stopSessionWithResponseAsync"
nameWithType: "RemoteRenderingsImpl.stopSessionWithResponseAsync"
members:
- uid: "com.azure.mixedreality.remoterendering.implementation.RemoteRenderingsImpl.stopSessionWithResponseAsync(java.util.UUID,java.lang.String,com.azure.core.util.Context)"
fullName: "com.azure.mixedreality.remoterendering.implementation.RemoteRenderingsImpl.stopSessionWithResponseAsync(UUID accountId, String sessionId, Context context)"
name: "stopSessionWithResponseAsync(UUID accountId, String sessionId, Context context)"
nameWithType: "RemoteRenderingsImpl.stopSessionWithResponseAsync(UUID accountId, String sessionId, Context context)"
summary: "Stops a particular rendering session."
parameters:
- description: "The Azure Remote Rendering account ID."
name: "accountId"
type: "<xref href=\"java.util.UUID?alt=java.util.UUID&text=UUID\" data-throw-if-not-resolved=\"False\" />"
- description: "An ID uniquely identifying the rendering session for the given account. The ID is case\n sensitive, can contain any combination of alphanumeric characters including hyphens and underscores, and\n cannot contain more than 256 characters."
name: "sessionId"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
- description: "The context to associate with this operation."
name: "context"
type: "<xref href=\"com.azure.core.util.Context?alt=com.azure.core.util.Context&text=Context\" data-throw-if-not-resolved=\"False\" />"
syntax: "public Mono<RemoteRenderingsStopSessionResponse> stopSessionWithResponseAsync(UUID accountId, String sessionId, Context context)"
returns:
description: "the completion."
type: "<xref href=\"reactor.core.publisher.Mono?alt=reactor.core.publisher.Mono&text=Mono\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.azure.mixedreality.remoterendering.implementation.models.RemoteRenderingsStopSessionResponse?alt=com.azure.mixedreality.remoterendering.implementation.models.RemoteRenderingsStopSessionResponse&text=RemoteRenderingsStopSessionResponse\" data-throw-if-not-resolved=\"False\" />>"
type: "method"
metadata: {}
package: "com.azure.mixedreality.remoterendering.implementation"
artifact: com.azure:azure-mixedreality-remoterendering:1.0.0-beta.1
|
preview/docs-ref-autogen/com.azure.mixedreality.remoterendering.implementation.RemoteRenderingsImpl.stopSessionWithResponseAsync.yml
|
backends:
- group: google-spectrum-database-v1explorer
url_pattern: /spectrum/v1explorer/paws/getSpectrum
method: POST
host:
- https://www.googleapis.com
whitelist:
- kind
- maxContiguousBwHz
- maxTotalBwHz
- needsSpectrumReport
- spectrumSchedules
- timestamp
- version
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: API for spectrum-management functions.
version: v1explorer
slug: google-spectrum-database-v1explorer-spectrum-v1explorer-paws-getspectrum
schemes:
- https
mapping:
kind: kind
maxContiguousBwHz: maxContiguousBwHz
maxTotalBwHz: maxTotalBwHz
needsSpectrumReport: needsSpectrumReport
spectrumSchedules: spectrumSchedules
timestamp: timestamp
version: version
hosts:
- https://www.googleapis.com
- group: google-spectrum-database-v1explorer
url_pattern: /spectrum/v1explorer/paws/getSpectrumBatch
method: POST
host:
- https://www.googleapis.com
whitelist:
- geoSpectrumSchedules
- kind
- maxContiguousBwHz
- maxTotalBwHz
- needsSpectrumReport
- timestamp
- version
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: API for spectrum-management functions.
version: v1explorer
slug: google-spectrum-database-v1explorer-spectrum-v1explorer-paws-getspectrumbatch
schemes:
- https
mapping:
geoSpectrumSchedules: geoSpectrumSchedules
kind: kind
maxContiguousBwHz: maxContiguousBwHz
maxTotalBwHz: maxTotalBwHz
needsSpectrumReport: needsSpectrumReport
timestamp: timestamp
version: version
hosts:
- https://www.googleapis.com
- group: google-spectrum-database-v1explorer
url_pattern: /spectrum/v1explorer/paws/init
method: POST
host:
- https://www.googleapis.com
whitelist:
- kind
- version
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: API for spectrum-management functions.
version: v1explorer
slug: google-spectrum-database-v1explorer-spectrum-v1explorer-paws-init
schemes:
- https
mapping:
kind: kind
version: version
hosts:
- https://www.googleapis.com
- group: google-spectrum-database-v1explorer
url_pattern: /spectrum/v1explorer/paws/notifySpectrumUse
method: POST
host:
- https://www.googleapis.com
whitelist:
- kind
- version
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: API for spectrum-management functions.
version: v1explorer
slug: google-spectrum-database-v1explorer-spectrum-v1explorer-paws-notifyspectrumuse
schemes:
- https
mapping:
kind: kind
version: version
hosts:
- https://www.googleapis.com
- group: google-spectrum-database-v1explorer
url_pattern: /spectrum/v1explorer/paws/register
method: POST
host:
- https://www.googleapis.com
whitelist:
- kind
- version
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: API for spectrum-management functions.
version: v1explorer
slug: google-spectrum-database-v1explorer-spectrum-v1explorer-paws-register
schemes:
- https
mapping:
kind: kind
version: version
hosts:
- https://www.googleapis.com
- group: google-spectrum-database-v1explorer
url_pattern: /spectrum/v1explorer/paws/verifyDevice
method: POST
host:
- https://www.googleapis.com
whitelist:
- deviceValidities
- kind
- version
concurrent_calls: 1
extra_config:
provider: googleapis.com
description: API for spectrum-management functions.
version: v1explorer
slug: google-spectrum-database-v1explorer-spectrum-v1explorer-paws-verifydevice
schemes:
- https
mapping:
deviceValidities: deviceValidities
kind: kind
version: version
hosts:
- https://www.googleapis.com
|
shared/data/swagger/specs/googleapis.com/spectrum/v1explorer/krakend/backends.yaml
|
---
# Kibana role: installs Kibana 7.x from the Elastic yum repo (Amazon Linux),
# enables the systemd unit, deploys TLS certificates, and regenerates
# /etc/kibana/kibana.yml. Booleans normalized to true/false (YAML 1.2 /
# yamllint `truthy`); symbolic mode quoted so YAML never re-types it.
- name: Include variables
  include_vars:
    file: ../../vars.yaml

- name: Install rpm key
  rpm_key:
    key: https://artifacts.elastic.co/GPG-KEY-elasticsearch
    state: present
  when: ansible_facts['distribution']=="Amazon"

- name: Install base rpm repository
  yum_repository:
    name: kibana-7.x
    description: Kibana repository for 7.x packages
    baseurl: https://artifacts.elastic.co/packages/7.x/yum
    gpgcheck: true
    gpgkey: https://artifacts.elastic.co/GPG-KEY-elasticsearch
  when: ansible_facts['distribution']=="Amazon"

- name: Install Kibana Yum Package
  yum:
    name: kibana
    state: present
    update_cache: true
  when: ansible_facts['distribution']=="Amazon"

- name: reload systemd config
  ansible.builtin.systemd:
    daemon_reload: true  # pick up the unit file installed by the package

- name: enable service kibana and ensure it is not masked
  ansible.builtin.systemd:
    name: kibana
    enabled: true
    masked: false

- name: Create a certificate directory
  file:
    owner: kibana
    group: kibana
    recurse: true
    # NOTE(review): 'g+rx' *adds* group bits while 'u=rwx' *sets* user bits —
    # confirm 'g=rx' wasn't intended. Behavior kept as-is.
    mode: "u=rwx,g+rx,o-rwx"
    path: /etc/kibana/certs
    state: directory

- name: Copy the kibana certs from local to server
  copy:
    src: "files/{{ kibana_dns }}"
    dest: "/etc/kibana/certs/"

- name: Copy CA to server
  copy:
    src: "files/ca.crt"
    dest: "/etc/kibana/certs/ca.crt"

- name: set kibana permissions
  file:
    path: /usr/share/kibana
    state: directory
    recurse: true
    owner: kibana
    group: kibana

# NOTE(review): a raw 'command: mv' is not idempotent — on a second run it
# overwrites the backup with the file generated below. Consider adding
# 'args: creates: /etc/kibana/kibana.bkp.yml'.
- name: Backup default kibana config
  ansible.builtin.command: mv /etc/kibana/kibana.yml /etc/kibana/kibana.bkp.yml

- name: Add Details To Kibana Config YAML
  blockinfile:
    path: /etc/kibana/kibana.yml
    create: true
    marker: ''
    block: |
      server.host: 0.0.0.0
      server.ssl.enabled: true
      server.ssl.key: /etc/kibana/certs/{{ kibana_dns }}/{{ kibana_dns }}.key
      server.ssl.certificate: /etc/kibana/certs/{{ kibana_dns }}/{{ kibana_dns }}.crt
      elasticsearch.hosts: ["https://{{ elastic_dns }}:9200", "https://{{ elastic_node01_dns }}:9200", "https://{{ elastic_node02_dns }}:9200"]
      elasticsearch.ssl.certificateAuthorities: [ "/etc/kibana/certs/ca.crt" ]
      elasticsearch.ssl.verificationMode: "full"

- name: Restart Kibana Service
  ansible.builtin.systemd:
    name: kibana
    state: restarted
playbooks/roles/kibana/tasks/main.yaml
|
# Single-broker Kafka (Confluent Platform 5.5.0) plus a single ZooKeeper
# node for a JHipster environment. One replica each — not highly available.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jhipster-kafka
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jhipster-kafka
  template:
    metadata:
      labels:
        app: jhipster-kafka
    spec:
      containers:
        - name: kafka
          image: confluentinc/cp-kafka:5.5.0
          env:
            # In-cluster clients reach the broker via the Service DNS name
            # defined in the Service manifest below.
            - name: KAFKA_ADVERTISED_LISTENERS
              value: 'PLAINTEXT://jhipster-kafka.default.svc.cluster.local:9092'
            - name: KAFKA_ZOOKEEPER_CONNECT
              value: 'jhipster-zookeeper.default.svc.cluster.local:2181'
            # NOTE(review): broker id 2 with a single replica works, but looks
            # like a leftover — confirm it isn't meant to join another cluster.
            - name: KAFKA_BROKER_ID
              value: '2'
            # Must be 1: only one broker exists to host the offsets topic.
            - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
              value: '1'
          ports:
            - containerPort: 9092
          resources:
            requests:
              memory: '512Mi'
              cpu: '500m'
            limits:
              memory: '1Gi'
              cpu: '1'
---
apiVersion: v1
kind: Service
metadata:
  name: jhipster-kafka
  namespace: default
spec:
  selector:
    app: jhipster-kafka
  ports:
    - port: 9092
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jhipster-zookeeper
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jhipster-zookeeper
  template:
    metadata:
      labels:
        app: jhipster-zookeeper
    spec:
      containers:
        - name: zookeeper
          image: confluentinc/cp-zookeeper:5.5.0
          env:
            - name: ZOOKEEPER_CLIENT_PORT
              value: '2181'
            # Tick time (ms) and sync limit are the Confluent image defaults.
            - name: ZOOKEEPER_TICK_TIME
              value: '2000'
            - name: ZOOKEEPER_SYNC_LIMIT
              value: '2'
          ports:
            - containerPort: 2181
          resources:
            requests:
              memory: '512Mi'
              cpu: '500m'
            limits:
              memory: '1Gi'
              cpu: '1'
---
apiVersion: v1
kind: Service
metadata:
  name: jhipster-zookeeper
  namespace: default
spec:
  selector:
    app: jhipster-zookeeper
  ports:
    - port: 2181
messagebroker-k8s/kafka.yml
|
---
- Include: all-types-schema.yaml
# No args
---
- Statement: SELECT MAX() FROM all_types
- error: [42000]
# Test with no rows => null
---
- Statement: SELECT MAX(bigint_field), MAX(bigint_unsigned_field),
MAX(tinyint_field), MAX(char_field),
MAX(char_multi_field), MAX(clob_field), MAX(date_field),
MAX(decimal_field), MAX(double_field), MAX(float_field),
MAX(integer_field), MAX(numeric_field), MAX(real_field),
MAX(smallint_field), MAX(time_field), MAX(timestamp_field) FROM
all_types
- output: [[null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null]]
# Test with null row => null
---
- Statement: INSERT INTO all_types (bigint_field, bigint_unsigned_field,
tinyint_field, char_field, char_multi_field, clob_field,
date_field, decimal_field, double_field, float_field, integer_field,
numeric_field, real_field, smallint_field, time_field,
timestamp_field)
VALUES (null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null)
---
- Statement: SELECT MAX(bigint_field), MAX(bigint_unsigned_field),
MAX(tinyint_field), MAX(char_field),
MAX(char_multi_field), MAX(clob_field), MAX(date_field),
MAX(decimal_field), MAX(double_field), MAX(float_field),
MAX(integer_field), MAX(numeric_field), MAX(real_field),
MAX(smallint_field), MAX(time_field), MAX(timestamp_field) FROM
all_types
- output: [[null, null, null, null, null, null, null, null, null,
null, null, null, null, null, null, null]]
# Insert single row
---
- Statement: INSERT INTO all_types (bigint_field, bigint_unsigned_field,
tinyint_field, char_field, char_multi_field, clob_field,
decimal_field, double_field, float_field, integer_field,
numeric_field, real_field, smallint_field, time_field,
timestamp_field)
VALUES (1, 1, 1, '1', '1.0', '1.0', 1.0, 1.0, 1.0, 1, 1.0,
1.0, 1, '01:01:01', '1000-01-01 01:01:01')
---
- Statement: SELECT MAX(bigint_field), MAX(bigint_unsigned_field),
MAX(tinyint_field), MAX(char_field),
MAX(char_multi_field), MAX(clob_field), MAX(decimal_field),
MAX(double_field), MAX(float_field), MAX(integer_field),
MAX(numeric_field), MAX(real_field), MAX(smallint_field),
MAX(time_field), MAX(timestamp_field) FROM all_types
- output: [[1, 1, 1, 1, 1.0, 1.0, 1, 1.0, 1.0, 1, 1, 1.0, 1,
'01:01:01', !re '1000-01-01 01:01:01([.]0)?']]
# Insert another row
---
- Statement: INSERT INTO all_types (bigint_field, bigint_unsigned_field,
tinyint_field, char_field, char_multi_field, clob_field,
decimal_field, double_field, float_field, integer_field,
numeric_field, real_field, smallint_field, time_field,
timestamp_field)
VALUES (2, 2, 0, '2', '2.0', 2.0, 2.0, 2.0, 2.0, 2, 2.0, 2.0,
2, '02:02:02', '2000-02-02 02:02:02')
---
- Statement: SELECT MAX(bigint_field), MAX(bigint_unsigned_field),
MAX(tinyint_field), MAX(char_field),
MAX(char_multi_field), MAX(clob_field), MAX(decimal_field),
MAX(double_field), MAX(float_field), MAX(integer_field),
MAX(numeric_field), MAX(real_field), MAX(smallint_field),
MAX(time_field), MAX(timestamp_field) FROM all_types
- output: [[2, 2, 1, 2, 2.0, 2.0, 2, 2.0, 2.0, 2, 2, 2.0, 2,
'02:02:02', !re '2000-02-02 02:02:02([.]0)?']]
---
- Statement: DELETE FROM all_types
# Infinity and NaN
---
- Statement: INSERT INTO all_types (double_field, float_field, real_field)
VALUES ('-Infinity', '-Infinity', '-Infinity');
---
- Statement: SELECT MAX(double_field), MAX(float_field), MAX(real_field)
FROM all_types
- output: [[0.0, 0.0, 0.0]]
---
- Statement: INSERT INTO all_types (double_field, float_field, real_field)
VALUES (1.0, 1.0, 1.0);
---
- Statement: SELECT MAX(double_field), MAX(float_field), MAX(real_field)
FROM all_types
- output: [[1.0, 1.0, 1.0]]
---
- Statement: INSERT INTO all_types (double_field, float_field, real_field)
VALUES ('Infinity', 'Infinity', 'Infinity')
---
- Statement: SELECT MAX(double_field), MAX(float_field), MAX(real_field)
FROM all_types
- output: [[1.0, 1.0, 1.0]]
---
- Statement: INSERT INTO all_types (double_field, float_field, real_field)
VALUES ('NaN', 'NaN', 'NaN')
---
- Statement: SELECT MAX(double_field), MAX(float_field), MAX(real_field)
FROM all_types
- output: [[1.0, 1.0, 1.0]]
...
|
fdb-sql-layer-test-yaml/src/test/resources/com/foundationdb/sql/test/yaml/functional/test-max.yaml
|
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "metal-csi.fullname" . }}-controller
labels:
{{- include "metal-csi.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "metal-csi.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "metal-csi.selectorLabels" . | nindent 8 }}
spec:
hostNetwork: true
serviceAccountName: {{ include "metal-csi.serviceAccountName" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: csi-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
args:
{{- include "metal-csi.csiHelperArgs" . | nindent 12 }}
{{- include "metal-csi.csiHelperLeaderArgs" . | nindent 12 }}
- --extra-create-metadata
- --volume-name-prefix
- {{ include "metal-csi.drivername" . }}
env:
{{- include "metal-csi.csiControllerHelperEnv" . | nindent 12 }}
volumeMounts:
{{- include "metal-csi.csiHelperVolumeMounts" . | nindent 12 }}
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
args:
{{- include "metal-csi.csiHelperArgs" . | nindent 12 }}
{{- include "metal-csi.csiHelperLeaderArgs" . | nindent 12 }}
env:
{{- include "metal-csi.csiControllerHelperEnv" . | nindent 12 }}
volumeMounts:
{{- include "metal-csi.csiHelperVolumeMounts" . | nindent 12 }}
- name: metal-csi-controller
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args: [-l, debug, --node-id, $(KUBE_NODE_NAME), --csi-path, /plugin/csi-controller.sock, --csi-name, {{ include "metal-csi.drivername" . }}]
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
allowPrivilegeEscalation: true
privileged: true
capabilities:
add:
- SYS_ADMIN
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: plugin-dir
mountPath: /plugin
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: 'Bidirectional'
- name: metal-csi-yml
mountPath: /etc/metal-csi.yml
subPath: metal-csi.yml
volumes:
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/metal-csi
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: metal-csi-yml
configMap:
name: {{ include "metal-csi.fullname" . }}
|
charts/metal-csi/templates/controller.yaml
|
---
name: channelflight.firstnetwork.connectionprofile
x-type: "hlfv1"
description: "BankPeerContract methods will be used through this profile"
version: "1.0"
channels:
channelflight:
orderers:
- orderer.ibs.aero
peers:
peer0.gatewick.ibs.aero:
endorsingPeer: true
chaincodeQuery: true
ledgerQuery: true
eventSource: true
peer1.gatewick.ibs.aero:
endorsingPeer: true
chaincodeQuery: true
ledgerQuery: true
eventSource: true
peer0.birtishairways.ibs.aero:
endorsingPeer: true
ledgerQuery: true
organizations:
Gatewick:
mspid: GatewickMSP
peers:
- peer0.gatewick.ibs.aero
- peer1.gatewick.ibs.aero
certificateAuthorities:
- ca.gatewick.ibs.aero
adminPrivateKey:
path: /usr/peerOrganizations/gatewick.ibs.aero/users/Admin@gatewick.ibs.aero/msp/keystore/f9833629c9923f3f4a39a4df3465582481704e01ee762c4c1f5b5983f32e3141_sk
signedCert:
path: /usr/peerOrganizations/gatewick.ibs.aero/users/Admin@gatewick.ibs.aero/msp/signcerts/Admin@gatewick.ibs.aero-cert.pem
Birtishairways:
mspid: BirtishairwaysMSP
peers:
- peer0.birtishairways.ibs.aero
orderers:
orderer.ibs.aero:
url: grpcs://orderer.ibs.aero:7050
tlsCACerts:
path: /usr/ordererOrganizations/ibs.aero/orderers/orderer.ibs.aero/msp/tlscacerts/tlsca.ibs.aero-cert.pem
orderer2.ibs.aero:
url: grpcs://orderer2.ibs.aero:7050
tlsCACerts:
path: /usr/ordererOrganizations/ibs.aero/orderers/orderer2.ibs.aero/msp/tlscacerts/tlsca.ibs.aero-cert.pem
orderer3.ibs.aero:
url: grpcs://orderer3.ibs.aero:7050
tlsCACerts:
path: /usr/ordererOrganizations/ibs.aero/orderers/orderer3.ibs.aero/msp/tlscacerts/tlsca.ibs.aero-cert.pem
  orderer4.ibs.aero:
    url: grpcs://orderer4.ibs.aero:7050
    tlsCACerts:
      path: /usr/ordererOrganizations/ibs.aero/orderers/orderer4.ibs.aero/msp/tlscacerts/tlsca.ibs.aero-cert.pem
orderer5.ibs.aero:
url: grpcs://orderer5.ibs.aero:7050
tlsCACerts:
path: /usr/ordererOrganizations/ibs.aero/orderers/orderer5.ibs.aero/msp/tlscacerts/tlsca.ibs.aero-cert.pem
peers:
peer0.gatewick.ibs.aero:
url: grpcs://peer0.gatewick.ibs.aero:9051
grpcOptions:
request-timeout: 120001
tlsCACerts:
path: /usr/peerOrganizations/gatewick.ibs.aero/peers/peer0.gatewick.ibs.aero/msp/tlscacerts/tlsca.gatewick.ibs.aero-cert.pem
peer1.gatewick.ibs.aero:
url: grpcs://peer1.gatewick.ibs.aero:10051
grpcOptions:
request-timeout: 120001
tlsCACerts:
path: /usr/peerOrganizations/gatewick.ibs.aero/peers/peer1.gatewick.ibs.aero/msp/tlscacerts/tlsca.gatewick.ibs.aero-cert.pem
peer0.birtishairways.ibs.aero:
url: grpcs://peer0.birtishairways.ibs.aero:7051
grpcOptions:
request-timeout: 120001
tlsCACerts:
path: /usr/peerOrganizations/birtishairways.ibs.aero/peers/peer0.birtishairways.ibs.aero/msp/tlscacerts/tlsca.birtishairways.ibs.aero-cert.pem
certificateAuthorities:
ca.gatewick.ibs.aero:
url: https://ca.gatewick.ibs.aero:7054
httpOptions:
verify: false
tlsCACerts:
path: /usr/peerOrganizations/gatewick.ibs.aero/ca/ca.gatewick.ibs.aero-cert.pem
registrar:
- enrollId: admin
enrollSecret: adminpw
caName: ca.gatewick.ibs.aero
|
ichainapi/connection2.yaml
|
openapi: 3.0.1
servers:
- url: http://localhost:20180/
info:
description: Public interface for validating datasets in EASY.
version: 1.0.0
title: validate-dans-bag API
contact:
email: <EMAIL>
license:
name: Apache 2.0
url: http://www.apache.org/licenses/LICENSE-2.0.html
paths:
/:
get:
tags:
- service
summary: Returns a description of the service.
responses:
        "200":
description: description of the service
content:
text/plain:
schema:
type: string
description: a plain text, human readable description of the service
example: |
EASY Deposit API Service running (v1.0.0)
  /validate:
    post:
      tags:
        - validation
      summary: Validates the bag at the given URI and reports any rule violations.
      responses:
        "200":
          $ref: "#/components/responses/Ok"
        "400":
          $ref: "#/components/responses/InvalidArchive"
        "500":
          $ref: "#/components/responses/InternalServerError"
parameters:
- name: PackageInfoType
in: query
description: Information type the bag should be validated as
required: false
schema:
type: string
enum: [SIP, AIP]
default: SIP
- name: uri
in: query
description: URI of the bag to be validated.
required: false
schema:
type: string
default: file:///
- name: bag-store
in: query
description: Bag-store to use as context for validation.
required: false
schema:
type: string
components:
responses:
Ok:
description: Ok. A bag is validated.
content:
application/json:
schema:
$ref: "#/components/schemas/validateJsonOk"
text/plain:
schema:
$ref: "#/components/schemas/validateTextOk"
InternalServerError:
description: Internal Server Error.
InvalidArchive:
description: Bad request. No bag can be found at the provided URL.
schemas:
validateJsonOk:
type: object
example:
bagUri: file:///path/to/bag
bag: bag
profileVersion: 0
infoPackageType: AIP
isCompliant: false
        ruleViolations: [{ruleNumber: "1.2.6(a)", ruleText: "bag-info.txt must contain exactly one 'EASY-User-Account' element; number found 0"}, {ruleNumber: "4.2", ruleText: "Is-Version-Of value must be a URN"}]
properties:
bagUri:
type: string
format: uuid
bag:
type: string
format: url
profileVersion:
type: integer
infoPackageType:
type: string
isCompliant:
type: boolean
ruleViolations:
type: array
items:
type: object
properties:
ruleNumber:
type: string
ruleText:
type: string
validateTextOk:
type: string
example: |
Bag Uri: file:///path/to/bag
Information package type: AIP
Bag: bag
Profile version: 0
Is compliant: false
Rule Violations:
- [1.2.6(a)] bag-info.txt must contain exactly one 'EASY-User-Account' element; number found: 0
- [4.2] Is-Version-Of value must be a URN
|
docs/api/api.yml
|
!level
repoVcsSetups:
share-history: !repoVcs
workingPath: share-history
referenceStoreName: share-history
checkpointStoreName: share-history
share-history-remote: !repoVcs
workingPath: share-history-remote
referenceStoreName: share-history-remote
checkpointStoreName: share-history-remote
repoType: remote
share-history-another: !repoVcs
workingPath: share-history-another
referenceStoreName: share-history-another
checkpointStoreName: share-history-another
steps:
# build repo
- !dev.devAction
actions:
# init remote
- !dev.act.git
repoSetupName: share-history-remote
arguments: [ "init", "--bare"]
# init another local
- !dev.act.git
repoSetupName: share-history-another
arguments:
- init
- !dev.act.git
repoSetupName: share-history-another
arguments: [ config, "--local", user.name, other]
- !dev.act.git
repoSetupName: share-history-another
arguments: [ config, "--local", user.email, other@some.mail]
- !dev.act.git
repoSetupName: share-history-another
arguments: [ config, "--local", core.autocrlf, input]
# link another to remote
- !act.setRemote
localSetupName: share-history-another
remoteSetupName: share-history-remote
remoteNickName: origin
# fill up history
# commit #1
- !act.writeFile &write1
sourceAssetIds:
- "$level-data/clone-repo/file-1:v1"
destinationPaths:
- "share-history-another/file.txt"
- !dev.act.git &stageAll
repoSetupName: share-history-another
arguments: [ "add", "-A" ]
- !dev.act.git &commitDummy
repoSetupName: share-history-another
arguments: [ "commit", "-m", "dummy commit" ]
# commit #2
- !act.writeFile &write2
sourceAssetIds:
- "$level-data/clone-repo/file-1:v2"
destinationPaths:
- "share-history-another/file.txt"
- *stageAll
- *commitDummy
# commit #3 & #4
- *write1
- *stageAll
- *commitDummy
- *write2
- *stageAll
- *commitDummy
# push
- !dev.act.git
repoSetupName: share-history-another
arguments: [ "push", "-u", "origin", "master" ]
# save references
- !dev.act.saveReference
repoSetupName: share-history-another
referenceName: init
- !dev.act.saveReference
repoSetupName: share-history-remote
referenceName: init
- !loadOneReference
repoSetupName: share-history-another
referenceName: init
appendCheckpoint: false
- !loadOneReference
repoSetupName: share-history-remote
referenceName: init
# intro
- !elaborate
descriptionId: "level-data/clone-repo/intro"
- !illustrate
descriptionId: "level-data/clone-repo/concept"
- !elaborate
descriptionId: "level-data/clone-repo/intro-how-to-practice"
- !instruct
descriptionId: "level-data/clone-repo/clone-repo"
needConfirm: false
- !dev.needAction
actions:
- !dev.act.cloneRepo
sourceRepoSetupName: share-history-remote
destinationRepoSetupName: share-history
- !act.setRemote
localSetupName: share-history
remoteSetupName: share-history-remote
remoteNickName: origin
- !verifyRepo
referenceName: "clone-repo-clone"
appendCheckpoint: false
- !playActions
descriptionId: "level-data/clone-repo/set-username"
actions:
- !act.setUser
repoSetupName: share-history
userName: someone
userEmail: <EMAIL>
- !elaborate
descriptionId: "level-data/clone-repo/intro-cloned-repo"
- !elaborate
descriptionId: "level-data/clone-repo/summary"
|
example/course-resources/fork/zh-Hant/levels/clone-repo.yaml
|
---
builds:
!binary "NTQzZTU4NWY1MTlmNjJiYWFiMTIxMTlmZDJiM2JlMzU2NDg2NzJmYg==":
version: !binary |-
NTQzZTU4NWY1MTlmNjJiYWFiMTIxMTlmZDJiM2JlMzU2NDg2NzJmYg==
sha1: !binary |-
ZTRkYzY4NDcwOTk2YWUzZTZlNjk4NGYzOGQyNTM4ZWQzY2VjZWE2Zg==
blobstore_id: 48c43f99-b25b-40cc-9495-b43fcb25a937
!binary "NGQ0OGM1NzhiNmMwMzU0NWZhMjQ1ZWJmYTZiMzdiZTUxYWRmMWY1OQ==":
version: !binary |-
NGQ0OGM1NzhiNmMwMzU0NWZhMjQ1ZWJmYTZiMzdiZTUxYWRmMWY1OQ==
sha1: !binary |-
ZGIwYjdiNjgwMzRjNDEzYTg3YjcxOTY2ZjI2MmUwYzdkODkwZjUyNA==
blobstore_id: 0f270e57-0d1e-4fc3-b30b-53774d2b0468
!binary "OTI3NmY3NTI3MDI2NGI3NDMwMjEyOWFiYzc2NWE1Njc0MWNlZDY5NQ==":
version: !binary |-
OTI3NmY3NTI3MDI2NGI3NDMwMjEyOWFiYzc2NWE1Njc0MWNlZDY5NQ==
sha1: !binary |-
OTJiMzc3YTFkYjg3ZjkzYjAxNjJiZDc4MGU4NzI2YWViMTg3NjQ2MA==
blobstore_id: c278e70f-5bd9-4b61-a1fb-58138e4a5427
!binary "MTk0ZWQzMjUzNTAwYmZjNTRjODMxYTVlOTRmMjA1MGRlZDRlNDZlYQ==":
version: !binary |-
MTk0ZWQzMjUzNTAwYmZjNTRjODMxYTVlOTRmMjA1MGRlZDRlNDZlYQ==
sha1: !binary |-
NDRjOTM2NzJjMDY5MDMzZGZhZDU5ZjBjNjc5MDhkNDcyNmRlYWU3MQ==
blobstore_id: c54eaa44-5a40-45de-a91b-6f8697367e0b
!binary "MjAzOGQzMzhkNGI5OGY1NzQ4NDAxMWNmNDU3OGQ0YzU4NmYwM2U2Mw==":
version: !binary |-
MjAzOGQzMzhkNGI5OGY1NzQ4NDAxMWNmNDU3OGQ0YzU4NmYwM2U2Mw==
sha1: !binary |-
NTg2NWVmMmVhMWFiYTdjOTM2MzNmMjcxMjE4YmFhODYxZmE2YmQ4Yg==
blobstore_id: ac1a7416-15a3-487e-9f0b-2e59ed5308c6
!binary "OTA3YWE2MzIyNDZiNzUwMDI1ZTc2ZDU5MTk3OGYxMmQ3M2UzZjZhYQ==":
version: !binary |-
OTA3YWE2MzIyNDZiNzUwMDI1ZTc2ZDU5MTk3OGYxMmQ3M2UzZjZhYQ==
sha1: !binary |-
ZDU2ZmJkZWE5ZjkzMTcyYWQzMjA2ODMwNDcxMzVjMTQ5NTIxYTZiNA==
blobstore_id: 90a2671f-f828-488a-a6c2-4a68dff00758
!binary "NDkxZjFmZTE3MTRjM2E4NGM1NTIyOTc0NTFjMzBhNzJlZTAxZThjNQ==":
version: !binary |-
NDkxZjFmZTE3MTRjM2E4NGM1NTIyOTc0NTFjMzBhNzJlZTAxZThjNQ==
sha1: !binary |-
ZDg2NzhlNDRkZTUzYjRkYjQwMjg5Y2RmMGEwZDhjMTA4NGJmNTc1Yg==
blobstore_id: fb52a5a1-1044-49f6-ac43-82927d3f21c7
!binary "ODNiOWM0YjQ0NmEzZGQyYWE5MzFhNzY0MDNiYWVkZmU5M2VkMzAzOA==":
version: !binary |-
ODNiOWM0YjQ0NmEzZGQyYWE5MzFhNzY0MDNiYWVkZmU5M2VkMzAzOA==
sha1: !binary |-
OGJjZTA3YzQwODk5YmFmZGFhYWExMzg1ZWM0OGE2M2RmNDg5NzExYw==
blobstore_id: 90c67ed9-5324-4e78-9e56-e9b6de52697d
!binary "YWNlZjA0NTE3MmFlNzI2MzVjNDhjMmI0NGY0YjQ0NWNhOWM3OWNmMw==":
version: !binary |-
YWNlZjA0NTE3MmFlNzI2MzVjNDhjMmI0NGY0YjQ0NWNhOWM3OWNmMw==
sha1: !binary |-
NjUzNTI0ODUxNjUyZjY3MjI0ODdjMGYzOWY3MGVlMGRhZjg4YTI3NQ==
blobstore_id: 413ab995-3a96-4262-af9b-a3de509b15f2
!binary "ODU2ZDQyM2U2MmVhYjQ5NjBkZTQ3ZTM1YWY3ZTA3MDk4YWUyNjRjOA==":
version: !binary |-
ODU2ZDQyM2U2MmVhYjQ5NjBkZTQ3ZTM1YWY3ZTA3MDk4YWUyNjRjOA==
sha1: !binary |-
ZWNiNjVkZDg3ODI4ZDdmYThkMWM4M2YzZGNjMzc0ZWQwMWEwMDcxMQ==
blobstore_id: 4e22ae83-08c1-4c27-9a81-be606e98ab94
format-version: '2'
|
.final_builds/jobs/postgresql/index.yml
|
items:
- uid: '@azure/arm-synapse.WorkspaceAadAdminInfo'
name: WorkspaceAadAdminInfo
fullName: WorkspaceAadAdminInfo
children:
- '@azure/arm-synapse.WorkspaceAadAdminInfo.administratorType'
- '@azure/arm-synapse.WorkspaceAadAdminInfo.login'
- '@azure/arm-synapse.WorkspaceAadAdminInfo.sid'
- '@azure/arm-synapse.WorkspaceAadAdminInfo.tenantId'
langs:
- typeScript
type: interface
summary: Workspace active directory administrator
extends:
name: BaseResource
package: '@azure/arm-synapse'
- uid: '@azure/arm-synapse.WorkspaceAadAdminInfo.administratorType'
name: administratorType
fullName: administratorType
children: []
langs:
- typeScript
type: property
summary: Workspace active directory administrator type
optional: true
syntax:
content: 'administratorType?: undefined | string'
return:
type:
- undefined | string
description: ''
package: '@azure/arm-synapse'
- uid: '@azure/arm-synapse.WorkspaceAadAdminInfo.login'
name: login
fullName: login
children: []
langs:
- typeScript
type: property
summary: Login of the workspace active directory administrator
optional: true
syntax:
content: 'login?: undefined | string'
return:
type:
- undefined | string
description: ''
package: '@azure/arm-synapse'
- uid: '@azure/arm-synapse.WorkspaceAadAdminInfo.sid'
name: sid
fullName: sid
children: []
langs:
- typeScript
type: property
summary: Object ID of the workspace active directory administrator
optional: true
syntax:
content: 'sid?: undefined | string'
return:
type:
- undefined | string
description: ''
package: '@azure/arm-synapse'
- uid: '@azure/arm-synapse.WorkspaceAadAdminInfo.tenantId'
name: tenantId
fullName: tenantId
children: []
langs:
- typeScript
type: property
summary: Tenant ID of the workspace active directory administrator
optional: true
syntax:
content: 'tenantId?: undefined | string'
return:
type:
- undefined | string
description: ''
package: '@azure/arm-synapse'
|
docs-ref-autogen/@azure/arm-synapse/WorkspaceAadAdminInfo.yml
|
---
name: CI
# Enable Buildkit and let compose use it to speed up image building
env:
DOCKER_BUILDKIT: 1
COMPOSE_DOCKER_CLI_BUILD: 1
on:
pull_request:
branches: [ "*" ]
paths-ignore: [ "docs/**" ]
push:
branches: [ "*" ]
paths-ignore: [ "docs/**" ]
jobs:
# With no caching at all the entire ci process takes 4m 30s to complete!
pytest:
runs-on: ubuntu-latest
services:
postgres:
image: postgres:14
ports:
          - "5432:5432"
env:
POSTGRES_PASSWORD: postgres
env:
# postgres://user:password@host:port/database
DATABASE_URL: "postgres://postgres:postgres@localhost:5432/postgres"
steps:
- name: Checkout Code Repository
uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v3
with:
python-version: "3.10"
cache: pip
cache-dependency-path: |
requirements/base.txt
requirements/local.txt
- name: Install Dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements/local.txt
- name: Migrate Database
run: |
./manage.py makemigrations
./manage.py migrate
- name: Test with pytest and upload to coveralls
run: |
mkdir -p test-results
coverage run -m pytest
coverage lcov -o test-results/coverage.info --fail-under=90
- name: Coveralls GitHub Action
        uses: coverallsapp/github-action@v1.1.3
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
path-to-lcov: test-results/coverage.info
heroku:
runs-on: ubuntu-latest
needs: pytest
if: github.ref == 'refs/heads/master'
steps:
- name: Checkout Code Repository
uses: actions/checkout@v3
- name: Deploy to Heroku
uses: AkhileshNS/heroku-deploy@v3.12.12
with:
heroku_api_key: ${{secrets.HEROKU_API_KEY}}
heroku_app_name: "gentle-savannah-12792"
heroku_email: ${{secrets.HEROKU_EMAIL}}
|
.github/workflows/ci.yml
|
name: Unit and UI tests (multiple devices)
on:
pull_request:
branches:
- master
- main
concurrency:
group: tests-master-${{ github.head_ref }}
cancel-in-progress: true
jobs:
ui-tests:
runs-on: macos-latest
strategy:
matrix:
scheme: [Swift, Objective-C]
device: ['iPhone 13 (15.0)', 'iPhone 12 (14.5)', 'iPhone 11 (13.7)']
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Install the dependencies specified in Gemfile
uses: ruby/setup-ruby@v1
with:
ruby-version: '2.7'
bundler-cache: true
- name: Cache pods
id: cache-pods
uses: actions/cache@v2
env:
cache-name: example-app-pods
with:
path: ExampleCheckout/Pods/
key: cache-${{ env.cache-name }}-${{ hashFiles('ExampleCheckout/Podfile.lock', 'PayoneerCheckout.xcodeproj/project.pbxproj') }}
- name: Install Pods
if: steps.cache-pods.outputs.cache-hit != 'true'
env:
project-directory: ExampleCheckout/
run: bundle exec pod install --deployment --project-directory=${{ env.project-directory }}
- name: Install simulators
run: |
sudo mkdir -p /Library/Developer/CoreSimulator/Profiles/Runtimes
sudo ln -s $XCODE_12_DEVELOPER_DIR/Platforms/iPhoneOS.platform/Library/Developer/CoreSimulator/Profiles/Runtimes/iOS.simruntime /Library/Developer/CoreSimulator/Profiles/Runtimes/iOS\ 14.5.simruntime
sudo ln -s $XCODE_11_DEVELOPER_DIR/Platforms/iPhoneOS.platform/Library/Developer/CoreSimulator/Profiles/Runtimes/iOS.simruntime /Library/Developer/CoreSimulator/Profiles/Runtimes/iOS\ 13.7.simruntime
- name: Set simulators
# I use file instead of passing it as a parameter to have an ability to test multiple devices per one run if needed
# (it is possible only with Scanfile)
run:
echo "devices(['${{ matrix.device }}'])" >> fastlane/Scanfile
- name: Inject secrets
id: inject-secrets
env:
merchant-code: ${{ secrets.MOBILE_MERCHANT_CODE }}
merchant-token: ${{ secrets.MOBILE_MERCHANT_PAYMENT_TOKEN }}
payment-api-list-url: https:/\$()/api.integration.oscato.com/api/lists
xcconfig: ExampleCheckout/UITests/Assets/Config.xcconfig
run: |
echo "MERCHANT_CODE=${{ env.merchant-code }}" > ${{ env.xcconfig }}
echo "MERCHANT_PAYMENT_TOKEN=${{ env.merchant-token }}" >> ${{ env.xcconfig }}
echo "PAYMENT_API_LISTURL=${{ env.payment-api-list-url }}" >> ${{ env.xcconfig }}
- name: Run UI tests
env:
FASTLANE_SKIP_UPDATE_CHECK: true
run: bundle exec fastlane ui_test scheme:${{ matrix.scheme }}
- name: Clear secrets
if: always() && steps.inject-secrets.outcome == 'success'
id: clear-secrets
env:
xcconfig: ExampleCheckout/UITests/Assets/Config.xcconfig
run: rm ${{ env.xcconfig }}
- name: Publish test report
uses: mikepenz/action-junit-report@v2
if: failure() && steps.clear-secrets.outcome == 'success'
continue-on-error: true
with:
report_paths: output/scan/report.junit
check_name: UI test report (${{ matrix.scheme }})
- name: Zip test results
id: zip-xcresult
if: always()
run: zip -r output/scan/${{ matrix.scheme }}.xcresult.zip output/scan/*.xcresult/
- name: Upload test results
if: always() && steps.zip-xcresult.conclusion == 'success'
uses: actions/upload-artifact@v2
with:
name: ${{ github.sha }}.xcresult.zip
path: output/scan/${{ matrix.scheme }}.xcresult.zip/
if-no-files-found: error
retention-days: 5
|
.github/workflows/pullrequest_to_master.yml
|
---
- name: configure repo files
template: src={{ item }}.j2 dest=/etc/yum.repos.d/{{ item }} mode=0644 owner=root group=root
with_items:
- CentOS-Base.repo
- ambari.repo
- hdp.repo
- hdp-gpl.repo
- zabbix.repo
tags:
- common
- common-repo
- name: clear yum cache
shell: yum clean all
tags:
- common
- common-repo
- name: delete yum cache file
file: path=/var/cache/yum state=absent
tags:
- common
- common-repo
- name: install basic packages
yum: name={{ item }}
with_items:
- gcc
- ntp
- libselinux-python
- libevent
- libtirpc-devel
tags:
- common
- common-yum
- name: start ntpd service
service: name=ntpd enabled=yes state=started
tags:
- common
- common-yum
- common-ntpd
- name: install ntpd
yum: name=ntp state=present
tags:
- common
- common-etc
- common-etc-ntpd
- name: configure /etc/ntp.conf
template: src=etc_ntp.conf.j2 dest=/etc/ntp.conf owner=root group=root mode=0644
notify: restart ntpd
tags:
- common
- common-etc
- common-etc-ntpd
- name: configure /etc/sysctl.conf
template: src=etc_sysctl.conf.j2 dest=/etc/sysctl.conf mode=0644 owner=root group=root
register: sysctl_conf
tags:
- common
- common-etc
- name: run command sysctl -p
shell: sysctl -p
  when: sysctl_conf is changed
tags:
- common
- common-etc
- name: configure /etc/security/limits.conf
template: src=limits.conf.j2 dest=/etc/security/limits.conf mode=0644 owner=root group=root
tags:
- common
- common-etc
- name: Disable UseDNS for sshd
lineinfile: dest=/etc/ssh/sshd_config regexp='#UseDNS yes' line='UseDNS no'
tags:
- common
- common-etc
- name: Set dns=none in /etc/NetworkManager/NetworkManager.conf
lineinfile: dest=/etc/NetworkManager/NetworkManager.conf insertafter='\[main\]' line='dns=none'
notify: restart network
tags:
- common
- common-etc
- name: create /root/deploy directory
file: path=/root/deploy mode=0755 owner=root group=root state=directory
tags:
- common
|
ansible/roles/common/tasks/base.yml
|
overrides:
- files:
- "*.ts"
- "*.tsx"
parser: "@typescript-eslint/parser"
plugins:
- "@typescript-eslint"
parserOptions:
project: "./tsconfig.json"
rules:
"@typescript-eslint/adjacent-overload-signatures": error # メンバーのオーバーロードはグループ化してー!
"@typescript-eslint/array-type": # 配列の型は場合に応じて使い分けてー!
- warn
- default: array-simple
# "@typescript-eslint/ban-ts-comment": warn # @ts コメントを抑制 allow-with-description 使えない?
"@typescript-eslint/ban-types": warn # その型は使っちゃだめ ><
"@typescript-eslint/member-delimiter-style": warn # 型定義メンバー区切り文字のスタイル
# "@typescript-eslint/no-invalid-void-type": warn # void の使い方がおかしいよー! 使えない?
"@typescript-eslint/no-misused-new": warn # new, constructor を型定義しようとしないでー!
"@typescript-eslint/no-namespace": warn # namespace, module を使わないでー!
"@typescript-eslint/no-non-null-asserted-optional-chain": warn
"@typescript-eslint/no-non-null-assertion": warn # よくわかんないけどエラー!
"@typescript-eslint/no-require-imports": warn # require を使わないでー!
"@typescript-eslint/no-unnecessary-boolean-literal-compare": warn # boolean は比較しなくていいよー!
"@typescript-eslint/no-unnecessary-type-arguments": warn # その型引数もともとその型だよー!
"@typescript-eslint/no-unnecessary-type-assertion": warn # その型アサーションいらないよー!
"@typescript-eslint/no-explicit-any": warn
"@typescript-eslint/no-unsafe-assignment": warn
"@typescript-eslint/no-unsafe-return": warn # any!!!!
"@typescript-eslint/prefer-as-const": warn # const 使ってー!
"@typescript-eslint/prefer-optional-chain": warn # 省略
"@typescript-eslint/prefer-reduce-type-parameter": warn # reduce は呼び出し時に型定義
"@typescript-eslint/prefer-ts-expect-error": warn # @ts-expect-error を使ってー!
"@typescript-eslint/restrict-plus-operands": warn # 同じ型同士で + してー!
"@typescript-eslint/type-annotation-spacing": warn # type の周りにスペース!
"@typescript-eslint/no-this-alias": warn # this エイリアスを使わないでー!
"@typescript-eslint/prefer-for-of": warn # for of 使ってー!
"@typescript-eslint/prefer-includes": warn # includes 使ってー!
# override
      brace-style: off
"@typescript-eslint/brace-style": # 改行は 1tbs スタイルにしてー! 型定義のサポート?
- warn
- 1tbs
default-param-last: off
"@typescript-eslint/default-param-last": warn # デフォルト引数は最後にしよー! ?記法のサポート
# pass
# 問題あり
# no-unused-vars: off
# "@typescript-eslint/no-unused-vars":
# - error
# - varsIgnorePattern: ^_
#
# comma-spacing 不要?
# explicit-function-return-type 明示的な戻り値が必要
# explicit-module-boundary-types 上の export のみ
# explicit-member-accessibility クラスメソッドに明示的なアクセス修飾子が必要
# func-call-spacing 不要なはず 関数()前のスペース 関数ジェネリックのサポート
# method-signature-style 関数定義のスタイルを強制
# no-empty-function
# no-empty-interface
# no-extra-non-null-assertion
# no-extra-parens
# no-extra-semi
# no-extraneous-class
# no-for-in-array
# no-loss-of-precision
# no-useless-constructor
# prefer-nullish-coalescing
#
# class-literal-property-style
# consistent-type-assertions
# indent
# init-declarations
# keyword-spacing
# no-unused-vars-experimental
# no-use-before-define
# no-unused-expressions
#
# member-ordering
# naming-convention
# no-floating-promises
# no-confusing-non-null-assertion
# no-unsafe-member-access
# prefer-function-type
# prefer-readonly
# quotes
# require-await
|
rules/typescript.yml
|
version: '3.7'
services:
cni-and-cube:
image: fnndsc/cni-store-proxy:and-cube
volumes:
- ./secrets/Test_data_generator.json:/etc/chris-plugins/Test_data_generator.json:ro
- ./secrets/cni_challenge_evaluation.json:/etc/chris-plugins/cni_challenge_evaluation.json:ro
environment:
CNI_COMPUTE_ENV: host
FS_PLUGIN_FILE: /etc/chris-plugins/Test_data_generator.json
EVALUATOR_FILE: /etc/chris-plugins/cni_challenge_evaluation.json
env_file:
- ./secrets/.chris.env
- ./secrets/.chris_db.env
- ./secrets/.swift_service.env
ports:
- "127.0.0.1:8011:8011"
depends_on:
- chris_db
- swift
- queue
- chris_store
- pfcon
restart: on-failure
networks:
- local
worker:
image: fnndsc/chris
entrypoint: ''
command: celery -A core worker -c 4 -l info -Q main
env_file:
- ./secrets/.chris.env
- ./secrets/.chris_db.env
- ./secrets/.swift_service.env
depends_on:
- chris_db
- swift
- queue
- pfcon
restart: on-failure
networks:
- local
scheduler:
image: fnndsc/chris
entrypoint: ''
command: celery -A core beat -l info --pidfile /home/localuser/celerybeat.pid --scheduler django_celery_beat.schedulers:DatabaseScheduler
env_file:
- ./secrets/.chris.env
- ./secrets/.chris_db.env
- ./secrets/.swift_service.env
depends_on:
- chris_db
- swift
- queue
restart: on-failure
networks:
- local
queue:
image: rabbitmq:3
restart: on-failure
networks:
- local
chris_db:
image: mysql:5
volumes:
- chris_db_data:/var/lib/mysql
env_file:
- ./secrets/.chris_db.env
restart: on-failure
networks:
- local
chris_store:
image: fnndsc/chris_store:latest
environment:
DJANGO_USE_X_FORWARDED_HOST: "true"
env_file:
- ./secrets/.chris_store.env
- ./secrets/.chris_store_db.env
- ./secrets/.swift_service.env
depends_on:
- chris_store_db
- swift
restart: on-failure
networks:
local:
aliases:
- chris-store.local
chris_store_db:
image: mysql:5
volumes:
- chris_store_db_data:/var/lib/mysql
env_file:
- ./secrets/.chris_store_db.env
restart: on-failure
networks:
- local
swift:
image: fnndsc/docker-swift-onlyone:latest
init: true
volumes:
- swift_storage:/srv
env_file:
- ./secrets/.swift_service.env
restart: on-failure
networks:
local:
aliases:
- swift_service
pfcon:
image: fnndsc/pfcon:latest
command: ["--forever", "--httpResponse", "--verbosity", "1"]
env_file:
- ./secrets/.swift_service.env
depends_on:
- swift
- pman
- pfioh
restart: on-failure
networks:
local:
aliases:
- pfcon.local
- pfcon_service
pfioh:
image: fnndsc/pfioh:latest
command: ["--forever", "--httpResponse", "--createDirsAsNeeded", "--storeBase", "/hostFS/storeBase", "--verbosity", "1"]
volumes:
- pfioh-remote:/hostFS/storeBase
restart: on-failure
networks:
local:
aliases:
- pfioh_service
pman:
image: fnndsc/pman:latest
environment:
- PMAN_DOCKER_VOLUME=cni-pfioh-remote
command: ["--rawmode", "1", "--http", "--port", "5010", "--listeners", "12"]
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- pfioh-remote:/hostFS/storeBase
restart: on-failure
networks:
local:
aliases:
- pman_service
networks:
local:
volumes:
chris_db_data:
chris_store_db_data:
swift_storage:
pfioh-remote:
name: cni-pfioh-remote
|
docker-compose.yml
|
- name: Übersicht über die Zugriffsverwaltung
href: ../role-based-access-control/manage-access-to-azure-resources.md
- name: Zugriff auf Ressourcen in Azure
href: ../role-based-access-control/rbac-and-directory-admin-roles.md
- name: 'Rollenbasierte Zugriffssteuerung (Role-Based Access Control, RBAC)'
href: ../role-based-access-control/overview.md
items:
- name: Verwalten von Rollenzuweisungen
items:
- name: Nach Benutzer
href: ../role-based-access-control/role-assignments-users.md
- name: Nach Ressource
href: ../role-based-access-control/role-assignments-portal.md?toc=%2fazure%2factive-directory%2ftoc.json
- name: Mit der Azure-Befehlszeilenschnittstelle
href: ../role-based-access-control/role-assignments-cli.md
- name: Mit PowerShell
href: ../role-based-access-control/role-assignments-powershell.md
- name: Mit REST
href: ../role-based-access-control/role-assignments-rest.md
- name: Integrierte Rollen
href: ../role-based-access-control/built-in-roles.md
- name: Benutzerdefinierte Rollen
href: ../role-based-access-control/custom-roles.md
- name: Zuweisen von benutzerdefinierten Rollen für interne und externe Benutzer
href: ../role-based-access-control/role-assignments-external-users.md
- name: Berichterstellung
href: ../role-based-access-control/change-history-report.md
- name: Erhöhen des Mandantenadministratorzugriffs
href: ../role-based-access-control/elevate-access-global-admin.md
- name: Problembehandlung
href: ../role-based-access-control/troubleshooting.md
- name: Ressourcenanbietervorgänge
href: ../role-based-access-control/resource-provider-operations.md
- name: Privileged Identity Management (PIM) für RBAC
href: ../role-based-access-control/pim-azure-resource.md
- name: Bedingter Zugriff für die Azure-Verwaltung
href: ../role-based-access-control/conditional-access-azure-management.md
|
articles/active-directory/TOC.yml
|
version: "3"
services:
db_recipes:
restart: always
# I seem to recall issues with newer versions when I first installed. Will maybe have to look again
image: postgres:11-alpine
volumes:
- ./postgresql:/var/lib/postgresql/data
#The scripts that run initially are going to look in this file for a bunch of the configurations to setup the application
#You will need to have the .env file there. If you're on linux, and you try to 'ls', you won't see it. don't worry though, if you created it, it will be there.
#you could always cat it just to be sure
env_file:
- ./.env
web_recipes:
image: vabene1111/recipes
restart: always
env_file:
- ./.env
#You can see that it's not only mounting its own mediafiles directory from it's own root directory
#It's then mounting staticfiles and nginx_config from the shared volumes that are created at the bottom
volumes:
- staticfiles:/opt/recipes/staticfiles
- nginx_config:/opt/recipes/nginx/conf.d
- ./mediafiles:/opt/recipes/mediafiles
    # won't start if the database isn't there
depends_on:
- db_recipes
# I like to have centralized logging for better alerting, so I ship them to my log server
logging:
driver: gelf
options:
gelf-address: "my log server"
nginx_recipes:
image: nginx:mainline-alpine
restart: always
#We only really need the port for nginx. Everything else can communicate via their host names.
#If this is available to the outside, I recommend at least one more layer between this and the outside
ports:
- 9280:80
env_file:
- ./.env
# won't start if the application isn't there
depends_on:
- web_recipes
volumes:
- nginx_config:/etc/nginx/conf.d:ro
- staticfiles:/static
- ./mediafiles:/media
logging:
driver: gelf
options:
gelf-address: "my log server"
# Creates the volumes that will be shared between containers
volumes:
nginx_config:
staticfiles:
|
cookbook-config.yml
|
steps:
# Build the container image
- name: "gcr.io/cloud-builders/docker"
entrypoint: "bash"
args:
[
"-c",
'docker build -t gcr.io/visualexp-a7d2c/knowledge --build-arg=ONECADEMYCRED_PROJECT_ID="$${ONECADEMYCRED_PROJECT_ID}" --build-arg=ONECADEMYCRED_PRIVATE_KEY_ID="$${ONECADEMYCRED_PRIVATE_KEY_ID}" --build-arg=ONECADEMYCRED_PRIVATE_KEY="$${ONECADEMYCRED_PRIVATE_KEY}" --build-arg=ONECADEMYCRED_CLIENT_EMAIL="$${ONECADEMYCRED_CLIENT_EMAIL}" --build-arg=ONECADEMYCRED_CLIENT_ID="$${ONECADEMYCRED_CLIENT_ID}" --build-arg=ONECADEMYCRED_CLIENT_X509_CERT_URL="$${ONECADEMYCRED_CLIENT_X509_CERT_URL}" .'
]
secretEnv:
[
"ONECADEMYCRED_PROJECT_ID",
"ONECADEMYCRED_CLIENT_EMAIL",
"ONECADEMYCRED_CLIENT_ID",
"ONECADEMYCRED_CLIENT_X509_CERT_URL",
"ONECADEMYCRED_PRIVATE_KEY",
"ONECADEMYCRED_PRIVATE_KEY_ID"
]
# Push the container image to Container Registry
# - name: "gcr.io/cloud-builders/docker"
# args: ["push", "gcr.io/visualexp-a7d2c/knowledge"]
# Deploy container image to Cloud Run
# - name: "gcr.io/google.com/cloudsdktool/cloud-sdk"
# entrypoint: gcloud
# args:
# [
# "run",
# "deploy",
# "knowledge",
# "--image",
# "gcr.io/visualexp-a7d2c/knowledge",
# "--region",
# "us-central1",
# ]
images:
- gcr.io/visualexp-a7d2c/knowledge
availableSecrets:
secretManager:
- versionName: projects/141114383555/secrets/ONECADEMYCRED_PROJECT_ID/versions/1
env: "ONECADEMYCRED_PROJECT_ID"
- versionName: projects/141114383555/secrets/ONECADEMYCRED_CLIENT_EMAIL/versions/1
env: "ONECADEMYCRED_CLIENT_EMAIL"
- versionName: projects/141114383555/secrets/ONECADEMYCRED_CLIENT_ID/versions/1
env: "ONECADEMYCRED_CLIENT_ID"
- versionName: projects/141114383555/secrets/ONECADEMYCRED_CLIENT_X509_CERT_URL/versions/1
env: "ONECADEMYCRED_CLIENT_X509_CERT_URL"
- versionName: projects/141114383555/secrets/ONECADEMYCRED_PRIVATE_KEY/versions/1
env: "ONECADEMYCRED_PRIVATE_KEY"
- versionName: projects/141114383555/secrets/ONECADEMYCRED_PRIVATE_KEY_ID/versions/1
env: "ONECADEMYCRED_PRIVATE_KEY_ID"
|
knowledge/cloudbuild.yaml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-20 14:47"
game: "Unreal Tournament 2004"
name: "DM-Area52]=["
author: "dauthi{omc}"
description: "One of the last outposts that remain from the Skarrj War,Queitly Sitting\
\ on an asteroid that orbits the moon. Liandri bought it and has since maintained\
\ it for the sole purpose of spilling Tainted blood."
releaseDate: "2005-03"
attachments:
- type: "IMAGE"
name: "DM-Area52]=[_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/A/DM-Area52%5D=%5B_shot_2.png"
- type: "IMAGE"
name: "DM-Area52]=[_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/A/DM-Area52%5D=%5B_shot_1.png"
originalFilename: "dm-area52]_[.zip"
hash: "c23089c2911d4e622e581b5648ff1a9ef8bbeff3"
fileSize: 9005528
files:
- name: "ScaledWeaponBase.u"
fileSize: 1294
hash: "931729c871adaa98aa4ccb7d8402734abd470212"
- name: "DM-Area52]=[.ut2"
fileSize: 9969277
hash: "6139d0cba01c0591c3a6d8dd781ea765b760ecb1"
- name: "dauthi1.utx"
fileSize: 10931162
hash: "0eb6ffb71b596758548d05f68e8a1f6636829436"
otherFiles: 3
dependencies:
DM-Area52]=[.ut2:
- status: "OK"
name: "ScaledWeaponBase"
- status: "OK"
name: "dauthi1"
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/DeathMatch/A/dm-area52%5D_%5B.zip"
main: true
repack: false
state: "OK"
- url: "http://ut2004.ut-files.com/index.php?dir=Maps/DeathMatch/MapsA/&file=dm-area52%5D_%5B.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/DeathMatch/A/c/2/3089c2/dm-area52%255D_%255B.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/DeathMatch/A/c/2/3089c2/dm-area52%255D_%255B.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DM-{OMC}Area52"
playerCount: "4"
themes:
Tech: 0.7
Industrial: 0.2
City: 0.1
bots: true
|
content/Unreal Tournament 2004/Maps/DeathMatch/A/c/2/3089c2/dm-area52_[c23089c2].yml
|
sylius_admin_api_checkout_show:
path: /{id}
methods: [GET]
defaults:
_controller: sylius.controller.order:showAction
_sylius:
serialization_version: $version
serialization_groups: [Detailed]
sylius_admin_api_checkout_addressing:
path: /addressing/{orderId}
methods: [PUT]
defaults:
_controller: sylius.controller.order:updateAction
_sylius:
serialization_version: $version
form: Sylius\Bundle\AdminApiBundle\Form\Type\AddressType
repository:
method: find
arguments: [$orderId]
state_machine:
graph: sylius_order_checkout
transition: address
sylius_admin_api_checkout_available_shipping_methods:
path: /select-shipping/{orderId}
methods: [GET]
defaults:
_controller: sylius.controller.show_available_shipping_methods:showAction
sylius_admin_api_checkout_select_shipping:
path: /select-shipping/{orderId}
methods: [PUT]
defaults:
_controller: sylius.controller.order:updateAction
_sylius:
serialization_version: $version
form: Sylius\Bundle\CoreBundle\Form\Type\Checkout\SelectShippingType
repository:
method: find
arguments: [$orderId]
state_machine:
graph: sylius_order_checkout
transition: select_shipping
sylius_admin_api_checkout_available_payment_methods:
path: /select-payment/{orderId}
methods: [GET]
defaults:
_controller: sylius.controller.show_available_payment_methods:showAction
sylius_admin_api_checkout_select_payment:
path: /select-payment/{orderId}
methods: [PUT, PATCH]
defaults:
_controller: sylius.controller.order:updateAction
_sylius:
serialization_version: $version
form: Sylius\Bundle\CoreBundle\Form\Type\Checkout\SelectPaymentType
repository:
method: find
arguments: [$orderId]
state_machine:
graph: sylius_order_checkout
transition: select_payment
sylius_admin_api_checkout_complete:
path: /complete/{orderId}
methods: [PUT]
defaults:
_controller: sylius.controller.order:updateAction
_sylius:
serialization_version: $version
form: Sylius\Bundle\CoreBundle\Form\Type\Checkout\CompleteType
repository:
method: find
arguments: [$orderId]
state_machine:
graph: sylius_order_checkout
transition: complete
|
vendor/sylius/sylius/src/Sylius/Bundle/AdminApiBundle/Resources/config/routing/checkout.yml
|
---
name: AWS EC2 Systems Manager
description: |-
<p>Amazon EC2 Systems Manager is a management service that helps you automatically collect software inventory, apply OS patches, create system images, and configure Windows and Linux operating systems. These capabilities help you define and track system configurations, prevent drift, and maintain software compliance of your EC2 and on-premises configurations. By providing a management approach that is designed for the scale and agility of the cloud but extends into your on-premises data center, EC2 Systems Manager makes it easier for you to seamlessly bridge your existing infrastructure with AWS.</p>
<p>EC2 Systems Manager is easy to use. Simply access EC2 Systems Manager from the EC2 Management Console, select the instances you want to manage, and define the management tasks you want to perform. EC2 Systems Manager is available now at no cost to manage both your EC2 and on-premises resources.</p>
image: http://kinlane-productions2.s3.amazonaws.com/api-evangelist-site/company/logos/Compute_AmazonEC2.png
created: "2021-02-04"
modified: "2021-02-04"
specificationVersion: "0.14"
x-rank: "10"
x-alexaRank: "14"
url: https://raw.githubusercontent.com/api-network/organizations/main/aws-ec2-systems-manager.yaml
tags:
- Relative Data
- Orchestration
- Management
- Have API Paths
- Have API
- Deployment
- API Service Provider
- API Service Provider
- API Provider
- Amazon Web Services
apis: []
x-common:
- type: x-documentation
url: http://docs.aws.amazon.com/ssm/latest/APIReference/Welcome.html
- type: x-documentation
url: https://docs.aws.amazon.com/index.html?nc2=h_ql_doc
- type: x-facebook
url: https://www.facebook.com/amazonwebservices
- type: x-faq
url: https://aws.amazon.com/ec2/systems-manager/faqs/
- type: x-getting-started
url: http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/systems-manager.html
- type: x-marketplace
url: https://aws.amazon.com/marketplace/?nc2=h_ql_mp
- type: x-marketplace
url: https://aws.amazon.com/marketplace/?nc2=h_mo
- type: x-press
url: https://press.aboutamazon.com/press-releases/aws
- type: x-privacy
url: https://aws.amazon.com/privacy/?nc1=f_pr
- type: x-support
url: https://console.aws.amazon.com/support/home/?nc1=f_dr
- type: x-terms-of-service
url: https://aws.amazon.com/terms/?nc1=f_pr
- type: x-twitter
url: https://twitter.com/awscloud
- type: x-website
url: https://aws.amazon.com/ec2/systems-manager/
include: []
maintainers:
- FN: <NAME>
x-twitter: apievangelist
email: <EMAIL>
---
|
_data/aws-ec2-systems-manager.yaml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-16 19:10"
game: "Unreal Tournament"
name: "DM-Evil Monastary"
author: "a_z"
description: "kill!"
releaseDate: "2006-03"
attachments:
- type: "IMAGE"
name: "DM-Evil_Monastary_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/E/DM-Evil_Monastary_shot_3.png"
- type: "IMAGE"
name: "DM-Evil_Monastary_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/E/DM-Evil_Monastary_shot_1.png"
- type: "IMAGE"
name: "DM-Evil_Monastary_shot_4.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/E/DM-Evil_Monastary_shot_4.png"
- type: "IMAGE"
name: "DM-Evil_Monastary_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/E/DM-Evil_Monastary_shot_2.png"
originalFilename: "dm-evil_monastary.zip"
hash: "965f374dfba7b00743eefe8f5a54ac2e29090349"
fileSize: 4835204
files:
- name: "DM-Evil Monastary.unr"
fileSize: 2518995
hash: "c01dbdae8dc8f778e5f2b1961b4b59455063f23c"
otherFiles: 5
dependencies: {}
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/DeathMatch/E/dm-evil_monastary.zip"
main: true
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/DeathMatch/MapsE/&file=dm-evil_monastary.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/DeathMatch&file=dm-evil_monastary.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Maps/DeathMatch/MapsE/dm-evil_monastary.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/DeathMatch/E/9/6/5f374d/dm-evil_monastary.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/DeathMatch/E/9/6/5f374d/dm-evil_monastary.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DM-Evil Monastary"
playerCount: "4-8"
themes:
Skaarj Crypt: 0.1
Ancient: 0.8
Natural: 0.1
bots: true
|
content/Unreal Tournament/Maps/DeathMatch/E/9/6/5f374d/dm-evil-monastary_[965f374d].yml
|
interactions:
- request:
body: null
headers:
USER-AGENT: [PubNub-Python-Asyncio/4.0.2]
method: GET
uri: http://pubsub.pubnub.com/v2/presence/sub-key/sub-c-33f55052-190b-11e6-bfbc-02ee2ddab7fe/channel/test-state-asyncio-ch/uuid/test-state-asyncio-uuid/data
response:
body: {string: '{"status": 200, "message": "OK", "payload": {"count": 5, "name":
"Alex"}, "service": "Presence"}'}
headers: {ACCEPT-RANGES: bytes, ACCESS-CONTROL-ALLOW-METHODS: 'OPTIONS, GET, POST',
ACCESS-CONTROL-ALLOW-ORIGIN: '*', AGE: '0', CACHE-CONTROL: no-cache, CONNECTION: keep-alive,
CONTENT-LENGTH: '96', CONTENT-TYPE: text/javascript; charset="UTF-8", DATE: 'Thu,
11 Aug 2016 20:06:08 GMT', SERVER: Pubnub Presence}
status: {code: 200, message: OK}
url: http://pubsub.pubnub.com/v2/presence/sub-key/sub-c-33f55052-190b-11e6-bfbc-02ee2ddab7fe/channel/test-state-asyncio-ch/uuid/test-state-asyncio-uuid/data?pnsdk=PubNub-Python-Asyncio%2F4.0.2&state=%7B%22name%22%3A%20%22Alex%22%2C%20%22count%22%3A%205%7D&uuid=test-state-asyncio-uuid
- request:
body: null
headers:
USER-AGENT: [PubNub-Python-Asyncio/4.0.2]
method: GET
uri: http://pubsub.pubnub.com/v2/presence/sub-key/sub-c-33f55052-190b-11e6-bfbc-02ee2ddab7fe/channel/test-state-asyncio-ch/uuid/test-state-asyncio-uuid
response:
body: {string: '{"status": 200, "uuid": "test-state-asyncio-uuid", "service":
"Presence", "message": "OK", "payload": {"count": 5, "name": "Alex"}, "channel":
"test-state-asyncio-ch"}'}
headers: {ACCEPT-RANGES: bytes, ACCESS-CONTROL-ALLOW-METHODS: 'OPTIONS, GET, POST',
ACCESS-CONTROL-ALLOW-ORIGIN: '*', AGE: '0', CACHE-CONTROL: no-cache, CONNECTION: keep-alive,
CONTENT-LENGTH: '167', CONTENT-TYPE: text/javascript; charset="UTF-8", DATE: 'Thu,
11 Aug 2016 20:06:08 GMT', SERVER: Pubnub Presence}
status: {code: 200, message: OK}
url: http://pubsub.pubnub.com/v2/presence/sub-key/sub-c-33f55052-190b-11e6-bfbc-02ee2ddab7fe/channel/test-state-asyncio-ch/uuid/test-state-asyncio-uuid?pnsdk=PubNub-Python-Asyncio%2F4.0.2&uuid=test-state-asyncio-uuid
version: 1
|
tests/integrational/fixtures/asyncio/state/single_channel.yaml
|
---
# Tasks that populate the Warewulf compute image (BOS) with required system files.
- name: warewulf_files, Import files into compute image
  command: 'wwsh -y file import {{ item }}'
  with_items:
    - '/etc/passwd'
    - '/etc/group'
    - '/etc/shadow'
    - '/etc/munge/munge.key'
- name: warewulf_files, Create temp network file to be imported into vnfs
  template:
    src: network.j2
    dest: /tmp/network.vnfs
- name: warewulf_files, Import /tmp/network file into vnfs
  command: wwsh -y file import /tmp/network.vnfs --name network
- name: warewulf_files, wwsh set network file in vnfs
  command: wwsh -y file set network --path /etc/sysconfig/network --mode=0444 --uid=0
- name: warewulf_files, Remove temporary network file
  file:
    path: /tmp/network.vnfs
    state: absent
- name: warewulf_files, copy in local resolv.conf
  copy:
    src: /etc/resolv.conf
    remote_src: true
    dest: '{{ compute_bos_path }}/etc/resolv.conf'
    # File modes are quoted strings: an unquoted 0444 parses as an octal
    # integer in YAML and can be misinterpreted by Ansible.
    mode: '0444'
    backup: true
- name: warewulf_files, Set limits in BOS
  template:
    src: 'limits.conf.j2'
    dest: '{{ compute_bos_path }}/etc/security/limits.conf'
    owner: 'root'
    group: 'root'
    mode: '0444'
- name: warewulf_files, copy in ntp.conf
  template:
    src: ntp.conf.j2
    dest: '{{ compute_bos_path }}/etc/ntp.conf'
    owner: 'root'
    group: 'root'
    mode: '0444'
    backup: true
- name: warewulf_files, check to see if ntpd is enabled in BOS
  command: chroot {{ compute_bos_path }} systemctl is-enabled ntpd
  register: result
  # is-enabled exits 1 for "disabled"; anything above 1 is a real failure.
  failed_when: 'result.rc > 1'
  changed_when: 'result.rc == 1'
- name: warewulf_files, enable ntpd in BOS
  command: chroot {{ compute_bos_path }} systemctl enable ntpd
  when: '"disabled" in result.stdout'
  notify: rebuild vnfs
- name: warewulf_files, Ensure BOS /root/.ssh exists
  file:
    path: '{{ compute_bos_path }}/root/.ssh'
    state: directory
    owner: 'root'
    group: 'root'
    mode: '0700'
- name: warewulf_files, copy in cluster key
  file:
    src: /root/.ssh/cluster.pub
    remote_src: true
    dest: '{{ compute_bos_path }}/root/.ssh/authorized_keys'
    owner: 'root'
    group: 'root'
    mode: '0400'
- name: warewulf_files, fstab in BOS Image
  template:
    src: fstab.j2
    dest: '{{ compute_bos_path }}/etc/fstab'
    owner: 'root'
    group: 'root'
    mode: '0444'
    backup: true
  when: warewulf_bos_fstab is defined
- name: warewulf_files, copy in slurm configs if enabled
  import_tasks: warewulf_slurm.yaml
  when: enable_slurm is defined and enable_slurm
- name: warewulf_files, copy in pbspro configs if enabled
  import_tasks: warewulf_pbspro.yaml
  when: enable_pbspro is defined and enable_pbspro
|
roles/warewulf/tasks/warewulf_files.yaml
|
items:
- uid: '@azure/arm-sql.CheckNameAvailabilityResponse'
name: CheckNameAvailabilityResponse
fullName: CheckNameAvailabilityResponse
children:
- '@azure/arm-sql.CheckNameAvailabilityResponse.available'
- '@azure/arm-sql.CheckNameAvailabilityResponse.message'
- '@azure/arm-sql.CheckNameAvailabilityResponse.name'
- '@azure/arm-sql.CheckNameAvailabilityResponse.reason'
langs:
- typeScript
type: interface
summary: Une réponse indiquant si le nom spécifié pour une ressource est disponible.
package: '@azure/arm-sql'
- uid: '@azure/arm-sql.CheckNameAvailabilityResponse.available'
name: available
fullName: available
children: []
langs:
- typeScript
type: property
summary: "True si le nom est disponible\_; sinon, false.\n**REMARQUE\_: Cette propriété n’est pas sérialisée. Il peut uniquement être rempli par le serveur.**"
optional: true
syntax:
content: 'available?: undefined | false | true'
return:
type:
- undefined | false | true
package: '@azure/arm-sql'
- uid: '@azure/arm-sql.CheckNameAvailabilityResponse.message'
name: message
fullName: message
children: []
langs:
- typeScript
type: property
summary: "Un message expliquant pourquoi le nom n’est pas disponible. Sera null si le nom est disponible.\n**REMARQUE\_: Cette propriété n’est pas sérialisée. Il peut uniquement être rempli par le serveur.**"
optional: true
syntax:
content: 'message?: undefined | string'
return:
type:
- undefined | string
package: '@azure/arm-sql'
- uid: '@azure/arm-sql.CheckNameAvailabilityResponse.name'
name: name
fullName: name
children: []
langs:
- typeScript
type: property
summary: "Le nom dont la disponibilité a été vérifiée.\n**REMARQUE\_: Cette propriété n’est pas sérialisée. Il peut uniquement être rempli par le serveur.**"
optional: true
syntax:
content: 'name?: undefined | string'
return:
type:
- undefined | string
package: '@azure/arm-sql'
- uid: '@azure/arm-sql.CheckNameAvailabilityResponse.reason'
name: reason
fullName: reason
children: []
langs:
- typeScript
type: property
summary: "Le code de raison expliquant pourquoi le nom n’est pas disponible. Sera null si le nom est disponible.\nLes valeurs possibles incluent\_: «\_Invalid\_», «\_AlreadyExists\_» **Remarque\_: Cette propriété n’est pas sérialisée. Il peut uniquement être rempli par le serveur.**"
optional: true
syntax:
content: 'reason?: CheckNameAvailabilityReason'
return:
type:
- '@azure/arm-sql.CheckNameAvailabilityReason'
package: '@azure/arm-sql'
references:
- uid: '@azure/arm-sql.CheckNameAvailabilityReason'
name: CheckNameAvailabilityReason
spec.typeScript:
- name: CheckNameAvailabilityReason
fullName: CheckNameAvailabilityReason
uid: '@azure/arm-sql.CheckNameAvailabilityReason'
|
docs-ref-autogen/@azure/arm-sql/CheckNameAvailabilityResponse.yml
|
apiVersion: krew.googlecontainertools.github.com/v1alpha2
kind: Plugin
metadata:
name: resource-capacity
spec:
version: v0.7.0
homepage: https://github.com/robscott/kube-capacity
shortDescription: Provides an overview of resource requests, limits, and utilization
platforms:
- selector:
matchLabels:
os: darwin
arch: amd64
bin: kube-capacity
files:
- from: "*"
to: "."
uri: https://github.com/robscott/kube-capacity/releases/download/v0.7.0/kube-capacity_0.7.0_Darwin_x86_64.tar.gz
sha256: 9881ca7336b5618390164e643dbdca4dd54d275ca8d6e587052473aad545dd34
- selector:
matchLabels:
os: darwin
arch: arm64
bin: kube-capacity
files:
- from: "*"
to: "."
uri: https://github.com/robscott/kube-capacity/releases/download/v0.7.0/kube-capacity_0.7.0_Darwin_arm64.tar.gz
sha256: 78fc70c2052d3f94a152f4ad78850d50badb9c137c456f9655abb5abe1f5f778
- selector:
matchLabels:
os: linux
arch: amd64
bin: kube-capacity
files:
- from: "*"
to: "."
uri: https://github.com/robscott/kube-capacity/releases/download/v0.7.0/kube-capacity_0.7.0_Linux_x86_64.tar.gz
sha256: a14039c9d677f291095c1164795c4016beae1f480d3bcb0b101ac79b0b72ed32
- selector:
matchLabels:
os: linux
arch: arm64
bin: kube-capacity
files:
- from: "*"
to: "."
uri: https://github.com/robscott/kube-capacity/releases/download/v0.7.0/kube-capacity_0.7.0_Linux_arm64.tar.gz
sha256: a7cf6d17cb11f6b6eb3966443ca3db55437006a809a6a3851551550d07da0ac4
- selector:
matchLabels:
os: windows
arch: amd64
bin: kube-capacity.exe
files:
- from: "*"
to: "."
uri: https://github.com/robscott/kube-capacity/releases/download/v0.7.0/kube-capacity_0.7.0_Windows_x86_64.tar.gz
sha256: cc5c9bf506544c4df69052802f8dd5a075d0dce42bcacedf03407de945a2d279
description: |
A simple CLI that provides an overview of the resource requests, limits, and utilization in a Kubernetes cluster.
|
plugins/resource-capacity.yaml
|
# OpenAPI 3.x document (it uses servers/components/requestBody, which do not
# exist in Swagger 2.0). A document must declare exactly one specification
# version, so the conflicting legacy 'swagger: 2.0' key was removed.
openapi: 3.0.0
info:
version: 0.1.0
title: OAIE Sample
description: >
https://github.com/OAIE/oaie-sketch
<!--OAIE.viz--><div
style='height:500px;background-color:#eee;overflow:auto;position:relative;white-space:nowrap;border-radius:10px;'><span
style='border-bottom:1px solid
black;position:absolute;left:392.311px;top:242.996px;width:132px;transform:matrix(0.432868,
-0.901457, 0.901457, 0.432868, 0, 0);transform-origin:0 0;'><span
style='border:1px solid
black;width:5px;height:5px;position:absolute;right:0;transform:rotate(45deg);transform-origin:100%
0;border-left:0;border-bottom:0;'></span></span><span
style='border-bottom:1px solid
black;position:absolute;left:675.997px;top:194.101px;width:184.984px;transform:matrix(-0.864897,
-0.501949, 0.501949, -0.864897, 0, 0);transform-origin:0 0;'><span
style='border:1px solid
black;width:5px;height:5px;position:absolute;right:0;transform:rotate(45deg);transform-origin:100%
0;border-left:0;border-bottom:0;'></span></span><div
oaie-key='operation.get./sample/{sampleId}' style='border:1px solid rgb(97,
175, 254);background:rgb(240, 247,
255);position:absolute;left:293px;top:243px;width:153px;height:95px;padding:5px;border-radius:5px;'><div><b>get/sample/{sampleId}</b></div><div
style='white-space:normal'>getSample </div><div>sampleId
(string)</div></div><div oaie-key='operation.put./sample/{sampleId}'
style='border:1px solid rgb(252, 161, 48);background:rgb(255, 246,
236);position:absolute;left:676px;top:191px;width:153px;height:95px;padding:5px;border-radius:5px;'><div><b>put/sample/{sampleId}</b></div><div
style='white-space:normal'>putSample </div><div>sampleId
(string)</div></div><div oaie-key='schema.Sample'
style='position:absolute;left:429px;top:28px;width:87px;height:96px;border:1px
solid
silver;background:white;padding:5px;border-radius:5px;'><div><b>Sample</b></div><div>one
(string)</div><div>two (string)</div></div></div><div
style='padding:5px;color:gray;float:right;'>OAIE
visualization</div><!--/OAIE.viz-->
servers:
- url: 'localhost:8080'
paths:
'/sample/{sampleId}':
get:
operationId: getSample
tags:
- Sample
parameters:
- in: path
name: sampleId
schema:
type: string
required: true
responses:
'200':
description: Get the Sample
content:
application/json:
schema:
$ref: '#/components/schemas/Sample'
_method: get
_path: '/sample/{sampleId}'
_key: 'operation.get./sample/{sampleId}'
put:
operationId: putSample
tags:
- Sample
parameters:
- in: path
name: sampleId
schema:
type: string
required: true
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/Sample'
responses:
'204':
description: No content
_method: put
_path: '/sample/{sampleId}'
_key: 'operation.put./sample/{sampleId}'
components:
schemas:
Sample:
properties:
one:
type: string
two:
type: string
|
configs/api-spec.yml
|
parameters:
- name: subscriptionPrincipal
  displayName: Azure Resource Manager connection
  type: string
  default: ''
- name: deploymentPackage
  displayName: Location and filename of deployment zip
  type: string
  default: '$(System.ArtifactsDirectory)/**/*.zip'
- name: deployImage
  displayName: Deployment Pool Image
  type: string
  default: ubuntu-latest
  values:
  - windows-latest
  - vs2017-win2016
  - ubuntu-latest
  - ubuntu-16.04
  - macOS-latest
  - macOS-10.14
trigger: none
# variables to be defined for pipeline:
# location: e.g. 'westeurope'
# tf_rgstorage: e.g. 'rg-Storage'
# tf_storageaccount: e.g. 'terraformstatestorage'
# tf_storagecontainer: e.g. 'terraform'
# tf_applicationname: e.g. 'lwpulterraformweb'
# tf_environmentname: e.g. 'Test' or 'Dev'
# tf_storagekey: e.g. 'WillBeSetWhileRuntime' <-- DO NOT store the real key here! It will be determined by the pipeline itself at runtime.
jobs:
- job:
  displayName: 'CD job'
  pool:
    # Lower-case 'parameters' context, consistent with the rest of this file.
    vmImage: '${{ parameters.deployImage }}'
  steps:
  # Preparation: get build artifact from previous stage
  - task: DownloadBuildArtifacts@1
    inputs:
      buildType: 'current'
      downloadType: 'specific'
      downloadPath: '$(System.ArtifactsDirectory)'
  # 1. Ensure the storage backend for terraform exists (Az modules are used!)
  - task: AzureCLI@2
    displayName: 'Azure CLI '
    inputs:
      azureSubscription: '${{ parameters.subscriptionPrincipal }}'
      scriptType: pscore
      scriptLocation: inlineScript
      inlineScript: |
        # Create Azure resource group for Terraform state storage
        az group create --location $(location) --name $(tf_rgstorage)
        # Create storage account
        az storage account create --name $(tf_storageaccount) --resource-group $(tf_rgstorage) --location $(location) --sku Standard_LRS
        # Create storage container
        az storage container create --name $(tf_storagecontainer) --account-name $(tf_storageaccount)
        az storage account keys list -g $(tf_rgstorage) -n $(tf_storageaccount)
    env:
      AZURE_DEVOPS_CLI_PAT: $(System.AccessToken)
  # 2. Get the access key for terraform backend storage
  - task: AzurePowerShell@5
    displayName: 'Azure PowerShell script: Set storage access key'
    inputs:
      azureSubscription: '${{ parameters.subscriptionPrincipal }}'
      ScriptType: InlineScript
      Inline: |
        # Using this script we will fetch storage key which is required in terraform file to authenticate backend storage account
        $key=(Get-AzStorageAccountKey -ResourceGroupName $(tf_rgstorage) -AccountName $(tf_storageaccount)).Value[0]
        Write-Host "##vso[task.setvariable variable=tf_storagekey]$key"
      azurePowerShellVersion: LatestVersion
  # 3. Replace the tokens inside the Terraform script files
  - task: replacetokens@3
    displayName: 'Replace tokens in **/*.tf'
    inputs:
      targetFiles: '**/*.tf'
      actionOnMissing: 'warn'
      keepToken: false
      tokenPrefix: '__'
      tokenSuffix: '__'
  # 4. Install Terraform on agent
  - task: TerraformInstaller@0
    displayName: 'Install Terraform 0.15.5'
    inputs:
      terraformVersion: 0.15.5
  # 5. Initialize Terraform on the agent
  - task: TerraformTaskV2@2
    displayName: 'Terraform : init'
    inputs:
      workingDirectory: '$(System.ArtifactsDirectory)/drop/Terraform/'
      backendServiceArm: '${{ parameters.subscriptionPrincipal }}'
      backendAzureRmResourceGroupName: '$(tf_rgstorage)'
      # No trailing whitespace: '...$(tf_storageaccount) ' would make the
      # backend storage account name invalid.
      backendAzureRmStorageAccountName: '$(tf_storageaccount)'
      backendAzureRmContainerName: '$(tf_storagecontainer)'
      backendAzureRmKey: '$(tf_applicationname)-$(tf_environmentname)-terraform.tfstate'
  # 6. Plan the infrastructure changes
  - task: TerraformTaskV2@2
    displayName: 'Terraform : plan'
    inputs:
      command: plan
      workingDirectory: '$(System.ArtifactsDirectory)/drop/Terraform/'
      environmentServiceNameAzureRM: '${{ parameters.subscriptionPrincipal }}'
  # 7. Auto apply the infrastructure
  - task: TerraformTaskV2@2
    displayName: 'Terraform : apply'
    inputs:
      command: apply
      workingDirectory: '$(System.ArtifactsDirectory)/drop/Terraform/'
      environmentServiceNameAzureRM: '${{ parameters.subscriptionPrincipal }}'
  # 8. Publish the web application to the infrastructure
  - task: AzureRmWebAppDeployment@4
    displayName: 'Azure App Service Deploy: app-$(tf_applicationname)-$(tf_environmentname)'
    inputs:
      azureSubscription: '${{ parameters.subscriptionPrincipal }}'
      appType: webAppLinux
      WebAppName: 'app-$(tf_applicationname)-$(tf_environmentname)'
      packageForLinux: '${{ parameters.deploymentPackage }}'
Pipelines/YAML/03_Terraform_CD.yml
|
name: CI
on:
  push:
    branches: [ master ]
    paths-ignore:
      - 'README.md'
      - 'docs/**'
      - 'cli/**'
  pull_request:
    branches: [ master ]
    paths-ignore:
      - 'README.md'
      - 'docs/**'
      - 'cli/**'
  # For systems with an upstream API that could drift unexpectedly (like most SaaS systems, etc.),
  # we recommend testing at a regular interval not necessarily tied to code changes. This will
  # ensure you are alerted to something breaking due to an API change, even if the code did not
  # change.
  schedule:
    - cron: '0 13 * * *'
jobs:
  # ensure the code builds...
  build:
    name: Build
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.18'
        id: go
      - name: Check out code into the Go module directory
        uses: actions/checkout@v3
      - name: Download Go Dependencies
        run: |
          go mod vendor && go mod tidy
      - name: Build
        run: |
          go build -v .
  # run acceptance tests
  test:
    needs: build
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.18'
        id: go
      - uses: hashicorp/setup-terraform@v2
        with:
          terraform_version: '>=0.13.x'
          terraform_wrapper: false
      - name: Check out code into the Go module directory
        uses: actions/checkout@v3
      - name: Download Go Dependencies
        run: |
          go mod vendor && go mod tidy
      - name: TF acceptance tests
        env:
          TF_ACC: "1"
          ZIA_USERNAME: ${{ secrets.ZIA_USERNAME }}
          ZIA_PASSWORD: ${{ secrets.ZIA_PASSWORD }}
          ZIA_API_KEY: ${{ secrets.ZIA_API_KEY }}
          ZIA_CLOUD: ${{ secrets.ZIA_CLOUD }}
          # Set whatever additional acceptance test env vars here. You can
          # optionally use data from your repository secrets using the
          # following syntax:
          # SOME_VAR: ${{ secrets.SOME_VAR }}
        run: |
          go test -v -cover ./zia -v -parallel 30 -timeout 60m
          go mod vendor && go mod tidy
          go build ./cli/ziaActivator.go
          mv ziaActivator /usr/local/bin
          ziaActivator
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      # checkout@v3 for consistency with the build and test jobs above.
      - uses: actions/checkout@v3
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v2
        with:
          version: v1.45
          args: --timeout=60m
          only-new-issues: true
.github/workflows/ci.yml
|
# Rolling deployment: snapshot the current fleet, launch replacements,
# configure and register them, then terminate the old hosts.
- name: Get current inventory
  hosts: tag_Name_react_server_box
  user: ubuntu
  tasks:
  - action: ec2_facts
  - name: Add existing hosts to group
    group_by: key=existing_hosts
- name: Launch instances
  hosts: localhost
  connection: local
  # Booleans written canonically as true/false (yamllint 'truthy').
  gather_facts: false
  tasks:
  - name: Provision a set of instances
    ec2:
      key_name: walkscore_base_keypair2
      group: react-server
      instance_type: t2.medium
      image: ami-d732f0b7
      region: us-west-2
      vpc_subnet_id: subnet-d94db0ae
      assign_public_ip: true
      instance_profile_name: react-server-box
      wait: true
      exact_count: 2
      count_tag:
        Name: react-server-box
      instance_tags:
        Name: react-server-box
    register: ec2
  - name: Add all instance public IPs to host group
    add_host: hostname={{ item.public_ip }} groups=react-server-box-hosts
    with_items: '{{ ec2.instances }}'
  - name: Wait for SSH to come up
    wait_for: host={{ item.public_dns_name }} port=22 delay=5 timeout=320 state=started
    with_items: '{{ ec2.instances }}'
- name: Configuration
  hosts: react-server-box-hosts
  user: ubuntu
  tasks:
  - name: Gather facts about instances
    action: ec2_facts
  - name: Set up instance packages
    script: instance-setup.sh
  - name: Install React Server Slack API tokens
    script: decrypt_credentials.py -k react-server-slack.api.token > react-server-slack-api-token
  - name: Install Asini Slack API tokens
    script: decrypt_credentials.py -k asini-slack.api.token > asini-slack-api-token
  - name: Copy Docker Compose configuration
    copy: src=docker-compose.yml dest=/home/ubuntu/docker-compose.yml mode=0644
  - name: Copy NGINX configuration
    copy: src=../nginx.conf dest=/home/ubuntu/nginx.conf mode=0644
  - name: Deploy services
    shell: ASINI_SLACK_API_TOKEN=`cat asini-slack-api-token | tr -d '\n'` REACT_SERVER_SLACK_API_TOKEN=`cat react-server-slack-api-token | tr -d '\n'` docker-compose up -d
    become: true
  - name: Wait for the service to become available
    wait_for: port=80 delay=5
  - name: Register instances with load balancer
    local_action:
      module: ec2_elb
      instance_id: '{{ ansible_ec2_instance_id }}'
      ec2_elbs: react-server-io
      region: us-west-2
      state: present
      wait: false
  - name: Give ELB time to put new instance into service
    pause: seconds=30
- name: Stop old hosts
  hosts: existing_hosts
  connection: local
  user: ubuntu
  tasks:
  - action: ec2_facts
  - name: Terminate instance
    local_action:
      module: ec2
      state: 'absent'
      region: us-west-2
      instance_id: '{{ ansible_ec2_instance_id }}'
packages/react-server-website/deployment/setup.yml
|
init_config:

instances:
  ## @param server - string - required
  ## MySQL server to connect to.
  ## NOTE: Even if the server name is "localhost", the agent connects to MySQL using TCP/IP, unless you also
  ## provide a value for the sock key (below).
  #
  - server: 127.0.0.1

    ## @param user - string - required
    ## Datadog Username created to connect to MySQL.
    #
    user: datadog

    ## @param pass - string - required
    ## Password associated with the datadog user.
    #
    pass: <PASSWORD>

    ## @param port - integer - optional - default: 3306
    ## Port to use when connecting to MySQL.
    #
    port: 3306

    ## @param sock - string - optional
    ## Set the sock parameter if you want to connect to MySQL using a Unix Socket.
    #
    # sock: <SOCK_PATH>

    ## @param defaults_file - string - optional
    ## Enter the path of an alternate configuration mechanism file.
    #
    # defaults_file: <CONFIGURATION_FILE_PATH>

    ## @param connect_timeout - integer - optional - default: 10
    ## Change the timeout time in second for the Agent queries.
    #
    # connect_timeout: 10

    ## @param tags - list of key:value elements - optional
    ## List of tags to attach to every metric, event and service check emitted by this integration.
    ##
    ## Learn more about tagging: https://docs.datadoghq.com/tagging/
    #
    tags:
      - database:mysql
      # - <KEY_2>:<VALUE_2>

    ## @param ssl - object - optional
    ## Use this parameter to configure a SSL connection between the Agent and MySQL:
    ## `key`: Path to your key file.
    ## `cert`: path to your cert file.
    ## `ca`: path to your ca file.
    #
    # ssl:
    #   key: <KEY_FILE_PATH>
    #   cert: <CERT_FILE_PATH>
    #   ca: <CA_PATH_FILE>

    ## @param max_custom_queries - integer - optional - default: 20
    ## Set the maximum number of custom queries to execute with this integration.
    #
    # max_custom_queries: 20

    ## @param queries - custom object - optional
    ## Define custom queries to collect custom metrics on your MySQL
    ## See https://docs.datadoghq.com/integrations/faq/how-to-collect-metrics-from-custom-mysql-queries to learn more.
    ##
    queries:
      - query: SELECT value FROM stocks.trading WHERE company = 'MarketBiz'
        metric: mysql.query_custom
        ## Tags must be plain "key:value" strings. The previous
        ## "- tester: mysql" (space after the colon) parsed as a YAML
        ## mapping rather than the intended tag string.
        tags:
          - tester:mysql
        type: gauge
        field: value
|
script/conf.d/mysql.d/conf.yaml
|
name: messenger_from_the_scratch
description: A new Flutter application.

# The following line prevents the package from being accidentally published to
# pub.dev using `pub publish`. This is preferred for private packages.
publish_to: 'none' # Remove this line if you wish to publish to pub.dev

# The following defines the version and build number for your application.
# A version number is three numbers separated by dots, like 1.2.43
# followed by an optional build number separated by a +.
# Both the version and the builder number may be overridden in flutter
# build by specifying --build-name and --build-number, respectively.
# In Android, build-name is used as versionName while build-number used as versionCode.
# Read more about Android versioning at https://developer.android.com/studio/publish/versioning
# In iOS, build-name is used as CFBundleShortVersionString while build-number used as CFBundleVersion.
# Read more about iOS versioning at
# https://developer.apple.com/library/archive/documentation/General/Reference/InfoPlistKeyReference/Articles/CoreFoundationKeys.html
version: 1.0.0+1

environment:
  sdk: ">=2.2.2 <3.0.0"

dependencies:
  flutter:
    sdk: flutter
  intl: ^0.15.8
  cupertino_icons: ^0.1.2
  infinite_listview: ^1.0.0
  firebase_auth: ^0.14.0+4 # to auth with firebase
  google_sign_in: ^4.0.7 # to auth with google account
  flutter_bloc: ^0.21.0 # helps in implementing blocs
  equatable: ^0.5.0 # helps in comparing class using their values
  cloud_firestore: ^0.12.9+1 # Firebase Database
  image_picker: ^0.6.1+4 # image picker used for picking images from gallery
  firebase_storage: ^3.0.6 # storage for storing image files
  flutter_launcher_icons: ^0.7.2+1
  shared_preferences: ^0.5.3+4
  file_picker: ^1.4.0+1
  emoji_picker: ^0.0.2
  video_player: ^0.10.2+1
  flutter_downloader: ^1.2.2
  downloads_path_provider: ^0.1.0
  flushbar: ^1.9.0
  path_provider: ^1.3.0
  cached_network_image: ^2.0.0-rc

dev_dependencies:
  flutter_test:
    sdk: flutter
  mockito: ^4.1.0

flutter_icons:
  ios: true
  android: true
  image_path_ios: "assets/launcher/ic_launcher.png"
  image_path_android: "assets/launcher/ic_launcher.png"
  adaptive_icon_background: "assets/launcher/ic_background.png"
  adaptive_icon_foreground: "assets/launcher/ic_foreground.png"

# For information on the generic Dart part of this file, see the
# following page: https://dart.dev/tools/pub/pubspec

# The following section is specific to Flutter.
flutter:
  assets:
    - assets/
    - assets/launcher/
    - assets/fonts/

  # The following line ensures that the Material Icons font is
  # included with your application, so that you can use the icons in
  # the material Icons class.
  uses-material-design: true

  fonts:
    - family: Manrope
      fonts:
        - asset: assets/fonts/manrope-regular.otf
        - asset: assets/fonts/manrope-bold.otf
          weight: 700
        - asset: assets/fonts/manrope-extrabold.otf
          weight: 800
        # Semi-bold is weight 600; the previous value (400, i.e. regular)
        # collided with the default weight of manrope-regular.otf, so the
        # semibold face was never selectable.
        - asset: assets/fonts/manrope-semibold.otf
          weight: 600
    # - family: Trajan Pro
    #   fonts:
    #     - asset: fonts/TrajanPro.ttf
    #     - asset: fonts/TrajanPro_Bold.ttf
    #       weight: 700
  #
  # For details regarding fonts from package dependencies,
  # see https://flutter.dev/custom-fonts/#from-packages
|
pubspec.yaml
|
items:
- uid: azure-arm-compute.VirtualMachineScaleSetIPConfiguration
id: azure-arm-compute.VirtualMachineScaleSetIPConfiguration
name: VirtualMachineScaleSetIPConfiguration
summary: Describes a virtual machine scale set network profile's IP configuration.
fullName: azure-arm-compute.VirtualMachineScaleSetIPConfiguration
source:
id: azure-arm-compute.VirtualMachineScaleSetIPConfiguration
path: >-
lib\services\computeManagement2\lib\models\virtualMachineScaleSetIPConfiguration.js
startLine: 20
remote:
branch: master
path: >-
lib\services\computeManagement2\lib\models\virtualMachineScaleSetIPConfiguration.js
repo: 'https://github.com/Azure/azure-sdk-for-node.git'
type: Class
children:
- 'azure-arm-compute.VirtualMachineScaleSetIPConfiguration.#ctor'
- 'azure-arm-compute.VirtualMachineScaleSetIPConfiguration#mapper'
langs:
- js
- id: 'azure-arm-compute.VirtualMachineScaleSetIPConfiguration.#ctor'
uid: 'azure-arm-compute.VirtualMachineScaleSetIPConfiguration.#ctor'
parent: azure-arm-compute.VirtualMachineScaleSetIPConfiguration
name: VirtualMachineScaleSetIPConfiguration()
fullName: >-
azure-arm-compute.VirtualMachineScaleSetIPConfiguration.VirtualMachineScaleSetIPConfiguration()
summary: ''
type: Constructor
syntax:
content: new VirtualMachineScaleSetIPConfiguration()
langs:
- js
- uid: 'azure-arm-compute.VirtualMachineScaleSetIPConfiguration#mapper'
id: 'azure-arm-compute.VirtualMachineScaleSetIPConfiguration#mapper'
parent: azure-arm-compute.VirtualMachineScaleSetIPConfiguration
name: mapper()
summary: Defines the metadata of VirtualMachineScaleSetIPConfiguration
fullName: azure-arm-compute.VirtualMachineScaleSetIPConfiguration.mapper()
type: Function
syntax:
return:
type:
- object
description: metadata of VirtualMachineScaleSetIPConfiguration
content: function mapper()
langs:
- js
|
javascript/docs-ref-autogen/azure-arm-compute/VirtualMachineScaleSetIPConfiguration.yml
|
name : ventoy
version : 1.0.54
release : 1
source :
- https://github.com/ventoy/Ventoy/releases/download/v1.0.54/ventoy-1.0.54-linux.tar.gz : 8304e7833b53e94c3989c57fcf4de16feab3e92cf6947485c92ab5e5aad5aaba
homepage : https://www.ventoy.net
license : GPL-3.0-or-later
component : system.utils
summary : A new bootable USB solution
description: |
  Ventoy is an open source tool to create bootable USB drive for ISO/WIM/IMG/VHD(x)/EFI files. With ventoy, you don't need to format the disk over and over, you just need to copy the image files to the USB drive and boot it. You can copy many image files at a time and ventoy will give you a boot menu to select them. x86 Legacy BIOS, IA32 UEFI, x86_64 UEFI, ARM64 UEFI and MIPS64EL UEFI are supported in the same way. Both MBR and GPT partition style are supported in the same way.
builddeps :
- pkgconfig(Qt5Core)
- pkgconfig(gtk+-2.0)
- pkgconfig(gtk+-3.0)
- pkgconfig(libglade-2.0)
setup : |
%patch -p1 < $pkgfiles/0001-save-logs-to-home-dir.patch
sed -i 's|log\.txt|~/.ventoy/log.txt|g' WebUI/static/js/languages.js
cd tool/x86_64
for file in *.xz; do
xzcat $file > ${file%.xz}
chmod +x ${file%.xz}
done
rm *.xz
# Clean up unused binaries
rm -v xzcat hexdump
install : |
install -Dm00644 boot/* -t $installdir/usr/share/ventoy/boot
install -Dm00755 tool/*.{cer,glade,json,sh} -t $installdir/usr/share/ventoy/tool
install -Dm00755 tool/x86_64/* -t $installdir/usr/share/ventoy/tool/x86_64
install -Dm00755 VentoyGUI.x86_64 -vt $installdir/usr/share/ventoy
install -Dm00755 *.sh -vt $installdir/usr/share/ventoy
cp --no-preserve=o -avt plugin WebUI $installdir/usr/share/ventoy
install -Dm00644 ventoy/* -vt $installdir/usr/share/ventoy/ventoy
install -Dm00644 $pkgfiles/ventoy.desktop -t $installdir/usr/share/applications
install -Dm00644 WebUI/static/img/VentoyLogo.png $installdir/usr/share/pixmaps/ventoy.png
install -Dm00755 $pkgfiles/bin/* -vt $installdir/usr/bin/
# Link system binaries
for binary in xzcat hexdump; do
ln -svf /usr/bin/$binary $installdir/usr/share/ventoy/tool/x86_64/
done
|
factory/ventoy/package.yml
|
apiVersion: v1
kind: Template
metadata:
  name: kafka-exporter
  annotations:
    "openshift.io/display-name": kafka-exporter
    description: |
      Kafka prometheus exporter
    iconClass: fa fa-cogs
    tags: "monitoring, prometheus, kafka-exporter"
parameters:
  - name: APP_NAME
    description: "Value for app label."
  - name: NAME_SPACE
    description: "The name of the namespace (Openshift project)"
  - name: KAFKA_BROKER
    value: kaas-broker.org.com:443 # CHANGE THIS
  - name: TLS_SECRET_NAME
    value: tls-secrets
  - name: CA_FILE
    value: tls-root-ca.cert
  - name: CERT_FILE
    value: tls-cert.pem
  - name: KEY_FILE
    value: tls-key.pem
objects:
  - apiVersion: apps.openshift.io/v1
    kind: DeploymentConfig
    metadata:
      name: ${APP_NAME}
      labels:
        app: ${APP_NAME}
    spec:
      replicas: 1
      selector:
        deploymentconfig: ${APP_NAME}
        app: ${APP_NAME}
      template:
        metadata:
          labels:
            deploymentconfig: ${APP_NAME}
            app: ${APP_NAME}
        spec:
          containers:
            - name: ${APP_NAME}
              image: danielqsj/kafka-exporter:latest
              ports:
                - name: kexporter
                  containerPort: 9308
                  protocol: TCP
              args:
                - '--kafka.server=${KAFKA_BROKER}'
                - '--kafka.version=2.0.0'
                - '--tls.enabled'
                - '--no-sasl.handshake'
                # Use the file-name parameters so overriding CA_FILE /
                # CERT_FILE / KEY_FILE keeps the secret mount paths (below)
                # and these flags in sync; previously they were hard-coded.
                - '--tls.ca-file=/etc/secrets/${CA_FILE}'
                - '--tls.cert-file=/etc/secrets/${CERT_FILE}'
                - '--tls.key-file=/etc/secrets/${KEY_FILE}'
              imagePullPolicy: Always
              livenessProbe:
                tcpSocket:
                  port: kexporter # named port
                initialDelaySeconds: 10
                timeoutSeconds: 2
                periodSeconds: 5
                failureThreshold: 5
                successThreshold: 1
              readinessProbe:
                httpGet:
                  # NOTE(review): confirm this exporter build serves /health —
                  # some kafka-exporter releases expose /healthz instead.
                  path: /health
                  port: kexporter
                initialDelaySeconds: 5
                timeoutSeconds: 2
                periodSeconds: 5
              volumeMounts:
                - name: tls-secrets
                  mountPath: /etc/secrets
                  readOnly: true
          volumes:
            - name: tls-secrets
              secret:
                secretName: ${TLS_SECRET_NAME}
                items:
                  - key: tls-root-ca.cert
                    path: ${CA_FILE}
                  - key: tls-cert.pem
                    path: ${CERT_FILE}
                  - key: tls-key.pem
                    path: ${KEY_FILE}
      triggers:
        - type: ConfigChange
  - apiVersion: v1
    kind: Service
    metadata:
      labels:
        name: ${APP_NAME}
      name: ${APP_NAME}
      namespace: "${NAME_SPACE}"
    spec:
      ports:
        - name: kexporter
          port: 9308
          protocol: TCP
          targetPort: kexporter
      selector:
        app: ${APP_NAME}
      sessionAffinity: None
      type: ClusterIP
  - apiVersion: route.openshift.io/v1
    kind: Route
    metadata:
      name: ${APP_NAME}
      namespace: "${NAME_SPACE}"
    spec:
      port:
        targetPort: kexporter
      to:
        kind: Service
        name: ${APP_NAME}
        weight: 100
      tls:
        termination: edge
      wildcardPolicy: None
|
kafka-exporter-template.yaml
|
items:
- uid: '@azure/arm-sql.OperationImpact'
name: OperationImpact
fullName: OperationImpact
children:
- '@azure/arm-sql.OperationImpact.changeValueAbsolute'
- '@azure/arm-sql.OperationImpact.changeValueRelative'
- '@azure/arm-sql.OperationImpact.name'
- '@azure/arm-sql.OperationImpact.unit'
langs:
- typeScript
type: interface
summary: 'L''impatto di un''operazione, in termini assoluti e relativi.'
package: '@azure/arm-sql'
- uid: '@azure/arm-sql.OperationImpact.changeValueAbsolute'
name: changeValueAbsolute
fullName: changeValueAbsolute
children: []
langs:
- typeScript
type: property
summary: |-
L'impatto assoluto alla dimensione.
**NOTA: Questa proprietà non verrà serializzata. Può essere usato solo dal server.**
optional: true
syntax:
content: 'changeValueAbsolute?: undefined | number'
return:
type:
- undefined | number
package: '@azure/arm-sql'
- uid: '@azure/arm-sql.OperationImpact.changeValueRelative'
name: changeValueRelative
fullName: changeValueRelative
children: []
langs:
- typeScript
type: property
summary: 'L''impatto relativo alla dimensione (null se non applicabile) **Nota: Questa proprietà non verrà serializzata. Può essere usato solo dal server.**'
optional: true
syntax:
content: 'changeValueRelative?: undefined | number'
return:
type:
- undefined | number
package: '@azure/arm-sql'
- uid: '@azure/arm-sql.OperationImpact.name'
name: name
fullName: name
children: []
langs:
- typeScript
type: property
summary: |-
Il nome della dimensione impatto.
**NOTA: Questa proprietà non verrà serializzata. Può essere usato solo dal server.**
optional: true
syntax:
content: 'name?: undefined | string'
return:
type:
- undefined | string
package: '@azure/arm-sql'
- uid: '@azure/arm-sql.OperationImpact.unit'
name: unit
fullName: unit
children: []
langs:
- typeScript
type: property
summary: |-
L'unità in cui stimato è misurato l'impatto alla dimensione.
**NOTA: Questa proprietà non verrà serializzata. Può essere usato solo dal server.**
optional: true
syntax:
content: 'unit?: undefined | string'
return:
type:
- undefined | string
package: '@azure/arm-sql'
|
docs-ref-autogen/@azure/arm-sql/OperationImpact.yml
|
---
-
option_id:
name:
name: option_id
type: bigint
null: NO
key: PRI
default:
extra: auto_increment
max_length:
input: int_input
size: 4
min_value: -32767
max_value: 65535
length: required|min_length[%s]|max_length[%s]
human_name: Option Id
before:
after:
rules:
attributes:
name: option_id
id: option_id
class: stdINPUT
value: option_id
title: option_id,option_id,auto_increment
alt: option_id
style:
hidden: false
key:
name: option_id
auto_increment: true
-
blog_id:
name:
name: blog_id
type: int
null: NO
key: PRI
default: 0
extra:
max_length: 32000
input: int_input
size: 6
min_value: 0
max_value: 65535
length: required|min_length[%s]|max_length[%s]
human_name: Blog Id
before:
after:
rules:
attributes:
name: blog_id
id: blog_id
class: stdINPUT
value: blog_id
title: blog_id,blog_id,
alt: blog_id
style:
hidden: false
key: false
-
option_name:
name:
name: option_name
type: varchar
null: NO
key: PRI
default:
extra:
max_length: 64
input: text_input
size: 80
human_name: Option Name
before:
after:
rules:
attributes:
name: option_name
id: option_name
class: stdINPUT
value: option_name
title: option_name,option_name,
alt: option_name
style:
hidden: false
key: false
-
option_value:
name:
name: option_value
type: longtext
null: NO
key:
default:
extra:
input: textarea_input
cols: 80
human_name: Option Value
before:
after:
rules:
attributes:
name: option_value
id: option_value
class: stdINPUT
value: option_value
title: option_value,option_value,
alt: option_value
style:
hidden: false
key: false
-
autoload:
name:
name: autoload
type: varchar
null: NO
key:
default: yes
extra:
max_length: 20
input: text_input
size: 80
human_name: Autoload
before:
after:
rules:
attributes:
name: autoload
id: autoload
class: stdINPUT
value: autoload
title: autoload,autoload,
alt: autoload
style:
hidden: false
key: false
|
data/components/wp_options_meta.yml
|
---
# tasks file for roles/terraform
#
# Downloads the requested Terraform release, installs it only when it is newer
# than (or replaces a missing) local copy, then prepares the host for the
# libvirt provider (KVM/libvirt packages and the vhost_net module).
- name: Install required utilities
  package:
    name:
      - unzip
    state: present
  become: true

- name: Download Terraform zip file
  get_url:
    url: "https://releases.hashicorp.com/terraform/{{ terraform_version }}/terraform_{{ terraform_version }}_linux_amd64.zip"
    dest: "/tmp/terraform-{{ terraform_version }}.zip"
    # Verify against HashiCorp's published SHA256SUMS for this release.
    checksum: "sha256:https://releases.hashicorp.com/terraform/{{ terraform_version }}/terraform_{{ terraform_version }}_SHA256SUMS"

- name: Extract Terraform executable
  unarchive:
    remote_src: true
    src: "/tmp/terraform-{{ terraform_version }}.zip"
    dest: /tmp

# rc != 0 simply means terraform is not installed yet; never fail here.
- name: Grab installed version (if any)
  command: terraform --version
  register: installed_tf
  failed_when: false
  changed_when: false

- name: Grab downloaded version
  command: /tmp/terraform --version
  register: tmp_tf
  failed_when: false
  changed_when: false

# First stdout line looks like "Terraform v0.12.x"; strip the leading "v".
- name: Find Terraform versions
  set_fact:
    installed_version: "{{ installed_tf.stdout_lines[0].split()[1].split('v')[1] }}"
  when: installed_tf.rc == 0

- name: Find Terraform versions
  set_fact:
    downloaded_version: "{{ tmp_tf.stdout_lines[0].split()[1].split('v')[1] }}"

- name: Display installed
  debug: var=installed_version

- name: Display downloaded
  debug: var=downloaded_version

# Install only when nothing is installed or the download is strictly newer.
- name: Move the Terraform executable ({{ downloaded_version }}) to the right place
  command: cp /tmp/terraform /usr/local/bin/terraform
  become: true
  when: (installed_tf.rc != 0) or (installed_version is version(downloaded_version, "<"))

# Despite the name, this installs the host-side libvirt/KVM stack required by
# the Terraform libvirt provider, not Terraform provider plugins themselves.
# NOTE(review): libvirt-bin was dropped in newer Ubuntu releases — confirm the
# package name for the target distribution.
- name: Install Terraform providers
  package:
    name:
      - bridge-utils
      - libguestfs-tools
      - libvirt-bin
      - qemu-kvm
      - virt-manager
      - virt-top
      - virtinst
    state: present
  become: true

- name: modprobe vhost_net for performance
  modprobe:
    name: vhost_net
    state: present
  become: true

- name: Permanently load vhost_net kernel module for performance
  lineinfile:
    path: /etc/modules
    line: 'vhost_net'
    insertbefore: EOF
  become: true

- name: Enable libvirtd service
  systemd:
    name: libvirtd
    state: started
  become: true

- include_tasks: libvirt-provider.yml
|
roles/terraform/tasks/main.yml
|
- name:
detail: Microsoft Research
i18n: company_a
desc:
detail:
i18n: company_a_desc
date:
detail: Sept. 2017 - Present
i18n: company_a_date
job:
detail: Research Intern
i18n: company_a_job
icon: fa-plus-square
company-icon: /apjacob/static/assets/img/landing/msr.jpg
location:
detail: Montreal
i18n: company_a_location
- name:
detail: Montreal Institute For Learning Algorithms
i18n: company_b
desc:
detail:
i18n: company_b_desc
date:
detail: Sept. 2016 - Present
i18n: company_b_date
job:
detail: Visiting Researcher
i18n: company_b_job
icon: fa-plus-square
company-icon: /apjacob/static/assets/img/landing/mila.jpg
location:
detail: Montreal
i18n: company_b_location
- name:
detail: Pumpup
i18n: company_c
desc:
detail:
i18n: company_c_desc
date:
detail: Sept. 2015 - Sept. 2016
i18n: company_c_date
job:
detail: Data Scientist
i18n: company_c_job
icon: fa-plus-square
company-icon: /apjacob/static/assets/img/landing/pumpup.jpg
location:
detail: Toronto
i18n: company_c_location
- name:
detail: <NAME>
i18n: company_d
desc:
detail:
i18n: company_d_desc
date:
detail: May 2015 - Sept. 2015
i18n: company_d_date
job:
detail: Data Scientist Intern
i18n: company_d_job
icon: fa-plus-square
company-icon: /apjacob/static/assets/img/landing/loblaw.jpg
location:
detail: Toronto
i18n: company_d_location
- name:
    detail: Piinpoint
    i18n: company_e
  desc:
    detail:
    i18n: company_e_desc
  date:
    detail: June. 2015 - Aug. 2015
    i18n: company_e_date
  job:
    detail: Data Scientist
    i18n: company_e_job
  icon: fa-plus-square
  company-icon: /apjacob/static/assets/img/landing/piinpoint.jpg
  location:
    # Fixed copy-paste bug: this entry previously reused company_d_location.
    detail: Waterloo
    i18n: company_e_location
- name:
detail: Institute For Quantum Computing
i18n: company_f
desc:
detail:
i18n: company_f_desc
date:
detail: Mar. 2015 - June. 2015
i18n: company_f_date
job:
detail: Researcher
i18n: company_f_job
icon: fa-plus-square
company-icon: /apjacob/static/assets/img/landing/iqc.png
location:
detail: Waterloo
i18n: company_f_location
- name:
icon: fa-cog
|
_data/index/careers.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-17 16:05"
game: "Unreal Tournament"
name: "CTF-BT-(Mon-Ch)Basics_i4g"
author: "Monsta-Cheely"
description: "None"
releaseDate: "2005-01"
attachments:
- type: "IMAGE"
name: "CTF-BT-(Mon-Ch)Basics_i4g_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/BunnyTrack/M/CTF-BT-(Mon-Ch)Basics_i4g_shot_2.png"
- type: "IMAGE"
name: "CTF-BT-(Mon-Ch)Basics_i4g_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/BunnyTrack/M/CTF-BT-(Mon-Ch)Basics_i4g_shot_1.png"
originalFilename: "ctf-bt-(Mon-Ch)Basics_i4g.zip"
hash: "a2f92e87e85fec09ca815bf4554d09e45532163d"
fileSize: 2899619
files:
- name: "BT.utx"
fileSize: 2543195
hash: "831aef1a8b08c2306ece9a13b9ed16f01113a76b"
- name: "killingspree.umx"
fileSize: 384751
hash: "74f3a6062bb00ed10d9ac44e1d3ed76e7053abb5"
- name: "CTF-BT-(Mon-Ch)Basics_i4g.unr"
fileSize: 1297135
hash: "473cc6e14f7a320cf759f3cc68a4101dcdacb487"
otherFiles: 2
dependencies:
CTF-BT-(Mon-Ch)Basics_i4g.unr:
- status: "OK"
name: "BT"
- status: "OK"
name: "killingspree"
downloads:
- url: "http://www.ut-files.com/index.php?dir=Maps/BunnyTrack/CTF-BT/&file=ctf-bt-%28Mon-Ch%29Basics_i4g.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/BunnyTrack/M/ctf-bt-(Mon-Ch)Basics_i4g.zip"
main: true
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/BunnyTrack/&file=ctf-bt-%28Mon-Ch%29Basics_i4g.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/BunnyTrack&file=ctf-bt-%28Mon-Ch%29Basics_i4g.zip"
main: false
repack: false
state: "OK"
- url: "http://www.i4games.euhttp://www.i4games.eu/maps/CTF-BT-%28Mon-Ch%29Basics_i4g.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/BunnyTrack/M/a/2/f92e87/ctf-bt-(Mon-Ch)Basics_i4g.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/BunnyTrack/M/a/2/f92e87/ctf-bt-(Mon-Ch)Basics_i4g.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "BunnyTrack"
title: "Bunny Track : Basics of BT (i4g edition)"
playerCount: "Unknown"
themes:
Natural: 0.1
Skaarj Tech: 0.9
bots: false
|
content/Unreal Tournament/Maps/BunnyTrack/M/a/2/f92e87/ctf-bt-mon-chbasics_i4g_[a2f92e87].yml
|
components:
schemas:
ManagerAccount:
anyOf:
- $ref: http://redfish.dmtf.org/schemas/v1/odata.v4_0_3.yaml#/components/schemas/idRef
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_0_0.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_0_2.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_0_3.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_0_4.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_0_5.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_0_6.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_0_7.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_1_0.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_1_1.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_1_2.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_1_3.yaml#/components/schemas/ManagerAccount
- $ref: http://redfish.dmtf.org/schemas/v1/ManagerAccount.v1_2_0.yaml#/components/schemas/ManagerAccount
description: The user accounts, owned by a Manager, are defined in this resource. Changes
to a Manager Account may affect the current Redfish service connection if
this manager is responsible for the Redfish service.
x-longDescription: This resource shall be used to represent resources that represent
the user accounts for the manager.
title: '#ManagerAccount.ManagerAccount'
x-copyright: Copyright 2014-2018 DMTF. For the full DMTF copyright policy, see http://www.dmtf.org/about/policies/copyright
x-owningEntity: DMTF
|
contrib/DMTF/DSP8010-Redfish_Schema/DSP8010_2018.3/openapi/ManagerAccount.yaml
|
swagger: '2.0'
info:
  version: 0.1.0
  title: My Demo Feed API
host: localhost
schemes:
  - http
consumes:
  - application/json
produces:
  - application/json
paths:
  /subscriptions/{user}/{feed}:
    post:
      # Descriptions are folded block scalars (>-). In a multi-line *plain*
      # scalar, " #" starts a YAML comment, which silently truncated every
      # "(required operation #N)" annotation in this file.
      description: >-
        Subscribe a user to a feed.
        Creates the user if needed and adds the feed to him/her if the feed is defined.
        (required operation #1)
      parameters:
        - name: user
          in: path
          required: true
          type: string
        - name: feed
          in: path
          required: true
          type: string
      responses:
        '200':
          description: OK (subscription may already exist)
        '201':
          description: OK subscription (and possibly user) created
        '400':
          description: Feed does not exist
        '500':
          description: Unexpected Error
    delete:
      description: >-
        Unsubscribe a user from a feed.
        (required operation #1)
      parameters:
        - name: user
          in: path
          required: true
          type: string
        - name: feed
          in: path
          required: true
          type: string
      responses:
        '200':
          description: OK
        '404':
          description: user or feed does not exist
        '500':
          description: Unexpected Error
  /subscriptions/:
    get:
      description: >-
        returns list of known users
        (was not a req)
      responses:
        '200':
          description: return list of known users
          schema:
            type: array
            items:
              type: string
        '500':
          description: Unexpected Error
  /subscriptions/{user}:
    get:
      description: >-
        return list of feeds the user is subscribed to.
        (required operation #3)
      parameters:
        - name: user
          in: path
          required: true
          type: string
      responses:
        '200':
          description: return list of feeds for user
          schema:
            type: array
            items:
              type: string
        '404':
          description: user does not exist
        '500':
          description: Unexpected Error
  /feeds:
    get:
      description: >-
        returns list of defined feeds
        (was not a req)
      responses:
        '200':
          description: return list of feeds
          schema:
            type: array
            items:
              type: string
        '500':
          description: Unexpected Error
  /feeds/{feed}:
    post:
      description: >-
        add articles to a feed
        (required operation #2)
      parameters:
        - name: feed
          in: path
          required: true
          type: string
        - name: articles
          in: body
          required: true
          schema:
            type: array
            items:
              $ref: '#/definitions/article'
      responses:
        '201':
          description: OK article added
        '400':
          description: invalid input data
        '404':
          description: feed does not exist
        '500':
          description: Unexpected Error
  /articles/{user}:
    get:
      description: >-
        get list of new articles for feeds a user is subscribed to
        (required operation #4)
      parameters:
        - name: user
          in: path
          required: true
          type: string
      responses:
        '200':
          description: OK
          schema:
            type: array
            items:
              $ref: '#/definitions/feedArticles'
        '404':
          description: user does not exist
        '500':
          description: Unexpected Error
definitions:
  article:
    type: object
    properties:
      title:
        type: string
      content:
        description: base64 encoded content of article
        type: string
  feedArticles:
    type: object
    properties:
      feed:
        type: string
      articles:
        type: array
        items:
          $ref: '#/definitions/article'
|
docs/swagger.yaml
|
presubmits:
  kubesphere/console:
    - name: pull-console-unit-test
      always_run: true
      decorate: true
      branches:
        - ^master$
        - release-*
      spec:
        containers:
          - command:
              - "/bin/sh"
              - "-c"
              - "yarn && yarn test"
            image: node:12.18
            name: ""
            resources:
              requests:
                cpu: "1"
                memory: 2Gi
    - name: pull-console-build
      labels:
        preset-docker-sock: "true"
      always_run: true
      decorate: true
      spec:
        containers:
          - command:
              - "/bin/sh"
              - "-c"
              - "REPO=kubespheredev TAG=${PULL_BASE_REF} make container"
            image: kubesphere/build-tools:master-latest
            name: ""
            resources:
              requests:
                cpu: "1"
                memory: 2Gi
    - name: pull-console-preview-deploy
      labels:
        preset-docker-sock: "true"
        preset-docker-credentials: "true"
        # NOTE(review): "preivew" is likely a typo, but this key must match
        # the preset label defined in the Prow config — do not rename it here
        # without renaming the preset too.
        preset-preivew-cluster-kubeconfig: "true"
        preset-github-credentials: "true"
        preset-preview-deployment-template: "true"
      always_run: false
      decorate: true
      skip_report: true
      spec:
        containers:
          - command: ["/bin/bash", "-c"]
            args:
              - >-
                set -o errexit;
                set -o nounset;
                gh auth login --with-token < /home/.github/oauth;
                gh -R kubesphere/console pr comment ${PULL_NUMBER} --body "Start building preview env, will notify when it's ready.";
                REPO=kubespheredev TAG=pr-${PULL_NUMBER} make container-push;
                sed "s/PULL_NUMBER/$PULL_NUMBER/g" /home/.template/template.yaml | kubectl --kubeconfig /home/.kube/preview-kubeconfig apply -f -;
                kubectl --kubeconfig /home/.kube/preview-kubeconfig -n kubesphere-system wait deployment --for=condition=Available=true pr-${PULL_NUMBER};
                URL=$(kubectl --kubeconfig /home/.kube/preview-kubeconfig -n kubesphere-system logs -lapp=pr-${PULL_NUMBER} -c kunnel | grep -Eo 'https://[^ >]+');
                destroy_date=$(date --date="tomorrow");
                gh -R kubesphere/console pr comment ${PULL_NUMBER} --body "Preview cluster is ready on ${URL}, and it will be cleaned on ${destroy_date}."
            image: kubesphere/build-tools-lite:master-latest
            name: ""
            securityContext:
              # Fixed typo: "priviledged" is not a valid SecurityContext field
              # and was silently dropped, so the container never actually ran
              # privileged (needed for the docker-sock build).
              privileged: true
            resources:
              requests:
                cpu: "2"
                memory: 4Gi
|
config/jobs/kubesphere/console/console-presubmits.yaml
|
# NOTE(review): "<PASSWORD>" is a redacted placeholder — the Jinja expression
# is not valid until the real seed variable is restored.
JENKINS_CERT_PASS: "{{ <PASSWORD> | random | string + (ansible_date_time.iso8601_micro|string) | hash('md5') }}"
jenkins_cert_files:
  - "/var/lib/jenkins/keys.pkcs12"
  - "/var/lib/jenkins/jenkins.jks"
my_jenkins_url: "http://localhost:{{ JENKINS_PORT | default('8080') }}/jenkins/"
# you can get names from https://updates.jenkins-ci.org/stable-2.60/plugin-documentation-urls.json
# Booleans written as canonical true/false (yamllint "truthy"); "yes" parses
# the same under YAML 1.1 but is ambiguous across parsers.
my_jenkins_plugins:
  build-pipeline-plugin:
    enabled: true
  ansible:
    enabled: true
  artifactory:
    enabled: true
  blueocean:
    enabled: true
  slack:
    enabled: true
  blueocean-dashboard:
    enabled: true
  copy-to-slave:
    enabled: true
  delivery-pipeline-plugin:
    enabled: true
  envinject:
    enabled: true
  google-login:
    enabled: true
  job-dsl:
    enabled: true
  matrix-auth:
    enabled: true
  maven-plugin:
    enabled: true
  pipeline-aggregator-view:
    enabled: true
  pipeline-utility-steps:
    enabled: true
  workflow-cps:
    enabled: true
  build-user-vars-plugin:
    enabled: true
  cloudbees-folder:
    enabled: true
  role-strategy:
    enabled: true
  repository-connector:
    enabled: true
  # gearman-plugin:
  #   enabled: false
jenkins_cloudbees_permissions:
- cloudbees_space:
com.cloudbees.plugins.credentials.CredentialsProvider.Create:
- <EMAIL>
com.cloudbees.plugins.credentials.CredentialsProvider.Delete:
- <EMAIL>
com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains:
- <EMAIL>
com.cloudbees.plugins.credentials.CredentialsProvider.Update:
- <EMAIL>
com.cloudbees.plugins.credentials.CredentialsProvider.View:
- <EMAIL>
hudson.model.Computer.Build:
- <EMAIL>
hudson.model.Computer.Configure:
- <EMAIL>
hudson.model.Computer.Connect:
- <EMAIL>
hudson.model.Computer.Create:
- <EMAIL>
hudson.model.Computer.Delete:
- <EMAIL>
hudson.model.Computer.Disconnect:
- <EMAIL>
hudson.model.Computer.Provision:
- <EMAIL>
hudson.model.Hudson.Administer:
- <EMAIL>
hudson.model.Item.Build:
- <EMAIL>
hudson.model.Item.Cancel:
- <EMAIL>
hudson.model.Item.Configure:
- <EMAIL>
hudson.model.Item.Create:
- <EMAIL>
hudson.model.Item.Delete:
- <EMAIL>
hudson.model.Item.Discover:
- <EMAIL>
hudson.model.Item.Move:
- <EMAIL>
hudson.model.Item.Read:
- <EMAIL>
hudson.model.Item.Workspace:
- <EMAIL>
hudson.model.Run.Delete:
- <EMAIL>
hudson.model.Run.Replay:
- <EMAIL>
hudson.model.Run.Update:
- <EMAIL>
hudson.model.View.Configure:
- <EMAIL>
hudson.model.View.Create:
- <EMAIL>
hudson.model.View.Delete:
- <EMAIL>
hudson.model.View.Read:
- <EMAIL>
hudson.scm.SCM.Tag:
- <EMAIL>
jenkins.metrics.api.Metrics.HealthCheck:
- <EMAIL>
jenkins.metrics.api.Metrics.ThreadDump:
- <EMAIL>
jenkins.metrics.api.Metrics.View:
- <EMAIL>
org.jfrog.hudson.ArtifactoryPlugin.Promote:
- <EMAIL>
org.jfrog.hudson.ArtifactoryPlugin.PushToBintray:
- <EMAIL>
org.jfrog.hudson.ArtifactoryPlugin.Release:
- <EMAIL>
hudson.model.Hudson.Read:
- <EMAIL>
|
defaults/main.yml
|
# CI pipeline: regenerate bindings, build native libraries per RID,
# then build the .NET solution against the produced artifacts.
name: "Build test pack deploy"
env:
  # Env var values quoted as strings: unquoted `1`/`true` are YAML int/bool
  # and only reach the process after implicit stringification — be explicit.
  DOTNET_CLI_TELEMETRY_OPTOUT: "1"
  DOTNET_SKIP_FIRST_TIME_EXPERIENCE: "true"
  DOTNET_SDK_VERSION: "5.0.301"
on:
  push:
    branches: [develop]
    paths-ignore: ['**/*.md']
  # NOTE(review): pull_request_target runs with a write-scoped token and
  # secrets; this workflow checks out and executes repo scripts
  # (bindgen.sh, library.sh), which is a "pwn request" risk if fork code
  # is ever checked out here — confirm plain `pull_request` isn't enough.
  pull_request_target:
    branches: [develop]
jobs:
  bindgen-job:
    name: "Bindgen"
    runs-on: ubuntu-latest
    steps:
      # Pinned to a release tag instead of the floating `master` ref.
      - name: "Clone Git repository"
        uses: actions/checkout@v2
        with:
          submodules: true
      - name: "Bindgen"
        shell: bash
        run: ./bindgen.sh
      - name: "Add + commit + push (if necessary)"
        uses: EndBug/add-and-commit@v7
        with:
          default_author: user_info
          message: "Update bindings"
  build-native-job:
    name: "Build native libraries ${{ matrix.platform.rid }}"
    needs: [bindgen-job]
    runs-on: ${{ matrix.platform.os }}
    strategy:
      matrix:
        platform:
          # win-x64 is cross-compiled on Linux via mingw-w64 (see below).
          - { name: Windows (x64), os: ubuntu-latest, rid: win-x64 }
          - { name: Linux (x64), os: ubuntu-latest, rid: linux-x64 }
          - { name: macOS (x64), os: macos-latest, rid: osx-x64 }
    steps:
      - name: "Clone"
        uses: actions/checkout@v2
        with:
          submodules: true
      - name: "Linux dependencies"
        if: ${{ startsWith(matrix.platform.rid, 'linux') }}
        run: |
          sudo apt-get update
          sudo apt-get install \
            libasound2-dev \
            libegl1-mesa-dev \
            libxcursor-dev \
            libxi-dev
      - name: "Build native libraries (Unix)"
        if: ${{ !(startsWith(matrix.platform.rid, 'win')) }}
        shell: bash
        run: ./library.sh
      - name: "Build native libraries (Windows)"
        if: ${{ startsWith(matrix.platform.rid, 'win') }}
        shell: bash
        run: sudo apt-get update -y && sudo apt-get install -y mingw-w64 && ./library.sh microsoft
      - name: "Upload native libraries"
        uses: actions/upload-artifact@v2
        with:
          name: "native-libraries-${{ matrix.platform.rid }}"
          path: './lib'
  build-dotnet-job:
    name: "Build .NET solution ${{ matrix.platform.rid }}"
    needs: [build-native-job]
    runs-on: ${{ matrix.platform.os }}
    strategy:
      matrix:
        platform:
          - { name: Windows (x64), os: windows-latest, rid: win-x64 }
          - { name: Linux (x64), os: ubuntu-latest, rid: linux-x64 }
          - { name: macOS (x64), os: macos-latest, rid: osx-x64 }
    steps:
      - name: "Clone Git repository"
        uses: actions/checkout@v2
        with:
          # BUG FIX: was `${{ env.GITHUB_REF }}` — default runner variables
          # such as GITHUB_REF are NOT part of the `env` context (it only
          # holds env vars declared in the workflow), so the expression
          # expanded to empty. Use the `github` context instead.
          ref: ${{ github.ref }}
          submodules: true
      - name: "Install .NET"
        uses: actions/setup-dotnet@v1
        with:
          dotnet-version: '${{ env.DOTNET_SDK_VERSION }}'
      - name: "Download native libraries"
        # v2 to match actions/upload-artifact@v2 used by build-native-job.
        uses: actions/download-artifact@v2
        with:
          name: "native-libraries-${{ matrix.platform.rid }}"
          path: './lib'
      - name: "Build .NET solution"
        run: dotnet build --nologo --verbosity minimal --configuration Release
|
.github/workflows/sokol.yml
|