uid: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription*" fullName: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription" name: "RuleDescription" nameWithType: "RuleDescription.RuleDescription" members: - uid: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription()" fullName: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription()" name: "RuleDescription()" nameWithType: "RuleDescription.RuleDescription()" summary: "Creates a rule description with no name, <xref uid=\"com.microsoft.azure.servicebus.rules.TrueFilter\" data-throw-if-not-resolved=\"false\" data-raw-source=\"TrueFilter\"></xref> as filter and no action." syntax: "public RuleDescription()" - uid: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription(com.microsoft.azure.servicebus.rules.Filter)" fullName: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription(Filter filter)" name: "RuleDescription(Filter filter)" nameWithType: "RuleDescription.RuleDescription(Filter filter)" summary: "Creates a rule description with no name, given filter as filter and no action." parameters: - description: "filter the rule uses to filter messages. Can be <xref uid=\"com.microsoft.azure.servicebus.rules.CorrelationFilter\" data-throw-if-not-resolved=\"false\" data-raw-source=\"CorrelationFilter\"></xref> or <xref uid=\"com.microsoft.azure.servicebus.rules.SqlFilter\" data-throw-if-not-resolved=\"false\" data-raw-source=\"SqlFilter\"></xref>." name: "filter" type: "<xref href=\"com.microsoft.azure.servicebus.rules.Filter?alt=com.microsoft.azure.servicebus.rules.Filter&text=Filter\" data-throw-if-not-resolved=\"False\" />" syntax: "public RuleDescription(Filter filter)" - uid: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription(java.lang.String)" fullName: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription(String name)" name: "RuleDescription(String name)" nameWithType: "RuleDescription.RuleDescription(String name)" summary: "Creates a rule description with the given name, <xref uid=\"com.microsoft.azure.servicebus.rules.TrueFilter\" data-throw-if-not-resolved=\"false\" data-raw-source=\"TrueFilter\"></xref> as filter and no action." parameters: - description: "name of the rule" name: "name" type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />" syntax: "public RuleDescription(String name)" - uid: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription(java.lang.String,com.microsoft.azure.servicebus.rules.Filter)" fullName: "com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription(String name, Filter filter)" name: "RuleDescription(String name, Filter filter)" nameWithType: "RuleDescription.RuleDescription(String name, Filter filter)" summary: "Creates a rule description with the given name, given filter as filter and no action." parameters: - description: "name of the rule" name: "name" type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />" - description: "filter this rule uses to filter messages. Can be Can be <xref uid=\"com.microsoft.azure.servicebus.rules.CorrelationFilter\" data-throw-if-not-resolved=\"false\" data-raw-source=\"CorrelationFilter\"></xref> or <xref uid=\"com.microsoft.azure.servicebus.rules.SqlFilter\" data-throw-if-not-resolved=\"false\" data-raw-source=\"SqlFilter\"></xref>." 
name: "filter" type: "<xref href=\"com.microsoft.azure.servicebus.rules.Filter?alt=com.microsoft.azure.servicebus.rules.Filter&text=Filter\" data-throw-if-not-resolved=\"False\" />" syntax: "public RuleDescription(String name, Filter filter)" type: "constructor" metadata: {} package: "com.microsoft.azure.servicebus.rules" artifact: com.microsoft.azure:azure-servicebus:3.6.1
docs-ref-autogen/com.microsoft.azure.servicebus.rules.RuleDescription.RuleDescription.yml
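The constructors documented above compose directly with the filter types they reference. A minimal, hypothetical Java sketch follows; the class names come from this file, but the rule name and the SQL expression are invented placeholders:

```java
import com.microsoft.azure.servicebus.rules.RuleDescription;
import com.microsoft.azure.servicebus.rules.SqlFilter;

public class RuleDescriptionExample {
    public static void main(String[] args) {
        // No-arg constructor: unnamed rule, TrueFilter as filter, no action.
        RuleDescription matchAll = new RuleDescription();

        // Named rule with a SqlFilter over message properties; the expression
        // "color = 'red'" is only an illustrative placeholder.
        RuleDescription redOnly =
                new RuleDescription("red-only", new SqlFilter("color = 'red'"));
    }
}
```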
analyzers:
  motors:
    type: GenericAnalyzer
    path: Motors
    startswith: [ 'EtherCAT Device' ]
    expected: [ 'EtherCAT Master',
      'EtherCAT Device (bl_caster_l_wheel_motor)',
      'EtherCAT Device (bl_caster_r_wheel_motor)',
      'EtherCAT Device (bl_caster_rotation_motor)',
      'EtherCAT Device (br_caster_l_wheel_motor)',
      'EtherCAT Device (br_caster_r_wheel_motor)',
      'EtherCAT Device (br_caster_rotation_motor)',
      'EtherCAT Device (fl_caster_l_wheel_motor)',
      'EtherCAT Device (fl_caster_r_wheel_motor)',
      'EtherCAT Device (fl_caster_rotation_motor)',
      'EtherCAT Device (fr_caster_l_wheel_motor)',
      'EtherCAT Device (fr_caster_r_wheel_motor)',
      'EtherCAT Device (fr_caster_rotation_motor)',
      'EtherCAT Device (laser_tilt_mount_motor)',
      'EtherCAT Device (head_pan_motor)',
      'EtherCAT Device (head_tilt_motor)',
      'EtherCAT Device (led_projector)',
      'EtherCAT Device (torso_lift_motor)' ]
  joints:
    type: GenericAnalyzer
    path: Joints
    startswith: [ 'Joint' ]
    expected: [ 'Joint (bl_caster_l_wheel_joint)',
      'Joint (bl_caster_r_wheel_joint)',
      'Joint (bl_caster_rotation_joint)',
      'Joint (br_caster_l_wheel_joint)',
      'Joint (br_caster_r_wheel_joint)',
      'Joint (br_caster_rotation_joint)',
      'Joint (fl_caster_l_wheel_joint)',
      'Joint (fl_caster_r_wheel_joint)',
      'Joint (fl_caster_rotation_joint)',
      'Joint (fr_caster_l_wheel_joint)',
      'Joint (fr_caster_r_wheel_joint)',
      'Joint (fr_caster_rotation_joint)',
      'Joint (laser_tilt_mount_joint)',
      'Joint (head_pan_joint)',
      'Joint (head_tilt_joint)',
      'Joint (torso_lift_joint)' ]
  controllers:
    type: GenericAnalyzer
    path: 'Realtime Controllers'
    expected: [ 'Realtime Control Loop',
      'Controller (head_camera_trigger)',
      'Controller (projector_controller)',
      'Controller (projector_trigger)',
      'Controller (prosilica_inhibit_projector_controller)' ]
    startswith: [ 'Controller' ]
    discard_stale: true
  cameras:
    type: AnalyzerGroup
    path: Cameras
    analyzers:
      forearm_l:
        type: GenericAnalyzer
        path: Forearm (Left)
        find_and_remove_prefix: l_forearm_cam
        num_items: 0
      forearm_r:
        type: GenericAnalyzer
        path: Forearm (Right)
        find_and_remove_prefix: r_forearm_cam
        num_items: 0
config/no_arms/pr2_analyzers_no_arms_diff.yaml
# Codebase Steward (Communities & Product)
- company: Foundation for Public Code, Amsterdam
  position: Codebase Steward (Communities & Product)
  duration: Feb, 2020 &mdash; Present
  summary: Supporting communities around public (code)bases.

# Developer Relations | Community strategist & Content manager
- company: Codemotion, Amsterdam
  position: Developer Relations | Community strategist & Content manager
  duration: Oct, 2018 &mdash; Jan, 2020
  summary: As Tech Community & Content Manager I built communities and planned events & projects across Europe, especially in the Netherlands and Germany.

# Developer Relations | Head of Marketing and Communications
- company: Autentia, Madrid
  position: Developer Relations | Head of Marketing and Communications
  duration: Feb, 2014 &mdash; Oct, 2018
  summary: Since 2014 I've attended more than a hundred events, where I also developed my journalism and PR skills by creating conversations and conducting interviews (mostly in Spanish).

# Community Manager & Social Media
- company: Elige Workplace, Madrid
  position: Community Manager & Social Media
  duration: Oct, 2013 &mdash; Jan, 2014
  summary: Positioning, launches and coordination of marketing campaigns. Community management, monitoring and metrics. Copywriting. WordPress, AdWords, Mail Chimp, LinkedIn, Factusol, Facebook, Tweetdeck, SocialBro, etc.

# Communication & Social Media
- company: Bosch, Madrid
  position: Communication & Social Media
  duration: Oct, 2013 &mdash; Jan, 2014
  summary: Copywriting, press releases, content management, branding, reporting, events, Social Media.

# Customer Analyst & Social Media
- company: Tuenti, Madrid
  position: Customer Analyst & Social Media
  duration: Aug, 2012 &mdash; Aug, 2013
  summary: I had the chance to work at a Facebook-style social network that became very famous in Spain and gathered a large number of international IT experts. My duties included reporting bugs at Tuenti Móvil with Jira, Zendesk, etc., as well as managing its social media channels such as Twitter and its blogs.

# Junior Account Executive
- company: Text 100 (Now Archetipe), Madrid
  position: Junior Account Executive
  duration: Oct, 2011 &mdash; Apr, 2012
  summary: PR agency. Community management, copywriting for an official Vodafone blog (smartblog.es), events for PayPal, La Casa del Libro and Parrot, meetings with journalists, coverage briefings, translations, press releases, bazaars, etcetera.

# Copy, Web and Community Manager
- company: La Voz de Asturias (newspaper), Oviedo (Spain)
  position: Journalist, Copy, Web and Community Manager
  duration: Jul, 2011 &mdash; Oct, 2011
  summary: Journalist internship. Besides the old-school paper edition, I kept an eye on the digital side, managing the newspaper's website by updating it, moderating user comments and adding breaking news. I also energized the social networks where La Voz de Asturias was present, such as Twitter and Facebook, by creating debate forums between followers and readers.

# Copywriter
- company: <NAME> (newspaper), Oviedo (Spain)
  position: Copywriter and Journalist
  duration: Jul, 2010 &mdash; Oct, 2010
  summary: I carried out journalism and copywriting tasks, attending press conferences, conducting interviews and, of course, writing news and articles, among other duties.
_data/experience.yml
items:
  - uid: '@azure/arm-resourcegraph'
    name: '@azure/arm-resourcegraph'
    summary: ''
    children:
      - '@azure/arm-resourcegraph.Column'
      - '@azure/arm-resourcegraph.ErrorDetails'
      - '@azure/arm-resourcegraph.ErrorModel'
      - '@azure/arm-resourcegraph.ErrorResponse'
      - '@azure/arm-resourcegraph.Facet'
      - '@azure/arm-resourcegraph.FacetError'
      - '@azure/arm-resourcegraph.FacetRequest'
      - '@azure/arm-resourcegraph.FacetRequestOptions'
      - '@azure/arm-resourcegraph.FacetResult'
      - '@azure/arm-resourcegraph.Operation'
      - '@azure/arm-resourcegraph.OperationDisplay'
      - '@azure/arm-resourcegraph.OperationListResult'
      - '@azure/arm-resourcegraph.QueryRequest'
      - '@azure/arm-resourcegraph.QueryRequestOptions'
      - '@azure/arm-resourcegraph.QueryResponse'
      - '@azure/arm-resourcegraph.ResourceGraphClientOptions'
      - '@azure/arm-resourcegraph.Table'
      - '@azure/arm-resourcegraph.ColumnDataType'
      - '@azure/arm-resourcegraph.FacetSortOrder'
      - '@azure/arm-resourcegraph.FacetUnion'
      - '@azure/arm-resourcegraph.OperationsListResponse'
      - '@azure/arm-resourcegraph.ResourcesResponse'
      - '@azure/arm-resourcegraph.ResultTruncated'
      - '@azure/arm-resourcegraph.Operations'
      - '@azure/arm-resourcegraph.ResourceGraphClient'
      - '@azure/arm-resourcegraph.ResourceGraphClientContext'
    type: package
    langs:
      - typeScript
references:
  - uid: '@azure/arm-resourcegraph.Column'
    name: Column
  - uid: '@azure/arm-resourcegraph.ErrorDetails'
    name: ErrorDetails
  - uid: '@azure/arm-resourcegraph.ErrorModel'
    name: ErrorModel
  - uid: '@azure/arm-resourcegraph.ErrorResponse'
    name: ErrorResponse
  - uid: '@azure/arm-resourcegraph.Facet'
    name: Facet
  - uid: '@azure/arm-resourcegraph.FacetError'
    name: FacetError
  - uid: '@azure/arm-resourcegraph.FacetRequest'
    name: FacetRequest
  - uid: '@azure/arm-resourcegraph.FacetRequestOptions'
    name: FacetRequestOptions
  - uid: '@azure/arm-resourcegraph.FacetResult'
    name: FacetResult
  - uid: '@azure/arm-resourcegraph.Operation'
    name: Operation
  - uid: '@azure/arm-resourcegraph.OperationDisplay'
    name: OperationDisplay
  - uid: '@azure/arm-resourcegraph.OperationListResult'
    name: OperationListResult
  - uid: '@azure/arm-resourcegraph.QueryRequest'
    name: QueryRequest
  - uid: '@azure/arm-resourcegraph.QueryRequestOptions'
    name: QueryRequestOptions
  - uid: '@azure/arm-resourcegraph.QueryResponse'
    name: QueryResponse
  - uid: '@azure/arm-resourcegraph.ResourceGraphClientOptions'
    name: ResourceGraphClientOptions
  - uid: '@azure/arm-resourcegraph.Table'
    name: Table
  - uid: '@azure/arm-resourcegraph.ColumnDataType'
    name: ColumnDataType
  - uid: '@azure/arm-resourcegraph.FacetSortOrder'
    name: FacetSortOrder
  - uid: '@azure/arm-resourcegraph.FacetUnion'
    name: FacetUnion
  - uid: '@azure/arm-resourcegraph.OperationsListResponse'
    name: OperationsListResponse
  - uid: '@azure/arm-resourcegraph.ResourcesResponse'
    name: ResourcesResponse
  - uid: '@azure/arm-resourcegraph.ResultTruncated'
    name: ResultTruncated
  - uid: '@azure/arm-resourcegraph.Operations'
    name: Operations
  - uid: '@azure/arm-resourcegraph.ResourceGraphClient'
    name: ResourceGraphClient
  - uid: '@azure/arm-resourcegraph.ResourceGraphClientContext'
    name: ResourceGraphClientContext
docs-ref-autogen/@azure/arm-resourcegraph/index.yml
apiVersion: jenkins.io/v1alpha2
kind: Jenkins
metadata:
  annotations: {}
  name: jenkins
spec:
  configurationAsCode:
    enabled: true
    defaultConfig: true
    configurations:
      - name: user-configuration-1
      - name: user-configuration-2
    enableAutoReload: true
  jenkinsAPIConnectionSettings:
    useNodePort: true
  jenkinsAPISettings:
    authorizationStrategy: serviceAccount
  roles:
    - name: admin
      kind: ClusterRole
      apiGroup: "rbac.authorization.k8s.io"
  master:
    basePlugins:
      - name: kubernetes
        version: "1.18.3"
      - name: workflow-job
        version: "2.34"
      - name: workflow-aggregator
        version: "2.6"
      - name: git
        version: "3.12.0"
      - name: job-dsl
        version: "1.76"
      - name: configuration-as-code
        version: "1.29"
      - name: configuration-as-code-groovy
        version: "1.1"
      - name: configuration-as-code-support
        version: "1.19"
      - name: kubernetes-credentials-provider
        version: "0.12.1"
    containers:
      - name: jenkins-master
        command:
          - /usr/bin/go-init
          - '-main'
          - /usr/libexec/s2i/run
        env:
          - name: OPENSHIFT_ENABLE_OAUTH
            value: 'true'
          - name: OPENSHIFT_ENABLE_REDIRECT_PROMPT
            value: 'true'
          - name: DISABLE_ADMINISTRATIVE_MONITORS
            value: 'false'
          - name: KUBERNETES_MASTER
            value: 'https://kubernetes.default:443'
          - name: KUBERNETES_TRUST_CERTIFICATES
            value: 'true'
          - name: JENKINS_SERVICE_NAME
            value: jenkins-operator-http-jenkins
          - name: JNLP_SERVICE_NAME
            value: jenkins-operator-slave-jenkins
          - name: JENKINS_UC_INSECURE
            value: 'false'
          - name: JENKINS_HOME
            value: /var/lib/jenkins
          - name: JENKINS_JAVA_OVERRIDES
            value: >-
              -XX:+UnlockExperimentalVMOptions
              -XX:MaxRAMFraction=1
              -Djenkins.install.runSetupWizard=false
              -Djava.awt.headless=true
              -Dhudson.security.csrf.DefaultCrumbIssuer.EXCLUDE_SESSION_ID=true
              -Dcasc.reload.token=$(POD_NAME)
        image: 'quay.io/repository/openshift/origin-jenkins:4.5'
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
            path: /login
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 420
          periodSeconds: 360
          timeoutSeconds: 240
        readinessProbe:
          httpGet:
            path: /login
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 3
          periodSeconds: 0
          timeoutSeconds: 240
        resources:
          limits:
            cpu: 600m
            memory: 4Gi
          requests:
            cpu: 500m
            memory: 3Gi
  service:
    port: 8080
    type: ClusterIP
  slaveService:
    port: 50000
    type: ClusterIP
  serviceAccount:
    annotations:
      serviceaccounts.openshift.io/oauth-redirectreference.jenkins: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"jenkins-operator"}}'
deploy/crds/openshift_jenkins_v1alpha2_jenkins_cr.yaml
http_interactions:
- request:
    method: get
    uri: https://api.gbif.org/v1/species/match?name=Dugesia&strict=FALSE&verbose=TRUE&limit=500
    body:
      encoding: ''
      string: ''
    headers:
      Accept-Encoding: gzip, deflate
      Accept: application/json, text/xml, application/xml, */*
      User-Agent: r-curl/3.3 crul/0.7.0.9100 taxize/0.9.4.9914
      X-User-Agent: r-curl/3.3 crul/0.7.0.9100 taxize/0.9.4.9914
  response:
    status:
      status_code: '200'
      message: OK
      explanation: Request fulfilled, document follows
    headers:
      status: HTTP/1.1 200 OK
      date: Sat, 26 Jan 2019 17:01:10 GMT
      content-type: application/json
      access-control-allow-origin: '*'
      access-control-allow-methods: HEAD, GET, POST, DELETE, PUT
      server: Jetty(9.3.z-SNAPSHOT)
      cache-control: public, max-age=3601
      x-varnish: 56067269 58884305
      age: '375'
      via: 1.1 varnish (Varnish/5.2)
      accept-ranges: bytes
      content-length: '2888'
      connection: keep-alive
    body:
      encoding: UTF-8
      string: '{"confidence":100,"note":"Multiple equal matches for Dugesia","matchType":"NONE","alternatives":[{"usageKey":3091142,"scientificName":"Dugesia A.Gray","canonicalName":"Dugesia","rank":"GENUS","status":"ACCEPTED","confidence":99,"note":"Similarity: name=100; authorship=0; classification=-2; rank=0; status=1","matchType":"EXACT","kingdom":"Plantae","phylum":"Tracheophyta","order":"Asterales","family":"Asteraceae","genus":"Dugesia","kingdomKey":6,"phylumKey":7707728,"classKey":220,"orderKey":414,"familyKey":3065,"genusKey":3091142,"synonym":false,"class":"Magnoliopsida"},{"usageKey":2502872,"scientificName":"<NAME>, 1851","canonicalName":"Dugesia","rank":"GENUS","status":"ACCEPTED","confidence":99,"note":"Similarity: name=100; authorship=0; classification=-2; rank=0; status=1","matchType":"EXACT","kingdom":"Animalia","phylum":"Platyhelminthes","order":"Tricladida","family":"Dugesiidae","genus":"Dugesia","kingdomKey":1,"phylumKey":108,"classKey":5967490,"orderKey":427,"familyKey":6365,"genusKey":2502872,"synonym":false,"class":"Rhabditophora"},{"usageKey":3253009,"acceptedUsageKey":6006594,"scientificName":"Dugesia Berlese, 1883","canonicalName":"Dugesia","rank":"GENUS","status":"SYNONYM","confidence":98,"note":"Similarity: name=100; authorship=0; classification=-2; rank=0; status=0","matchType":"EXACT","kingdom":"Animalia","phylum":"Arthropoda","order":"Trombidiformes","family":"Calyptostomatidae","genus":"Smaris","kingdomKey":1,"phylumKey":54,"classKey":367,"orderKey":543,"familyKey":3252736,"genusKey":6006594,"synonym":true,"class":"Arachnida"},{"usageKey":9388019,"scientificName":"Dugezia","canonicalName":"Dugezia","rank":"GENUS","status":"DOUBTFUL","confidence":68,"note":"Similarity: name=75; authorship=0; classification=-2; rank=0; status=-5","matchType":"FUZZY","kingdom":"Plantae","genus":"Dugezia","kingdomKey":6,"genusKey":9388019,"synonym":false},{"usageKey":7290774,"scientificName":"Dugezia Montrouz.","canonicalName":"Dugezia","rank":"GENUS","status":"DOUBTFUL","confidence":68,"note":"Similarity: name=75; authorship=0; classification=-2; rank=0; status=-5","matchType":"FUZZY","kingdom":"Plantae","phylum":"Tracheophyta","order":"Malpighiales","family":"Clusiaceae","genus":"Dugezia","kingdomKey":6,"phylumKey":7707728,"classKey":220,"orderKey":1414,"familyKey":6646,"genusKey":7290774,"synonym":false,"class":"Magnoliopsida"},{"usageKey":8038959,"scientificName":"Dugezia Montrouz. ex Beauvis.","canonicalName":"Dugezia","rank":"GENUS","status":"DOUBTFUL","confidence":68,"note":"Similarity: name=75; authorship=0; classification=-2; rank=0; status=-5","matchType":"FUZZY","kingdom":"Plantae","phylum":"Tracheophyta","order":"Ericales","family":"Primulaceae","genus":"Dugezia","kingdomKey":6,"phylumKey":7707728,"classKey":220,"orderKey":1353,"familyKey":6674,"genusKey":8038959,"synonym":false,"class":"Magnoliopsida"}],"synonym":false}'
  recorded_at: 2019-01-26 17:07:25 GMT
recorded_with: vcr/0.2.2, webmockr/0.3.0
tests/fixtures/get_gbifid_ask_arg.yml
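The cassette above records a plain GET against the GBIF species-match endpoint. For context, a self-contained Java sketch that would reproduce the same request live, using only the JDK's java.net.http client (Java 11+); the URL and query parameters come from the recording, everything else is boilerplate:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class GbifMatchExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Same request the cassette recorded: fuzzy-match the genus "Dugesia".
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("https://api.gbif.org/v1/species/match"
                        + "?name=Dugesia&strict=FALSE&verbose=TRUE&limit=500"))
                .header("Accept", "application/json")
                .GET()
                .build();
        HttpResponse<String> response =
                client.send(request, HttpResponse.BodyHandlers.ofString());
        // The recorded response reports multiple equal genus matches for "Dugesia".
        System.out.println(response.statusCode());
        System.out.println(response.body());
    }
}
```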
box:
  id: openjdk:8-jdk
  ports:
    - 8080
services:
  - redis

dev:
  steps:
    - script:
        name: info/env
        code: env
    - internal/watch:
        name: Run and Watch
        code: |
          ./gradlew bootRun
        reload: true

build:
  steps:
    - script:
        name: info/env
        code: env
    - script:
        name: gradle build
        code: |
          ./gradlew --full-stacktrace -q --project-cache-dir=$WERCKER_CACHE_DIR build

integration-test:
  steps:
    - script:
        name: Run and IT
        code: |
          java -jar $WERCKER_ROOT/build/libs/cowweb-2.0.jar &
          sleep 5
          ./gradlew --full-stacktrace -q --project-cache-dir=$WERCKER_CACHE_DIR integrationTest

push-to-releases:
  steps:
    - internal/docker-push:
        username: $REGISTRY_USERNAME
        password: $REGISTRY_PASSWORD
        repository: iad.ocir.io/hhiroshell1/cowweb
        tag: demo-v1.0
        ports: 8080
        cmd: java -jar /pipeline/source/build/libs/cowweb-2.0.jar

deploy-to-cluster:
  box:
    id: alpine
    cmd: /bin/sh
  steps:
    - kubectl:
        name: connection testing
        server: $KUBERNETES_MASTER
        token: $KUBERNETES_TOKEN
        insecure-skip-tls-verify: true
        command: get nodes
    - kubectl:
        name: delete namespace
        server: $KUBERNETES_MASTER
        token: $KUBERNETES_TOKEN
        insecure-skip-tls-verify: true
        command: delete namespace $COWWEB_NAMESPACE --ignore-not-found=true
    - script:
        name: wait to delete namespace
        code: sleep 30
    - kubectl:
        name: create namespace
        server: $KUBERNETES_MASTER
        token: $KUBERNETES_TOKEN
        insecure-skip-tls-verify: true
        command: create namespace $COWWEB_NAMESPACE
    - kubectl:
        name: create deployment
        server: $KUBERNETES_MASTER
        token: $KUBERNETES_TOKEN
        insecure-skip-tls-verify: true
        command: create -f $WERCKER_ROOT/manifests --namespace=$COWWEB_NAMESPACE
    - kubectl:
        name: get LoadBalancer public IP address
        server: $KUBERNETES_MASTER
        token: $KUBERNETES_TOKEN
        insecure-skip-tls-verify: true
        command: get svc -o jsonpath='{.items[*].status.loadBalancer.ingress[*].ip}' --namespace=$COWWEB_NAMESPACE
wercker.yml
applicationApiVersion: v1beta1
properties:
  name:
    type: string
    x-google-marketplace:
      type: NAME
  namespace:
    type: string
    x-google-marketplace:
      type: NAMESPACE
  falco.serviceAccount.name:
    title: Falco Service Account
    description: Service account to assign to Falco DaemonSet
    type: string
    x-google-marketplace:
      type: SERVICE_ACCOUNT
      serviceAccount:
        roles:
          - type: ClusterRole
            rulesType: PREDEFINED
            rulesFromRoleName: cluster-admin
  image:
    type: string
    default: $REGISTRY/falco:$TAG
    x-google-marketplace:
      type: IMAGE
      image:
        generatedProperties:
          splitToRegistryRepoTag:
            registry: falco.image.registry
            repo: falco.image.repository
            tag: falco.image.tag
  falco.ebpf.enabled:
    title: Enable eBPF support for Falco
    description: |-
      Mark this checkbox if you are using a kernel newer than 4.14. The default
      value for newly created clusters is enabled, but if you are using an older
      kernel, please uncheck this checkbox and use the Sysdig kernel module for
      capturing the system calls.
    type: boolean
    default: true
  falco.integrations.pubsubOutput.enabled:
    title: Enable Pub/Sub output for Falco
    description: |-
      When enabled, Falco uses a Google Pub/Sub topic to send alerts.
    type: boolean
    default: false
  falco.integrations.pubsubOutput.projectID:
    title: Project where Pub/Sub topic is deployed
    description: |-
      The project identifier where the Google Pub/Sub topic is deployed.
    type: string
    default: ""
  falco.integrations.pubsubOutput.topic:
    title: Pub/Sub topic where Falco is going to send alerts
    description: |-
      The name of the Google Pub/Sub topic where Falco is going to send alerts.
    type: string
    default: falco-alerts
  falco.integrations.pubsubOutput.credentialsData:
    title: Service Account credentials used to publish alerts from Falco to a Pub/Sub topic
    description: |-
      To be able to send Falco alerts to a Google Pub/Sub topic, we need to be
      authenticated. This field stores the content of the service account JSON
      file. It can be encoded using base64 instead of messing with the JSON
      format.
    type: string
    default: ""
    x-google-marketplace:
      type: STRING
  falco.integrations.gcscc.enabled:
    title: Enable Google Cloud Security Command Center integration
    description: |-
      When enabled, Falco sends alerts and transforms them into findings in
      Google Cloud Security Command Center.
    type: boolean
    default: false
  falco.integrations.gcscc.webhookUrl:
    title: Endpoint where Sysdig Connector is deployed
    description: |-
      This integration requires a Sysdig connector for Google Cloud Security
      Command Center to be deployed. This webhook is responsible for receiving
      Falco alerts and transforming them into findings.
    type: string
    default: http://sysdig-gcscc-connector.default.svc.cluster.local:8080/events
  falco.integrations.gcscc.webhookAuthenticationToken:
    title: Authentication Token for the Sysdig Connector webhook
    description: |-
      To avoid receiving unauthorized requests, we need to use the same
      authentication token on both sides. We send the authentication header to
      the Sysdig Connector and it checks it against its token.
    type: string
    default: b27511f86e911f20b9e0f9c8104b4ec4
required:
  - name
  - namespace
  - image
schema.yaml
--- !<MAP>
contentType: "MAP"
firstIndex: "2019-01-17 05:25"
game: "Unreal Tournament"
name: "DM-(NYA)Paris"
author: "NYA-Mike - <NAME>"
description: "Welcome to Paris"
releaseDate: "2004-02"
attachments:
- type: "IMAGE"
  name: "DM-(NYA)Paris_shot_4.png"
  url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/N/DM-(NYA)Paris_shot_4.png"
- type: "IMAGE"
  name: "DM-(NYA)Paris_shot_3.png"
  url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/N/DM-(NYA)Paris_shot_3.png"
- type: "IMAGE"
  name: "DM-(NYA)Paris_shot_5.png"
  url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/N/DM-(NYA)Paris_shot_5.png"
- type: "IMAGE"
  name: "DM-(NYA)Paris_shot_1.png"
  url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/N/DM-(NYA)Paris_shot_1.png"
- type: "IMAGE"
  name: "DM-(NYA)Paris_shot_2.png"
  url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/N/DM-(NYA)Paris_shot_2.png"
originalFilename: "DM-(NYA)Paris.zip"
hash: "3551a41711dcb742b2258fcd5bacfb115d05b5cd"
fileSize: 11940634
files:
- name: "Ribeira3.utx"
  fileSize: 3154935
  hash: "810725d873016c9586ff4d412d283ad8c4189b27"
- name: "Ribeira1.utx"
  fileSize: 4886418
  hash: "d528e96dc2770e068f3703df6eee3a379426d25b"
- name: "Ribeira2.utx"
  fileSize: 9275225
  hash: "caa0d4b986807a46607e848457e8d496441a0c9e"
- name: "de.u"
  fileSize: 72169
  hash: "5e220383ed8260509ba288f520d10d81ff7ad103"
- name: "DM-(NYA)Paris.unr"
  fileSize: 1179231
  hash: "bfdac3b6c196a42f044c7012199a5c41ce7f2bc5"
otherFiles: 4
dependencies:
  DM-(NYA)Paris.unr:
  - status: "OK"
    name: "Ribeira1"
  - status: "OK"
    name: "Ribeira2"
  - status: "OK"
    name: "Ribeira3"
downloads:
- url: "http://www.unrealplayground.com/forums/downloads.php?do=file&id=3346"
  main: false
  repack: false
  state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/DeathMatch/N/3/5/51a417/DM-(NYA)Paris.zip"
  main: true
  repack: false
  state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/DeathMatch/N/3/5/51a417/DM-(NYA)Paris.zip"
  main: false
  repack: false
  state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/DeathMatch/N/3/5/51a417/DM-(NYA)Paris.zip"
  main: false
  repack: false
  state: "OK"
deleted: false
gametype: "DeathMatch"
title: "DM-(NYA)Paris"
playerCount: "16"
themes:
  Industrial: 1.0
bots: true
content/Unreal Tournament/Maps/DeathMatch/N/3/5/51a417/dm-nyaparis_[3551a417].yml
---
- name: Query ec2 public dns name
  shell: |
    # aws api to return vm public dns name
    curl -s http://169.254.169.254/latest/meta-data/public-hostname | cut -d'.' -f1
  register: output
  when: vm_provider is defined and vm_provider == "aws"

- name: Set kind cluster base name
  set_fact:
    hostname_base: "{{ output.stdout }}"
  when: vm_provider is defined and vm_provider == "aws"

- name: Set the kind base name using the baremetal hostname
  set_fact:
    hostname_base: "{{ inventory_hostname | regex_replace('[a-zA-Z]\\.(.*)', '') }}"
  when: vm_provider is defined and vm_provider == "baremetal"

- name: Host name
  command: "hostname"
  register: command_output

- name: Set the kind base name using the openstack vm inventory hostname
  set_fact:
    # hostname_base: "{{ inventory_hostname }}"
    hostname_base: "{{ command_output.stdout }}"
  when: vm_provider is defined and vm_provider == "openstack"

# - name: Create Kind Reserved Config
#   shell: |
#     cat > /tmp/kind-config.yaml <<EOF
#     kind: Cluster
#     apiVersion: kind.x-k8s.io/v1alpha4
#     nodes:
#     - role: control-plane
#       kubeadmConfigPatches:
#       - |
#         kind: InitConfiguration
#         nodeRegistration:
#           kubeletExtraArgs:
#             system-reserved: memory=8Gi,cpu=1
#     EOF
#   when: False

- name: Docker login to track rate limit usage
  shell: |
    docker login -u="{{ DOCKER_USER }}" -p="{{ DOCKER_PASSWORD }}"

- name: Create Kind Cluster
  shell: |
    kind get clusters | grep kind-{{ item }} || kind create cluster --name kind-{{ item }}
  register: output

- name: Debug the create output
  debug:
    msg: "{{ output }}"

- name: Create directory for kind cluster kubeconfig
  file:
    dest: "/home/centos/{{ hostname_base }}-kind-{{ item }}"
    state: directory

- name: Save Kind kubeconfig to /home/centos/
  shell: |
    kind get kubeconfig --name=kind-{{ item }} > /home/centos/{{ hostname_base }}-kind-{{ item }}/kubeconfig

# - name: Create 4K configmaps
#   k8s:
#     state: present
#     kubeconfig: "/tmp/{{ hostname_base }}-kind-{{ item }}/kubeconfig"
#     definition: "{{ lookup('template', '{{ playbook_dir }}/files/cm400.yml')}}"
#   loop: "{{ range(0, configmap_max_count, 1)|list }}"
#   loop_control:
#     loop_var: inner
#   tags: focus
ansible/tasks/kind.yml
rbac:
  enabled: true
binderhub:
  config:
    BinderHub:
      use_registry: true
      auth_enabled: true
      template_path: /etc/binderhub/custom/templates
      extra_static_path: /etc/binderhub/custom/static
      extra_static_url_prefix: /extra_static/
      template_variables:
        EXTRA_STATIC_URL_PREFIX: "/extra_static/"
  service:
    type: ClusterIP
  ingress:
    enabled: true
    annotations:
      # Ask cert-manager to provide a TLS secret, configured with its default values.
      kubernetes.io/tls-acme: "true"
      # Explicitly use the nginx-ingress controller instead of "gce". This is
      # required for the nginx-ingress-controller to function: it overrides any
      # cloud-provided ingress controllers in favour of the one we choose to
      # deploy, i.e. nginx.
      kubernetes.io/ingress.class: nginx
    hosts:
      - binder.hub23.turing.ac.uk
    tls:
      - secretName: hub23-binder-tls
        hosts:
          - binder.hub23.turing.ac.uk
  jupyterhub:
    proxy:
      service:
        type: ClusterIP
    ingress:
      enabled: true
      annotations:
        kubernetes.io/tls-acme: "true"
        kubernetes.io/ingress.class: nginx
      hosts:
        - hub.hub23.turing.ac.uk
      tls:
        - secretName: hub23-hub-tls
          hosts:
            - hub.hub23.turing.ac.uk
  initContainers:
    - name: git-clone-templates
      image: alpine/git
      args:
        - clone
        - --single-branch
        - --branch=html-templates
        - --depth=1
        - --
        - https://github.com/alan-turing-institute/hub23-deploy
        - /etc/binderhub/custom
      securityContext:
        runAsUser: 0
      volumeMounts:
        - name: custom-templates
          mountPath: /etc/binderhub/custom
  extraVolumes:
    - name: custom-templates
      emptyDir: {}
  extraVolumeMounts:
    - name: custom-templates
      mountPath: /etc/binderhub/custom
nginx-ingress:
  controller:
    config:
      proxy-body-size: 64m
scope:
  enabled: true
hub23-chart/values.yaml
name: Test
on:
  schedule:
    - cron: '0 15 * * 5'
  push:
    branches:
      - '*'
    tags:
      - 'v*'
  pull_request:
    branches:
      - 'master'
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: ruby/setup-ruby@v1
      - uses: actions/cache@v1
        with:
          path: vendor/bundle
          key: ${{ runner.os }}-gem-${{ hashFiles('**/Gemfile.lock') }}
          restore-keys: |
            ${{ runner.os }}-gem-
      - name: Prepare
        run: |
          sudo apt-get update -qq
          sudo apt-get install -y tftp-hpa asciidoctor
      - name: Install
        run: |
          bundle config set deployment true
          bundle config set path vendor/bundle
          bundle install --jobs 4 --retry 3
      - name: Build
        run: |
          make
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-2
      - name: Test
        run: |
          bundle exec rspec
        env:
          TEST_BUCKET_NAME: hanazuki-s3tftpd-test
  docker:
    needs:
      - test
    runs-on: ubuntu-latest
    steps:
      - id: config
        shell: bash
        run: |
          push=false
          tags=
          if [[ $GITHUB_EVENT_NAME == push ]]; then
            if [[ $GITHUB_REF == refs/heads/master ]]; then
              push=true
              tags=ghcr.io/${GITHUB_REPOSITORY}:testing
            elif [[ $GITHUB_REF == refs/tags/v* ]]; then
              push=true
              tags=ghcr.io/${GITHUB_REPOSITORY}:${GITHUB_REF#refs/tags/v}%0Aghcr.io/${GITHUB_REPOSITORY}:latest
            fi
          fi
          echo "::set-output name=push::$push"
          echo "::set-output name=tags::$tags"
      - uses: docker/setup-qemu-action@v1
      - uses: docker/setup-buildx-action@v1
      - uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - uses: docker/build-push-action@v2
        with:
          push: ${{ steps.config.outputs.push }}
          tags: ${{ steps.config.outputs.tags }}
          platforms: linux/amd64,linux/arm64
          labels: |
            org.opencontainers.image.source=${{ github.event.repository.html_url }}
            org.opencontainers.image.revision=${{ github.sha }}
.github/workflows/test.yml
uuid: dfd506ce-eb3f-497c-a43b-81d3647a360b
langcode: en
status: true
dependencies:
  config:
    - core.entity_view_mode.node.search_index
    - field.field.node.directory_entry.body
    - field.field.node.directory_entry.field_address
    - field.field.node.directory_entry.field_breema_center_staff
    - field.field.node.directory_entry.field_breema_clinic_practitioner
    - field.field.node.directory_entry.field_certification
    - field.field.node.directory_entry.field_email
    - field.field.node.directory_entry.field_facebook_page
    - field.field.node.directory_entry.field_geolocation
    - field.field.node.directory_entry.field_has_active_event
    - field.field.node.directory_entry.field_job_title
    - field.field.node.directory_entry.field_name_first
    - field.field.node.directory_entry.field_name_last
    - field.field.node.directory_entry.field_resources
    - field.field.node.directory_entry.field_social_media_image
    - field.field.node.directory_entry.field_telephone
    - field.field.node.directory_entry.field_upcoming_events
    - field.field.node.directory_entry.field_website
    - node.type.directory_entry
  module:
    - address
    - link
    - options
    - text
id: node.directory_entry.search_index
targetEntityType: node
bundle: directory_entry
mode: search_index
content:
  body:
    label: hidden
    type: text_default
    weight: 9
    settings: { }
    third_party_settings: { }
    region: content
  field_address:
    weight: 4
    label: hidden
    settings: { }
    third_party_settings: { }
    type: address_plain
    region: content
  field_breema_center_staff:
    weight: 1
    label: hidden
    settings:
      format: custom
      format_custom_true: 'Breema Center staff'
      format_custom_false: ''
    third_party_settings: { }
    type: boolean
    region: content
  field_breema_clinic_practitioner:
    weight: 2
    label: hidden
    settings:
      format: custom
      format_custom_true: 'Breema Clinic practitioner'
      format_custom_false: ''
    third_party_settings: { }
    type: boolean
    region: content
  field_certification:
    weight: 3
    label: hidden
    settings: { }
    third_party_settings: { }
    type: list_default
    region: content
  field_email:
    weight: 5
    label: hidden
    settings: { }
    third_party_settings: { }
    type: basic_string
    region: content
  field_facebook_page:
    weight: 7
    label: hidden
    settings:
      trim_length: null
      url_only: true
      url_plain: true
      rel: '0'
      target: '0'
    third_party_settings: { }
    type: link
    region: content
  field_job_title:
    weight: 0
    label: hidden
    settings:
      link_to_entity: false
    third_party_settings: { }
    type: string
    region: content
  field_telephone:
    weight: 6
    label: hidden
    settings:
      link_to_entity: false
    third_party_settings: { }
    type: string
    region: content
  field_website:
    weight: 8
    label: hidden
    settings:
      trim_length: null
      url_only: false
      url_plain: false
      rel: '0'
      target: '0'
    third_party_settings: { }
    type: link_separate
    region: content
hidden:
  created: true
  field_geolocation: true
  field_has_active_event: true
  field_name_first: true
  field_name_last: true
  field_resources: true
  field_social_media_image: true
  field_upcoming_events: true
  group_content: true
  links: true
  uid: true
config/core.entity_view_display.node.directory_entry.search_index.yml
---
:id:
  :MIMSY: LOAN_ITEMS.LITMKEY
  :module: ''
  :column: ''
  :instructions: 'Not mapped'
:loan_id:
  :MIMSY: LOAN_ITEMS.LKEY
  :module: ''
  :column: ''
  :instructions: 'Not mapped. Foreign key to Loan.id'
:catalog_id:
  :MIMSY: LOAN_ITEMS.M_ID
  :module: ''
  :column: ''
  :instructions: 'Not mapped. Foreign key to Catalog.id'
:catalog_number:
  :MIMSY: LOAN_ITEMS.ID_NUMBER
  :module: ''
  :column: ''
  :instructions: 'Not mapped. See associated Excel files and additional Excel mapping documents.'
:status:
  :MIMSY: LOAN_ITEMS.STATUS
  :module: ''
  :column: ''
  :instructions: ''
:status_date:
  :MIMSY: LOAN_ITEMS.STATUS_DATE
  :module: ''
  :column: ''
  :instructions: ''
:loan_date:
  :MIMSY: LOAN_ITEMS.LOAN_DATE
  :module: ''
  :column: ''
  :instructions: ''
:due_date:
  :MIMSY: LOAN_ITEMS.DUE_DATE
  :module: ''
  :column: ''
  :instructions: ''
:return_date:
  :MIMSY: LOAN_ITEMS.RETURN_DATE
  :module: ''
  :column: ''
  :instructions: ''
:return_confirmed_by:
  :MIMSY: LOAN_ITEMS.RETURN_CONFIRMED_BY
  :module: ''
  :column: ''
  :instructions: ''
:renewal_history:
  :MIMSY: LOAN_ITEMS.RENEWAL_HISTORY
  :module: ''
  :column: ''
  :instructions: ''
:note:
  :MIMSY: LOAN_ITEMS.NOTE
  :module: ''
  :column: ''
  :instructions: ''
:description:
  :MIMSY: LOAN_ITEMS.ITEM_SUMMARY
  :module: ''
  :column: ''
  :instructions: ''
:flag1:
  :MIMSY: LOAN_ITEMS.FLAG1
  :module: ''
  :column: ''
  :instructions: ''
:flag2:
  :MIMSY: LOAN_ITEMS.FLAG2
  :module: ''
  :column: ''
  :instructions: ''
:catalogued:
  :MIMSY: LOAN_ITEMS.CATALOGUED
  :module: ''
  :column: ''
  :instructions: ''
:id_sort:
  :MIMSY: LOAN_ITEMS.ID_SORT
  :module: ''
  :column: ''
  :instructions: 'Not mapped'
:created_by:
  :MIMSY: LOAN_ITEMS.CREATED_BY
  :module: ''
  :column: ''
  :instructions: 'Not mapped'
:create_date:
  :MIMSY: LOAN_ITEMS.CREATE_DATE
  :module: ''
  :column: ''
  :instructions: 'Not mapped'
:updated_by:
  :MIMSY: LOAN_ITEMS.UPDATED_BY
  :module: ''
  :column: ''
  :instructions: 'Not mapped'
:update_date:
  :MIMSY: LOAN_ITEMS.UPDATE_DATE
  :module: ''
  :column: ''
  :instructions: 'Not mapped'
migration_mappings/original/LoanCatalog.yaml
items:
  - uid: azure-arm-network.PacketCaptureFilter
    name: PacketCaptureFilter
    fullName: PacketCaptureFilter
    children:
      - azure-arm-network.PacketCaptureFilter.localIPAddress
      - azure-arm-network.PacketCaptureFilter.localPort
      - azure-arm-network.PacketCaptureFilter.protocol
      - azure-arm-network.PacketCaptureFilter.remoteIPAddress
      - azure-arm-network.PacketCaptureFilter.remotePort
    langs:
      - typeScript
    type: interface
    summary: ''
    package: azure-arm-network
  - uid: azure-arm-network.PacketCaptureFilter.localIPAddress
    name: localIPAddress
    fullName: localIPAddress
    children: []
    langs:
      - typeScript
    type: property
    summary: ''
    optional: true
    syntax:
      content: 'localIPAddress?: string'
      return:
        type:
          - string
    package: azure-arm-network
  - uid: azure-arm-network.PacketCaptureFilter.localPort
    name: localPort
    fullName: localPort
    children: []
    langs:
      - typeScript
    type: property
    summary: ''
    optional: true
    syntax:
      content: 'localPort?: string'
      return:
        type:
          - string
    package: azure-arm-network
  - uid: azure-arm-network.PacketCaptureFilter.protocol
    name: protocol
    fullName: protocol
    children: []
    langs:
      - typeScript
    type: property
    summary: ''
    optional: true
    syntax:
      content: 'protocol?: string'
      return:
        type:
          - string
    package: azure-arm-network
  - uid: azure-arm-network.PacketCaptureFilter.remoteIPAddress
    name: remoteIPAddress
    fullName: remoteIPAddress
    children: []
    langs:
      - typeScript
    type: property
    summary: ''
    optional: true
    syntax:
      content: 'remoteIPAddress?: string'
      return:
        type:
          - string
    package: azure-arm-network
  - uid: azure-arm-network.PacketCaptureFilter.remotePort
    name: remotePort
    fullName: remotePort
    children: []
    langs:
      - typeScript
    type: property
    summary: ''
    optional: true
    syntax:
      content: 'remotePort?: string'
      return:
        type:
          - string
    package: azure-arm-network
docs-ref-autogen/azure-arm-network/PacketCaptureFilter.yml
---
result: FAILURE
url: http://manhattan.ci.chef.co/job/chefdk-trigger-ad_hoc/275/
failure_category: network
failure_cause: 'network reset: windows: chefdk-build'
timestamp: 2016-05-15 14:46:30 UTC
duration: 1h44m36s
triggered_by: chef-jenkins
active_duration: 1h44m28s
parameters:
  GIT_REF: auto_dependency_bump_test
  EXPIRE_CACHE: false
change:
  git_remote: https://github.com/chef/chef-dk.git
  git_commit: <PASSWORD>
  project: chefdk
stages:
  chefdk-build:
    result: FAILURE
    failure_category: network
    failure_cause: 'network reset: windows'
    url: http://manhattan.ci.chef.co/job/chefdk-build/404/
    duration: 1h44m26s
    runs:
      windows-2008r2-i386:
        result: FAILURE
        failure_category: network
        failure_cause: network reset
        failed_in:
          step: build Packager::APPX
        url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=i386,platform=windows-2008r2,project=chefdk,role=builder/404/
        duration: 1h44m23s
      debian-6:
        result: SUCCESS
        url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=debian-6,project=chefdk,role=builder/404/
        duration: 1h42s
      el-6:
        result: SUCCESS
        url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=el-6,project=chefdk,role=builder/404/
        duration: 1h21m52s
      el-7:
        result: SUCCESS
        url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=el-7,project=chefdk,role=builder/404/
        duration: 47m18s
      mac_os_x-10.9:
        result: SUCCESS
        url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=mac_os_x-10.9,project=chefdk,role=builder/404/
        duration: 29m48s
      ubuntu-12.04:
        result: SUCCESS
        url: http://manhattan.ci.chef.co/job/chefdk-build/architecture=x86_64,platform=ubuntu-12.04,project=chefdk,role=builder/404/
        duration: 53m31s
  chefdk-trigger-ad_hoc:
    result: SUCCESS
    url: http://manhattan.ci.chef.co/job/chefdk-trigger-ad_hoc/275/
    duration: 2s
reports/manhattan.ci.chef.co/job/chefdk-trigger-ad_hoc/275.yaml
uid: "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition" fullName: "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition" name: "OperationDisplayDefinition" nameWithType: "OperationDisplayDefinition" summary: "The display information for a container registry operation." inheritances: - "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />" inheritedMembers: - "java.lang.Object.clone()" - "java.lang.Object.equals(java.lang.Object)" - "java.lang.Object.finalize()" - "java.lang.Object.getClass()" - "java.lang.Object.hashCode()" - "java.lang.Object.notify()" - "java.lang.Object.notifyAll()" - "java.lang.Object.toString()" - "java.lang.Object.wait()" - "java.lang.Object.wait(long)" - "java.lang.Object.wait(long,int)" syntax: "public final class OperationDisplayDefinition" constructors: - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.OperationDisplayDefinition()" methods: - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.description()" - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.operation()" - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.provider()" - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.resource()" - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.validate()" - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.withDescription(java.lang.String)" - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.withOperation(java.lang.String)" - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.withProvider(java.lang.String)" - "com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.withResource(java.lang.String)" type: "class" metadata: {} package: "com.azure.resourcemanager.containerregistry.models" artifact: com.azure.resourcemanager:azure-resourcemanager-containerregistry:2.0.0-beta.5
preview/docs-ref-autogen/com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition.yml
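A hedged sketch of how the setters listed above are typically combined. That each with* method returns the model itself for chaining is an assumption based on the usual azure-resourcemanager model convention, not something this file states; the literal values are illustrative placeholders:

```java
import com.azure.resourcemanager.containerregistry.models.OperationDisplayDefinition;

public class OperationDisplayDefinitionExample {
    public static void main(String[] args) {
        // Build the display model with the documented setters.
        OperationDisplayDefinition display = new OperationDisplayDefinition()
                .withProvider("Microsoft.ContainerRegistry")   // placeholder value
                .withResource("registries")                    // placeholder value
                .withOperation("List registries")              // placeholder value
                .withDescription("Lists the container registries in a subscription.");

        // Read back through the matching accessors, then validate the instance.
        System.out.println(display.provider() + "/" + display.operation());
        display.validate();
    }
}
```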
notifications:
  email: false
git:
  quiet: true
  depth: false
env:
  global:
    - TERM=dumb
language: java
jdk: openjdk8
python: 3.7
addons:
  apt:
    update: true
    packages:
      - bash
      - sudo
      - lsof
      - wget
      - curl
      - unzip
      - python3-pip
install: true
before_install:
  - mkdir -p ~/.local/bin
  - export PATH=$HOME/.local/bin:$PATH
  - pip3 install --user --upgrade pip setuptools
  - pip install --user --upgrade httpie
  - |
    if [ ! -f ~/.local/daggerok/bash-functions/master/main.bash ] ; then
      mkdir -p ~/.local/daggerok/bash-functions/master ;
      curl -s https://raw.githubusercontent.com/daggerok/bash-functions/master/main.bash > ~/.local/daggerok/bash-functions/master/main.bash ;
    fi ;
    source ~/.local/daggerok/bash-functions/master/main.bash ;
  - stop_any 80 8080 5672 5432 27017 >/dev/null
jobs:
  include:
    - stage: test
      jdk: openjdk8
      name: Maven build (openjdk8)
      script:
        - cp -Rf $TRAVIS_BUILD_DIR/src/main/resources/logback-maven.xml $TRAVIS_BUILD_DIR/src/main/resources/logback.xml
        - cd $TRAVIS_BUILD_DIR && ./mvnw -U
    - stage: test
      jdk: openjdk8
      name: Maven updates (openjdk8)
      script: cd $TRAVIS_BUILD_DIR && ./mvnw versions:display-property-updates -U
    - stage: test
      jdk: openjdk8
      name: Gradle build (openjdk8)
      script:
        - cp -Rf $TRAVIS_BUILD_DIR/src/main/resources/logback-gradle.xml $TRAVIS_BUILD_DIR/src/main/resources/logback.xml
        - cd $TRAVIS_BUILD_DIR && ./gradlew
    - stage: test
      jdk: openjdk8
      name: Gradle updates (openjdk8)
      script: cd $TRAVIS_BUILD_DIR && ./gradlew dependencyUpdates -Drevision=release
    - stage: test
      jdk: openjdk11
      name: Maven build (openjdk11)
      script:
        - cp -Rf $TRAVIS_BUILD_DIR/src/main/resources/logback-maven.xml $TRAVIS_BUILD_DIR/src/main/resources/logback.xml
        - cd $TRAVIS_BUILD_DIR && ./mvnw -U
    - stage: test
      jdk: openjdk11
      name: Maven updates (openjdk11)
      script:
        - cd $TRAVIS_BUILD_DIR && ./mvnw versions:display-parent-updates versions:display-dependency-updates versions:display-property-updates -U >/dev/null
        - cd $TRAVIS_BUILD_DIR && ./mvnw versions:display-parent-updates versions:display-dependency-updates versions:display-property-updates
    - stage: test
      jdk: openjdk11
      name: Gradle build (openjdk11)
      script:
        - cp -Rf $TRAVIS_BUILD_DIR/src/main/resources/logback-gradle.xml $TRAVIS_BUILD_DIR/src/main/resources/logback.xml
        - cd $TRAVIS_BUILD_DIR && ./gradlew
    - stage: test
      jdk: openjdk11
      name: Gradle updates (openjdk11)
      script: cd $TRAVIS_BUILD_DIR && ./gradlew dependencyUpdates -Drevision=release
cache:
  pip: true
  packages: true
  directories:
    - ~/.local/daggerok
    - ~/.docker
    - ~/.gradle
    - ~/.m2
.travis.yml
AWSTemplateFormatVersion: 2010-09-09
Description: ec2ZoneIds.
Parameters:
  CodeBucket:
    Description: CodeBucket.
    Type: String
  ec2ZoneIdsObjectVersion:
    Description: ec2ZoneIdsObjectVersion.
    Type: String
  boto3cloudformationObjectVersion:
    Description: boto3cloudformationObjectVersion
    Type: String
Resources:
  ec2ZoneIdsRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - lambda.amazonaws.com
            Action:
              - sts:AssumeRole
      Policies:
        - PolicyName: ManageEvents
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - events:PutRule
                  - events:DeleteRule
                  - events:PutTargets
                  - events:RemoveTargets
                  - lambda:AddPermission
                  - lambda:RemovePermission
                Resource: "*"
        - PolicyName: ManageRoutes
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - ec2:DescribeAvailabilityZones
                  - ssm:PutParameter
                  - ssm:DeleteParameter
                Resource: "*"
        - PolicyName: WriteToCloudWatch
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - logs:CreateLogGroup
                Resource: "*"
              - Effect: Allow
                Action:
                  - logs:CreateLogStream
                  - logs:PutLogEvents
                Resource: "*"
  myLambdaLayerboto3cloudformation:
    Type: "AWS::Lambda::LayerVersion"
    Properties:
      CompatibleRuntimes:
        - python3.6
      Content:
        S3Bucket: !Ref CodeBucket
        S3Key: boto3cloudformation.zip
        S3ObjectVersion: !Ref boto3cloudformationObjectVersion
      Description: boto3cloudformation
      LayerName: boto3cloudformation
  ec2ZoneIds:
    Type: AWS::Lambda::Function
    Properties:
      Code:
        S3Bucket: !Ref CodeBucket
        S3Key: ec2ZoneIds.zip
        S3ObjectVersion: !Ref ec2ZoneIdsObjectVersion
      Handler: index.lambda_handler
      MemorySize: 2048
      Role:
        Fn::GetAtt:
          - ec2ZoneIdsRole
          - Arn
      Runtime: python3.6
      Timeout: 900
      Layers:
        - !Ref myLambdaLayerboto3cloudformation
  ec2ZoneIdsExecution:
    Type: AWS::CloudFormation::CustomResource
    Version: '1.0'
    Properties:
      ServiceToken: !GetAtt ec2ZoneIds.Arn
python/ec2ZoneIds/example.yml
container:
  image: ghcr.io/davidbailey00/notion-linux

build_app_task:
  <<: &build_cache
    node_modules_cache:
      folder: node_modules
      fingerprint_script: cat package-lock.json
      populate_script: npm ci
    build_folder_cache:
      folder: build
      fingerprint_script:
        - cat package-lock.json
        - cat scripts/{_variables-*,build,enhance}.sh
  build_app_script:
    - scripts/build.sh
    - scripts/build.sh -b arm64
  enhance_app_script:
    - scripts/enhance.sh
    - scripts/enhance.sh -b arm64

package_deb_task:
  <<: *build_cache
  debs_cache: &debs_cache
    folder: out/debs
    fingerprint_script:
      - cat package-lock.json
      - cat scripts/{_variables-*,build,enhance,package-deb}.sh
      - cat templates/desktop-deb.ejs
  depends_on: build_app
  container:
    cpu: 4
    memory: 10G
  package_deb_script:
    - scripts/package-deb.sh &
    - scripts/package-deb.sh -b arm64 &
    - scripts/package-deb.sh -n notion-enhanced &
    - scripts/package-deb.sh -n notion-enhanced -b arm64 &
    - wait
    - "[[ $(ls out/debs | wc -l) == 4 ]]"
  deb_artifacts:
    path: "out/debs/*"

package_rpm_task:
  <<: *build_cache
  rpms_cache: &rpms_cache
    folder: out/rpms
    fingerprint_script:
      - cat package-lock.json
      - cat scripts/{_variables-*,build,enhance,package-rpm}.sh
      - cat templates/desktop-rpm.ejs
  depends_on: build_app
  container:
    cpu: 4
  package_rpm_script:
    - scripts/package-rpm.sh &
    - scripts/package-rpm.sh -b arm64 &
    - scripts/package-rpm.sh -n notion-enhanced &
    - scripts/package-rpm.sh -n notion-enhanced -b arm64 &
    - wait
    - "[[ $(ls out/rpms | wc -l) == 4 ]]"
  rpm_artifacts:
    path: "out/rpms/*"

deploy_to_gemfury_task:
  debs_cache: *debs_cache
  rpms_cache: *rpms_cache
  only_if: $CIRRUS_BRANCH == 'main'
  environment:
    GEMFURY_TOKEN: ENCRYPTED[a7b2cc08cab17b9d9043816cc0e8bdfc3e0b0b59f21c5e920d9f86d0961d61a05959590b90d998134d94828cd6a5a860]
  depends_on:
    - package_deb
    - package_rpm
  deploy_script: scripts/deploy.sh
.cirrus.yml
azurerm_api_management_identity_provider_google:
  status: ASSESS
  subcategory: API Management
  layout: azurerm
  page_title: 'Azure Resource Manager: azurerm_api_management_identity_provider_google'
  description: Manages an API Management Google Identity Provider.
  arguments:
    api_management_name:
      description: The Name of the API Management Service where this Google Identity Provider should be created. Changing this forces a new resource to be created.
      required: false
      policy: ''
      notes: ''
    resource_group_name:
      description: The Name of the Resource Group where the API Management Service exists. Changing this forces a new resource to be created.
      required: false
      policy: ''
      notes: ''
    client_id:
      description: Client Id for Google Sign-in.
      required: false
      policy: ''
      notes: ''
    client_secret:
      description: Client secret for Google Sign-in.
      required: false
      policy: ''
      notes: ''
  attributes:
    id:
      description: The ID of the API Management Google Identity Provider.
  timeouts:
    create:
      description: Used when creating the API Management Google Identity Provider.
      required: false
      timeout: 30
    update:
      description: Used when updating the API Management Google Identity Provider.
      required: false
      timeout: 30
    read:
      description: Used when retrieving the API Management Google Identity Provider.
      required: false
      timeout: 5
    delete:
      description: Used when deleting the API Management Google Identity Provider.
      required: false
      timeout: 30
  usage: !!binary |
    Q25KbGMyOTFjbU5sSUNKaGVuVnlaWEp0WDNKbGMyOTFjbU5sWDJkeWIzVndJaUFpWlhoaGJYQnNa
    U0lnZXdvZ0lHNWhiV1VnSUNBZ0lEMGdJbVY0WVcxd2JHVXRjbVZ6YjNWeVkyVnpJZ29nSUd4dlky
    RjBhVzl1SUQwZ0lsZGxjM1FnUlhWeWIzQmxJZ3A5Q2dweVpYTnZkWEpqWlNBaVlYcDFjbVZ5YlY5
    aGNHbGZiV0Z1WVdkbGJXVnVkQ0lnSW1WNFlXMXdiR1VpSUhzS0lDQnVZVzFsSUNBZ0lDQWdJQ0Fn
    SUNBZ0lDQWdJRDBnSW1WNFlXMXdiR1V0WVhCcGJTSUtJQ0JzYjJOaGRHbHZiaUFnSUNBZ0lDQWdJ
    Q0FnSUQwZ1lYcDFjbVZ5YlY5eVpYTnZkWEpqWlY5bmNtOTFjQzVsZUdGdGNHeGxMbXh2WTJGMGFX
    OXVDaUFnY21WemIzVnlZMlZmWjNKdmRYQmZibUZ0WlNBOUlHRjZkWEpsY20xZmNtVnpiM1Z5WTJW
    ZlozSnZkWEF1WlhoaGJYQnNaUzV1WVcxbENpQWdjSFZpYkdsemFHVnlYMjVoYldVZ0lDQWdJQ0E5
    SUNKTmVTQkRiMjF3WVc1NUlnb2dJSEIxWW14cGMyaGxjbDlsYldGcGJDQWdJQ0FnUFNBaVkyOXRj
    R0Z1ZVVCMFpYSnlZV1p2Y20wdWFXOGlDaUFnYzJ0MVgyNWhiV1VnSUNBZ0lDQWdJQ0FnSUNBOUlD
    SkVaWFpsYkc5d1pYSmZNU0lLZlFvS2NtVnpiM1Z5WTJVZ0ltRjZkWEpsY20xZllYQnBYMjFoYm1G
    blpXMWxiblJmYVdSbGJuUnBkSGxmY0hKdmRtbGtaWEpmWjI5dloyeGxJaUFpWlhoaGJYQnNaU0ln
    ZXdvZ0lISmxjMjkxY21ObFgyZHliM1Z3WDI1aGJXVWdQU0JoZW5WeVpYSnRYM0psYzI5MWNtTmxY
    MmR5YjNWd0xtVjRZVzF3YkdVdWJtRnRaUW9nSUdGd2FWOXRZVzVoWjJWdFpXNTBYMjVoYldVZ1BT
    QmhlblZ5WlhKdFgyRndhVjl0WVc1aFoyVnRaVzUwTG1WNFlXMXdiR1V1Ym1GdFpRb2dJR05zYVdW
    dWRGOXBaQ0FnSUNBZ0lDQWdJQ0FnUFNBaU1EQXdNREF3TURBdVlYQndjeTVuYjI5bmJHVjFjMlZ5
    WTI5dWRHVnVkQzVqYjIwaUNpQWdZMnhwWlc1MFgzTmxZM0psZENBZ0lDQWdJQ0E5SUNJd01EQXdN
    REF3TURBd01EQXdNREF3TURBd01EQXdNREF3TURBd01EQXdNQ0lLZlFvPQ==
  import: !!binary |
    Q2dwQlVFa2dUV0Z1WVdkbGJXVnVkQ0JIYjI5bmJHVWdTV1JsYm5ScGRIa2dVSEp2ZG1sa1pYSWdZ
    MkZ1SUdKbElHbHRjRzl5ZEdWa0lIVnphVzVuSUhSb1pTQmdjbVZ6YjNWeVkyVWdhV1JnTENCbExt
    Y3VDZ289
  hcl_url: !!binary |
    YUhSMGNITTZMeTluYVhSb2RXSXVZMjl0TDNSbGNuSmhabTl5YlMxd2NtOTJhV1JsY25NdmRHVnlj
    bUZtYjNKdExYQnliM1pwWkdWeUxXRjZkWEpsY20wdllteHZZaTl0WVhOMFpYSXZkMlZpYzJsMFpT
    OWtiMk56TDNJdllYQnBYMjFoYm1GblpXMWxiblJmYVdSbGJuUnBkSGxmY0hKdmRtbGtaWEpmWjI5
    dloyeGxMbWgwYld3dWJXRnlhMlJ2ZDI0PQ==
config/azurerm/azurerm_api_management_identity_provider_google.yml
uid: "com.microsoft.azure.management.batch.LoginMode" fullName: "com.microsoft.azure.management.batch.LoginMode" name: "LoginMode" nameWithType: "LoginMode" summary: "Defines values for LoginMode." inheritances: - "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />" - "<xref href=\"java.lang.Enum\" data-throw-if-not-resolved=\"False\" />" inheritedMembers: - "java.lang.Enum.<T>valueOf(java.lang.Class<T>,java.lang.String)" - "java.lang.Enum.clone()" - "java.lang.Enum.compareTo(E)" - "java.lang.Enum.equals(java.lang.Object)" - "java.lang.Enum.finalize()" - "java.lang.Enum.getDeclaringClass()" - "java.lang.Enum.hashCode()" - "java.lang.Enum.name()" - "java.lang.Enum.ordinal()" - "java.lang.Enum.toString()" - "java.lang.Object.getClass()" - "java.lang.Object.notify()" - "java.lang.Object.notifyAll()" - "java.lang.Object.wait()" - "java.lang.Object.wait(long)" - "java.lang.Object.wait(long,int)" syntax: "public enum LoginMode extends Enum<LoginMode>" fields: - uid: "com.microsoft.azure.management.batch.LoginMode.BATCH" fullName: "com.microsoft.azure.management.batch.LoginMode.BATCH" name: "BATCH" nameWithType: "LoginMode.BATCH" summary: "The LOGON32\\_LOGON\\_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes." - uid: "com.microsoft.azure.management.batch.LoginMode.INTERACTIVE" fullName: "com.microsoft.azure.management.batch.LoginMode.INTERACTIVE" name: "INTERACTIVE" nameWithType: "LoginMode.INTERACTIVE" summary: "The LOGON32\\_LOGON\\_INTERACTIVE Win32 login mode. Some applications require having permissions associated with the interactive login mode. If this is the case for an application used in your task, then this option is recommended." methods: - uid: "com.microsoft.azure.management.batch.LoginMode.fromString(java.lang.String)" fullName: "com.microsoft.azure.management.batch.LoginMode.fromString(String value)" name: "fromString(String value)" nameWithType: "LoginMode.fromString(String value)" summary: "Parses a serialized value to a LoginMode instance." parameters: - description: "the serialized value to parse." name: "value" type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />" syntax: "public static LoginMode fromString(String value)" returns: description: "the parsed LoginMode object, or null if unable to parse." 
type: "<xref href=\"com.microsoft.azure.management.batch.LoginMode?alt=com.microsoft.azure.management.batch.LoginMode&text=LoginMode\" data-throw-if-not-resolved=\"False\" />" - uid: "com.microsoft.azure.management.batch.LoginMode.toString()" fullName: "com.microsoft.azure.management.batch.LoginMode.toString()" name: "toString()" nameWithType: "LoginMode.toString()" overridden: "java.lang.Enum.toString()" syntax: "public String toString()" returns: type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />" - uid: "com.microsoft.azure.management.batch.LoginMode.valueOf(java.lang.String)" fullName: "com.microsoft.azure.management.batch.LoginMode.valueOf(String name)" name: "valueOf(String name)" nameWithType: "LoginMode.valueOf(String name)" parameters: - name: "name" type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />" syntax: "public static LoginMode valueOf(String name)" returns: type: "<xref href=\"com.microsoft.azure.management.batch.LoginMode?alt=com.microsoft.azure.management.batch.LoginMode&text=LoginMode\" data-throw-if-not-resolved=\"False\" />" - uid: "com.microsoft.azure.management.batch.LoginMode.values()" fullName: "com.microsoft.azure.management.batch.LoginMode.values()" name: "values()" nameWithType: "LoginMode.values()" syntax: "public static LoginMode[] values()" returns: type: "<xref href=\"com.microsoft.azure.management.batch.LoginMode?alt=com.microsoft.azure.management.batch.LoginMode&text=LoginMode\" data-throw-if-not-resolved=\"False\" />[]" metadata: {} package: "com.microsoft.azure.management.batch" artifact: com.microsoft.azure:azure-mgmt-batch:1.40.0
docs-ref-autogen/com.microsoft.azure.management.batch.LoginMode.yml
- https://images4.alphacoders.com/985/985653.jpg - https://images7.alphacoders.com/333/333710.png - https://images4.alphacoders.com/820/82036.jpg - https://images7.alphacoders.com/934/934644.jpg - https://images6.alphacoders.com/545/545820.jpg - https://images.alphacoders.com/441/441248.jpg - https://images6.alphacoders.com/594/594668.jpg - https://images8.alphacoders.com/651/651321.png - https://images8.alphacoders.com/503/503824.jpg - https://images.alphacoders.com/515/515960.jpg - https://images4.alphacoders.com/234/234678.jpg - https://images.alphacoders.com/232/232153.jpg - https://images5.alphacoders.com/893/893970.jpg - https://images.alphacoders.com/915/915706.jpg - https://images4.alphacoders.com/169/169088.png - https://images2.alphacoders.com/593/593884.jpg - https://images2.alphacoders.com/102/1027580.jpg - https://images5.alphacoders.com/605/605713.png - https://images8.alphacoders.com/394/394518.jpg - https://images6.alphacoders.com/103/1031147.jpg - https://images5.alphacoders.com/842/842657.jpg - https://images2.alphacoders.com/711/711448.jpg - https://images8.alphacoders.com/994/994713.jpg - https://images5.alphacoders.com/842/842656.jpg - https://images3.alphacoders.com/109/1098975.jpg - https://images8.alphacoders.com/114/1143850.jpg - https://images4.alphacoders.com/147/147388.jpg - https://images7.alphacoders.com/975/975418.jpg - https://images8.alphacoders.com/746/746330.jpg - https://images6.alphacoders.com/706/706860.jpg - https://images4.alphacoders.com/103/103606.jpg - https://images2.alphacoders.com/975/975700.jpg - https://images2.alphacoders.com/900/900011.jpg - https://images.alphacoders.com/590/590979.jpg - https://images3.alphacoders.com/999/999049.jpg - https://images8.alphacoders.com/998/998708.jpg - https://images.alphacoders.com/240/240193.jpg - https://images2.alphacoders.com/764/76400.jpg - https://images7.alphacoders.com/115/1156308.jpg - https://images.alphacoders.com/109/1097384.png - https://images7.alphacoders.com/102/1021611.jpg - https://images6.alphacoders.com/999/999048.jpg - https://images.alphacoders.com/994/994113.jpg - https://images2.alphacoders.com/907/907141.jpg - https://images7.alphacoders.com/110/1107086.png - https://images6.alphacoders.com/104/1047685.jpg - https://images2.alphacoders.com/104/1040500.jpg - https://images4.alphacoders.com/956/956202.jpg - https://images7.alphacoders.com/851/851489.png - https://images2.alphacoders.com/438/438597.jpg - https://images7.alphacoders.com/405/405545.jpg - https://images7.alphacoders.com/404/404723.jpg - https://images3.alphacoders.com/142/142537.jpg - https://images.alphacoders.com/136/136481.jpg - https://images2.alphacoders.com/110/1107453.jpg - https://images4.alphacoders.com/108/1083171.jpg - https://images2.alphacoders.com/987/987362.jpg - https://images4.alphacoders.com/984/984197.jpg - https://images2.alphacoders.com/975/975420.jpg - https://images4.alphacoders.com/952/952483.jpg - https://images4.alphacoders.com/939/939569.jpg - https://images4.alphacoders.com/829/829454.jpg - https://images.alphacoders.com/774/774250.jpg - https://wall.alphacoders.com/big.php?i=291381 - https://images3.alphacoders.com/155/155217.jpg - https://images7.alphacoders.com/103/1036162.jpg - https://images6.alphacoders.com/999/999045.jpg - https://images8.alphacoders.com/979/979776.jpg - https://images2.alphacoders.com/925/925019.jpg - https://images3.alphacoders.com/917/917040.jpg - https://images5.alphacoders.com/911/911968.jpg - https://images.alphacoders.com/903/903647.jpg - 
https://images5.alphacoders.com/655/655003.png - https://images4.alphacoders.com/599/599924.jpg - https://images2.alphacoders.com/524/524479.jpg - https://images5.alphacoders.com/519/519261.jpg - https://images4.alphacoders.com/240/240584.jpg - https://images4.alphacoders.com/129/129896.jpg - https://images2.alphacoders.com/110/1109206.jpg - https://images2.alphacoders.com/110/1106594.jpg - https://images4.alphacoders.com/101/1010899.jpg - https://images2.alphacoders.com/717/717011.jpg - https://images3.alphacoders.com/116/1166020.jpg - https://images2.alphacoders.com/109/1099181.jpg - https://images6.alphacoders.com/109/1097382.png - https://images7.alphacoders.com/104/1048819.jpg - https://images4.alphacoders.com/104/1047683.jpg - https://images8.alphacoders.com/103/1036161.jpg - https://images8.alphacoders.com/958/958236.jpg - https://images2.alphacoders.com/872/872088.png - https://images.alphacoders.com/718/718148.jpg - https://images2.alphacoders.com/594/594790.jpg - https://images2.alphacoders.com/118/1186192.jpg - https://images2.alphacoders.com/109/1097566.jpg - https://images5.alphacoders.com/101/1010900.jpg - https://images7.alphacoders.com/113/1134857.jpg - https://images8.alphacoders.com/983/983515.jpg - https://images2.alphacoders.com/102/1029587.jpg - https://images7.alphacoders.com/118/1189464.jpg - https://images3.alphacoders.com/118/1184109.jpg
themes/shoka/_images.yml
apiVersion: apps/v1 kind: Deployment metadata: labels: name: kube-eventer name: kube-eventer namespace: monitoring spec: replicas: 1 selector: matchLabels: app: kube-eventer template: metadata: labels: app: kube-eventer annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: dnsPolicy: ClusterFirstWithHostNet serviceAccount: kube-eventer nodeSelector: type: eventer containers: - image: harbor.k8s.moonton.net/operator/kube-eventer-amd64:v4 name: kube-eventer command: - "/kube-eventer" - "--source=kubernetes:https://kubernetes.default" - --frequency=10s - --bufferwindows=20 - --v=2 - --sink=dingtalk:https://oapi.dingtalk.com/robot/send?access_token=9097668119105d47c33e86c67a9ac709496739a7f7dea3b6a7e46c4f88d29a6d&level=Warning&msg_type=markdown env: # If TZ is assigned, set the TZ value as the time zone - name: TZ value: "Asia/Shanghai" volumeMounts: - name: localtime mountPath: /etc/localtime readOnly: true - name: zoneinfo mountPath: /usr/share/zoneinfo readOnly: true resources: requests: cpu: 100m memory: 100Mi limits: cpu: 500m memory: 250Mi volumes: - name: localtime hostPath: path: /etc/localtime - name: zoneinfo hostPath: path: /usr/share/zoneinfo --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: kube-eventer rules: - apiGroups: - "" resources: - events verbs: - get - list - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: kube-eventer roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: kube-eventer subjects: - kind: ServiceAccount name: kube-eventer namespace: monitoring --- apiVersion: v1 kind: ServiceAccount metadata: name: kube-eventer namespace: monitoring
deploy/deploy.yaml
name: Release Tools on: push: branches: - release repository_dispatch: types: release jobs: build: runs-on: ${{ matrix.os }} strategy: fail-fast: true matrix: os: [ubuntu-latest, windows-latest, macOS-latest] steps: - uses: hecrj/setup-rust-action@v1 with: rust-version: stable - uses: actions/checkout@v1 - name: Build Tools run: cargo build -p divvunspell-tools --release - name: Publish Binary if: matrix.os != 'windows-latest' uses: actions/upload-artifact@v1 with: name: ${{ matrix.os }} path: target/release/thfst-tools - name: Publish Binary if: matrix.os == 'windows-latest' uses: actions/upload-artifact@v1 with: name: ${{ matrix.os }} path: target/release/thfst-tools.exe release: needs: build runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 - name: Get Package Version run: echo ::set-env name=PACKAGE_VERSION::$(cargo metadata --no-deps --format-version=1 | jq -r '.packages[] | select(.name == "divvunspell-tools") | .version') - name: Download Linux Binary uses: actions/download-artifact@v1 with: name: ubuntu-latest - name: Download Windows Binary uses: actions/download-artifact@v1 with: name: windows-latest - name: Download macOS Binary uses: actions/download-artifact@v1 with: name: macOS-latest - name: Create Release id: create_release uses: actions/create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ format('v{0}', env.PACKAGE_VERSION) }} release_name: Release ${{ env.PACKAGE_VERSION }} draft: true - name: Upload artifact Linux uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.create_release.outputs.upload_url }} asset_path: ./ubuntu-latest/thfst-tools asset_name: thfst-tools_lin asset_content_type: application/octet-stream - name: Upload artifact Windows uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.create_release.outputs.upload_url }} asset_path: ./windows-latest/thfst-tools.exe asset_name: thfst-tools_win.exe asset_content_type: application/octet-stream - name: Upload artifact macOS uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.create_release.outputs.upload_url }} asset_path: ./macOS-latest/thfst-tools asset_name: thfst-tools_macOS asset_content_type: application/octet-stream
.github/workflows/release.yml
metricbeat.modules: - module: system metricsets: ["core", "cpu", "load", "diskio", "memory", "network", "process", "socket"] process.cgroups.enabled: true period: 15s - module: docker metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "image", "memory", "network"] hosts: ["unix:///var/run/docker.sock"] period: 15s metricbeat.autodiscover: providers: - type: docker hints.enabled: true templates: # Nginx - condition: { contains: { docker.container.image: nginx }} config: - module: nginx metricsets: ["stubstatus"] hosts: ["https://${data.docker.container.name}:443"] headers: { Authorization: "Bearer secret" } ssl.certificate_authorities: ["/usr/share/metricbeat/config/certificates/ca/ca.crt"] period: 5s # Redis - condition: { contains: { docker.container.image: redis }} config: - module: redis metricsets: ["info", "keyspace"] period: 10s hosts: ["redis://${data.host}:${data.port}"] password: <PASSWORD> # Elasticsearch - condition: { contains: { docker.container.image: elasticsearch }} config: - module: elasticsearch metricsets: ["ccr", "enrich", "cluster_stats", "index", "index_recovery", "index_summary", "ml_job", "node_stats", "shard"] hosts: ["https://${data.docker.container.name}:9200"] username: elastic password: ${<PASSWORD>} ssl.certificate_authorities: ["/usr/share/metricbeat/config/certificates/ca/ca.crt"] xpack.enabled: true period: 30s # Kibana - condition: { contains: { docker.container.image: kibana }} config: - module: kibana metricsets: ["stats"] hosts: ["https://${data.docker.container.name}:5601"] username: elastic password: ${<PASSWORD>} ssl.certificate_authorities: ["/usr/share/metricbeat/config/certificates/ca/ca.crt"] xpack.enabled: true period: 30s processors: - add_host_metadata: ~ - add_docker_metadata: ~ - add_cloud_metadata: ~ tags: ['service-A'] fields: { environment: 'staging' } output.elasticsearch: hosts: ["https://demo_elasticsearch_1:9200"] username: "elastic" password: "${<PASSWORD>}" ssl.certificate_authorities: ["/usr/share/metricbeat/config/certificates/ca/ca.crt"] setup: kibana: host: https://demo_kibana_1:5601 ssl.certificate_authorities: ["/usr/share/metricbeat/config/certificates/ca/ca.crt"] dashboards: { enabled: true, retry: { enabled: true, interval: 10s }} monitoring: enabled: true
metricbeat.yml
uuid: 98781b4c-5e75-4980-a8d3-d3beea2b802b langcode: en status: true dependencies: enforced: module: - wildlife_content id: node_job_national class: null field_plugin_method: null cck_plugin_method: null migration_tags: null migration_group: example_wildlife_national label: 'Migrate Job data from a CSV source to nodes: National' source: plugin: csv path: ./modules/custom/wildlife_content/src/Plugin/migrate/source/csv/node_job_national.csv header_row_count: 1 keys: - nid constants: text_format: rich_text mod_state: published process: type: plugin: default_value default_value: job title: title moderation_state: constants/mod_state field_header_type: header_type field_header_image/target_id: - plugin: skip_on_empty method: process source: image - plugin: migration migration: - media_images_national field_header_silhouette/target_id: - plugin: skip_on_empty method: process source: silhouette - plugin: migration migration: - media_silhouettes_national field_job_reference: ref field_job_role_type: - plugin: explode delimiter: '|' source: role_type - plugin: migration migration: taxonomy_term_job_type field_job_location/target_id: - plugin: skip_on_empty method: process source: location - plugin: migration migration: - location_national field_job_closing_date: closing_date field_job_salary: salary field_job_contract_type: contract field_job_working_hours: hours field_job_application_link/title: link_title field_job_application_link/uri: link_url field_job_summary: summary field_job_contact_details/value: details field_job_contact_details/format: constants/text_format field_further_content: - plugin: explode delimiter: '|' source: components - plugin: migration migration: - paragraph_attached_files_national no_stub: true - plugin: iterator process: target_id: '0' target_revision_id: '1' destination: plugin: 'entity:node' migration_dependencies: required: - location_national - media_images_national - media_silhouettes_national - paragraph_attached_files_national
web/modules/custom/wildlife_content/config/install/migrate_plus.migration.node_job_national.yml
name: StaffCore version: 4.4.6 main: cl.bebt.staffcore.main author: BarraR3port prefix: StaffCore loadbefore: [ Essentials, PermissionsEx ] api-version: 1.13 commands: staffcore: description: Returns all the vanished, flying and frozen players, or the plugin version. clearchat: description: Clears your own or the staff chat; you can also clear the whole server chat! aliases: [ cc ] fly: description: Allows you to fly aliases: [ volar ] freeze: description: Freeze players aliases: [ frozen ] gmc: description: Set your gamemode to creative aliases: [ creative ] gms: description: Set your gamemode to survival aliases: [ survival ] heal: description: Heals and feeds you aliases: [ vida,feed,hungry,maxlife,maxhealth ] ip: description: Returns your IP aliases: [ address,directionip ] ping: description: Shows your ping permission: staffcore.ping aliases: [ latency ] tp: description: Teleport to players aliases: [ teleport ] tpall: description: Teleport all players to a target vanish: description: Vanish players aliases: [ v,invisible,hide ] day: description: Change the time to day aliases: [ sun,dia ] night: description: Change the time to night aliases: [ noche ] weather: description: Change the weather aliases: [ clima ] suicide: description: Kill yourself aliases: [ suicidarme ] staff: description: Sets your staff mode with staff utils aliases: [ mod ] staffchat: description: Staff chat aliases: [ sc ] togglestaffchat: description: Turns the staff chat on and off aliases: [ tsc ] report: description: Report players aliases: [ rp ] reportlist: description: See all the reports aliases: [ rpl ] invsee: description: See other players' inventories! aliases: [ chestothers ] endersee: description: See other players' Ender Chests aliases: [ enderothers ] mute: description: Mute players for a period of time aliases: [ silence ] mutechat: description: Mute the global chat aliases: [ togglechat ] unmute: description: Unmute players ban: description: Open a ban GUI aliases: [ ban-ip, punish, sanction ] bans: description: Open a GUI with all the bans of a player aliases: [ punishments, sanctions ] unban: description: Unban players aliases: [ pardon ] alts: description: Check the player's alts aliases: [ checkalts ] wipe: description: Wipes players' data stafflist: description: Opens a GUI with all the staff warn: description: Warn players warningns: description: See players' warnings helpop: description: Ask the staff members for help troll: description: Toggles troll mode permissions: staffcore.*: description: Gives all StaffCore permissions. children: staffcore.staffcore: true staffcore.clearchat: true staffcore.fly: true staffcore.freeze: true staffcore.unfreeze.himself: true staffcore.gmc: true staffcore.gms: true staffcore.heal: true staffcore.ip: true staffcore.ping: true staffcore.tp: true staffcore.tp.all: true staffcore.vanish: true staffcore.vanish.see: true staffcore.day: true staffcore.night: true staffcore.weather: true staffcore.suicide: true staffcore.staff: true staffcore.sc: true staffcore.tsc: true staffcore.invsee: true staffcore.reportlist: true staffcore.mute: true staffcore.unmute: true staffcore.ban: true staffcore.unban: true staffcore.alts: true staffcore.wipe: true staffcore.invender: true staffcore.troll: true
src/main/resources/plugin.yml
--- apiVersion: apps/v1 kind: Deployment metadata: name: app spec: selector: matchLabels: app: app template: metadata: labels: app: app annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "app" vault.hashicorp.com/agent-cache-enable: "true" vault.hashicorp.com/agent-cache-use-auto-auth-token: "true" vault.hashicorp.com/agent-inject-secret-ca.pem: "" vault.hashicorp.com/secret-volume-path-ca.pem: "/fluentd/cert" vault.hashicorp.com/agent-inject-template-ca.pem: | {{- with secret "pki/issue/app" "common_name=app-a.service.internal" -}} {{ .Data.issuing_ca }} {{- end }} vault.hashicorp.com/agent-inject-secret-key.pem: "" vault.hashicorp.com/secret-volume-path-key.pem: "/fluentd/cert" vault.hashicorp.com/agent-inject-template-key.pem: | {{- with secret "pki/issue/app" "common_name=app-a.service.internal" -}} {{ .Data.private_key }} {{- end }} vault.hashicorp.com/agent-inject-secret-cert.pem: "" vault.hashicorp.com/secret-volume-path-cert.pem: "/fluentd/cert" vault.hashicorp.com/agent-inject-template-cert.pem: | {{- with secret "pki/issue/app" "common_name=app-a.service.internal" -}} {{ .Data.certificate }} {{- end }} vault.hashicorp.com/agent-inject-secret-fluent.conf: "" vault.hashicorp.com/secret-volume-path-fluent.conf: "/fluentd/etc" vault.hashicorp.com/agent-inject-template-fluent.conf: | <system> log_level debug </system> # TCP input <source> @type forward port 24224 </source> <source> @type tail path /fluentd/log/user.log pos_file /fluentd/log/user.pos @log_level debug tag user.log <parse> @type json </parse> </source> <match user.log> @type kafka2 # list of seed brokers brokers {{- with secret "kv/data/confluent-cloud" }} {{ .Data.data.connection_string }}{{- end }} use_event_time true # buffer settings <buffer ingress> @type file path /fluentd/td/log flush_interval 1s </buffer> # data type settings <format> @type json </format> # topic settings topic_key app-a-ingress default_topic app-a-ingress # producer settings required_acks -1 compression_codec gzip ssl_ca_cert '/fluentd/cert/ca.pem' ssl_client_cert '/fluentd/cert/cert.pem' ssl_client_cert_key '/fluentd/cert/key.pem' sasl_over_ssl true ssl_ca_certs_from_system true username {{- with secret "kv/data/confluent-cloud" }} {{ .Data.data.client_id }}{{- end }} password {{- with secret "kv/data/confluent-cloud" }} {{ .Data.data.client_secret }}{{- end }} </match> spec: serviceAccountName: app containers: - name: app env: - name: NUM_OF_RUNS value: '10' - name: PATH_TO_LOG value: '/fluentd/log/user.log' image: moayadi/vault-confluentcloud-demo:app-latest imagePullPolicy: Always resources: limits: memory: "128Mi" cpu: "500m" ports: - containerPort: 8080 volumeMounts: - name: app-log mountPath: /fluentd/log - name: fluentd image: moayadi/vault-confluentcloud-demo:fluentd-latest imagePullPolicy: Always resources: limits: memory: "128Mi" cpu: "500m" ports: - containerPort: 24224 volumeMounts: - name: app-log mountPath: /fluentd/log volumes: - name: app-log emptyDir: {} --- kind: Service apiVersion: v1 metadata: name: app spec: selector: app: app type: ClusterIP ports: - name: tcp port: 8080 targetPort: 8080 --- apiVersion: v1 kind: ServiceAccount metadata: name: app
deploy/app-deploy.yml
{% set version = "1.0.0" %} {% set name = "GEOexplorer" %} {% set bioc = "3.14" %} package: name: 'bioconductor-{{ name|lower }}' version: '{{ version }}' source: url: - 'https://bioconductor.org/packages/{{ bioc }}/bioc/src/contrib/{{ name }}_{{ version }}.tar.gz' - 'https://bioarchive.galaxyproject.org/{{ name }}_{{ version }}.tar.gz' - 'https://depot.galaxyproject.org/software/bioconductor-{{ name|lower }}/bioconductor-{{ name|lower }}_{{ version }}_src_all.tar.gz' md5: aec0ace054967993c39841f807208228 build: number: 0 rpaths: - lib/R/lib/ - lib/ noarch: generic # Suggests: rmarkdown, knitr, usethis, testthat (>= 3.0.0) requirements: host: - 'bioconductor-biobase >=2.54.0,<2.55.0' - 'bioconductor-geoquery >=2.62.0,<2.63.0' - 'bioconductor-impute >=1.68.0,<1.69.0' - 'bioconductor-limma >=3.50.0,<3.51.0' - r-base - r-dt - r-factoextra - r-ggplot2 - r-heatmaply - r-htmltools - r-maptools - r-pheatmap - r-plotly - r-scales - r-shiny - r-shinybs - r-shinybusy - r-shinyheatmaply - r-stringr - r-umap run: - 'bioconductor-biobase >=2.54.0,<2.55.0' - 'bioconductor-geoquery >=2.62.0,<2.63.0' - 'bioconductor-impute >=1.68.0,<1.69.0' - 'bioconductor-limma >=3.50.0,<3.51.0' - r-base - r-dt - r-factoextra - r-ggplot2 - r-heatmaply - r-htmltools - r-maptools - r-pheatmap - r-plotly - r-scales - r-shiny - r-shinybs - r-shinybusy - r-shinyheatmaply - r-stringr - r-umap test: commands: - '$R -e "library(''{{ name }}'')"' about: home: 'https://bioconductor.org/packages/{{ bioc }}/bioc/html/{{ name }}.html' license: GPL-3 summary: 'GEOexplorer: an R/Bioconductor package for gene expression analysis and visualisation' description: 'GEOexplorer is a Shiny app that enables exploratory data analysis and differential gene expression analysis of microarray gene expression datasets held on the GEO database. The outputs are interactive graphs that enable users to explore the results of the analysis. The development of GEOexplorer was made possible because of the excellent code provided by GEO2R (https://www.ncbi.nlm.nih.gov/geo/geo2r/).' license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3'
recipes/bioconductor-geoexplorer/meta.yaml
name: SYCL CI on: workflow_dispatch: push: branches: [ main, 'pr/*' ] pull_request: branches: [ main ] jobs: test-sycl: runs-on: ubuntu-latest container: ghcr.io/wdmapp/oneapi-dpcpp-ubuntu-20.04:latest env: GTEST_VERSION: 1.10.0 GTEST_ROOT: ${{ github.workspace }}/googletest SYCL_DEVICE_FILTER: host DEBIAN_FRONTEND: noninteractive GTENSOR_TEST_EXCLUDE: test_fft test_reductions LD_LIBRARY_PATH: /opt/intel/oneapi/tbb/2021.5.1/env/../lib/intel64/gcc4.8:/opt/intel/oneapi/mkl/2022.0.2/lib/intel64:/opt/intel/oneapi/debugger/2021.5.0/gdb/intel64/lib:/opt/intel/oneapi/debugger/2021.5.0/libipt/intel64/lib:/opt/intel/oneapi/debugger/2021.5.0/dep/lib:/opt/intel/oneapi/compiler/2022.0.2/linux/lib:/opt/intel/oneapi/compiler/2022.0.2/linux/lib/x64:/opt/intel/oneapi/compiler/2022.0.2/linux/lib/oclfpga/host/linux64/lib:/opt/intel/oneapi/compiler/2022.0.2/linux/compiler/lib/intel64_lin PATH: /opt/intel/oneapi/mkl/2022.0.2/bin/intel64:/opt/intel/oneapi/dev-utilities/2021.5.1/bin:/opt/intel/oneapi/debugger/2021.5.0/gdb/intel64/bin:/opt/intel/oneapi/compiler/2022.0.2/linux/lib/oclfpga/bin:/opt/intel/oneapi/compiler/2022.0.2/linux/bin/intel64:/opt/intel/oneapi/compiler/2022.0.2/linux/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin steps: - uses: actions/checkout@v2 - name: clinfo run: | mkdir -p /etc/OpenCL/vendors echo "libintelocl.so" > /etc/OpenCL/vendors/intel-cpu.icd clinfo - name: setup compiler env run: | which dpcpp echo "CXX=$(which dpcpp)" >> $GITHUB_ENV - name: env check run: | env | grep oneapi - name: install googletest run: | mkdir -p ${{ env.GTEST_ROOT }} cd ${{ env.GTEST_ROOT }} wget https://github.com/google/googletest/archive/release-${{ env.GTEST_VERSION }}.tar.gz tar xzf release-${{ env.GTEST_VERSION }}.tar.gz cmake -S googletest-release-${{ env.GTEST_VERSION }} -B build -DCMAKE_INSTALL_PREFIX=${{ env.GTEST_ROOT }} cmake --build build -t install env: CXX: clang++-9 - name: cmake host run: cmake -S . -B build-sycl-host -DGTENSOR_DEVICE=sycl -DCMAKE_BUILD_TYPE=RelWithDebInfo -DGTENSOR_BUILD_EXAMPLES=ON -DGTENSOR_DEVICE_SYCL_SELECTOR=host -DGTEST_ROOT=${{ env.GTEST_ROOT }} -DGTENSOR_ENABLE_CLIB=ON -DGTENSOR_ENABLE_BLAS=On -DGTENSOR_ENABLE_FFT=ON -DGTENSOR_TEST_DEBUG=ON -DGTENSOR_DEVICE_SYCL_ILP64=ON - name: cmake host build run: cmake --build build-sycl-host -v - name: cmake debug run: cmake -S . 
-B build-sycl-debug -DGTENSOR_DEVICE=sycl -DCMAKE_BUILD_TYPE=Debug -DGTENSOR_BUILD_EXAMPLES=ON -DGTENSOR_DEVICE_SYCL_SELECTOR=host -DGTEST_ROOT=${{ env.GTEST_ROOT }} -DGTENSOR_ENABLE_CLIB=ON -DGTENSOR_ENABLE_BLAS=ON -DGTENSOR_ENABLE_FFT=ON -DGTENSOR_TEST_DEBUG=ON -DGTENSOR_DEVICE_SYCL_ILP64=ON - name: cmake debug build run: cmake --build build-sycl-debug -v - name: cmake host run tests run: ./run-gtests.sh build-sycl-host - name: cmake host run daxpy run: ./daxpy working-directory: ${{ github.workspace }}/build-sycl-host/examples - name: cmake host run stencil1d run: ./stencil1d working-directory: ${{ github.workspace }}/build-sycl-host/examples - name: cmake host run mult_table run: ./mult_table working-directory: ${{ github.workspace }}/build-sycl-host/examples - name: GNU make setup gtensor subdir run: mkdir -p external/gtensor && cp -R ../include external/gtensor/ working-directory: ${{ github.workspace }}/examples - name: GNU make build run: make GTENSOR_DEVICE=sycl GTENSOR_DEVICE_SYCL_SELECTOR=host working-directory: ${{ github.workspace }}/examples - name: GNU make run daxpy run: ./daxpy working-directory: ${{ github.workspace }}/examples - name: GNU make run stencil1d run: ./stencil1d working-directory: ${{ github.workspace }}/examples - name: GNU make run mult_table run: ./mult_table working-directory: ${{ github.workspace }}/examples
.github/workflows/sycl.yml
A Logic-Model Semantics for SCR Software Requirements.: - https://doi.org/10.1145/229000.226326 - 1996 A Semantic Model of Program Faults.: - https://doi.org/10.1145/229000.226317 - 1996 An Approach to Verification and Validation of a Reliable Multicasting Protocol.: - https://doi.org/10.1145/229000.226316 - 1996 An Efficient State Space Generation for Analysis of Real-Time Systems.: - https://doi.org/10.1145/229000.226297 - 1996 An Incremental Approach to Structural Testing of Concurrent Software.: - https://doi.org/10.1145/229000.226298 - 1996 Automated Test Data Generation for Programs with Procedures.: - https://doi.org/10.1145/229000.226319 - 1996 Beyond Traditional Program Slicing.: - https://doi.org/10.1145/229000.226315 - 1996 Compositional Verification by Model Checking for Counter-Examples.: - https://doi.org/10.1145/229000.226321 - 1996 Constructing Abstract Models of Concurrent Real-Time Software.: - https://doi.org/10.1145/229000.226323 - 1996 Critical Slicing for Software Fault Localization.: - https://doi.org/10.1145/229000.226310 - 1996 Daistish - Systematic Algebraic Testing for OO Programs in the Presence of Side-effects.: - https://doi.org/10.1145/229000.226301 - 1996 Elements of Style - Analyzing a Software Design Feature with a Counterexample Detector.: - https://doi.org/10.1145/229000.226322 - 1996 Experiences and Lessons from the Analysis of TCAS II.: - https://doi.org/10.1145/229000.226304 - 1996 ? Formal Specification and Verification of the Kernel Functional Unit of the OSI Session Layer Protocol and Service Using CCS. : - https://doi.org/10.1145/229000.226325 - 1996 Generating Functional Test Cases in-the-large for Time-critical Systems from Logic-based Specifications.: - https://doi.org/10.1145/229000.226300 - 1996 Generation of Multi-Formalism State-Space Analysis Tools.: - https://doi.org/10.1145/229000.226314 - 1996 Improving the Accuracy of Petri Net-Based Analysis of Concurrent Programs.: - https://doi.org/10.1145/229000.226299 - 1996 Issues in the Full Scale Use of Formal Methods for Automated Testing.: - https://doi.org/10.1145/229000.226303 - 1996 Linear and Structural Event Sequence Analysis.: - https://doi.org/10.1145/229000.226307 - 1996 Predicting Dependability by Testing.: - https://doi.org/10.1145/229000.226305 - 1996 ? 
Proceedings of the 1996 International Symposium on Software Testing and Analysis, ISSTA 1996, San Diego, CA, USA, January 8-10, 1996 : - https://doi.org/10.1145/229000 - 1996 Reachability Analysis of Feature Interactions - A Progress Report.: - https://doi.org/10.1145/229000.226320 - 1996 Software Error Analysis - A Real Case Study Involving Real Faults and Mutations.: - https://doi.org/10.1145/229000.226313 - 1996 Structural Specification-Based Testing with ADL.: - https://doi.org/10.1145/229000.226302 - 1996 Test and Analysis of Software Architectures.: - https://doi.org/10.1145/229000.226296 - 1996 The Path-Wise Approach to Data Flow Testing with Pointer Variables.: - https://doi.org/10.1145/229000.226311 - 1996 Towards a Structural Load Testing Tool.: - https://doi.org/10.1145/229000.226318 - 1996 Unconstrained Duals and Their Use in Achieving All-Uses Coverage.: - https://doi.org/10.1145/229000.226312 - 1996 Using Partial-Order Methods in the Formal Validation of Industrial Concurrent Programs.: - https://doi.org/10.1145/229000.226324 - 1996 Using Perturbation Analysis to Measure Variation in the Information Content of Test Sets.: - https://doi.org/10.1145/229000.226306 - 1996 Why State-of-the-Art is not State-of-the-Practice (Panel Abstract).: - https://doi.org/10.1145/229000.226327 - 1996
dlfairness/other/get_paper_survey_list/dump/ISSTA/22.yaml
defaults: &defaults mapper: metadata_mapping: datacite_geoblacklight test: <<: *defaults index: adapter: Solr url: http://uc3-dash2solr-dev.cdlib.org:8983/solr/geoblacklight open_timeout: 120 # connection open timeout in seconds read_timeout: 300 # read timeout in seconds retry_503: 3 # max retries retry_after_limit: 20 # retry wait time in seconds source: protocol: OAI oai_base_url: http://uc3-mrtoai-dev.cdlib.org:37001/mrtoai/oai/v2 metadata_prefix: stash_wrapper seconds_granularity: true db: adapter: sqlite3 database: ':memory:' pool: 5 timeout: 5000 update_uri: https://localhost:3000/stash/dataset development: <<: *defaults index: adapter: Solr url: http://uc3-dash2solr-dev.cdlib.org:8983/solr/geoblacklight open_timeout: 120 # connection open timeout in seconds read_timeout: 300 # read timeout in seconds retry_503: 3 # max retries retry_after_limit: 20 # retry wait time in seconds source: protocol: OAI oai_base_url: http://uc3-mrtoai-stg.cdlib.org:37001/mrtoai/oai/v2 metadata_prefix: stash_wrapper seconds_granularity: true set: cdl_dashdev db: adapter: sqlite3 database: db/development.sqlite3 pool: 5 update_uri: https://dash-dev.ucop.edu/stash/dataset stage: <<: *defaults index: adapter: Solr url: http://uc3-dash2solr-stg.cdlib.org:8983/solr/geoblacklight open_timeout: 120 # connection open timeout in seconds read_timeout: 300 # read timeout in seconds retry_503: 3 # max retries retry_after_limit: 20 # retry wait time in seconds source: protocol: OAI oai_base_url: http://uc3-mrtoai-stg.cdlib.org:37001/mrtoai/oai/v2 metadata_prefix: stash_wrapper seconds_granularity: true sets: - dataone_dash - lbnl_dash - ucb_dash - ucd_dash - uci_dash - ucm_dash - ucop_dash - ucr_dash - ucsc_dash - ucsf_datashare_submitter - ucpress_dash - ucsb_dash - ocdp_dash - ucsb_dash db: adapter: sqlite3 database: db/stage.sqlite3 pool: 5 update_uri: https://dash-stg.ucop.edu/stash/dataset demo: <<: *defaults index: adapter: Solr url: http://uc3-dashdemo-stg.cdlib.org:8983/solr/geoblacklight open_timeout: 120 # connection open timeout in seconds read_timeout: 300 # read timeout in seconds retry_503: 3 # max retries retry_after_limit: 20 # retry wait time in seconds source: protocol: OAI oai_base_url: http://uc3-mrtoai-stg.cdlib.org:37001/mrtoai/oai/v2 metadata_prefix: stash_wrapper seconds_granularity: true set: cdl_dash_demo db: adapter: sqlite3 database: db/demo.sqlite3 pool: 5 update_uri: https://dashdemo.ucop.edu/stash/dataset production: <<: *defaults index: adapter: Solr url: http://uc3-dash2solr-prd.cdlib.org:8983/solr/geoblacklight open_timeout: 120 # connection open timeout in seconds read_timeout: 300 # read timeout in seconds retry_503: 3 # max retries retry_after_limit: 20 # retry wait time in seconds source: protocol: OAI oai_base_url: http://uc3-mrtoai-prd.cdlib.org:37001/mrtoai/oai/v2 metadata_prefix: stash_wrapper seconds_granularity: true sets: - dataone_dash - lbnl_dash - uci_dash - ucb_dash - ucd_lib_dash - ucla_dash - ucm_dash - ucop_dash - ucpress_dash - ucr_lib_dash - ucsb_dash - ucsc_dash - ucsf_lib_datashare db: adapter: sqlite3 database: db/production.sqlite3 pool: 5 update_uri: https://dash.ucop.edu/stash/dataset
config/stash-harvester.yml
--- - name: Calico | Set docker daemon options template: src: docker dest: "/etc/default/docker" owner: root group: root mode: 0644 notify: - restart docker when: ansible_os_family != "CoreOS" - meta: flush_handlers - name: Calico | Install calicoctl container script template: src: calicoctl-container.j2 dest: "{{ bin_dir }}/calicoctl" mode: 0755 owner: root group: root changed_when: false notify: restart calico-node - name: Calico | Install calico cni bin command: rsync -piu "{{ local_release_dir }}/calico/bin/calico" "/opt/cni/bin/calico" changed_when: false when: not use_hyperkube_cni - name: Calico | Install calico-ipam cni bin command: rsync -piu "{{ local_release_dir }}/calico/bin/calico" "/opt/cni/bin/calico-ipam" changed_when: false when: not use_hyperkube_cni - name: Calico | Copy cni plugins from hyperkube command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/" changed_when: false when: use_hyperkube_cni - name: Calico | wait for etcd uri: url=http://localhost:2379/health register: result until: result.status == 200 retries: 10 delay: 5 when: inventory_hostname in groups['kube-master'] - name: Calico | Check if calico network pool has already been configured uri: url: "{{ etcd_endpoint }}/v2/keys/calico/v1/ipam/v4/pool" return_content: yes status_code: 200,404 register: calico_conf run_once: true - name: Calico | Define ipip pool argument run_once: true set_fact: ipip_arg: "--ipip" when: cloud_provider is defined or ipip|default(false) - name: Calico | Define nat-outgoing pool argument run_once: true set_fact: nat_arg: "--nat-outgoing" when: nat_outgoing|default(false) and not peer_with_router|default(false) - name: Calico | Define calico pool task name run_once: true set_fact: pool_task_name: "with options {{ ipip_arg|default('') }} {{ nat_arg|default('') }}" when: ipip_arg|default(false) or nat_arg|default(false) - name: Calico | Configure calico network pool {{ pool_task_name|default('') }} command: "{{ bin_dir}}/calicoctl pool add {{ kube_pods_subnet }} {{ ipip_arg|default('') }} {{ nat_arg|default('') }}" environment: NO_DEFAULT_POOLS: true run_once: true when: calico_conf.status == 404 - name: Calico | Get calico configuration from etcd uri: url: "{{ etcd_endpoint }}/v2/keys/calico/v1/ipam/v4/pool" return_content: yes register: calico_pools run_once: true - name: Calico | Check if calico pool is properly configured fail: msg: 'Only one network pool must be configured and it must be the subnet {{ kube_pods_subnet }}. 
Please erase calico configuration and run the playbook again ("etcdctl rm --recursive /calico/v1/ipam/v4/pool")' when: ( calico_pools.json['node']['nodes'] | length > 1 ) or ( not calico_pools.json['node']['nodes'][0]['key'] | search(".*{{ kube_pods_subnet | ipaddr('network') }}.*") ) run_once: true - name: Calico | Write /etc/network-environment template: src=network-environment.j2 dest=/etc/network-environment when: ansible_service_mgr in ["sysvinit","upstart"] - name: Calico | Write calico-node systemd init file template: src=calico-node.service.j2 dest=/etc/systemd/system/calico-node.service when: ansible_service_mgr == "systemd" notify: restart calico-node - name: Calico | Write calico-node initd script template: src=deb-calico.initd.j2 dest=/etc/init.d/calico-node owner=root mode=0755 when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian" notify: restart calico-node - name: Calico | Write calico-node initd script template: src=rh-calico.initd.j2 dest=/etc/init.d/calico-node owner=root mode=0755 when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "RedHat" notify: restart calico-node - meta: flush_handlers - name: Calico | Enable calico-node service: name: calico-node state: started enabled: yes - name: Calico | Disable node mesh shell: "{{ bin_dir }}/calicoctl bgp node-mesh off" when: peer_with_router|default(false) and inventory_hostname in groups['kube-node'] - name: Calico | Configure peering with router(s) shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}" with_items: peers when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
roles/network_plugin/calico/tasks/main.yml
uid: azure.batch.models.JobNetworkConfiguration name: JobNetworkConfiguration fullName: azure.batch.models.JobNetworkConfiguration module: azure.batch.models inheritances: - msrest.serialization.Model summary: 'The network configuration for the Job. All required parameters must be populated in order to send to Azure.' constructor: syntax: 'JobNetworkConfiguration(*, subnet_id: str, **kwargs) -> None' parameters: - name: subnet_id description: 'Required. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The ''MicrosoftAzureBatch'' service principal must have the ''Classic Virtual Machine Contributor'' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: [https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration](https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration)' isRequired: true types: - <xref:str>
docs-ref-autogen/azure-batch/azure.batch.models.JobNetworkConfiguration.yml
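The constructor documented above takes a single required keyword-only argument, subnet_id. A minimal sketch of instantiating the model follows; the subnet ID below is just the placeholder form quoted in the description, not a real Azure resource.

```python
# Minimal sketch of constructing the documented model. The subnet ID is the
# placeholder form from the description above, not a real Azure resource.
from azure.batch.models import JobNetworkConfiguration

subnet_id = (
    "/subscriptions/{subscription}/resourceGroups/{group}"
    "/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}"
)
network_config = JobNetworkConfiguration(subnet_id=subnet_id)
print(network_config.subnet_id)
```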
jxRequirements: autoUpdate: enabled: false schedule: "" bootConfigURL: https://github.com/jenkins-x/jx3-boot-config buildPacks: buildPackLibrary: gitRef: master gitURL: https://github.com/jenkins-x/jx3-pipeline-catalog.git cluster: chartRepository: http://jenkins-x-chartmuseum.jx.svc.cluster.local:8080 clusterName: lear-tech-k8-cluster environmentGitOwner: todo gitKind: github gitName: github gitServer: https://github.com gke: projectNumber: "671176663592" namespace: jx project: domleartechtech provider: gke registry: gcr.io vaultName: lear-tech-k8-cluster vaultSAName: lear-tech-k8-cluster-vt zone: europe-west2-a environments: - ingress: domain: "" externalDNS: false namespaceSubDomain: "" tls: email: "" enabled: false production: false key: dev owner: mikelear repository: jx3-gke-vault - ingress: domain: "" externalDNS: false namespaceSubDomain: "" tls: email: "" enabled: false production: false key: staging - ingress: domain: "" externalDNS: false namespaceSubDomain: "" tls: email: "" enabled: false production: false key: production gitops: true ingress: domain: leartech.tech externalDNS: false namespaceSubDomain: -jx. tls: email: <EMAIL> enabled: true production: true kaniko: true pipelineUser: username: mikelear repository: nexus secretStorage: vault storage: backup: enabled: false url: "" logs: enabled: true url: gs://logs-lear-tech-k8-cluster-54fb310bdb14 reports: enabled: true url: gs://reports-lear-tech-k8-cluster-54fb310bdb14 repository: enabled: true url: gs://repository-lear-tech-k8-cluster-54fb310bdb14 vault: bucket: vault-lear-tech-k8-cluster-54fb310bdb14 key: crypto-key-lear-tech-k8-cluster-54fb310bdb14 keyring: keyring-lear-tech-k8-cluster-54fb310bdb14 name: lear-tech-k8-cluster serviceAccount: lear-tech-k8-cluster-vt velero: schedule: "" ttl: "" versionStream: ref: master url: https://github.com/jenkins-x/jxr-versions.git webhook: lighthouse jxRequirementsIngressExternalDNS: enabled: false jxRequirementsIngressTLS: enabled: true jxRequirementsVault: enabled: true
jx-values.yaml
--- result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-trigger-ad_hoc/14/ timestamp: 2016-05-31 22:54:03 UTC duration: 2h18m56s triggered_by: mcquin active_duration: 2h18m28s parameters: GIT_REF: bump-chef-gem EXPIRE_CACHE: false change: git_remote: <EMAIL>:opscode/delivery.git git_commit: <PASSWORD> project: delivery version: 0.4.364+git.1.482cc3c stages: delivery-promote: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-promote/314/ duration: 0s delivery-test: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-test/435/ duration: 7m45s runs: el-6: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-test/architecture=x86_64,platform=el-6,project=delivery,role=tester/435/ duration: 7m45s el-7: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-test/architecture=x86_64,platform=el-7,project=delivery,role=tester/435/ duration: 6m7s ubuntu-12.04: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-test/architecture=x86_64,platform=ubuntu-12.04,project=delivery,role=tester/435/ duration: 5m51s ubuntu-14.04: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-test/architecture=x86_64,platform=ubuntu-14.04,project=delivery,role=tester/435/ duration: 6m12s delivery-build: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-build/527/ duration: 2h10m31s runs: el-6: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-build/architecture=x86_64,platform=el-6,project=delivery,role=builder/527/ duration: 2h10m18s el-7: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-build/architecture=x86_64,platform=el-7,project=delivery,role=builder/527/ duration: 1h32m47s ubuntu-12.04: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-build/architecture=x86_64,platform=ubuntu-12.04,project=delivery,role=builder/527/ duration: 1h32m48s delivery-trigger-ad_hoc: result: SUCCESS url: http://wilson.ci.chef.co/job/delivery-trigger-ad_hoc/14/ duration: 12s
reports/wilson.ci.chef.co/job/delivery-trigger-ad_hoc/14.yaml
guidelines: 1: Hestia_Criteria_for_Outpatient_Pulmonary_Embolism_Treatment.v1 test_cases: - id: 1. Total score 0 input: 1: gt0003|Hemodynamically unstable (sBP <100 mmHg and HR >100, needing ICU care, or by clinician judgment): 0|local::at0005|No| gt0004|Thrombolysis or embolectomy needed (For reasons other than hemodynamic instability): 0|local::at0043|No| gt0005|Active bleeding or high risk for bleeding (GI bleeding or surgery ≤2 weeks ago or by clinician judgment): 0|local::at0011|No| gt0006|>24 hrs on supplemental oxygen required to maintain SaO₂ >90%: 0|local::at0014|No| gt0007|PE diagnosed while on anticoagulation: 0|local::at0017|No| gt0008|Severe pain needing IV pain medication required >24 hr: 0|local::at0020|No| gt0009|Medical or social reason for admission >24 hr (infection, malignancy, no support system): 0|local::at0023|No| gt0010|Creatinine clearance <30 mL/min by Cockcroft-Gault: 0|local::at0026|No| gt0011|Severe liver impairment (By clinician judgment): 0|local::at0029|No| gt0012|Pregnant: 0|local::at0032|No| gt0013|Documented history of heparin-induced thrombocytopenia (HIT): 0|local::at0035|No| expected_output: 1: gt0016|Calculate Hestia criteria total points: 0 gt0015|Hestia Criteria interpretation: local::at0039|0 points. Low risk. Patient eligible for outpatient treatment (0% mortality, 2% VTE recurrence).| - id: 2. Total score 1 input: 1: gt0003|Hemodynamically unstable (sBP <100 mmHg and HR >100, needing ICU care, or by clinician judgment): 0|local::at0005|No| gt0004|Thrombolysis or embolectomy needed (For reasons other than hemodynamic instability): 0|local::at0043|No| gt0005|Active bleeding or high risk for bleeding (GI bleeding or surgery ≤2 weeks ago or by clinician judgment): 0|local::at0011|No| gt0006|>24 hrs on supplemental oxygen required to maintain SaO₂ >90%: 0|local::at0014|No| gt0007|PE diagnosed while on anticoagulation: 0|local::at0017|No| gt0008|Severe pain needing IV pain medication required >24 hr: 0|local::at0020|No| gt0009|Medical or social reason for admission >24 hr (infection, malignancy, no support system): 0|local::at0023|No| gt0010|Creatinine clearance <30 mL/min by Cockcroft-Gault: 0|local::at0026|No| gt0011|Severe liver impairment (By clinician judgment): 0|local::at0029|No| gt0012|Pregnant: 1|local::at0034|Yes| gt0013|Documented history of heparin-induced thrombocytopenia (HIT): 0|local::at0035|No| expected_output: 1: gt0016|Calculate Hestia criteria total points: 1 gt0015|Hestia Criteria interpretation: local::at0040|>0 points. Not low risk. Patient not eligible for outpatient treatment per the Hestia Criteria, which recommends admission and inpatient treatment.| - id: 3. 
Maximum score 11 input: 1: gt0003|Hemodynamically unstable (sBP <100 mmHg and HR >100, needing ICU care, or by clinician judgment): 1|local::at0006|Yes| gt0004|Thrombolysis or embolectomy needed (For reasons other than hemodynamic instability): 1|local::at0044|Yes| gt0005|Active bleeding or high risk for bleeding (GI bleeding or surgery ≤2 weeks ago or by clinician judgment): 1|local::at0012|Yes| gt0006|>24 hrs on supplemental oxygen required to maintain SaO₂ >90%: 1|local::at0015|Yes| gt0007|PE diagnosed while on anticoagulation: 1|local::at0019|Yes| gt0008|Severe pain needing IV pain medication required >24 hr: 1|local::at0022|Yes| gt0009|Medical or social reason for admission >24 hr (infection, malignancy, no support system): 1|local::at0025|Yes| gt0010|Creatinine clearance <30 mL/min by Cockcroft-Gault: 1|local::at0027|Yes| gt0011|Severe liver impairment (By clinician judgment): 1|local::at0030|Yes| gt0012|Pregnant: 1|local::at0034|Yes| gt0013|Documented history of heparin-induced thrombocytopenia (HIT): 1|local::at0036|Yes| expected_output: 1: gt0016|Calculate Hestia criteria total points: 11 gt0015|Hestia Criteria interpretation: local::at0040|>0 points. Not low risk. Patient not eligible for outpatient treatment per the Hestia Criteria, which recommends admission and inpatient treatment.|
gdl2/Hestia_Criteria_for_Outpatient_Pulmonary_Embolism_Treatment.v1.test.yml
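These test cases pin down the scoring rule: each of the 11 Hestia criteria contributes 1 point when answered Yes, and any total above 0 rules out outpatient treatment. A minimal Python sketch of that logic; the function name and boolean-list input are illustrative, not part of the GDL2 guideline itself.

```python
# Illustrative re-implementation of the scoring rule exercised by the test
# cases above; not the GDL2 engine itself.
def hestia_score(criteria):
    """criteria: 11 booleans, one per Hestia criterion (True = Yes)."""
    total = sum(1 for met in criteria if met)
    if total == 0:
        interpretation = ("0 points. Low risk. Patient eligible for outpatient "
                          "treatment (0% mortality, 2% VTE recurrence).")
    else:
        interpretation = (">0 points. Not low risk. Admission and inpatient "
                          "treatment recommended.")
    return total, interpretation

assert hestia_score([False] * 11)[0] == 0    # test case 1: total score 0
assert hestia_score([True] * 11)[0] == 11    # test case 3: maximum score 11
```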
apiVersion: apps/v1 kind: Deployment metadata: labels: {{- include "dex-operator.labels" . | nindent 4 }} name: {{ include "dex-operator.fullname" . }}-controller-manager spec: replicas: 2 selector: matchLabels: {{- include "dex-operator.selectorLabels" . | nindent 6 }} template: metadata: labels: {{- include "dex-operator.selectorLabels" . | nindent 8 }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} serviceAccountName: {{ include "dex-operator.serviceAccountName" . }} containers: {{- if .Values.kubeRbacProxy.enabled }} - args: - --secure-listen-address=0.0.0.0:8443 - --upstream=http://127.0.0.1:8080/ - --logtostderr=true - --v=10 image: "{{ .Values.kubeRbacProxy.image.repository }}:{{ .Values.kubeRbacProxy.image.tag}}" name: kube-rbac-proxy ports: - containerPort: {{ .Values.kubeRbacProxy.port }} name: https {{- end }} - args: - --metrics-addr=127.0.0.1:8080 - --enable-leader-election - --dex-grpc={{ .Values.dexGRPC.host }}:{{ .Values.dexGRPC.port }} command: - /manager securityContext: {{- toYaml .Values.securityContext | nindent 10 }} image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" livenessProbe: httpGet: path: /healthz port: liveness-port name: manager ports: - containerPort: 9440 name: liveness-port resources: {{- toYaml .Values.resources | nindent 12 }} volumeMounts: - mountPath: /etc/dex/tls name: dex-grpc-client-cert terminationGracePeriodSeconds: 10 volumes: - name: dex-grpc-client-cert secret: defaultMode: 420 secretName: {{ template "dex-operator.fullname" . }}-grpc-client-cert
contrib/charts/dex-operator/templates/deployment.yaml
title: Metric definition schema description: JSON schema for the metric definition file type: object required: - metric_groups - metrics additionalProperties: false properties: metric_groups: description: The HMC metric groups and their mapping to Prometheus type: object additionalProperties: false patternProperties: "^[a-z0-9\\-]+$": description: "Key: Name of the HMC metric group" type: object required: - prefix - fetch additionalProperties: false properties: prefix: description: "<prefix> part of Prometheus metric name (format: zhmc_<prefix>_<metric_unit>)" type: string fetch: description: "Indicates whether the metric group should be fetched from the HMC" type: boolean if: description: "Condition for fetching the metric group from the HMC, as an expression using certain variables." type: string default: null labels: description: "Prometheus labels to be added to all metrics of this metric group" type: array default: - name: resource value: resource items: type: object required: - name - value additionalProperties: false properties: name: description: "Label name" type: string value: description: "Label value. There are some keywords with special handling - see documentation for details" type: string metrics: description: The HMC metrics and their mapping to Prometheus type: object additionalProperties: false patternProperties: "^[a-z0-9\\-]+$": description: "Key: Name of the HMC metric group" type: object additionalProperties: false patternProperties: "^[a-zA-Z0-9\\-]+$": description: "Key: Name of the HMC metric within its metric group" type: object required: - exporter_name - exporter_desc additionalProperties: false properties: exporter_name: description: "<metric_unit> part of Prometheus metric name (format: zhmc_<prefix>_<metric_unit>); Null causes the metric not to be exported to Prometheus" type: [string, "null"] pattern: "^[a-zA-Z0-9_]+$" exporter_desc: description: "HELP description of Prometheus metric" type: [string, "null"] percent: description: "Indicates whether the HMC metric value is represented as percentage with a value of 100 meaning 100%" type: boolean default: false
zhmc_prometheus_exporter/schemas/metrics_schema.yaml
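Since the schema above is plain YAML built from JSON Schema keywords, a metric definition file can be checked against it with PyYAML and jsonschema. A hypothetical sketch, assuming a metric definition file named metrics.yaml; only the schema path comes from this repository.

```python
# Hypothetical validation sketch: load both YAML files and validate the metric
# definition against the schema. The metrics.yaml file name is an assumption.
import yaml
import jsonschema

with open("zhmc_prometheus_exporter/schemas/metrics_schema.yaml") as f:
    schema = yaml.safe_load(f)

with open("metrics.yaml") as f:  # hypothetical metric definition file
    metric_definition = yaml.safe_load(f)

# Raises jsonschema.ValidationError if the definition does not conform.
jsonschema.validate(instance=metric_definition, schema=schema)
print("metric definition is valid")
```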
service: ${self:custom.service}-${self:custom.namespace} provider: name: aws runtime: nodejs12.x stage: dev region: us-east-1 tracing: lambda: true environment: SLS_NAMESPACE: ${self:custom.namespace} SLS_STAGE: ${self:custom.stage} DYNAMODB_TABLE_TOKENS: ${self:custom.dynamodbTables.tokens} iamRoleStatements: - Effect: Allow Action: - ssm:DescribeParameters Resource: "*" - Effect: Allow Action: - ssm:GetParameter - ssm:GetParameters Resource: Fn::Join: - ":" - - "arn:aws:ssm" - Ref: AWS::Region - Ref: AWS::AccountId - "parameter/${self:custom.stage}/*" - Effect: Allow Action: - dynamodb:DescribeTable - dynamodb:Query - dynamodb:Scan - dynamodb:GetItem - dynamodb:PutItem - dynamodb:UpdateItem - dynamodb:DeleteItem Resource: - "Fn::GetAtt": [ TokensDynamoDBTable, Arn ] - Fn::Join: - "/" - - "Fn::GetAtt": [ TokensDynamoDBTable, Arn ] - "index" - "*" stackPolicy: - Effect: Allow Action: "Update:*" Principal: "*" Resource: "*" - Effect: "Deny" Action: ["Update:Replace"] Principal: "*" Resource: "LogicalResourceId/TokensDynamoDBTable" - Effect: "Deny" Action: ["Update:Delete"] Principal: "*" Resource: "LogicalResourceId/TokensDynamoDBTable" functions: app: handler: index.handler timeout: 30 events: - http: GET /patreon/authorize - http: POST /patreon/validate - http: GET /patreon/api/{proxy+} - http: GET /contentful/{proxy+} - http: GET /rss/{proxy+} - http: POST /contentful-webhook - http: GET /discourse/counts/{proxy+} - http: GET /patron-pledge sync: handler: src/sync.handler timeout: 300 events: - schedule: rate: cron(0 12 * * ? *) # daily, 8AM EDT enabled: ${self:custom.syncEnabled.${self:custom.stage}} plugins: - serverless-offline custom: service: theliturgists-backend namespace: ${env:SLS_NAMESPACE, env:USER} stage: ${opt:stage, self:provider.stage} syncEnabled: dev: false staging: false production: true dynamodbTables: tokens: "${self:custom.service}-${self:custom.namespace}-${self:custom.stage}-tokens" resources: Resources: TokensDynamoDBTable: # Table to store user id / Patreon id/token mapping Type: AWS::DynamoDB::Table Properties: TableName: ${self:custom.dynamodbTables.tokens} AttributeDefinitions: - AttributeName: userId AttributeType: S - AttributeName: patreonUserId AttributeType: S KeySchema: - AttributeName: userId KeyType: HASH GlobalSecondaryIndexes: - IndexName: patreonUserIdIndex KeySchema: - AttributeName: patreonUserId KeyType: HASH Projection: ProjectionType: ALL BillingMode: PAY_PER_REQUEST
serverless.yml
title: Azure Communication Services summary: Azure Communication Services is a cloud-based offering that lets you add voice, video, chat, and telephony capabilities to your apps. Azure Communication Services is currently available in public preview. metadata: title: Azure Communication Services description: Azure Communication Services is a cloud-based offering that lets you add voice, video, chat, and telephony capabilities to your apps. Azure Communication Services is currently available in public preview. ms.service: azure-communication-services ms.topic: landing-page author: mikben ms.author: mikben ms.date: 08/10/2020 ms.openlocfilehash: 6081063605439982e29984cc4378ec3aee7ac08a ms.sourcegitcommit: 829d951d5c90442a38012daaf77e86046018e5b9 ms.translationtype: HT ms.contentlocale: it-IT ms.lasthandoff: 10/09/2020 ms.locfileid: "90929037" landingContent: - title: About Azure Communication Services linkLists: - linkListType: overview links: - text: What is Azure Communication Services? url: overview.md - linkListType: concept links: - text: Client library options url: concepts/sdk-options.md - title: Get started linkLists: - linkListType: quickstart links: - text: Create a Communication Services resource url: quickstarts/create-communication-resource.md - text: Create and manage user access tokens url: quickstarts/access-tokens.md - linkListType: concept links: - text: Client and server architecture url: concepts/client-and-server-architecture.md - text: Authentication and authorization url: concepts/authentication.md - text: Event handling url: concepts/event-handling.md - text: Notifications url: concepts/notifications.md - title: Voice and video calling linkLists: - linkListType: quickstart links: - text: Add voice and video calling to your app url: quickstarts/voice-video-calling/getting-started-with-calling.md - text: Use the Calling client library url: quickstarts/voice-video-calling/calling-client-samples.md - linkListType: concept links: - text: Voice and video calling concepts url: concepts/voice-video-calling/about-call-types.md - text: Calling client library overview url: concepts/voice-video-calling/calling-sdk-features.md - text: Call flows url: concepts/call-flows.md - title: Telephony and SMS linkLists: - linkListType: quickstart links: - text: Get a phone number url: quickstarts/telephony-sms/get-phone-number.md - text: Send SMS url: quickstarts/telephony-sms/send.md - text: Handle SMS events url: quickstarts/telephony-sms/handle-sms-events.md - linkListType: concept links: - text: SMS concepts url: concepts/telephony-sms/concepts.md - text: SMS client library overview url: concepts/telephony-sms/sdk-features.md - text: Calling overview url: concepts/voice-video-calling/calling-sdk-features.md - text: Plan your telephony and SMS solution url: concepts/telephony-sms/plan-solution.md - title: Chat linkLists: - linkListType: quickstart links: - text: Add chat to your app url: quickstarts/chat/get-started.md - linkListType: concept links: - text: Chat concepts url: concepts/chat/concepts.md - text: Chat client library overview url: concepts/chat/sdk-features.md - title: Samples linkLists: - linkListType: get-started links: - text: Get started with the group calling hero sample url: samples/calling-hero-sample.md - text: Get started with the chat hero sample url:
samples/chat-hero-sample.md
articles/communication-services/index.yml
name: Test CI on: push: branches: - next - latest pull_request: jobs: test: name: Check if the unit tests are successful runs-on: ubuntu-latest steps: - name: Checkout the project uses: actions/checkout@v2 - name: Setting up the environment run: cp .env.example .env - name: Setting the development environment run: sed -i 's/production/development/' .env - name: Cleanup the project run: make clean - name: Setting the token for installing private package run: sed -i 's/NPM_AUTH_TOKEN=.*/NPM_AUTH_TOKEN=${{ secrets.NPM_AUTH_TOKEN }}/g' .env - name: Setting the TEST_CLIENT_ID for installing private package run: sed -i 's/TEST_CLIENT_ID=.*/TEST_CLIENT_ID=${{ secrets.TEST_CLIENT_ID }}/g' .env - name: Setting the TEST_CLIENT_SECRET for installing private package run: sed -i 's/TEST_CLIENT_SECRET=.*/TEST_CLIENT_SECRET=${{ secrets.TEST_CLIENT_SECRET }}/g' .env - name: Remove TEST_TESTING_ROUTE for installing private package run: sed -i '/TEST_TESTING_ROUTE=.*/d' .env - name: Setting the TEST_TESTING_ROUTE for installing private package run: echo TEST_TESTING_ROUTE=${{ secrets.TEST_TESTING_ROUTE }} >> .env - name: Remove TEST_BASEURL for installing private package run: sed -i '/TEST_BASEURL=.*/d' .env - name: Setting the TEST_BASEURL for installing private package run: echo TEST_BASEURL=${{ secrets.TEST_BASEURL }} >> .env - name: Remove TEST_AUTH_TOKEN for installing private package run: sed -i '/TEST_AUTH_TOKEN=.*/d' .env - name: Setting the TEST_AUTH_TOKEN for installing private package run: echo TEST_AUTH_TOKEN=${{ secrets.TEST_AUTH_TOKEN }} >> .env - name: Setting the TEST_GRANT_TYPE for installing private package run: sed -i 's/TEST_GRANT_TYPE=.*/TEST_GRANT_TYPE=${{ secrets.TEST_GRANT_TYPE }}/g' .env - name: Setup the NPM running configuration for using the authentication token run: make token - name: Install the Node.js dependencies run: make install - name: Check if all unit tests pass run: make test
.github/workflows/test.yaml
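The workflow above spends a dozen steps rewriting .env one variable at a time. A minimal consolidated sketch, assuming the same secret names, that does the remove-then-append dance for the three echo-based variables in a single step:

      - name: Write test secrets to .env
        run: |
          # drop any stale values, then append the current secrets
          sed -i '/^TEST_TESTING_ROUTE=/d;/^TEST_BASEURL=/d;/^TEST_AUTH_TOKEN=/d' .env
          {
            echo "TEST_TESTING_ROUTE=${{ secrets.TEST_TESTING_ROUTE }}"
            echo "TEST_BASEURL=${{ secrets.TEST_BASEURL }}"
            echo "TEST_AUTH_TOKEN=${{ secrets.TEST_AUTH_TOKEN }}"
          } >> .env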
--- # generic postgres user - name: Check if creating {{ data.postgresql.dbuser }} user is necessary iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo SELECT rolname FROM pg_roles WHERE rolname = \\\'{{ data.postgresql.dbuser }}\\\' | psql template1 | grep {{ data.postgresql.dbuser }}\" " register: postgresql_user_present ignore_errors: true - name: Create PostgreSQL user iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo CREATE ROLE {{ data.postgresql.dbuser }} WITH SUPERUSER LOGIN CREATEDB CREATEROLE ENCRYPTED PASSWORD \\\'{{ data.postgresql.password }}\\\' | psql template1 \" " when: - postgresql_user_present is failed # redmine user - name: Check if creating {{ data.redmine.dbuser }} user is necessary iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo SELECT rolname FROM pg_roles WHERE rolname = \\\'{{ data.redmine.dbuser }}\\\' | psql template1 | grep {{ data.redmine.dbuser }}\" " register: redmine_user_present ignore_errors: true - name: Create Redmine user iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo CREATE ROLE {{ data.redmine.dbuser }} WITH LOGIN ENCRYPTED PASSWORD \\\'{{ data.redmine.password }}\\\' | psql template1 \" " when: - redmine_user_present is failed # zabbix user - name: Check if creating {{ data.zabbix.dbuser }} user is necessary iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo SELECT rolname FROM pg_roles WHERE rolname = \\\'{{ data.zabbix.dbuser }}\\\' | psql template1 | grep {{ data.zabbix.dbuser }}\" " register: zabbix_user_present ignore_errors: true - name: Create zabbix user iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo CREATE ROLE {{ data.zabbix.dbuser }} WITH LOGIN ENCRYPTED PASSWORD \\\'{{ data.zabbix.password }}\\\' | psql template1 \" " when: - zabbix_user_present is failed # gitlab user - name: Check if creating {{ data.gitlab.dbuser }} user is necessary iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo SELECT rolname FROM pg_roles WHERE rolname = \\\'{{ data.gitlab.dbuser }}\\\' | psql template1 | grep {{ data.gitlab.dbuser }}\" " register: gitlab_user_present ignore_errors: true - name: Create gitlab user iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo CREATE ROLE {{ data.gitlab.dbuser }} WITH LOGIN ENCRYPTED PASSWORD \\\'{{ data.gitlab.password }}\\\' | psql template1 \" " when: - gitlab_user_present is failed
roles/postgresql-data/tasks/roles.yml
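The four check/create pairs above differ only in role name, password, and privileges. A minimal loop-driven sketch, assuming a hypothetical pg_roles list variable (the per-role options ride along in each item):

    - name: Check which roles already exist
      iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo SELECT rolname FROM pg_roles WHERE rolname = \\\'{{ item.name }}\\\' | psql template1 | grep {{ item.name }}\" "
      register: role_checks
      ignore_errors: true
      with_items: "{{ pg_roles }}"

    - name: Create missing roles
      iocage: state=exec tag={{ postgresql_jail.uuid }} user=pgsql cmd="/bin/sh -c \"echo CREATE ROLE {{ item.item.name }} WITH {{ item.item.options }} ENCRYPTED PASSWORD \\\'{{ item.item.password }}\\\' | psql template1 \" "
      when: item is failed
      with_items: "{{ role_checks.results }}"

where pg_roles might look like:

    pg_roles:
      - { name: "{{ data.redmine.dbuser }}", password: "{{ data.redmine.password }}", options: "LOGIN" }
      - { name: "{{ data.postgresql.dbuser }}", password: "{{ data.postgresql.password }}", options: "SUPERUSER LOGIN CREATEDB CREATEROLE" }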
pmid: '9285799' categories: - name: Diagnosis enabled: true - name: Genetics enabled: true title: >- The human COX10 gene is disrupted during homologous recombination between the 24 kb proximal and distal CMT1A-REPs. abstract: >- The CMT1A-REPs are two large directly repeating DNA sequences located on chromosome 17p11.2-p12 flanking the region duplicated in patients with Charcot-Marie-Tooth disease type 1A (CMT1A) and deleted in patients with hereditary neuropathy with liability to pressure palsies (HNPP). We have sequenced two cosmids, c74F4 and c15H12, which contain the entire proximal and distal CMT1A-REPs and determined that these repeats are approximately 99% identical across a 24,011 bp region. In addition, both contain an exon of the human heme A:farnesyltransferase gene (COX10). Hybridization studies revealed that COX10 spans the distal CMT1A-REP, while the proximal CMT1A-REP contains an isolated COX10 'pseudo-exon'. There is also a COX10 hybridization signal on chromosome 10 which appears to represent a processed pseudogene. We propose that the distal CMT1A-REP represents the progenitor copy of COX10 exon VI which was duplicated with surrounding intronic sequences during mammalian genome evolution and that the HNPP deletion results in a COX10 null allele. abstractLink: 'https://www.ncbi.nlm.nih.gov/pubmed/9285799' fullTextLink: '' date: 1997/09 authors: - name: <NAME> - name: <NAME> - name: <NAME> - name: <NAME> - name: <NAME> keywords: - keyword: Alkyl and Aryl Transferases - keyword: Base Sequence - keyword: 'Blotting, Southern' - keyword: Centromere - keyword: Charcot-Marie-Tooth Disease - keyword: 'Chromosomes, Human, Pair 17' - keyword: Electron Transport Complex IV - keyword: Exons - keyword: Membrane Proteins - keyword: Molecular Sequence Data - keyword: Nucleic Acid Hybridization - keyword: Restriction Mapping - keyword: 'Sequence Homology, Nucleic Acid' - keyword: Telomere cites: [] citedBy: - pmid: '20493460' - pmid: '16775374' - pmid: '11381029' - pmid: '9973284' - pmid: '9545397'
src/data/citations/9285799.yml
--- - name: create consul user user: name=consul comment=consul shell=/bin/false system=yes home=/nonexistent - name: make consul directories file: dest={{ item }} state=directory owner=consul with_items: - "{{ consul.bin_path }}" - "{{ consul.archive_path }}" - "{{ consul.config_path }}" - "{{ consul.config_path }}/conf.d" - "{{ consul.data_path }}" - name: download consul get_url: url={{ consul.download.url }} dest="{{ consul.archive_path }}/consul-{{ consul.version }}.zip" sha256sum={{ consul.download.sha256sum }} - name: check if consul binary is already installed stat: path={{ consul.bin_path }}/consul register: consul_binary - name: unzip consul binary unarchive: src="{{ consul.archive_path }}/consul-{{ consul.version }}.zip" dest={{ consul.bin_path }} copy=no when: consul_binary.stat.exists == False - name: link consul binary to path file: src={{ consul.bin_path }}/consul dest=/usr/local/bin/consul state=link - name: configure consul template: src=etc/consul.json dest={{ consul.config_file }} owner=consul notify: restart consul server - name: allow access to consul tcp ports ufw: rule=allow src={{ hostvars[item.0]['ansible_' + consul.bind_interface].ipv4.address }} to_port={{ item.1 }} proto=tcp with_nested: - "{{ groups.consul }}" - - 8500 - 8400 - 8301 - 8302 - 8300 - 8600 tags: - firewall - name: allow access to consul udp ports ufw: rule=allow src={{ hostvars[item.0]['ansible_' + consul.bind_interface].ipv4.address }} to_port={{ item.1 }} proto=udp with_nested: - "{{ groups.consul }}" - - 8301 - 8302 - 8600 tags: - firewall - name: consul service upstart_service: name=consul cmd={{ consul.bin_path }}/consul args="agent {% if consul.is_server %}-bootstrap-expect {{ groups.consul_server|length }}{% endif %} -config-dir {{ consul.config_path }}/conf.d -config-file={{ consul.config_file }}" user=consul notify: restart consul server - meta: flush_handlers - name: start consul service service: name=consul state=started enabled=yes - include: checks.yml when: sensu.client.enable_checks|default('True')|bool tags: sensu-checks - include: metrics.yml when: sensu.client.enable_metrics|default('True')|bool tags: sensu-metrics - include: serverspec.yml when: serverspec.enabled|default("True")|bool tags: serverspec
roles/consul/tasks/main.yml
version: 2 updates: - package-ecosystem: npm directory: '/dockerode-example/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/next-examples/cognito-s3-example/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/next-examples/cognito-s3-example/infra/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/next-examples/next-auth0-example/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/next-examples/nextra-example/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/next-examples/next-ts-examples/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/next-examples/cognito-aws-console-example/app/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/next-examples/cognito-aws-console-example/infra/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/next-examples/vnc-example/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/next-examples/tail-kit-example' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/sveltekit-example/sveltekit-vite-example/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad - package-ecosystem: npm directory: '/elm-electron hello-electron/' schedule: interval: daily timezone: Asia/Tokyo allow: - dependency-type: all rebase-strategy: auto assignees: - poad
.github/dependabot.yml
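The thirteen npm entries above differ only in their directory. On a Dependabot version that supports the multi-directory directories key (an assumption; older parsers only accept the singular directory, and glob support is newer still, so each path may need to be listed explicitly), the file could collapse to something like:

    version: 2
    updates:
      - package-ecosystem: npm
        directories:
          - '/dockerode-example/'
          - '/next-examples/*'
          - '/sveltekit-example/sveltekit-vite-example/'
        schedule:
          interval: daily
          timezone: Asia/Tokyo
        allow:
          - dependency-type: all
        rebase-strategy: auto
        assignees:
          - poad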
name: CICD on: push: branches: - localnet - devnet - mainnet pull_request: jobs: analyze: runs-on: ubuntu-latest if: (github.actor != 'dependabot[bot]') permissions: actions: read contents: read security-events: write steps: - uses: actions/checkout@v2 - name: Initialise CodeQL uses: github/codeql-action/init@v1 with: languages: javascript # Analysis - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v1 scan: runs-on: ubuntu-latest if: (github.actor != 'dependabot[bot]') steps: - uses: actions/checkout@v2 # Critical - name: Run Trivy for critical vulnerabilities uses: aquasecurity/trivy-action@master with: scan-type: "fs" # Filesystem mode ignore-unfixed: true # Ignore vulnerabilities with no available fix format: "table" # Table output mode as next step will report in security tab severity: "CRITICAL" # Error only on critical vulnerabilities exit-code: "1" # Fail the job if a critical vulnerability with fix available is found # Scanning - name: Run Trivy for reporting all vulnerabilities uses: aquasecurity/trivy-action@master if: always() with: scan-type: "fs" # Filesystem mode ignore-unfixed: true # Ignore vulnerabilities with no available fix format: "template" # Template output mode template: "@/contrib/sarif.tpl" # SARIF template to be compatible with GitHub security tab output: "trivy-results.sarif" # Output file name severity: "CRITICAL,HIGH,MEDIUM" # Report on critical/high/medium vulnerabilities exit-code: "0" # Do not fail; reporting only - name: Upload Trivy results uses: github/codeql-action/upload-sarif@v1 if: always() with: sarif_file: "trivy-results.sarif" ci: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 # Install - name: Use Node.js 14.17 uses: actions/setup-node@v1 with: node-version: 14.17 - uses: actions/cache@v2 with: path: ~/.cache/yarn key: ${{ runner.os }}-node-${{ hashFiles('**/yarn.lock') }} restore-keys: | ${{ runner.os }}-node- - name: Run yarn install run: yarn install - name: Run linter run: yarn run lint - name: Run prettier run: yarn run format - name: Run unit tests run: yarn run test
.github/workflows/cicd.yaml
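The ci job above pairs yarn install with a hand-rolled actions/cache step for the yarn cache directory. A sketch of the same effect using setup-node's built-in cache input (available from actions/setup-node v2 onward), which handles both the cache path and the lock-file keying:

      - name: Use Node.js 14.17
        uses: actions/setup-node@v2
        with:
          node-version: 14.17
          cache: 'yarn'  # caches yarn's cache directory, keyed on yarn.lock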
- company: VMFARMs (Canada division) position: Cloud Engineer duration: Oct 2021 &mdash; Jan 2022 summary: Worked as a Cloud Native Engineer, offering Kubernetes professional services, mostly using AWS and EKS. <br><br> Technologies <br><ul class="resume-item-list"><li>Git</li><li>Docker</li><li>Terraform</li><li>Ansible</li><li>GitHub</li><li>GitBook</li><li>ZenHub</li><li>ZenDesk</li><li>CircleCI</li><li>ShipIt</li><li>Ejson</li><li>Kubernetes</li><li>Keda</li><li>CloudFlare</li><li>CertManager</li><li>Ingress</li><li>Nginx</li><li>Helm</li><li>Velero</li><li>AWS (S3/EC2/RDS/Route53/EKS/ECR/IAM)</li><li>Bash/Shell</li><li>Python for Ops automation</li></ul> - company: JAC Experts position: Cloud Native Engineer duration: Apr 2019 &mdash; Oct 2021 summary: Responsible for automating deployments, designing infrastructure, and supporting development teams in delivering their products to customers, mostly using on-prem Kubernetes.<br><br> Technologies <br><ul class="resume-item-list"><li>Git</li><li>Docker</li><li>Terraform</li><li>Ansible</li><li>Ingress</li><li>Nginx</li><li>Rancher</li><li>RKE</li><li>LongHorn</li><li>GitLab</li><li>Harbor</li><li>Velero</li><li>Minio</li><li>Loki</li><li>Prometheus</li><li>Grafana</li><li>NextCloud</li><li>WikiJS</li><li>CloudFlare</li><li>AWS (S3/EC2/RDS/Route53/EKS/ECR/IAM)</li><li>Bash/Shell</li><li>Python for Ops automation</li></ul> - company: Instruct position: Infrastructure as Code Engineer duration: May 2014 &mdash; Mar 2019 summary: Responsible for automating deployments, designing infrastructure, and supporting development teams in delivering their products to customers, mostly using Puppet. <br> <br> Technologies <br><ul class="resume-item-list"><li>Git</li><li>GitLab</li><li>Nexus</li><li>Zabbix</li><li>Puppet</li><li>MatterMost</li><li>NextCloud</li><li>Dokuwiki</li><li>ProxMox</li><li>OVH Cloud</li><li>Bash/Shell</li><li>Python for Ops automation</li><li>Ruby for Ops automation</li></ul> - company: Globalweb (+2x) position: Linux Specialist duration: May 2013 &mdash; May 2015 summary: Responsible for the Linux infrastructure and services of several federal government clients. My main goal was to reduce risks and migrate manual procedures to automated tasks using Puppet and Shell. - company: 4Linux position: Linux Specialist duration: May 2012 &mdash; May 2013 summary: Responsible for the Linux infrastructure and services of several private sector clients. As a Linux Specialist, I helped the company's clients achieve better results with their Linux infrastructure. My main goal was to reduce risks and migrate manual procedures to automated tasks using Puppet and Shell. - company: Tecnisys position: Linux Specialist duration: Sep 2011 &mdash; Oct 2012 summary: As a Linux Specialist, I helped the company's clients achieve better results with their Linux infrastructure. My main goal was to reduce risks and migrate manual procedures to automated tasks using Puppet and Shell resources. - company: Dataprev position: IT Advisor duration: Sep 2010 &mdash; Oct 2011 summary: I worked with the infrastructure director, supporting him in the management of 3 federal government datacenters that run all software related to public pensions. More than 35 million payments were processed monthly for Brazilians, along with more than 25 million social security tax receipts.
_data/experience.yml
name: determine-solution-build-deploy on: pull_request: branches: - main paths: - src/** push: branches: - main paths: - src/** env: workflow_scripts_path: .github/workflows/scripts jobs: determine-solution-build-deploy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 # determine if the build should go to the PR or UAT environment - name: uat env var if: github.ref == 'refs/heads/main' shell: bash run: | echo "environment=uat" >> $GITHUB_ENV - name: pr env var if: github.ref != 'refs/heads/main' shell: bash run: | echo "environment=pr" >> $GITHUB_ENV - id: files uses: jitterbit/get-changed-files@v1 with: format: csv # check for file changes in solution source folder(s) # if found, queue the build and deploy workflow - name: dispatch-build-deploy-solution id: dispatch-build-deploy-solution shell: pwsh env: # Workflows cannot be dispatched with the standard GITHUB_TOKEN, so we use a token that can GITHUB_TOKEN: ${{ secrets.WORKFLOW_DISPATCH_TOKEN }} run: | . ${{ env.workflow_scripts_path }}/pwsh/Start-Build-Deploy-Solution.ps1 $params = @{ files = '${{ steps.files.outputs.all }}' githubRef = '${{ github.ref }}' prHeadRef = '${{ github.event.pull_request.head.ref }}' githubSha = '${{ github.sha }}' prHeadSha = '${{ github.event.pull_request.head.sha }}' environment = '${{ env.environment }}' } Start-Build-Deploy-Solution @params - name: create-commit-statuses id: create-commit-statuses if: contains(github.ref, 'pull') uses: actions/github-script@v5 with: script: | const createCommitStatuses = require('${{ env.workflow_scripts_path }}/js/createCommitStatuses.js') const solutionNamesString = '${{ steps.dispatch-build-deploy-solution.outputs.solution_names }}' await createCommitStatuses({ github, context, solutionNamesString })
.github/workflows/determine-solution-build-deploy.yml
- name: Using Annotations in XSD Schemas (SQLXML 4.0) href: using-annotations-in-xsd-schemas-sqlxml-4-0.md - name: 'Creating CDATA Sections Using sql:use-cdata (SQLXML 4.0)' href: creating-cdata-sections-using-sql-use-cdata-sqlxml-4-0.md - name: 'Creating Constant Elements Using sql:is-constant (SQLXML 4.0)' href: creating-constant-elements-using-sql-is-constant-sqlxml-4-0.md - name: 'Creating Valid ID, IDREF, and IDREFS Type Attributes Using sql:prefix (SQLXML 4.0)' href: creating-valid-id-idref-and-idrefs-type-attributes-using-sql-prefix-sqlxml-4-0.md - name: 'Data Type Coercions and the sql:datatype Annotation (SQLXML 4.0)' href: data-type-coercions-and-the-sql-datatype-annotation-sqlxml-4-0.md - name: Default Mapping of XSD Elements and Attributes to Tables and Columns (SQLXML 4.0) href: default-mapping-of-xsd-elements-and-attributes-to-tables-and-columns-sqlxml-4-0.md - name: 'Excluding Schema Elements from the XML Document Using sql:mapped' href: excluding-schema-elements-from-the-xml-document-using-sql-mapped.md - name: Explicit Mapping of XSD Elements and Attributes to Tables and Columns href: explicit-mapping-xsd-elements-and-attributes-to-tables-and-columns.md - name: 'Filtering Values Using sql:limit-field and sql:limit-value (SQLXML 4.0)' href: filtering-values-using-sql-limit-field-and-sql-limit-value-sqlxml-4-0.md - name: 'Hiding Elements and Attributes by Using sql:hide' href: hiding-elements-and-attributes-by-using-sql-hide.md - name: 'Identifying Key Columns Using sql:key-fields (SQLXML 4.0)' href: identifying-key-columns-using-sql-key-fields-sqlxml-4-0.md - name: Mapping XSD Data Types to XPath Data Types (SQLXML 4.0) href: mapping-xsd-data-types-to-xpath-data-types-sqlxml-4-0.md - name: 'Requesting URL References to BLOB Data Using sql:encode (SQLXML 4.0)' href: requesting-url-references-to-blob-data-using-sql-encode-sqlxml-4-0.md - name: 'Retrieving Unconsumed Data Using the sql:overflow-field (SQLXML 4.0)' href: retrieving-unconsumed-data-using-the-sql-overflow-field-sqlxml-4-0.md - name: Specifying a Target Namespace Using the targetNamespace Attribute (SQLXML 4.0) href: specifying-a-target-namespace-using-the-targetnamespace-attribute-sqlxml-4-0.md - name: 'Specifying Depth in Recursive Relationships by Using sql:max-depth' href: specifying-depth-in-recursive-relationships-by-using-sql-max-depth.md - name: 'Specifying Relationships Using sql:relationship (SQLXML 4.0)' href: specifying-relationships-using-sql-relationship-sqlxml-4-0.md - name: 'Specifying the sql:inverse Attribute on sql:relationship (SQLXML 4.0)' href: specifying-the-sql-inverse-attribute-on-sql-relationship-sqlxml-4-0.md - name: 'Using the sql:identity and sql:guid Annotations' href: using-the-sql-identity-and-sql-guid-annotations.md - name: XSD Annotations (SQLXML 4.0) href: xsd-annotations-sqlxml-4-0.md
docs/relational-databases/sqlxml-annotated-xsd-schemas-using/toc.yml
title: Azure Active Directory Verifiable Credentials documentation (preview) summary: Verifiable credentials help you build solutions that let customers manage their own data. metadata: author: barclayn description: Learn how to incorporate verifiable credentials into solutions that let customers control information about themselves. manager: karenh444 ms.author: barclayn ms.collection: na ms.date: 10/13/2021 ms.service: active-directory ms.subservice: na ms.topic: landing-page services: active-directory ms.openlocfilehash: 8a69a36231d2eb47307537da0e39ab4306942669 ms.sourcegitcommit: 611b35ce0f667913105ab82b23aab05a67e89fb7 ms.translationtype: HT ms.contentlocale: es-ES ms.lasthandoff: 10/14/2021 ms.locfileid: "130006800" landingContent: - title: Key concepts linkLists: - linkListType: overview links: - text: Introduction to Azure Active Directory Verifiable Credentials (preview) url: decentralized-identifier-overview.md - linkListType: overview links: - text: Guiding principles for decentralized identities url: https://www.microsoft.com/security/blog/2021/10/06/microsofts-5-guiding-principles-for-decentralized-identities/ - linkListType: video links: - text: Governing access to resources url: https://www.youtube.com/watch?v=r20hCF9NbTo - title: Learn about the Azure Active Directory Verifiable Credentials service linkLists: - linkListType: tutorial links: - text: Get started with verifiable credentials url: enable-your-tenant-verifiable-credentials.md - linkListType: tutorial links: - text: Issue verifiable credentials url: verifiable-credentials-configure-issuer.md - title: Build verifiable credentials solutions linkLists: - linkListType: concept links: - text: Plan your architecture url: introduction-to-verifiable-credentials-architecture.md - linkListType: how-to-guide links: - text: Request Service API for issuers url: issuance-request-api.md - linkListType: how-to-guide links: - text: Request Service API for verifiers url: presentation-request-api.md
articles/active-directory/verifiable-credentials/index.yml
--- - hosts: localhost connection: local gather_facts: False environment: AWS_REGION: "{{ aws_region }}" tasks: - name: Gather EC2 instances facts ec2_remote_facts: filters: "tag:kind": mqperf-instance register: ec2_facts - name: Gather VPC facts ec2_vpc_net_facts: filters: "tag:Name": "{{ vpc_name }}" register: vpc_facts - set_fact: vpc_id="{{ (vpc_facts.vpcs | first).id }}" when: vpc_facts.vpcs|length > 0 - name: Terminate mqperf instances ec2: instance_ids: "{{ ec2_facts.instances|map(attribute='id')|list }}" state: absent wait: true when: ec2_facts.instances|length > 0 - name: Delete security groups ec2_group: name: "{{item}}" state: absent description: with_items: - all_access - ssh_access - name: Drop keys ec2_key: name: "{{ key_name }}" state: absent - name: Delete local key file file: path: "./{{ key_name }}.pem" state: absent - name: Delete internet gateway ec2_vpc_igw: vpc_id: "{{ vpc_id }}" state: absent when: vpc_id is defined - name: Delete subnet ec2_vpc_subnet: vpc_id: "{{ vpc_id }}" cidr: "{{ subnet_cidr }}" state: absent when: vpc_id is defined - name: Gather routing table facts ec2_vpc_route_table_facts: filters: "tag:Name": "mqperf" register: routing_table_facts - set_fact: routing_table_id="{{ (routing_table_facts.route_tables | first).id }}" when: routing_table_facts.route_tables|length > 0 - name: Delete routing table ec2_vpc_route_table: vpc_id: "{{ vpc_id }}" route_table_id: "{{ routing_table_id }}" lookup: id state: absent when: routing_table_id is defined - name: Delete VPC ec2_vpc_net: name: "{{ vpc_name }}" cidr_block: "{{ vpc_cidr_block }}" state: absent
ansible/shutdown_ec2_instances.yml
name: "App-Release" on: workflow_dispatch: repository_dispatch: push: branches: [ master ] jobs: build: runs-on: ubuntu-latest steps: - name: Set up code uses: actions/checkout@master - name: Set up Go uses: actions/setup-go@master - name: Set up Nodejs uses: actions/setup-node@master - name: Clone website run: git clone https://github.com/yoanndelattre/Windows-Deployer-KVM-Web.git /home/runner/work/Windows-Deployer-KVM-Web - name: Build website run: cd /home/runner/work/Windows-Deployer-KVM-Web && npm install && npm run build - name: Move website to app run: mv /home/runner/work/Windows-Deployer-KVM-Web/build /home/runner/work/Windows-Deployer-KVM-App/Windows-Deployer-KVM-App/static - name: Get packr run: go get -u github.com/gobuffalo/packr/packr - name: Run packr run: cd /home/runner/work/Windows-Deployer-KVM-App/Windows-Deployer-KVM-App && export GOPATH=$HOME/go && export PATH=$PATH:$GOROOT/bin:$GOPATH/bin && packr - name: Build app windows amd64 run: GOOS=windows GOARCH=amd64 go build -o output/Windows-Deployer-Windows-x64.exe - name: Build app darwin amd64 run: GOOS=darwin GOARCH=amd64 go build -o output/Windows-Deployer-Darwin-x64 - name: Build app linux amd64 run: GOOS=linux GOARCH=amd64 go build -o output/Windows-Deployer-Linux-x64 - name: Build app linux arm64 run: GOOS=linux GOARCH=arm64 go build -o output/Windows-Deployer-Linux-arm64 - name: Build app linux arm run: GOOS=linux GOARCH=arm go build -o output/Windows-Deployer-Linux-arm32 - name: Publish release uses: "marvinpinto/action-automatic-releases@latest" with: repo_token: "${{ secrets.GITHUB_TOKEN }}" automatic_release_tag: "latest" draft: false prerelease: false title: "Windows-Deployer App" files: | output/Windows-Deployer-Windows-x64.exe output/Windows-Deployer-Darwin-x64 output/Windows-Deployer-Linux-x64 output/Windows-Deployer-Linux-arm64 output/Windows-Deployer-Linux-arm32
.github/workflows/app-release.yml
name: 'Close Stale Issues' description: 'Action to close stale issues' author: 'GitHub' inputs: repo-token: description: 'Token for the repo. Can be passed in using {{ secrets.GITHUB_TOKEN }}' required: true stale-issue-message: description: 'The message to post on the issue when tagging it. If none provided, will not mark issues stale.' stale-pr-message: description: 'The message to post on the pr when tagging it. If none provided, will not mark pull requests stale.' days-before-stale: description: 'The number of days old an issue can be before marking it stale' default: 60 days-before-close: description: 'The number of days to wait to close an issue or pull request after it has been marked stale' default: 7 stale-issue-label: description: 'The label to apply when an issue is stale' default: 'Stale' exempt-issue-label: description: 'The label to apply when an issue is exempt from being marked stale' stale-pr-label: description: 'The label to apply when a pull request is stale' default: 'Stale' exempt-pr-label: description: 'The label to apply when a pull request is exempt from being marked stale' operations-per-run: description: 'The maximum number of operations per run, used to control rate limiting' default: 30 last-updated-user-type: description: 'Only mark stale and close issues or pull requests that were last updated by the given user type. Available values: collaborator, non-collaborator. Leave blank to disable this filter.' include-events-from-collaborators: description: 'When checking whether an issue or pull request was updated by collaborators, also check whether there are configured events from collaborators besides comments. Example: labeled,milestoned,referenced,assigned. The action will try its best to figure out who actually triggered the event. Refer to the following document for full event names: https://developer.github.com/v3/issues/events/' default: 'milestoned,demilestoned,labeled,unlabeled,marked_as_duplicate,unmarked_as_duplicate,referenced,assigned,unassigned,reopen,renamed,review_dismissed,review_requested,review_request_removed' only-labels: description: 'Comma-separated label names. Only issues or pull requests with all of these labels are checked for staleness. Leave blank to disable this filter.' runs: using: 'node12' main: 'lib/main.js'
action.yml
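For reference, a minimal workflow sketch consuming this action from the same repository (the schedule and messages are illustrative, not part of the action definition):

    name: 'Stale issue handler'
    on:
      schedule:
        - cron: '0 0 * * *'
    jobs:
      stale:
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v2
          - uses: ./  # run the local action defined by the action.yml above
            with:
              repo-token: ${{ secrets.GITHUB_TOKEN }}
              stale-issue-message: 'This issue is stale: no activity for 60 days.'
              days-before-stale: 60
              days-before-close: 7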
uid: "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset" fullName: "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset" name: "EncoderNamedPreset" nameWithType: "EncoderNamedPreset" summary: "Defines values for EncoderNamedPreset." inheritances: - "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />" - "<xref href=\"com.azure.core.util.ExpandableStringEnum\" data-throw-if-not-resolved=\"False\" />" inheritedMembers: - "com.azure.core.util.ExpandableStringEnum.<T>fromString(java.lang.String,java.lang.Class<T>)" - "com.azure.core.util.ExpandableStringEnum.<T>values(java.lang.Class<T>)" - "com.azure.core.util.ExpandableStringEnum.equals(java.lang.Object)" - "com.azure.core.util.ExpandableStringEnum.hashCode()" - "com.azure.core.util.ExpandableStringEnum.toString()" - "java.lang.Object.clone()" - "java.lang.Object.finalize()" - "java.lang.Object.getClass()" - "java.lang.Object.notify()" - "java.lang.Object.notifyAll()" - "java.lang.Object.wait()" - "java.lang.Object.wait(long)" - "java.lang.Object.wait(long,int)" syntax: "public final class EncoderNamedPreset extends ExpandableStringEnum<EncoderNamedPreset>" constructors: - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.EncoderNamedPreset()" fields: - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.AACGOOD_QUALITY_AUDIO" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.ADAPTIVE_STREAMING" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.CONTENT_AWARE_ENCODING" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.CONTENT_AWARE_ENCODING_EXPERIMENTAL" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.COPY_ALL_BITRATE_NON_INTERLEAVED" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H264MULTIPLE_BITRATE1080P" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H264MULTIPLE_BITRATE720P" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H264MULTIPLE_BITRATE_SD" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H264SINGLE_BITRATE1080P" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H264SINGLE_BITRATE720P" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H264SINGLE_BITRATE_SD" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H265ADAPTIVE_STREAMING" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H265CONTENT_AWARE_ENCODING" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H265SINGLE_BITRATE1080P" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H265SINGLE_BITRATE4K" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.H265SINGLE_BITRATE720P" methods: - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.fromString(java.lang.String)" - "com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.values()" type: "class" metadata: {} package: "com.azure.resourcemanager.mediaservices.models" artifact: com.azure.resourcemanager:azure-resourcemanager-mediaservices:1.0.0-beta.2
preview/docs-ref-autogen/com.azure.resourcemanager.mediaservices.models.EncoderNamedPreset.yml
get: operationId: my-cameras-clips-list summary: Retrieve camera clips list tags: - clip description: Retrieves the list of recording clips of a given camera security: - OAuth2: - recording_clips_access - PersonalAccessToken: [] parameters: - $ref: '#/components/parameters/cameraId' - name: created_by in: query required: false description: >- Filter results only to clips created by the given users. A comma-separated list of user IDs is expected. No whitespace characters are permitted. The asterisk (`*`) character can be used to list all clips. By default the endpoint returns only clips created by the current user. example: '1,2,3' schema: type: string responses: '200': description: OK content: application/json: schema: $ref: '#/components/schemas/ClipListResponse' '400': $ref: '#/components/responses/Error400InvalidParams' '401': $ref: '#/components/responses/Error401Unauthorized' '403': $ref: '#/components/responses/Error403PermissionDenied' '404': $ref: '#/components/responses/Error404NotFound' post: operationId: my-cameras-clips-create summary: Create clip tags: - clip description: Create a recording clip on a given camera parameters: - $ref: '#/components/parameters/cameraId' requestBody: content: application/json: schema: type: object title: clip required: - name - start - end properties: name: type: string description: Clip name start: type: string format: date-time description: Start time of clip (ISO 8601) end: type: string format: date-time description: End time of clip (ISO 8601) example: name: Frontdoor loitering start: 2017-01-01T00:00:00.000Z end: 2017-01-01T00:25:00.000Z security: - OAuth2: - recording_clips_access - recording_clips_create - PersonalAccessToken: [] responses: '201': description: Clip request was received and clip processing will start content: application/json: schema: $ref: '#/components/schemas/ClipObject' '400': description: >- No recording data exists for the given timeframe, the requested clip length exceeds 3 hours, or there's another error in the request content: application/json: schema: $ref: '#/components/schemas/Error400Invalid' '401': $ref: '#/components/responses/Error401Unauthorized' '403': $ref: '#/components/responses/Error403PermissionDenied'
spec/paths/cameras@{camera_id}@clips@.yaml
--- # Build and deploy slurm # # This was based on a deployment of Slurm 14.11.7 in FutureSystems, # which is a rather old version at this time. Newer versions should # follow a similar process but may need some changes. # # One node (typically the login node) should be designated the # "build" host. Slurm will be compiled and installed there, and then # the installed files will be deployed to the compute nodes. # # The playbook should be run with the -b (--become) option. # # Install prerequisite software and build slurm - hosts: - slurm_build tasks: - name: install slurm prereqs yum: name={{ item }} state=present with_items: - "@Development Tools" - munge-devel - ncurses-devel - gtk2-devel - perl-devel - perl-CPAN - name: copy slurm source unarchive: copy=yes src=slurm-14.11.7.tar.bz2 dest=/opt/ creates=/opt/slurm-14.11.7 owner=root group=root - name: configure slurm source command: /opt/slurm-14.11.7/configure --prefix=/opt/slurm --enable-pam --with-pam_dir=/lib64/security chdir=/opt/slurm-14.11.7 creates=config.log - name: compile slurm source command: make chdir=/opt/slurm-14.11.7 creates=src/srun/srun - name: install slurm command: make install chdir=/opt/slurm-14.11.7 creates=/opt/slurm/bin/srun - name: make directory for slurm.conf file: path=/opt/slurm/etc state=directory owner=root group=root mode=0755 - name: copy slurm config template: src=slurm.conf.j2 dest=/opt/slurm/etc/slurm.conf owner=root group=root mode=0644 - name: create installed slurm archive command: tar czf slurm-14.11.7-install.tar.gz slurm chdir=/opt creates=slurm-14.11.7-install.tar.gz - name: fetch slurm installation fetch: src=/opt/slurm-14.11.7-install.tar.gz dest=files/ flat=true # Deploy common slurm prerequisites and install slurm - hosts: - slurm tasks: - name: install munge yum: name={{ item }} state=present with_items: - munge - hwloc - name: copy munge key copy: src=munge.key dest=/etc/munge/munge.key owner=munge group=munge mode=400 - name: start munge service: name=munge state=started enabled=yes - name: copy slurm installation copy: src=slurm-14.11.7-install.tar.gz dest=/opt/slurm-14.11.7-install.tar.gz - name: unpack slurm installation command: tar xzf slurm-14.11.7-install.tar.gz chdir=/opt creates=/opt/slurm/etc/slurm.conf - name: create slurm group group: name=slurm gid=105 state=present - name: create slurm user user: name=slurm uid=105 group=slurm createhome=no home=/opt/slurm shell=/bin/nologin state=present - name: create slurm directories file: path={{ item }} state=directory owner=slurm mode=0755 with_items: - /var/spool/slurmd - /var/log/slurm - /opt/local/slurm - name: copy profile.d script for slurm copy: src=slurm.sh dest=/etc/profile.d/slurm.sh mode=0755 owner=root group=root - name: update slurm manpath lineinfile: dest=/etc/man_db.conf line="{{ item }}" state=present insertafter="^MANPATH_MAP\s*/opt/sbin\s*/opt/man" with_items: - "MANPATH_MAP /opt/slurm/sbin /opt/slurm/share/man" - "MANPATH_MAP /opt/slurm/bin /opt/slurm/share/man" # Configuration for Slurm control daemon - hosts: - slurmctld tasks: - name: copy environment file for slurmctld copy: src=sysconfig.slurmctld dest=/etc/sysconfig/slurmctld mode=0644 owner=root group=root register: slurmctld_config - name: copy service script for slurmctld copy: src=slurmctld.service dest=/etc/systemd/system/slurmctld.service mode=0644 owner=root group=root register: slurmctld_service - name: reload systemctl daemon command: systemctl daemon-reload when: slurmctld_service.changed - name: configure slurmctld daemon service: name=slurmctld enabled=yes - 
name: restart slurmctld daemon service: name=slurmctld state=restarted when: slurmctld_config.changed # Configuration for Slurm compute nodes - hosts: - slurmd tasks: - name: copy environment file for slurmd copy: src=sysconfig.slurm dest=/etc/sysconfig/slurmd mode=0644 owner=root group=root register: slurmd_config - name: copy service script for slurmd copy: src=slurmd.service dest=/etc/systemd/system/slurmd.service mode=0644 owner=root group=root register: slurmd_service - name: reload systemctl daemon command: systemctl daemon-reload when: slurmd_service.changed - name: copy epilog copy: src=epilog dest=/opt/local/slurm/epilog mode=0700 owner=root group=root - name: configure slurmd daemon service: name=slurmd enabled=yes - name: restart slurmd daemon service: name=slurmd state=restarted when: slurmd_config.changed
rhel7/slurm.yml
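The playbook above targets four host groups - slurm_build, slurm, slurmctld, and slurmd. A minimal YAML inventory sketch (hostnames are hypothetical) with one login node doing double duty as build host and controller, and the compute nodes in slurmd:

    all:
      children:
        slurm_build:
          hosts:
            login1:
        slurmctld:
          hosts:
            login1:
        slurmd:
          hosts:
            compute[01:04]:
        slurm:
          children:
            slurmctld:
            slurmd: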
{% set version = "0.6.5" %} {% set posix = 'm2-' if win else '' %} {% set native = 'm2w64-' if win else '' %} package: name: r-tidytable version: {{ version|replace("-", "_") }} source: url: - {{ cran_mirror }}/src/contrib/tidytable_{{ version }}.tar.gz - {{ cran_mirror }}/src/contrib/Archive/tidytable/tidytable_{{ version }}.tar.gz sha256: a32d214ebeeb5f66d8945cb6b5929d911f1df1b7e5324891563fd6d561a11fcd build: merge_build_host: true # [win] number: 0 noarch: generic rpaths: - lib/R/lib/ - lib/ requirements: build: - {{ posix }}zip # [win] host: - r-base - r-data.table >=1.12.6 - r-glue >=1.4.0 - r-lifecycle >=0.2.0 - r-magrittr >=1.5 - r-rlang >=0.4.7 - r-tibble >=2.1.3 - r-tidyselect >=1.1.0 - r-vctrs >=0.3.5 run: - r-base - r-data.table >=1.12.6 - r-glue >=1.4.0 - r-lifecycle >=0.2.0 - r-magrittr >=1.5 - r-rlang >=0.4.7 - r-tibble >=2.1.3 - r-tidyselect >=1.1.0 - r-vctrs >=0.3.5 test: commands: - $R -e "library('tidytable')" # [not win] - "\"%R%\" -e \"library('tidytable')\"" # [win] about: home: https://github.com/markfairbanks/tidytable license: MIT summary: A tidy interface to 'data.table' that is 'rlang' compatible, giving users the speed of 'data.table' with the clean syntax of the tidyverse. license_family: MIT license_file: - {{ environ["PREFIX"] }}/lib/R/share/licenses/MIT - LICENSE extra: recipe-maintainers: - conda-forge/r # Package: tidytable # Title: Tidy Interface to 'data.table' # Version: 0.5.7 # Authors@R: c(person(given = "Mark", family = "Fairbanks", role = c("aut", "cre"), email = "<EMAIL>"), person(given = "Tyson", family = "Barrett", role = "ctb"), person(given = "Ivan", family = "Leung", role = "ctb"), person(given = "Ross", family = "Kennedy", role = "ctb"), person(given = "Lionel", family = "Henry", role = "ctb"), person(given = "Matt", family = "Carlson", role = "ctb"), person(given = "Abdessabour", family = "Moutik", role = "ctb") ) # Description: A tidy interface to 'data.table' that is 'rlang' compatible, giving users the speed of 'data.table' with the clean syntax of the tidyverse. # License: MIT + file LICENSE # Encoding: UTF-8 # LazyData: true # Imports: data.table (>= 1.12.6), magrittr (>= 1.5), rlang (>= 0.4.7), methods, tidyselect (>= 1.1.0), vctrs (>= 0.3.5), lifecycle (>= 0.2.0), glue (>= 1.4.0), tibble (>= 2.1.3) # RoxygenNote: 7.1.1 # URL: https://github.com/markfairbanks/tidytable # BugReports: https://github.com/markfairbanks/tidytable/issues # Suggests: testthat (>= 2.1.0), bit64, knitr, rmarkdown # NeedsCompilation: no # Packaged: 2020-12-16 02:43:55 UTC; xmxf129 # Author: <NAME> [aut, cre], <NAME> [ctb], <NAME> [ctb], <NAME> [ctb], <NAME> [ctb], <NAME> [ctb], <NAME> [ctb] # Maintainer: <NAME> <<EMAIL>> # Repository: CRAN # Date/Publication: 2020-12-16 06:20:03 UTC
recipe/meta.yaml
server: port: 8080 compression: enabled: true mime-types: application/javascript,application/json,text/css,text/html,text/plain app: name: fix-backend management: port: 8091 security.enabled: false contextPath: /manages springfox.documentation.swagger.v2.path: /api-docs spring: profiles: active: native jpa: show-sql: true properties.hibernate.format_sql: true hibernate.naming.physical-strategy: org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl hibernate.ddl-auto: create datasource: name: fix_backend_db url: jdbc:h2:./db/fix_backend_db;MODE=PostgreSQL;DB_CLOSE_DELAY=-1;DB_CLOSE_ON_EXIT=FALSE username: sa password: driver-class-name: org.h2.Driver platform: h2 initialize: true continue-on-error: false h2.console.enabled: true devtools.restart: exclude: static/**,public/** enabled: true output.ansi.enabled: ALWAYS messages: basename: messages security: user: password: <PASSWORD> username: user eureka: instance: prefer-ip-address: true client: registerWithEureka: false fetchRegistry: false serviceUrl: defaultZone: http://localhost:8080/eureka/ server: waitTimeInMsWhenSyncEmpty: 0 logging: file: logs/dev_app.log pattern: #console: "%d %-5level %logger : %msg%n" file: "%d %-5level [%thread] %logger : %msg%n" level: org.springframework: web: WARN security: DEBUG com.fix: DEBUG endpoints.shutdown.enabled: true swagger: apiInfo: contactEmail: <EMAIL> contactName: <EMAIL> contactUrl: https://github.com/OElabed/vagrant-development-tools description: Users Restful API with Spring Boot + Spring Data + Spring Rest. license: Apache 2.0 licenseUrl: http://www.apache.org/licenses/LICENSE-2.0.html termOfServiceUrl: http://swagger.io/terms/ title: User Management API version: 1.0.0 defaultKey: page: page size: size sort: sort defaultValue: page: 0 size: 20 sort: id,desc
apps/server/src/main/resources/application.yml
position: 2 token: env(token) username: env(username) mainAdmin: env(main_admin) # id of main admin lastvegan: -1001440160000 tourgroup: -1001153873643 tourgroupname: "@bigelmogame" tourchannel: '@bigelmocucumber' wwBots: # list of werewolf bot nicknames - 'werewolfbot' - 'werewolfbetabot' - 'blackwerewolfbot' veganWarsCommands: - '/game@veganwarsbot' - '/customgame@veganwarsbot' bncphoto: "AgADAgAD2qkxG1TVaUjH0X-0YjLur08E9Q4ABAG8AAEj0FdnA6d7AgABAg" # picture for /bnchelp higif: "CgADAQADiwAD0r04RoeZTv5wklZNAg" # gif for new members leavesticker: "CAADAgADDAADea1UE9LCz-IGyKZhAg" # sticker that bot will send after someone leaves the chat setupHelp: | There are two tournament modes - team and duel. A duel is set up like this: /setup @player1 @player2 Round name Team mode: /setup Team_name @player1 @player2 ... @playerN Team_name2 @player1 @player2 ... @playerN Round name loveStrings: - | Just as Vinko and Yuni throw games in lastkatka Just as Pasyuk and the RNG in Hearthstone And just as Myaf and Isaev lose the tournament So these two will always be together: %1$s ❤️ %2$s - | Just as Marinette and Adrien Just as Hawk Moth and his butterflies Just as Lady Wi-Fi and the internet So %1$s belongs together with %2$s - | Just as cucumber and milk Just as off-color jokes and a tanning salon Just as France and the white flag So these two suit each other perfectly: %1$s ❤️ %2$s - | Like a wanderer lost in the desert who stumbles upon an oasis, Like a ray of sunlight that reaches Earth after a light-year, Like a seafarer stepping ashore after a long voyage, So the meeting of %1$s and %2$s marks the beginning of a new story - | Roses are red Violets are blue This pair says "I love you" - %1$s and %2$s - | Just as the opening and the ending Just as Russian fandubs and Russian hardsubs Just as "ani" and "me" So %1$s and %2$s must be together! - | Some people never learn and don't want to learn They've watched too many American movies, or greed has gone to their heads You tell them about anomalies, they talk about loot So today %1$s and %2$s went out for loot and walked into a love anomaly! - | Just as lastkatka and sanity Just as Isma and Kishchuk Just as Xen and Dumplz So %1$s and %2$s are a perfect match for each other - | Long story short, anonymous, I've answered you, and I won't play at nobility Play a couple of BnC matches for me and we're even; we'll also see whether your head clears up after werewolf Who knows why you needed a couple of days, but I don't stick my nose into other people's business If you want to know - then there's a reason: %1$s ❤ %2$s - | Every story has a beginning and an end Every story has its outline, synopsis, content, key moments, prologues and epilogues But every book also has its last page, and once we turn it, we put the book back on the shelf. Every story has a beginning and an end. Almost every one. Except the love story of %1$s and %2$s!
name: build-publish on: [push] env: CI: true jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 - name: Use Node.js 12.x uses: actions/setup-node@v1 with: node-version: 12.x - uses: actions/cache@v1 id: cache with: path: node_modules key: v1-${{ runner.os }}-${{ hashFiles('yarn.lock') }} - name: yarn install if: steps.cache.outputs.cache-hit != 'true' run: yarn install --frozen-lockfile - name: build and copy slide run: | GITHUB_REF=${{ github.ref }} if [ $GITHUB_REF = "refs/heads/master" ]; then export GATSBY_SITE_URL=https://aluc.io/ else # https://stackoverflow.com/a/13210909/5568628 # Converting `refs/heads/some-feature-branch` to `refs-heads-some-feature-branch`. # So SITE_URL will be https://refs-heads-some-feature-branch.aluc.io/ export GATSBY_SITE_URL=https://${GITHUB_REF//\//-}/ fi yarn build cp -r contents/slide public/ - uses: actions/upload-artifact@master with: name: build path: public publish: needs: [build] if: github.ref == 'refs/heads/master' runs-on: ubuntu-latest env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} steps: - uses: actions/download-artifact@master with: name: build path: public - uses: chrislennon/action-aws-cli@v1.1 - name: upload to s3 run: | ls -al public # TODO: Keep only recent 5 SHAs aws s3 cp --recursive --only-show-errors public/ s3://aluc-io-v3/${{ github.sha }}/ aws s3 rm --recursive --only-show-errors s3://aluc-io-v3/aluc.io/ aws s3 cp --recursive --only-show-errors s3://aluc-io-v3/${{ github.sha }}/ s3://aluc-io-v3/aluc.io/
.github/workflows/nodejs.yml
_id: 4a3741d0-4117-11ea-b526-9d054b363813 message: >- Bone fro.nloj.hashtafak.github.io.hbi.zo cutting hypothesis [URL=http://solartechnicians.net/female-cialis/#online-female-cialis]female cialis[/URL] [URL=http://cheapflights-advice.org/product/tretinoin/#best-price-tretinoin]tretinoin[/URL] tretinoin capsules for sale [URL=http://loveandlightmusic.net/product/clindamycin/#buy-generic-clindamycin]clindamycin[/URL] [URL=http://jokesaz.com/cialis-endothelial/#cialis-endothelial]cialis endothelial[/URL] [URL=http://theatreghost.com/advair/#advair-diskus-100-50]purchase advair without a prescription[/URL] purchase advair without a prescription [URL=http://10selects.com/drugs/fincar/#fincar]fincar lowest price[/URL] [URL=http://loveandlightmusic.net/product/testosterone-booster/#testosterone-booster]what is the best testosterone booster[/URL] hydralazine <a href="http://solartechnicians.net/female-cialis/#price-of-female-cialis">online female cialis</a> <a href="http://cheapflights-advice.org/product/tretinoin/#tretinoin-cost">tretinoin</a> <a href="http://loveandlightmusic.net/product/clindamycin/#buy-clindamycin-without-prescription">order clindamycin</a> <a href="http://jokesaz.com/cialis-endothelial/#cialis-and-coumadin">cialis endothelial</a> <a href="http://theatreghost.com/advair/#advair">advair</a> <a href="http://10selects.com/drugs/fincar/#buy-fincar-uk">fincar</a> <a href="http://loveandlightmusic.net/product/testosterone-booster/#testosterone-booster-from-india">testosterone booster</a> cytosine uveal http://solartechnicians.net/female-cialis/#online-female-cialis female cialis no prescription http://cheapflights-advice.org/product/tretinoin/#cheapest-tretinoin tretinoin online usa http://loveandlightmusic.net/product/clindamycin/#clindamycin low cost clindamycin http://jokesaz.com/cialis-endothelial/#cialis-endothelial cialis endothelial http://theatreghost.com/advair/#advair buy advair online canada http://10selects.com/drugs/fincar/#fincar-walmart-price fincar pills http://loveandlightmusic.net/product/testosterone-booster/#what-is-the-best-testosterone-booster testosterone booster information simultaneously rheumatoid, principles sleepiness. name: ucehowihubun email: <PASSWORD> url: 'http://solartechnicians.net/female-cialis/' hidden: '' date: '2020-01-27T15:11:27.612Z'
_data/comments/dear-diary/comment-1580137887612.yml
name: DialogManagerConfiguration uid: botbuilder-dialogs.DialogManagerConfiguration package: botbuilder-dialogs summary: '' fullName: DialogManagerConfiguration remarks: '' isPreview: false isDeprecated: false type: interface properties: - name: conversationState uid: botbuilder-dialogs.DialogManagerConfiguration.conversationState package: botbuilder-dialogs summary: State property used to persist the bot's dialog stack. fullName: conversationState remarks: '' isPreview: false isDeprecated: false syntax: content: 'conversationState: BotState' return: type: BotState description: '' - name: expireAfter uid: botbuilder-dialogs.DialogManagerConfiguration.expireAfter package: botbuilder-dialogs summary: >- Optional. Number of milliseconds after which the bot's conversation state expires. fullName: expireAfter remarks: '' isPreview: false isDeprecated: false syntax: content: 'expireAfter?: number' return: type: number description: '' - name: rootDialog uid: botbuilder-dialogs.DialogManagerConfiguration.rootDialog package: botbuilder-dialogs summary: 'Root dialog to start from the [onTurn()](#onturn) method.' fullName: rootDialog remarks: '' isPreview: false isDeprecated: false syntax: content: 'rootDialog: Dialog' return: type: <xref uid="botbuilder-dialogs.Dialog" /> description: '' - name: stateConfiguration uid: botbuilder-dialogs.DialogManagerConfiguration.stateConfiguration package: botbuilder-dialogs summary: >- Optional. Path resolvers and memory scopes used for conversations with the bot. fullName: stateConfiguration remarks: '' isPreview: false isDeprecated: false syntax: content: 'stateConfiguration?: DialogStateManagerConfiguration' return: type: <xref uid="botbuilder-dialogs.DialogStateManagerConfiguration" /> description: '' - name: userState uid: botbuilder-dialogs.DialogManagerConfiguration.userState package: botbuilder-dialogs summary: Optional. The bot's persisted user state. fullName: userState remarks: '' isPreview: false isDeprecated: false syntax: content: 'userState?: UserState' return: type: UserState description: ''
botbuilder-typescript/docs-ref-autogen/botbuilder-dialogs/DialogManagerConfiguration.yml
tosca_definitions_version: tosca_2_0 imports: - capabilities.yaml - relationships.yaml - interfaces.yaml node_types: Instantiated: description: >- Base type for nodes that can have zero or more instances. See `Redundancy` policy. attributes: count: type: Count default: 0 instances: type: list entry_schema: Instance #default: [ { name: "hello" } ] Endpoint: derived_from: Instantiated properties: ingress: type: boolean default: false requirements: - connection: capability: Connectable relationship: Connection count_range: [ 0, UNBOUNDED ] DelegateEndpoint: description: >- An endpoint that registers a delegate. derived_from: Endpoint properties: name: description: >- If not specified will use the node template's name. type: string required: false # Activities Process: derived_from: Instantiated interfaces: events: type: Events capabilities: activity: type: Process connectable: Connectable requirements: - connection: capability: Connectable relationship: Connection count_range: [ 0, UNBOUNDED ] - storage: capability: Storage relationship: Storage count_range: [ 0, UNBOUNDED ] Container: derived_from: Instantiated interfaces: events: type: Events capabilities: activity: Container connectable: ContainerConnectable requirements: - connection: capability: Connectable relationship: Connection count_range: [ 0, UNBOUNDED ] - storage: capability: Storage relationship: Storage count_range: [ 0, UNBOUNDED ] LoadBalancer: derived_from: Container capabilities: activity: type: Container properties: image: default: reference: docker.io/nginx:1.22.0 # TODO: attach an artifact that sets up loadbalancing VirtualMachine: derived_from: Instantiated interfaces: events: type: Events capabilities: activity: VirtualMachine connectable: Connectable requirements: - connection: capability: Connectable relationship: Connection count_range: [ 0, UNBOUNDED ] - storage: capability: Storage relationship: Storage count_range: [ 0, UNBOUNDED ] # Storage LocalDirectory: interfaces: events: type: Events capabilities: storage: LocalDirectory
assets/tosca/profiles/khutulun/nodes.yaml
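A minimal service-template sketch (node names are hypothetical) wiring the types above together - a LoadBalancer whose connection requirement targets a Container:

    tosca_definitions_version: tosca_2_0
    imports:
      - nodes.yaml
    service_template:
      node_templates:
        frontend:
          type: LoadBalancer
          requirements:
            - connection: backend
        backend:
          type: Container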
--- - name: Add PublicVirtualFixedIPs Argument to network environment file set_fact: network_environment_args: >- {{ network_environment_args | combine(public_virtual_fixed_ips) }} when: ssl_overcloud|bool and undercloud_type != "baremetal" - name: Create network environment file for network isolation template: src: "{{ network_environment_file }}" dest: "{{ working_dir }}/network-environment.yaml" mode: 0644 when: network_isolation|bool and undercloud_type != "baremetal" - when: undercloud_type == "baremetal" block: - name: "Push baremetal instackenv.json" copy: src: "{{ baremetal_instackenv }}" dest: "{{ working_dir }}/instackenv.json" - name: "Push network-environment.yaml" copy: src: "{{ baremetal_network_environment }}" dest: "{{ working_dir }}/network-environment.yaml" - when: network_isolation_type is defined and network_isolation_type == "bond_with_vlans" block: - name: Create bond-with-vlans nic-configs script template: src: "{{ bond_with_vlans_copy_nic_configs_script }}" dest: "{{ working_dir }}/bond-with-vlans-copy-nic-configs.sh" mode: 0755 - name: Modify the nic-configs file to use Linux bonds shell: | {{ working_dir }}/bond-with-vlans-copy-nic-configs.sh > \ {{ bond_with_vlans_nic_configs_log }} 2>&1 - name: "Push baremetal nic-configs directory (if defined)" copy: src: "{{ baremetal_nic_configs }}" dest: "{{ working_dir }}" when: baremetal_nic_configs is defined - name: Create overcloud custom tht script template: src: "{{ overcloud_custom_tht_script }}" dest: "{{ working_dir }}/overcloud-custom-tht-script.sh" mode: 0755 - name: Checkout custom tht heat templates from src shell: | {{ working_dir }}/overcloud-custom-tht-script.sh > \ {{ overcloud_custom_tht_log }} 2>&1 - name: Copy extra THT config files on the undercloud copy: src: "{{ item }}" dest: "{{ working_dir }}" with_items: "{{ extra_tht_configs|default([]) }}" - name: Add extra THT config file to the deploy command set_fact: extra_tht_config_args: >- {{ extra_tht_config_args|default('') }} -e {{ working_dir }}/{{ item | basename }} with_items: "{{ extra_tht_configs|default([]) }}"
tasks/main.yml
trigger: - master stages: - stage: 'Build' displayName: 'Build the web application' jobs: - job: 'Build' displayName: 'Build Job' pool: vmImage: 'ubuntu-latest' strategy: matrix: Python36: python.version: '3.6' steps: - task: UsePythonVersion@0 inputs: versionSpec: '$(python.version)' displayName: 'Use Python $(python.version)' - script: | python -m venv antenv source antenv/bin/activate python -m pip install --upgrade pip pip install setuptools pip install -r requirements.txt displayName: 'Install dependencies' - script: | python -m venv antenv source antenv/bin/activate python -m pip install --upgrade pip pip install setuptools pip install pytest pytest-azurepipelines pip install -r requirements.txt pytest displayName: 'pytest' - task: ArchiveFiles@2 inputs: rootFolderOrFile: '$(Build.SourcesDirectory)' includeRootFolder: false archiveType: 'zip' archiveFile: '$(Build.ArtifactStagingDirectory)/$(Build.BuildId).zip' replaceExistingArchive: true - task: PublishBuildArtifacts@1 inputs: pathToPublish: '$(Build.ArtifactStagingDirectory)' artifactName: drop - stage: 'Dev' displayName: 'Deploy to the dev environment' dependsOn: Build jobs: - deployment: Deploy pool: vmImage: 'ubuntu-latest' environment: dev variables: - group: 'flask-app' strategy: runOnce: deploy: steps: - task: UsePythonVersion@0 inputs: versionSpec: '3.6' displayName: 'Use Python version' - download: current artifact: drop - task: AzureWebApp@1 displayName: 'deploy to azure web app' inputs: azureSubscription: 'flask-app-service-connection' appName: $(WebAppNameDev) package: $(Pipeline.Workspace)/drop/$(Build.BuildId).zip
azure-pipelines.yml
name: Test on: pull_request: branches: - main jobs: test: name: Test runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v2 - name: Install flutter run: | mkdir ~/Tools && cd $_ git clone https://github.com/flutter/flutter.git ~/Tools/flutter/bin/flutter doctor - name: Test run: | ~/Tools/flutter/bin/flutter packages get ~/Tools/flutter/bin/flutter test --no-sound-null-safety build_android_apk: name: Build android apk runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v2 - name: Install flutter run: | mkdir ~/Tools && cd $_ git clone https://github.com/flutter/flutter.git ~/Tools/flutter/bin/flutter doctor - name: Decrypt archive env: ARCHIVE_PASSPHRASE: ${{ secrets.ARCHIVE_PASSPHRASE }} run: | gpg --quiet --batch --yes --decrypt --passphrase "$ARCHIVE_PASSPHRASE" --output "archive.tar" archive.tar.gpg tar -xf archive.tar rm archive.tar - name: Build apk run: | ~/Tools/flutter/bin/flutter packages get ~/Tools/flutter/bin/flutter build apk --no-sound-null-safety - name: Upload artifact uses: actions/upload-artifact@v2 with: name: android-apk path: build/app/outputs/flutter-apk/app-release.apk build_android_appbundle: name: Build android appbundle runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v2 - name: Install flutter run: | mkdir ~/Tools && cd $_ git clone https://github.com/flutter/flutter.git ~/Tools/flutter/bin/flutter doctor - name: Decrypt archive env: ARCHIVE_PASSPHRASE: ${{ secrets.ARCHIVE_PASSPHRASE }} run: | gpg --quiet --batch --yes --decrypt --passphrase "$ARCHIVE_PASSPHRASE" --output "archive.tar" archive.tar.gpg tar -xf archive.tar rm archive.tar - name: Build appbundle run: | ~/Tools/flutter/bin/flutter packages get ~/Tools/flutter/bin/flutter build appbundle --no-sound-null-safety - name: Upload artifact uses: actions/upload-artifact@v2 with: name: android-appbundle path: build/app/outputs/bundle/release/app-release.aab build_web: name: Build web runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v2 - name: Install flutter run: | mkdir ~/Tools && cd $_ git clone https://github.com/flutter/flutter.git ~/Tools/flutter/bin/flutter config --enable-web ~/Tools/flutter/bin/flutter doctor - name: Build web run: | ~/Tools/flutter/bin/flutter packages get ~/Tools/flutter/bin/flutter build web --no-sound-null-safety - name: Upload artifact uses: actions/upload-artifact@v2 with: name: web path: build/web build_windows: name: Build windows runs-on: windows-latest steps: - name: Checkout repository uses: actions/checkout@v2 - name: Install flutter run: | mkdir ~/Tools && cd $_ git clone https://github.com/flutter/flutter.git ~/Tools/flutter/bin/flutter config --enable-windows-desktop ~/Tools/flutter/bin/flutter doctor shell: bash - name: Build windows run: | ~/Tools/flutter/bin/flutter packages get ~/Tools/flutter/bin/flutter build windows --no-sound-null-safety shell: bash - name: Upload artifact uses: actions/upload-artifact@v2 with: name: windows path: build\windows\runner\Release
.github/workflows/test.yml
domainInfo: AdminUserName: '@@PROP:AdminUserName@@' AdminPassword: '@@PROP:AdminPassword@@' topology: Name: onprem_domain EnableEeCompliantClassloadingForEmbeddedAdapters: true AdminServerName: 'admin-server' DomainVersion: 172.16.31.10.0 CdiContainer: ImplicitBeanDiscoveryEnabled: false JMX: PlatformMBeanServerUsed: true Cluster: cluster_1: DynamicServers: ServerNamePrefix: 'managed-server' CalculatedListenPorts: false MachineMatchType: name MaximumDynamicServerCount: 5 ServerTemplate: 'server-template_1' DynamicClusterSize: 5 Server: 'admin-server': ServerTemplate: 'server-template_1': ListenPort: 8001 Cluster: cluster_1 JTAMigratableTarget: Cluster: cluster_1 SSL: ListenPort: 8100 SecurityConfiguration: CredentialEncrypted: '@@PROP:SecurityConfig.CredentialEncrypted@@' Realm: myrealm: Adjudicator: DefaultAdjudicator: DefaultAdjudicator: AuthenticationProvider: DefaultAuthenticator: DefaultAuthenticator: DefaultIdentityAsserter: DefaultIdentityAsserter: ActiveType: [ AuthenticatedUser, 'weblogic-jwt-token' ] Authorizer: XACMLAuthorizer: XACMLAuthorizer: PolicyDeploymentEnabled: true CertPathProvider: WebLogicCertPathProvider: WebLogicCertPathProvider: CredentialMapper: DefaultCredentialMapper: DefaultCredentialMapper: PasswordValidator: SystemPasswordValidator: SystemPasswordValidator: RoleMapper: XACMLRoleMapper: XACMLRoleMapper: resources: JDBCSystemResource: testDatasource: Target: cluster_1 JdbcResource: DatasourceType: GENERIC JDBCConnectionPoolParams: ConnectionHarvestTriggerCount: -1 StatementCacheSize: 10 InitialCapacity: 0 RemoveInfectedConnections: true CountOfRefreshFailuresTillDisable: 2 ConnectionCreationRetryFrequencySeconds: 0 MinCapacity: 0 LoginDelaySeconds: 0 TestTableName: SQL ISVALID TestFrequencySeconds: 120 StatementTimeout: 23 HighestNumWaiters: 2147483647 InactiveConnectionTimeoutSeconds: 0 WrapTypes: true SecondsToTrustAnIdlePoolConnection: 10 ConnectionReserveTimeoutSeconds: 10 ConnectionHarvestMaxCount: 1 CountOfTestFailuresTillFlush: 2 ShrinkFrequencySeconds: 900 StatementCacheType: LRU IgnoreInUseConnectionsEnabled: true MaxCapacity: 15 JDBCDataSourceParams: GlobalTransactionsProtocol: OnePhaseCommit JNDIName: jdbc/testDatasource JDBCDriverParams: URL: 'jdbc:oracle:thin:@//xxx.xxx.x.xxx:1521/ORCLCDB' PasswordEncrypted: '@@PROP:JDBC.testDatasource.PasswordEncrypted@@' DriverName: oracle.jdbc.OracleDriver Properties: user: Value: scott appDeployments: Application: opdemo: SourcePath: wlsdeploy/applications/opdemo.war ModuleType: war Target: 'cluster_1,admin-server'
AppDev/wls/ll-wls-migration/WLS_imagetool_scripts/DiscoveredDemoDomain.yaml
on: [push, workflow_dispatch] name: Integration jobs: test: runs-on: [self-hosted, Linux, X64] steps: - name: Checkout the source code uses: actions/checkout@master with: submodules: true - name: Install & display rust toolchain run: rustup show - name: Check targets are installed correctly run: rustup target list --installed # - uses: actions/cache@v2 # with: # path: | # ~/.cargo/registry # ~/.cargo/git # target/ # key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} # restore-keys: | # ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} # ${{ runner.os }}-cargo - name: Run all tests run: cargo test --all-features native-linux: needs: test if: startsWith(github.ref, 'refs/tags/') runs-on: [self-hosted, Linux, X64] steps: - name: Checkout the source code uses: actions/checkout@master with: submodules: true - name: Install & display rust toolchain run: rustup show - name: Check targets are installed correctly run: rustup target list --installed - uses: actions/cache@v2 with: path: | ~/.cargo/registry ~/.cargo/git target/ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} ${{ runner.os }}-cargo - name: Build optimized binary run: CARGO_PROFILE_RELEASE_LTO=true RUSTFLAGS="-C codegen-units=1" cargo build --release --verbose - uses: actions/upload-artifact@master with: name: astar-ubuntu-latest-x86_64 path: target/release/astar-collator native-macos: needs: test if: startsWith(github.ref, 'refs/tags/') runs-on: macos-latest steps: - name: Checkout the source code uses: actions/checkout@master with: submodules: true - name: Install & display rust toolchain run: rustup show - name: Check targets are installed correctly run: rustup target list --installed - name: Build optimized binary run: cargo build --release --verbose - uses: actions/upload-artifact@master with: name: astar-macOS-latest-x86_64 path: target/release/astar-collator check-all-features: runs-on: ubuntu-latest steps: - name: Checkout the source code uses: actions/checkout@master with: submodules: true - name: Install & display rust toolchain run: rustup show - name: Check targets are installed correctly run: rustup target list --installed - name: Check all features compilation run: cargo check --verbose --all-features nix: needs: test if: startsWith(github.ref, 'refs/tags/') runs-on: ubuntu-latest steps: - name: Checkout the source code uses: actions/checkout@v2 with: submodules: true - uses: cachix/install-nix-action@v12 with: nix_path: nixpkgs=channel:nixos-21.11 - run: nix-shell third-party/nix/shell.nix --run "cargo check --all-features" docker: needs: test if: startsWith(github.ref, 'refs/tags/') runs-on: [self-hosted, Linux, X64] steps: - name: Checkout the source code uses: actions/checkout@v2 with: submodules: true - name: Install & display rust toolchain run: rustup show - name: Check targets are installed correctly run: rustup target list --installed - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 - name: Login to DockerHub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Docker meta id: docker_meta uses: crazy-max/ghaction-docker-meta@v1 with: images: staketechnologies/astar-collator tag-custom: shiden tag-sha: true # add git short SHA as Docker tag - name: Build optimized binary run: | CARGO_PROFILE_RELEASE_LTO=true RUSTFLAGS="-C codegen-units=1" cargo build --locked --release cp target/release/astar-collator third-party/docker - 
name: Build & Push docker image uses: docker/build-push-action@v2 with: context: third-party/docker platforms: linux/amd64 labels: ${{ steps.docker_meta.outputs.labels }} tags: ${{ steps.docker_meta.outputs.tags }} push: true
.github/workflows/integration.yml
cv: header: contact_me: Contact me looking_work_now: Not looking for work right now headings: about: About me experience: Experience formation: Education projects: Projects associations: Associations basic_information: Basic information skills: Skills testimonials: Testimonials recognitions: Recognitions languages: Languages links: Links text: more_linkedin: More on LinkedIn to_project: To the project more: More menu: about: About publications: Publications profile: Profile articles: Articles cv: CV teaching: Teaching about: heading: contact: Contact in_short: In short about_site: About this site license: License text: in_short: "I see the Web as a tool that can shape an open and decentralized society. The Web being virtual by nature, I see the library as the tool that connects the Web to the physical world. Together, these two tools make it possible to share everyone's knowledge with everyone. At the same time, I am interested in the new shared-governance tools and the commons economy that will soon change the way we operate. I hope one day to be able to live the utopia of a free society, based on the peer-to-peer economy, made up of small local communities linked by the web and fraternal bonds." contact: "Got something to say or ask? Use the powers of the information highway to get hold of me!" accounts: "You can find me on a variety of social web sites; my username is usually “michaelravedoni”. Alphabetically:" about_site: It’s a static site automatically generated by Jekyll. The source resides on GitHub. If you find any errors, please let me know. licence: Except where otherwise noted, all of the public content on this site (texts and images) is licensed under biblio: title: Libraries for the 21st century description: The digitalization of society and the rapid social and economic changes of the twenty-first century are making culture, knowledge and information increasingly essential production resources. To enable citizens to contribute, innovate and live serenely in the knowledge and information society, we must give them the necessary tools. The library is that toolbox! So let us ensure that the library reinvents itself to meet the challenges ahead. profile: title: One CV, two profiles description: "My visits to different libraries, archives and museums, a curiosity for trends and innovations, discussions and thousands of hours spent on the web have allowed me to develop many ideas and models adapted to the field.<br/>I am passionate about this way of doing things: observing, exchanging, questioning, doubting, searching, gathering, designing and applying. Wherever I go, this process accompanies me in creating new ideas and new models. It is with this in mind that I am passionate about web technologies and the commons economy. Above all, I think the library is at the heart of the societal model that enables the transition to the commons economy." hire_for: Hire me to… biblio: heading: Library description: The library of the 21st century. The library as a driver of the transition to the commons economy web: heading: Web and common society description: Web technologies at the service of communities for a commons economy, P2P or through Open Co-Operativism publications: title: Projects and publications description: You will find all my publications, projects and text creations below.
heading: repos: Web projects creations: Creations articles: Blog publications: Publications text: numendil: My personal notebook, projects and ideas confederal: Another political approach is possible. ConfederAl offers a simple decoding of the Swiss political landscape from an alternative viewpoint. articles: Blog abstract: Abstract read: Read cite: Cite web: title: Web and common society description: "" articles: title: Articles description: Articles and posts related to the world of libraries text: read: Read back: Back teaching: title: Teaching description: I love teaching about the Web, the library, and the commons and collaborative economy. Contact me if you are looking for a speaker. headings: courses: Courses and short training courses lectures: Lectures text: see: See
_i18n/en.yml
name: native-image-compile on: [push] jobs: native-image-compile-on-host: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Cache SDKMAN installation uses: actions/cache@v2 with: path: ~/.sdkman key: ${{ runner.os }}-sdkman-${{ hashFiles('**/pom.xml') }} restore-keys: | ${{ runner.os }}-sdkman- - name: Install GraalVM with SDKMAN run: | curl -s "https://get.sdkman.io" | bash source "$HOME/.sdkman/bin/sdkman-init.sh" sdk install java 172.16.31.10.r11-grl java -version - name: Install GraalVM Native Image run: | source "$HOME/.sdkman/bin/sdkman-init.sh" gu install native-image native-image --version - name: Install Maven, which uses GraalVM for later builds run: | source "$HOME/.sdkman/bin/sdkman-init.sh" sdk install maven mvn --version - name: Run GraalVM Native Image compilation of Spring Boot App (Maven version instead of ./compile.sh) run: | source "$HOME/.sdkman/bin/sdkman-init.sh" mvn -B clean package -P native --no-transfer-progress native-image-compile-in-docker: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Log in to Heroku Container Registry first, so that we can push our image later run: echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin registry.heroku.com - name: Compile Native Image using Docker run: docker build . --tag=registry.heroku.com/spring-boot-graal/web - name: Push to Heroku Container Registry run: docker push registry.heroku.com/spring-boot-graal/web - name: Release Dockerized Native Spring Boot App on Heroku run: ./heroku-release.sh spring-boot-graal - name: Push to Docker Hub also, since automated builds there don't have enough RAM to do a docker build run: | echo ${{ secrets.DOCKER_HUB_TOKEN }} | docker login -u ${{ secrets.DOCKER_HUB_USERNAME }} --password-stdin docker tag registry.heroku.com/spring-boot-graal/web jonashackt/spring-boot-graalvm:latest docker push jonashackt/spring-boot-graalvm:latest
.github/workflows/native-image-compile.yml
--- # (c) <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - hosts: all remote_user: root tasks: - name: include default variables include_vars: "./vars/main.yml" - name: include release specific variables include_vars: "{{ lookup('first_found', _params) }}" vars: _params: files: - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" - "{{ ansible_distribution | lower }}.yml" - "{{ ansible_os_family | lower }}.yml" paths: - "./vars" ignore_errors: true - name: rm -rf /root/.ansible/tmp/* raw: | set -ex rm -rf /root/.ansible/tmp/* rm -rf /root/.cache/* rm -rf /tmp/* changed_when: false ignore_errors: true - name: debian | apt-get clean raw: | set -ex apt-get clean rm -rf /var/lib/apt/lists/* changed_when: false ignore_errors: true - name: redhat | yum clean all raw: | set -ex yum clean all rm -rf /var/cache/yum/* changed_when: false ignore_errors: true - name: suse | zypper clean --all raw: | set -ex zypper clean --all rm -rf /var/cache/zypper/* changed_when: false ignore_errors: true
playbooks/side_effect.yml
ms.openlocfilehash: 0a9388419256feee2add90299f7be3b7c6ab5c77 ms.sourcegitcommit: 262b7c417245b7b61a57d53eee33a477a722ca6e ms.translationtype: MT ms.contentlocale: zh-CN ms.lasthandoff: 09/28/2018 ms.locfileid: "24040272" items: - uid: office.Office.GoToType summary: 指定要导航到的位置或对象类型。 remarks: >- **支持详细信息** 下列矩阵中的大写字母 Y 表示相应的 Office 主机应用程序支持此枚举。空的单元格表示相应的 Office 主机应用程序不支持此枚举。 有关 Office 主机应用程序和服务器要求的详细信息,请参阅[Requirements for Office 加载项运行](https://docs.microsoft.com/office/dev/add-ins/concepts/requirements-for-running-office-add-ins)<!-- -->。 *支持的主机(按平台)* <table> <tr><th> </th><th> Office for Windows Desktop </th><th> Office Online(在浏览器中) </th><th> Office for iPad </th></tr> <tr><td><strong>Excel</strong></td><td> Y </td><td> Y </td><td> Y </td></tr> <tr><td><strong>PowerPoint</strong></td><td> Y </td><td> Y </td><td> Y </td></tr> <tr><td><strong>Word</strong></td><td> Y </td><td> </td><td> Y </td></tr> </table> name: Office.GoToType fullName: office.Office.GoToType langs: - typeScript type: enum package: office children: - office.Office.GoToType.Binding - office.Office.GoToType.Index - office.Office.GoToType.NamedItem - office.Office.GoToType.Slide - uid: office.Office.GoToType.Binding summary: >- 转至使用特定绑定 ID 的绑定对象。 支持主机: Excel、 Word name: Binding fullName: office.Office.GoToType.Binding langs: - typeScript type: field - uid: office.Office.GoToType.Index summary: >- 按幻灯片编号或[Office.Index](xref:office.Office.Index)转到指定的索引<!-- -->。 支持主机: PowerPoint name: Index fullName: office.Office.GoToType.Index langs: - typeScript type: field - uid: office.Office.GoToType.NamedItem summary: >- 转至使用该项名称的命名项。 在 Excel 中,您可以使用任何对命名范围或表格的结构化引用:“Worksheet2!Table1” 支持主机: Excel name: NamedItem fullName: office.Office.GoToType.NamedItem langs: - typeScript type: field - uid: office.Office.GoToType.Slide summary: >- 转至使用特定 ID 的幻灯片。 支持主机: PowerPoint name: Slide fullName: office.Office.GoToType.Slide langs: - typeScript type: field
docs/docs-ref-autogen/office/office.gototype.yml
functions: - summary: Gibt eine diskrete kategorische Verteilung zurück, bei der die Wahrscheinlichkeit für jede endliche Liste der angegebenen Ergebnisse explizit angegeben wird. uid: Microsoft.Quantum.Random.CategoricalDistribution - summary: Gibt eine einheitliche Verteilung über ein bestimmtes inklusives Intervall zurück. uid: Microsoft.Quantum.Random.ContinuousUniformDistribution - summary: Gibt eine einheitliche Verteilung für einen angegebenen inklusiven Bereich zurück. uid: Microsoft.Quantum.Random.DiscreteUniformDistribution - summary: Gibt eine normale Verteilung mit einem angegebenen Mittelwert und Varianz zurück. uid: Microsoft.Quantum.Random.NormalDistribution - summary: Gibt eine normale Verteilung mit dem Mittelwert 0 und der Varianz 1 zurück. uid: Microsoft.Quantum.Random.StandardNormalDistribution - summary: '' uid: Microsoft.Quantum.Random.StandardTransformation - summary: Gibt bei einer kontinuierlichen Verteilung eine neue Verteilung zurück, die den ursprünglichen durch eine angegebene Funktion transformiert. uid: Microsoft.Quantum.Random.TransformedContinuousDistribution name: Microsoft.Quantum.Random newtypes: - summary: Stellt eine Univariate-Wahrscheinlichkeitsverteilung für ganze Zahlen beliebiger Größe dar. uid: Microsoft.Quantum.Random.BigDiscreteDistribution - summary: Stellt eine Univariate-Wahrscheinlichkeitsverteilung für komplexe Zahlen dar. uid: Microsoft.Quantum.Random.ComplexDistribution - summary: Stellt eine Univariate-Wahrscheinlichkeitsverteilung für reelle Zahlen dar. uid: Microsoft.Quantum.Random.ContinuousDistribution - summary: Stellt eine Univariate-Wahrscheinlichkeitsverteilung über ganze Zahlen dar. uid: Microsoft.Quantum.Random.DiscreteDistribution operations: - summary: '' uid: Microsoft.Quantum.Random.Delay - summary: Zeichnet eine zufällige Stichprobe aus einer kategorischen Verteilung, die durch eine Liste von Wahrscheinlichkeiten angegeben wird. uid: Microsoft.Quantum.Random.DrawCategorical - summary: Bei einer Erfolgswahrscheinlichkeit wird eine einzelne Bernoulli-Testversion zurückgegeben, die mit der angegebenen Wahrscheinlichkeit true ist. uid: Microsoft.Quantum.Random.DrawRandomBool - summary: Zeichnet eine zufällige reelle Zahl in einem angegebenen inklusiven Intervall. uid: Microsoft.Quantum.Random.DrawRandomDouble - summary: Zeichnet eine zufällige ganze Zahl in einem angegebenen inklusiven Bereich. uid: Microsoft.Quantum.Random.DrawRandomInt - summary: Zeichnet einen zufälligen Pauli-Wert. uid: Microsoft.Quantum.Random.DrawRandomPauli - summary: '' uid: Microsoft.Quantum.Random.DrawStandardNormalVariate - summary: Wenn ein Array von Daten und eine Verteilung über die Indizes verfügen, versucht, ein Element nach dem Zufallsprinzip auszuwählen. uid: Microsoft.Quantum.Random.MaybeChooseElement - summary: Interner Vorgang für die Stichprobenentnahme aus transformierten Verteilungen. Sollte nur über eine partielle Anwendung verwendet werden. uid: Microsoft.Quantum.Random.SampleTransformedContinuousDistribution summary: '' uid: Microsoft.Quantum.Random metadata: ms.openlocfilehash: 1504b9ae9ffc21a7684a45b58f56f138068beb67 ms.sourcegitcommit: 29e0d88a30e4166fa580132124b0eb57e1f0e986 ms.translationtype: MT ms.contentlocale: de-DE ms.lasthandoff: 10/27/2020 ms.locfileid: "92702120"
api/qsharp/microsoft.quantum.random.yml
--- # # edX Configuration # # github: https://github.com/edx/configuration # wiki: https://github.com/edx/configuration/wiki # code style: https://github.com/edx/configuration/wiki/Ansible-Coding-Conventions # license: https://github.com/edx/configuration/blob/master/LICENSE.TXT # ## # Defaults for role edx_service_rds # # # vars are namespaced with the module name. # edx_service_rds_role_name: edx_service_rds E_D_C: "prod-sample-app" EDX_SERVICE_RDS_INSTANCE_SIZE: 10 EDX_SERVICE_RDS_INSTANCE_TYPE: "db.m1.small" EDX_SERVICE_RDS_ROOT_USER: "root" # no unicode (e.g. c cedilla) in passwords EDX_SERVICE_RDS_ROOT_PASSWORD: "<PASSWORD>" EDX_SERVICE_RDS_ENGINE: "MySQL" EDX_SERVICE_RDS_ENGINE_VERSION: "5.6.22" EDX_SERVICE_RDS_PARAM_GROUP_ENGINE: "mysql5.6" # will vary depending upon engine, examples assume # MySQL 5.6 EDX_SERVICE_RDS_PARAM_GROUP_PARAMS: character_set_client: "utf8" character_set_connection: "utf8" character_set_database: "utf8" character_set_filesystem: "utf8" character_set_results: "utf8" character_set_server: "utf8" collation_connection: "utf8_unicode_ci" collation_server: "utf8_unicode_ci" EDX_SERVICE_RDS_MULTI_AZ: No EDX_SERVICE_RDS_MAINT_WINDOW: "Mon:00:00-Mon:01:15" EDX_SERVICE_RDS_BACKUP_DAYS: 30 EDX_SERVICE_RDS_BACKUP_WINDOW: "02:00-03:00" EDX_SERVICE_RDS_SUBNET_1_AZ: "us-east-1c" EDX_SERVICE_RDS_SUBNET_1_CIDR: "{{ vpc_class_b }}.50.0/24" EDX_SERVICE_RDS_SUBNET_2_AZ: "us-east-1d" EDX_SERVICE_RDS_SUBNET_2_CIDR: "{{ vpc_class_b }}.51.0/24" # The defaults are permissive; override them EDX_SERVICE_RDS_SECURITY_GROUP: name: "{{ e_d_c }}-rds-sg" description: "RDS ingress and egress." rules: - proto: "tcp" from_port: "3306" to_port: "3306" cidr_ip: "0.0.0.0/0" rules_egress: - proto: "tcp" from_port: "3306" to_port: "3306" cidr_ip: "0.0.0.0/0" # The defaults are permissive; override them EDX_SERVICE_RDS_VPC_DB_ACL: name: "{{ e_d_c }}-db" rules: - number: "100" type: "ingress" protocol: "tcp" from_port: 3306 to_port: 3306 cidr_block: "0.0.0.0/0" rule_action: "allow" - number: "100" type: "egress" protocol: "all" from_port: 0 to_port: 65535 cidr_block: "0.0.0.0/0" rule_action: "allow" EDX_SERVICE_RDS_VPC_DB_ROUTE_TABLE: - cidr: "{{ vpc_class_b }}.0.0/16" gateway: 'local' # typically override the all-caps vars, but it may # be convenient to override the entire structure # if you are spanning more than two subnets edx_service_rds_vpc_db_subnets: - name: "{{ E_D_C }}-db-{{ EDX_SERVICE_RDS_SUBNET_1_AZ }}" cidr: "{{ EDX_SERVICE_RDS_SUBNET_1_CIDR }}" az: "{{ EDX_SERVICE_RDS_SUBNET_1_AZ }}" - name: "{{ E_D_C }}-db-{{ EDX_SERVICE_RDS_SUBNET_2_AZ }}" cidr: "{{ EDX_SERVICE_RDS_SUBNET_2_CIDR }}" az: "{{ EDX_SERVICE_RDS_SUBNET_2_AZ }}" edx_service_rds_state: "present" edx_service_rds_db: state: "{{ edx_service_rds_state }}" name: "{{ E_D_C }}-primary" size: "{{ EDX_SERVICE_RDS_INSTANCE_SIZE }}" instance_type: "{{ EDX_SERVICE_RDS_INSTANCE_TYPE }}" root_user: "{{ EDX_SERVICE_RDS_ROOT_USER }}" root_password: "{{ EDX_SERVICE_RDS_ROOT_PASSWORD }}" engine: "{{ EDX_SERVICE_RDS_ENGINE }}" engine_version: "{{ EDX_SERVICE_RDS_ENGINE_VERSION }}" multi_az: "{{ EDX_SERVICE_RDS_MULTI_AZ }}" maint_window: "{{ EDX_SERVICE_RDS_MAINT_WINDOW }}" backup_days: "{{ EDX_SERVICE_RDS_BACKUP_DAYS }}" backup_window: "{{ EDX_SERVICE_RDS_BACKUP_WINDOW }}" param_group: name: "{{ E_D_C }}" engine: "{{ EDX_SERVICE_RDS_PARAM_GROUP_ENGINE }}" params: "{{ EDX_SERVICE_RDS_PARAM_GROUP_PARAMS }}" # # OS packages # edx_service_rds_debian_pkgs: [] edx_service_rds_redhat_pkgs: []
repo-github/antshin72@configuration/playbooks/roles/edx_service_rds/defaults/main.yml
toc: - title: Introduction subfolderitems: - page: Overview url: / - page: Architecture url: /architecture - title: Getting Started subfolderitems: - page: Managed Clusters url: /getting-started - page: Standalone Clusters url: /getting-started-standalone - title: Designs subfolderitems: - page: Standalone Clusters url: /designs/standalone-clusters - page: Package Process url: /designs/package-process - title: Installation subfolderitems: - page: Planning Your Installation url: /installation-planning - page: Installing the Tanzu CLI url: /cli-installation - page: Preparing to Deploy a Cluster url: /prepare-deployment subitems: - subpage: Prepare to Deploy a Cluster to Amazon EC2 suburl: /aws - subpage: Prepare to Deploy a Cluster to vSphere suburl: /vsphere - subpage: Prepare to Deploy a Cluster to Azure suburl: /azure-mgmt - page: Deploying Clusters url: /clusters-deploy subitems: - subpage: Deploy a Management Cluster to Amazon EC2 suburl: /aws-install-mgmt - subpage: Deploy a Management Cluster to Azure suburl: /azure-install-mgmt - subpage: Deploy a Management Cluster to Docker suburl: /docker-install-mgmt - subpage: Deploy a Management Cluster to vSphere suburl: /vsphere-install-mgmt - subpage: Deploy a Standalone Cluster to Amazon EC2 suburl: /aws-install-standalone - subpage: Deploy a Standalone Cluster to Azure suburl: /azure-install-standalone - subpage: Deploy a Standalone Cluster to Docker suburl: /docker-install-standalone - subpage: Deploy a Standalone Cluster to vSphere suburl: /vsphere-install-standalone - subpage: Deploying a Workload Cluster suburl: /workload-clusters - subpage: Examine the cluster suburl: /verify-deployment - subpage: Scale Management Cluster suburl: /scale-mgmt - subpage: Delete Management Cluster suburl: /delete-mgmt - title: Packages subfolderitems: - page: Packages Introduction url: /packages-intro - page: Configuring Cert Manager url: /cert-manager-config - page: Configuring Contour url: /contour-config - page: Configuring ExternalDNS url: /externaldns-config - page: Configuring Fluent Bit url: /fluentbit-config - page: Configuring Gatekeeper url: /gatekeeper-config - page: Configuring Grafana url: /grafana-config - page: Configuring Knative url: /knative-config - page: Configuring Prometheus url: /prometheus-config - page: Configuring Velero url: /velero-config - page: Configuring Local Path Storage url: /local-path-storage-config - title: Reference subfolderitems: - page: AWS Account Reference url: /ref-aws - page: Amazon EC2 Workload Cluster Template url: /aws-wl-template - page: vSphere Account Reference url: /ref-vsphere - page: vSphere Workload Cluster Template url: /vsphere-wl-template - page: Azure Account Reference url: /ref-azure - page: Azure Workload Cluster Template url: /azure-wl-template - page: Troubleshoot Error messages url: /error_messages - title: Designs subfolderitems: - page: Package Management url: /designs/package-management - page: Package Packaging url: /designs/package-packaging - page: Standalone Clusters url: /designs/standalone-clusters - page: Package Operations url: /designs/package-operations
docs/site/data/docs/latest-toc.yml
- position: 1 driverNumber: 8 driverId: mika-hakkinen constructorId: mclaren engineManufacturerId: mercedes tyreManufacturerId: bridgestone lap: 25 time: "1:20.450" gap: interval: - position: 2 driverNumber: 7 driverId: david-coulthard constructorId: mclaren engineManufacturerId: mercedes tyreManufacturerId: bridgestone lap: 61 time: "1:20.715" gap: "+0.265" interval: "+0.265" - position: 3 driverNumber: 3 driverId: michael-schumacher constructorId: ferrari engineManufacturerId: ferrari tyreManufacturerId: goodyear lap: 65 time: "1:21.001" gap: "+0.551" interval: "+0.286" - position: 4 driverNumber: 2 driverId: heinz-harald-frentzen constructorId: williams engineManufacturerId: mecachrome tyreManufacturerId: goodyear lap: 65 time: "1:21.394" gap: "+0.944" interval: "+0.393" - position: 5 driverNumber: 5 driverId: giancarlo-fisichella constructorId: benetton engineManufacturerId: playlife tyreManufacturerId: bridgestone lap: 65 time: "1:21.506" gap: "+1.056" interval: "+0.112" - position: 6 driverNumber: 4 driverId: eddie-irvine constructorId: ferrari engineManufacturerId: ferrari tyreManufacturerId: goodyear lap: 62 time: "1:21.667" gap: "+1.217" interval: "+0.161" - position: 7 driverNumber: 1 driverId: jacques-villeneuve constructorId: williams engineManufacturerId: mecachrome tyreManufacturerId: goodyear lap: 33 time: "1:21.701" gap: "+1.251" interval: "+0.034" - position: 8 driverNumber: 9 driverId: damon-hill constructorId: jordan engineManufacturerId: mugen-honda tyreManufacturerId: goodyear lap: 66 time: "1:21.741" gap: "+1.291" interval: "+0.040" - position: 9 driverNumber: 6 driverId: alexander-wurz constructorId: benetton engineManufacturerId: playlife tyreManufacturerId: bridgestone lap: 61 time: "1:21.778" gap: "+1.328" interval: "+0.037" - position: 10 driverNumber: 10 driverId: ralf-schumacher constructorId: jordan engineManufacturerId: mugen-honda tyreManufacturerId: goodyear lap: 21 time: "1:21.881" gap: "+1.431" interval: "+0.103" - position: 11 driverNumber: 14 driverId: jean-alesi constructorId: sauber engineManufacturerId: petronas tyreManufacturerId: goodyear lap: 43 time: "1:21.979" gap: "+1.529" interval: "+0.098" - position: 12 driverNumber: 15 driverId: johnny-herbert constructorId: sauber engineManufacturerId: petronas tyreManufacturerId: goodyear lap: 23 time: "1:22.712" gap: "+2.262" interval: "+0.733" - position: 13 driverNumber: 11 driverId: olivier-panis constructorId: prost engineManufacturerId: peugeot tyreManufacturerId: bridgestone lap: 61 time: "1:22.931" gap: "+2.481" interval: "+0.219" - position: 14 driverNumber: 21 driverId: toranosuke-takagi constructorId: tyrrell engineManufacturerId: ford tyreManufacturerId: goodyear lap: 47 time: "1:23.392" gap: "+2.942" interval: "+0.461" - position: 15 driverNumber: 18 driverId: rubens-barrichello constructorId: stewart engineManufacturerId: ford tyreManufacturerId: bridgestone lap: 33 time: "1:23.412" gap: "+2.962" interval: "+0.020" - position: 16 driverNumber: 17 driverId: mika-salo constructorId: arrows engineManufacturerId: arrows tyreManufacturerId: bridgestone lap: 23 time: "1:23.552" gap: "+3.102" interval: "+0.140" - position: 17 driverNumber: 19 driverId: jos-verstappen constructorId: stewart engineManufacturerId: ford tyreManufacturerId: bridgestone lap: 30 time: "1:23.944" gap: "+3.494" interval: "+0.392" - position: 18 driverNumber: 23 driverId: esteban-tuero constructorId: minardi engineManufacturerId: ford tyreManufacturerId: bridgestone lap: 50 time: "1:24.024" gap: "+3.574" interval: "+0.080" - 
position: 19 driverNumber: 20 driverId: ricardo-rosset constructorId: tyrrell engineManufacturerId: ford tyreManufacturerId: goodyear lap: 23 time: "1:24.161" gap: "+3.711" interval: "+0.137" - position: 20 driverNumber: 22 driverId: shinji-nakano constructorId: minardi engineManufacturerId: ford tyreManufacturerId: bridgestone lap: 61 time: "1:24.210" gap: "+3.760" interval: "+0.049" - position: 21 driverNumber: 16 driverId: pedro-diniz constructorId: arrows engineManufacturerId: arrows tyreManufacturerId: bridgestone lap: 5 time: "1:25.285" gap: "+4.835" interval: "+1.075" - position: 22 driverNumber: 12 driverId: jarno-trulli constructorId: prost engineManufacturerId: peugeot tyreManufacturerId: bridgestone lap: 4 time: "1:25.328" gap: "+4.878" interval: "+0.043"
src/data/seasons/1998/races/15-luxembourg/fastest-laps.yml
# folder where created dataset will be stored. out_folder: /home/duy/datasets/VivosPrepare/VivosParty/dataset metadata_folder: /home/duy/datasets/VivosPrepare/VivosParty/dataset/metadata samplerate: 16000 # dataset samplerate (should be <=16000) save_wet_sources: False # whether to save oracle reverberated clean-speech sources. save_dry_sources: False # whether to save oracle dry, non-reverberated clean-speech sources. ######################### # Source datasets paths # ######################### vivos_root: /home/duy/datasets/VivosPrepare/vivos # root path to librispeech: download from https://openslr.org/12/ vivos_folders: # folders one wants to use for the train dataset. train: - !ref <vivos_root>/train/ # e.g. clean-100 etc. dev: - !ref <vivos_root>/dev/ eval: - !ref <vivos_root>/test/ rirs_noises_root: /home/duy/datasets/libriparty_1/RIRS_NOISES rirs_folders: - !ref <rirs_noises_root>/simulated_rirs/ - !ref <rirs_noises_root>/real_rirs_isotropic_noises noises_folders: - !ref <rirs_noises_root>/pointsource_noises/ musics_root: /home/duy/datasets/duy-vivos/vivos-with-bg/background/youtube_converted_wavs/ vocal_musics_folders: - !ref <musics_root>/vocal/ off_vocal_musics_folders: - !ref <musics_root>/off_vocal/ backgrounds_root: /home/duy/datasets/libriparty_1/QUT_NOISES_16k/ # backgrounds_root: null # optional background noise from QUT (required for "official" dataset) # One can use also other background noises. ######################################################################### # Metadata creation parameters (Used only for custom dataset generation). ######################################################################### seed: 1234 # e.g, 1234 split_factors: - 0.8 - 0.1 - 0.1 # split factor used to split RIRs, backgrounds and noises. # for speech one can use librispeech official splits n_sessions: # number of synthetic scenes train: 200 dev: 10 eval: 10 n_speakers: 2 # max number of speakers in each session interval_factor_speech: 10 # this parameter controls amount of silence and # probability of overlapped speech. # the LOWER the less silence and more overlapped speech. interval_factor_noises: 15 # same parameter but for impulsive noises. # It controls the interval between two impulsive noises. max_length: 300 # max length in seconds for each session. ######################## # Speech configuration # ######################## # we use gaussian distribution for speech and noise levels. speech_lvl_min: -35 # (dB) absolute minimum level for speech. speech_lvl_max: -3 # (dB) absolute maximum level for speech. speech_lvl_mean: -15 # (dB) mean for speech level. speech_lvl_var: 3 # (dB) variance for speech level. ################################# # Impulsive noise configuration # ################################# imp: lvl_min: -40 # (dB) absolute minimum level for noises. lvl_rel_max: 10 # (dB) absolute maximum for noises relative # to minimum speech level in the whole session. lvl_mean: -33 # (dB) mean for noise level lvl_var: 2 # (dB) variance for noise level min_duration: 5 # (s) minimum duration of noise max_duration: 10 # (s) maximum duration of noise ####################### # Music configuration # ####################### music: lvl_min: -40 # (dB) absolute minimum level for music. lvl_rel_max: 10 # (dB) absolute maximum for music relative # to minimum speech level in the whole session. 
lvl_mean: -20 # (dB) mean for music level lvl_var: 3 # (dB) variance for music level min_duration: 5 # (s) minimum duration of music max_duration: 10 # (s) maximum duration of music ############################ # Background configuration # ############################ background_lvl_min: -60 # (dB) absolute minimum level for background. background_lvl_rel_max: -12 # (dB) absolute maximum for background relative # to minimum speech level in the whole session. background_prob: 0.7 rir_prob: 0.5
recipes/VivosParty/generate_dataset/dataset.yaml
--- # named environment that vault-unseal is running in, gets passed in when sending # email alerts. environment: dev # delay between seal-checks of each vault node. check_interval: 15s # maximum delay between checks of each vault node. when an error occurs, we will # add a backoff delay, up to this maximum. max_check_interval: 30m # list of vault nodes to check, must include http/https, and a port (unless 80/443). vault_nodes: - https://1.2.3.4:8200 - https://1.2.3.5:8200 - https://1.2.3.6:8200 # unseal tokens necessary to unseal any of the given vaults in the above node # list. # # WARNING: do not put enough tokens in this list to unseal a vault instance on # its own. I.e. if vault requires 3 of 5 tokens, DO NOT PUT 3 TOKENS HERE. # the goal is to put fewer than the required number, but have more instances of # vault-unseal set up with the other missing tokens from the list. this ensures # that if the server is compromised, an attacker doesn't have all of the needed # tokens. # # i.e. 1 instance of vault-unseal on each of the three nodes, each with two # tokens. given A, B, and C tokens required, each instance should have the # following tokens: # * 1: AB # * 2: BC # * 3: AC unseal_tokens: - your-token - your-second-token # skip tls checks for the given vault instance. useful if your instance doesn't # have a certificate which has all of the server hostnames on it. tls_skip_verify: false # email notifications. setting this to false will disable all notifications. email: enabled: false hostname: smtp.hostname.com port: 25 username: your-username password: <PASSWORD> # address to send from. from_addr: your-alerts@hostname.com # addresses to send to. the first will be the TO, the second and on will be CC'd # onto the message. send_addrs: - <EMAIL> - <EMAIL> # Skip TLS certificate validation. tls_skip_verify: false # Require TLS for SMTP connections. # The default is opportunistic. mandatory_tls: false # notifications in vault-unseal queue up to prevent email spam (e.g. 20 alerts # in one email). this is the max allotted time an event can be queued before # the queue is sent as a notification. notify_max_elapsed: 10m # queue delay is the amount of time vault-unseal waits after the last received # notification, before it sends all of them in bulk. notify_queue_delay: 60s
example.vault-unseal.yaml
--- result: FAILURE url: http://wilson.ci.chef.co/job/chef-analytics-trigger-ad_hoc/114/ failure_category: code failure_cause: 'Builder libhugetlbfs: all: chef-analytics-build' timestamp: 2016-06-07 22:47:27 UTC duration: 44m38s triggered_by: sgtpluck active_duration: 44m28s parameters: GIT_REF: spool/update-sqitch-source EXPIRE_CACHE: true change: git_remote: <EMAIL>:chef/chef-analytics.git git_commit: <PASSWORD> project: opscode-analytics stages: chef-analytics-build: result: FAILURE failure_category: code failure_cause: 'Builder libhugetlbfs: all' url: http://wilson.ci.chef.co/job/chef-analytics-build/350/ duration: 44m3s runs: el-5: result: FAILURE failure_category: code failure_cause: Builder libhugetlbfs failed_in: step: Builder libhugetlbfs url: http://wilson.ci.chef.co/job/chef-analytics-build/architecture=x86_64,platform=el-5,project=opscode-analytics,role=builder/350/ duration: 33m49s el-6: result: FAILURE failure_category: code failure_cause: Builder libhugetlbfs failed_in: step: Builder libhugetlbfs url: http://wilson.ci.chef.co/job/chef-analytics-build/architecture=x86_64,platform=el-6,project=opscode-analytics,role=builder/350/ duration: 43m42s el-7: result: FAILURE failure_category: code failure_cause: Builder libhugetlbfs failed_in: step: Builder libhugetlbfs url: http://wilson.ci.chef.co/job/chef-analytics-build/architecture=x86_64,platform=el-7,project=opscode-analytics,role=builder/350/ duration: 23m6s ubuntu-12.04: result: FAILURE failure_category: code failure_cause: Builder libhugetlbfs failed_in: step: Builder libhugetlbfs url: http://wilson.ci.chef.co/job/chef-analytics-build/architecture=x86_64,platform=ubuntu-12.04,project=opscode-analytics,role=builder/350/ duration: 24m3s chef-analytics-trigger-ad_hoc: result: SUCCESS url: http://wilson.ci.chef.co/job/chef-analytics-trigger-ad_hoc/114/ duration: 24s
reports/wilson.ci.chef.co/job/chef-analytics-trigger-ad_hoc/114.yaml
join: description: Group players together in a multiroom setup. fields: master: description: Entity ID of the player that should become the master of the group. example: media_player.sound_room2 entity_id: description: Entity ID(s) of the player(s) that will connect to the master. example: media_player.sound_room1 unjoin: description: Unjoin a player or all players from the multiroom setup. fields: entity_id: description: Entity ID(s) of the player(s) that will be unjoined from the group. If this is a master, all slaves will be unjoined. example: media_player.sound_room2 snapshot: description: Prepare the player to play TTS and save its current state so it can be restored afterwards. Current playback will stop. fields: entity_id: description: Entity ID of the player of which the snapshot should be saved. example: media_player.sound_room1 switchinput: description: Switch player to stream input along with snapshotting, before playing TTS. Applies to players with multiple inputs like Line-in, Optical, etc. Useful to handle the situation when the first few seconds of the TTS message are cut off due to the latency of the player. Optional - if not specified, defaults to True. example: True restore: description: Restore the state of the player after playing TTS, from a saved snapshot. fields: entity_id: description: Entity ID of the player of which the snapshot should be restored. example: media_player.sound_room1 preset: description: Recall content preset from the device. fields: entity_id: description: Entity ID of the player for which the preset will be recalled. example: media_player.sound_room1 preset: description: Content preset number on the device example: 1 command: description: Execute various linkplay-specific commands on the player. fields: entity_id: description: Entity ID of the player on which the command will be executed. example: media_player.sound_room1 command: description: 'Reboot, Rescan, PromptEnable, PromptDisable, WriteDeviceNameToUnit: My Device Name, SetApSSIDName: NewWifiName, SetRandomWifiKey, TimeSync, RouterMultiroomEnable' example: Rescan notify: description: Displays the result of the command as a persistent notification in Lovelace UI (optional, defaults to True). Set to False during automations to avoid seeing these. example: False get_tracks: description: Retrieve the list of audio files available on the storage disk connected to the player, and fill an input_select with it. fields: entity_id: description: Entity ID of the player for which the list of audio filenames should be retrieved. example: media_player.sound_room1 input_select: description: Entity ID of the input_select you want to populate with the audio track filenames retrieved from the device. example: input_select.tracks_room1 source: description: Optional - only 'USB' supported for now. example: USB play_track: description: Play a media track by name found in the tracks list. This should be used in conjunction with an input_select populated with items using the linkplay.get_tracks service. fields: entity_id: description: Entity ID of the player on which the playback will be executed. example: media_player.sound_room1 track: description: (Part of) The name of the track from the list example: 'Commodores - Machine Gun Extended Mix.mp3'
custom_components/linkplay/services.yaml
en: errors: not_found: title: Sorry but we couldn't find this page description: The page you are looking for does not exist. unprocessable: title: Unprocessable entity description: We were not able to process your request due to semantic errors. internal_server_error: title: Internal Server Error description: We track these errors automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing. user_groups: last_owner: Cannot remove the last group owner project_details: empty_result: empty result set returned from the Data Set repository rimrock: update: timeout: Timeout when updating %{user} computations (%{details}) internal: Internal rimrock error while updating %{user} computations (%{details}) activerecord: errors: models: service: uri: override: Given URI overrides an already existing URI overridden: Given URI is overridden by an already existing URI uri_aliases: format: One of the URI aliases has an invalid format overridden: One of the URI aliases is overridden by an already existing URI uniqueness: URI aliases are not unique urialiassame: URI cannot be the same as any of the aliases group: child_ids: cycle: Cycle is not allowed resource: pretty_path: wildcard: Path may contain a single wildcard character at the end project: create_dav403: Unable to create project file structure in Process File Store attributes: project_name: invalid: "contains illegal characters; allowed characters are letters, digits and ~_-." pipeline: create_dav403: Unable to create pipeline file structure in File Store api: destination_resource_exists: Destination resource already exists source_policy_missing: Source policy does not exist for copying/moving
config/locales/errors.en.yml
--- # Copyright (c) 2016-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. - hosts: db become: true vars: db_name: endagaweb_dev db_user: endaga_dev db_password: <PASSWORD> ansible_ssh_pipelining: true environment: LANG: en_US.UTF-8 LANGUAGE: en_US LC_CTYPE: en_US.UTF-8 LC_NUMERIC: en_US.UTF-8 LC_TIME: en_US.UTF-8 LC_COLLATE: en_US.UTF-8 LC_MONETARY: en_US.UTF-8 LC_MESSAGES: en_US.UTF-8 LC_PAPER: en_US.UTF-8 LC_NAME: en_US.UTF-8 LC_ADDRESS: en_US.UTF-8 LC_TELEPHONE: en_US.UTF-8 LC_MEASUREMENT: en_US.UTF-8 LC_IDENTIFICATION: en_US.UTF-8 tasks: - name: Add all the packages needed for operation apt: pkg={{ item }} state=present update_cache=yes with_items: - postgresql - postgresql-contrib - postgresql-9.3-postgis-2.1 - libpq-dev - python-psycopg2 - name: Add Endaga DB become_user: postgres postgresql_db: name={{ db_name }} state=present - name: Add Endaga DB user become_user: postgres postgresql_user: db={{ db_name }} name={{ db_user }} password={{ db_password }} priv=ALL role_attr_flags=CREATEDB - name: Add postgis support become_user: postgres postgresql_ext: name=postgis db={{ db_name }} - name: Enable postgis support on template1 become_user: postgres shell: psql -d template1 -c "CREATE EXTENSION IF NOT EXISTS postgis;" - name: Listen for remote connections lineinfile: dest=/etc/postgresql/9.3/main/postgresql.conf regexp="listen_address" line="listen_addresses = '*'" state=present - name: Allow remote hosts to connect using password auth lineinfile: dest=/etc/postgresql/9.3/main/pg_hba.conf regexp="0.0.0.0" line="host all all 0.0.0.0/0 md5" state=present - name: Restart Postgres service: name=postgresql state=restarted
cloud/ansible/postgres.dev.yml
version: '3.7' services: athena_manager: container_name: athena_manager ports: - 5000 build: context: . dockerfile: Dockerfile.prod volumes: - ./:/usr/src/app networks: - elastic - ingress athena_agent: container_name: athena_agent ports: - 5000 build: context: . dockerfile: Dockerfile.prod command: ["cluster", "--join", "--addr", "athena_manager:5000", "--token", "1", "--foreground"] volumes: - ./:/usr/src/app depends_on: - athena_manager - elasticsearch networks: - elastic - ingress elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:7.4.0 container_name: elasticsearch1 environment: - node.name=elasticsearch1 - cluster.name=docker-cluster - cluster.initial_master_nodes=elasticsearch1 - bootstrap.memory_lock=true - "ES_JAVA_OPTS=-Xms256M -Xmx256M" - http.cors.enabled=true - http.cors.allow-origin=* - network.host=_eth0_ ulimits: nproc: 65535 memlock: soft: -1 hard: -1 cap_add: - ALL volumes: - type: volume source: logs target: /var/log - type: volume source: esdata1 target: /usr/share/elasticsearch/data networks: - elastic - ingress ports: - 9200:9200 - 9300:9300 logging: driver: none elasticsearch2: image: docker.elastic.co/elasticsearch/elasticsearch:7.4.0 container_name: elasticsearch2 depends_on: - elasticsearch environment: - node.name=elasticsearch2 - cluster.name=docker-cluster - cluster.initial_master_nodes=elasticsearch1 - bootstrap.memory_lock=true - "ES_JAVA_OPTS=-Xms256M -Xmx256M" - "discovery.zen.ping.unicast.hosts=elasticsearch1" - http.cors.enabled=true - http.cors.allow-origin=* - network.host=_eth0_ ulimits: nproc: 65535 memlock: soft: -1 hard: -1 cap_add: - ALL volumes: - type: volume source: logs target: /var/log - type: volume source: esdata2 target: /usr/share/elasticsearch/data networks: - elastic - ingress ports: - 9201:9200 logging: driver: none kibana: image: docker.elastic.co/kibana/kibana:7.4.0 container_name: kibana depends_on: - elasticsearch - elasticsearch2 - filebeat environment: SERVER_NAME: localhost ELASTICSEARCH_URL: http://elasticsearch1:9200/ ports: - 5601:5601 volumes: - type: volume source: logs target: /var/log ulimits: nproc: 65535 memlock: soft: -1 hard: -1 cap_add: - ALL networks: - elastic - ingress logging: driver: none filebeat: image: docker.elastic.co/beats/filebeat:7.4.0 command: --strict.perms=false depends_on: - elasticsearch - elasticsearch2 environment: - setup.kibana.host=kibana:5601 - output.elasticsearch.hosts=["elasticsearch:9200"] ports: - 9000:9000 volumes: - /var/lib/docker/containers:/var/lib/docker/containers:ro - /var/run/docker.sock:/var/run/docker.sock networks: - elastic - ingress logging: driver: none volumes: esdata1: esdata2: logs: networks: elastic: ingress:
docker-compose.yaml
--- - hosts: tag_ansible_role_netbox become: yes pre_tasks: - name: Enable Subscription Manager Repos rhsm_repository: name: "{{ item }}" loop: - rhel-*-optional-rpms - rhel-*-extras-rpms - name: Enable/Install EPEL Repo yum: name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - name: Enable/Install PostgreSQL Repo yum: name: https://download.postgresql.org/pub/repos/yum/10/redhat/rhel-7-x86_64/pgdg-redhat10-10-2.noarch.rpm roles: - geerlingguy.postgresql - lae.netbox - nginxinc.nginx post_tasks: - name: Allow NGINX to network connect seboolean: name: httpd_can_network_connect state: yes persistent: yes - name: Remove default server config from NGINX file: state: absent path: /etc/nginx/conf.d/default.conf notify: "(Handler: All OSs) Reload NGINX" - name: Ensure SSL directory for NGINX exists file: state: directory path: /etc/nginx/ssl mode: 0640 - name: "Place SSL cert for {{ ansible_fqdn }} in NGINX directory" copy: content: "{{ kpsc_ssl_cert }}" dest: "/etc/nginx/ssl/{{ nginx_server_kpsc }}_cert.pem" notify: "(Handler: All OSs) Reload NGINX" - name: "Place SSL key for {{ ansible_fqdn }} in NGINX directory" copy: content: "{{ kpsc_ssl_key }}" dest: "/etc/nginx/ssl/{{ nginx_server_kpsc }}_key.pem" notify: "(Handler: All OSs) Reload NGINX" - name: Place SSL cert for netbox.kovarus.com in NGINX directory copy: content: "{{ kovarus_ssl_cert }}" dest: "/etc/nginx/ssl/netbox.kovarus.com_cert.pem" notify: "(Handler: All OSs) Reload NGINX" - name: Place SSL key for netbox.kovarus.com in NGINX directory copy: content: "{{ kovarus_ssl_key }}" dest: /etc/nginx/ssl/netbox.kovarus.com_key.pem notify: "(Handler: All OSs) Reload NGINX" - name: Create proxy.conf for NGINX template: src: roles_files/netbox_nginx/proxy.conf.j2 dest: /etc/nginx/conf.d/proxy.conf notify: "(Handler: All OSs) Reload NGINX" - name: Create redirect.conf for NGINX copy: src: roles_files/netbox_nginx/redirect.conf dest: /etc/nginx/conf.d/redirect.conf notify: "(Handler: All OSs) Reload NGINX"
netbox.yml
common: fp16: true log_format: json log_interval: 200 checkpoint: save_interval_updates: 2000 keep_interval_updates: 5 no_epoch_checkpoints: true task: _name: audio_pretraining data: ??? max_sample_size: 320000 min_sample_size: 12000 normalize: true dataset: batch_size: 4 num_workers: 6 max_tokens: 800000 valid_subset: dev skip_invalid_size_inputs_valid_test: true distributed_training: distributed_world_size: 128 ddp_backend: legacy_ddp criterion: _name: wav2vec infonce: true log_keys: ["prob_perplexity","code_perplexity","temp"] loss_weights: [0.1, 0] optimization: max_update: 1000000 lr: [0.0002] optimizer: _name: adam adam_betas: (0.9,0.98) adam_eps: 1e-06 weight_decay: 0.01 lr_scheduler: _name: polynomial_decay warmup_updates: 25000 model: # _name: wav2vec2 _name: conformer_based_wav2vec2 is_mel_spectrograms: true quantize_targets: true extractor_mode: layer_norm layer_norm_first: true final_dim: 768 latent_temp: [2.0,0.1,0.999995] encoder_layerdrop: 0.00 dropout_input: 0.0 dropout_features: 0.0 dropout: 0.0 attention_dropout: 0.0 conv_bias: true num_negatives: 100 logit_temp: 0.1 max_source_positions: 3000 mask_prob: 0.65 mask_selection: static mask_other: 0 no_mask_overlap: false mask_min_space: 1 mask_channel_length: 10 mask_channel_prob: 0.0 mask_channel_before: false mask_channel_selection: static mask_channel_other: 0 no_mask_channel_overlap: false mask_channel_min_space: 1 mask_length: 10 conv_pos: 128 conv_pos_groups: 16 codebook_negatives: 0 latent_dim: 0 latent_vars: 320 latent_groups: 2 quantizer_depth: 1 quantizer_factor: 3 target_glu: false same_quantizer: false negatives_from_everywhere: false cross_sample_negatives: 0 activation_dropout: 0 encoder_layers: 8 encoder_embed_dim: 1024 encoder_ffn_embed_dim: 4096 encoder_attention_heads: 8 input_feat_per_channel: 1 feature_grad_mult: 1.0 quantize_input: false decoder_normalize_before: True encoder_normalize_before: True conv_kernel_sizes: 5,5 conv_channels: 512 activation_fn: swish conv_feature_layers: '[(1024, 10, 5)] + [(1024, 3, 2)] * 4 + [(1024,2,2)] + [(1024,2,2)]' macaron_style: True use_cnn_module: True cnn_module_kernel: 31 quant_noise_pq: 0 encoder_attention_type: selfattn
examples/conformer_based_wav2vec2/config/pretraining/conformer_based_wav2vec2.yaml
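In the config above, data: ??? uses OmegaConf's mandatory-value marker, so Hydra refuses to start training until a manifest directory is supplied; the same value can also be passed on the command line as task.data=/path/to/manifests. A minimal override sketch; the path and manifest layout are assumptions:

# local override (hypothetical path)
task:
  data: /path/to/manifests   # directory assumed to hold the train/dev .tsv manifests (valid_subset is dev above)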
countraw: 3 countrows: 1 filename: 194283.fec header: {} rows: - attempt: - name: FORM TYPE number: '1' value: F2N - name: FILER FEC CAND ID number: '2' value: H6CA08132 - name: CANDIDATE NAME number: '3' value: '' - name: STREET 1 number: '4' value: 4 JEAN STREET - name: STREET 2 number: '5' value: SUITE 4 - name: CITY number: '6' value: VALLEY SPRINGS - name: STATE number: '7' value: CA - name: ZIP number: '8' value: '95252' - name: PTY/CODE number: '9' value: REP - name: CAN/OFFICE number: '10' value: H - name: CAN/STATE number: '11' value: CA - name: CAN/DIST number: '12' value: 08 - name: YEAR OF ELECTION 1900-2999 number: '13' value: '2006' - name: FEC COMMITTEE ID NUMBER (PCC) number: '14' value: C00413773 - name: COMMITTEE NAME (PCC) number: '15' value: CHAVEZ-O'CHOA FOR CONGRESS - name: STREET 1 number: '16' value: 4 JEAN ST - name: STREET 2 number: '17' value: SUITE 4 - name: CITY number: '18' value: VALLEY SPRINGS - name: STATE number: '19' value: AL - name: ZIP number: '20' value: '95252' - name: FEC COMMITTEE ID NUMBER (Auth) number: '21' value: C00407940 - name: COMMITTEE NAME (Auth) number: '22' value: MAN IN THE ARENA - name: STREET 1 number: '23' value: 1155 - 15TH STREET - name: STREET 2 number: '24' value: ' NW' - name: CITY number: '25' value: SUITE 614 - name: STATE number: '26' value: WASHINGTON - name: ZIP number: '27' value: DC - name: NAME/CAN (as signed) number: '28' value: '20005' - name: Signed number: '29' value: CHAVEZ-O'CHOA^BRIAN^MR^ - name: PRI PERSONAL FUNDS DECLARED number: '30' value: '20051220' - name: GEN PERSONAL FUNDS DECLARED number: '31' value: '0.00' - name: CANDIDATE LAST NAME number: '32' value: '0.00' - name: CANDIDATE FIRST NAME number: '33' value: CHAVEZ-O'CHOA - name: CANDIDATE MIDDLE NAME number: '34' value: BRIAN - name: CANDIDATE PREFIX number: '35' value: '' - name: CANDIDATE SUFFIX number: '36' value: MR - value: '' expected_count: 36 input: F2N,H6CA08132,,4 JEAN STREET,SUITE 4,VALLEY SPRINGS,CA,95252,REP,H,CA,08,2006,C00413773,CHAVEZ-O'CHOA FOR CONGRESS,4 JEAN ST,SUITE 4,VALLEY SPRINGS,AL,95252,C00407940,MAN IN THE ARENA,"1155 - 15TH STREET, NW",SUITE 614,WASHINGTON,DC,20005,CHAVEZ-O'CHOA^BRIAN^MR^,20051220,0.00,0.00,CHAVEZ-O'CHOA,BRIAN,,MR, input_count: 37 record: <fec.version.v5_2.F2.Records instance at 0x1dd0908> result: [] sourceurl: !!python/unicode 20051220 type: single
20051220/194283.fec.yml
--- # author: lework - name: acl | Generate the Consul bootstrap token. shell: > source {{ consul_env_file }} && consul acl bootstrap | awk '/SecretID.*/ {print $2}' environment: - PATH: "{{ consul_bin_path }}:{{ ansible_env.PATH }}" register: consul_bootstrap_token changed_when: false run_once: true until: consul_bootstrap_token.stdout != '' retries: 10 delay: 5 - block: - name: acl | Set the bootstrap token as an environment variable. lineinfile: dest: "{{ consul_env_file }}" create: yes line: export CONSUL_HTTP_TOKEN={{ consul_bootstrap_token.stdout }} run_once: false - name: acl | Generate the Consul node ACL policy. shell: | source {{ consul_env_file }} && \ consul acl policy create \ -name node-policy \ -description 'Node Token Policy' \ -rules -<<EOF agent_prefix "" { policy = "write" } node_prefix "" { policy = "write" } service_prefix "" { policy = "read" } session_prefix "" { policy = "read" } EOF - name: acl | Create the node token. shell: > source {{ consul_env_file }} && consul acl token create -description 'node token' -policy-name node-policy | awk '/SecretID.*/ {print $2}' register: consul_node_token - name: acl | Add the node token. shell: > source {{ consul_env_file }} && consul acl set-agent-token agent "{{ consul_node_token.stdout }}" when: - consul_node_token.stdout | length > 0 - name: acl | Generate the Consul agent ACL policy. shell: | source {{ consul_env_file }} && \ consul acl policy create \ -name agent-policy \ -description "Agent Token Policy" \ -rules -<<EOF node_prefix "" { policy = "write" } service_prefix "" { policy = "read" } EOF - name: acl | Create the agent token. shell: > source {{ consul_env_file }} && consul acl token create -description 'Agent Token' -policy-name agent-policy | awk '/SecretID.*/ {print $2}' register: consul_agent_token - name: acl | Show bootstrap token. debug: "msg={{ consul_bootstrap_token.stdout }}" - name: acl | Show node token. debug: "msg={{ consul_node_token.stdout }}" - name: acl | Show agent token. debug: "msg={{ consul_agent_token.stdout }}" run_once: true environment: - PATH: "{{ consul_bin_path }}:{{ ansible_env.PATH }}" when: consul_bootstrap_token.stdout | length > 0
consul/tasks/4.acl.yml
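These tasks lean on two variables defined elsewhere in the role, consul_env_file and consul_bin_path, and they rely on source, which assumes /bin/sh is bash-compatible (true on RHEL-family hosts). A defaults sketch with hypothetical values:

# defaults/main.yml (hypothetical values)
consul_bin_path: /usr/local/bin             # prepended to PATH for every consul CLI call above
consul_env_file: /etc/profile.d/consul.sh   # sourced so CONSUL_HTTP_TOKEN is available to later tasks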
items: - uid: Google::Cloud::Vision::V1::Likelihood children: - Google::Cloud::Vision::V1::Likelihood::UNKNOWN - Google::Cloud::Vision::V1::Likelihood::VERY_UNLIKELY - Google::Cloud::Vision::V1::Likelihood::UNLIKELY - Google::Cloud::Vision::V1::Likelihood::POSSIBLE - Google::Cloud::Vision::V1::Likelihood::LIKELY - Google::Cloud::Vision::V1::Likelihood::VERY_LIKELY fullName: Google::Cloud::Vision::V1::Likelihood langs: - ruby module: google-cloud-vision-v1 id: Likelihood summary: "A bucketized representation of likelihood, which is intended to give clients\nhighly stable results across model upgrades." type: "module" name: Google::Cloud::Vision::V1::Likelihood example: [] - uid: Google::Cloud::Vision::V1::Likelihood::LIKELY fullName: Google::Cloud::Vision::V1::Likelihood::LIKELY name: LIKELY parent: Google::Cloud::Vision::V1::Likelihood langs: - ruby module: google-cloud-vision-v1 id: LIKELY summary: "<b>value: </b>4<br>It is likely." type: const - uid: Google::Cloud::Vision::V1::Likelihood::POSSIBLE fullName: Google::Cloud::Vision::V1::Likelihood::POSSIBLE name: POSSIBLE parent: Google::Cloud::Vision::V1::Likelihood langs: - ruby module: google-cloud-vision-v1 id: POSSIBLE summary: "<b>value: </b>3<br>It is possible." type: const - uid: Google::Cloud::Vision::V1::Likelihood::UNKNOWN fullName: Google::Cloud::Vision::V1::Likelihood::UNKNOWN name: UNKNOWN parent: Google::Cloud::Vision::V1::Likelihood langs: - ruby module: google-cloud-vision-v1 id: UNKNOWN summary: "<b>value: </b>0<br>Unknown likelihood." type: const - uid: Google::Cloud::Vision::V1::Likelihood::UNLIKELY fullName: Google::Cloud::Vision::V1::Likelihood::UNLIKELY name: UNLIKELY parent: Google::Cloud::Vision::V1::Likelihood langs: - ruby module: google-cloud-vision-v1 id: UNLIKELY summary: "<b>value: </b>2<br>It is unlikely." type: const - uid: Google::Cloud::Vision::V1::Likelihood::VERY_LIKELY fullName: Google::Cloud::Vision::V1::Likelihood::VERY_LIKELY name: VERY_LIKELY parent: Google::Cloud::Vision::V1::Likelihood langs: - ruby module: google-cloud-vision-v1 id: VERY_LIKELY summary: "<b>value: </b>5<br>It is very likely." type: const - uid: Google::Cloud::Vision::V1::Likelihood::VERY_UNLIKELY fullName: Google::Cloud::Vision::V1::Likelihood::VERY_UNLIKELY name: VERY_UNLIKELY parent: Google::Cloud::Vision::V1::Likelihood langs: - ruby module: google-cloud-vision-v1 id: VERY_UNLIKELY summary: "<b>value: </b>1<br>It is very unlikely." type: const references: []
testdata/ruby/obj/api/Google-Cloud-Vision-V1-Likelihood.yml
azurerm_spatial_anchors_account: status: ASSESS subcategory: Mixed Reality layout: azurerm page_title: 'Azure Resource Manager: azurerm_spatial_anchors_account' description: Manages an Azure Spatial Anchors Account. arguments: name: description: Specifies the name of the Spatial Anchors Account. Changing this forces a new resource to be created. Must be globally unique. required: false policy: '' notes: '' resource_group_name: description: The name of the resource group in which to create the Spatial Anchors Account. required: false policy: '' notes: '' location: description: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created. required: false policy: '' notes: '' tags: description: A mapping of tags to assign to the resource. required: false policy: '' notes: '' attributes: id: description: The ID of the Spatial Anchors Account. timeouts: create: description: Used when creating the Spatial Anchors Account. required: false timeout: 30 update: description: Used when updating the Spatial Anchors Account. required: false timeout: 30 read: description: Used when retrieving the Spatial Anchors Account. required: false timeout: 5 delete: description: Used when deleting the Spatial Anchors Account. required: false timeout: 30 usage: !!binary | Q25KbGMyOTFjbU5sSUNKaGVuVnlaWEp0WDNKbGMyOTFjbU5sWDJkeWIzVndJaUFpWlhoaGJYQnNa U0lnZXdvZ0lHNWhiV1VnSUNBZ0lEMGdJbVY0WVcxd2JHVXRjbVZ6YjNWeVkyVnpJZ29nSUd4dlky RjBhVzl1SUQwZ0ltNXZjblJvWlhWeWIzQmxJZ3A5Q2dweVpYTnZkWEpqWlNBaVlYcDFjbVZ5YlY5 emNHRjBhV0ZzWDJGdVkyaHZjbk5mWVdOamIzVnVkQ0lnSW1WNFlXMXdiR1VpSUhzS0lDQnVZVzFs SUNBZ0lDQWdJQ0FnSUNBZ0lDQWdJRDBnSW1WNFlXMXdiR1VpQ2lBZ2JHOWpZWFJwYjI0Z0lDQWdJ Q0FnSUNBZ0lDQTlJR0Y2ZFhKbGNtMWZjbVZ6YjNWeVkyVmZaM0p2ZFhBdVpYaGhiWEJzWlM1c2Iy TmhkR2x2YmdvZ0lISmxjMjkxY21ObFgyZHliM1Z3WDI1aGJXVWdQU0JoZW5WeVpYSnRYM0psYzI5 MWNtTmxYMmR5YjNWd0xtVjRZVzF3YkdVdWJtRnRaUXA5Q2c9PQ== import: !!binary | Q2dwVGNHRjBhV0ZzSUVGdVkyaHZjbk1nUVdOamIzVnVkQ0JqWVc0Z1ltVWdhVzF3YjNKMFpXUWdk WE5wYm1jZ2RHaGxJR0J5WlhOdmRYSmpaU0JwWkdBc0lHVXVaeTRLQ2c9PQ== hcl_url: !!binary | YUhSMGNITTZMeTluYVhSb2RXSXVZMjl0TDNSbGNuSmhabTl5YlMxd2NtOTJhV1JsY25NdmRHVnlj bUZtYjNKdExYQnliM1pwWkdWeUxXRjZkWEpsY20wdllteHZZaTl0WVhOMFpYSXZkMlZpYzJsMFpT OWtiMk56TDNJdmMzQmhkR2xoYkY5aGJtTm9iM0p6WDJGalkyOTFiblF1YUhSdGJDNXRZWEpyWkc5 M2JnPT0=
config/azurerm/azurerm_spatial_anchors_account.yml
spring: main: web-application-type: NONE jackson: serialization: indent_output: true dollar-sign: "$" logging: pattern: console: "%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(%5p) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%20.20t]){faint} %clr(%-50.50logger{49}){cyan} %clr(:){faint} %m%n%wEx" web: game: base-path: /index.php/1001 version-path: /Index/version cdn: base-path: /data latest-versions-url: https://raw.githubusercontent.com/neko-gg/gfl-data/senpai/latest-versions.json client: clients: - region: us game-host: http://gf-game.sunborngame.com cdn-host: http://gfus-cdn.sunborngame.com asset-host: http://gfus-cdn.sunborngame.com dump-version: 20800 - region: jp game-host: http://gfjp-game.sunborngame.com cdn-host: http://gfjp-cdn.sunborngame.com asset-host: http://gfjp-cdn.sunborngame.com dump-version: 20800 - region: kr game-host: http://gf-game.girlfrontline.co.kr cdn-host: http://gfkrcdn.imtxwy.com asset-host: http://sn-list.girlfrontline.co.kr dump-version: 20900 - region: tw game-host: http://sn-game.txwy.tw cdn-host: http://sncdn.imtxwy.com asset-host: http://sn-list.txwy.tw dump-version: 20900 file: charset: UTF-8 base-path: ./target/gfl-data output-path: ${file.base-path}/out json: extension: .json python: executable: python stc: path: ${file.base-path}/${dollar-sign}{region}/stc extract-path: ${file.base-path}/${dollar-sign}{region}/stc/extract output-path: ${file.output-path}/${dollar-sign}{region}/stc catchdata-encryption-key: c88d016d261eb80ce4d6e41a510d4048 catchdata-line-separator: "\n" catchdata-output-path: ${file.stc.output-path}/catchdata mapping-path: classpath:stc-mapping/${dollar-sign}{region}/*.json skip: - 5122.stc - 5123.stc asset: key: <KEY> iv: M9lp+7j2Jdwqr+Yj1h+A asset-extractor-path: ${file.base-path}/asset_extractor.py asset-extractor-resource-path: classpath:asset_extractor.py path: ${file.base-path}/${dollar-sign}{region}/asset output-path: ${file.output-path}/${dollar-sign}{region}/asset res-data-resource-path: ${file.asset.path}/res-data res-data-extracted-path: ${file.asset.res-data-resource-path}/assets/resources/resdata.asset res-data-output-path: ${file.asset.output-path}/res-data${file.json.extension} extension: .ab lua: input-extension: .lua.txt output-extension: .lua encryption-key: <KEY> asset-bundles: generic-asset-bundle: asset-configs: - asset-name: asset_textavg asset-path: ${file.asset.path}/asset-textavg asset-extracted-name: asset_textavg.ab asset-resources-path: ${file.asset.path}/asset-textavg/assets/resources/dabao/avgtxt asset-output-path: ${file.asset.output-path}/asset-textavg - asset-name: asset_texttable asset-path: ${file.asset.path}/asset-texttable asset-extracted-name: asset_texttable.ab asset-resources-path: ${file.asset.path}/asset-texttable/assets/resources/dabao/table asset-output-path: ${file.asset.output-path}/asset-texttable asset-textes: asset-config: asset-name: asset_textes asset-path: ${file.asset.path}/asset-textes asset-extracted-name: asset_textes.ab asset-resources-path: ${file.asset.path}/asset-textes/assets/resources/dabao asset-output-path: ${file.asset.output-path}/asset-textes dump-data-version-path: ${file.output-path}/latest-versions${file.json.extension} git: remote-url: https://github.com/neko-gg/gfl-data.git directory: ${file.base-path}/gfl-data-repo git-directory: ${git.directory}/.git token: ${API_TOKEN_GITHUB} username: gfl-data-miner email: <EMAIL> latest-versions-commit-message: update latest versions region-commit-message: "[${dollar-sign}{region}] client ${dollar-sign}{client-version} | 
ab ${dollar-sign}{ab-version} | data ${dollar-sign}{data-version}" string-substitutor: prefix: "${" suffix: "}" values: region: region client-version: client-version ab-version: ab-version data-version: data-version
src/main/resources/application.yml
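One detail in this file deserves a note: dollar-sign: "$" exists so that ${dollar-sign}{region} survives Spring's own placeholder resolution. At boot, Spring replaces ${dollar-sign} with a literal $, leaving ${region} in the value; the string-substitutor block at the bottom (prefix "${", suffix "}") then fills region and the version keys at runtime. A sketch of the two stages for file.stc.path:

# stage 1, Spring placeholder resolution at startup:
#   ./target/gfl-data/${region}/stc
# stage 2, runtime StringSubstitutor with e.g. region=us (hypothetical value):
#   ./target/gfl-data/us/stc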
--- - name: replication | Managing Postgresql Replication User postgresql_user: name: "{{ postgresql_replication['user'] }}" role_attr_flags: "REPLICATION" state: "present" become: true become_user: "postgres" when: inventory_hostname == postgresql_replication['master'] - name: replication | Checking If Replication Is Already Setup stat: path: "{{ postgresql_data_dir }}/recovery.conf" register: "_pg_replication_setup_check" become: true when: inventory_hostname != postgresql_replication['master'] - name: replication | Restarting Postgresql On Master (If Reconfigured) service: name: "postgresql" state: "restarted" become: true register: "_pg_restarted" when: > inventory_hostname == postgresql_replication['master'] and (_pg_conf_reconfigured['changed'] or _pg_hba_conf_reconfigured['changed']) - name: replication | Stopping Postgresql On Slaves service: name: "postgresql" state: "stopped" become: true when: > inventory_hostname != postgresql_replication['master'] and not _pg_replication_setup_check['stat']['exists'] - name: replication | Setting Master Replication IP set_fact: postgresql_master_ip: "{{ hostvars[postgresql_replication['master']]['ansible_' + postgresql_replication['interface']]['ipv4']['address'] }}" - name: replication | Waiting For Postgresql To Be Running wait_for: port: "{{ postgresql_listen_port }}" delay: 20 when: > inventory_hostname == postgresql_replication['master'] and _pg_restarted['changed'] - name: replication | Backing Up Master Databases To Slaves shell: "pg_basebackup -h {{ postgresql_master_ip }} -U {{ postgresql_replication['user'] }} -Ft -x -D - > /tmp/backup.tar" become: true become_user: "postgres" when: > inventory_hostname != postgresql_replication['master'] and not _pg_replication_setup_check['stat']['exists'] - name: replication | Purging Existing Data On Slaves file: path: "{{ postgresql_data_dir }}" state: "absent" become: true when: > inventory_hostname != postgresql_replication['master'] and not _pg_replication_setup_check['stat']['exists'] - name: replication | Recreating Data Directory On Slaves file: path: "{{ postgresql_data_dir }}" state: "directory" owner: "postgres" group: "postgres" mode: "u=rwx,g=,o=" become: true when: > inventory_hostname != postgresql_replication['master'] and not _pg_replication_setup_check['stat']['exists'] - name: replication | Restoring Master Backup unarchive: src: "/tmp/backup.tar" dest: "{{ postgresql_data_dir }}/" owner: "postgres" group: "postgres" remote_src: true become: true when: > inventory_hostname != postgresql_replication['master'] and not _pg_replication_setup_check['stat']['exists'] - name: replication | Configuring Recovery template: src: "var/lib/postgresql/{{ postgresql_version }}/main/recovery.conf.j2" dest: "{{ postgresql_data_dir }}/recovery.conf" owner: "postgres" group: "postgres" become: true notify: "restart postgresql" when: > inventory_hostname != postgresql_replication['master'] and not _pg_replication_setup_check['stat']['exists']
ansible-stackstorm/Vagrant/roles/ansible-postgresql/tasks/replication.yml
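The tasks above key everything off a postgresql_replication dictionary defined outside this file. A minimal sketch of the shape they expect; all values are hypothetical:

postgresql_replication:
  master: pg-master-01   # inventory_hostname of the primary node
  user: replicator       # replication role created on the master
  interface: eth0        # interface whose IPv4 address slaves use for pg_basebackup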
uid: "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule" fullName: "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule" name: "SharedAccessAuthorizationRule" nameWithType: "SharedAccessAuthorizationRule" inheritances: - "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />" - "<xref href=\"com.microsoft.azure.servicebus.management.AuthorizationRule\" data-throw-if-not-resolved=\"False\" />" inheritedMembers: - "com.microsoft.azure.servicebus.management.AuthorizationRule.getClaimType()" - "com.microsoft.azure.servicebus.management.AuthorizationRule.getCreatedTime()" - "com.microsoft.azure.servicebus.management.AuthorizationRule.getKeyName()" - "com.microsoft.azure.servicebus.management.AuthorizationRule.getModifiedTime()" - "com.microsoft.azure.servicebus.management.AuthorizationRule.getRights()" - "com.microsoft.azure.servicebus.management.AuthorizationRule.setKeyName(java.lang.String)" - "com.microsoft.azure.servicebus.management.AuthorizationRule.setRights(java.util.List<com.microsoft.azure.servicebus.management.AccessRights>)" - "java.lang.Object.clone()" - "java.lang.Object.equals(java.lang.Object)" - "java.lang.Object.finalize()" - "java.lang.Object.getClass()" - "java.lang.Object.hashCode()" - "java.lang.Object.notify()" - "java.lang.Object.notifyAll()" - "java.lang.Object.toString()" - "java.lang.Object.wait()" - "java.lang.Object.wait(long)" - "java.lang.Object.wait(long,int)" syntax: "public class SharedAccessAuthorizationRule extends AuthorizationRule" constructors: - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.SharedAccessAuthorizationRule(java.lang.String,java.lang.String,java.lang.String,java.util.List<com.microsoft.azure.servicebus.management.AccessRights>)" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.SharedAccessAuthorizationRule(java.lang.String,java.lang.String,java.util.List<com.microsoft.azure.servicebus.management.AccessRights>)" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.SharedAccessAuthorizationRule(java.lang.String,java.util.List<com.microsoft.azure.servicebus.management.AccessRights>)" methods: - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.equals(java.lang.Object)" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.getClaimType()" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.getKeyName()" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.getPrimaryKey()" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.getRights()" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.getSecondaryKey()" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.hashCode()" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.setKeyName(java.lang.String)" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.setPrimaryKey(java.lang.String)" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.setRights(java.util.List<com.microsoft.azure.servicebus.management.AccessRights>)" - "com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.setSecondaryKey(java.lang.String)" type: "class" metadata: {} package: "com.microsoft.azure.servicebus.management" artifact: com.microsoft.azure:azure-servicebus:3.6.1
docs-ref-autogen/com.microsoft.azure.servicebus.management.SharedAccessAuthorizationRule.yml
jobs: build: name: build runs-on: ${{ matrix.os }} steps: - if: ${{ matrix.os == 'windows-2019' }} uses: actions/cache@v2 with: path: ~\AppData\Local\pip\Cache key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }} restore-keys: | ${{ runner.os }}-pip- - if: ${{ matrix.os == 'macos-10.15' }} uses: actions/cache@v2 with: path: ~/Library/Caches/pip key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} restore-keys: | ${{ runner.os }}-pip- - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: architecture: x64 python-version: ${{ matrix.python-version }} - uses: actions/setup-java@v1 with: java-version: '14.0.1' # The JDK version to make available on the path. java-package: jdk architecture: x64 - run: | pip install --upgrade pip setuptools wheel pip install numpy==1.20.1 pip install pyinstaller==4.2 git clone https://github.com/CellProfiler/distribution.git - env: LDFLAGS: -L/usr/local/opt/openssl/lib CERTIFICATE_OSX_APPLICATION: ${{ secrets.CERTIFICATE_OSX_APPLICATION }} CERTIFICATE_PASSWORD: ${{ secrets.CERTIFICATE_PASSWORD }} if: ${{ matrix.os == 'macos-10.15' }} run: | sed -i '' 's/4.0.0/4.2.1/' Info.plist brew install mysql make chmod +x add-osx-certificate.sh && ./add-osx-certificate.sh chmod +x osx-codesign.sh && ./osx-codesign.sh ditto -ck --keepParent --rsrc --sequesterRsrc ./dist/CellProfiler.app ./dist/CellProfiler-macOS-4.2.1.zip working-directory: ./distribution/macos - env: JDK_HOME: C:\hostedtoolcache\windows\jdk\14.0.1\x64 if: ${{ matrix.os == 'windows-2019' }} run: | pip install cython pip install mysqlclient==1.4.6 pip install --editable . - if: ${{ matrix.os == 'windows-2019' }} run: | pyinstaller distribution/windows/cellprofiler.spec iscc /dMyAppVersion="4.2.1" "distribution/windows/cellprofiler.iss" - if: ${{ matrix.os == 'macos-10.15' }} uses: actions/upload-artifact@v1 with: name: CellProfiler-macOS-4.2.1.zip path: ./distribution/macos/dist/CellProfiler-macOS-4.2.1.zip - if: ${{ matrix.os == 'windows-2019' }} uses: actions/upload-artifact@v2 with: name: CellProfiler-Windows-4.2.1.exe path: ./distribution/windows/Output/CellProfiler-Windows-4.2.1.exe strategy: matrix: os: [macos-10.15, windows-2019] python-version: [ "3.8" ] upload: name: upload needs: build runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} id: create_release uses: actions/create-release@v1 with: draft: true prerelease: true release_name: ${{ github.ref }} tag_name: ${{ github.ref }} - uses: actions/download-artifact@v1 with: name: CellProfiler-macOS-4.2.1.zip path: ./ - uses: actions/download-artifact@v1 with: name: CellProfiler-Windows-4.2.1.exe path: ./ - env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} uses: actions/upload-release-asset@v1 with: asset_content_type: application/zip asset_name: CellProfiler-macOS-4.2.1.zip asset_path: /home/runner/work/CellProfiler/CellProfiler/CellProfiler-macOS-4.2.1.zip upload_url: ${{ steps.create_release.outputs.upload_url }} - env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} uses: actions/upload-release-asset@v1 with: asset_content_type: application/exe asset_name: CellProfiler-Windows-4.2.1.exe asset_path: /home/runner/work/CellProfiler/CellProfiler/CellProfiler-Windows-4.2.1.exe upload_url: ${{ steps.create_release.outputs.upload_url }} name: create-release on: push: tags: - "v*"
out/CellProfiler/CellProfiler/.github_workflows_release.yml
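The version string 4.2.1 is hard-coded in many places in this workflow (the sed invocation, the iscc define, and every artifact name and asset path). One common way to cut the drift risk, sketched here as a suggestion rather than what the workflow currently does, is a single workflow-level env entry referenced everywhere else:

env:
  CP_VERSION: "4.2.1"   # hypothetical variable; the literals above would become ${{ env.CP_VERSION }}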
uid: management.azure.com.automation.agentregistrationinformation.regeneratekey name: Regenerate Key service: Automation groupName: Agent Registration Information apiVersion: 2018-01-15 summary: Regenerate a primary or secondary agent registration key consumes: - application/json produces: - application/json - text/plain paths: - content: POST https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/agentRegistrationInformation/regenerateKey?api-version=2018-01-15 uriParameters: - name: subscriptionId in: path isRequired: true description: Gets subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. types: - uid: string - name: resourceGroupName in: path isRequired: true description: Name of an Azure Resource group. types: - uid: string pattern: ^[-\w\._]+$ - name: automationAccountName in: path isRequired: true description: The name of the automation account. types: - uid: string - name: api-version in: query isRequired: true description: Client Api Version. types: - uid: string responses: - name: 200 OK description: OK types: - uid: AgentRegistration - name: Other Status Codes description: Automation error response describing why the operation failed. types: - uid: ErrorResponse requestBody: - name: default parameters: - name: keyName in: body isRequired: true description: Gets or sets the agent registration key name - primary or secondary. types: - uid: AgentRegistrationKeyName requestHeader: [] definitions: - name: AgentRegistration description: Definition of the agent registration information type. kind: object properties: - name: dscMetaConfiguration description: Gets or sets the dsc meta configuration. types: - uid: string - name: endpoint description: Gets or sets the dsc server endpoint. types: - uid: string - name: keys description: Gets or sets the agent registration keys. types: - uid: AgentRegistrationKeys - name: id description: Gets or sets the id. types: - uid: string - name: ErrorResponse description: Error response of an operation failure kind: object properties: - name: code description: Error code types: - uid: string - name: message description: Error message indicating why the operation failed. types: - uid: string - name: AgentRegistrationRegenerateKeyParameter description: The parameters supplied to the regenerate keys operation. kind: object properties: - name: keyName description: Gets or sets the agent registration key name - primary or secondary. types: - uid: AgentRegistrationKeyName - name: AgentRegistrationKeys description: Definition of the agent registration keys. kind: object properties: - name: primary description: Gets or sets the primary key. types: - uid: string - name: secondary description: Gets or sets the secondary key. types: - uid: string - name: AgentRegistrationKeyName description: Gets or sets the agent registration key name - primary or secondary. 
kind: enum properties: - name: primary types: - uid: string - name: secondary types: - uid: string examples: - name: Regenerate registration key request: uri: POST https://management.azure.com/subscriptions/subid/resourceGroups/rg/providers/Microsoft.Automation/automationAccounts/myAutomationAccount18/agentRegistrationInformation/regenerateKey?api-version=2018-01-15 body: >- { "keyName": "primary" } responses: - statusCode: "200" body: >- { "id": null, "keys": { "primary": "<KEY>", "secondary": "rVp0000000000000000000000000000000000000000000000000000000000000000000000000000f8cbmrOA==" }, "endpoint": "https://eus2-agentservice-prod-1.azure-automation.net/accounts/bd8fac9e-0000-0000-0000-0000f474fbf6", "dscMetaConfiguration": null } security: - type: oauth2 description: Azure Active Directory OAuth2 Flow flow: implicit authorizationUrl: https://login.microsoftonline.com/common/oauth2/authorize scopes: - name: user_impersonation description: impersonate your user account
docs-ref-autogen/automation/AgentRegistrationInformation/RegenerateKey.yml
zh-CN: users: edit: settings_tab: credit_cards: title: 信用卡 subtitle: If any project you have contributed to <strong>with Credit Card</strong> is not successful, we will <strong>automatically</strong> refund the card used to make the contribution. card_label: 卡 provider_label: 操作员 remove_label: 去除 update_success_msg: 你的信息已经被更新 legal_title: 负责 legal_subtitle: "<strong>IMPORTANT</strong> Full name / Corporate name and CPF / CNPJ <strong>can not be modified</strong> after being saved." account_types: pf: 普通人 pj: 法人 mei: 法人代表 (个人 企业主 - MEI) pf_label_name: 全名 pj_label_name: 全名 / 公司名 pf_label_document: CPF pj_label_document: CNPJ label_birth_date: 出生日期 label_state_inscription: 注册省 address_title: 地址 address_subtitle: 地址信息会用来做发票发送。 label_country: 母公司 label_address_street: 街道 label_address_number: 门牌号 label_address_complement: 补充 label_address_neighbourhood: 邻居 label_address_city: 市 label_address_state: 省 label_other_option: 其他 label_address_zipcode: 邮编 label_address_phone: 电话 email_confirmation: hello: 你好 %{name} hello_sub: 我们联系你遇到了困难 ... hello_email: 这是你的邮件吗? %{email} confirmed_title: 请检查你的邮箱 confirmed_sub: 检查你的垃圾邮件. contributions: 支持 projects: 创建 about_me: 关于你 settings: 数据和地址 notification: 通知 billing: 银行和卡 profil: 回到公共介绍 feeds: 活动 submit: 保存 contributions_fields: no-project: 你还没有贡献任何项目 ... contribute: 现在就行动! about_fields: add_link: "+ 添加链接" image_title: 简介图片 image_description: 这个图片会用于你的简介的缩略图 (PNG, JPG 大小 280x280) cover_title: 简介封面图片 cover_description: 这个图片会作为你的公共简介的背景 (PNG or JPG)。如果你未提交图片,我们会用你的简介图片替代。 profil_address_title: 你的简介地址 profil_address_description: 你的公共链接可能包含了 name: 名字 name_description: 这个名字用户可以在公共简介中看到 faceebook: Facebook 简介 facebook_description: 粘贴你的简介链接 twitter: Twitter 简介 twitter_description: 粘贴你的简介链接 homepage: 网站 homepage_description: 提供链接给用户可以对你有更深入的了解。 about: 关于 about_description: 关于自己的介绍,确保你提供所有相关信息。 billing_fields: credit_card: 信用卡 card_message: If any project you have contributed to <b>with Credit Card</b> is not successful, we will <b>automatically</b> refund the card used to make the contribution. card: 卡 operator: 操作员 bank_account: 银行数据 bank_account_message: If any project you have contributed to <b>with Boleto Bancaria is</b> not successful, we will <b>automatically</b> refund your payment to the account indicated below. notifications_fields: newsletters: 750382: title: 周报 description: 我们团队亲自选取的推荐项目 image_src: http://uploads.webflow.com/5849f4f0a275a2a744efd93e/59a714d438c73b0001ee8d83_news-img.jpg 1370135: title: 专业新闻 description: 漫画, 卡片游戏, RPGs 和 数字游戏。 image_src: http://uploads.webflow.com/5849f4f0a275a2a744efd93e/59a713dde51ca0000149ae85_geek-news-img.jpg
config/locales/catarse_bootstrap/views/users/edit.zh-cn.yml
pt-BR: show: talk: created_by: Criado por events: one: Apresentado no evento other: Apresentado nos eventos to_public: "Público" _yes: "Sim" _no: "Não" without_presentation: "O material ainda não está publicado" my_talks_published: "Minhas palestras publicadas" external_event: "Adicionar evento externo" external_events: "Apresentado em outros eventos" submit_event: "Submeter a um evento" user: profile: "Veja o perfil completo em" presentations: "Palestras apresentadas em eventos" events: one: Evento que ajudei a organizar other: Eventos que ajudei a organizar participations: one: "Evento que participei" other: "Eventos que participei" event: place: "Local" organizers: Organização enrolled: Quem se inscreveu presents: Quem participou of: "De" to: "até" participate: "Quero participar!" cancel_my_participation: "Cancelar minha participação" sellout: "Lotação esgotada!" record_presence: "Registrar presença" undo_presence: "Desfazer presença" btn_presence_checkin: "Estou aqui!" btn_add_vote: "Adicionar voto" btn_remove_vote: "Retirar voto" btn_checkin: "Registrar entrada" btn_use_talk: Selecionar uma palestra btn_add_talk: Adicionar uma palestra hint_add_talk_html: "<p>Você pode selecionar uma palestra sua ou criar uma proposta para ser adicionada na programação do evento.</p> <p>Escolha uma das opções abaixo. Depois de selecionar ou criar uma palestra, clique no botão <b>'Submeter a um evento'</b>, selecione o evento e clique no botão <b>'Adicionar programação'</b>.</p> <p><b>Lembrete:</b> você precisa estar cadastrado e logado.</p>" attendance_table: number: "N." photo: "Foto" name: "Nome" email: "E-mail" presence: "Presença" export_subscribers: Exportar inscritos
config/locales/pt-BR/show.yml
uid: "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy" fullName: "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy" name: "ApplicationGatewaySslPolicy" nameWithType: "ApplicationGatewaySslPolicy" summary: "Application Gateway Ssl policy." inheritances: - "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />" inheritedMembers: - "java.lang.Object.clone()" - "java.lang.Object.equals(java.lang.Object)" - "java.lang.Object.finalize()" - "java.lang.Object.getClass()" - "java.lang.Object.hashCode()" - "java.lang.Object.notify()" - "java.lang.Object.notifyAll()" - "java.lang.Object.toString()" - "java.lang.Object.wait()" - "java.lang.Object.wait(long)" - "java.lang.Object.wait(long,int)" syntax: "public class ApplicationGatewaySslPolicy" constructors: - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.ApplicationGatewaySslPolicy()" methods: - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.cipherSuites()" - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.disabledSslProtocols()" - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.minProtocolVersion()" - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.policyName()" - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.policyType()" - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.withCipherSuites(java.util.List<com.microsoft.azure.management.network.ApplicationGatewaySslCipherSuite>)" - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.withDisabledSslProtocols(java.util.List<com.microsoft.azure.management.network.ApplicationGatewaySslProtocol>)" - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.withMinProtocolVersion(com.microsoft.azure.management.network.ApplicationGatewaySslProtocol)" - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.withPolicyName(com.microsoft.azure.management.network.ApplicationGatewaySslPolicyName)" - "com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.withPolicyType(com.microsoft.azure.management.network.ApplicationGatewaySslPolicyType)" type: "class" metadata: {} package: "com.microsoft.azure.management.network" artifact: com.microsoft.azure:azure-mgmt-network:1.40.0
docs-ref-autogen/com.microsoft.azure.management.network.ApplicationGatewaySslPolicy.yml
name: Shallow Profiler version: 0.4 description: Shallow Profiler Mission platform: platformID: SWPROF mission: - missionThread: instrumentID: ['OPTAA', 'PCO2W', 'CTDPF'] errorHandling: default: retry maxRetries: 3 schedule: startTime: 2014-07-18T00:00:00 timeZone: loop: quantity: -1 # No. of loops (-1 for infinite) value: 1 # Repeat missionParams every 'xx' 'units' units: hrs # mins, hrs, days event: parentID: eventID: preMissionSequence: - command: SWPROF, execute_resource(TURN_ON_PORT{OPTAA}) onError: retry - command: SWPROF, execute_resource(TURN_ON_PORT{PCO2W}) onError: retry - command: SWPROF, execute_resource(TURN_ON_PORT{CTDPF}) onError: retry - command: SWPROF, execute_resource(CLOCK_SYNC) onError: retry - command: OPTAA, execute_resource(CLOCK_SYNC) onError: retry - command: PCO2W, execute_resource(CLOCK_SYNC) onError: retry - command: CTDPF, execute_resource(CLOCK_SYNC) onError: retry missionSequence: - command: SWPROF, execute_resource(TURN_ON_PORT{OPTAA}) onError: retry - command: OPTAA, execute_agent(INITIALIZE) #OPTAA INACTIVE onError: retry - command: CTDPF, execute_agent(RUN) #CTD COMMAND onError: retry - command: CTDPF, set_resource(INTERVAL{1}) #CTD Set sampling interval onError: retry - command: CTDPF, execute_resource(START_AUTOSAMPLE) onError: retry - command: SWPROF, execute_resource(TURN_OFF_PORT{PCO2W}) onError: retry - command: SWPROF, execute_resource(LOAD_MISSION{0}) onError: retry - command: SWPROF, execute_resource(RUN_MISSION{0}) onError: retry postMissionSequence: - missionThread: instrumentID: [SWPROF, OPTAA] errorHandling: default: retry maxRetries: 3 schedule: startTime: timeZone: loop: quantity: value: units: event: parentID: SWPROF eventID: PROFILER_AT_CEILING preMissionSequence: missionSequence: - command: OPTAA, execute_agent(GO_INACTIVE) #OPTAA INACTIVE onError: retry - command: SWPROF, execute_resource(TURN_OFF_PORT{OPTAA}) onError: retry postMissionSequence: - missionThread: instrumentID: [SWPROF, PCO2W] errorHandling: default: retry maxRetries: 3 schedule: startTime: timeZone: loop: quantity: value: units: event: parentID: SWPROF eventID: PROFILER_AT_STEP preMissionSequence: missionSequence: - command: PCO2W, execute_agent(GO_ACTIVE) #PCO2W IDLE onError: retry - command: PCO2W, execute_agent(RUN) #PCO2W COMMAND onError: retry - command: PCO2W, set_resource(INTERVAL{1}) #PCO2W Set Sampling Interval onError: retry - command: PCO2W, execute_resource(ACQUIRE_SAMPLE) onError: retry - command: PCO2W, execute_agent(GO_INACTIVE) onError: retry postMissionSequence: - missionThread: instrumentID: [SWPROF, OPTAA, PCO2W, CTDPF] errorHandling: default: retry maxRetries: 3 schedule: startTime: timeZone: loop: quantity: value: units: event: parentID: SWPROF eventID: PROFILER_AT_FLOOR preMissionSequence: missionSequence: - command: CTDPF, execute_resource(STOP_AUTOSAMPLE) onError: retry - command: CTDPF, execute_agent(GO_INACTIVE) onError: retry - command: SWPROF, execute_resource(CLOCK_SYNC) onError: retry - command: OPTAA, execute_resource(CLOCK_SYNC) onError: retry - command: PCO2W, execute_resource(CLOCK_SYNC) onError: retry - command: CTDPF, execute_resource(CLOCK_SYNC) onError: retry postMissionSequence:
ion/agents/platform/test/mission_ShallowProfiler.yml
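Only the first mission thread fills in the loop block, and its inline comments give the semantics: quantity is the number of repetitions (-1 for infinite), while value and units set the repeat interval. A finite-schedule sketch under those semantics, with hypothetical numbers:

loop:
  quantity: 4    # run this mission thread four times
  value: 6
  units: hrs     # repeating every six hours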