AWSTemplateFormatVersion: '2010-09-09'
Transform: 'AWS::Serverless-2016-10-31'
Description: AWS CloudFormation template that creates a Lambda function and an API Gateway endpoint to access it.
# input parameters
Parameters:
apiName:
AllowedPattern: ^[a-zA-Z0-9]+[a-zA-Z0-9-]+[a-zA-Z0-9]+$
Default: LoanAmortApi
Description: Name of the API gateway used to access the Lambda function.
Type: String
functionName:
AllowedPattern: ^[a-zA-Z0-9]+[a-zA-Z0-9-]+[a-zA-Z0-9]+$
    Default: LoanAmortFunction
Description: Name of the Lambda Function.
Type: String
s3Bucket:
Type: String
Description: S3 Bucket Name (e.g. mybucket).
s3Key:
Type: String
    Description: Path to the file containing the required source inside the bucket (e.g. mypipeline/BuildArtifacts/myFnc.zip).
# resources declaration
Resources:
# the lambda function
lambdaFunction:
Type: AWS::Serverless::Function
Properties:
FunctionName:
Ref: functionName
Description: Loan Amort Lambda Function
Handler: com.microfocus.loan.amort.lambda.CalculateLoanHandler
Runtime: java8
MemorySize: 512
CodeUri:
Bucket:
Ref: s3Bucket
Key:
Ref: s3Key
AutoPublishAlias: fncAlias
DeploymentPreference:
Type: AllAtOnce
Timeout: 15
# api gateway to access the lambda
apiGateway:
Type: AWS::Serverless::Api
Properties:
Name:
Ref: apiName
StageName: loanamort
DefinitionBody:
swagger: '2.0'
info:
title: apiGateway
version: '2018-11-01T18:06:27Z'
schemes:
- https
definitions:
Empty:
title: Empty Schema
type: object
paths:
/:
get:
consumes:
- application/json
parameters:
- in: query
name: p
required: true
type: string
- in: query
name: t
required: true
type: string
- in: query
name: r
required: true
type: string
produces:
- application/json
responses:
'200':
description: 200 response
headers:
Access-Control-Allow-Origin:
type: string
schema:
$ref: '#/definitions/Empty'
x-amazon-apigateway-integration:
contentHandling: CONVERT_TO_TEXT
httpMethod: POST
passthroughBehavior: when_no_templates
requestTemplates:
application/json: "{\r\n \"principal\": \"$input.params('p')\"\
,\r\n \"term\": \"$input.params('t')\",\r\n \"rate\": \"$input.params('r')\"\
\r\n}"
responses:
default:
responseParameters:
method.response.header.Access-Control-Allow-Origin: '''*'''
statusCode: '200'
type: aws
uri:
Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${lambdaFunction.Arn}:fncAlias/invocations
# lambda - api gateway permissions
permissions:
Type: AWS::Lambda::Permission
DependsOn: [ lambdaFunction, apiGateway, lambdaFunctionAliasfncAlias ]
Properties:
Action: lambda:InvokeFunction
FunctionName:
Fn::Join:
- ''
- - Ref: functionName
- :fncAlias
Principal: apigateway.amazonaws.com
SourceArn:
Fn::Sub: arn:aws:execute-api:${AWS::Region}:${AWS::AccountId}:${apiGateway}/*/GET/
# output parameters
Outputs:
ApiAddress:
Description: API address for accessing the Lambda Function
Value:
Fn::Join:
- ''
- - https://
- Ref: apiGateway
- ".execute-api."
- Ref: AWS::Region
- ".amazonaws.com/loanamort"
|
scripts/serverless.yml
|
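For reference, the x-amazon-apigateway-integration request template in the file above rewrites the query string into a JSON body before invoking the Lambda alias. A request such as GET /loanamort/?p=100000&t=360&r=3.5 (hypothetical values) would produce the payload below; note the template quotes each value, so the handler receives strings, not numbers:

{
  "principal": "100000",
  "term": "360",
  "rate": "3.5"
}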
---
:replyall:
:url: https://gimletmedia.com/shows/reply-all/episodes
:twitter: replyall
:flair: Reply All
:crimetown:
:url: https://open.spotify.com/show/5C9yXrX999QNS1Q62IlXvc
:twitter: crimetown
:flair: Crimetown
:hevyweight:
:url: https://gimletmedia.com/shows/heavyweight/episodes
:twitter: heavyweight
:flair: Heavyweight
:thepitch:
:url: https://gimletmedia.com/shows/the-pitch/episodes
:twitter: thepitchshow
:flair: The Pitch
:elt:
:url: https://gimletmedia.com/shows/every-little-thing/episodes
:twitter: eltshow
:flair: ELT
:mogul:
:url: https://gimletmedia.com/shows/mogul/episodes
:flair: Mogul
:sciencevs:
:url: https://gimletmedia.com/shows/science-vs/episodes
:twitter: sciencevs
:flair: Science vs
:homecoming:
:url: https://gimletmedia.com/shows/homecoming/episodes
:twitter: HomecomingShow
:flair: Homecoming
:thenod:
:url: https://gimletmedia.com/shows/the-nod/episodes
:twitter: thenodshow
:flair: The Nod
:uncivil:
:url: https://gimletmedia.com/shows/uncivil/episodes
:twitter: uncivilshow
:flair: Uncivil
:storypirates:
:url: https://gimletmedia.com/shows/story-pirates/episodes
:twitter: storypirates
:flair: Story Pirates
:habitat:
:url: https://gimletmedia.com/shows/the-habitat/episodes
:flair: The Habitat
:sandra:
:url: https://gimletmedia.com/shows/sandra/episodes
:flair: Sandra
:wecametowin:
:url: https://gimletmedia.com/shows/we-came-to-win/episodes
:flair: We Came To Win
:withoutfail:
:url: https://gimletmedia.com/shows/without-fail/episodes
:flair: Without Fail
:thecutontuesdays:
:url: https://gimletmedia.com/shows/the-cut-on-tuesdays/episodes
:twitter: TheCut
:flair: The Cut on Tuesdays
:doloresroach:
:url: https://gimletmedia.com/shows/dolores-roach/episodes
:flair: The Horror of Dolores Roach
:conviction:
:url: https://open.spotify.com/show/2XquJlPU9ibLYZMH0ZzFwA
:flair: Conviction
:motherhoodsessions:
:url: https://gimletmedia.com/shows/motherhood-sessions/episodes
:flair: Motherhood Sessions
:theclearing:
:url: https://gimletmedia.com/shows/the-clearing/episodes
:flair: The Clearing
:motherhacker:
:url: https://gimletmedia.com/shows/motherhacker/episodes
:flair: Motherhacker
:howswork:
:url: https://open.spotify.com/show/0P13JasQfVZ1RiDCMZMYNU
:flair: How's Work?
:scaredycats:
:url: https://open.spotify.com/show/3VrlfhxeItVL0o1rccJU1H
:flair: The Scaredy Cats Horror Show
:whereshouldwebegin:
:url: https://open.spotify.com/show/3fKOTwtnX5oZLaiNntKWAV?si=zQoDobcOSPy48pihCmkYHA
:flair: Where Should We Begin?
:sohelpme:
:url: https://open.spotify.com/show/3xPRJvSzSmd2g38bsmzQfw?si=1EORHiUQSzuB7Fuz51A6uw
:flair: So Help Me
:howtosaveaplanet:
:url: https://gimletmedia.com/shows/howtosaveaplanet/episodes
:flair: How To Save a Planet
|
feeds.yml
|
name: "Deploy & Upload"
on:
  # supports manually triggered builds
workflow_dispatch:
release:
    # triggered when a release is published
types: [ published ]
jobs:
gh-deploy:
name: "Publish Project (GitHub)"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: "Set up JDK"
uses: actions/setup-java@v2
with:
java-version: '11'
distribution: 'adopt'
cache: maven
server-id: github
server-username: MAVEN_USERNAME
server-password: <PASSWORD>
gpg-private-key: ${{ secrets.MAVEN_GPG_PRIVATE_KEY }} # Value of the GPG private key to import
gpg-passphrase: <PASSWORD> # env variable for GPG private key passphrase
- name: "Maven Deploy With Javadoc"
run: mvn -B -Pgithub deploy --file pom.xml -DskipTests
env:
MAVEN_USERNAME: ${{ github.repository_owner }}
          MAVEN_TOKEN: ${{ secrets.GITHUB_TOKEN }}
MAVEN_GPG_PASSPHRASE: ${{ secrets.MAVEN_GPG_PASSPHRASE }}
- name: "Copy Javadoc to Location"
run: |
rm -rf docs
mkdir -vp docs
cp -vrf target/apidocs/* docs/
cp -vrf .documentation/JAVADOC-README.md docs/README.md
- name: "Generate the Javadoc sitemap"
id: sitemap
uses: cicirello/generate-sitemap@v1
with:
base-url-path: https://carmjos.github.io/GithubReleases4J
path-to-root: docs
- name: "Output stats"
run: |
echo "sitemap-path = ${{ steps.sitemap.outputs.sitemap-path }}"
echo "url-count = ${{ steps.sitemap.outputs.url-count }}"
echo "excluded-count = ${{ steps.sitemap.outputs.excluded-count }}"
- name: "Configure Git"
env:
DEPLOY_PRI: ${{secrets.DEPLOY_PRI}}
run: |
sudo timedatectl set-timezone "Asia/Shanghai"
mkdir -p ~/.ssh/
echo "$DEPLOY_PRI" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
ssh-keyscan github.com >> ~/.ssh/known_hosts
git config --global user.name 'CarmJos'
git config --global user.email '<EMAIL>'
- name: "Commit documentations"
run: |
cd docs
git init
git remote add origin <EMAIL>:CarmJos/GithubReleases4J.git
git checkout -b gh-pages
git add -A
git commit -m "API Document generated."
- name: "Push javadocs"
run: |
cd docs
git push origin HEAD:gh-pages --force
central-deploy:
name: "Deploy Project (Central)"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: "Set up JDK"
uses: actions/setup-java@v2
with:
java-version: '11'
distribution: 'adopt'
cache: maven
server-id: ossrh
server-username: MAVEN_USERNAME
server-password: <PASSWORD>
gpg-private-key: ${{ secrets.MAVEN_GPG_PRIVATE_KEY }} # Value of the GPG private key to import
gpg-passphrase: <PASSWORD> # env variable for GPG private key passphrase
- name: "Central Deploy"
run: mvn -B -Possrh deploy --file pom.xml -DskipTests
env:
MAVEN_USERNAME: ${{ secrets.OSSRH_USER }}
MAVEN_PASSWORD: ${{ secrets.OSSRH_PASS }}
MAVEN_GPG_PASSPHRASE: ${{ secrets.MAVEN_GPG_PASSPHRASE }}
|
.github/workflows/deploy.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-19 18:30"
variationOf: "67754758a4feabf4f3c9d6a5892d848d161a651f"
game: "Unreal Tournament"
name: "DM-[BBST]-Artic"
author: "Radius"
description: "None"
releaseDate: "2001-08"
attachments:
- type: "IMAGE"
name: "dm-bbst-artic_shot_651a8fa6_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/B/6/5/1a8fa6/dm-bbst-artic_shot_651a8fa6_1.png"
- type: "IMAGE"
name: "dm-bbst-artic_shot_651a8fa6_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/DeathMatch/B/6/5/1a8fa6/dm-bbst-artic_shot_651a8fa6_2.png"
originalFilename: "DM-[BBST]-artic2.zip"
hash: "651a8fa61d9b1501108924c9a2e62f2355be5882"
fileSize: 4783060
files:
- name: "Industrial_Pack1.u"
fileSize: 2164988
hash: "e48d99d0577ec376c0a55d505601db7378af9c28"
- name: "[BBST]-Oil.utx"
fileSize: 1881361
hash: "1018dae6116f07cea39b605364b5d1c7f2abcfd6"
- name: "DM-[BBST]-Artic.unr"
fileSize: 7635232
hash: "0f5c599b180de58a9aba76e0e3f19734c7af2cf7"
- name: "pb_totalizator.umx"
fileSize: 323167
hash: "071a09ce4485bee80f8986975d05383f362bf621"
otherFiles: 2
dependencies:
DM-[BBST]-Artic.unr:
- status: "OK"
name: "Industrial_Pack1"
- status: "OK"
name: "pb_totalizator"
- status: "OK"
name: "[BBST]-Oil"
downloads:
- url: "http://medor.no-ip.org/index.php?dir=Maps/DeathMatch&file=DM-%5BBBST%5D-artic2.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/DeathMatch/B/DM-%5BBBST%5D-artic2.zip"
main: true
repack: false
state: "OK"
- url: "http://www.unrealplayground.com/forums/downloads.php?do=file&id=2982"
main: false
repack: false
state: "MISSING"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/DeathMatch/B/6/5/1a8fa6/DM-%255BBBST%255D-artic2.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/DeathMatch/B/6/5/1a8fa6/DM-%255BBBST%255D-artic2.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "[BBST]-Artic"
playerCount: "5-15"
themes:
Tech: 0.1
Industrial: 0.8
Natural: 0.1
bots: false
|
content/Unreal Tournament/Maps/DeathMatch/B/6/5/1a8fa6/dm-bbst-artic_[651a8fa6].yml
|
---
# tasks file for prep_selfssl
- name: ensure "{{ dhparam_path | dirname }}" exists
file:
name: "{{ dhparam_path | dirname }}"
owner: root
group: root
mode: 0755
state: directory
- name: use the pre-defined DH group ffdhe4096 recommended by the IETF in RFC 7919 (https://tools.ietf.org/html/rfc7919)
copy:
dest: "{{ dhparam_path }}"
owner: root
group: root
mode: 0644
content: |
-----BEGIN DH PARAMETERS-----
MIICCAKCAgEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
ssbzSibBsu/6iGtCOGEfz9zeNVs7ZRkDW7w09N75nAI4YbRvydbmyQd62R0mkff3
7lmMsPrBhtkcrv4TCYUTknC0EwyTvEN5RPT9RFLi103TZPLiHnH1S/9croKrnJ32
nuhtK8UiNjoNq8Uhl5sN6todv5pC1cRITgq80Gv6U93vPBsg7j/VnXwl5B0rZp4e
8W5vUsMWTfT7eTDp5OWIV7asfV9C1p9tGHdjzx1VA0AEh/VbpX4xzHpxNciG77Qx
iu1qHgEtnmgyqQdgCpGBMMRtx3j5ca0AOAkpmaMzy4t6Gh25PXFAADwqTs6p+Y0K
zAqCkc3OyX3Pjsm1Wn+IpGtNtahR9EGC4caKAH5eZV9q//////////8CAQI=
-----END DH PARAMETERS-----
- name: use ec2 public hostname if fqdn is not defined
set_fact:
fqdn: "{{ fqdn if ( fqdn ) else facter_ec2_metadata['public-hostname'] }}"
when: facter_ec2_metadata is defined
- name: use ansible_fqdn if fqdn is not defined
set_fact:
fqdn: "{{ fqdn if ( fqdn ) else ansible_fqdn }}"
- name: create fake certificate directory
file:
dest: /etc/nginx/certs
owner: root
group: root
mode: 0755
state: directory
- name: install needed pip packages
pip:
name:
- pyOpenSSL
state: latest
- name: generate an openssl private key with the default values
openssl_privatekey:
path: /etc/nginx/certs/key.pem
- name: generate an openssl certificate signing request
openssl_csr:
path: /etc/nginx/certs/cert.csr
privatekey_path: /etc/nginx/certs/key.pem
common_name: "{{ fqdn }}"
- name: Generate a Self Signed OpenSSL certificate
openssl_certificate:
path: /etc/nginx/certs/cert.pem
privatekey_path: /etc/nginx/certs/key.pem
csr_path: /etc/nginx/certs/cert.csr
provider: selfsigned
|
roles/prep_selfssl/tasks/main.yml
|
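A minimal sketch of how this task file might be invoked as the prep_selfssl role from a playbook. The host group and the dhparam_path value are assumptions; the role only requires that dhparam_path and fqdn be defined (fqdn may be empty, in which case the tasks above fall back to the EC2 public hostname or ansible_fqdn):

- hosts: webservers                        # hypothetical host group
  become: true
  vars:
    dhparam_path: /etc/nginx/dhparam.pem   # assumed path; any writable location works
    fqdn: ""                               # empty on purpose: triggers the fallback set_fact tasks
  roles:
    - prep_selfssl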
uid: azure.mgmt.automation.models.Job
name: Job
fullName: azure.mgmt.automation.models.Job
module: azure.mgmt.automation.models
inheritances:
- azure.mgmt.automation.models._models_py3.Resource
summary: 'Definition of the job.
Variables are only populated by the server, and will be ignored when sending a request.'
constructor:
syntax: 'Job(*, runbook: typing.Union[_ForwardRef(''RunbookAssociationProperty''),
NoneType] = None, started_by: typing.Union[str, NoneType] = None, run_on: typing.Union[str,
NoneType] = None, job_id: typing.Union[str, NoneType] = None, creation_time: typing.Union[datetime.datetime,
NoneType] = None, status: typing.Union[str, _ForwardRef(''JobStatus''), NoneType]
= None, status_details: typing.Union[str, NoneType] = None, start_time: typing.Union[datetime.datetime,
NoneType] = None, end_time: typing.Union[datetime.datetime, NoneType] = None,
exception: typing.Union[str, NoneType] = None, last_modified_time: typing.Union[datetime.datetime,
NoneType] = None, last_status_modified_time: typing.Union[datetime.datetime, NoneType]
= None, parameters: typing.Union[typing.Dict[str, str], NoneType] = None, provisioning_state:
typing.Union[str, _ForwardRef(''JobProvisioningState''), NoneType] = None, **kwargs)'
parameters:
- name: runbook
description: Gets or sets the runbook.
types:
- <xref:azure.mgmt.automation.models.RunbookAssociationProperty>
- name: started_by
description: Gets or sets the job started by.
types:
- <xref:str>
- name: run_on
description: 'Gets or sets the runOn which specifies the group name where the
job is to be
executed.'
types:
- <xref:str>
- name: job_id
description: Gets or sets the id of the job.
types:
- <xref:str>
- name: creation_time
description: Gets or sets the creation time of the job.
types:
- <xref:datetime.datetime>
- name: status
description: 'Gets or sets the status of the job. Possible values include: "New",
"Activating", "Running", "Completed", "Failed", "Stopped", "Blocked", "Suspended",
"Disconnected", "Suspending", "Stopping", "Resuming", "Removing".'
types:
- <xref:str>
- <xref:azure.mgmt.automation.models.JobStatus>
- name: status_details
description: Gets or sets the status details of the job.
types:
- <xref:str>
- name: start_time
description: Gets or sets the start time of the job.
types:
- <xref:datetime.datetime>
- name: end_time
description: Gets or sets the end time of the job.
types:
- <xref:datetime.datetime>
- name: exception
description: Gets or sets the exception of the job.
types:
- <xref:str>
- name: last_modified_time
description: Gets or sets the last modified time of the job.
types:
- <xref:datetime.datetime>
- name: last_status_modified_time
description: Gets or sets the last status modified time of the job.
types:
- <xref:datetime.datetime>
- name: parameters
description: Gets or sets the parameters of the job.
types:
- <xref:dict>[<xref:str>, <xref:str>]
- name: provisioning_state
description: 'The current provisioning state of the job. Possible values include:
"Failed", "Succeeded", "Suspended", "Processing".'
types:
- <xref:str>
- <xref:azure.mgmt.automation.models.JobProvisioningState>
variables:
- description: Fully qualified resource Id for the resource.
name: id
types:
- <xref:str>
- description: The name of the resource.
name: name
types:
- <xref:str>
- description: The type of the resource.
name: type
types:
- <xref:str>
|
preview/docs-ref-autogen/azure-mgmt-automation/azure.mgmt.automation.models.Job.yml
|
pa:
LeftAndMain:
NEWSLETTERS: "ਅਖ਼ਬਾਰਾਂ"
NewsletterAdmin:
FROMEM: "ਈਮੇਲ ਪਤੇ ਤੋ "
MEWDRAFTMEWSL: "ਨਵਾਂ ਕੱਚਾ ਖ਼ਬਰ-ਅੰਕ"
NEWNEWSLTYPE: "ਨਵੀ ਖ਼ਬਰ-ਅੰਕ ਦੀ ਿਕਸਮ "
NEWSLTYPE: "ਖ਼ਬਰ-ਅੰਕ ਦੀ ਿਕਸਮ "
PLEASEENTERMAIL: "ਿਕ੍ਰਪਾ ਕਰਕੇ ਇੱਕ ਈਮੇਲ ਪਤਾ ਪਾਉ"
RESEND: "ਦੁਬਾਰਾ ਭੇਜੋ "
SAVE: "ਬਚਾਓ"
SAVED: "ਬਚਾਇਆ"
SEND: |
ਭੇਜੋ ...
SENDING: "ਭੇਜ ਰਹੇ ਈਮੇਲ..."
SENTTESTTO: "Sent test to"
SHOWCONTENTS: "ਸਮਾਨ ਿਦਖਾਓ"
NewsletterAdmin_BouncedList_ss:
EMADD: "ਈਮੇਲ ਪਤਾ"
HAVEBOUNCED: "Emails that have bounced"
NOBOUNCED: "No emails sent have bounced."
UNAME: "User name"
NewsletterAdmin_SiteTree_ss:
DRAFTS: "Drafts"
MAILLIST: "ਮੇਿਲੰਗ ਸੂਚੀ "
SENT: "Sent Items"
NewsletterAdmin_UnsubscribedList_ss:
NOUNSUB: "No users have unsubscribed from this newsletter."
UNAME: "User name"
UNSUBON: "Unsubscribed on"
NewsletterAdmin_left_ss:
ADDDRAFT: "ਨਵਾਂ draft ਜੋੜ ੋ "
ADDTYPE: "ਨਵੀ ਿਕਸਮ ਜੋੜ ੋ "
CREATE: "ਬਣਾਉ"
DEL: "ਕੱਟੋਂ"
DELETEDRAFTS: "ਚੁਣੇ ਹੋਏ ਡਰਾਫਟ ਕੱਟੋਂ"
GO: "ਈਮੇਲ"
NEWSLETTERS: "ਅਖ਼ਬਾਰਾਂ"
SELECTDRAFTS: "Select the drafts that you want to delete and then click the button below"
NewsletterAdmin_right_ss:
CANCEL: "ਰੱਦ"
ENTIRE: "Send to the entire mailing list"
ONLYNOT: "Send to only people not previously sent to"
SEND: "Send newsletter"
SENDTEST: "Send test to"
WELCOME1: "ਜੀ ਆਇਆਂ ਨੂੰ"
WELCOME2: "ਖ਼ਬਰਾਂ ਬੰਦੋਬਸਤ ਭਾਗ। ਿਕ੍ਰਪਾ ਕਰਕੇ ਖੱਬੇਉ ਇਕ ਫੋਲਡਰ ਚੁਣੋ।"
NewsletterList_ss:
CHOOSEDRAFT1: "ਿਕ੍ਰਪਾ ਕਰਕੇ ਖੱਬੇਓ ਇੱਕ draft ਚੁਣੋ "
CHOOSEDRAFT2: "add one"
CHOOSESENT: "ਿਕ੍ਰਪਾ ਕਰਕੇ ਖੱਬੇਓ ਇੱਕ sent item ਚੁਣੋ "
Newsletter_RecipientImportField_ss:
CHANGED: "Number of details changed:"
IMPORTED: "New members imported:"
IMPORTNEW: "Imported new members"
SEC: "ਸਿਕ ੰਟ"
SKIPPED: "Records skipped:"
TIME: "Time taken:"
UPDATED: "Members updated:"
Newsletter_RecipientImportField_Table_ss:
CONTENTSOF: "Contents of"
NO: "ਰੱਦ"
RECIMPORTED: "Recipients imported from"
YES: "Confirm"
Newsletter_SentStatusReport_ss:
DATE: "Date"
EMAIL: "ਇਮੇਲ"
FN: "ਪਹਿਲਾ ਨਾਮ"
NEWSNEVERSENT: "The Newsletter has Never Been Sent to Following Subscribers"
RES: "Result"
SENDBOUNCED: "Sending to the Following Recipients Bounced"
SENDFAIL: "Sending to the Following Recipients Failed"
SENTOK: "Sending to the Following Recipients was Successful"
SN: "ਗੋਤ"
|
lang/pa.yml
|
name: oauthenticated
version: 0.2.1.0
synopsis: Simple OAuth for http-client
description: |
  /Warning/: This software is pre-1.0 and thus its API may still change
  significantly between minor versions. This package will follow the PVP once
  it reaches version 1.0.
.
OAuth is a popular protocol allowing servers to offer resources owned by some
user to a series of authorized clients securely. For instance, OAuth lets
Twitter provide access to a user's private tweets to the Twitter client
registered on their phone.
.
@oauthenticated@ is a Haskell library implementing OAuth protocols atop the
minimalistic @http-client@ HTTP client library extracted from @http-conduit@.
"Network.OAuth" offers simple functions for signing
'Network.HTTP.Client.Request's along with tools for 'Network.OAuth.Cred'ential
management and 'Network.OAuth.Server' configuration. "Network.OAuth.Simple"
provides a slightly more heavy-weight interface which manages the necessary state
and configuration using a monad transformer stack.
.
There's also an implementation of OAuth's three-legged credential acquisition
protocol built atop the "Network.OAuth" API. This can be handled in both
conformant and old-style modes: conformant will reject server responses which
are not conformant with RFC 5849 (which builds atop community version OAuth
1.0a) while old-style better allows for less-than-compliant servers. See
'Network.OAuth.Types.Params.Version' for more details.
.
Currently @oauthenticated@ only supports OAuth 1.0 and is in alpha. OAuth 2.0
support is a potential goal, but it's unclear if it can be transparently
supported at a similar level of abstraction.
license: MIT
license-file: LICENSE
author: <NAME>
maintainer: <EMAIL>
copyright: 2013 (c) <NAME>
category: Network, Web
build-type: Simple
ghc-options:
- -Wall
- -Werror
- -fwarn-tabs
dependencies:
- base >= 4.8 && < 5
- aeson
- base64-bytestring
- blaze-builder
- bytestring
- case-insensitive
- cryptonite
- exceptions
- http-client
- http-types
- memory
- mtl
- time
- text
- transformers
- network-uri
- network
library:
source-dirs: src
other-modules:
- Network.OAuth.MuLens
- Network.OAuth.Util
tests:
spec:
main: Spec.hs
source-dirs:
- test
dependencies:
- hspec
- hspec-expectations
- http-client-tls
- oauthenticated
github: tel/oauthenticated
|
package.yaml
|
apiVersion: v1
kind: ServiceAccount
metadata:
name: gtflp
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: gtflp
rules:
- apiGroups:
- serving.knative.dev
- metrics.k8s.io
- extensions
- apps
- eventing.knative.dev
- ""
- flow.triggermesh.io
- sources.triggermesh.io
resources:
- pods/log
- events
- nodes
- pods
- deployments
- replicasets
- routes
- brokers
- triggers
- bridges
- azureactivitylogssources
- azureblobstoragesources
- azureeventgridsources
- azureeventhubsources
- azurequeuestoragesources
- googlecloudauditlogssources
- googlecloudbillingsources
- googlecloudpubsubsources
- googlecloudstoragesources
- ocimetricssources
- salesforcesources
- twiliosources
- httppollersources
- slacksources
- webhooksources
- zendesksources
- awscloudwatchlogssources
- awscloudwatchsources
- awscodecommitsources
- awscognitoidentitysources
- awscognitouserpoolsources
- awsdynamodbsources
- awskinesissources
- awsperformanceinsightssources
- awss3sources
- awssnssources
- awssqssources
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gtflp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gtflp
subjects:
- kind: ServiceAccount
name: gtflp
# UPDATE THIS TO MATCH YOUR NAMESPACE:
namespace: demo
---
apiVersion: eventing.knative.dev/v1
kind: Trigger
metadata:
name: gtflp
spec:
broker: default
subscriber:
ref:
apiVersion: serving.knative.dev/v1
kind: Service
name: gtflp
---
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
name: gtflp
spec:
template:
metadata:
annotations:
autoscaling.knative.dev/maxScale: "1"
spec:
serviceAccountName: gtflp
containers:
- image: ko://github.com/JeffNeff/gtflp/cmd/gtflp/
env:
|
config/gtflp.yaml
|
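As the comment in the ClusterRoleBinding above notes, the subject namespace must match wherever the gtflp ServiceAccount actually lives. For a hypothetical deployment into a namespace named my-namespace, the subject block would read:

subjects:
  - kind: ServiceAccount
    name: gtflp
    namespace: my-namespace   # hypothetical; replace "demo" above with your own namespace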
---
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-trigger-ad_hoc/25/
timestamp: 2016-05-25 15:56:22 UTC
duration: 1h59m48s
triggered_by: robbkidd
active_duration: 1h59m24s
parameters:
GIT_REF: fieri-as-engine
EXPIRE_CACHE: false
change:
git_remote: <EMAIL>:chef/supermarket.git
git_commit: <PASSWORD>
project: supermarket
version: 2.6.0+20160525155634
stages:
supermarket-promote:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-promote/232/
duration: 0s
supermarket-test:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-test/236/
duration: 21m11s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-test/architecture=x86_64,platform=el-5,project=supermarket,role=tester/236/
duration: 11m21s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-test/architecture=x86_64,platform=el-6,project=supermarket,role=tester/236/
duration: 13m26s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-test/architecture=x86_64,platform=el-7,project=supermarket,role=tester/236/
duration: 11m14s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-test/architecture=x86_64,platform=ubuntu-12.04,project=supermarket,role=tester/236/
duration: 7m55s
ubuntu-14.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-test/architecture=x86_64,platform=ubuntu-14.04,project=supermarket,role=tester/236/
duration: 21m11s
supermarket-build:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-build/264/
duration: 1h38m7s
runs:
el-5:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-build/architecture=x86_64,platform=el-5,project=supermarket,role=builder/264/
duration: 1h38m1s
el-6:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-build/architecture=x86_64,platform=el-6,project=supermarket,role=builder/264/
duration: 40m9s
el-7:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-build/architecture=x86_64,platform=el-7,project=supermarket,role=builder/264/
duration: 26m7s
ubuntu-12.04:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-build/architecture=x86_64,platform=ubuntu-12.04,project=supermarket,role=builder/264/
duration: 1h37m50s
supermarket-trigger-ad_hoc:
result: SUCCESS
url: http://wilson.ci.chef.co/job/supermarket-trigger-ad_hoc/25/
duration: 4s
|
reports/wilson.ci.chef.co/job/supermarket-trigger-ad_hoc/25.yaml
|
items:
- uid: '@azure/arm-notificationhubs.ResourceListKeys'
name: ResourceListKeys
fullName: ResourceListKeys
children:
- '@azure/arm-notificationhubs.ResourceListKeys.keyName'
- '@azure/arm-notificationhubs.ResourceListKeys.primaryConnectionString'
- '@azure/arm-notificationhubs.ResourceListKeys.primaryKey'
- '@azure/arm-notificationhubs.ResourceListKeys.secondaryConnectionString'
- '@azure/arm-notificationhubs.ResourceListKeys.secondaryKey'
langs:
- typeScript
type: interface
summary: ''
package: '@azure/arm-notificationhubs'
- uid: '@azure/arm-notificationhubs.ResourceListKeys.keyName'
name: keyName
fullName: keyName
children: []
langs:
- typeScript
type: property
summary: ''
optional: true
syntax:
content: 'keyName?: undefined | string'
return:
type:
- undefined | string
description: ''
package: '@azure/arm-notificationhubs'
- uid: '@azure/arm-notificationhubs.ResourceListKeys.primaryConnectionString'
name: primaryConnectionString
fullName: primaryConnectionString
children: []
langs:
- typeScript
type: property
summary: ''
optional: true
syntax:
content: 'primaryConnectionString?: undefined | string'
return:
type:
- undefined | string
description: ''
package: '@azure/arm-notificationhubs'
- uid: '@azure/arm-notificationhubs.ResourceListKeys.primaryKey'
name: primaryKey
fullName: primaryKey
children: []
langs:
- typeScript
type: property
summary: ''
optional: true
syntax:
content: 'primaryKey?: undefined | string'
return:
type:
- undefined | string
description: ''
package: '@azure/arm-notificationhubs'
- uid: '@azure/arm-notificationhubs.ResourceListKeys.secondaryConnectionString'
name: secondaryConnectionString
fullName: secondaryConnectionString
children: []
langs:
- typeScript
type: property
summary: ''
optional: true
syntax:
content: 'secondaryConnectionString?: undefined | string'
return:
type:
- undefined | string
description: ''
package: '@azure/arm-notificationhubs'
- uid: '@azure/arm-notificationhubs.ResourceListKeys.secondaryKey'
name: secondaryKey
fullName: secondaryKey
children: []
langs:
- typeScript
type: property
summary: ''
optional: true
syntax:
content: 'secondaryKey?: undefined | string'
return:
type:
- undefined | string
description: ''
package: '@azure/arm-notificationhubs'
|
docs-ref-autogen/@azure/arm-notificationhubs/ResourceListKeys.yml
|
service: dark-kite-configurations
plugins:
- serverless-webpack
- serverless-dynamodb-local
- serverless-offline
- serverless-domain-manager
- serverless-plugin-stage-variables
custom:
webpack:
webpackConfig: webpack.config.js
includeModules:
forceExclude:
- aws-sdk
stage: ${opt:stage, self:provider.stage}
stageVariables:
env: ${self:custom.stage}
tables:
projects: projects-${self:custom.stage}
domains:
prod: api.dark-kite.com
stage: stage-api.dark-kite.com
dev: dev-api.dark-kite.com
customDomain:
domainName: ${self:custom.domains.${self:custom.stage}}
basePath: 'configurations'
stage: ${self:custom.stage}
createRoute53Record: true
dynamodb:
start:
port: 8000
inMemory: true
migrate: true
migration:
dir: migrations
package:
exclude:
- coverage/**
- migrations/**
- .circleci/**
- .git/**
- tests/**
provider:
name: aws
runtime: nodejs8.10
stage: dev
region: us-east-1
iamRoleStatements:
- Effect: "Allow"
Action:
- dynamodb:Query
- dynamodb:Scan
- dynamodb:GetItem
- dynamodb:PutItem
- dynamodb:UpdateItem
- dynamodb:DeleteItem
Resource: "arn:aws:dynamodb:*:*:table/${self:custom.tables.projects}"
functions:
create:
handler: create.default
events:
- http:
path: /
method: post
cors: true
environment:
PROJECTS_TABLE: ${self:custom.tables.projects}
list:
handler: list.default
events:
- http:
path: /
method: get
cors: true
environment:
PROJECTS_TABLE: ${self:custom.tables.projects}
get:
handler: get.default
events:
- http:
path: /{id}
method: get
cors: true
environment:
PROJECTS_TABLE: ${self:custom.tables.projects}
update:
handler: update.default
events:
- http:
path: /{id}
method: put
cors: true
environment:
PROJECTS_TABLE: ${self:custom.tables.projects}
delete:
handler: delete.default
events:
- http:
path: /{id}
method: delete
cors: true
environment:
PROJECTS_TABLE: ${self:custom.tables.projects}
|
services/configurations/serverless.yml
|
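As a worked example of the variable plumbing above, a hypothetical serverless deploy --stage prod resolves ${opt:stage, self:provider.stage} to prod, which cascades through the custom section:

# Resolved values for --stage prod (illustration only, not part of the config):
custom:
  stage: prod
  tables:
    projects: projects-prod                # projects-${self:custom.stage}
  customDomain:
    domainName: api.dark-kite.com          # ${self:custom.domains.prod}
    basePath: configurations
    stage: prod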
name: Continuous integration
on:
push: # run when commits are added to master
branches:
- master
tags:
- '[0-9]+' # match version tags with only numbers
pull_request: # run on pr's against master
branches:
- master
env:
default-python: "3.10"
jobs:
check-code-formatting:
name: Check code formatting
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Check code formatting
uses: psf/black@stable
check-coding-style:
name: Check coding style
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Python ${{ env.default-python }}
uses: actions/setup-python@v2
with:
python-version: ${{ env.default-python }}
- name: Upgrade pip, Install nox
run: |
python -m pip install --upgrade pip
python -m pip install nox
- name: Check coding style
run: |
nox --error-on-missing-interpreters --non-interactive --session lint
check-static-types:
name: Check static types
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Python ${{ env.default-python }}
uses: actions/setup-python@v2
with:
python-version: ${{ env.default-python }}
- name: Upgrade pip, Install nox
run: |
python -m pip install --upgrade pip
python -m pip install nox
- name: Check static types
run: |
nox --error-on-missing-interpreters --non-interactive --session types
tests:
name: Run tests
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ ubuntu-latest ]
python-version: [ "3.6", "3.7", "3.8", "3.9", "3.10" ]
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Determine pip cache directory
id: pip-cache
run: |
echo "::set-output name=dir::$(pip cache dir)"
- name: Cache pip cache
uses: actions/cache@v2
with:
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip${{ matrix.python-version }}
- name: Upgrade pip and install nox
run: |
python -m pip install --upgrade pip
python -m pip install nox
- name: Run tests
run: |
nox --non-interactive --session tests-${{ matrix.python-version }}
build-docs:
name: Test building docs
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Python ${{ env.default-python }}
uses: actions/setup-python@v2
with:
python-version: ${{ env.default-python }}
- name: Upgrade pip and install nox
run: |
python -m pip install --upgrade pip
python -m pip install nox
- name: Build docs
run: |
nox --error-on-missing-interpreters --non-interactive --session docs
publish:
    name: Publish to PyPI
runs-on: ubuntu-latest
needs:
- check-code-formatting
- check-coding-style
- check-static-types
- tests
- build-docs
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install dependencies
run: python -m pip install wheel
- name: Set version
run: |
if [[ "${{ github.ref }}" == "refs/tags/"* ]]
then
VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,' | sed -e 's/^v//')
else
VERSION=$(date +%Y%m%d).$(date +%H%M%S)
fi
echo ${VERSION}
sed -i "s/__VERSION__/${VERSION}/g" pdfminer/__init__.py
- name: Build package
run: python setup.py sdist bdist_wheel
- name: Generate changelog
run: sed '1,/## \[/d;/## \[/Q' CHANGELOG.md > ${{ github.workspace }}-CHANGELOG.md
      - name: Publish package to PyPI
if: startsWith(github.ref, 'refs/tags')
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_API_TOKEN }}
- name: Create GitHub release
if: startsWith(github.ref, 'refs/tags')
uses: softprops/action-gh-release@v1
id: create_release
with:
token: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
body_path: ${{ github.workspace }}-CHANGELOG.md
files: |
dist/*.tar.gz
dist/*.whl
|
.github/workflows/actions.yml
|
openapi: 3.0.0
info:
description: A Tateru Manager is used to communicate with a machine manager to extract inventory and configure boot devices
version: 0.0.2
title: Tateru Manager API
license:
name: Apache 2.0
url: 'http://www.apache.org/licenses/LICENSE-2.0.html'
tags:
- name: inventory
description: Inventory scanning
- name: deploy
description: Deployment operations
paths:
/v1/machines:
get:
tags:
- inventory
summary: fetches all known machines
operationId: fetchMachines
description: |
Returns all currently active machine objects
responses:
'200':
description: list of machine objects
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/Machine'
/v1/machines/{uuid}/boot-installer:
post:
tags:
- deploy
summary: makes a machine boot into the Tateru installer environment
operationId: bootInstaller
description: |
Calling this action will cause a machine to boot into the Tateru installer
environment, possibly by reconfiguring bootup order and doing a network boot.
requestBody:
content:
application/json:
schema:
type: object
properties:
ssh_pub_key:
description: 'The SSH public key to allow SSH access to the installer environment'
type: string
example: 'ssh-ed25519 AAA[..]AAV user@laptop'
required:
- ssh_pub_key
parameters:
- name: 'uuid'
in: 'path'
description: 'The machine UUID to boot the installer on'
required: true
schema:
type: 'string'
responses:
'200':
description: the request succeeded
'404':
description: the requested machine was not found
components:
schemas:
Machine:
type: object
required:
- uuid
properties:
uuid:
type: string
format: uuid
example: d290f1ee-6c54-4b01-90e6-d701748f0851
serialNumber:
type: string
example: XYZ1234
assetTag:
type: string
example: '00203'
name:
type: string
example: VMLNX01
|
api/manager.api.yaml
|
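Putting the boot-installer operation together: the request below is assembled from the schema and examples in the spec above, reusing the Machine example's UUID and the spec's own (truncated) SSH key example. The JSON body doubles as valid YAML:

# POST /v1/machines/d290f1ee-6c54-4b01-90e6-d701748f0851/boot-installer
{
  "ssh_pub_key": "ssh-ed25519 AAA[..]AAV user@laptop"
}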
author: Arfoire#9219, EnigWa#4825 and danii#8748
config: |+
options swap_delay=12 debug=true iteration=1000 duration=100 workers=30 mode=sl;
####----GENERATED CHARACTER BLOCK DO NOT EDIT----####
sucrose char lvl=90/90 cons=6 talent=9,9,9;
sucrose add weapon="hakushinring" refine=5 lvl=90/90;
sucrose add set="viridescentvenerer" count=5;
sucrose add stats hp=4780 atk=311 em=560 ; #main
sucrose add stats def%=0.124 def=39.36 hp=507.88 hp%=0.0992 atk=33.08 atk%=0.5952 er=0.3306 em=118.92 cr=0.1324 cd=0.1324;
beidou char lvl=90/90 cons=6 talent=9,9,9;
beidou add weapon="serpentspine" refine=1 lvl=90/90 +params=[stacks=5];
beidou add set="emblemofseveredfate" count=5;
beidou add stats hp=4780 atk=311 atk%=0.466 electro%=0.466 cd=0.622 ; #main
beidou add stats def%=0.124 def=39.36 hp=507.88 hp%=0.0992 atk=33.08 atk%=0.1984 er=0.1102 em=39.64 cr=0.3972 cd=0.662;
fischl char lvl=90/90 cons=6 talent=9,9,9;
fischl add weapon="thestringless" refine=3 lvl=90/90;
fischl add set="tenacityofthemillelith" count=5;
fischl add stats hp=4780 atk=311 atk%=0.466 electro%=0.466 cr=0.311 ; #main
fischl add stats def%=0.124 def=39.36 hp=507.88 hp%=0.0992 atk=33.08 atk%=0.1984 er=0.1102 em=39.64 cr=0.331 cd=0.7944;
xingqiu char lvl=90/90 cons=6 talent=9,9,9;
xingqiu add weapon="lionsroar" refine=3 lvl=90/90;
xingqiu add set="noblesseoblige" count=5;
xingqiu add stats hp=4780 atk=311 er=0.518 hydro%=0.466 cr=0.311 ; #main
xingqiu add stats def%=0.124 def=39.36 hp=507.88 hp%=0.0992 atk=33.08 atk%=0.1488 er=0.1653 em=39.64 cr=0.331 cd=0.7944;
####----END GENERATED CHARACTER BLOCK DO NOT EDIT----####
#enemies and particles:
energy every interval=480,720 amount=1;
target lvl=100 resist=.10;
#action list:
active fischl;
fischl burst;
sucrose attack, burst;
xingqiu skill, burst, attack;
beidou skill, burst;
sucrose
attack:1, skill, jump,
attack:2, dash,
attack:2, dash,
attack:2, dash,
attack:2, jump,
attack
;
beidou skill[counter=2], attack;
fischl skill, attack:2;
sucrose
attack:1, skill, jump,
attack:2, dash,
attack:2, jump,
attack:1, charge,
attack:2, charge;
wait 5; #for more hydro application
sucrose
attack:1, charge
;
beidou skill, attack:2;
sucrose attack:1, charge;
fischl attack:2;
restart;
description: Sucrose Taser.
hash: 260bedd71516b06616f694e1d6f86046631a96c8
team:
- name: beidou
con: 6
weapon: serpentspine
refine: 1
er: 0.1102
talents:
attack: 9
skill: 9
burst: 9
- name: fischl
con: 6
weapon: thestringless
refine: 3
er: 0.1102
talents:
attack: 9
skill: 9
burst: 9
- name: sucrose
con: 6
weapon: hakushinring
refine: 5
er: 0.3306
talents:
attack: 9
skill: 9
burst: 9
- name: xingqiu
con: 6
weapon: lionsroar
refine: 3
er: 0.6833
talents:
attack: 9
skill: 9
burst: 9
dps: 49519.416962433635
mode: sl
duration: 99.9999999999986
target_count: 1
viewer_key: <KEY>
|
db/Fischl/bdfsscxq.yaml
|
title: HIS documentation # < 60 chars
summary: Explore documentation for Host Integration Server # < 160 chars
# brand: aspnet | azure | dotnet | dynamics | m365 | ms-graph | office | power-platform | project | sharepoint | sql | sql-server | teams | vs | visual-studio | windows | xamarin
brand: m365
metadata:
title: HIS documentation # Required; page title displayed in search results. Include the brand. < 60 chars.
description: Explore documentation for Host Integration Server # Required; article description that is displayed in search results. < 160 chars.
ms.service: host-integration-server #Required; service per approved list. service slug assigned to your service by ACOM.
ms.topic: hub-page # Required
author: MandiOhlinger #Required; your GitHub user alias, with correct capitalization.
ms.author: mandia #Required; microsoft alias of author; optional team alias.
ms.date: 04/07/2020 #Required; mm/dd/yyyy format.
# highlightedContent section (optional)
# Maximum of 8 items
highlightedContent:
# itemType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | sample | tutorial | video | whats-new
items:
# Card
- title: What is HIS?
itemType: overview
url: what-is-his.md
# Card
- title: What’s New?
itemType: whats-new
url: install-and-config-guides/what-s-new-in-his-2016.md
# Card
- title: Install & Config
itemType: how-to-guide
url: install-and-config-guides/installing-his-2016.md
# additionalContent section (optional)
# Card with summary style
additionalContent:
sections:
- items:
# Card
- title: Core integration
summary: Learn about network, data, application & message integration, and more
url: core/host-integration-server-core-documentation.md
# Card
- title: Enterprise single sign-on security
summary: Read about SSO, and how it secures your applications
url: esso/enterprise-single-sign-on1.md
# Card
- title: OLE DB Provider for DB2
summary: Read all about installation and configuration, tooling, data type mapping, security, and more
url: db2oledbv/microsoft-ole-db-provider-for-db2.md
|
his/index.yml
|
openapi: 3.1.0
info:
title: Gre traffic packet header
version: ^0.0.0
components:
schemas:
Flow.Gre:
description: Gre packet header
type: object
properties:
checksum_present:
description: >-
Checksum bit.
Set to 1 if a checksum is present.
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
key_present:
description: >-
Key bit.
Set to 1 if a key is present.
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
seq_number_present:
description: >-
Sequence number bit.
Set to 1 if a sequence number is present.
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
reserved0:
description: >-
Reserved bits.
Set to 0.
9 bits.
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
version:
description: >-
Gre version number.
Set to 0.
3 bits.
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
protocol:
description: >-
Indicates the ether protocol type of the encapsulated payload.
- 0x0800 ipv4
- 0x86DD ipv6
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
checksum:
description: >-
Present if the checksum_present bit is set.
Contains the checksum for the gre header and payload.
16 bits.
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
reserved1:
description: >-
Reserved bits.
Set to 0.
16 bits.
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
key:
description: >-
Present if the key_present bit is set.
Contains an application specific key value.
32 bits
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
sequence_number:
description: >-
Present if the seq_number_present bit is set.
Contains a sequence number for the gre packet.
32 bits
$ref: './patterns.yaml#/components/schemas/Flow.Pattern'
|
flow/packet-headers/gre.yaml
|
description: >-
YOLO v2 trained on VOC dataset by AVA team with 0.7 sparsity level.
task_type: detection
files:
- name: parameters.yml
size: 222
checksum: a748263155086e1cd99d15486648d9f5ff2a5b342c3018a01f51505381b7d5d3928b40bfbb3c829994e72b020dc03413
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/yolo-v2-ava-sparse-70-0001/parameters.yml
- name: FP32/yolo-v2-ava-sparse-70-0001.xml
size: 107088
checksum: fda866d8639169aacf08b50fece71b67cbdaba9b6ea1e4ca3053227e20119edbe03b6e61ccbb1b2bd68408e623aa6ed4
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/yolo-v2-ava-sparse-70-0001/FP32/yolo-v2-ava-sparse-70-0001.xml
- name: FP32/yolo-v2-ava-sparse-70-0001.bin
size: 202580272
checksum: db275a2387d3950fdd782b7bce27e413b6b514da1321c87d4e21124bbf596fb92cca81d6371cef6f098a9ae66177f295
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/yolo-v2-ava-sparse-70-0001/FP32/yolo-v2-ava-sparse-70-0001.bin
- name: FP16/yolo-v2-ava-sparse-70-0001.xml
size: 144631
checksum: e44a67d4789cb7806f5c06a89b2b891e1db7deb4199d666aaa2f05428e59e49317607c08cb94b46779e69f4bf0bc3966
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/yolo-v2-ava-sparse-70-0001/FP16/yolo-v2-ava-sparse-70-0001.xml
- name: FP16/yolo-v2-ava-sparse-70-0001.bin
size: 101290152
checksum: 45ed3148f67cc1cc8f069e88eb490244e210f6a2573e806db1b7d606f343eb70b99b3225e39cf46a559ef70310d7162d
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/yolo-v2-ava-sparse-70-0001/FP16/yolo-v2-ava-sparse-70-0001.bin
- name: FP16-INT8/yolo-v2-ava-sparse-70-0001.xml
size: 226710
checksum: d5d8aad1428871845a4811526416ae75eb5fb6ad8fec56d204913c09e8fd88525bfc7614567ac2e977b45cbf663498d0
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/yolo-v2-ava-sparse-70-0001/FP16-INT8/yolo-v2-ava-sparse-70-0001.xml
- name: FP16-INT8/yolo-v2-ava-sparse-70-0001.bin
size: 50739424
checksum: bc6aadb6721558639358b8a108ad29494ca50cdbb96af6c5776c556446c346cab0fec1e156deeb36fea645c9b29d8ef1
source: https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/yolo-v2-ava-sparse-70-0001/FP16-INT8/yolo-v2-ava-sparse-70-0001.bin
input_info:
- name: data
shape: [1, 416, 416, 3]
layout: NHWC
framework: dldt
license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE
|
models/intel/yolo-v2-ava-sparse-70-0001/model.yml
|
swagger: '2.0'
type: object
description: Full character, returned when queried using UID
required:
  - uid
  - name
properties:
  uid:
    type: string
    description: Character unique ID
  name:
    type: string
    description: Character name
gender:
$ref: '#/definitions/Gender'
description: Character gender
yearOfBirth:
type: integer
description: Year the character was born
monthOfBirth:
type: integer
description: Month the character was born
dayOfBirth:
type: integer
description: Day the character was born
placeOfBirth:
type: string
description: Place of birth
yearOfDeath:
type: integer
description: Year the character died
monthOfDeath:
type: integer
description: Month the character died
dayOfDeath:
type: integer
description: Day the character died
placeOfDeath:
type: string
description: Place of death
height:
type: integer
description: Height in centimeters
weight:
type: integer
description: Weight in kilograms
deceased:
type: boolean
description: Whether this character is deceased
bloodType:
$ref: '#/definitions/BloodType'
description: Blood type
maritalStatus:
$ref: '#/definitions/MaritalStatus'
description: Marital status
serialNumber:
type: string
description: Serial number
hologramActivationDate:
type: string
description: Hologram activation date
hologramStatus:
type: string
description: Hologram status
hologramDateStatus:
type: string
description: Hologram date status
hologram:
type: boolean
description: Whether this character is a hologram
fictionalCharacter:
type: boolean
description: Whether this character is a fictional character (from universe point of view)
mirror:
type: boolean
description: Whether this character is from mirror universe
alternateReality:
type: boolean
description: Whether this character is from alternate reality
performers:
type: array
items:
$ref: '#/definitions/PerformerBase'
description: Performers who played this character
episodes:
type: array
items:
$ref: '#/definitions/EpisodeBase'
description: Episodes in which this character appeared
movies:
type: array
items:
$ref: '#/definitions/MovieBase'
description: Movies in which this character appeared
characterSpecies:
type: array
items:
$ref: '#/definitions/CharacterSpecies'
description: Species this character belongs to
characterRelations:
type: array
items:
$ref: '#/definitions/CharacterRelation'
description: Relations with other characters
titles:
type: array
items:
$ref: '#/definitions/TitleBase'
description: Titles this character holds
occupations:
type: array
items:
$ref: '#/definitions/OccupationBase'
description: Occupations of this character
organizations:
type: array
items:
$ref: '#/definitions/OrganizationBase'
description: Organizations this character has affiliation with
|
stapi_swagger_specs/character/entity/characterFull.yaml
|
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: objectscale-portal
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: objectscale-manager
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: objectscale-portal
app.kubernetes.io/part-of: {{ .Release.Name }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name}}
operator: objectscale-operator
product: objectscale
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: objectscale-manager
app.kubernetes.io/component: objectscale-portal
template:
metadata:
labels:
app.kubernetes.io/name: objectscale-manager
app.kubernetes.io/component: objectscale-portal
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name}}
operator: objectscale-operator
product: objectscale
spec:
{{- if .Values.global.registrySecret }}
imagePullSecrets:
- name: {{ .Values.global.registrySecret }}
{{- end }}
volumes:
- name: config-volume
configMap:
name: objectscale-portal
{{- if (eq .Values.global.platform "VMware") }}
- name: certificate
secret:
{{- if (eq .Release.Namespace "dellemc-objectscale-system") }}
secretName: objectscale-plugin-secret
{{- else }}
secretName: objectscale-plugin-secret-{{ .Release.Namespace }}
{{- end }}
{{- end }}
containers:
- name: objectscale-portal
image: {{ .Values.global.registry }}/{{ .Values.image.repository }}:{{ default .Values.tag .Values.image.tag }}
env:
- name: OPERATOR_NAME
value: objectscale-operator
imagePullPolicy: {{ default .Values.pullPolicy .Values.image.pullPolicy }}
volumeMounts:
- name: config-volume
mountPath: /usr/share/nginx/html/plugin.json
subPath: plugin.json
- name: config-volume
mountPath: /conf/upstream.conf
subPath: upstream.conf
- name: config-volume
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
{{- if (eq .Values.global.platform "VMware") }}
- name: certificate
mountPath: /etc/nginx/ssl
{{- end }}
|
objectscale-portal/templates/objectscale-portal-deployment.yaml
|
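A sketch of the values this template consumes. The keys are taken from the .Values references above; every value shown is an assumption, not a documented default:

global:
  registry: registry.example.com/objectscale   # hypothetical registry
  registrySecret: ""                           # empty skips the imagePullSecrets block
  platform: VMware                             # enables the certificate volume and mount
image:
  repository: objectscale-portal               # hypothetical repository name
  tag: ""                                      # empty falls back to the top-level tag
tag: 1.0.0                                     # hypothetical fallback tag
pullPolicy: IfNotPresent                       # fallback for image.pullPolicy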
items:
- uid: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinition
id: ActivityLogsQueryDefinition
artifact: com.microsoft.azure:azure-mgmt-monitor:1.37.0
parent: com.microsoft.azure.management.monitor
langs:
- java
name: ActivityLogs.ActivityLogsQueryDefinition
nameWithType: ActivityLogs.ActivityLogsQueryDefinition
fullName: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinition
type: Interface
package: com.microsoft.azure.management.monitor
summary: The entirety of a Activity Logs query definition.
syntax:
content: public static interface ActivityLogs.ActivityLogsQueryDefinition extends ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataStartTimeFilter, ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataEndFilter, ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataFieldFilter, ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsSelectFilter, ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsQueryExecute
implements:
- com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataStartTimeFilter
- com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataEndFilter
- com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataFieldFilter
- com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsSelectFilter
- com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsQueryExecute
references:
- uid: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsSelectFilter
name: ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsSelectFilter
nameWithType: ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsSelectFilter
fullName: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsSelectFilter
- uid: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsQueryExecute
name: ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsQueryExecute
nameWithType: ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsQueryExecute
fullName: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithActivityLogsQueryExecute
- uid: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataFieldFilter
name: ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataFieldFilter
nameWithType: ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataFieldFilter
fullName: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataFieldFilter
- uid: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataStartTimeFilter
name: ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataStartTimeFilter
nameWithType: ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataStartTimeFilter
fullName: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataStartTimeFilter
- uid: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataEndFilter
name: ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataEndFilter
nameWithType: ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataEndFilter
fullName: com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinitionStages.WithEventDataEndFilter
|
docs-ref-autogen/com.microsoft.azure.management.monitor.ActivityLogs.ActivityLogsQueryDefinition.yml
|
trainer:
callbacks:
- class_path: code.custom_callbacks.UpdateBatchSizeDataLoader
init_args:
batch_sizes:
- 512
- 512
- 256
- 128
- 64
- 32
- class_path: code.custom_callbacks.UpdateMixingDepth
init_args:
epochs_for_each_depth:
- 2
- 4
- 16
- 32
- 64
- 64
fade_for_each_depth:
- 50
- 50
- 50
- 50
- 50
gradient_clip_val: 1
gradient_clip_algorithm: norm
max_epochs: 182
limit_train_batches: 1.0
log_every_n_steps: 10
reload_dataloaders_every_n_epochs: 1
logger:
class_path: pytorch_lightning.loggers.CometLogger
init_args:
api_key: <KEY>
project_name: gans-specialization
model:
generator:
class_path: code.models.Generator
init_args:
resolution: 128
conditional: False
num_classes: 0
z_latent_dim: 512
w_latent_dim: 512
hidden_dim: 512
num_hidden_layers: 8
learning_rate_multiplier: 0.01
activation_func: "lrelu"
use_wscale: True
normalize_latents: True
num_channels: 3
structure: "linear"
blur_filter:
- 1
- 2
- 1
truncation_psi: 0.7
truncation_cutoff: 8
w_latent_avg_beta: 0.995
style_mixing_prob: 0.9
discriminator:
class_path: code.models.Discriminator
init_args:
resolution: 128
structure: "linear"
num_channels: 3
conditional: False
num_classes: 0
non_linearity: "lrelu"
use_wscale: True
blur_filter:
- 1
- 2
- 1
minibatch_std_group_size: 4
minibatch_std_num_features: 1
ignore_labels: False
num_debug_samples: 25
loss: logistic
drift: 0.001
disc_repeats: 1
use_ema: false
ema_decay: 0.999
gen_optimizer:
class_path: torch.optim.Adam
init_args:
lr: 0.003
betas:
- 0
- 0.99
eps: 1e-8
disc_optimizer:
class_path: torch.optim.Adam
init_args:
lr: 0.003
betas:
- 0
- 0.99
eps: 1e-8
data:
data_dir: ../data_set/animeface-character-dataset/thumb
image_size:
- 128
- 128
batch_size: 64
num_workers: 4
|
src/configs/config.yaml
|
---
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
- name: Check pre-requisites
  fail:
    msg: "Environment variable {{item}} not set. Please declare an environment variable with an appropriate value for the sample to work."
when: item not in ansible_env
with_items:
- "SAMPLE_COMPARTMENT_OCID"
- "SAMPLE_IMAGE_OCID"
- "SAMPLE_AD_NAME"
#==========================================================================================
- name: Create a VCN
oci_network_vcn:
compartment_id: "{{ instance_compartment }}"
display_name: "{{ vcn_name }}"
cidr_block: "{{ vcn_cidr_block }}"
dns_label: "{{ vcn_dns_label }}"
register: result
- set_fact:
vcn_id: "{{ result.vcn.id }}"
#==========================================================================================
- name: Create a new Internet Gateway
oci_network_internet_gateway:
compartment_id: "{{ instance_compartment }}"
vcn_id: "{{ vcn_id }}"
name: "{{ ig_name }}"
is_enabled: 'yes'
state: 'present'
register: result
- set_fact:
ig_id: "{{ result.internet_gateway.id }}"
#==========================================================================================
- name: Create route table to connect internet gateway to the VCN
oci_network_route_table:
compartment_id: "{{ instance_compartment }}"
vcn_id: "{{ vcn_id }}"
name: "{{ route_table_name }}"
route_rules: "{{ route_table_rules }}"
state: 'present'
register: result
- set_fact:
rt_id: "{{ result.route_table.id }}"
#==========================================================================================
# Create a security list for allowing access to public instance
- name: Create a security list for allowing access to public instance
oci_network_security_list:
name: "{{ securitylist_name }}"
compartment_id: "{{ instance_compartment }}"
vcn_id: '{{ vcn_id }}'
ingress_security_rules:
# Allow incoming SSH connections
- source: "{{ quad_zero_route }}"
protocol: "{{ TCP_protocol }}"
tcp_options:
destination_port_range:
min: "{{ SSH_port }}"
max: "{{ SSH_port }}"
egress_security_rules:
# Allow ssh connections outside
- destination: "{{ quad_zero_route }}"
protocol: "{{ TCP_protocol }}"
tcp_options:
destination_port_range:
min: "{{ SSH_port }}"
max: "{{ SSH_port }}"
register: result
- set_fact:
instance_security_list_ocid: "{{ result.security_list.id }}"
#==========================================================================================
- name: Create a subnet to host the public instance. Link security_list and route_table.
oci_network_subnet:
availability_domain: "{{ instance_ad }}"
cidr_block: "{{ subnet_cidr }}"
compartment_id: "{{ instance_compartment }}"
display_name: "{{ subnet_name }}"
prohibit_public_ip_on_vnic: false
route_table_id: "{{ rt_id }}"
security_list_ids: [ "{{ instance_security_list_ocid }}" ]
vcn_id: '{{ vcn_id }}'
dns_label: "{{ subnet_dns_label }}"
register: result
- set_fact:
instance_subnet_id: "{{ result.subnet.id }}"
#==========================================================================================
- name: Launch an instance
oci_compute_instance:
availability_domain: "{{ instance_ad }}"
compartment_id: "{{ instance_compartment }}"
name: "{{ instance_name }}"
image_id: "{{ instance_image }}"
shape: "{{ instance_shape }}"
create_vnic_details:
hostname_label: "{{ instance_hostname }}"
subnet_id: "{{ instance_subnet_id }}"
register: result
- name: Print instance details
debug:
msg: "Launched a new instance {{ result }}"
- set_fact:
instance_id: "{{ result.instance.id }}"
|
samples/compute/boot_volume/setup.yaml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-17 08:33"
game: "Unreal Tournament"
name: "CTF-BT-(Roel)BasedOnADream"
author: "<NAME>"
description: "None"
releaseDate: "2009-02"
attachments:
- type: "IMAGE"
name: "CTF-BT-(Roel)BasedOnADream_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/BunnyTrack/R/CTF-BT-(Roel)BasedOnADream_shot_1.png"
originalFilename: "CTF-BT-(Roel)BasedOnADream.zip"
hash: "a722a69fda55465dfd95fee573d37e31b7e66aff"
fileSize: 1538068
files:
- name: "CTF-BT-(Roel)BasedOnADream.unr"
fileSize: 5508074
hash: "cd3ea1bbcc8cdb483b267e5f7867453c64b66113"
- name: "i4Games_BTScripts_200607.u"
fileSize: 335186
hash: "7b3d32af89fcf7a20898aa75e7fd5402a3c88373"
otherFiles: 0
dependencies:
CTF-BT-(Roel)BasedOnADream.unr:
- status: "OK"
name: "i4Games_BTScripts_200607"
downloads:
- url: "http://www.ut-files.com/index.php?dir=Maps/BunnyTrack/&file=CTF-BT-%28Roel%29BasedOnADream.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/BunnyTrack/R/CTF-BT-(Roel)BasedOnADream.zip"
main: true
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/BunnyTrack/CTF-BT/&file=CTF-BT-%28Roel%29BasedOnADream.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/BunnyTrack&file=CTF-BT-%28Roel%29BasedOnADream.zip"
main: false
repack: false
state: "OK"
- url: "http://www.i4games.euhttp://www.i4games.eu/maps/CTF-BT-%28Roel%29BasedOnADream.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/BunnyTrack/R/a/7/22a69f/CTF-BT-(Roel)BasedOnADream.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/BunnyTrack/R/a/7/22a69f/CTF-BT-(Roel)BasedOnADream.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "BunnyTrack"
title: "BT: Based on a Dream"
playerCount: "Unknown"
themes:
Nali Temple: 1.0
bots: false
|
content/Unreal Tournament/Maps/BunnyTrack/R/a/7/22a69f/ctf-bt-roelbasedonadream_[a722a69f].yml
|
version: "3.6"
services:
postgres:
image: postgres:12
container_name: mcsp_database
ports:
- "5432:5432"
restart: always
volumes:
- ./init.sql:/docker-entrypoint-initdb.d/init.sql
- db_data:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: root
POSTGRES_DB: postgres
baas:
image: hasura/graphql-engine:v2.0.0-alpha.10
container_name: mcsp_baas
ports:
- "8080:8000"
depends_on:
- "postgres"
restart: always
environment:
HASURA_GRAPHQL_DATABASE_URL: postgres://postgres:root@postgres:5432/postgres
HASURA_GRAPHQL_ENABLE_CONSOLE: "true"
HASURA_GRAPHQL_ADMIN_SECRET: microscope
HASURA_GRAPHQL_SERVER_PORT: 8000
HASURA_GRAPHQL_AUTH_HOOK: http://host.docker.internal:5010/api/Webhook/hasura
# lab:
# build: ./src/Microscope.Lab
# container_name: mcsp_lab
# ports:
# - "8081:8888"
# environment:
# JUPYTER_NO_PASSWORD: "true"
# volumes:
# - ./src/Microscope.Lab/work:/home/jupyter/work
storage:
image: bitnami/minio:latest
container_name: mcsp_storage
ports:
- "8082:9000"
environment:
MINIO_ACCESS_KEY: admin
MINIO_SECRET_KEY: microscope
admin:
build:
context: .
dockerfile: src/Microscope.Admin/Dockerfile
container_name: mcsp_admin
ports:
- "8085:80"
api:
build:
context: .
dockerfile: src/Microscope.Api/Dockerfile
container_name: mcsp_api
ports:
- "8086:80"
keycloak:
image: quay.io/keycloak/keycloak:latest
container_name: mcsp_identity
environment:
      KEYCLOAK_IMPORT: /tmp/realm-export.json
DB_VENDOR: POSTGRES
DB_ADDR: postgres
DB_DATABASE: postgres
DB_USER: postgres
DB_SCHEMA: mcsp_identity
DB_PASSWORD: <PASSWORD>
KEYCLOAK_USER: admin
KEYCLOAK_PASSWORD: <PASSWORD>
volumes:
- ./realm-export.json:/tmp/realm-export.json
      # Uncomment the line below to specify JDBC parameters. The value below is only an example; don't use it in production without understanding its implications. It is highly recommended to read the PostgreSQL JDBC driver documentation first.
#JDBC_PARAMS: "ssl=true"
ports:
- 8083:8080
depends_on:
- postgres
volumes:
db_data:
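# A short usage sketch, assuming this file is saved as docker-compose.yml in
# the project root:
#
#   docker-compose up -d postgres baas   # start only the database and Hasura
#   docker-compose up -d                 # start the full stack
#
# With the port mapping above, the Hasura console is then reachable on
# http://localhost:8080 (admin secret "microscope", per
# HASURA_GRAPHQL_ADMIN_SECRET).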
|
docker-compose.yml
|
homepage: http://code.haskell.org/~thielema/storable-record/
changelog-type: ''
hash: 90174659c1f74a9582efc71fe9aa64963b1f07bac085fc1223cb3bad2a7b6e80
test-bench-deps: {}
maintainer: <NAME> <<EMAIL>>
synopsis: Elegant definition of Storable instances for records
changelog: ''
basic-deps:
base: '>=3 && <6'
semigroups: '>=0.1 && <1.0'
utility-ht: '>=0.0.14 && <0.1'
transformers: '>=0.2 && <0.6'
QuickCheck: '>=2 && <3'
all-versions:
- 0.0.1
- 0.0.2
- 0.0.2.1
- 0.0.2.2
- 0.0.2.3
- 0.0.2.4
- 0.0.2.5
- 0.0.3
- 0.0.3.1
- 0.0.4
- 0.0.4.1
- 0.0.5
- 0.0.6
author: <NAME> <<EMAIL>>
latest: 0.0.6
description-type: haddock
description: |-
With this package
you can build a Storable instance of a record type
from Storable instances of its elements in an elegant way.
It does not do any magic,
just a bit arithmetic to compute the right offsets,
that would be otherwise done manually
or by a preprocessor like C2HS.
I cannot promise that the generated memory layout
is compatible with that of a corresponding C struct.
However, the module generates the smallest layout
that is possible with respect to the alignment of the record elements.
If you encounter, that a record does not have a compatible layout,
we should fix that.
But also without C compatibility this package is useful
e.g. in connection with StorableVector.
We provide Storable instance support for several cases:
* If you wrap a type in a @newtype@,
then you can lift its 'Storable' instance to that @newtype@
with the module "Foreign.Storable.Newtype".
This way you do not need the @GeneralizedNewtypeDeriving@ feature of GHC.
* If you have a type that is an instance of 'Traversable',
you can use that feature for implementation of 'Storable' methods.
The module "Foreign.Storable.Traversable"
allows manipulation of the portion of your type,
that is accessible by 'Traversable' methods.
For instance with the type
@data T a = Cons Int [a]@
and an according 'Traversable' implementation,
you can load and store the elements of the contained list.
This may be part of a 'Storable' implementation of the whole type.
* If you have a record containing elements of various types,
then you need module "Foreign.Storable.Record".
Note however that the Storable instances
defined with this package are quite slow in (up to) GHC-6.12.1.
I'm afraid this is due to incomplete inlining,
but we have still to investigate the problem.
For examples see packages @storable-tuple@ and @sample-frame@.
license-name: BSD-3-Clause
|
packages/st/storable-record.yaml
|
---
# tasks file for tealc
# clusters connections
- import_tasks: common/check_parameters.yaml
tags: [always, check-params]
no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: common/check_clients.yaml
tags: [always, check-clients]
no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: common/prepare_kubeconfigs.yaml
tags: [always, kubeconfigs]
no_log: "{{ not log_sensitive_data|bool }}"
# infra installation
- import_tasks: infra-setup/access_remote_cluster.yaml
tags: [setup-workers-access, strimzi-infra, never]
no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: infra-setup/install_tekton.yaml
tags: [tekton, strimzi-infra, never]
no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: infra-setup/install_argo.yaml
tags: [argo, strimzi-infra, never]
no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: monitoring/install_monitoring.yaml
tags: [monitoring, grafana, strimzi-infra, never]
when: not is_kubernetes
no_log: "{{ not log_sensitive_data|bool }}"
# Scenarios
- import_tasks: scenario-deployment/argo-deploy-strimzi-infra.yaml
tags: [argo-apps, argo-strimzi-infra, strimzi-infra, never]
no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: scenario-deployment/argo-deploy-strimzi-clients.yaml
tags: [argo-apps, argo-strimzi-clients, strimzi-infra, never]
no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: scenario-deployment/argo-deploy-strimzi-monitoring.yaml
tags: [argo-apps, argo-strimzi-monitoring, strimzi-infra, never]
no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: scenario-deployment/argo-deploy-strimzi-twitter-app.yaml
tags: [argo-apps, argo-strimzi-twitter-app, twitter-app, never]
no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: scenario-deployment/strimzi-infra-tkn-pipelines.yaml
tags: [tekton-pipelines, strimzi-infra, never]
when: not is_kubernetes
no_log: "{{ not log_sensitive_data|bool }}"
# Teardown
- import_tasks: infra-setup/delete_argo.yaml
  tags: [never, teardown, teardown-argo]
  no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: infra-setup/delete_tekton.yaml
  tags: [never, teardown, teardown-tekton]
  no_log: "{{ not log_sensitive_data|bool }}"
- import_tasks: monitoring/delete_monitoring.yaml
  tags: [never, teardown, teardown-monitoring]
  when: not is_kubernetes
  no_log: "{{ not log_sensitive_data|bool }}"
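# Note: tasks tagged `never` only run when one of their tags is requested
# explicitly, so a typical invocation names the desired stage, e.g. (the
# playbook filename below is an assumption):
#
#   ansible-playbook tealc.yaml --tags tekton,argo   # install CI infra
#   ansible-playbook tealc.yaml --tags teardown      # remove everything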
|
tealc/tasks/main.yml
|
resource_types:
- name: slack-notification
type: docker-image
source:
repository: cfcommunity/slack-notification-resource
tag: latest
resources:
- name: ons-design-system-release
type: github-release
source:
owner: ONSdigital
repository: design-system
access_token: ((github_access_token))
release: true
pre_release: true
- name: slack-alert
type: slack-notification
source:
url: ((slack_webhook_url))
jobs:
- name: Release
plan:
- get: ons-design-system-release
params:
include_source_tarball: true
trigger: true
- task: CDN Build
config:
platform: linux
image_resource:
type: docker-image
source:
repository: node
tag: 14.15.0
inputs:
- name: ons-design-system-release
outputs:
- name: dist
- name: templates
run:
path: sh
args:
- -exc
- |
cd ons-design-system-release
mkdir design-system
tar -xzf source.tar.gz -C design-system --strip-components=1
cd design-system
design_system_release=$(cat ../../ons-design-system-release/version)
yarn
RELEASE_VERSION=$design_system_release yarn cdn-bundle
mkdir ../../dist/$design_system_release
cp -R build/* ../../dist/$design_system_release
on_failure:
put: slack-alert
params:
channel: '#pat-lib-notifications'
attachments:
- pretext: Design System Build Failed
color: danger
title: Concourse Build $BUILD_ID
title_link: http://concourse.dev.eq.ons.digital/builds/$BUILD_ID
- task: Release to S3
params:
AWS_ACCESS_KEY_ID: ((aws_access_key))
AWS_SECRET_ACCESS_KEY: ((aws_secret_key))
AWS_DEFAULT_REGION: eu-west-1
config:
platform: linux
image_resource:
type: docker-image
source:
repository: mesosphere/aws-cli
inputs:
- name: dist
run:
path: sh
args:
- -exc
- |
aws s3 sync --acl public-read dist s3://((s3_bucket_name))/sdc/design-system/
on_failure:
put: slack-alert
params:
channel: '#pat-lib-notifications'
attachments:
- pretext: Design System CDN Release Failed
color: danger
title: Concourse Build $BUILD_ID
title_link: http://concourse.dev.eq.ons.digital/builds/$BUILD_ID
on_success:
put: slack-alert
params:
channel: '#pat-lib-notifications'
attachments:
- pretext: Design System CDN Release Successful
color: success
title: Concourse Build $BUILD_ID
title_link: http://concourse.dev.eq.ons.digital/builds/$BUILD_ID
|
ci/concourse.yml
|
name: Cleanup Images
on:
# https://docs.github.com/en/actions/learn-github-actions/events-that-trigger-workflows#workflow_call
workflow_call:
inputs:
org:
required: true
type: string
repo:
required: true
type: string
image:
required: true
type: string
secrets:
ghcr_token:
required: true
jobs:
get-untagged-images:
runs-on: ubuntu-latest
outputs:
ids: ${{ steps.untagged_images.outputs.ids }}
steps:
- name: Get Image Ids
id: get_images
# https://github.com/octokit/request-action
uses: octokit/request-action@v2.x
env:
GITHUB_TOKEN: ${{ secrets.ghcr_token }}
with:
org: ${{ inputs.org }}
repo: ${{ inputs.repo }}
image: ${{ inputs.image }}
# https://docs.github.com/en/rest/reference/packages#get-all-package-versions-for-a-package-owned-by-an-organization
route: GET /orgs/{org}/packages/container/{repo}%2F{image}/versions
- name: Get Untagged Images
id: untagged_images
shell: bash
run: |
set -e
data=$(echo -e ${{ toJSON(steps.get_images.outputs.data) }})
ids=$(echo "${data}" | jq -c '[.[] | select(.metadata.container.tags | length == 0) | .id]')
echo "::set-output name=ids::${ids}"
- name: Show Untagged Images
shell: bash
env:
UNTAGGED_IDS: ${{ steps.untagged_images.outputs.ids }}
run: |
for version in $(echo "${UNTAGGED_IDS}" | jq -c '.[]') ; do
echo $version
done
delete-image:
runs-on: ubuntu-latest
needs: get-untagged-images
if: ${{ needs.get-untagged-images.outputs.ids != '[]' }}
strategy:
matrix:
id: ${{fromJSON(needs.get-untagged-images.outputs.ids)}}
steps:
- name: Delete Image
# https://github.com/octokit/request-action
uses: octokit/request-action@v2.x
env:
GITHUB_TOKEN: ${{ secrets.ghcr_token }}
with:
org: ${{ inputs.org }}
repo: ${{ inputs.repo }}
image: ${{ inputs.image }}
version: ${{ matrix.id }}
# https://docs.github.com/en/rest/reference/packages#delete-package-version-for-an-organization
route: DELETE /orgs/{org}/packages/container/{repo}%2F{image}/versions/{version}
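# A minimal sketch of a caller workflow for this reusable workflow (the
# repository path and secret name below are assumptions):
#
# jobs:
#   cleanup:
#     uses: my-org/my-repo/.github/workflows/cleanup-images.yml@main
#     with:
#       org: my-org
#       repo: my-repo
#       image: my-image
#     secrets:
#       ghcr_token: ${{ secrets.GHCR_PAT }}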
|
.github/workflows/cleanup-images.yml
|
logging:
level: ${LOG_LEVEL:-INFO}
loggers:
log4j.logger.org.hibernate: "INFO"
org.hibernate.hql: "ERROR"
appenders:
- type: console
threshold: ALL
timeZone: UTC
logFormat: "%d [%thread] %-5level %c{15} - %msg%n%rEx"
server:
type: simple
applicationContextPath: /
adminContextPath: /admin
connector:
port: 8080
type: http
requestLog:
appenders:
- type: console
timeZone: UTC
jpa:
migrate: "true"
driverClass: "org.postgresql.Driver"
user: ${DB_USER:-ganesh.s}
password: ${DB_PASSWORD:-}
url: jdbc:postgresql://${DB_HOST:-localhost}:${DB_PORT:-5432}/${DB_NAME:-ds}?autoReconnect=true
properties:
hibernate.show_sql: ${DB_LOG_SQL:-true}
hibernate.format_sql: "false"
# hibernate.hbm2ddl.auto: create-drop
hibernate.ejb.interceptor: com.gojek.guice.QueuedProducerTransactionInterceptor
hibernate.dialect: com.gojek.jpa.util.ExtendedPostgreSQL94Dialect
hibernate.c3p0.acquireIncrement: 2
hibernate.c3p0.initialPoolSize: 3
hibernate.c3p0.minPoolSize: 5
hibernate.c3p0.maxPoolSize: ${DB_POOL_MAX_SIZE:-10}
hibernate.c3p0.maxIdleTime: 300
hibernate.c3p0.maxStatements: 500
hibernate.c3p0.idleConnectionTestPeriod: 30
hibernate.c3p0.preferredTestQuery: "SELECT 1"
cache:
host: ${REDIS_HOST:-localhost}
port: ${REDIS_PORT:-6379}
password: ${REDIS_PASSWORD:-}
timeout: ${REDIS_TIMEOUT:-5}
maxConnections: ${REDIS_MAX_CONNECTIONS:-25}
minConnections: ${REDIS_MIN_CONNECTIONS:-5}
maxIdleConnections: ${REDIS_MAX_IDLE_CONNECTIONS:-5}
amqp:
automaticRecovery: true
uri: amqp://${RABBITMQ_USER:-guest}:${RABBITMQ_PASSWORD:-guest}@${RABBITMQ_HOST:-localhost}:${RABBITMQ_PORT:-5672}
maxChannels: ${RABBITMQ_MAX_CHANNELS:-11}
minChannels: ${RABBITMQ_MIN_CHANNELS:-5}
maxIdleChannels: ${RABBITMQ_MAX_IDLE_CHANNELS:-5}
hosts:
- localhost
queue:
driverStatusDestination:
exchange: ${DRIVER_TOPIC_EXCHANGE:-driver_direct_exchange}
routingKey: driver.status
driverConsumer:
retryDestination:
exchange: ${DRIVER_RETRY_DIRECT_EXCHANGE:-driver_retry_direct_exchange}
maxRetries: ${MAX_DRIVER_EVENTS_CONSUMER_RETRIES:-5}
queueName: ${DRIVER_EVENTS_QUEUE:-driver_events}
maxQueueConsumers: ${MAX_DRIVER_EVENTS_QUEUE_CONSUMERS:-5}
swagger:
resourcePackage: com.gojek.ds.resource
metrics:
reporters:
- type: console
useRegexFilters: true
excludes:
- ch.*
- jvm.*
frequency: 600s
|
gojek-commons-examples/driver-location-service/src/main/resources/ds.yml
|
dashboard:
title: Horizon Failure Rate
rows:
- title: Description
height: 100px
panels:
- title: Description
content: |
**This dashboard is managed by [Grafyaml](http://docs.openstack.org/infra/system-config/grafyaml.html).**
If you would like to make changes to this dashboard, please see the grafana directory in [project-config](https://git.openstack.org/cgit/openstack-infra/project-config/tree/grafana/horizon.yaml).
type: text
- title: Graphs
height: 320px
panels:
- title: npm test Failure Rates (Gate queue)
span: 4
targets:
            - target: alias(movingAverage(asPercent(transformNull(stats_counts.zuul.tenant.openstack.pipeline.gate.project.git_openstack_org.openstack_horizon.master.job.nodejs-npm-run-test.FAILURE),sum(stats_counts.zuul.tenant.openstack.pipeline.gate.project.git_openstack_org.openstack_horizon.master.job.nodejs-npm-run-test.{SUCCESS,FAILURE})),'24hours'), 'nodejs-npm-run-test')
type: graph
- title: Unit Tests Failure Rates (Check queue)
span: 4
targets:
- target: alias(movingAverage(asPercent(transformNull(stats_counts.zuul.tenant.openstack.pipeline.check.project.git_openstack_org.openstack_horizon.master.job.openstack-tox-py27.FAILURE),sum(stats_counts.zuul.tenant.openstack.pipeline.check.project.git_openstack_org.openstack_horizon.master.job.openstack-tox-py27.{SUCCESS,FAILURE})),'24hours'), 'check-horizon-python27')
- target: alias(movingAverage(asPercent(transformNull(stats_counts.zuul.tenant.openstack.pipeline.check.project.git_openstack_org.openstack_horizon.master.job.openstack-tox-py35.FAILURE),sum(stats_counts.zuul.tenant.openstack.pipeline.check.project.git_openstack_org.openstack_horizon.master.job.openstack-tox-py35.{SUCCESS,FAILURE})),'24hours'), 'check-horizon-python35')
type: graph
- title: Unit Tests Failure Rates (Gate queue)
span: 4
targets:
- target: alias(movingAverage(asPercent(transformNull(stats_counts.zuul.tenant.openstack.pipeline.gate.project.git_openstack_org.openstack_horizon.master.job.openstack-tox-py27.FAILURE),sum(stats_counts.zuul.tenant.openstack.pipeline.gate.project.git_openstack_org.openstack_horizon.master.job.openstack-tox-py27.{SUCCESS,FAILURE})),'24hours'), 'gate-horizon-python27')
- target: alias(movingAverage(asPercent(transformNull(stats_counts.zuul.tenant.openstack.pipeline.gate.project.git_openstack_org.openstack_horizon.master.job.openstack-tox-py35.FAILURE),sum(stats_counts.zuul.tenant.openstack.pipeline.gate.project.git_openstack_org.openstack_horizon.master.job.openstack-tox-py35.{SUCCESS,FAILURE})),'24hours'), 'gate-horizon-python35')
type: graph
- title: Tempest Failure Rates (Check queue)
span: 4
targets:
            - target: alias(movingAverage(asPercent(transformNull(stats_counts.zuul.tenant.openstack.pipeline.check.project.git_openstack_org.openstack_horizon.master.job.horizon-dsvm-tempest-plugin.FAILURE),sum(stats_counts.zuul.tenant.openstack.pipeline.check.project.git_openstack_org.openstack_horizon.master.job.horizon-dsvm-tempest-plugin.{SUCCESS,FAILURE})),'24hours'), 'check-horizon-dsvm-tempest-plugin')
type: graph
- title: Tempest Failure Rates (Gate queue)
span: 4
targets:
            - target: alias(movingAverage(asPercent(transformNull(stats_counts.zuul.tenant.openstack.pipeline.gate.project.git_openstack_org.openstack_horizon.master.job.horizon-dsvm-tempest-plugin.FAILURE),sum(stats_counts.zuul.tenant.openstack.pipeline.gate.project.git_openstack_org.openstack_horizon.master.job.horizon-dsvm-tempest-plugin.{SUCCESS,FAILURE})),'24hours'), 'gate-horizon-dsvm-tempest-plugin')
type: graph
|
grafana/horizon.yaml
|
AWSTemplateFormatVersion: 2010-09-09
Description: >
Host a static website using S3 and Cloudfront.
Provides https access and redirect from www to apex domain
Expects a CloudFormation Export containing the ARN of an AWS Certificate Manager (ACM) certificate...
Parameters:
ApexDomain:
Type: String
Description: >
Apex Domain for web site, e.g. mysite.com (not www.mysite.com)
MinLength: 1
Resources:
OriginAccessIdentity:
Type: AWS::CloudFront::CloudFrontOriginAccessIdentity
Properties:
CloudFrontOriginAccessIdentityConfig:
Comment: !Ref ApexDomain
WebResourcesBucket:
Type: AWS::S3::Bucket
Properties:
PublicAccessBlockConfiguration: # private bucket, only CloudFront can access
BlockPublicAcls: true
BlockPublicPolicy: true
IgnorePublicAcls: true
RestrictPublicBuckets: true
WebResourcesBucketPolicy:
Type: AWS::S3::BucketPolicy
Properties:
Bucket: !Ref WebResourcesBucket
PolicyDocument:
Statement:
- Action: s3:GetObject
Effect: Allow
Resource: !Sub 'arn:aws:s3:::${WebResourcesBucket}/*'
Principal:
CanonicalUser: !GetAtt OriginAccessIdentity.S3CanonicalUserId
- Action: s3:ListBucket
Effect: Allow
Resource: !Sub 'arn:aws:s3:::${WebResourcesBucket}'
Principal:
CanonicalUser: !GetAtt OriginAccessIdentity.S3CanonicalUserId
Distribution:
Type: AWS::CloudFront::Distribution
Properties:
DistributionConfig:
Comment: !Ref ApexDomain
Enabled: true
Origins:
- DomainName: !Sub '${WebResourcesBucket}.s3.amazonaws.com' # using us-east-1; otherwise use region specific url to avoid redirect to bucket issue
Id: 'web-app'
S3OriginConfig:
OriginAccessIdentity: !Sub 'origin-access-identity/cloudfront/${OriginAccessIdentity}'
DefaultRootObject: index.html
DefaultCacheBehavior:
AllowedMethods:
- GET
- HEAD
- OPTIONS
TargetOriginId: 'web-app'
ForwardedValues:
QueryString: false
Cookies:
Forward: none
ViewerProtocolPolicy: redirect-to-https
PriceClass: PriceClass_All
HttpVersion: http2
Aliases:
- !Ref ApexDomain
ViewerCertificate:
AcmCertificateArn:
Fn::ImportValue: !Sub
- '${CertificateStackNameBase}-certificate:certificate-arn'
- { CertificateStackNameBase : !Join [ '-', !Split [ '.', !Ref ApexDomain ] ]} # domain names are not valid stack names - by convention replace dots with dashes
SslSupportMethod: sni-only
MinimumProtocolVersion: TLSv1.2_2018
CustomErrorResponses:
- ErrorCachingMinTTL: 0 # don't cache errors
ErrorCode: 403
ResponseCode: 200
ResponsePagePath: /index.html
- ErrorCachingMinTTL: 0
ErrorCode: 404
ResponseCode: 200
ResponsePagePath: /index.html
WebsiteDNSRecordSet:
Type: AWS::Route53::RecordSetGroup
Properties:
HostedZoneName: !Sub '${ApexDomain}.' # Route53 needs a dot on the end
RecordSets:
- Name: !Ref ApexDomain
Type: A
AliasTarget:
HostedZoneId: Z2FDTNDATAQYW2 # magic value for all CloudFront distributions
DNSName: !GetAtt Distribution.DomainName
Outputs:
WebResourcesBucket:
Value: !Ref WebResourcesBucket
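# Note: the Description mentions a www-to-apex redirect, but only apex
# resources are defined above. A minimal sketch of the usual pattern (the
# resource name below is an assumption) adds an S3 redirect bucket:
#
#   WwwRedirectBucket:
#     Type: AWS::S3::Bucket
#     Properties:
#       WebsiteConfiguration:
#         RedirectAllRequestsTo:
#           HostName: !Ref ApexDomain
#           Protocol: https
#
# fronted by a second CloudFront distribution (or Route53 alias) for
# www.<ApexDomain>.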
|
infrastructure/website-hosting.cfn.yaml
|
{{ if and .Release.IsInstall .Values.initialJobs.autoCreateCluster }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "stolon.fullname" . }}-create-cluster
labels:
{{- include "stolon.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install
"helm.sh/hook-delete-policy": before-hook-creation
"helm.sh/hook-weight": "1"
spec:
template:
metadata:
{{- with .Values.initialJobs.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "stolon.selectorLabels" . | nindent 8 }}
spec:
restartPolicy: OnFailure
serviceAccountName: {{ include "stolon.serviceAccountName" . }}
{{- with .Values.keeper.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.keeper.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: create-cluster
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: ["/usr/local/bin/stolonctl"]
args:
- init
- --cluster-name={{ include "stolon.fullname" . }}
- --store-backend={{ .Values.store.backend }}
{{- if eq .Values.store.backend "kubernetes" }}
- --kube-resource-kind={{ .Values.store.kubeResourceKind }}
{{- else }}
- --store-endpoints={{ .Values.store.endpoints }}
{{- end }}
- --yes
- '{ "initMode": "new", {{- range $key, $value := .Values.clusterSpec }} {{ $key | quote }}: {{ if typeIs "string" $value }} {{ $value | quote }} {{ else }} {{ $value }} {{ end }}, {{- end }} {{ if .Values.tls.enabled }} "pgParameters": {{- $pgParameters := .Values.pgParameters -}}{{ $all_init := set $pgParameters "ssl" "on" }}{{ $all_init := set $all_init "ssl_cert_file" "/certs/serverCrt.crt" }} {{ $all_init := set $all_init "ssl_key_file" "/certs/serverKey.key" }}{{ $all_init := set $all_init "ssl_ca_file" "/certs/rootCa.crt" }}{{ toJson $all_init }}{{ else }}"pgParameters": {{ toJson .Values.pgParameters }} {{ end}} }'
{{ end }}
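# A hedged sketch of the values this hook reads (the keys match the template
# references above; the concrete values are assumptions, not chart defaults):
#
#   initialJobs:
#     autoCreateCluster: true
#   store:
#     backend: kubernetes
#     kubeResourceKind: configmap
#   clusterSpec:
#     synchronousReplication: true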
|
charts/stolon/templates/hooks/create-cluster.yaml
|
on:
workflow_dispatch:
name: Publish on PyPi
jobs:
Version_Bumped:
runs-on: ubuntu-latest
outputs:
version: ${{ steps.master_version_bumped.outputs.version }}
steps:
- name: Checkout code
uses: actions/checkout@v2
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
python-version: ${{ matrix.python-version }}
- name: Master version bumped
id: master_version_bumped
shell: bash -l {0}
run: |
current_version=$(grep "__version__" alphapept/__init__.py | cut -f3 -d ' ' | sed 's/"//g')
current_version_as_regex=$(echo $current_version | sed 's/\./\\./g')
conda create -n version_check python=3.8 pip=20.1 -y
conda activate version_check
set +e
already_on_pypi=$(pip install alphapept== 2>&1 | grep -c "$current_version_as_regex")
set -e
conda deactivate
if [ $already_on_pypi -ne 0 ]; then
echo "Version is already on PyPi"
exit 1
fi
echo ::set-output name=version::$current_version
Create_PyPi_Release:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
python-version: ${{ matrix.python-version }}
- name: Conda info
shell: bash -l {0}
run: conda info
- name: Prepare distribution
shell: bash -l {0}
run: |
        conda create -n alphapept python=3.8 -y
        conda activate alphapept
        pip install twine
rm -rf dist
rm -rf build
python setup.py sdist bdist_wheel
twine check dist/*
conda deactivate
- name: Publish distribution to Test PyPI
uses: pypa/gh-action-pypi-publish@master
with:
password: ${{ secrets.TEST_PYPI_API_TOKEN }}
repository_url: https://test.pypi.org/legacy/
- name: Test PyPI test release
shell: bash -l {0}
run: |
conda create -n alphapept_pip_test python=3.8 -y
conda activate alphapept_pip_test
pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple "alphapept"
alphapept
conda deactivate
- name: Publish distribution to PyPI
uses: pypa/gh-action-pypi-publish@master
with:
password: ${{ secrets.PYPI_API_TOKEN }}
Test_PyPi_Release:
name: Test_PyPi_version_on_${{ matrix.os }}
runs-on: ${{ matrix.os }}
needs: Create_PyPi_Release
strategy:
matrix:
os: [ubuntu-latest, macOS-latest, windows-latest]
steps:
- uses: actions/checkout@v2
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
python-version: ${{ matrix.python-version }}
- name: Conda info
shell: bash -l {0}
run: conda info
- name: Test pip installation from PyPi
shell: bash -l {0}
run: |
conda create -n alphapept_pip_test python=3.8 -y
conda activate alphapept_pip_test
pip install "alphapept"
alphapept
conda deactivate
|
.github/workflows/publish_on_pypi.yml
|
name: Generate psycopg2 library for use in AWS Lambda
on:
push:
# Publish `master` as Docker `latest` image.
branches:
- master
# Publish `v1.2.3` tags as releases.
tags:
- v*
# Run tests for any PRs.
pull_request:
env:
IMAGE_NAME: awslambda-psycopg2
IMAGE_TAG: 2.8.4
jobs:
# Run tests.
  # See also https://docs.docker.com/docker-hub/builds/automated-testing/
docker-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run tests
run: |
if [ -f docker-compose.test.yml ]; then
docker-compose --file docker-compose.test.yml build
docker-compose --file docker-compose.test.yml run sut
else
docker build . --file Dockerfile
fi
# Generates psycopg2 library for use in AWS Lambda.
# Reference : https://github.com/jkehler/awslambda-psycopg2
generate-awslambda-psycopg2:
# Ensure test job passes before pushing image.
needs: docker-test
runs-on: ubuntu-latest
if: github.event_name == 'push'
steps:
- uses: actions/checkout@v2
- name: Build the Docker image
run: docker build . --file Dockerfile --tag $IMAGE_NAME:$IMAGE_TAG
- name: Set up Python 3.7
uses: actions/setup-python@v1
with:
python-version: 3.7
- name: Get the generated psycopg2 library from container to host
run: |
docker create -it --name dummy $IMAGE_NAME:$IMAGE_TAG bash
# docker cp dummy:/var/lang/lib/python3.7/site-packages/psycopg2-2.8.4-py3.7-linux-x86_64.egg/psycopg2 .
python -m pip install Django==3.0.2 django-storages==1.8 psycopg2-binary==2.8.4 werkzeug==0.16.0 --upgrade -t python
docker rm -f dummy
# - name: Generate the psycopg2 library artifact
# uses: actions/upload-artifact@v1
# with:
# name: psycopg2-v${{ env.IMAGE_TAG }}
# path: psycopg2
- name: Generate the psycopg2 library artifact
uses: actions/upload-artifact@v1
with:
name: layer
path: python
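# The `layer` artifact above follows the AWS Lambda layer layout (a top-level
# python/ directory). A hedged sketch of publishing it as a layer (the layer
# name below is an assumption):
#
#   zip -r layer.zip python
#   aws lambda publish-layer-version --layer-name psycopg2-django \
#     --zip-file fileb://layer.zip --compatible-runtimes python3.7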
|
.github/workflows/push.yml
|
---
name: Bitrefill
internal_url: /en/compatibility/bitrefill
logo: /img/compatibility/bitrefill/bitrefill.png
rbf:
tested:
date: "2018-11-06"
platforms:
- web
version: "n/a"
features:
receive:
notification: "na"
list: "false"
details: "false"
shows_replaced_version: "true"
shows_original_version: "true"
send:
signals_bip125: "true"
list: "false"
details: "false"
shows_replaced_version: "na"
shows_original_version: "na"
examples:
- image: /img/compatibility/bitrefill/rbf/send-screen.png
caption: >
Sending Transaction - Default send transaction screen. No RBF info. Transaction was sent via RBF.
- image: /img/compatibility/bitrefill/rbf/transaction-list-sent.png
caption: >
        Attempting Transaction Replacement - Transaction list screen. No way to manually bump the transaction. It was sent with RBF.
- image: /img/compatibility/bitrefill/rbf/transaction-list-incoming-rbf.png
caption: >
        Receiving Transaction Signaling RBF - No incoming transactions showed initially during the original transaction. This could have been related to delays in relaying the transaction across the Bitcoin network.
- image: /img/compatibility/bitrefill/rbf/transaction-list-incoming-replacement.png
caption: >
        Receiving Replacement Transaction - After the replacement transaction was broadcast, both transactions showed up as pending. They stayed pending even after the replacement transaction had 6+ confirmations.
- image: /img/compatibility/bitrefill/rbf/transaction-list-replacement-confirmed.png
caption: >
        Receiving Replacement Transaction - At some point in the next day, the original transaction was marked failed and the replacement transaction was credited to the account and marked complete.
segwit:
tested:
date: "2019-04-12"
platforms:
- web
version: "n/a"
features:
receive:
p2sh_wrapped: "true"
bech32: "false"
default: "p2sh_wrapped"
send:
bech32: "true"
change_bech32: "true"
segwit_v1: "Address does not pass validation."
bech32_p2wsh: "true"
examples:
- image: /img/compatibility/bitrefill/segwit/receive-screen.png
caption: >
Bitrefill allows P2SH-wrapped segwit deposits. No bech32 option available.
- image: /img/compatibility/bitrefill/segwit/send-screen.png
caption: >
Bitrefill can send to bech32 native addresses. Change address is also bech32.
#- image: /img/compatibility/bitrefill/segwit/send-v1.png
# caption: >
# Bitrefill displays an address validation error for segwit v1 addresses.
|
_data/compatibility/bitrefill.yaml
|
title: Azure Virtual Desktop documentation for users
summary: Connecting to Azure Virtual Desktop made easy.
metadata:
title: Azure Virtual Desktop user documentation
description: With Azure Virtual Desktop, you can use Windows 10 Enterprise in the cloud.
ms.service: virtual-desktop
services: virtual-desktop
ms.topic: landing-page
author: Heidilohr
ms.author: helohr
ms.date: 06/15/2021
# linkListType: architecture | concept | deploy | download | get-started | how-to-guide | learn | overview | quickstart | reference | sample | tutorial | video | whats-new
landingContent:
# Card
- title: Connect with the Windows Desktop client
linkLists:
- linkListType: how-to-guide
links:
- text: How to connect with the Windows Desktop client
url: connect-windows-7-10.md
# Card
- title: Connect with the Microsoft Store client
linkLists:
- linkListType: how-to-guide
links:
- text: How to connect with the Microsoft store client
url: connect-microsoft-store.md
# Card
- title: Connect with the Web client
linkLists:
- linkListType: how-to-guide
links:
- text: How to connect with the web client
url: connect-web.md
# Card
- title: Connect with the macOS client
linkLists:
- linkListType: how-to-guide
links:
- text: How to connect with the macOS client
url: connect-macos.md
# Card
- title: Connect with the iOS client
linkLists:
- linkListType: how-to-guide
links:
- text: How to connect with the iOS client
url: connect-ios.md
# Card
- title: Connect with the Android client
linkLists:
- linkListType: how-to-guide
links:
- text: How to connect with the Android client
url: connect-android.md
- title: Connect with Linux or thin clients
linkLists:
- linkListType: how-to-guide
links:
- text: How to connect with Linux or thin clients
url: linux-overview.md
|
articles/virtual-desktop/user-documentation/index.yml
|
name: Run BrowserStack Tests
on:
workflow_call:
inputs:
site_url:
default: "unknown"
required: false
type: string
secrets:
BROWSERSTACK_USERNAME:
required: true
BROWSERSTACK_ACCESS_KEY:
required: true
IMGUR_CLIENT_ID:
required: true
jobs:
run-browserstack-tests:
name: Run BrowserStack Tests
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: 'BrowserStack Env Setup'
uses: 'browserstack/github-actions/setup-env@master'
with:
username: ${{ secrets.BROWSERSTACK_USERNAME }}
access-key: ${{ secrets.BROWSERSTACK_ACCESS_KEY }}
build-name: ${{ github.run_id }}
project-name: ${{ github.repository }}
- name: Setup just
uses: extractions/setup-just@v1
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Utilize pip cache
uses: actions/cache@v2
with:
path: ~/.cache/pip
key: ${{ env.pythonLocation }}-${{ hashFiles('events_page/requirements.txt') }}
- name: "Authenticate to Google Cloud"
uses: "google-github-actions/auth@v0"
with:
workload_identity_provider: "projects/538480189659/locations/global/workloadIdentityPools/los-verdes-lv-event-pagenerator/providers/los-verdes-lv-event-pagenerator"
service_account: "<EMAIL>"
- name: Run Tests
id: build-and-publish-test-site
run: just test
env:
# EVENTS_PAGE_CALENDAR_ID: "<EMAIL>"
EVENTS_PAGE_LOAD_LOCAL_TF_VARS: "${{ github.workspace }}/losverdesatx-events.tfvars"
EVENTS_PAGE_GCS_BUCKET_PREFIX: ${{ github.ref != 'refs/heads/master' && format('tests/pr-{0}', github.event.number) || '' }}
- name: Upload screenshot as artifact
uses: actions/upload-artifact@v2
with:
name: test-screenshot
path: events_page/webdriver_test_screenshot.png
# Next few bits lifted mostly directly from: https://github.com/devicons/public-upload-to-imgur
- name: Upload screenshot to imgur
uses: devicons/public-upload-to-imgur@v2.2.1
id: imgur_step
with:
path: ./events_page/webdriver_test_screenshot.png
client_id: ${{ secrets.IMGUR_CLIENT_ID }}
- name: Comment on the PR about the result
if: github.ref != 'refs/heads/master'
uses: marocchino/sticky-pull-request-comment@v2
env:
# recall that this action returns a JSON.stringified array
IMG_URL: ${{ fromJSON(steps.imgur_step.outputs.imgur_urls)[0] }} # get the output of the step above using its id
MESSAGE: |
### Build / Run ID `${{ github.run_id }}` Results
- Published to: [${{ inputs.site_url }}](${{ inputs.site_url }})
#### Screenshot

with:
message: ${{format(env.MESSAGE, env.IMG_URL)}} # add the url into the string
|
.github/workflows/run_browserstack_tests.yml
|
specVersion: 0.0.3
description: MasterChefV2
repository: https://github.com/beethovenxfi/beethovenx-token-subgraph
schema:
file: ./schema.graphql
dataSources:
- kind: ethereum/contract
name: MasterChefV2
network: {{ network }}
source:
address: "{{ address }}"
abi: MasterChefV2
startBlock: {{ startBlock }}
mapping:
kind: ethereum/events
apiVersion: 0.0.5
language: wasm/assemblyscript
file: ./src/mappings/masterchefV2.ts
entities:
- MasterChefV2
- Pool
- User
- Rewarder
abis:
- name: MasterChefV2
file: ./abis/MasterChefV2.json
- name: SingleTokenRewarder
file: ./abis/SingleTokenRewarder.json
- name: MultiTokenRewarder
file: ./abis/MultiTokenRewarder.json
- name: ERC20
file: ./abis/ERC20.json
eventHandlers:
- event: Deposit(indexed address,indexed uint256,uint256,indexed address)
handler: deposit
- event: Withdraw(indexed address,indexed uint256,uint256,indexed address)
handler: withdraw
- event: EmergencyWithdraw(indexed address,indexed uint256,uint256,indexed address)
handler: emergencyWithdraw
- event: Harvest(indexed address,indexed uint256,uint256)
handler: harvest
- event: LogPoolAddition(indexed uint256,uint256,indexed address,indexed address)
handler: logPoolAddition
- event: LogSetPool(indexed uint256,uint256,indexed address,bool)
handler: logSetPool
- event: LogUpdatePool(indexed uint256,uint256,uint256,uint256)
handler: logUpdatePool
- event: UpdateEmissionRate(indexed address,uint256)
handler: updateEmissionRate
templates:
- kind: ethereum/contract
name: SingleTokenRewarder
network: {{ network }}
source:
abi: SingleTokenRewarder
mapping:
kind: ethereum/events
apiVersion: 0.0.5
language: wasm/assemblyscript
file: ./src/mappings/single-token-rewarder.ts
entities:
- Rewarder
abis:
- name: SingleTokenRewarder
file: ./abis/SingleTokenRewarder.json
- name: ERC20
file: ./abis/ERC20.json
- name: MasterChefV2
file: ./abis/MasterChefV2.json
eventHandlers:
- event: LogRewardPerSecond(uint256)
handler: logRewardPerSecond
- event: LogOnReward(indexed address,indexed uint256,uint256,indexed address)
handler: logOnReward
- kind: ethereum/contract
name: MultiTokenRewarder
network: {{ network }}
source:
abi: MultiTokenRewarder
mapping:
kind: ethereum/events
apiVersion: 0.0.5
language: wasm/assemblyscript
file: ./src/mappings/multi-token-rewarder.ts
entities:
- Rewarder
abis:
- name: MultiTokenRewarder
file: ./abis/MultiTokenRewarder.json
- name: ERC20
file: ./abis/ERC20.json
- name: MasterChefV2
file: ./abis/MasterChefV2.json
eventHandlers:
- event: LogRewardsPerSecond(address[],uint256[])
handler: logRewardsPerSecond
- event: LogOnReward(indexed address,indexed uint256,address,uint256,indexed address)
handler: logOnReward
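# Example of the mustache variables this template expects when rendered (the
# values below are placeholders, not real deployment data):
#
#   network: fantom
#   address: "0x0000000000000000000000000000000000000000"
#   startBlock: 0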
|
subgraphs/masterchefV2/template.yaml
|
on:
workflow_dispatch:
repository_dispatch:
types:
- update
env:
caniuse_repo: Fyrd/caniuse
caniuse_ref: main
caniuse_path: data.json
previous: previous
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
concurrency:
group: main
cancel-in-progress: true
jobs:
check-if-necessary:
runs-on: ubuntu-latest
outputs:
previous: ${{ steps.check-if-necessary.outputs.previous }}
latest: ${{ steps.check-if-necessary.outputs.latest }}
necessary: ${{ steps.check-if-necessary.outputs.necessary }}
steps:
- id: check-if-necessary
run: |
previous=$(curl https://raw.githubusercontent.com/${{ github.repository }}/${{ env.previous }}/${{ env.previous }})
latest=$(gh api repos/${{ env.caniuse_repo }}/git/trees/${{ env.caniuse_ref }} \
--jq '.tree[] | select(.path == "${{ env.caniuse_path }}") | .sha')
echo ::set-output name=previous::$previous
echo ::set-output name=latest::$latest
run:
runs-on: ubuntu-latest
needs: check-if-necessary
if: needs.check-if-necessary.outputs.previous != needs.check-if-necessary.outputs.latest
steps:
- uses: actions/checkout@v3
- run: |
gh api repos/${{ env.caniuse_repo }}/git/blobs/${{ needs.check-if-necessary.outputs.previous }} --jq '.content' |
base64 --decode > old.json
- run: curl https://raw.githubusercontent.com/${{ env.caniuse_repo }}/${{ env.caniuse_ref }}/${{ env.caniuse_path }} > new.json
- run: yarn
- run: yarn validate new.json
- run: yarn main old.json new.json
env:
TWITTER_API_CONFIG: ${{ secrets.TWITTER_API_CONFIG }}
- uses: actions/checkout@v3
with:
path: ${{ env.previous }}
ref: ${{ env.previous }}
- run: |
cd ${{ env.previous }}
echo ${{ needs.check-if-necessary.outputs.latest }} > ${{ env.previous }}
git config user.name 'github-actions[bot]'
          git config user.email '41898282+github-actions[bot]@users.noreply.github.com'
git add .
git commit -m $(TZ=Asia/Tokyo date +%Y-%m-%dT%H:%M:%S.%N%z)
git push
|
.github/workflows/main.yml
|
interactions:
- request:
body: |
{"data":{"attributes":{"metric_type":"distribution","tags":["app","datacenter"]},"id":"TestCreateatagconfigurationreturnsCreatedresponse1618491730","type":"manage_tags"}}
form: {}
headers:
Accept:
- application/json, */*;q=0.8
Connection:
- close
Content-Length:
- '170'
Content-Type:
- application/json
Host:
- api.datadoghq.com
User-Agent:
- datadog-api-client-typescript/0.1.0 (node 15.11.0; os Darwin; arch x64)
X-Datadog-Parent-Id:
- '5095942047695828255'
X-Datadog-Trace-Id:
- '5095942047695828255'
method: POST
url: https://api.datadoghq.com/api/v2/metrics/TestCreateatagconfigurationreturnsCreatedresponse1618491730/tags
response:
body: '{"data":{"type":"manage_tags","id":"TestCreateatagconfigurationreturnsCreatedresponse1618491730","attributes":{"tags":["datacenter","app"],"include_percentiles":false,"created_at":"2021-04-15T13:02:10.769559+00:00","updated_at":"2021-04-15T13:02:10.769559+00:00","metric_type":"distribution"}}}'
code: 201
duration: ''
headers:
Cache-Control:
- no-cache
Connection:
- close
Content-Length:
- '294'
Content-Security-Policy:
- frame-ancestors 'self'; report-uri https://api.datadoghq.com/csp-report
Content-Type:
- application/json
Date:
- Thu, 15 Apr 2021 13:02:10 GMT
Pragma:
- no-cache
Strict-Transport-Security:
- max-age=15724800;
X-Content-Type-Options:
- nosniff
X-Dd-Debug:
- /L+SFFO+m1pPY+hRCpk5325fvfrNl0KmiquUNJolBN/5hu3HIwflqjZSbJ6NxDFG
X-Dd-Version:
- '35.4318655'
X-Frame-Options:
- SAMEORIGIN
X-Ratelimit-Limit:
- '50'
X-Ratelimit-Period:
- '60'
X-Ratelimit-Remaining:
- '49'
X-Ratelimit-Reset:
- '50'
status: 201 Created
- request:
body: ''
form: {}
headers:
Accept:
- application/json, */*;q=0.8
Connection:
- close
Host:
- api.datadoghq.com
User-Agent:
- datadog-api-client-typescript/0.1.0 (node 15.11.0; os Darwin; arch x64)
X-Datadog-Parent-Id:
- '5095942047695828255'
X-Datadog-Trace-Id:
- '5095942047695828255'
method: DELETE
url: https://api.datadoghq.com/api/v2/metrics/TestCreateatagconfigurationreturnsCreatedresponse1618491730/tags
response:
body: ''
code: 204
duration: ''
headers:
Cache-Control:
- no-cache
Connection:
- close
Content-Length:
- '0'
Content-Security-Policy:
- frame-ancestors 'self'; report-uri https://api.datadoghq.com/csp-report
Content-Type:
- application/json
Date:
- Thu, 15 Apr 2021 13:02:11 GMT
Pragma:
- no-cache
Strict-Transport-Security:
- max-age=15724800;
X-Content-Type-Options:
- nosniff
X-Dd-Debug:
- gYZcaADwbKcv7Hm19HJx6WsLoKuOijDWAt2viPeCfWqUgyKY+9e1xZdmMJeXV3YV
X-Dd-Version:
- '35.4318655'
X-Frame-Options:
- SAMEORIGIN
X-Ratelimit-Limit:
- '50'
X-Ratelimit-Period:
- '60'
X-Ratelimit-Remaining:
- '48'
X-Ratelimit-Reset:
- '49'
status: 204 No Content
version: 1
|
tests/api/v2/datadog/cassettes/TestScenarios/Feature_Metrics/Scenario_Create_a_tag_configuration_returns_Created_response.yaml
|
version:
stdin: null
outputs: [stdout]
references: []
options: --version
#merge
#######
#test: overlaps are merged
# bookends are merged
# bookends +1 are not merged
# cf. output from bedTools mergeBed
merge_simple:
outputs: [stdout]
stdin: null
references: [chip_peaks_merged_simple.bed]
options: --method=merge -L /dev/null -I %DIR%/chip_peaks_sanitized.bed | cut -f1,2,3
#test: merging at a distance
# cf. output from bedTools mergeBed
merge_1kb:
outputs: [stdout]
stdin: null
references: [chip_peaks_merged_1kb.bed]
options: --method=merge --merge-distance=1000 -L /dev/null -I %DIR%/chip_peaks_sanitized.bed | cut -f1,2,3
#test: only outputting merged intervals
merge_only_merged:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks_only_merged.bed]
options: --method=merge --merge-min-intervals=2 -L /dev/null
#test: things of the same name are merged,
# overlaps of different names aren't
merge_by_name:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks_merged_by_name.bed]
options: --method=merge --merge-by-name -L /dev/null
#test that merge is union
merge_is_union:
outputs: [stdout]
stdin: null
references: [merge_is_union_out.bed]
options: --method=merge -L /dev/null -I %DIR%/merge_is_union_in.bed
#test bed12 merging with block resolution
bed12_merge:
outputs: [stdout]
stdin: null
references: [bed12_merge_out.bed]
options: --method=merge --merge-and-resolve-blocks -I %DIR%/bed12_merge_in.bed --merge-min-intervals=2 -L /dev/null
stranded_merge:
outputs: [stdout]
stdin: null
references: [stranded_merge_out.bed]
options: --method=merge --merge-stranded -I %DIR%/stranded_merge_in.bed -L /dev/null
#filterGenome
#############
#test: remove bed interval that overlaps start of contig
# remove interval that overlaps end of contig
# remove interval that is on a non-existent contig
filterGenome:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks_filtered.bed]
options: --method=filter-genome --genome-file=%DIR%/../data/hg19.chr19.fasta -L /dev/null
#sanitizeGenome
###############
#test: truncate intervals that overlap start of contig
# truncate intervals that overlap end of contig
# remove intervals that are on a non-existent contig
# remove zero length intervals
sanitizeGenome:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks_sanitized.bed]
options: --method=sanitize-genome --genome-file=%DIR%/../data/hg19.chr19.fasta -L /dev/null
#block
######
# We wanted to test the round trip using bedtools bed12tobed6, but couldn't get
# the resulting sort order right.
block:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks.blocked.bed]
options: --method=block -L /dev/null
#shift
######
#check that interval at end isn't moved
shift:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks.shifted.bed]
options: --method=shift --offset=10 -g %DIR%/../data/hg19.chr19.fasta -L /dev/null
#TODO: Find some way to verify the correctness of the bins methods
bins-bases:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks.binned.equal-bases.bed]
options: --method=bins
bins-intervals:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks.binned.equal-intervals.bed]
options: --method=bins --binning-method=equal-intervals
bins-range:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks.binned.equal-range.bed]
options: --method=bins --binning-method=equal-range
bins-manual:
outputs: [stdout]
stdin: chip_peaks.bed
references: [chip_peaks.binned.manual.bed]
options: --method=bins --bin-edges=0,5,10,100 --num-bins=3
issue-347:
stdin: issue347.bed
outputs: [stdout]
references: [issue347.out.bed]
options: --method=merge --merge-by-name
rename_chr_ensembl:
stdin: chip_peaks.bed
outputs: [stdout]
references: [ensembl_renamed_chr.gtf]
options: --method=rename-chr --rename-chr-file=%DIR%/ucsc2ensembl.txt
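# Note: %DIR% is substituted by the test runner (it points at this tests
# directory, judging by the paths above), and -L /dev/null silences logging.
# A new case follows the same shape, e.g. (the test name and reference file
# below are hypothetical):
#
# my_new_test:
#   outputs: [stdout]
#   stdin: chip_peaks.bed
#   references: [my_expected_output.bed]
#   options: --method=merge -L /dev/null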
|
tests/bed2bed.py/tests.yaml
|
? ⠁⠇⠇ ⠓⠥⠍⠁⠝ ⠃⠑⠊⠝⠛⠎ ⠁⠗⠑ ⠃⠕⠗⠝ ⠋⠗⠑⠑ ⠁⠝⠙ ⠑⠟⠥⠁⠇ ⠊⠝ ⠙⠊⠛⠝⠊⠞⠽ ⠁⠝⠙ ⠗⠊⠛⠓⠞⠎⠲ ⠞⠓⠑⠽ ⠁⠗⠑ ⠑⠝⠙⠕⠺⠑⠙ ⠺⠊⠞⠓ ⠗⠑⠁⠎⠕⠝ ⠁⠝⠙ ⠉⠕⠝⠎⠉⠊⠑⠝⠉⠑ ⠁⠝⠙ ⠎⠓⠕⠥⠇⠙ ⠁⠉⠞ ⠞⠕⠺⠁⠗⠙⠎ ⠕⠝⠑ ⠁⠝⠕⠞⠓⠑⠗ ⠊⠝ ⠁ ⠎⠏⠊⠗⠊⠞ ⠕⠋ ⠃⠗⠕⠞⠓⠑⠗⠓⠕⠕⠙⠲
: lang: en
lang_639_3: eng
lang_full: en-Brai-US
lang_bcp_udhr: en-Brai
name_udhr: English (Braille)
script: Brai
source: https://r12a.github.io/
udhr_key: eng_brai
? 𐐃𐑊 𐐸𐐷𐐭𐑋𐐲𐑌 𐐺𐐨𐐮𐑍𐑆 𐐪𐑉 𐐺𐐫𐑉𐑌 𐑁𐑉𐐨 𐐰𐑌𐐼 𐐨𐐿𐐶𐐲𐑊 𐐮𐑌 𐐼𐐮𐑀𐑌𐐮𐐻𐐨 𐐰𐑌𐐼 𐑉𐐴𐐻𐑅. 𐐜𐐩 𐐪𐑉 𐐯𐑌𐐼𐐵𐐲𐐼 𐐶𐐮𐑄 𐑉𐐨𐑆𐐲𐑌 𐐰𐑌𐐼 𐐿𐐪𐑌𐑇𐐲𐑌𐑅 𐐰𐑌𐐼 𐑇𐐳𐐼 𐐰𐐿𐐻 𐐻𐐲𐐶𐐬𐑉𐐼𐑆 𐐶𐐲𐑌 𐐲𐑌𐐲𐑄𐐲𐑉 𐐮𐑌 𐐩 𐑅𐐹𐐮𐑉𐐮𐐻 𐐲𐑂 𐐺𐑉𐐲𐑄𐐲𐑉𐐸𐐳𐐼.
: lang: en
lang_639_3: eng
lang_full: en-Dsrt-US
lang_bcp_udhr: en-Dsrt
name_udhr: English (Deseret)
script: Dsrt
source: https://r12a.github.io/
udhr_key: eng_dsrt
? 𐑷𐑤 𐑣𐑿𐑥𐑩𐑯 𐑚𐑰𐑦𐑙𐑟 𐑸 𐑚𐑹𐑯 𐑓𐑮𐑰 𐑯 𐑰𐑒𐑢𐑩𐑤 𐑦𐑯 𐑛𐑦𐑜𐑯𐑦𐑑𐑦 𐑯 𐑮𐑲𐑑𐑕. 𐑞𐑱 𐑸 𐑦𐑯𐑛𐑬𐑛 𐑢𐑦𐑞 𐑮𐑰𐑟𐑩𐑯 𐑯 𐑒𐑪𐑯𐑖𐑩𐑯𐑕 𐑯 𐑖𐑫𐑛 𐑨𐑒𐑑 𐑑𐑩𐑢𐑹𐑛𐑟 𐑢𐑳𐑯 𐑩𐑯𐑳𐑞𐑼 𐑦𐑯 𐑩 𐑕𐑐𐑦𐑮𐑦𐑑 𐑝 𐑚𐑮𐑳𐑞𐑼𐑣𐑫𐑛.
: lang: en
lang_639_3: eng
lang_full: en-Shaw-GB
lang_bcp_udhr: en-Shaw
name_udhr: English (Shavian)
script: Shaw
source: https://r12a.github.io/
udhr_key: eng_shaw
? 𖬑𖬦𖬰 𖬇𖬰𖬧𖬵 𖬁𖬲𖬬 𖬇𖬲𖬤 𖬓𖬲𖬞 𖬐𖬰𖬦 𖬉 𖬘𖬲𖬤 𖬀𖬰𖬝𖬵 𖬔𖬟𖬰 𖬂𖬲𖬤𖬵 𖬅𖬲𖬨𖬵 𖬓𖬲𖬥𖬰 𖬄𖬲𖬟 𖬒𖬲𖬯𖬵 𖬋𖬯. 𖬎𖬶𖬞 𖬖𖬰𖬮 𖬓𖬜𖬰 𖬆𖬰𖬞 𖬖𖬞𖬰 𖬎𖬲𖬟𖬰 𖬔𖬟𖬰 𖬆𖬰𖬞 𖬔𖬤𖬵 𖬔𖬟𖬰 𖬂𖬮𖬰 𖬁𖬲𖬞 𖬐𖬲𖬤 𖬆𖬝𖬰 𖬒𖬲𖬯 𖬅𖬮𖬰 𖬉𖬰 𖬎𖬰𖬩𖬵 𖬂𖬲𖬮𖬰 𖬁𖬲𖬞 𖬎𖬰𖬩𖬵 𖬒𖬲𖬯𖬵 𖬉 𖬅𖬮𖬰 𖬙 𖬂𖬰𖬧𖬵.
: lang: hmn
lang_639_3: hmn
lang_full: hmn-Hmng-LA
lang_bcp_udhr: hmn-Hmng
name_udhr: Hmong (Pahawh Hmong)
script: Hmng
source: https://r12a.github.io/
udhr_key: hmn_hmng
? 𞄔𞄄𞄧𞄤𞄃𞄧𞄴𞄅𞄫𞄵𞄘𞄧𞄵𞄉𞄨𞄴 𞄀𞄧𞄲𞄤𞄎𞄪𞄳𞄘𞄬𞄲𞄚𞄄𞄲𞄫𞄃𞄄𞄦𞄰𞄤𞄊𞄦𞄰𞄜𞄤𞄵𞄨𞄋𞄨𞄴 𞄄𞄤𞄳𞄨𞄔𞄨𞄲𞄈𞄤𞄦. 𞄉𞄤𞄳𞄬𞄆𞄤𞄲 𞄑𞄨𞄵𞄉𞄧𞄰𞄉𞄤𞄲𞄃𞄄𞄤𞄲𞄬 𞄃𞄄𞄦𞄰𞄤𞄉𞄧𞄰𞄊𞄦𞄰𞄤 𞄃𞄄𞄦𞄰𞄤 𞄦𞄰𞄉𞄫𞄵𞄘𞄧𞄳𞄤𞄁𞄧𞄱𞄈𞄨𞄲 𞄧𞄤 𞄎𞄪𞄂𞄤𞄱𞄬𞄦𞄰𞄉𞄫𞄵𞄂𞄤𞄱𞄬𞄔𞄨𞄲𞄎𞄪𞄧𞄳 𞄧𞄤𞄎𞄬𞄳𞄃𞄦𞄲.
: lang: hmn
lang_639_3: hmn
lang_full: hmn-Hmnp-US
lang_bcp_udhr: hmn-Hmnp
  name_udhr: Hmong (Nyiakeng Puachue Hmong)
script: Hmnp
source: https://r12a.github.io/
udhr_key: <KEY>
? 𞡥𞠖𞢻𞠢𞠮𞠣 𞢣𞠽 𞡅 𞡄 𞠺 𞡈 𞡗 𞢰𞠎 𞡔 𞡪, 𞡅𞠧 𞡄 𞡥𞢻𞠤 𞡖𞠢 𞠄𞠦 𞡄 𞠼𞣀 𞠕𞠣 𞡬𞠊𞢂. 𞠀𞠢𞡔 𞠄𞠦 𞡨𞢯𞠸𞠣𞡪 𞡽𞠨𞠷 𞠣 𞡗𞠼 𞡄 𞡪𞡣. 𞡛𞡇 𞠕𞡰𞡽 𞡄 𞡄 𞣄𞣄𞡪 𞡭𞠢 𞠀𞠣 𞢱𞠥𞢄𞠣.
: lang: men
lang_639_3: men
lang_full: men-Mend-SL
lang_bcp_udhr: men-Mend
name_udhr: Mende (Mende)
script: Mend
source: https://r12a.github.io/
udhr_key: men_mend
? ꤰꥈꤳꥎ ꤳꥈꥐ ꤾꥁꥉꥑ ꤸꥎꥑꤴꥉꤰ, ꤳ꥓ꤸꥈꥆꥐ ꥁꥋꤰ꥓-ꥁꥋꤰ꥓ ꤴꥎ ꤼ꥓ꤽꥊ. ꤰꥈꤳꥎ ꤵꤱꥇꥒꤰ꥓-ꤷꥒ ꥆꤰꥎꥒ ꤶꥉꤰꥉꥑ ꤲꥉꥐ ꥆꤳꥊꥎ, ꤰꥎꥑꤵꥋ ꥆꥋ ꤰꥎꤾꥋꤰ꥓-ꤵꥎ ꤷꥎꥒꤰꥈꥆꤳ꥓-ꤷꥒ ꤴꥋ ꤲꥉꥐ ꤾꥈꤿꥎꥐ ꤾꥎꤸ꥓ ꥆꤼꥊ ꤼꥎꤶꤼꥈꥒꤰ꥓.
: lang: rej
lang_639_3: rej
lang_full: rej-Rjng-ID
lang_bcp_udhr: rej-Rjng
name_udhr: Rejang
script: Rjng
source: https://r12a.github.io/
udhr_key: <KEY>
? 𐴀𐴞𐴕𐴐𐴝𐴦𐴕 𐴁𐴠𐴒𐴧𐴟𐴕 𐴀𐴝𐴎𐴝𐴊𐴢 𐴀𐴝𐴌 𐴀𐴠𐴑𐴧𐴟 𐴉𐴟𐴥𐴖𐴝𐴙𐴕𐴝 𐴇𐴡𐴥𐴑 𐴀𐴝𐴌 𐴀𐴞𐴎𐴧𐴡𐴃𐴢 𐴓𐴡𐴌 𐴉𐴡𐴘𐴊𐴝 𐴀𐴡𐴥𐴘𐴧𐴠 ۔ 𐴀𐴞𐴥𐴃𐴝𐴘𐴝𐴃𐴧𐴟 𐴀𐴝𐴈𐴡𐴓 𐴀𐴝𐴌 𐴁𐴟𐴎 𐴀𐴡𐴥𐴘𐴧𐴠 ، 𐴀𐴠𐴥𐴃𐴡𐴓𐴧𐴝 𐴀𐴞𐴥𐴃𐴝𐴌𐴝𐴃𐴧𐴟 𐴀𐴠𐴑 𐴀𐴡𐴕 𐴀𐴝𐴌 𐴀𐴠𐴑 𐴎𐴡𐴕 𐴓𐴡𐴘 𐴁𐴤𐴝𐴘𐴧𐴡 𐴋𐴧𐴡𐴙𐴓𐴧𐴝 𐴔𐴝𐴦𐴔𐴠𐴓𐴝 𐴒𐴡𐴌𐴡𐴥𐴕
𐴏𐴝𐴀𐴝 ۔
: lang: rhg
lang_639_3: rhg
lang_full: rhg-Rohg-MM
lang_bcp_udhr: rhg-Rohg
name_udhr: Rohingya
script: Rohg
source: https://r12a.github.io/
udhr_key: rhg_rohg
? ࠊࠋ⸱ࠄࠀࠍࠔࠉࠌ⸱ࠍࠅࠋࠃࠅ⸱ࠇࠐࠔࠉࠌ⸱ࠅࠄࠌ⸱ࠀࠇࠃ⸱ࠊࠌࠅ⸱ࠄࠔࠍࠉ⸱ࠓࠊࠓࠅࠃ⸱ࠅࠓࠀࠔࠓ⸱ࠌࠂࠉࠏ⸱ࠋࠄࠊ࠽ࠉࠔ⸱ࠍࠊࠍ⸱ࠀࠇࠃ⸱ࠌࠄࠌ⸱ࠂࠍࠍ⸱ࠅࠍࠁ⸱ࠅࠏࠍࠉࠄࠌ⸱ࠍࠏࠋࠉࠄࠌ⸱ࠋࠏࠔࠅࠕ⸱ࠀࠇࠃ⸱ࠋࠌࠏࠍ⸱ࠓࠏࠄࠅ⸱ࠁࠓࠅࠇ⸱ࠈࠅࠁࠄ⸱ࠊࠌࠅ⸱ࠀࠇࠉࠌ࠽
: lang: smp
lang_639_3: smp
lang_full: smp-Samr-IL
lang_bcp_udhr: smp-Samr
name_udhr: Samaritan
script: Samr
source: https://r12a.github.io/
udhr_key: smp_samr
? ꠢꠇꠟ ꠝꠣꠘꠥꠡ ꠡꠣꠗꠤꠘꠜꠣꠛꠦ ꠢꠝꠣꠘ ꠁꠎ꠆ꠎꠔ ꠀꠞ ꠢꠇ ꠟꠁꠀ ꠙꠄꠖꠣ ‘ꠅꠄ। ꠔꠣꠁꠘꠔꠣꠁꠘꠞ ꠛꠤꠛꠦꠇ ꠀꠞ ꠀꠇꠟ ꠀꠍꠦ। ꠅꠔꠣꠞ ꠟꠣꠉꠤ ꠢꠇꠟꠞ ꠄꠇꠎꠘꠦ ꠀꠞꠇꠎꠘꠞ ꠟꠉꠦ ꠛꠤꠞꠣꠖꠞꠤꠞ ꠝꠘ ꠟꠁꠀ ꠀꠌꠞꠘ ꠇꠞꠣ ꠃꠌꠤꠔ।
: lang: syl
lang_639_3: syl
lang_full: syl-Sylo-BD
lang_bcp_udhr: syl-Sylo
name_udhr: Sylheti (Sylheti Nagri)
script: Sylo
source: https://r12a.github.io/
udhr_key: syl_sylo
|
tools/udhr_art1_r12a.yaml
|
parameters:
AzureSubscription: ''
Environment: ''
FunctionAppName: ''
ResourceGroup: ''
ArmTemplateRoot: '$(Pipeline.Workspace)/Dfc.FutureAccessModel.AreaRouting.Resources.ArmTemplates'
FunctionAppPackage: '$(Pipeline.Workspace)/Dfc.FutureAccessModel.AreaRouting.DeploymentPackages/Dfc.FutureAccessModel.AreaRouting.zip'
EnvironmentTag: $(EnvironmentTag)
ParentBusinessTag: $(ParentBusiness)
ServiceOfferingTag: $(ServiceOffering)
ApimResourceGroup: ''
ApimInstanceName: ''
ApimProductId: ''
ApiName: ''
SwaggerSpecificationUrl: ''
jobs:
- deployment: DeployTo_${{ parameters.Environment }}
environment: ${{ parameters.Environment }}
strategy:
runOnce:
deploy:
steps:
- checkout: dfc-devops
path: 's/dfc-devops/'
- template: AzureDevOpsTemplates/Deploy/StepTemplates/dfc-arm-deploy.yml@dfc-devops
parameters:
ArmTemplateRoot: ${{ parameters.ArmTemplateRoot }}
AzureSubscription: ${{ parameters.AzureSubscription }}
EnvironmentTag: ${{ parameters.EnvironmentTag }}
ParentBusinessTag: ${{ parameters.ParentBusinessTag }}
ResourceGroup: ${{ parameters.ResourceGroup }}
ServiceOfferingTag: ${{ parameters.ServiceOfferingTag }}
- template: AzureDevOpsTemplates/Deploy/StepTemplates/dfc-functionapp-apimapi-deploy.yml@dfc-devops
parameters:
AzureSubscription: ${{ parameters.AzureSubscription }}
FunctionAppName: ${{ parameters.FunctionAppName }}
FunctionResourceGroup: ${{ parameters.ResourceGroup }}
FunctionPackage: ${{ parameters.FunctionAppPackage }}
ApimResourceGroup: '${{ parameters.ApimResourceGroup }}'
ApimInstanceName: '${{ parameters.ApimInstanceName }}'
ApimProductId: '${{ parameters.ApimProductId }}'
ApiName: '${{ parameters.ApiName }}'
SwaggerSpecificationUrl: '${{ parameters.SwaggerSpecificationUrl }}'
|
Resources/AzureDevOps/JobTemplates/Deploy.yml
|
---
# NGINX
- name: Install Nginx
include_role:
name: nginxinc.nginx
# NGINX LDAP authentication
- name: Check if the load_module directive is in nginx conf
lineinfile:
line: "load_module modules/ngx_http_auth_ldap_module.so;"
dest: /etc/nginx/nginx.conf
state: present
check_mode: yes
register: lineinfile_module
- name: Install the nginx-ldap-auth module when not present
block:
- name: Get installed Nginx version
command:
cmd: nginx -v
register: get_nginx_version
check_mode: no
- name: Set sequenceserver_nginx_version variable
set_fact:
sequenceserver_nginx_version: "{{ get_nginx_version.stderr | regex_search('nginx/(?P<version>\\d+\\.\\d+\\.\\d+)', '\\g<version>') | first }}"
- name: Create directory for nginx related sources if it does not exist
file:
path: "{{ sequenceserver_nginx_loaded_sources_path }}"
state: directory
mode: '0755'
- name: Get and unarchive the nginx source (needs same version as the one installed with role nginxinc.nginx)
unarchive:
src: 'https://nginx.org/download/nginx-{{ sequenceserver_nginx_version }}.tar.gz'
dest: "{{ sequenceserver_nginx_loaded_sources_path }}"
remote_src: yes
- name: Install Git
include_role:
name: geerlingguy.git
- name: Get source of the nginx-ldap-auth module
git:
repo: 'https://github.com/kvspb/nginx-auth-ldap.git'
dest: "{{ sequenceserver_nginx_loaded_sources_path }}/nginx-auth-ldap"
version: 83c059b73566c2ee9cbda920d91b66657cf120b7
register: get_ldap_auth_source
- name: Install packages required for compiling the nginx module
package:
state: present
name: "{{ sequenceserver_nginx_base }}"
when: sequenceserver_nginx_base is defined
- name: Compile the nginx-ldap-auth module (https://www.nginx.com/blog/compiling-dynamic-modules-nginx-plus/)
command:
chdir: "{{ sequenceserver_nginx_loaded_sources_path }}/nginx-{{ sequenceserver_nginx_version }}"
cmd: "{{ item }}"
with_items:
- "./configure --with-compat --add-dynamic-module={{ sequenceserver_nginx_loaded_sources_path }}/nginx-auth-ldap"
- make modules
- name: Copy the nginx-ldap-auth module library to the nginx module directory
copy:
src: "{{ sequenceserver_nginx_loaded_sources_path }}/nginx-{{ sequenceserver_nginx_version }}/objs/ngx_http_auth_ldap_module.so"
dest: /etc/nginx/modules/
mode: 0644
remote_src: yes
- name: Insert the load_module directive in nginx conf
lineinfile:
line: "load_module modules/ngx_http_auth_ldap_module.so;"
dest: /etc/nginx/nginx.conf
regexp: "load_module modules/ngx_http_auth_ldap_module.so;"
insertbefore: BOF
when: lineinfile_module is changed
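# A hedged follow-up task (not part of the original role) could verify that
# nginx accepts the configuration with the new module loaded:
#
# - name: Validate nginx configuration with the LDAP module loaded
#   command:
#     cmd: nginx -t
#   changed_when: false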
|
tasks/install.dependencies.yml
|
dict-name: basic-linear
use-gpu: True
dataset:
- name: mnist
# Augmentation is only applied during training
- on-the-fly-augmentation: True
random-shift: False
random-crop: False
white-noise: 3 # std of white noise
# The preprocessor is married to the dictionary. Encoders must use consistent preprocessing.
preprocessor:
# Normalization is applied to each sample in all settings.
- normalization: True
mean: 0
std: 1
import argparse  # added: the snippet below assumes an ArgumentParser instance named `parser`
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='mnist',
help='"mnist", "fashion_mnist", "cifar10", "ASIRRA", ...')
parser.add_argument('--valid-size',type=int, default=-1, metavar='N',
help='Number of samples removed for validation. If N<=0, test set is used.')
parser.add_argument('--patch-size',type=int, default=32, metavar='p',
help='Breaks each image into pxp subimages.')
parser.add_argument('--overComplete', type=float, default=1, metavar='d',
help='Defines dictionary as d-overcomplete (number of times overcomplete)')
parser.add_argument('--L1-weight', type=float, default=0.2, metavar='lambda',
help='Non-negative scalar weight for L1 (sparsity) term.')
# Encoding Parameters:
parser.add_argument('--encode-alg', type=str, default='fista',
help='Encoding algorithm ("ista","fista" or "salsa").')
parser.add_argument('--encode-iters', type=int, default=200, metavar='M',
help='During encoding step, perform M steps of encoding algorithm.')
parser.add_argument('--mu', type=float, default=None, metavar='M',
help='"mu" parameter for SALSA-based encoding.')
########### Optimization arguments
parser.add_argument('--max-epochs', type=int, default=200,
help='Number of training epochs.')
parser.add_argument('--batch-size', type=int, default=10,
help='Number of training patches per batch.')
parser.add_argument('--opt-method', type=str, default='adam',
help='Learning algorithm ("sgd" or "adam").')
parser.add_argument('--learn-rate', type=float, default=2e2,
help='Initial learning rate.')
# SGD Parameters:
parser.add_argument('--learn-rate-decay', type=float, default=0.999,
help='Epoch-wise learning rate decay.')
parser.add_argument('--momentum', type=float, default=0,
help='If nonzero, applies Nesterov Momentum.')
########### Logistics
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--save-filename', type=str, default='results/tmp/no-name',
help='Path to directory where everything gets saved.')
parser.add_argument('--use-HPO-params', action='store_true', default=False,
help='If true, uses the hyperparameters found by grid search (stored in "paramSearchResults").')
parser.add_argument('--seed', type=int, default=23,
help='RNG seed.')
parser.add_argument('--data-seed', type=int, default=23,
help='Special seed for validation set generation.')
parser.add_argument('--save-trained-model', action='store_true', default=False,
help='If true, saves trained model with ddict().')
parser.add_argument('--visualize-dict-atoms', action='store_true', default=False,
help='If true, saves image of dictionary atoms during training.')
parser.add_argument('--print-frequency', type=int, default=4,
help='Number of print statements per epoch.')
|
config_linear.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2019-05-11 18:27"
game: "Unreal Tournament 2004"
name: "DM-atomicalien2"
author: "<NAME>"
description: "Liandri found a little unknown planet called Toeafunagus where gaint\
\ aliens live. These aliens span up to a mile long. They feed and move much like\
\ Earth Slime Mould, buton a much larger scale. Watch out for the spores, they \
\ carry rockets."
releaseDate: "2005-08"
attachments:
- type: "IMAGE"
name: "DM-atomicalien2_shot_3.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/A/DM-atomicalien2_shot_3.png"
- type: "IMAGE"
name: "DM-atomicalien2_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/A/DM-atomicalien2_shot_1.png"
- type: "IMAGE"
name: "DM-atomicalien2_shot_2.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament%202004/Maps/DeathMatch/A/DM-atomicalien2_shot_2.png"
originalFilename: "dm-atomicalien2.zip"
hash: "ec24e30de3a4b64d65a80ee473c7b18ac95ede31"
fileSize: 3682464
files:
- name: "DM-atomicalien2.ut2"
fileSize: 2243063
hash: "c85149ee215135bc234d79dbf66d06ea17c39af7"
- name: "falskpk01.utx"
fileSize: 6382690
hash: "87047df5dd2b5ab23c3afb87e1e17156e346c00f"
otherFiles: 3
dependencies:
DM-atomicalien2.ut2:
- status: "OK"
name: "falskpk01"
downloads:
- url: "http://www.unrealplayground.com/forums/downloads.php?do=file&id=4720"
main: false
repack: false
state: "MISSING"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament%202004/Maps/DeathMatch/A/dm-atomicalien2.zip"
main: true
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament%202004/Maps/DeathMatch/A/e/c/24e30d/dm-atomicalien2.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament%202004/Maps/DeathMatch/A/e/c/24e30d/dm-atomicalien2.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "DeathMatch"
title: "AtomicAlien"
playerCount: "1-8"
themes:
Industrial: 1.0
bots: true
|
content/Unreal Tournament 2004/Maps/DeathMatch/A/e/c/24e30d/dm-atomicalien2_[ec24e30d].yml
|
---
http_interactions:
- request:
method: post
uri: https://owner-api.teslamotors.com/oauth/token
body:
encoding: UTF-8
string: grant_type=password&client_id=<TESLA_CLIENT_ID>&client_secret=<TESLA_CLIENT_SECRET>&email=<TESLA_EMAIL>&password=<<PASSWORD>>
headers:
Accept-Encoding:
- gzip;q=1.0,deflate;q=0.6,identity;q=0.3
Accept:
- "*/*"
User-Agent:
- Ruby
response:
status:
code: 200
message: OK
headers:
Server:
- nginx
Date:
- Wed, 17 Dec 2014 00:18:20 GMT
Content-Type:
- application/json; charset=utf-8
Transfer-Encoding:
- chunked
Connection:
- keep-alive
Status:
- 200 OK
Cache-Control:
- no-store
Pragma:
- no-cache
X-Ua-Compatible:
- IE=Edge,chrome=1
X-Request-Id:
- a4432ee502d03559a34184a99e9c8250
X-Runtime:
- '0.413855'
body:
encoding: UTF-8
string: '{"access_token":"<KEY>","token_type":"bearer","expires_in":7776000}'
http_version:
recorded_at: Wed, 17 Dec 2014 00:18:22 GMT
- request:
method: get
uri: https://owner-api.teslamotors.com/api/1/vehicles
body:
encoding: US-ASCII
string: ''
headers:
Authorization:
- Bearer 1<PASSWORD>3<PASSWORD>5c499bdeba92c7088b8e543265fba3cc35339707abfd875c09f70
response:
status:
code: 200
message: OK
headers:
Server:
- nginx
Date:
- Wed, 17 Dec 2014 00:18:21 GMT
Content-Type:
- application/json; charset=utf-8
Content-Length:
- '446'
Connection:
- keep-alive
Status:
- 200 OK
X-Ua-Compatible:
- IE=Edge,chrome=1
Etag:
- '"c5d083718f906b7336fb8b28fcc87b6c"'
Cache-Control:
- max-age=0, private, must-revalidate
X-Request-Id:
- 54ea59680bf3b823b2a8e76c585b9b6c
X-Runtime:
- '0.060762'
body:
encoding: UTF-8
string: '{"response":[{"color":null,"display_name":"Nikola","id":1514029006966957156,"option_codes":"MS01,RENA,TM00,DRLH,PF00,BT85,PBCW,RFPO,WT19,IBMB,IDPB,TR00,SU01,SC01,TP01,AU01,CH00,HP00,PA00,PS00,AD02,X020,X025,X001,X003,X007,X011,X013,COUS","vehicle_id":490215852,"vin":"5YJSA1CN5CFP01657","tokens":["6b6e59059375f282","09b1673648be8c08"],"state":"online","remote_start_enabled":true,"calendar_enabled":true,"notifications_enabled":true}],"count":1}'
http_version:
recorded_at: Wed, 17 Dec 2014 00:18:22 GMT
- request:
method: get
uri: https://owner-api.teslamotors.com/api/1/vehicles/1514029006966957156/data_request/charge_state
body:
encoding: US-ASCII
string: ''
headers:
Authorization:
- Bearer <KEY>
response:
status:
code: 200
message: OK
headers:
Server:
- nginx
Date:
- Wed, 17 Dec 2014 00:18:24 GMT
Content-Type:
- application/json; charset=utf-8
Content-Length:
- '945'
Connection:
- keep-alive
Status:
- 200 OK
X-Ua-Compatible:
- IE=Edge,chrome=1
Etag:
- '"69ee67fed559493cfe13612b2bfe9561"'
Cache-Control:
- max-age=0, private, must-revalidate
X-Request-Id:
- 3fb99de88c44c2dafa3018b45d054d57
X-Runtime:
- '3.313512'
body:
encoding: UTF-8
string: '{"response":{"charging_state":"Disconnected","charge_limit_soc":90,"charge_limit_soc_std":90,"charge_limit_soc_min":50,"charge_limit_soc_max":100,"charge_to_max_range":false,"battery_heater_on":null,"not_enough_power_to_heat":null,"max_range_charge_counter":0,"fast_charger_present":false,"fast_charger_type":"<invalid>","battery_range":197.95,"est_battery_range":142.23,"ideal_battery_range":224.1,"battery_level":78,"usable_battery_level":78,"battery_current":-0.3,"charge_energy_added":33.8,"charge_miles_added_rated":112.5,"charge_miles_added_ideal":127.5,"charger_voltage":null,"charger_pilot_current":null,"charger_actual_current":null,"charger_power":null,"time_to_full_charge":null,"charge_rate":0.0,"charge_port_door_open":false,"motorized_charge_port":false,"scheduled_charging_start_time":null,"scheduled_charging_pending":false,"user_charge_enable_request":null,"charge_enable_request":true,"eu_vehicle":false,"charger_phases":null}}'
http_version:
recorded_at: Wed, 17 Dec 2014 00:18:26 GMT
recorded_with: VCR 2.9.3
|
spec/cassettes/vehicle-charge_state.yml
|
server:
domain: http://ssl.zjxpp.com
session-timeout: 2000
port: 80
logging:
path: logs
level:
root: info
com.cxg.weChat: debug
config: classpath:logback-spring.xml
bootdo:
  # Production server paths
uploadPath: /var/uploaded_files/
imagePath: file:///var/uploaded_files/plan/
  # Test server paths
# uploadPath: D:/var/uploaded_files/
# imagePath: file:///D:/var/uploaded_files/plan/
spring:
thymeleaf:
mode: LEGACYHTML5
cache: false
jackson:
time-zone: GMT+8
date-format: yyyy-MM-dd HH:mm:ss
http:
encoding:
force: true
charset: UTF-8
enabled: true
resources:
static-locations: classpath:/,classpath:/static,classpath:/public,classpath:/resources,classpath:/META-INF/resources
  # Database connection
datasource:
    # Oracle production server connection
type: com.alibaba.druid.pool.DruidDataSource
driverClassName: oracle.jdbc.OracleDriver
url: jdbc:oracle:thin:exp/exp123@10.100.0.10:1530:expora
username: exp
password: <PASSWORD>
    # Oracle test server connection
# type: com.alibaba.druid.pool.DruidDataSource
# driverClassName: oracle.jdbc.OracleDriver
# url: jdbc:oracle:thin:osap/osap@10.0.5.16:1521:expdbtest
# username: osap
# password: <PASSWORD>
    # Druid connection pool settings
initialSize: 1
minIdle: 3
maxActive: 20
    # Maximum wait time (ms) when acquiring a connection
maxWait: 60000
    # Interval (ms) between eviction runs that detect and close idle connections
timeBetweenEvictionRunsMillis: 60000
    # Minimum time (ms) a connection may sit idle in the pool before eviction
minEvictableIdleTimeMillis: 30000
validationQuery: select 'x'
testWhileIdle: true
testOnBorrow: false
testOnReturn: false
    # Enable PSCache and set its size per connection
poolPreparedStatements: true
maxPoolPreparedStatementPerConnectionSize: 20
    # Monitoring filters; without 'stat' the console cannot report SQL metrics, 'wall' is the SQL firewall
filters: stat,wall,slf4j
    # Enable mergeSql and slow-SQL logging via connectProperties
connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
    # Merge monitoring data from multiple DruidDataSources
#useGlobalDataSourceStat: true
redis:
host: localhost
port: 6379
password:
    # Connection timeout (ms)
timeout: 50000
jedis:
pool:
        # Maximum connections in the pool (negative = no limit)
max-active: 1000
        # Maximum blocking wait time (negative = no limit)
max-wait: -1
        # Maximum idle connections in the pool
max-idle: 10
        # Minimum idle connections in the pool
min-idle: 0
# WeChat configuration
wechat:
open:
componentAppId : "wx1c9a9ae4ce02880c"
componentSecret : "<KEY>"
componentToken : "<KEY>"
componentAesKey : "ZKmhANpXAWNYrhLiXPpZoAmokloweUEeERleEBcfxPe"
mybatis:
configuration:
map-underscore-to-camel-case: true
mapper-locations: mybatis/**/*Mapper.xml
typeAliasesPackage: com.cxg.weChat.**.pojo
# Cache and session store: ehcache by default, redis optional
cacheType: redis
|
src/main/resources/application.yml
|
language: scala
sudo: false
services:
- docker
before_install:
# upgrade to a later docker-compose which supports services.kafka.scale
- sudo rm /usr/local/bin/docker-compose
- curl -L https://github.com/docker/compose/releases/download/1.22.0/docker-compose-`uname -s`-`uname -m` > docker-compose
- chmod +x docker-compose
- sudo mv docker-compose /usr/local/bin
# fetch full history for correct current and previous version detection
- git fetch --unshallow
# using jabba for custom jdk management
- curl -sL https://raw.githubusercontent.com/shyiko/jabba/0.11.2/install.sh | bash && . ~/.jabba/jabba.sh
- jabba install adopt@~1.8.202-08
- jabba install adopt@~1.11.0-2
script:
- jabba use ${JDK:=adopt@~1.8.202-08}
- java -version
- sbt -jvm-opts .jvmopts-travis "$CMD"
jobs:
include:
- stage: check
script: sbt scalafmtCheck || { echo "[error] Unformatted code found. Please run 'Test/compile' and commit the reformatted code."; false; }
name: "Code style check (fixed with `sbt Test/compile`)"
- script: sbt scalafmtSbtCheck || { echo "[error] Unformatted sbt code found. Please run 'scalafmtSbt' and commit the reformatted code."; false; }
name: "Build code style check (fixed with `sbt scalafmtSbt`)"
- env: CMD="++2.11.12 Test/compile"
name: "Compile all tests (with Scala 2.11)"
- env: CMD="unidoc"
name: "Create all API docs"
- env: CMD="docs/Paradox/paradox"
name: "Create site with Paradox"
- stage: test
env: CMD="+test"
name: "Run tests (AdoptOpenJDK 8)"
- env:
- JDK="adopt@~1.11.0-2"
- CMD="+test"
name: "Run tests (AdoptOpenJDK 11)"
- env: CMD="mimaReportBinaryIssues"
name: "Check binary compatibility"
- stage: integration
env: CMD="dockerComposeTestAll"
- env: CMD="benchmarks/It/compile"
- stage: whitesource
env: CMD=";whitesourceCheckPolicies ;whitesourceUpdate"
- stage: publish
env: CMD="+publish"
name: "Publish artifacts for all Scala versions"
- script: openssl aes-256-cbc -K $encrypted_d80875c2ae41_key -iv $encrypted_d80875c2ae41_iv -in .travis/travis_alpakka_kafka_rsa.enc -out .travis/id_rsa -d && eval "$(ssh-agent -s)" && chmod 600 .travis/id_rsa && ssh-add .travis/id_rsa && sbt -jvm-opts .jvmopts-travis docs/publishRsync
name: "Publish API and reference documentation"
stages:
# runs on master commits and PRs
- name: check
if: NOT tag =~ ^v
# runs on master commits and PRs
- name: test
if: NOT tag =~ ^v
# runs on master commits and PRs
- name: integration
if: NOT tag =~ ^v
# runs on main repo master commits and version-tagged commits
- name: whitesource
if: repo = akka/alpakka-kafka AND ( ( branch = master AND type = push ) OR tag =~ ^v )
# runs on main repo master commits and version-tagged commits
- name: publish
if: repo = akka/alpakka-kafka AND ( ( branch = master AND type = push ) OR tag =~ ^v )
after_failure:
- docker-compose logs
- find . -name "*.log" -exec ./scripts/cat-log.sh {} \;
before_cache:
- find $HOME/.ivy2/ -name "ivydata-*.properties" -print -delete
- find $HOME/.sbt -name "*.lock" -print -delete
cache:
directories:
- $HOME/.ivy2/cache
- $HOME/.sbt/boot
- $HOME/.jabba/jdk
env:
global:
# encrypt with: travis encrypt WHITESOURCE_PASSWORD=...
- secure: "<KEY>
# encrypt with: travis encrypt BINTRAY_USER=...
- secure: "<KEY>
# encrypt with: travis encrypt BINTRAY_PASS=...
- secure: "<KEY>
|
spec/fixtures/configs/ak/akka:alpakka-kafka.yml
|
index:
banner:
- campaign_id: '1234'
creative_id: '12345'
price: 200.0
ad_id: '1234'
nurl: 'http://test.noadnolife.com/v1/win/12345?impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}'
iurl: 'http://test.noadnolife.com/img/12345.png'
adm: '<a href="http://test.noadnolife.com/v1/click/12345?impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}"><img src="http://test.noadnolife.com/img/12345?impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}" width="728" height="90" border="0" alt="Advertisement" /></a>'
pecpm: 250.0
- campaign_id: '1235'
creative_id: '12346'
price: 155.0
ad_id: '1235'
nurl: 'http://test.noadnolife.com/v1/win/12346?price=${AUCTION_PRICE}'
iurl: 'http://test.noadnolife.com/img/12346.png'
adm: '<a href="http://test.noadnolife.com/v1/click/12346?impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}"><img src="http://test.noadnolife.com/img/12346?impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}" width="728" height="90" border="0" alt="Advertisement" /></a>'
pecpm: 240.0
banner_rect:
- campaign_id: '1237'
creative_id: '12347'
price: 200.0
ad_id: '1237'
nurl: 'http://test.noadnolife.com/v1/win/12347?impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}'
iurl: 'http://test.noadnolife.com/img/12347.png'
adm: '<a href="http://test.noadnolife.com/v1/click/12347?impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}"><img src="http://test.noadnolife.com/img/12347?impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}" width="250" height="300" border="0" alt="Advertisement" /></a>'
pecpm: 250.0
- campaign_id: '1238'
creative_id: '12348'
price: 155.0
ad_id: '1238'
nurl: 'http://test.noadnolife.com/v1/win/12348?price=${AUCTION_PRICE}'
iurl: 'http://test.noadnolife.com/img/12348.png'
adm: '<a href="http://test.noadnolife.com/v1/click/12346?impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}"><img src="http://test.noadnolife.com/img/12348impid=${AUCTION_IMP_ID}&price=${AUCTION_PRICE}" width="250" height="300" border="0" alt="Advertisement" /></a>'
pecpm: 240.0
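# Macro note (illustrative, assuming standard OpenRTB substitution): if the
# creative above wins at a clearing price of 180.0, the exchange expands the
# win notice to http://test.noadnolife.com/v1/win/12348?price=180.0 and fills
# ${AUCTION_IMP_ID}/${AUCTION_PRICE} inside `adm` the same way.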
|
command/data.yml
|
name: Main
on:
pull_request:
push:
branches: [master]
jobs:
kubeval:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: validate
uses: instrumenta/kubeval-action@master
with:
files: ./deploy
strict: false
version: "1.18.12"
build:
name: Build
runs-on: ubuntu-20.04
needs: unit-tests
steps:
- name: Checkout repo
uses: actions/checkout@v2
- name: Restore docker cache
uses: actions/cache@v2
with:
path: ~/docker-cache
key: metrics-server-exporter-docker-${{ github.sha }}
restore-keys: |
metrics-server-exporter-docker-${{ github.sha }}
- name: build final docker image
run: |
docker load < ~/docker-cache/metrics-server-exporter-test.tar
docker build -t vivareal/metrics-server-exporter:build .
mkdir -p ~/docker-cache-final-image
docker save -o ~/docker-cache-final-image/metrics-server-exporter.tar vivareal/metrics-server-exporter
- name: Save docker cache final image
uses: actions/cache@v2
with:
path: ~/docker-cache-final-image
key: metrics-server-exporter-docker-final-image-${{ github.sha }}
publish:
name: publish
needs: build
runs-on: ubuntu-20.04
steps:
- name: "Get release name"
id: "release-name"
run: "echo \"::set-output name=release::${GITHUB_REF##*/}\""
- name: Checkout repo
uses: actions/checkout@v2
- name: Restore docker cache
uses: actions/cache@v2
with:
path: ~/docker-cache-final-image
key: metrics-server-exporter-docker-final-image-${{ github.sha }}
restore-keys: |
metrics-server-exporter-docker-final-image-${{ github.sha }}
- name: Load tar file
run: docker load < ~/docker-cache-final-image/metrics-server-exporter.tar
- name: Docker login
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: tag publish docker image
if: github.event_name == 'push' && github.ref == 'refs/heads/master'
run: |
docker tag vivareal/metrics-server-exporter:build vivareal/metrics-server-exporter:master
docker push vivareal/metrics-server-exporter:master
|
.github/workflows/main.yaml
|
---
apt:
automatic_upgrades: false
keys: []
repositories: []
packages:
- apt-transport-https
- ca-certificates
- aptitude
- ncurses-term # provides the terminfo database in /usr/share/terminfo/**/*
- curl
- rsync
- zsh
- fasd
- git
- git-extras
- git-flow
- cowsay
- figlet
- tmux
- net-tools # provides `netstat`
- apache2-utils # provides `htpasswd`
- openssh-client
# provides `add-apt-repository` command
# ATTENTION depends: python3 (not desired in python debian containers)
- software-properties-common
- python3-dev # required to build python `psycopg2` module
- python3-apt # required for ansible module `apt_repository`
- python3-psutil # required for ansible module `dconf`
- python3-jmespath # required for ansible filter 'json_query'
- python3-junit.xml # required for ansible junit output
# required for ansible module `debconf`
# Depends: debconf
- debconf-utils
- libpq-dev # required for `pipx run pgcli` & `psycopg2` python module
# the `fortune` package is not available on debian (buster)
# the package name is `fortune-mod` + `fortunes-min`
- fortune
- postgresql-client # provides `psql`
- openssh-server
# --- gui packages ---
- fonts-emojione # provides emoji support in terminals
# --- gui applications ---
- peek
- wireshark
- google-chrome-stable
- chrome-gnome-shell
- firefox
- keepassxc
- xdotool
# TODO: 2020-05-16 Not yet compatible with Ubuntu 20.04 Focal
# - caffeine # tool & indicator to disable screensaver
# ---- packages that shall be absent last ---
- { name: "memtest86+", state: "absent" }
- { name: "ruby", state: "absent" }
- { name: "golang", state: "absent" }
- { name: "nodejs", state: "absent" }
  # `gdebi-core` installs .deb packages together with their dependencies
  # not needed as modern versions of apt[-get] provide the same functionality
# Depends: python3-apt
# -> Leads to undesired installation of apt python in containers
#
# example: (the dot is required to prevent installing literal package by name)
# sudo apt -y install ./package.deb
# sudo dpkg -i /path/to/package.deb; sudo apt-get install -f
- { name: "gdebi-core", state: "absent" }
# Depends: python3
# -> Leads to undesired installation of apt python in containers
- { name: "lsb-release", state: "absent" }
|
nifr/playbooks/devcontainer/vars/distribution/ubuntu.yml
|
%YAML 1.2
---
$id: http://devicetree.org/schemas/phy/lantiq,vrx200-pcie-phy.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Lantiq VRX200 and ARX300 PCIe PHY Device Tree Bindings
maintainers:
- <NAME> <<EMAIL>>
properties:
"#phy-cells":
const: 1
description: selects the PHY mode as defined in <dt-bindings/phy/phy-lantiq-vrx200-pcie.h>
compatible:
enum:
- lantiq,vrx200-pcie-phy
- lantiq,arx300-pcie-phy
reg:
maxItems: 1
clocks:
items:
- description: PHY module clock
- description: PDI register clock
clock-names:
items:
- const: phy
- const: pdi
resets:
items:
- description: exclusive PHY reset line
- description: shared reset line between the PCIe PHY and PCIe controller
reset-names:
items:
- const: phy
- const: pcie
lantiq,rcu:
$ref: /schemas/types.yaml#/definitions/phandle
description: phandle to the RCU syscon
lantiq,rcu-endian-offset:
$ref: /schemas/types.yaml#/definitions/uint32
description: the offset of the endian registers for this PHY instance in the RCU syscon
lantiq,rcu-big-endian-mask:
$ref: /schemas/types.yaml#/definitions/uint32
description: the mask to set the PDI (PHY) registers for this PHY instance to big endian
big-endian:
description: Configures the PDI (PHY) registers in big-endian mode
type: boolean
little-endian:
    description: Configures the PDI (PHY) registers in little-endian mode
type: boolean
required:
- "#phy-cells"
- compatible
- reg
- clocks
- clock-names
- resets
- reset-names
- lantiq,rcu
- lantiq,rcu-endian-offset
- lantiq,rcu-big-endian-mask
additionalProperties: false
examples:
- |
pcie0_phy: phy@106800 {
compatible = "lantiq,vrx200-pcie-phy";
reg = <0x106800 0x100>;
lantiq,rcu = <&rcu0>;
lantiq,rcu-endian-offset = <0x4c>;
lantiq,rcu-big-endian-mask = <0x80>; /* bit 7 */
big-endian;
clocks = <&pmu 32>, <&pmu 36>;
clock-names = "phy", "pdi";
resets = <&reset0 12 24>, <&reset0 22 22>;
reset-names = "phy", "pcie";
#phy-cells = <1>;
};
...
|
kernel/linux-5.4/Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml
|
name: Realtime chat CI/CD
on:
push:
branches: [main]
pull_request:
branches: [main]
jobs:
upload-docker-container-app-to-remote-repo:
name: CI
runs-on: ubuntu-latest
environment:
name: main
steps:
- uses: actions/checkout@v2
- name: logout
run: docker logout
- name: Build The Docker api Image
run: docker build ./api --tag arthursantos2228/realtime-chat:latest
- name: Build The Docker web Image
run: docker build ./web --tag arthursantos2228/web-realtime:latest
- name: docker login
env:
USER_SECURE: ${{secrets.USER_SECRET}}
PASSWORD_SECURE: ${{secrets.PASS_SECRET}}
run: |
docker login -u $USER_SECURE -p $PASSWORD_SECURE
- name: Docker push api
run: docker push arthursantos2228/realtime-chat:latest
- name: Docker push web
run: docker push arthursantos2228/web-realtime:latest
docker-container-back:
name: CD-back
runs-on: ubuntu-latest
environment:
name: deployprod
url: 'https://service.arthursantos.tech:8443/messages'
needs: upload-docker-container-app-to-remote-repo
steps:
- name: deploy back with ssh
uses: appleboy/ssh-action@master
with:
host: ${{secrets.HOST_ROSE}}
username: ${{secrets.USERNAME}}
key: ${{secrets.PRIVATE_KEY}}
port: ${{secrets.PORT}}
passphrase: ${{secrets.PASSWORD}}
debug: true
script: |
sudo docker container rm realtime-chat -f \
&& sudo docker image rm arthursantos2228/realtime-chat -f \
&& sudo docker-compose up -d
docker-container-front:
name: CD-front
runs-on: ubuntu-latest
environment:
name: deployprod
url: 'https://www.arthursantos.tech/'
needs: upload-docker-container-app-to-remote-repo
steps:
- name: deploy front with ssh
uses: appleboy/ssh-action@master
with:
host: ${{secrets.HOST_MARIA}}
username: ${{secrets.USERNAME}}
key: ${{secrets.PRIVATE_KEY}}
port: ${{secrets.PORT}}
passphrase: ${{secrets.PASSWORD}}
debug: true
script: |
sudo docker container rm web-realtime -f \
&& sudo docker image rm arthursantos2228/web-realtime -f \
&& sudo docker-compose up -d
|
.github/workflows/docker-deploy.yml
|
version: '3.7'
services:
jupyterhub:
build: jupyterhub
image: jupyterhub_img
container_name: jupyterhub
depends_on:
- database
- blob-store
- reverse-proxy
- oauth2
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- hub-data:/srv/jupyterhub
networks:
- hale-bopp-network
environment:
- DOCKER_JUPYTER_CONTAINER=jupyterlab_img
- DOCKER_NETWORK_NAME=hale-bopp-network
- CONTAINER_IDLE_TIMEOUT=600
- HUB_IP=jupyterhub
- HOST
- DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
- BLOB_STORE_URL=http://blob-store:8000
- AWS_ACCESS_KEY_ID=$AWS_KEY_ID
- AWS_SECRET_ACCESS_KEY=$AWS_SECRET_KEY
- POSTGRES_HOST=${POSTGRES_HOST}
- POSTGRES_PORT=${POSTGRES_PORT}
- OAUTH_ACCESS_TOKEN_URL=${OAUTH_ACCESS_TOKEN_URL}
- OAUTH_AUTHORIZE_URL=${OAUTH_AUTHORIZE_URL}
- OAUTH_CALLBACK_URL=${OAUTH_CALLBACK_URL}
- OAUTH_CLIENT_ID=${CLIENT_ID}
- OAUTH_CLIENT_SECRET=${CLIENT_SECRET}
- OAUTH_USERDATA_URL=${USERDATA_URL}
- KEYCLOAK_LOGOUT_URL=${KEYCLOAK_LOGOUT_URL}
- OAUTH2_USERNAME_KEY=${OAUTH2_USERNAME_KEY}
labels:
- "traefik.http.routers.jupyterhub.rule=Host(`jupyterhub.docker.localhost`)"
restart: on-failure
jupyterlab:
# This service is really just to facilitate building the image with docker-compose build
# Don't waste time configuring the container here
build: jupyterlab
image: jupyterlab_img
container_name: jupyterlab
network_mode: none
command: echo
reverse-proxy:
image: traefik:v2.4
container_name: reverse_proxy
networks:
- hale-bopp-network
command: --api.insecure=true --providers.docker
ports:
- "80:80"
- "8080:8080"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
restart: on-failure
oauth2:
build: oauth2
image: oauth2_img
container_name: oauth2
networks:
- hale-bopp-network
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- keycloak-data:/srv/jupyterhub
environment:
- KEYCLOAK_USER=admin
- KEYCLOAK_PASSWORD=<PASSWORD>
- KEYCLOAK_FRONTEND_URL=http://oauth2.docker.localhost/auth
- HOST
labels:
- "traefik.http.routers.oauth2.rule=Host(`oauth2.docker.localhost`)"
- "traefik.http.services.oauth2.loadbalancer.server.port=8080"
restart: on-failure
database:
image: postgres:12.7-alpine
container_name: db
environment:
- USER=${POSTGRES_USER}
- POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB}
- HOST
volumes:
- type: volume
source: database-data
target: /var/lib/postgresql/data
networks:
- hale-bopp-network
blob-store:
build: blob-store
image: blobstore_img
container_name: blob-store
environment:
- ENDPOINT=blob-store
- REMOTE_MANAGEMENT_DISABLE=1
- S3BACKEND=file
- S3DATAPATH=/blob/data
- S3METADATAPATH=/blob/metadata
- SCALITY_ACCESS_KEY_ID=${AWS_KEY_ID}
- SCALITY_SECRET_ACCESS_KEY=${AWS_SECRET_KEY}
- HOST
volumes:
- type: volume
source: blob-data
target: /blob
networks:
- hale-bopp-network
volumes:
hub-data:
driver: local
keycloak-data:
driver: local
database-data:
driver: local
blob-data:
driver: local
networks:
hale-bopp-network:
external: true
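# Setup sketch (not part of the compose file): the external network must exist
# before `docker-compose up`, e.g.
#   docker network create hale-bopp-network
# The ${POSTGRES_*}, ${AWS_*} and OAuth variables are read from the shell
# environment or an adjacent .env file; placeholder values only:
#   POSTGRES_USER=hub
#   POSTGRES_PASSWORD=change-me
#   POSTGRES_DB=jupyterhub
#   POSTGRES_HOST=db
#   POSTGRES_PORT=5432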
|
docker-compose.yml
|
en:
riak:
bucket_link_conversion: "Can't convert a bucket link to a walk spec"
client_type: "invalid argument %{client} is not a Riak::Client"
content_type_undefined: "content_type is not defined!"
empty_map_reduce_query: "Specify one or more query phases to your MapReduce."
failed_request: "Expected %{expected} from Riak but received %{code}. %{body}"
filter_needs_block: "Filter %{filter} expects a block."
filter_arity_mismatch: "Filter %{filter} expects %{expected} arguments but %{received} were given."
hash_type: "invalid argument %{hash} is not a Hash"
http_configuration: "The %{backend} HTTP backend cannot be used. Please check its requirements."
hostname_invalid: "host must be a valid hostname"
invalid_client_id: "Invalid client ID, must be a string or between 0 and %{max_id}"
invalid_function_value: "invalid value for function: %{value}"
invalid_phase_type: "type must be :map, :reduce, or :link"
loading_bucket: "while loading bucket '%{name}'"
missing_block: "A block must be given."
missing_host_and_port: "You must specify a host and port, or use the defaults of 127.0.0.1:8098"
module_function_pair_required: "function must have two elements when an array"
path_and_body_required: "You must supply both a resource path and a body."
port_invalid: "port must be an integer between 0 and 65535"
request_body_type: "Request body must be a String or IO."
resource_path_short: "Resource path too short"
search_docs_require_id: "Search index documents must include the 'id' field."
search_remove_requires_id_or_query: "Search index documents to be removed must have 'id' or 'query' keys."
stored_function_invalid: "function must have :bucket and :key when a hash"
    string_type: "invalid argument %{string} is not a String"
too_few_arguments: "too few arguments: %{params}"
walk_spec_invalid_unless_link: "WalkSpec is only valid for a function when the type is :link"
wrong_argument_count_walk_spec: "wrong number of arguments (one Hash or bucket,tag,keep required)"
|
riak-client/lib/riak/locale/en.yml
|
- type: replace
path: /instance_groups/name=k8s-helm-addons/jobs/name=action/properties/actions/-
value:
type: secret
name: grafana-admin-secret
namespace: grafana
data:
- name: GF_SECURITY_ADMIN_USER
value: admin
- name: GF_SECURITY_ADMIN_PASSWORD
        value: ((grafanaAdminPassword))
- type: replace
path: /instance_groups/name=k8s-helm-addons/jobs/name=action/properties/actions/-
value:
type: helm_chart
name: grafana
namespace: grafana
chart: grafana/grafana
version: ((helm_grafana))
debug: true
properties:
- name: adminPassword
        value: ((grafanaAdminPassword))
- name: envFromSecret
value: grafana-admin-secret
values_file_content:
env:
https_proxy: ((https_proxy))
http_proxy: ((http_proxy))
no_proxy: ((no_proxy))
downloadDashboards:
env:
https_proxy: ((https_proxy))
http_proxy: ((http_proxy))
no_proxy: ((no_proxy))
plugins:
- digrich-bubblechart-panel
- grafana-clock-panel
- grafana-piechart-panel
- vonage-status-panel
- marcuscalidus-svg-panel
- grafana-worldmap-panel
- jdbranham-diagram-panel
- btplc-status-dot-panel
extraSecretMounts:
- name: certs-configmap
mountPath: /etc/grafana/ssl/
subPath: ca.crt
secretName: ca-secret
readOnly: true
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
grafana.ini:
paths:
data: /var/lib/grafana/data
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: true
log:
mode: console
grafana_net:
url: https://grafana.net
auth.ldap:
enabled: true
allow_sign_up: true
config_file: /etc/grafana/ldap.toml
ldap:
enabled: true
existingSecret: "grafana-ldap"
sidecar:
dashboards:
enabled: true
searchNamespace: grafana
folderAnnotation: folderAnnotation
provider:
foldersFromFilesStructure: true
datasources:
enabled: true
searchNamespace: grafana
- type: replace
path: /variables?/-
value:
name: grafanaAdminPassword
type: password
|
master-depls/k8s-grafana/template/grafana-operators.yml
|
name: Create Git and Docker tags
on:
deployment_status
jobs:
create-staging-tags:
runs-on: ubuntu-latest
if: github.event.deployment_status.state == 'success' && github.event.deployment_status.environment == 'dev'
outputs:
tag: ${{ steps.tag-git-commit.outputs.TAG }}
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.deployment.ref }}
- name: Tag Git Commit
id: tag-git-commit
run: |
git fetch --prune --unshallow
bash ./scripts/tag_git_dev.sh ${{ github.sha }}
- name: Configure VAEC AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.VAEC_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.VAEC_AWS_SECRET_ACCESS_KEY }}
aws-region: us-gov-west-1
role-to-assume: ${{ secrets.VAEC_DEPLOY_ROLE }}
role-skip-session-tagging: true
role-duration-seconds: 900
- name: Tag Docker Image in VAEC
run: |
bash ./scripts/tag_docker_image.sh ${{ github.sha }} ${{ steps.tag-git-commit.outputs.TAG }} us-gov-west-1
create-production-tags:
runs-on: ubuntu-latest
if: github.event.deployment_status.state == 'success' && github.event.deployment_status.environment == 'staging'
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.deployment.ref }}
- name: Tag Git Commit
id: tag-git-commit
run: |
bash ./scripts/tag_git_staging.sh ${{ github.event.deployment.ref }}
- name: Configure VAEC AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.VAEC_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.VAEC_AWS_SECRET_ACCESS_KEY }}
aws-region: us-gov-west-1
role-to-assume: ${{ secrets.VAEC_DEPLOY_ROLE }}
role-skip-session-tagging: true
role-duration-seconds: 900
- name: Tag Docker Image in VAEC
run: |
bash ./scripts/tag_docker_image.sh ${{ steps.tag-git-commit.outputs.STAGING_TAG }} ${{ steps.tag-git-commit.outputs.TAG }} us-gov-west-1
start-vaec-staging-deployment:
runs-on: ubuntu-latest
needs: [create-staging-tags]
steps:
- uses: actions/github-script@v4.1
with:
github-token: ${{secrets.GH_ACCESS_TOKEN}}
script: |
github.repos.createDeployment({
owner: context.repo.owner,
repo: context.repo.repo,
ref: '${{ needs.create-staging-tags.outputs.tag }}',
environment: 'staging',
required_contexts: [],
auto_merge: false,
});
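# Flow note (not part of the workflow): a successful `dev` deployment tags the
# commit and image, then start-vaec-staging-deployment creates a `staging`
# deployment via the GitHub API; when that deployment reports success, this
# same deployment_status trigger fires again and create-production-tags runs.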
|
.github/workflows/tagging.yml
|
project_name: vulcan-local
builds:
- id: vulcan-local
env:
- CGO_ENABLED=0
goos:
- linux
- darwin
- windows
goarch:
- amd64
- arm
- arm64
goarm:
- "7"
ignore:
- goos: darwin
goarch: arm
- goos: windows
goarch: arm
- goos: windows
goarch: arm64
mod_timestamp: '{{ .CommitTimestamp }}'
dir: .
main: .
binary: vulcan-local
builder: go
gobinary: go
ldflags:
- -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}}
-X main.builtBy=goreleaser
archives:
- name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}'
replacements:
darwin: Darwin
linux: Linux
windows: Windows
386: i386
amd64: x86_64
format_overrides:
- goos: windows
format: zip
dockers:
-
goos: linux
goarch: amd64
# Templates of the Docker image names.
image_templates:
- "{{ .Env.TRAVIS_REPO_SLUG }}:{{ .Tag }}"
- "{{ .Env.TRAVIS_REPO_SLUG }}:v{{ .Major }}"
- "{{ .Env.TRAVIS_REPO_SLUG }}:v{{ .Major }}.{{ .Minor }}"
- "{{ .Env.TRAVIS_REPO_SLUG }}:latest"
# Skips the docker push.
# Could be useful if you also do draft releases.
#
# If set to auto, the release will not be pushed to the Docker repository
# in case there is an indicator of a prerelease in the tag, e.g. v1.0.0-rc1.
#
# Defaults to false.
skip_push: false
# Path to the Dockerfile (from the project root).
#
# Defaults to `Dockerfile`.
dockerfile: Dockerfile
# Set the "backend" for the Docker pipe.
# Valid options are: docker, buildx, podman, buildpacks
# podman is a GoReleaser Pro feature and is only available on Linux.
# Defaults to docker.
use: docker
ids:
- vulcan-local
# Template of the docker build flags.
build_flag_templates:
- "--pull"
- "--label=org.opencontainers.image.created={{.Date}}"
- "--label=org.opencontainers.image.title={{.ProjectName}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--platform=linux/amd64"
# Extra flags to be passed down to the push command.
# Defaults to empty.
push_flags:
- --tls-verify=false
changelog:
sort: asc
use: github
filters:
exclude:
- '^docs:'
- '^test:'
- '^chore'
- Merge pull request
- Merge remote-tracking branch
- Merge branch
- go mod tidy
groups:
- title: 'New Features'
regexp: "^.*feat[(\\w)]*:+.*$"
order: 0
- title: 'Bug fixes'
regexp: "^.*fix[(\\w)]*:+.*$"
order: 10
- title: Other work
order: 999
release:
footer: |
**Full Changelog**: https://github.com/adevinta/vulcan-local/compare/{{ .PreviousTag }}...{{ .Tag }}
prerelease: auto
|
.goreleaser.yaml
|
installType: ocp
rbacApiVersion: rbac.authorization.k8s.io
namespace: openshift-operator-lifecycle-manager
catalog_namespace: openshift-marketplace
operator_namespace: openshift-operators
imagestream: true
writeStatusName: operator-lifecycle-manager
writeStatusNameCatalog: operator-lifecycle-manager-catalog
writePackageServerStatusName: operator-lifecycle-manager-packageserver
olm:
replicaCount: 1
image:
ref: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
pullPolicy: IfNotPresent
service:
internalPort: 8443
externalPort: 8443
clientCASecret: pprof-cert
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node.kubernetes.io/unreachable
operator: Exists
effect: NoExecute
tolerationSeconds: 120
- key: node.kubernetes.io/not-ready
operator: Exists
effect: NoExecute
tolerationSeconds: 120
tlsSecret: olm-operator-serving-cert
resources:
requests:
cpu: 10m
memory: 160Mi
catalog:
replicaCount: 1
opmImageArgs: --opmImage=quay.io/operator-framework/configmap-operator-registry:latest
image:
ref: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
pullPolicy: IfNotPresent
service:
internalPort: 8443
externalPort: 8443
clientCASecret: pprof-cert
tlsSecret: catalog-operator-serving-cert
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node.kubernetes.io/unreachable
operator: Exists
effect: NoExecute
tolerationSeconds: 120
- key: node.kubernetes.io/not-ready
operator: Exists
effect: NoExecute
tolerationSeconds: 120
resources:
requests:
cpu: 10m
memory: 80Mi
package:
replicaCount: 2
maxUnavailable: 1
maxSurge: 1
image:
ref: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
pullPolicy: IfNotPresent
service:
internalPort: 5443
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node.kubernetes.io/unreachable
operator: Exists
effect: NoExecute
tolerationSeconds: 120
- key: node.kubernetes.io/not-ready
operator: Exists
effect: NoExecute
tolerationSeconds: 120
resources:
requests:
cpu: 10m
memory: 50Mi
monitoring:
enabled: true
namespace: openshift-monitoring
|
values.yaml
|
name: iheart_festival
description: iHeartRadio music festival app
dependencies:
flutter_staggered_grid_view: "^0.1.4"
flutter:
sdk: flutter
# The following adds the Cupertino Icons font to your application.
# Use with the CupertinoIcons class for iOS style icons.
cupertino_icons: ^0.1.0
zoomable_image: ^1.2.0
rxdart: ^0.16.7
dev_dependencies:
flutter_test:
sdk: flutter
# For information on the generic Dart part of this file, see the
# following page: https://www.dartlang.org/tools/pub/pubspec
# The following section is specific to Flutter.
flutter:
# The following line ensures that the Material Icons font is
# included with your application, so that you can use the icons in
# the material Icons class.
uses-material-design: true
# To add assets to your application, add an assets section, like this:
assets:
- map.png
- image_ticket.png
- icon_direction_200.png
- tag_free.png
- tag_contests.png
- logo_mars.png
- logo_cw.png
- logo_glade.png
- logo_inkbox.png
- logo_office.png
- logo_statefarm.png
- logo_taco.png
- logo_tmobile.png
- logo_ulta.png
- bg-wave.png
- qr_code.png
- background_img.png
- daytime_stage-logo.png
- MusicFest-logo.png
- twitter-verified.png
- twitter_comment.png
- twitter_retweet.png
- feed-img01.png
- feed-img02.png
- feed-img03.png
- profile-ihr.png
- bottom_nav_bg.png
- album_dualipa_IDGAF.png
- album_dualipa_liveacoustic.png
- icon_explicit.png
- soundwaves_on.png
- topsong_dualipa_album_blue.png
- dots_vertical.png
- selenagomez.jpeg
- alessia.jpeg
- annemarie.jpeg
- astrids.jpeg
- beberexha.jpeg
- halsey.jpeg
- album_dualipa_idgafremixes.png
- album_dualipa_newrules.png
- dualipa-news-001.png
- dualipa-news-002.png
- dualipa-news-003.png
- artist_ariana_medium.png
- artist_dualipa_large.png
- artist_flume_medium.png
- dualipa_001.png
- dualipa_002.png
- dualipa_003.png
- dualipa_004.png
- dualipa_005.png
- dualipa_006.png
# An image asset can refer to one or more resolution-specific "variants", see
# https://flutter.io/assets-and-images/#resolution-aware.
# For details regarding adding assets from package dependencies, see
# https://flutter.io/assets-and-images/#from-packages
# To add custom fonts to your application, add a fonts section here,
# in this "flutter" section. Each entry in this list should have a
# "family" key with the font family name, and a "fonts" key with a
# list giving the asset and other descriptors for the font. For
# example:
# fonts:
# - family: Schyler
# fonts:
# - asset: fonts/Avenir.ttc
# style: italic
# - family: Trajan Pro
# fonts:
# - asset: fonts/TrajanPro.ttf
# - asset: fonts/TrajanPro_Bold.ttf
# weight: 700
#
# For details regarding fonts from package dependencies,
# see https://flutter.io/custom-fonts/#from-packages
fonts:
- family: Avenir
fonts:
- asset: fonts/Avenir.ttc
|
pubspec.yaml
|
items:
- uid: Outlook.Office.MailboxEnums.EntityType
summary: |-
    Specifies the type of an entity.
\[[API set: Mailbox 1.0](/office/dev/add-ins/reference/requirement-sets/outlook-api-requirement-sets)\]
  remarks: '**[Applicable Outlook modes](https://docs.microsoft.com/outlook/add-ins/#extension-points)<!-- -->**: Compose or Read'
name: Office.MailboxEnums.EntityType
fullName: Office.MailboxEnums.EntityType
langs:
- typeScript
type: enum
package: Outlook
children:
- Outlook.Office.MailboxEnums.EntityType.Address
- Outlook.Office.MailboxEnums.EntityType.Contact
- Outlook.Office.MailboxEnums.EntityType.EmailAddress
- Outlook.Office.MailboxEnums.EntityType.MeetingSuggestion
- Outlook.Office.MailboxEnums.EntityType.PhoneNumber
- Outlook.Office.MailboxEnums.EntityType.TaskSuggestion
- Outlook.Office.MailboxEnums.EntityType.Url
- uid: Outlook.Office.MailboxEnums.EntityType.Address
  summary: Specifies that the entity is a postal address.
name: Address
fullName: Address
langs:
- typeScript
type: field
numericValue: '"address"'
- uid: Outlook.Office.MailboxEnums.EntityType.Contact
  summary: Specifies that the entity is a contact.
name: Contact
fullName: Contact
langs:
- typeScript
type: field
numericValue: '"contact"'
- uid: Outlook.Office.MailboxEnums.EntityType.EmailAddress
  summary: Specifies that the entity is an SMTP email address.
name: EmailAddress
fullName: EmailAddress
langs:
- typeScript
type: field
numericValue: '"emailAddress"'
- uid: Outlook.Office.MailboxEnums.EntityType.MeetingSuggestion
  summary: Specifies that the entity is a meeting suggestion.
name: MeetingSuggestion
fullName: MeetingSuggestion
langs:
- typeScript
type: field
numericValue: '"meetingSuggestion"'
- uid: Outlook.Office.MailboxEnums.EntityType.PhoneNumber
  summary: Specifies that the entity is a US phone number.
name: PhoneNumber
fullName: PhoneNumber
langs:
- typeScript
type: field
numericValue: '"phoneNumber"'
- uid: Outlook.Office.MailboxEnums.EntityType.TaskSuggestion
  summary: Specifies that the entity is a task suggestion.
name: TaskSuggestion
fullName: TaskSuggestion
langs:
- typeScript
type: field
numericValue: '"taskSuggestion"'
- uid: Outlook.Office.MailboxEnums.EntityType.Url
  summary: Specifies that the entity is an Internet URL.
name: Url
fullName: Url
langs:
- typeScript
type: field
numericValue: '"url"'
|
docs/docs-ref-autogen/outlook_1_7/outlook/office.mailboxenums.entitytype.yml
|
---
############################################################
# common_vars.yml: common variables shared by every script
############################################################
## Initial value
TESTING: "ON"
all_in_infra: "OFF"
master_management_ip: ""
iaas_outbound_port: 6777
sysmanager_outbound_port: 6776
nft_outbound_port: ""
cds_outbound_port: 6779
locale: "ko_KR.UTF-8"
aptInstall: "yes"
user_tenant_uuid: 0739ec6d
admin_tenant_uuid: 11111111
project_uuid: 01bea881
## ansible vars
ansi_path: "{{ (ansible_env|default({})).PATH|default('') }}"
## docker vars
master_ip:
image_registry: # image outer node IP
portal_ip:
## Portal web
portal_web_port_1: 80
portal_web_port_2: 9999
portal_server_port_1: 8080
portal_server_port_2: 9736
## Sysmanager vars
TMAX_SYSMANAGER_PORT: 1234
TMAX_SYSMANAGER_IP:
TMAX_SYSMANAGER_AGENT_PORT: 12345
TMAX_SYSMANAGER_AGENT_ANYMINER_PORT: 1413
TMAX_SYSMANAGER_AGENT_ANYMINER_IP:
TMAX_SYSMANAGER_AGENT_CONTAINER_RW_LAYER_DIRECTORY: "/root/tca_agent/container_rw_dir/"
TMAX_SYSMANAGER_AGENT_HOST_INTERFACE: ""
TMAX_SYSMANAGER_AGENT_REPORT_LOCATION: "sysmanager"
TMAX_SYSMANAGER_AGENT_QEMU_IMAGE_FILE_DIRECTORY: "/root/tca_agent/vm_rw_dir/"
## Image name
IMAGE_NAME_1: cloud_tcs_ssvr
IMAGE_NAME_2: cloud_tcs_tibero
IMAGE_NAME_3: jeus8_cloud_das
IMAGE_NAME_4: jeus8_cloud_ms
das_tag: b105_pro02
ms_tag: b105_pro02
ssvr_tag: 171017
tibero_tag: 171027_timestamp
## IaaS Config
TCA_MASTER_USER: "root"
TCA_MASTER_PW: "<PASSWORD>"
TCA_MASTER_IP: ""
TCA_MASTER_SVC_PORT: 8080
TCA_MASTER_SSH_PORT: 22
IMAGE_GATEWAY_USER: "root"
IMAGE_GATEWAY_PW: "<PASSWORD>"
IMAGE_GATEWAY_IP: ""
IMAGE_GATEWAY_PORT: "22"
IMAGE_INNER_LOCATION: ":22"
IMAGE_INNER_AUTH: "root:tmax@23"
IMAGE_OUTER_LOCATION: ":8080"
VM_IMAGE_OUTER_IP: "" ## IMAGE GATEWAY IP
VM_IMAGE_OUTER_SSH_PORT: 22
VM_IMAGE_OUTER_USER: "root"
VM_IMAGE_OUTER_PW: "<PASSWORD>"
VM_IMAGE_OUTER_DIR: "/tmp/vm_image"
IMAGE_STORAGE_LOCATION: "http:// :8081"
RADOS_GATEWAY_LOCATION: "http:// :8089"
LB_NFT: "true"
LB_NFT_IP: ""
SYS_AGENT_RUN: "ON"
TCN_SDN_RUN: "OFF"
VM_MNG_IP:
VM_INTERNAL_IP:
VM_PORT: 52000
VM_NODE_USER: "root"
VM_NODE_PASSWORD: "<PASSWORD>"
TCS_VM_HOME: "/root/automation/csvmgr"
TCS_LSNR_PORT: 10000
TCS_RECV_PORT: 34000
WEBCONSOLE_PROXY_SERVER_IP: ""
WEBCONSOLE_PROXY_SERVER_PORT: 22
WEBCONSOLE_PROXY_SERVER_USER: "root"
WEBCONSOLE_PROXY_SERVER_PASSWORD: "<PASSWORD>"
WEBCONSOLE_MIN_PORT: 15000
WEBCONSOLE_MAX_PORT: 60000
WEBCONSOLE_PROXY_SERVER_PUBLIC_IP: ""
CMDB_ENABLE: "ON"
CMDB_IP: ""
CMDB_PORT: 8080
TCNM_IP:
TCNM_PORT: 8080
AGENT_LXC_VERSION: 210
FAILOVER_ENABLE: "ON"
IAAS_VERSION: "IAAS"
VM_NODE_IP:
VM_ENV: "/root/set_dbenv.sh"
AUTH_IP:
CEPH_MON_IP:
|
cloud_auto/ansible/vars/common_vars.yml
|
uid: "com.azure.resourcemanager.compute.fluent.LogAnalyticsClient.exportThrottledRequestsAsync*"
fullName: "com.azure.resourcemanager.compute.fluent.LogAnalyticsClient.exportThrottledRequestsAsync"
name: "exportThrottledRequestsAsync"
nameWithType: "LogAnalyticsClient.exportThrottledRequestsAsync"
members:
- uid: "com.azure.resourcemanager.compute.fluent.LogAnalyticsClient.exportThrottledRequestsAsync(java.lang.String,com.azure.resourcemanager.compute.models.LogAnalyticsInputBase)"
fullName: "com.azure.resourcemanager.compute.fluent.LogAnalyticsClient.exportThrottledRequestsAsync(String location, LogAnalyticsInputBase parameters)"
name: "exportThrottledRequestsAsync(String location, LogAnalyticsInputBase parameters)"
nameWithType: "LogAnalyticsClient.exportThrottledRequestsAsync(String location, LogAnalyticsInputBase parameters)"
summary: "Export logs that show total throttled Api requests for this subscription in the given time window."
parameters:
- description: "The location upon which virtual-machine-sizes is queried."
name: "location"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
- description: "Parameters supplied to the LogAnalytics getThrottledRequests Api."
name: "parameters"
type: "<xref href=\"com.azure.resourcemanager.compute.models.LogAnalyticsInputBase?alt=com.azure.resourcemanager.compute.models.LogAnalyticsInputBase&text=LogAnalyticsInputBase\" data-throw-if-not-resolved=\"False\" />"
syntax: "public abstract Mono<LogAnalyticsOperationResultInner> exportThrottledRequestsAsync(String location, LogAnalyticsInputBase parameters)"
returns:
description: "logAnalytics operation status response."
type: "<xref href=\"reactor.core.publisher.Mono?alt=reactor.core.publisher.Mono&text=Mono\" data-throw-if-not-resolved=\"False\" /><<xref href=\"com.azure.resourcemanager.compute.fluent.models.LogAnalyticsOperationResultInner?alt=com.azure.resourcemanager.compute.fluent.models.LogAnalyticsOperationResultInner&text=LogAnalyticsOperationResultInner\" data-throw-if-not-resolved=\"False\" />>"
type: "method"
metadata: {}
package: "com.azure.resourcemanager.compute.fluent"
artifact: com.azure.resourcemanager:azure-resourcemanager-compute:2.2.0
|
docs-ref-autogen/com.azure.resourcemanager.compute.fluent.LogAnalyticsClient.exportThrottledRequestsAsync.yml
|
groups:
- name: pgstat
rules:
- record: postgres:pg_stat_database_deadlocks:rate1m
expr: rate(pg_stat_database_deadlocks[1m])
- record: postgres:pg_stat_database_conflicts_confl_deadlock:rate1m
expr: rate(pg_stat_database_conflicts_confl_deadlock[1m])
- record: postgres:pg_stat_database_conflicts:rate1m
expr: rate(pg_stat_database_conflicts[1m])
- record: postgres:pg_stat_database_conflicts_confl_bufferpin:rate1m
expr: rate(pg_stat_database_conflicts_confl_bufferpin[1m])
- record: postgres:pg_stat_database_conflicts_confl_snapshot:rate1m
expr: rate(pg_stat_database_conflicts_confl_snapshot[1m])
- record: postgres:pg_stat_database_conflicts_confl_tablespace:rate1m
expr: rate(pg_stat_database_conflicts_confl_tablespace[1m])
  - record: postgres:pg_stat_database_blks_hit:sumbyrate1m
expr: sum(rate(pg_stat_database_blks_hit[1m]))by(instance,database)
  - record: postgres:pg_stat_database_blks_read:sumbyrate1m
expr: sum(rate(pg_stat_database_blks_read[1m]))by(instance,database)
- record: postgres:pg_stat_database_blk_read_time:rate1m
expr: rate(pg_stat_database_blk_read_time[1m])
- record: postgres:pg_stat_database_blk_write_time:rate1m
expr: rate(pg_stat_database_blk_write_time[1m])
- record: postgres:pg_stat_database_xact_commit:sumbyirate1m
expr: sum(irate(pg_stat_database_xact_commit[1m]))by(instance,database)
- record: postgres:pg_stat_database_xact_rollback:sumbyirate1m
expr: sum(irate(pg_stat_database_xact_rollback[1m]))by(instance,database)
- record: postgres:pg_stat_database_temp_bytes:sum
expr: sum(pg_stat_database_temp_bytes)by(instance,database)
- record: postgres:pg_stat_database_tup_inserted:sum
expr: sum(pg_stat_database_tup_inserted)by(instance,database)
- record: postgres:pg_stat_database_tup_fetched:sum
expr: sum(pg_stat_database_tup_fetched)by(instance,database)
- record: postgres:pg_stat_database_temp_files:sum
expr: sum(pg_stat_database_temp_files)by(instance,database)
- record: postgres:pg_stat_database_tup_updated:sum
expr: sum(pg_stat_database_tup_updated)by(instance,database)
- name: pgstat_statements
rules:
- record: postgres:pg_stat_statements_mean_exec_time:topk20_increase10m
expr: topk(20,increase(pg_stat_statements_mean_exec_time[10m]))
- record: postgres:pg_stat_statements_calls:topk20_increase10m
expr: topk(20,increase(pg_stat_statements_calls[10m]))
- record: postgres:pg_stat_statements_rows:sum_increase1m
expr: sum(increase(pg_stat_statements_rows[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_total_exec_time:sum_rate1m
expr: sum(rate(pg_stat_statements_total_exec_time[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_max_exec_time:sum_rate1m
expr: sum(rate(pg_stat_statements_max_exec_time[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_mean_exec_time:sum_rate1m
expr: sum(rate(pg_stat_statements_mean_exec_time[1m])) by(datname,instance)
- record: postgres:pg_stat_statements_stddev_exec_time:sum_rate1m
expr: sum(rate(pg_stat_statements_stddev_exec_time[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_min_plan_time:sum_rate1m
expr: sum(rate(pg_stat_statements_min_plan_time[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_shared_blks_hit:sum_increase1m
expr: sum(increase(pg_stat_statements_shared_blks_hit[1m])) by(datname,instance)
- record: postgres:pg_stat_statements_shared_blks_read:sum_increase1m
expr: sum(increase(pg_stat_statements_shared_blks_read[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_shared_blks_dirtied:sum_increase1m
expr: sum(increase(pg_stat_statements_shared_blks_dirtied[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_local_blks_hit:sum_increase1m
expr: sum(increase(pg_stat_statements_local_blks_hit[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_local_blks_read:sum_increase1m
expr: sum(increase(pg_stat_statements_local_blks_read[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_blk_write_time:sum_increase1m
expr: sum(increase(pg_stat_statements_blk_write_time[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_temp_blks_read:sum_increase1m
expr: sum(increase(pg_stat_statements_temp_blks_read[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_local_blks_dirtied:sum_increase1m
expr: sum(increase(pg_stat_statements_local_blks_dirtied[1m])) by (datname,instance)
- record: postgres:pg_stat_statements_temp_blks_written:sum_increase1m
expr: sum(increase(pg_stat_statements_temp_blks_written[1m])) by (datname,instance)
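# Usage sketch (hypothetical, not part of these groups): recorded series are
# queried like any other metric, so an alerting rule could reference the
# precomputed rate directly, e.g.
#   - alert: PostgresDeadlocks
#     expr: postgres:pg_stat_database_deadlocks:rate1m > 0
#     for: 5m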
|
prometheus/main/postgres_rules.yml
|
homepage: http://github.com/composewell/unicode-data
changelog-type: markdown
hash: 57529e1f443a8c5f8f29cda24df810991dd2506edb89f10f7607fc5e44cd52fe
test-bench-deps: {}
maintainer: <EMAIL>
synopsis: Access Unicode character database
changelog: |
# Changelog
## 0.1.0.1 (Jul 2021)
* Workaround to avoid incorrect display of dependencies on Hackage by moving
build-depends of ucd2haskell executable under a build flag conditional.
## 0.1.0 (Jul 2021)
* Initial release
basic-deps:
base: '>=4.7 && <5'
all-versions:
- 0.1.0
- 0.1.0.1
author: <NAME> and Contributors
latest: 0.1.0.1
description-type: markdown
description: |
# README
`unicode-data` provides Haskell APIs to efficiently access the unicode
character database. Performance is the primary goal in the design of
this package.
The Haskell data structures are generated programmatically from the
unicode character database (UCD) files. The latest unicode version
supported by this library is 13.0.0.
This package is far from complete. Currently it supports normalization
related functions and a few other properties, primarily to support
`unicode-transforms` package. More properties can be added as needed by
any other packages or use cases.
Please see the haddock documentation for reference documentation.
## Unicode database version update
To update the unicode version please update the version number in
`ucd.sh`.
To download the unicode database, run `ucd.sh download` from the top
level directory of the repo to fetch the database in `./ucd`.
```
$ ./ucd.sh download
```
To generate the Haskell data structure files from the downloaded database
files, run `ucd.sh generate` from the top level directory of the repo.
```
$ ./ucd.sh generate
```
## Running property doctests
Temporarily add `QuickCheck` to build depends of library.
```
$ cabal build
$ cabal-docspec --check-properties --property-variables c
```
## Licensing
`unicode-data` is an [open source](https://github.com/composewell/unicode-data)
project available under a liberal [Apache-2.0 license](LICENSE).
  ## Contributing
As an open project we welcome contributions.
license-name: Apache-2.0
|
packages/un/unicode-data.yaml
|
---
- name: Add linux-image-extra-virtual
apt:
name: linux-image-extra-virtual
update_cache: yes
state: present
- name: Add line to /etc/modules
lineinfile:
line: "snd-aloop"
path: /etc/modules
regexp: '^snd-aloop'
- name: Add the snd-aloop kernel module
modprobe:
name: snd-aloop
state: present
- name: Add ppa:mc3man/bionic-media repo
apt_repository:
repo: ppa:mc3man/bionic-media
state: present
- name: Install ffmpeg
apt:
name: ffmpeg
state: present
update_cache: yes
- name: Add google apt_key
apt_key:
url: https://dl-ssl.google.com/linux/linux_signing_key.pub
state: present
- name: Add google repo
apt_repository:
repo: deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main
state: present
- name: Install google-chrome-stable
apt:
name: google-chrome-stable
state: present
- name: Create dir /etc/opt/chrome/policies/managed
file:
path: /etc/opt/chrome/policies/managed
state: directory
owner: root
group: root
- name: Add google managed.policy
lineinfile:
line: '{ "CommandLineFlagSecurityWarningsEnabled": false }'
path: "/etc/opt/chrome/policies/managed/managed_policies.json"
regexp: '^{ "CommandLineFlagSecurityWarningsEnabled"'
create: yes
- name: Install unzip
apt:
name: unzip
state: present
- name: Get current google release
uri:
url: http://chromedriver.storage.googleapis.com/LATEST_RELEASE
return_content: yes
register: googleversion
- debug: msg="GoogleVersion - {{ googleversion.content }}"
when: not ansible_check_mode
- name: Download and unarchive the latest chromedriver
unarchive:
src: http://chromedriver.storage.googleapis.com/{{ googleversion.content }}/chromedriver_linux64.zip
dest: /usr/local/bin/
remote_src: yes
owner: root
group: root
mode: '0755'
creates: /usr/local/bin/chromedriver
when: not ansible_check_mode
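# Verification sketch, not in the original role (task names are assumptions;
# /usr/local/bin is expected to be on PATH):
- name: Check the installed chromedriver version
  command: chromedriver --version
  register: chromedriver_version
  changed_when: false
  when: not ansible_check_mode
- debug: msg="Chromedriver - {{ chromedriver_version.stdout }}"
  when: not ansible_check_mode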
- name: Install different packages
apt:
name: "{{ item }}"
state: present
loop:
- default-jre-headless
- ffmpeg
- curl
- alsa-utils
- icewm
- xdotool
- xserver-xorg-input-void
- xserver-xorg-video-dummy
- name: Remove java-11
apt:
name: openjdk-11-jre-headless
state: absent
- name: Install java-8
apt:
name: "{{ item }}"
state: present
loop:
- openjdk-8-jdk
- openjdk-8-jdk-headless
- openjdk-8-jre
- openjdk-8-jre-headless
- name: Add jitsi key
apt_key:
url: https://download.jitsi.org/jitsi-key.gpg.key
state: present
- name: Add jitsi repo
apt_repository:
repo: 'deb https://download.jitsi.org unstable/'
state: present
- name: Install jibri
apt:
name: jibri
state: present
update_cache: yes
- name: Add jibri user to required groups
user:
name: jibri
groups: adm,audio,video,plugdev
append: yes
- name: Create recordings folder
file:
path: "{{ jibri_recordings_dir }}"
state: directory
owner: jibri
group: jitsi
|
ansible/roles/jibri/tasks/install.yml
|
gacode: UA-8971429-1
menus:
# - title: News
# items: [{ id: news , url: /news , name: Current News }]
- title: Works
items:
      - { id: photography, url: /photography, name: Photography }
      - { id: editions, name: Editions, url: /editions/ }
      - { id: videos, url: /videos/, name: Videos }
      - { id: bibliography, name: Bibliography, url: /bibliography/ }
# - title: Biography
# items:
# - { id: exhibitions , name: Exhibitions , url: /exhibitions/ }
# - { id: bibliography , name: Bibliography ,url: /bibliography/ }
- title: Extras
items:
# - {id: wallpapers , name: Wallpapers , url: /wallpapers/}
      - { id: links, url: /links/, name: Links }
      - { id: contact, url: /contact/, name: Contact }
      - { id: guestbook, url: /guestbook/, name: Guestbook }
pages:
news:
title: Welcome
id: news
    keywords: Elina Brotherus, photography, Finnish photography, Elina Brotherus website, Elina Brotherys, valokuvaus, elinabrotherus.com
desc: Welcome to elinabrotherus.com. Elina Brotherus is a Finnish photographer currently living in France.
photography:
id: photography
title: Photography
keywords: Elina Brotherus, pictures, photographs, Elina Brotherus photo series
desc: elinabrotherus.com - Photography series
editions:
id: editions
title: Editions
keywords: Elina Brotherus, editions,The New Painting ,Complete Videoworks
    desc: elinabrotherus.com - Editions
bibliography:
id: bibliography
title: Bibliography
keywords: Bibliography,Elina Brotherus,Articles,Interview,Bibliography
desc: elinabrotherus.com - Articles, Interviews and Bibliography.
exhibitions:
id: exhibitions
title: Exhibitions
    keywords: exhibitions, Elina Brotherus, Ecology of Emotions, Correspondences, Martin Asbaek Gallery, Myth of Childhood, La Ruche en d’autres thèrmes
desc: elinabrotherus.com - Exhibitions
videos:
id: videos
title: Video Works
keywords: Elina Brotherus, video works, videos
desc: elinabrotherus.com - Video works
links:
id: links
title: Links
keywords: links from elinabrotherus
desc: Links to people and things I support
contact:
id: contact
title: Send me a message
keywords: ''
desc: The Crew
guestbook:
id: guestbook
title: Guestbook
keywords: guestbook
desc: Guestbook for elinabrotherus.com
|
config.yml
|
---
# This playbook installs the apps required on a server
- name: checking if splunk init.d file is installed
tags:
- splunk
- splunk_enterprise
stat:
path: "/etc/init.d/splunk"
register: splunk_initd
- name: splunk is installed here
tags:
- splunk
- splunk_enterprise
set_fact:
when: splunk_path.stat.exists
- name: add disable THP in /etc/rc.local
tags:
- splunk
- splunk_enterprise
blockinfile:
path: /etc/rc.local
insertbefore: "^exit 0"
marker: "# {mark} ANSIBLE MANAGED BLOCK (THP)"
content: |
#SPLUNK: disable THP at boot time
THP=`find /sys/kernel/mm/ -name transparent_hugepage -type d | tail -n 1`
for SETTING in "enabled" "defrag";do
if test -f ${THP}/${SETTING}; then
echo never > ${THP}/${SETTING}
fi
done
when: use_systemctl != true
- name: "install disable-thp systemd service"
tags:
- splunk
- splunk_enterprise
copy:
src: "etc/systemd/system/disable-thp.service"
dest: "/etc/systemd/system/disable-thp.service"
owner: root
group: root
mode: 0644
when: use_systemctl == true and splunk_initd.stat.exists != true
- name: "install /etc/security/limits.d/splunk.conf"
tags:
- splunk
- splunk_enterprise
template:
src: "etc/security/limits.d/splunk.conf.j2"
dest: "/etc/security/limits.d/splunk.conf"
owner: root
group: root
mode: 0644
- name: add disable_huge and change_ulimits functions to /etc/init.d/splunk
tags:
- splunk
- splunk_enterprise
blockinfile:
path: /etc/init.d/splunk
marker: "# {mark} ANSIBLE MANAGED BLOCK (functions)"
insertbefore: "^case"
content: |
# disable hugepages
disable_huge() {
echo "disabling huge page support"
THP=`find /sys/kernel/mm/ -name transparent_hugepage -type d | tail -n 1`
for SETTING in "enabled" "defrag";do
if test -f ${THP}/${SETTING}; then
echo never > ${THP}/${SETTING}
fi
done
}
# change ulimits
change_ulimit() {
ulimit -Hn 65535
ulimit -Sn 65535
ulimit -Hu 20480
ulimit -Su 20480
ulimit -Hf unlimited
ulimit -Sf unlimited
}
when: splunk_initd.stat.exists == true
- name: enable usage of functions during start in /etc/init.d/splunk
tags:
- splunk
- splunk_enterprise
blockinfile:
path: /etc/init.d/splunk
marker: " # {mark} ANSIBLE MANAGED BLOCK (start)"
insertbefore: "splunk_start$"
content: |2
disable_huge
change_ulimit
when: splunk_initd.stat.exists == true
- name: enable usage of functions during restart in /etc/init.d/splunk
tags:
- splunk
- splunk_enterprise
blockinfile:
path: /etc/init.d/splunk
marker: " # {mark} ANSIBLE MANAGED BLOCK (restart)"
insertbefore: "splunk_restart$"
content: |2
disable_huge
change_ulimit
when: splunk_initd.stat.exists == true
- name: "update systemd service file"
tags:
- splunk
- splunk_enterprise
ini_file:
path: "/etc/systemd/system/{{splunk_service_name}}.service"
section: "{{ item.section }}"
option: "{{ item.var }}"
value: "{{ item.val }}"
owner: root
group: root
mode: 0700
with_items: "{{ splunk_systemd_services_updates }}"
when: use_systemctl == true and splunk_initd.stat.exists != true and splunk_systemd_services_updates is defined
- name: commit changes to systemctl
tags:
- splunk
- splunk_enterprise
command: systemctl daemon-reload
when: use_systemctl == true
- name: enable and start disable-thp service
tags:
- splunk
- splunk_enterprise
service:
name: disable-thp
state: started
enabled: yes
when: use_systemctl == true and splunk_initd.stat.exists != true
|
ansible/roles/splunk_enterprise/tasks/ulimit_thp.yml
|
# By default, the autogenerated rules include rules that attempt to
# restrict the set of system calls that can be performed by
# applications. However, we know that these rules are fairly FP-prone,
# so they are disabled by default. If you'd like to enable them,
# either change or override this macro's condition to "evt.num >= 0".
- macro: hyperkube_consider_syscalls
condition: (evt.num < 0)
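# For illustration, enabling the syscall rules could look like the sketch
# below; this assumes Falco's usual behavior of letting a macro redefined in
# a later-loaded rules file replace this definition.
# - macro: hyperkube_consider_syscalls
#   condition: (evt.num >= 0)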
# These policies are limited to containers, specifically those where
# the container image name starts with "hyperkube"
- macro: app_hyperkube
condition: container and container.image contains "hyperkube"
# Not considering all inbound networking suspect
# Not considering all outbound networking suspect
# Restricting listening ports to selected set
- list: hyperkube_allowed_inbound_ports_tcp
items: [10248, 10249, 10250, 10255, 4194, 443, 8080]
- rule: Unexpected inbound tcp connection hyperkube
desc: Detect inbound traffic to hyperkube using tcp on a port outside of expected set
condition: inbound and evt.rawres >= 0 and not fd.sport in (hyperkube_allowed_inbound_ports_tcp) and app_hyperkube
output: Inbound network connection to hyperkube on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting spawned processes to selected set
- list: hyperkube_allowed_processes
items:
[
"/bin/bash",
"/bin/findmnt",
"/hyperkube",
"findmnt",
"iptables",
"iptables-restor",
"iptables-restore",
"iptables-save",
"journalctl",
"nsenter",
"sleep",
]
- rule: Unexpected spawned process hyperkube
desc: Detect a process started in a hyperkube container outside of an expected set
condition: spawned_process and not proc.name in (hyperkube_allowed_processes) and app_hyperkube
output: Unexpected process spawned in hyperkube container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image)
priority: NOTICE
# Restricting files read or written to specific set
- list: hyperkube_allowed_file_prefixes_readonly
items: ["/"]
- rule: Unexpected file access readonly for hyperkube
desc: Detect an attempt to access a file readonly other than below an expected list of directories
condition: (open_read and evt.is_open_write=false) and not fd.name pmatch (hyperkube_allowed_file_prefixes_readonly) and app_hyperkube
output: Unexpected file accessed readonly for hyperkube (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image)
priority: NOTICE
- list: hyperkube_allowed_file_prefixes_readwrite
items: ["/dev", "/proc", "/var/lib/kubelet/pods"]
- rule: Unexpected file access readwrite for hyperkube
desc: Detect an attempt to access a file readwrite other than below an expected list of directories
condition: (open_write) and not fd.name pmatch (hyperkube_allowed_file_prefixes_readwrite) and app_hyperkube
output: Unexpected file accessed readwrite for hyperkube (command=%proc.cmdline pid=%proc.pid file=%fd.name %container.info image=%container.image)
priority: NOTICE
|
artifact-hub/falco/gke/1.0.1/hyperkube-rules.yaml
|
nl:
activerecord:
models:
activity:
one: Activiteit #g
other: Activiteiten #g
group:
one: Groep #g
other: Groepen #g
member:
one: Lid #g
other: Leden #g
participant:
one: Deelnemer
other: Deelnemers
person:
one: Persoon #g
other: Personen #g
session:
one: Sessie
other: Sessies
token:
one: Token
other: Tokens
user:
one: Gebruiker
other: Gebruikers
default_subgroup:
one: Standaardgroep
other: Standaardgroepen
subgroup:
one: Subgroep
other: Subgroepen
attributes:
activity:
deadline: Deadline #g
description: Beschrijving #g
end: Eind #g
group: :activerecord.models.group #g
location: Locatie
name: Naam #g
participants: Deelnemers #g
people: Mensen #g
start: Start #g
reminder_at: Herinnering om
reminder_done: Herinnering verstuurd
subgroups: :activerecord.models.subgroup.other
subgroup_division_enabled: Subgroepen indelen
subgroup_division_done: Subgroepen ingedeeld
no_response_action: Actie bij geen reactie
group:
activities: Activiteiten #g
default_subgroups: Standaardsubgroepen
members: Leden #g
name: Naam #g
people: Mensen #g
member:
group: :activerecord.models.group #g
is_leader: Is groepsleider #g
display_name: Weergavenaam
person: :activerecord.models.person #g
created_at: <NAME>
participant:
activity: :activerecord.models.activity #g
attending: Aanwezig
is_organizer: Is organisator #g
notes: Notes #g
person: :activerecord.models.person #g
person:
activities: Activiteiten #g
email: Emailadres #g
first_name: Voornaam #g
groups: Groepen #g
infix: Tussenvoegsel #g
is_admin: Is admin #g
last_name: Achternaam #g
members: leden #g
participants: Deelnemers #g
user: :activerecord.models.user #g
session:
active: Actief #g
expires: verloopt #g
ip: IP #g
remember_digest: Onthoud mij-digest #g
user: :activerecord.models.user #g
token:
expires: Verloopt #g
token: Token #g
tokentype: Tokentype #g
user: :activerecord.models.user #g
user:
confirmed: Bevestigd
email: E-mail
password: <PASSWORD>
password_confirmation: <PASSWORD>
password_digest: <PASSWORD>
person: :activerecord.models.person
default_subgroup:
name: Naam
is_assignable: Gebruiken voor indelen
subgroup:
name: Naam
is_assignable: Gebruiken voor indelen
|
config/locales/translation_nl.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-16 18:21"
game: "Unreal Tournament"
name: "CTF-Euro-2000"
author: "BaSoMaTiC - <EMAIL>"
description: "Somebody get our balls back!"
releaseDate: "2000-06"
attachments:
- type: "IMAGE"
name: "CTF-Euro-2000_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/Capture%20The%20Flag/E/CTF-Euro-2000_shot_1.png"
originalFilename: "ctf-euro-2000.zip"
hash: "bafbc9fb4fb8144b394e3d968cafa6641918d130"
fileSize: 157930
files:
- name: "CTF-Euro-2000.unr"
fileSize: 597486
hash: "41035193349e1200f8d9689ce3e8a043613a1657"
otherFiles: 2
dependencies: {}
downloads:
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/Capture%20The%20Flag/E/ctf-euro-2000.zip"
main: true
repack: false
state: "OK"
- url: "https://gamefront.online/files2/service/thankyou?id=1403839"
main: false
repack: false
state: "MISSING"
- url: "http://www.ut-files.com/index.php?dir=Maps/CTF/MapsE/&file=ctf-euro-2000.zip"
main: false
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/CTF/MapsF/&file=ctf-euro-2000.zip"
main: false
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/CTF/&file=ctf-euro-2000.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/CTF&file=ctf-euro-2000.zip"
main: false
repack: false
state: "OK"
- url: "http://uttexture.com/UT/Downloads/Maps/CTF/MapsE/ctf-euro-2000.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/Capture%20The%20Flag/E/b/a/fbc9fb/ctf-euro-2000.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/Capture%20The%20Flag/E/b/a/fbc9fb/ctf-euro-2000.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "Capture The Flag"
title: "Euro 2000 Stadium"
playerCount: "11 + 11 (LOL)"
themes:
Tech: 0.5
Skaarj Crypt: 0.1
Natural: 0.3
bots: true
|
content/Unreal Tournament/Maps/Capture The Flag/E/b/a/fbc9fb/ctf-euro-2000_[bafbc9fb].yml
|
name: CI
on: [push, pull_request]
jobs:
static-code-analysis:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: PHP-CS-Fixer
uses: docker://jakzal/phpqa:1.58.7-php7.4-alpine
with:
args: php-cs-fixer --dry-run --diff --no-interaction --ansi fix
tests:
name: Symfony ${{ matrix.symfony-version }} on PHP ${{ matrix.php-version }} flags ${{ matrix.composer-flags }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
php-version: ['7.2','7.3', '7.4', '8.0']
composer-flags: ['']
symfony-version: ['']
include:
- php-version: 7.2
composer-flags: "--prefer-lowest"
- php-version: 7.2
symfony-version: "^4.4"
- php-version: 8.0
symfony-version: "^6.0"
- php-version: 8.1
symfony-version: "^6.0"
services:
mysql:
image: mysql:5.7
env:
MYSQL_ALLOW_EMPTY_PASSWORD: yes
MYSQL_DATABASE: acme
ports:
- 3306:3306
postgresql:
image: postgres:9.6
env:
POSTGRES_USER: 'postgres'
POSTGRES_PASSWORD: 'postgres'
POSTGRES_DB: 'postgres'
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 5432:5432
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup PHP, with composer and extensions
uses: shivammathur/setup-php@v2 #https://github.com/shivammathur/setup-php
with:
php-version: ${{ matrix.php-version }}
extensions: mbstring, xml, ctype, iconv, intl, pdo_sqlite
- name: Get composer cache directory
id: composercache
run: echo "::set-output name=dir::$(composer config cache-files-dir)"
- name: Cache composer dependencies
uses: actions/cache@v2
with:
path: ${{ steps.composercache.outputs.dir }}
key: ${{ runner.os }}-composer-${{ hashFiles('**/composer.json') }}
restore-keys: ${{ runner.os }}-composer-
- name: Require Symfony
if: matrix.symfony-version != ''
run: |
composer require --no-update symfony/flex:^1.17.3
composer config extra.symfony.require "${{ matrix.symfony-version }}"
composer require --no-update symfony/framework-bundle=${{ matrix.symfony-version }}
- name: Remove PHPCR with Symfony 6
if: matrix.symfony-version == '^6.0'
run: composer remove --no-update --dev doctrine/phpcr-bundle doctrine/phpcr-odm
- name: Install Composer dependencies
if: matrix.composer-flags == ''
run: composer install
- name: Install Composer dependencies with options
if: matrix.composer-flags != ''
# Use "update" instead of "install" since it allows using the "--prefer-lowest" option
run: composer update ${{ matrix.composer-flags }}
- name: Show Composer dependencies
run: composer show
- name: Run tests
        # phpunit.xml.dist excludes tests annotated with "@group mysql"; the empty --exclude-group "" below reverts this
# Run tests twice to ensure that tests are idempotent even if database caching is enabled
run: |
php ./vendor/bin/phpunit --testdox --exclude-group ""
php ./vendor/bin/phpunit --testdox --exclude-group ""
|
.github/workflows/tests.yml
|
on:
push:
branches:
- "master"
name: build_for_multi-platform
jobs:
release-all:
name: release
needs: [build-mac,build-win,build-ubuntu]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
      # read a property from the JSON file
- name: Read Properties
        id: version # this step id is referenced later
uses: ashley-taylor/read-json-property-action@v1.0
with:
          path: ./package.json # path to the file
          property: version # the property to read
- name: create release
id: create_release
uses: actions/create-release@master
env:
GITHUB_TOKEN: ${{ secrets.TOKEN }}
with:
          # tag the release with the version number
tag_name: v${{steps.version.outputs.value}}
release_name: Release v${{steps.version.outputs.value}}
draft: false
prerelease: false
- uses: actions/download-artifact@v2
with:
name: release
- name: test
run: ls
- name: upload macos
id: upload-macos
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./release-mac.zip
asset_name: release-mac.zip
asset_content_type: application/zip
- name: upload win
id: upload-windows
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./release-win.zip
asset_name: release-win.zip
asset_content_type: application/zip
- name: upload ubuntu
id: upload-ubuntu
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./release-ubuntu.zip
asset_name: release-ubuntu.zip
asset_content_type: application/zip
build-mac:
name: build for MacOs
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v2
      # use prebuilt binary dependencies to save compile time
      # (this did not work in practice)
# - name: actions-rs/install@v0.1
# with:
# crate: cargo-audit
# version: latest
# use-tool-cache: true
- name: install rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
- name: install node/npm
uses: actions/setup-node@v2
with:
node-version: '14'
- name: build
        # the build produces index.node in the repository root
run: |
npm install
npm run build
- name: zip macos artifact
run: zip release-mac.zip ./index.node
- uses: actions/upload-artifact@v2
with:
name: release
path: ./release-mac.zip
build-win:
name: build for Windows
runs-on: windows-latest
steps:
- name: Checkout
uses: actions/checkout@v2
      # use prebuilt binary dependencies to save compile time
      # (this did not work in practice)
# - name: actions-rs/install@v0.1
# with:
# crate: cargo-audit
# version: latest
# use-tool-cache: true
- name: install rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
- name: install node/npm
uses: actions/setup-node@v2
with:
node-version: '14'
- name: build
        # the build produces index.node in the repository root
run: |
npm install
npm run build
- name: zip windows artifact
run: |
powershell Compress-Archive ./index.node release-win.zip
- uses: actions/upload-artifact@v2
with:
name: release
path: ./release-win.zip
build-ubuntu:
name: build for Ubuntu
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
      # use prebuilt binary dependencies to save compile time
      # (this did not work in practice)
# - name: actions-rs/install@v0.1
# with:
# crate: cargo-audit
# version: latest
# use-tool-cache: true
- name: install rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
- name: install node/npm
uses: actions/setup-node@v2
with:
node-version: '14'
- name: build
        # the build produces index.node in the repository root
run: |
npm install
npm run build
- name: zip ubuntu artifact
run: zip release-ubuntu.zip ./index.node
- uses: actions/upload-artifact@v2
with:
name: release
path: ./release-ubuntu.zip
|
.github/workflows/build.yaml
|
---
name: AWS Elasticsearch Service
description: <p>Amazon Elasticsearch Service makes it easy to deploy, operate, and
scale Elasticsearch for log analytics, full text search, application monitoring,
and more. Amazon Elasticsearch Service is a fully managed service that delivers
Elasticsearch’s easy-to-use APIs and real-time capabilities along with the
availability, scalability, and security required by production workloads. The service
offers built-in integrations with Kibana, Logstash, and AWS services including Amazon
Kinesis Firehose, AWS Lambda, and Amazon CloudWatch so that you can go from raw
data to actionable insights quickly.</p>
image: http://kinlane-productions2.s3.amazonaws.com/api-evangelist-site/company/logos/Analytics_AmazonElasticsearchService.png
created: "2021-02-04"
modified: "2021-02-04"
specificationVersion: "0.14"
x-rank: "10"
x-alexaRank: "14"
url: https://raw.githubusercontent.com/api-network/organizations/main/aws-elasticsearch-service.yaml
tags:
- Search
- Relative Data
- Logging
- Have API Paths
- Have API
- API Service Provider
- API Provider
- Amazon Web Services
apis: []
x-common:
- type: x-console
url: https://console.aws.amazon.com/es/home?region=us-east-1
- type: x-documentation
url: http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-configuration-api.html
- type: x-documentation
url: https://docs.aws.amazon.com/index.html?nc2=h_ql_doc
- type: x-facebook
url: https://www.facebook.com/amazonwebservices
- type: x-faq
url: https://aws.amazon.com/elasticsearch-service/faqs/
- type: x-getting-started
url: https://aws.amazon.com/elasticsearch-service/getting-started/
- type: x-marketplace
url: https://aws.amazon.com/marketplace/?nc2=h_ql_mp
- type: x-marketplace
url: https://aws.amazon.com/marketplace/?nc2=h_mo
- type: x-press
url: https://press.aboutamazon.com/press-releases/aws
- type: x-pricing
url: https://aws.amazon.com/elasticsearch-service/pricing/
- type: x-privacy
url: https://aws.amazon.com/privacy/?nc1=f_pr
- type: x-support
url: https://console.aws.amazon.com/support/home/?nc1=f_dr
- type: x-terms-of-service
url: https://aws.amazon.com/terms/?nc1=f_pr
- type: x-twitter
url: https://twitter.com/awscloud
- type: x-website
url: https://aws.amazon.com/elasticsearch-service/
- type: x-youtube
url: https://www.youtube.com/watch?v=oJUpUQ_yNVw
- type: x-youtube
url: https://www.youtube.com/watch?v=95kQkS51VnU
- type: x-youtube
url: https://www.youtube.com/watch?v=5BZ7a0uEeyI
include: []
maintainers:
- FN: <NAME>
x-twitter: apievangelist
email: <EMAIL>
---
|
_data/aws-elasticsearch-service.yaml
|
ru:
command:
freezebots:
description: Замораживает всех ботов.
message: "{1} замораживает ботов."
unfreezebots:
description: Размораживает всех ботов.
message: "{1} размораживает ботов."
addbots:
description: Добавляет определенное количество ботов на сервер.
syntax: "[количество]"
message: "{1} добавляет {2} {3} на сервер."
bot_one: бота
bot_many: ботов
kickbots:
description: Кикнуть всех ботов.
message: "{1} удаляет всех ботов."
changelevel:
description: Меняет карту на указанную.
syntax: <карта> [задержка]
message: "{1} меняет карту на {2} через {3} секунд!"
kick:
description: Выкидывает игрока с сервера.
syntax: <игрок> [причина]
message: "{1} исключает {2} из игры. ({3})"
ban:
description: Забанить урода!
syntax: <игрок> <срок блокировки> [причина]
message: "{admin} заблокировал {target} {time}. ({reason})"
unban:
description: Разблокировать игрока с таким Steam ID если тот находится в блокировке.
syntax: <заблокированный SteamID>
message: "{admin} разблокировал {target}."
tp:
description: Телепортировать игрока туда, куда вы смотрите.
syntax: <игрок>
tpto:
description: Телепортироваться к игроку.
syntax: <игрок>
return:
description: Вернуть игрока туда, где он находился до телепорта.
syntax: <игрок>
setgroup:
description: "Выставляет группу пользователя игрока. Группы: {1}"
syntax: <игрок> <группа>
message: "{1} добавляет {2} к группе '{3}'."
vanish:
description: "Полностью скрывает игрока от всех, кроме администрации."
syntax: "<игрок> [скрыть]"
self: "Вы теперь {state}."
enabled: "{admin_name} делает {player_name} невидимкой."
disabled: "{admin_name} сделал {player_name} видимым."
vanished: "скрыты от других игроков"
unvanished: "видимы другим игрокам"
fullbright:
description: "Отключает темноту."
syntax: "<игрок> <отключить_свет>"
enabled: "{player_name} включает ночное видение для {targets}."
disabled: "{player_name} отключает ночное видение {targets}."
enabled_self: "Вы теперь видите в темноте."
disabled_self: "Вы больше не видите в темноте."
demote:
description: Понижает игрока до пользователя.
syntax: <игрок>
message: "{1} понижает {2} с {3} до пользователя."
restart:
description: Перезапускает текущую карту.
syntax: "[задержка]"
message: "{1} перезапускает карту через {2} секунд!"
condition:
role:
name: Игрок с определенной ролью
text: Роль {1} {2}
message: Выберите роль
roles: Роли
error:
group_not_valid: "'{1}' не является группой пользователя!"
not_banned: Steam ID '{1}' не находится в блокировке!
no_permission: У вас недостаточно прав чтобы сделать это, {1}.
permission:
categories:
administration: Администрирование
tools: Инструменты
spawn: Спавн
general: Основное
misc: Разное
level_design: Дизайн уровней
server_management: Управление сервером
player_management: Управление игроками
compatibility: Совместимость
role:
user: Ранг, выдаваемый всем игрокам при заходе на сервер.
assistant: Административный ранг с низким уровнем доступа, для помощников администрации.
moderator: Административный ранг для доверенных членов команды.
admin: Административный ранг высшего уровня, имеющий доступ ко всему.
ui:
hud:
vanish: Вы невидимы
admin:
player_management: Управление игроками
players: Игроки
role: Роль
allow_all: Разрешить всё
no_all: "'Нет' для всех"
never_all: Запретить всё
expires: Истекает через
temp_permission:
title: Выдача временного разрешения
message: Укажите, на какое время будет выдано это разрешение. Если оно уже существует, оно будет продлено на это время. Оставьте пустым, чтобы изъять разрешение.
selector:
title: Изменение роли игрока
message: Выберите новую роль.
roles: Роли
config_editor: Конфигурация
new_config: Добавить новое значение
new_config_text: Введите новое значение.
delete_config: Удалить значение
delete_config_text: Вы уверены, что хотите удалить данное значение?
cmd_desc:
usage: "Синтаксис:"
aliases: "Алиасы:"
permission:
not_set: Не менять
allow: Разрешить
never: Запретить
error: Ошибка
|
plugins/admin/plugin/languages/ru.yml
|
uid: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException*"
fullName: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException"
name: "AmqpResourceLimitExceededException"
nameWithType: "AmqpResourceLimitExceededException.AmqpResourceLimitExceededException"
members:
- uid: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException()"
fullName: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException()"
name: "AmqpResourceLimitExceededException()"
nameWithType: "AmqpResourceLimitExceededException.AmqpResourceLimitExceededException()"
syntax: "public AmqpResourceLimitExceededException()"
- uid: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException(java.lang.String)"
fullName: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException(String message)"
name: "AmqpResourceLimitExceededException(String message)"
nameWithType: "AmqpResourceLimitExceededException.AmqpResourceLimitExceededException(String message)"
parameters:
- name: "message"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
syntax: "public AmqpResourceLimitExceededException(String message)"
- uid: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException(java.lang.String,java.lang.Throwable)"
fullName: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException(String message, Throwable cause)"
name: "AmqpResourceLimitExceededException(String message, Throwable cause)"
nameWithType: "AmqpResourceLimitExceededException.AmqpResourceLimitExceededException(String message, Throwable cause)"
parameters:
- name: "message"
type: "<xref href=\"java.lang.String?alt=java.lang.String&text=String\" data-throw-if-not-resolved=\"False\" />"
- name: "cause"
type: "<xref href=\"java.lang.Throwable?alt=java.lang.Throwable&text=Throwable\" data-throw-if-not-resolved=\"False\" />"
syntax: "public AmqpResourceLimitExceededException(String message, Throwable cause)"
- uid: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException(java.lang.Throwable)"
fullName: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException(Throwable cause)"
name: "AmqpResourceLimitExceededException(Throwable cause)"
nameWithType: "AmqpResourceLimitExceededException.AmqpResourceLimitExceededException(Throwable cause)"
parameters:
- name: "cause"
type: "<xref href=\"java.lang.Throwable?alt=java.lang.Throwable&text=Throwable\" data-throw-if-not-resolved=\"False\" />"
syntax: "public AmqpResourceLimitExceededException(Throwable cause)"
type: "constructor"
metadata: {}
package: "com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions"
artifact: com.microsoft.azure.sdk.iot:iot-device-client-preview:1.2.0
|
preview/docs-ref-autogen/com.microsoft.azure.sdk.iot.device.transport.amqps.exceptions.AmqpResourceLimitExceededException.AmqpResourceLimitExceededException.yml
|
---
# "secrets" for local development use
secrets:
artifactory_username: <copy_these_from_nkr-ops-sec_secrets_for_test>
artifactory_password: <copy_these_from_nkr-ops-sec_secrets_for_test>
artifactory_server: <copy_these_from_nkr-ops-sec_secrets_for_test>
indexservers:
solr:
port: "8983"
version: "6.2.0"
solr_java_mem: "-Xms512m -Xmx512m"
solr_cluster: false
finna_solr_commit: <PASSWORD>
zookeeper:
client_port: 2181
version: 3.6.2
httpd:
conf_loglevel: debug
listen_port: 443
intermediate_certificate_name: not_used
ba_users:
- { username: nkr-index, password: <PASSWORD> }
proxyservers:
server_domain_name: nkr-proxy.csc.local
flask_app:
debug: 1
log_level: DEBUG
verify_tls: 0
rems_host: rems.somewhere.org
rems_api_key: rems_api_key
rems_rejecter_bot_user: 'rejecter-bot'
index_hosts:
- localhost:8983
index_main_api: /solr
index_name: biblio
index_username: not_used
index_password: <PASSWORD>
index_allowed_apis: select
level_restriction_field: display_restriction_id_str
document_unique_id_field: _document_id
metadata_level_10_resource_id: metadata-level::10
exclude_requests_with_field_param: 'dummy'
include_requests_with_field_param: 'dummy'
include_requests_with_query_param: 'dummy'
exclude_requests_with_query_param: 'dummy'
short_timeframe: 7200
long_timeframe: 28800
max_amount_of_requests_short_period: 50
max_amount_of_requests_long_period: 500
mail:
short_period: 'Lyhyt'
long_period: 'Pitkä'
server: 'localhost'
port: 25
use_tls: False
use_ssl: True
default_sender: '<EMAIL>'
recipient: 'dummy'
cache:
host: localhost
port: 6379
password: <PASSWORD>
session_check:
rems_session_close_user: 'nkr-session-bot'
httpd:
conf_loglevel: debug
listen_port: 443
intermediate_certificate_name: not_used
ba_users:
- { username: nkr-proxy, password: <PASSWORD> }
harvesterservers:
branch: develop
repository_base_url: base.url
admin_email: <EMAIL>
index:
index_hostname: localhost
index_port: 8983
index_name: biblio
index_username: not_used
index_password: <PASSWORD>
datasource:
url_public: <copy_these_from_nkr-ops-sec_secrets_for_test>
url_restricted: <copy_these_from_nkr-ops-sec_secrets_for_test>
username: dummy
password: <PASSWORD>
|
ansible/secrets/local_development.yml
|
name: build-check
on: [push]
jobs:
ubuntu:
runs-on: "ubuntu-latest"
steps:
- uses: actions/checkout@v2
with:
submodules: true
- name: Get Vulkan Version
run: |
SDK_VERSION=`curl https://vulkan.lunarg.com/sdk/latest/linux.txt`
echo "SDK_VERSION=${SDK_VERSION}" >> $GITHUB_ENV
- name: Create build cache
uses: actions/cache@v3
id: build-cache
with:
path: |
vulkan/
build/
key: ${{ runner.OS }}-${{ env.SDK_VERSION }}
- name: Install Vulkan SDK
if: steps.build-cache.outputs.cache-hit != 'true'
run: |
curl -O https://sdk.lunarg.com/sdk/download/${SDK_VERSION}/linux/vulkan_sdk.tar.gz
mkdir -p vulkan/
tar -xzvf vulkan_sdk.tar.gz -C vulkan/
- name: Install dependencies
run: |
sudo apt-get update
sudo apt install -y xorg-dev
- name: Configure CMake cache
run: |
source vulkan/${SDK_VERSION}/setup-env.sh
mkdir -p build
cd build
rm -f CMakeCache.txt
cmake .. -DBUILD_TESTS=ON
- name: Build
run: |
cd build
cmake --build . --target kronic kronic_tests --config Debug -j2
- name: Test
run: ./build/bin/kronic_tests
- name: Upload Artifacts
uses: actions/upload-artifact@v2
with:
name: kronic-ubuntu-${{ github.sha }}
if-no-files-found: error
path: |
./build/bin/kronic
windows:
runs-on: "windows-latest"
steps:
- uses: actions/checkout@v2
with:
submodules: true
- name: Get Vulkan Version
run: |
$env:SDK_VERSION = (curl https://vulkan.lunarg.com/sdk/latest/windows.txt)
echo "SDK_VERSION=$env:SDK_VERSION" >> $env:GITHUB_ENV
- name: Create build cache
uses: actions/cache@v3
id: build-cache
with:
path: |
vulkan/
build/
key: ${{ runner.OS }}-${{ env.SDK_VERSION }}
- name: Install Vulkan SDK
if: steps.build-cache.outputs.cache-hit != 'true'
run: |
$ver = (Invoke-WebRequest -Uri "https://vulkan.lunarg.com/sdk/latest.json" | ConvertFrom-Json).windows
echo Version $ver
$ProgressPreference = 'SilentlyContinue'
Invoke-WebRequest -Uri "https://sdk.lunarg.com/sdk/download/$ver/windows/VulkanSDK-$ver-Installer.exe" -OutFile VulkanSDK.exe
echo Downloaded
$location = -join((Get-Location), "/vulkan")
.\VulkanSDK.exe --root $location --accept-licenses --default-answer --confirm-command install
- name: Configure CMake cache
run: |
$env:VULKAN_SDK = -join((Get-Location), "/vulkan")
          if (!(Test-Path build)) { mkdir build }
cd build
rm CMakeCache.txt -ErrorAction Ignore
cmake .. -DBUILD_TESTS=ON
# TODO: Fix this.
# We do a release build because the LunarG Vulkan SDK ships
# with a shaderc_combined.lib that links to release mode
# multithreaded runtime binaries. Doing a debug build with
# it would lead to linking errors.
- name: Build
run: |
cd build
cmake --build . --target kronic kronic_tests --config Release -j2
- name: Test
run: ./build/bin/Release/kronic_tests.exe
- name: Upload Artifacts
uses: actions/upload-artifact@v2
with:
name: kronic-windows-${{ github.sha }}
if-no-files-found: error
path: |
./build/bin/Release/kronic.exe
|
.github/workflows/build.yaml
|
gestion_vote_homepage:
path: /
defaults: { _controller: GestionVoteBundle:Default:index }
ajoutJurey:
path: /ajoutJurey
defaults: { _controller: GestionVoteBundle:Default:ajoutJurey }
ajoutCoatch:
path: /ajoutCoatch
defaults: { _controller: GestionVoteBundle:Default:ajoutCoatch }
post_like2:
path: /listePub/{id}/like2
defaults: { _controller: GestionVoteBundle:Default:like2 }
dislikeAction:
path: /listePub/{id}/like3
defaults: { _controller: GestionVoteBundle:Default:dislike }
meilleurslikeJurey:
path: /adminVote1
defaults: { _controller: GestionVoteBundle:Default:listeMeilleurLike }
mauvaiseslikeJurey:
path: /adminVote2
defaults: { _controller: GestionVoteBundle:Default:listeMeilleurdisLike }
meilleurslikeCoatch:
path: /adminVote3
    defaults: { _controller: GestionVoteBundle:Default:listeMeilleurdLikeCoatch }
mauvaiseslikeCoatch:
path: /adminVote4
defaults: { _controller: GestionVoteBundle:Default:listeMeilleurdisLikeCoatch }
listeMeilleurLike1:
path: /adminVote5
defaults: { _controller: GestionVoteBundle:Default:listeMeilleurLike1 }
listeMeilleurdLikeCoatch1:
path: /adminVote6
defaults: { _controller: GestionVoteBundle:Default:listeMeilleurdLikeCoatch1 }
meilleurslikeJureyPdf:
path: /adminVote7
defaults: { _controller: GestionVoteBundle:Default:PdflisteMeilleurLike }
PdflisteMeilleurdisLike:
path: /adminVote8
defaults: { _controller: GestionVoteBundle:Default:PdflisteMeilleurdisLike }
PdflisteMeilleurdLikeCoatch:
path: /adminVote9
defaults: { _controller: GestionVoteBundle:Default:PdflisteMeilleurdLikeCoatch }
PdflisteMeilleurdisLikeCoatch:
path: /adminVote10
defaults: { _controller: GestionVoteBundle:Default:PdflisteMeilleurdisLikeCoatch }
PdflisteMeilleurLike1Jurey:
path: /adminVote11
defaults: { _controller: GestionVoteBundle:Default:PdflisteMeilleurLike1 }
PdflisteMeilleurdLikeCoatch1:
path: /adminVote12
defaults: { _controller: GestionVoteBundle:Default:PdflisteMeilleurdLikeCoatch1 }
|
src/GestionVoteBundle/Resources/config/routing.yml
|
name: build
on: push
jobs:
build:
name: build
runs-on: ubuntu-16.04
strategy:
matrix:
node: [8, 10, 12]
env:
KYOTOCABINET_VERSION: '1.2.77'
KYOTOTYCOON_VERSION: '0.9.56'
steps:
- uses: actions/checkout@master
with:
fetch-depth: 1
- name: Restore KyotoCabinet build cache
id: cache-kc
uses: actions/cache@v1
with:
path: kyotocabinet-${{ env.KYOTOCABINET_VERSION }}
key: ${{ runner.os }}-kc-${{ env.KYOTOCABINET_VERSION }}
- name: Restore KyotoTycoon build cache
id: cache-kt
uses: actions/cache@v1
with:
path: kyototycoon-${{ env.KYOTOTYCOON_VERSION }}
key: ${{ runner.os }}-kt-${{ env.KYOTOTYCOON_VERSION }}
- name: Setup dependencies
run: |
sudo apt-get update
sudo apt-get install g++ zlib1g-dev
- name: Build KyotoCabinet
if: steps.cache-kc.outputs.cache-hit != 'true'
run: |
wget http://fallabs.com/kyotocabinet/pkg/kyotocabinet-${KYOTOCABINET_VERSION}.tar.gz
tar zxvf kyotocabinet-${KYOTOCABINET_VERSION}.tar.gz
cd kyotocabinet-${KYOTOCABINET_VERSION}
./configure
make
- name: Install KyotoCabinet
run: |
cd kyotocabinet-${KYOTOCABINET_VERSION}
sudo make install
- name: Build KyotoTycoon
        if: steps.cache-kt.outputs.cache-hit != 'true'
run: |
wget http://fallabs.com/kyototycoon/pkg/kyototycoon-${KYOTOTYCOON_VERSION}.tar.gz
tar zxvf kyototycoon-${KYOTOTYCOON_VERSION}.tar.gz
cd kyototycoon-${KYOTOTYCOON_VERSION}
./configure
sed -i -e "/^#include <kttimeddb.h>$/a \#include <unistd.h>" ./ktdbext.h
make
- name: Install KyotoTycoon
run: |
cd kyototycoon-${KYOTOTYCOON_VERSION}
sudo make install
- name: Setup library path
run: |
sudo sh -c "echo '/usr/local/lib' >> /etc/ld.so.conf.d/libc.conf"
sudo ldconfig
- name: Start KyotoTycoon
run: /usr/local/bin/ktserver -host localhost -port 1978 -dmn red#type=% blue#type=*
- name: Setup Node.js
uses: actions/setup-node@v1
with:
node-version: ${{ matrix.node }}
- name: Install
run: npm install
- name: Lint
run: npm run lint
- name: Test
run: npm run cover
- name: Coveralls
uses: coverallsapp/github-action@master
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
|
.github/workflows/build.yml
|
language: python
matrix:
fast_finish: true
include:
- python: "2.7"
env: TOXENV=py27 PYPI=true
os: linux
- python: "3.5"
env: TOXENV=py35
os: linux
- python: "3.6"
env: TOXENV=py36
os: linux
- python: "3.7"
env: TOXENV=py37
os: linux
dist: xenial
- python: "3.8"
env: TOXENV=py38
os: linux
dist: xenial
- name: "Python 3.7 on macOS"
env: TOXENV=py37
os: osx
osx_image: xcode10.2
language: shell
- name: "Python 2.7 on Windows"
env: TOXENV=py27 PATH=/c/Python27:/c/Python27/Scripts:$PATH
os: windows
before_install: choco install python2
language: shell
- name: "Python 3.5 on Windows"
env: TOXENV=py35 PATH=/c/Python35:/c/Python35/Scripts:$PATH
os: windows
before_install: choco install python3 --version 3.5.4
language: shell
- name: "Python 3.7 on Windows"
env: TOXENV=py37 PATH=/c/Python37:/c/Python37/Scripts:$PATH
os: windows
before_install: choco install python3 --version 3.7.3
language: shell
install:
- pip install tox pylint
- pip install .
script:
- pylint msal_extensions
- tox
deploy:
- # test pypi
provider: pypi
distributions: "sdist bdist_wheel"
server: https://test.pypi.org/legacy/
user: "nugetaad"
password:
secure: dpNi6BsZyiAx/gkxJ5Mz6m2yDz2dRGWsSgS5pF+ywNzgHJ6+0e234GyLbSUY5bFeeA7WtOr4is3bxSLB/6tTWDVWdw3TL4FGlDM/54MSLWg8R5bR9PRwO+VU1kvQ03yz+B9mTpzuiwL2e+OSwcwo97jForADzmSRA5OpEq5Z7zAs7WR8J2tyhl+288NwLtKJMVy39UmPl9oifu6/5RfBn7EWLmC7MrMFhHTb2Gj7fJWw4u+5vx9bsQ7ubfiwPbRAtYXLz6wDMtwtFzwme4zZPg5HwWCn0WWlX4b6x7xXirZ7yKsy9iACLgTrLMeAkferrex7f03NFeIDobasML+fLbZufATaL3M97kNGZwulEYNp2+RWyLu/NW6FoZCbS+cSL8HuFnkIDHzEoO56ItMiD9EH47q/NeKgwrrzKjfY+KzaMQOYLlVgCa4WrIeFh5CkwJ4RHrfanMIV2vbEvMxsnHc/mZ+yvgBOFoBNXYN1HEDzEv1NxDPcyt7MBlPUVinEreQaHba7w6qH9Rf0eWgfW2ypBXe+nHaZxQgaGC6J+WGUkzalYQspmHVU4CcuwJa55kuchJs/gbyZKkyK6P8uD5IP6VZiavwZcjWcfvwbZaLeOqzSDVCDMg8M2zYZHoa+6ZR4EgDVW7RvaRvjvvhPTPj5twmLf3YYVJtHIyJSLug=
on:
branch: master
tags: false
condition: $PYPI = "true"
- # production pypi
provider: pypi
distributions: "sdist bdist_wheel"
user: "nugetaad"
password:
secure: dpNi6BsZyiAx/gkxJ5Mz6m2yDz2dRGWsSgS5pF+ywNzgHJ6+0e234GyLbSUY5bFeeA7WtOr4is3bxSLB/6tTWDVWdw3TL4FGlDM/54MSLWg8R5bR9PRwO+VU1kvQ03yz+B9mTpzuiwL2e+OSwcwo97jForADzmSRA5OpEq5Z7zAs7WR8J2tyhl+288NwLtKJMVy39UmPl9oifu6/5RfBn7EWLmC7MrMFhHTb2Gj7fJWw4u+5vx9bsQ7ubfiwPbRAtYXLz6wDMtwtFzwme4zZPg5HwWCn0WWlX4b6x7xXirZ7yKsy9iACLgTrLMeAkferrex7f03NFeIDobasML+fLbZufATaL3M97kNGZwulEYNp2+RWyLu/NW6FoZCbS+cSL8HuFnkIDHzEoO56ItMiD9EH47q/NeKgwrrzKjfY+KzaMQOYLlVgCa4WrIeFh5CkwJ4RHrfanMIV2vbEvMxsnHc/mZ+yvgBOFoBNXYN1HEDzEv1NxDPcyt7MBlPUVinEreQaHba7w6qH9Rf0eWgfW2ypBXe+nHaZxQgaGC6J+WGUkzalYQspmHVU4CcuwJa55kuchJs/gbyZKkyK6P8uD5IP6VZiavwZcjWcfvwbZaLeOqzSDVCDMg8M2zYZHoa+6ZR4EgDVW7RvaRvjvvhPTPj5twmLf3YYVJtHIyJSLug=
on:
branch: master
tags: true
condition: $PYPI = "true"
|
.travis.yml
|
trigger: none
# minor change 7
# branches:
# include:
# - master
# paths:
# exclude:
# - RELEASES.txt
# - azure_pipelines-develop.yml
# - azure_pipelines.yml
pool:
name: Azure Pipelines
vmImage: 'ubuntu-latest'
variables:
env: 'prod'
buildPlatform: 'Any CPU'
buildConfiguration: 'Release'
stages:
- stage: stage_1
displayName: 'build and test'
jobs:
- job: Provision
pool:
vmImage: 'ubuntu-latest'
steps:
- task: GoTool@0
      displayName: 'Use Go 1.16'
inputs:
version: '1.16'
- task: Go@0
displayName: 'go get'
inputs:
arguments: '-d'
- task: Bash@3
inputs:
targetType: 'inline'
script: |
go get -v "github.com/marshyon/semverUtils"
go get -v github.com/cucumber/godog
go get -v github.com/cucumber/godog/cmd/godog
go get -v github.com/jstemmer/go-junit-report
displayName: 'initialise project'
# - task: Bash@3
# inputs:
# targetType: 'inline'
# script: |
# go test -v ./...
# displayName: 'test project'
# - task: Go@0
# inputs:
# command: 'test'
# arguments: '-v ./...'
- task: Bash@3
inputs:
targetType: 'inline'
script: |
/home/vsts/go/bin/godog
/home/vsts/go/bin/godog --format=junit > godog_test_results.xml
go test -v ./... | /home/vsts/go/bin/go-junit-report > go_test_report.xml
displayName: 'run godog tests'
# - task: PublishTestResults@2
# inputs:
# testResultsFormat: 'JUnit'
# testResultsFiles: |
# godog_test_results.xml
# go_test_report.xml
# mergeTestResults: true
# - task: Go@0
# displayName: 'go build'
# inputs:
# command: build
# arguments: '-o semverutil cmd/version/main.go'
# - task: ArchiveFiles@2
# displayName: 'Archive files'
# inputs:
# rootFolderOrFile: '$(Build.Repository.LocalPath)'
# includeRootFolder: False
# - task: PublishBuildArtifacts@1
# displayName: 'Publish artifact'
# inputs:
# ArtifactName: semverUtilApp
# condition: succeededOrFailed()
# - stage: stage_2
# displayName: 'publish release'
# jobs:
# - job: Publish
# pool:
# vmImage: 'ubuntu-latest'
# steps:
# - task: InstallSSHKey@0
# displayName: 'Install an SSH key'
# inputs:
# knownHostsEntry: '|1|OYu5fjyruG+jLSxpgKkLNgwKbSU=|10iWMohPRDOgv5HBhEBSQZJLcLo= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAsbJNNKxY2GXWG1oOO71rF+iAB/iSjegQxcYiUHha3A2f4u+bpz5E1r5/kbsYNaZPSRZeOJmPOmjy2xglehzKU='
# sshPublicKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDkGBjoZIF1vKCPGusI+xwgeVsNEWpoTGPHrC4veVpAr86SevU41tjpJJpcy37yGHGuzdsf27lebSzK77/USKBUNBmpOUD8/DAiZ8MQ6jR23tzDCU6UeFy2QukHcs9fIE4oVXM7mFgtxlJ6X4KahBVXU6CVvyWPowge1egydOJzfktU7ooKToe4n9hW53Kz8+W0UwvVcnEJ/lsuQgvwS5entQ0c1yvBrBQ0sfrGVe1PHtCTjExoU8Tubx2nMc5exVQJN5+cnvyXZyNEG6jxuY2Ey9Xdkaa16fxELs3W1iepdZFEBgE4C5GoR3/RTFafxyfadVAOF/y0WxgHR7mnODHp jon@jon-HP-Pavilion-Laptop-14-ce0xxx'
# sshPassphrase: <PASSWORD>
# sshKeySecureFile: 'id_az_devops_rsa'
# - task: DownloadPipelineArtifact@2
# inputs:
# artifact: semverUtilApp
# path: $(Build.SourcesDirectory)/zip
# - task: Bash@3
# inputs:
# targetType: 'inline'
# script: |
# find ~/.ssh
# cd $(Build.SourcesDirectory)/zip
# unzip *.zip
# git --no-pager log --decorate=short --no-color
# echo "semverutil : "
# VERSION="v"$(./semverutil)
# echo "["$VERSION"]"
# git remote -v
# git remote rm origin
# #git config --global user.name "marshyon"
# #git config --global url."<EMAIL>"
# git remote add origin <EMAIL>:marshyon/semverUtils.git
# git tag $VERSION
# echo "pushing to remote ..."
# git push origin --tags
# displayName: 'label release'
# - task: GitHubRelease@1
# displayName: 'GitHub release (create)'
# inputs:
# gitHubConnection: 'GitHub marshyon'
# repositoryName: '$(Build.Repository.Name)'
# action: 'create'
# target: '$(Build.SourceVersion)'
# tagSource: 'gitTag'
# tagPattern: 'v\d+\.\d+\.\d+'
# assets: '$(Build.SourcesDirectory)/zip/semverutil'
# changeLogCompareToRelease: 'lastFullRelease'
# changeLogType: 'commitBased'
|
azure_pipelines.yml
|
name: Lint and test
on: [push, pull_request]
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.6, 3.7, 3.8, 3.9]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip wheel
pip install -r requirements.txt
pip install -r async-requirements.txt
pip install -r dev-requirements.txt
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 codingame tests examples --count --select=E9,F63,F7,F82 --show-source --statistics \
--format='::error file=%(path)s,line=%(row)d,col=%(col)d::[flake8] %(code)s: %(text)s'
# exit-zero treats all errors as warnings
flake8 codingame tests examples --count --exit-zero --statistics \
--format='::warning file=%(path)s,line=%(row)d,col=%(col)d::[flake8] %(code)s: %(text)s'
- name: Check formatting with black
run: |
black codingame tests examples --check --line-length 80
- name: Check import ordering with isort
run: |
isort codingame tests examples --check-only
- name: Lint the docs with doc8
run: |
doc8 docs --quiet
- name: Check package build
run: |
python setup.py --quiet sdist
twine check dist/*
- name: Test with pytest
env:
TEST_LOGIN_REMEMBER_ME_COOKIE: ${{ secrets.TEST_LOGIN_REMEMBER_ME_COOKIE }}
TEST_CODINGAMER_ID: ${{ secrets.TEST_CODINGAMER_ID }}
TEST_CODINGAMER_PSEUDO: ${{ secrets.TEST_CODINGAMER_PSEUDO }}
TEST_CODINGAMER_PUBLIC_HANDLE: ${{ secrets.TEST_CODINGAMER_PUBLIC_HANDLE }}
TEST_CLASHOFCODE_PUBLIC_HANDLE: ${{ secrets.TEST_CLASHOFCODE_PUBLIC_HANDLE }}
run: |
pytest
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
with:
fail_ci_if_error: true
|
.github/workflows/lint-test.yml
|
ID: Flash2Color
Name: Flash2Color
Designer: Alex
Experimenter: PCL
Log:
Subject_ID: AG3
Subject_Name: AG3
Subject_Species: Macaque
Subject_Gender: Male
Subject_Age: 0
Subject_Size: 0 0 0
Subject_Weight: 0
Subject_Log:
EnvPath: MarkerQuad
EnvParam:
MarkerSize@Marker@Marker: 3.5
MarkerPosition@Marker@Marker: -31.18 16.77 -1000
MarkerCorner@Marker@Marker: TopLeft
Mark@Marker@Marker: false
MarkOnColor@Marker@Marker: 1 1 1 1
MarkOffColor@Marker@Marker: 0 0 0 1
ScreenToEye@OrthoCamera@OrthoCamera: 33
ScreenHeight@OrthoCamera@OrthoCamera: 33.5
ScreenAspect@OrthoCamera@OrthoCamera: 1.33333337
BGColor@OrthoCamera@OrthoCamera: 0.5 0.5 0.5 1
CLUT@OrthoCamera@OrthoCamera: true
Rotation@Quad@Quad: 0 0 0
RotationOffset@Quad@Quad: 0 0 0
Ori@Quad@Quad: 0
OriOffset@Quad@Quad: 0
Diameter@Quad@Quad: 10
Size@Quad@Quad: 71.89 57.06 1
Color@Quad@Quad: 0.6327 0.382 1 1
MaskType@Quad@Quad: None
MaskRadius@Quad@Quad: 0.5
MaskSigma@Quad@Quad: 0.150000006
OriPositionOffset@Quad@Quad: false
Visible@Quad@Quad: false
Position@Quad@Quad: 0 0 0
PositionOffset@Quad@Quad: 0 0 0
CondPath: ''
Cond:
LogicPath: SpikeGLXColor
Hemisphere: Left
Eye: Right
RecordSession: V1
RecordSite: ODL18
DataDir: X:\
DataPath:
Input: false
CondSampling: Ascending
BlockSampling: Ascending
CondRepeat: 100
BlockRepeat: 1
BlockParam: []
PreICI: 0
CondDur: 1500
SufICI: 0
PreITI: 0
TrialDur: 0
SufITI: 0
PreIBI: 0
BlockDur: 0
SufIBI: 0
PushCondAtState: COND
CondTestAtState: PREICI
NotifyPerCondTest: 0
NotifyParam:
- CondIndex
- Event
- SyncEvent
InheritParam:
- Experimenter
- Subject_ID
- Subject_Name
- Subject_Species
- Subject_Gender
- Subject_Age
- Subject_Size
- Subject_Weight
- Hemisphere
- RecordSession
- RecordSite
- DataDir
- TimerDriftSpeed
- Display_ID
- NotifyExperimenter
- Eye
EnvInheritParam:
- MarkerCorner@Marker@Marker
- MarkOnColor@Marker@Marker
- MarkOffColor@Marker@Marker
- MarkerSize@Marker@Marker
- ScreenToEye@OrthoCamera@OrthoCamera
- ScreenHeight@OrthoCamera@OrthoCamera
- ScreenAspect@OrthoCamera@OrthoCamera
- CLUT@OrthoCamera@OrthoCamera
- Position@Quad@Quad
- Diameter@Quad@Quad
Param:
ColorSpace: DKL
Color: Z
TimerDriftSpeed: 6.4999999999999994E-05
EventSyncProtocol:
SyncMethods:
- GPIO
- Display
nSyncChannel: 1
nSyncpEvent: 1
Display_ID: ROGPG279Q
CondTestShowLevel: FULL
NotifyExperimenter: true
Version: 2
Config:
CondTest:
|
Experiment/SNL-C/Flash2Color.yaml
|
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: {{ template "naisd.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "naisd.fullname" . }}
spec:
selector:
matchLabels:
app: {{ template "naisd.name" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "naisd.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
annotations:
prometheus.io/scrape: "true"
nais.io/logformat: glog
spec:
serviceAccount: {{ template "naisd.fullname" . }}
containers:
- name: naisd
image: "{{ .Values.repository }}:{{ .Values.version }}"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["all"]
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: {{ .Values.runAsUser }}
resources:
requests:
memory: "{{ .Values.request.memory }}"
cpu: "{{ .Values.request.cpu }}"
limits:
memory: "{{ .Values.limit.memory }}"
cpu: "{{ .Values.limit.cpu }}"
livenessProbe:
httpGet:
path: /isalive
port: http
envFrom:
- secretRef:
name: {{ template "naisd.fullname" . }}
- secretRef:
name: naisd-kafka
env:
- name: fasit_url
value: "{{ .Values.fasitUrl }}"
- name: cluster_subdomain
value: "{{ .Values.clusterSubdomain }}"
- name: clustername
value: "{{ .Values.clusterName }}"
- name: authentication_enabled
value: "{{ .Values.authenticationEnabled }}"
- name: istio_enabled
value: "{{ .Values.istioEnabled }}"
- name: kafka_enabled
value: "{{ .Values.KafkaEnabled }}"
- name: kafka_brokers
value: "{{ .Values.KafkaBrokers }}"
- name: kafka_topic
value: "{{ .Values.KafkaTopic }}"
- name: NAISD_VAULT_ADDR
value: "{{ .Values.vaultAddr }}"
- name: NAISD_VAULT_INIT_CONTAINER_IMAGE
value: "{{ .Values.vaultInitContainerImage }}"
- name: NAISD_VAULT_KV_PATH
value: "{{ .Values.vaultKVPath }}"
- name: NAISD_VAULT_AUTH_PATH
value: "{{ .Values.vaultAuthPath }}"
- name: NAISD_VAULT_ENABLED
value: "{{ .Values.vaultEnabled }}"
- name: NAIS_POD_HTTP_PROXY
value: "{{ .Values.podHttpProxy }}"
- name: NAIS_POD_NO_PROXY
value: "{{ .Values.podNoProxy }}"
{{ if .skipProxy }}
{{ else }}
- name: https_proxy
value: "{{ .Values.httpsProxy }}"
- name: http_proxy
value: "{{ .Values.httpProxy }}"
- name: no_proxy
value: "{{ .Values.noProxy }}"
{{ end }}
ports:
- containerPort: 8081
protocol: TCP
name: http
|
helm/naisd/templates/deployment.yaml
|
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: basic-kafka
spec:
selector:
matchLabels:
env: test
replicas: 1
template:
metadata:
labels:
env: test
app: kafka
spec:
containers:
- name: kafka
image: bitnami/kafka:latest
ports:
- containerPort: 9092
env:
- name: KAFKA_ZOOKEEPER_CONNECT
value: "kafka-service:2181"
- name: ALLOW_PLAINTEXT_LISTENER
value: "yes"
resources:
limits:
memory: "1Gi"
cpu: "500m"
- name: zookeeper
image: bitnami/zookeeper:latest
ports:
- containerPort: 2181
env:
- name: ALLOW_ANONYMOUS_LOGIN
value: "yes"
resources:
limits:
memory: "1Gi"
cpu: "500m"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-manager
spec:
selector:
matchLabels:
env: test
replicas: 1
template:
metadata:
labels:
env: test
app: kafka-manager
spec:
containers:
- name: kafka-manager
image: hlebalbau/kafka-manager:latest
ports:
- containerPort: 9000
env:
- name: ZK_HOSTS
value: "kafka-service:2181"
- name: APPLICATION_SECRET
value: "let<PASSWORD>"
resources:
limits:
memory: "250Mi"
cpu: "250m"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: tasking-manager
spec:
selector:
matchLabels:
env: test
replicas: 1
template:
metadata:
labels:
env: test
app: tasking-manager
spec:
containers:
- name: monitoring-engine
image: python:latest
command: ["/bin/sh"]
args: ["-c", "while true; do echo hello; sleep 10;done"]
resources:
limits:
memory: "250Mi"
cpu: "500m"
      - name: sqlite
image: nouchka/sqlite3:latest
command: ["/bin/sh"]
args: ["-c", "while true; do echo hello; sleep 10;done"]
resources:
limits:
memory: "250Mi"
cpu: "500m"
---
apiVersion: v1
kind: Service
metadata:
name: kafka-service
spec:
selector:
app: kafka
ports:
- name: kafka
protocol: TCP
port: 9092
targetPort: 9092
- name: zookeeper
protocol: TCP
port: 2181
targetPort: 2181
---
apiVersion: v1
kind: Service
metadata:
name: kafka-manager-ui-service
spec:
selector:
app: kafka-manager
ports:
- name: kafka-manager-ui
protocol: TCP
port: 80
targetPort: 9000
---
|
environment/test/test-logstash-autoscale.yaml
|
--- !ruby/hash:SeasonHash
title: 大正メビウスライン ちっちゃいさん
watchable: true
thumbnail_url: https://cs1.anime.dmkt-sp.jp/anime_kv/img/21/83/5/21835_1_6.png?1507024805000
outline: 華やかなりし大正浪漫活劇、開幕。時は大正末期。日本が軍事国家として世界と争わねばならない、熾烈な時代。主人公・柊京一郎は帝國大学へ進学するため、帝都へと上京する。勉学に邁進し、郷里のためにひとかどの人物になると決意していた京一郎はしかし、己が持つ特別な力のために軍部-大日本帝國軍-に目をつけられることになる。死んだ人間-死霊が見えるようになったのは大病を患った幼少時。その時からずっと京一郎にとって恐怖の対象でしかなかった死霊を、軍部は外国の脅威に対抗するための力として用いようとしていた。軍部とその計画を阻止しようとする者たちの対立が、京一郎の運命を巻き込んで、帝國の未来を変えていく-。
tags: !ruby/array:Hashie::Array
- !ruby/hash:TagHash
name: ショート
type: genre
- !ruby/hash:TagHash
name: SF/ファンタジー
type: genre
- !ruby/hash:TagHash
name: ドラマ/青春
type: genre
- !ruby/hash:TagHash
name: 日野聡
type: cast
- !ruby/hash:TagHash
name: 興津和幸
type: cast
- !ruby/hash:TagHash
name: 赤羽根健治
type: cast
- !ruby/hash:TagHash
name: 内匠靖明
type: cast
- !ruby/hash:TagHash
name: 花田光
type: cast
- !ruby/hash:TagHash
name: 三浦祥朗
type: cast
- !ruby/hash:TagHash
name: 監督:渡部周
type: staff
- !ruby/hash:TagHash
name: キャラクターデザイン:minatsu
type: staff
- !ruby/hash:TagHash
name: 音楽:六弦アリス
type: staff
- !ruby/hash:TagHash
name: アニメーション制作:studio A-CAT
type: staff
- !ruby/hash:TagHash
name: 製作年代:2010年代
type: other
- !ruby/hash:TagHash
name: 製作年:2017年
type: other
episodes: !ruby/array:Hashie::Array
- !ruby/hash:EpisodeHash
episode_no: 第1話/第2話/第3話/第4話
title: 運命の上京/帝都の死霊/陸軍遊軍/ノブレス・オブ・リージュ
description: 第1話:主人公・柊京一郎は最高学府である帝國大学で学ぶため、故郷の桃木村を後にして帝都へとやって来た。しかし、帝都の雑踏に降り立った京一郎が目にしたのは、なぜか可愛らしくなってしまった己の姿であった――。/第2話:帝都でも噂に名高いおせんべいを頬張り、幸せを噛みしめていた京一郎。だがその耳に、死霊の雄叫びが届く。駆けつけるとそこでは五本刀頭領時雨と、特殊遊軍部隊の館林開らが刃を交えているのだった――。/第3話:大日本帝國陸軍特殊遊撃部隊に所属する伊瀬馨と薫の双子の兄弟もまた、目覚めると可愛らしく変貌を遂げてた。さらには隊長である館林開までもが――。しかし、任務は果たさねばならない。雄真を加えた四人は帝都の警邏に向かい、因縁の相手である時雨と邂逅するのであった。/第4話:館林たちと激しい戦いを繰り広げる時雨だが、死霊を操る伊瀬兄弟により押されていく。そんな彼を京一郎は守ろうとするが、どこからともなく現れた男、ミサキにその覚悟を問われる。京一郎の出した答えとは――。<br><br>柊
京一郎:内匠靖明/ミサキ:花田 光/時雨:三浦祥郎/千家伊織:興津和幸/館林 開:日野 聡/伊勢 馨・薫:赤羽根健治<br><br>原作:大正メビウスラインVitable/監督:渡部周/キャラクター原案:敷田歳、斗目/シリーズ構成:御雄幸路/キャラクターデザイン:minatsu(マカリア)/SDキャラクターデザイン:中野一/美術監督:三宅昌和/色彩設計:中川昭洋(ライジングフォース)/撮影監督:高橋圭佑/編集:須藤瞳(REAL-T)/音響監督:くぼぞのまりこ/音響監修:小林克良/音楽:六弦アリス/アニメーション制作:studio
A-CAT<br><br>次話→so32315928
length_seconds: 843
content_id: so32201056
default_thread_id: 1509573799
channel_id: 2632720
thumbnail_url: http://tn.smilevideo.jp/smile?i=32201056
- !ruby/hash:EpisodeHash
episode_no: 第5話/第6話/第7話/第8話
title: 根の路/死霊とは/邂逅/戦う為に
description: 第5話:根の路を進む京一郎、ミサキ、時雨の三人。京一郎は道中現れた五本刀衆の一人・臣から五本刀衆とは何なのかを告げられる。また、ミサキからは「これ以上関わるな」と言われ――。一方、大日本帝國陸軍少将・千家伊織の執務室では、千家が京一郎の情報を手にしていたのだった。/第6話:大日本帝國陸軍の小会議室では、館林と千家が可愛らしい姿で対峙していた。館林の窮状を見かねたという千家は、みずからが京一郎に接触すると告げる。それを知る由もない京一郎は、夕暮れの帝都で、軍人が死霊を回収している場面に遭遇する。/第7話:京一郎の元に、帝國大学から教科書購入案内が届く。じきに始まる大学生活に意気込む京一郎だが、帝都で出会ったミサキや時雨、館林たち、そして死霊の姿が思い出される。悩んでいても仕方がないと考え、商店街へ赴くものの、その帰り道で千家に出会うのだった。/第8話:結界のなか、千家と京一郎は向かい合う。大日本帝國の現状について、問答をかわすふたり。列強に対抗するため、千家が考える有効な手とは――。その答えを知った時、京一郎は千家に忠誠を誓うよう迫られる。<br><br>柊
京一郎:内匠靖明/ミサキ:花田 光/時雨:三浦祥郎/千家伊織:興津和幸/館林 開:日野 聡/伊勢 馨・薫:赤羽根健治<br><br>原作:大正メビウスラインVitable/監督:渡部周/キャラクター原案:敷田歳、斗目/シリーズ構成:御雄幸路/キャラクターデザイン:minatsu(マカリア)/SDキャラクターデザイン:中野一/美術監督:三宅昌和/色彩設計:中川昭洋(ライジングフォース)/撮影監督:高橋圭佑/編集:須藤瞳(REAL-T)/音響監督:くぼぞのまりこ/音響監修:小林克良/音楽:六弦アリス/アニメーション制作:studio
A-CAT<br><br>so32201056←前話|次話→so32458688 第一話→so32201056
length_seconds: 843
content_id: so32315928
default_thread_id: 1511490139
channel_id: 2632720
thumbnail_url: http://tn.smilevideo.jp/smile?i=32315928
- !ruby/hash:EpisodeHash
episode_no: 第9話/第10話/第11話/第12話
title: 問われ、問う/是も非も/先に見えた光/大正メビウスライン
description: 第9話:千家の誘いを京一郎ははねのける。その時、結界を破って、ミサキと時雨が飛び込んできた。京一郎はふたりに窮地を救われた形になったが、時雨に「お前はどうしたい」と問われ答えることができない。一介の学生でしかない自分が、何を――? 一方、千家と館林もそれぞれの思惑を抱えていた。/第10話:ひとり思索に耽る京一郎の前に現れたのは館林だった。京一郎を捕らえに来たのかと思えば、答えは否。館林は死霊兵を使うべきかは、軍部でも意見が割れていると語る。そして彼もまた、京一郎に「どうしたい」のかを問うのだった。自分がこの國のためにすべき事とは? 京一郎の悩みは深まっていく。/第11話:汽車は深い闇のなかを走っていた。時雨、千家、館林、それぞれが抱えるものはあまりにも重く、京一郎はその心に寄り添うことしかできない。そんな彼を見守る、ミサキの願い。京一郎は決意のもと、遥か先へと光を見出す――。/第12話:長い夢から目覚めた京一郎。もうすぐ汽車は帝都に着く。これから新しい生活が始まるのだ。文明と電氣が張巡らされた帝都に死霊など出るはずがない。京一郎は、意気揚々と一歩を踏み出すのだった――。<br><br>柊
京一郎:内匠靖明/ミサキ:花田 光/時雨:三浦祥郎/千家伊織:興津和幸/館林 開:日野 聡/伊勢 馨・薫:赤羽根健治<br><br>原作:大正メビウスラインVitable/監督:渡部周/キャラクター原案:敷田歳、斗目/シリーズ構成:御雄幸路/キャラクターデザイン:minatsu(マカリア)/SDキャラクターデザイン:中野一/美術監督:三宅昌和/色彩設計:中川昭洋(ライジングフォース)/撮影監督:高橋圭佑/編集:須藤瞳(REAL-T)/音響監督:くぼぞのまりこ/音響監修:小林克良/音楽:六弦アリス/アニメーション制作:studio
A-CAT<br><br>so32315928←前話 第一話→so32201056
length_seconds: 843
content_id: so32458688
default_thread_id: 1513919381
channel_id: 2632720
thumbnail_url: http://tn.smilevideo.jp/smile?i=32458688
cast: "[キャスト]<br>柊 京一郎:内匠靖明/ミサキ:花田 光/時雨:三浦祥郎/千家伊織:興津和幸/館林 開:日野 聡/伊勢 馨・薫:赤羽根健治"
staff: "[スタッフ]<br>原作:大正メビウスラインVitable/監督:渡部周/キャラクター原案:敷田歳、斗目/シリーズ構成:御雄幸路/キャラクターデザイン:minatsu(マカリア)/SDキャラクターデザイン:中野一/美術監督:三宅昌和/色彩設計:中川昭洋(ライジングフォース)/撮影監督:高橋圭佑/編集:須藤瞳(REAL-T)/音響監督:くぼぞのまりこ/音響監修:小林克良/音楽:六弦アリス/アニメーション制作:studio
A-CAT"
produced_year: "[製作年]<br>2017年"
copyright: "©ちっちゃいいいんかい"
related_seasons: !ruby/array:Hashie::Array []
|
db/fixtures/seasons/season_00820.yml
|
title: internationalisation-de-la-foret-amazonienne-2
date: '2004-11-10T00:00:00.000Z'
image: 'https://www.populationdata.net/wp-content/uploads/amazonie.jpg'
fr:
title: Internationalisation de la forêt amazonienne
body: >-
      Discours du ministre brésilien de l'Éducation aux États-Unis. Pendant un débat
      dans une université aux États-Unis, le ministre de l'Éducation Cristovam
Buarque, fut interrogé sur ce qu'il pensait au sujet de
l'internationalisation de l'Amazonie. Le jeune étudiant américain commença
sa question en affirmant qu'il espérait une réponse d'un humaniste et non
      d'un Brésilien.<!--more-->Voici la réponse de Cristovam Buarque.<br />
<br /> En effet, en tant que Brésilien, je m'élèverais tout simplement
contre l'internationalisation de l'Amazonie. Quelle que soit l'insuffisance
de l'attention de nos gouvernements pour ce patrimoine, il est nôtre.<br
/> En tant qu'humaniste, conscient du risque de dégradation du milieu
ambiant dont souffre l'Amazonie, je peux imaginer que l'Amazonie soit
internationalisée, comme du reste tout ce qui a de l'importance pour toute
l'humanité. Si, au nom d'une éthique humaniste, nous devions
internationaliser l'Amazonie, alors nous devrions internationaliser les
réserves de pétrole du monde entier.<br /> <br /> Le pétrole est aussi
important pour le bien-être de l'humanité que l'Amazonie l'est pour notre
avenir. Et malgré cela, les maîtres des réserves de pétrole se sentent le
droit d'augmenter ou de diminuer l'extraction de pétrole, comme d'augmenter
ou non son prix.<br /> <br /> De la même manière, on devrait
internationaliser le capital financier des pays riches. Si l'Amazonie est
une réserve pour tous les hommes, elle ne peut être brûlée par la volonté
de son propriétaire, ou d'un pays. Brûler l'Amazonie, c'est aussi grave que
le chômage provoqué par les décisions arbitraires des spéculateurs de
l'économie globale. Nous ne pouvons pas laisser les réserves financières
brûler des pays entiers pour le bon plaisir de la spéculation.<br /><br
      />Avant l'Amazonie, j'aimerais assister à l'internationalisation de tous
les grands musées du monde. Le Louvre ne doit pas appartenir à la seule
France. Chaque musée du monde est le gardien des plus belles oeuvres
produites par le génie humain. On ne peut pas laisser ce patrimoine
culturel, au même titre que le patrimoine naturel de l'Amazonie, être
manipulé et détruit selon la fantaisie d'un seul propriétaire ou d'un seul
pays.<br /> <br /> Il y a quelque temps, un millionnaire japonais a décidé
d'enterrer avec lui le tableau d'un grand maître. Avant que cela n'arrive,
il faudrait internationaliser ce tableau.<br /> <br /> Pendant que cette
rencontre se déroule, les Nations unies organisent le Forum du Millénaire,
mais certains Présidents de pays ont eu des difficultés pour y assister, à
      cause de difficultés aux frontières des États-Unis. Je crois donc qu'il
faudrait que New York, lieu du siège des Nations unies, soit
internationalisé. Au moins Manhattan devrait appartenir à toute l'humanité.
Comme du reste Paris, Venise, Rome, Londres, Rio de Janeiro, Brasília,
Recife, chaque ville avec sa beauté particulière, et son histoire du monde
      devraient appartenir au monde entier.<br /> <br /> Si les États-Unis
veulent internationaliser l'Amazonie, à cause du risque que fait courir le
fait de la laisser entre les mains des Brésiliens, alors internationalisons
      aussi tout l'arsenal nucléaire des États-Unis. Ne serait-ce que parce
qu'ils sont capables d'utiliser de telles armes, ce qui provoquerait une
destruction mille fois plus vaste que les déplorables incendies des forêts
Brésiliennes.<br /> <br /> Au cours de leurs débats, les actuels candidats
      à la Présidence des États-Unis ont soutenu l'idée d'une internationalisation
      des réserves forestières du monde en échange d'un effacement de la dette.
Commençons donc par utiliser cette dette pour s'assurer que tous les
      enfants du monde aient la possibilité de manger et d'aller à l'école. <br />
Internationalisons les enfants, en les traitant, où qu'ils naissent, comme
un patrimoine qui mérite l'attention du monde entier. Davantage encore que
l'Amazonie. Quand les dirigeants du monde traiteront les enfants pauvres du
monde comme un Patrimoine de l'Humanité, ils ne les laisseront pas
travailler alors qu'ils devraient aller à l'école; ils ne les laisseront
pas mourir alors qu'ils devraient vivre.<br /> <br /> En tant qu'humaniste,
j'accepte de défendre l'idée d'une internationalisation du monde. Mais tant
que le monde me traitera comme un Brésilien, je lutterai pour que
l'Amazonie soit à nous. Et seulement à nous !<br />
en:
title: Internationalisation de la forêt amazonienne
body: ''
|
data/posts/2004-11-09_internationalisation-de-la-foret-amazonienne-2.yml
|
items:
- uid: IdentityServer4.Contrib.HttpClientService.Models
commentId: N:IdentityServer4.Contrib.HttpClientService.Models
id: IdentityServer4.Contrib.HttpClientService.Models
children:
- IdentityServer4.Contrib.HttpClientService.Models.ClientCredentialsOptions
- IdentityServer4.Contrib.HttpClientService.Models.HttpClientServiceOptions
- IdentityServer4.Contrib.HttpClientService.Models.IIdentityServerOptions
- IdentityServer4.Contrib.HttpClientService.Models.PasswordOptions
- IdentityServer4.Contrib.HttpClientService.Models.ResponseObject`1
langs:
- csharp
- vb
name: IdentityServer4.Contrib.HttpClientService.Models
nameWithType: IdentityServer4.Contrib.HttpClientService.Models
fullName: IdentityServer4.Contrib.HttpClientService.Models
type: Namespace
assemblies:
- IdentityServer4.Contrib.HttpClientService
references:
- uid: IdentityServer4.Contrib.HttpClientService.Models.IIdentityServerOptions
commentId: T:IdentityServer4.Contrib.HttpClientService.Models.IIdentityServerOptions
parent: IdentityServer4.Contrib.HttpClientService.Models
name: IIdentityServerOptions
nameWithType: IIdentityServerOptions
fullName: IdentityServer4.Contrib.HttpClientService.Models.IIdentityServerOptions
- uid: IdentityServer4.Contrib.HttpClientService.Models.ClientCredentialsOptions
commentId: T:IdentityServer4.Contrib.HttpClientService.Models.ClientCredentialsOptions
name: ClientCredentialsOptions
nameWithType: ClientCredentialsOptions
fullName: IdentityServer4.Contrib.HttpClientService.Models.ClientCredentialsOptions
- uid: IdentityServer4.Contrib.HttpClientService.Models.PasswordOptions
commentId: T:IdentityServer4.Contrib.HttpClientService.Models.PasswordOptions
name: PasswordOptions
nameWithType: PasswordOptions
fullName: IdentityServer4.Contrib.HttpClientService.Models.PasswordOptions
- uid: IdentityServer4.Contrib.HttpClientService.Models.HttpClientServiceOptions
commentId: T:IdentityServer4.Contrib.HttpClientService.Models.HttpClientServiceOptions
name: HttpClientServiceOptions
nameWithType: HttpClientServiceOptions
fullName: IdentityServer4.Contrib.HttpClientService.Models.HttpClientServiceOptions
- uid: IdentityServer4.Contrib.HttpClientService.Models.ResponseObject`1
commentId: T:IdentityServer4.Contrib.HttpClientService.Models.ResponseObject`1
name: ResponseObject<TResponseBody>
nameWithType: ResponseObject<TResponseBody>
fullName: IdentityServer4.Contrib.HttpClientService.Models.ResponseObject<TResponseBody>
nameWithType.vb: ResponseObject(Of TResponseBody)
fullName.vb: IdentityServer4.Contrib.HttpClientService.Models.ResponseObject(Of TResponseBody)
name.vb: ResponseObject(Of TResponseBody)
spec.csharp:
- uid: IdentityServer4.Contrib.HttpClientService.Models.ResponseObject`1
name: ResponseObject
nameWithType: ResponseObject
fullName: IdentityServer4.Contrib.HttpClientService.Models.ResponseObject
- name: <
nameWithType: <
fullName: <
- name: TResponseBody
nameWithType: TResponseBody
fullName: TResponseBody
- name: '>'
nameWithType: '>'
fullName: '>'
spec.vb:
- uid: IdentityServer4.Contrib.HttpClientService.Models.ResponseObject`1
name: ResponseObject
nameWithType: ResponseObject
fullName: IdentityServer4.Contrib.HttpClientService.Models.ResponseObject
- name: '(Of '
nameWithType: '(Of '
fullName: '(Of '
- name: TResponseBody
nameWithType: TResponseBody
fullName: TResponseBody
- name: )
nameWithType: )
fullName: )
- uid: IdentityServer4.Contrib.HttpClientService.Models
commentId: N:IdentityServer4.Contrib.HttpClientService.Models
name: IdentityServer4.Contrib.HttpClientService.Models
nameWithType: IdentityServer4.Contrib.HttpClientService.Models
fullName: IdentityServer4.Contrib.HttpClientService.Models
|
docfx_project/api/IdentityServer4.Contrib.HttpClientService.Models.yml
|
version: 2
jobs:
build:
working_directory: ~/vzvol_build
machine:
enabled: true
steps:
- checkout
- run:
name: InstallDeps
        command: sudo add-apt-repository -y ppa:zfs-native/stable && ( if [ ! -d /etc/apt/sources.list.d ]; then sudo mkdir /etc/apt/sources.list.d; fi && sudo touch /etc/apt/sources.list.d/virtualbox.list && echo "deb https://download.virtualbox.org/virtualbox/debian trusty contrib" | sudo tee -a /etc/apt/sources.list.d/virtualbox.list ) && ( wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add - ) && sudo apt-get update && sudo apt-get -y install pv dialog xfsprogs dosfstools ruby2.0 ruby2.0-dev gcc make rpm linux-headers-4.4.0-96-generic
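    # InstallDeps adds the zfs-native PPA and the Oracle VirtualBox apt
    # repository (with its signing key) before installing the build toolchain
    # and filesystem utilities the packaging and zvol tests below depend on.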
- run:
name: InstallVBox
command: sudo apt-get install -y virtualbox
- run:
name: InstallZFS
command: sudo apt-get install -y ubuntu-zfs zfs-dkms
- run:
name: kldload
command: sudo modprobe zfs
- run:
name: InstallGems
command: sudo gem2.0 install fpm --no-ri --no-doc
- run:
name: BuildPkgDeb
command: sudo sh ~/vzvol_build/build/vzvol_build.sh 0.0.0 0 ~/vzvol_build <EMAIL>
- run:
name: InstallPkg
command: sudo dpkg -i ~/vzvol_build/vzvol_0.0.0-ubuntu1404-0_all.deb
- run:
name: CreateDiskImage
command: sudo dd if=/dev/zero of=/root/vzvolpool.img bs=1024M count=10
- run:
name: CreateZpool
command: sudo zpool create vzvoltestpool /root/vzvolpool.img
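    # The zpool above is backed by a plain 10 GiB file image (created with dd)
    # rather than a dedicated disk, which is sufficient for the vzvol
    # create/list/delete tests that follow.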
- run:
name: TestCreate
command: sudo /usr/local/sbin/vzvol -t virtualbox -v HorseTest -s 512M
- run:
name: TestCreateRaw
command: sudo /usr/local/sbin/vzvol -t raw -v HorseTest2 -s 20M
- run:
name: TestCreateEXT2
command: sudo /usr/local/sbin/vzvol -t raw -v ext2test -s 20M --file-system -f ext2
- run:
name: TestCreateXFS
command: sudo /usr/local/sbin/vzvol -t raw -v xfstest -s 20M --file-system -f xfs
- run:
name: TestCreateFAT32
        command: sudo /usr/local/sbin/vzvol -t raw -v fat32test -s 20M --file-system -f fat32
- run:
name: TestList
command: sudo /usr/local/sbin/vzvol --list
- run:
name: TestDelete
command: sudo /usr/local/sbin/vzvol --delete -f vzvoltestpool/HorseTest
- run:
name: MakeSureDeleteWorked
command: sudo /usr/local/sbin/vzvol --list
- run:
name: TestDeleteRaw
command: sudo /usr/local/sbin/vzvol --delete -f vzvoltestpool/HorseTest2
    - run:
name: MakeSureVMDKDeleteWorked
command: test ! -f ~/VBoxdisks/HorseTest.vmdk
|
.circleci/config.yml
|
name: Java CI
on: [push]
env:
REGISTRY_NAME: pickleddragon
APP_NAME: container-demo
CLUSTER_NAME: democluster
CLUSTER_RES_GROUP: demo-ing
NAMESPACE: container-demo-ns
SECRET_NAME: demo-cr
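# The values above name the ACR registry, app image, AKS cluster and target
# namespace used by the build and deploy steps below; credentials come from
# the repository secrets referenced in the login and secret-creation steps.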
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up JDK 11
uses: actions/setup-java@v2
with:
java-version: '11'
distribution: 'adopt'
- name: Build with Maven
run: mvn --batch-mode --update-snapshots verify
- name: Upload JAR Artifact
uses: actions/upload-artifact@v2.2.3
with:
# Artifact name
name: app.jar
path: target/*.jar
retention-days: 7
- name: Login to Azure Container Registry
uses: azure/docker-login@v1
with:
login-server: ${{ env.REGISTRY_NAME }}.azurecr.io
username: ${{ secrets.REGISTRY_USERNAME }}
        password: ${{ secrets.REGISTRY_SECRET }}
- name: Build docker image and push to ACR
run: |
docker build . -t ${{ env.REGISTRY_NAME }}.azurecr.io/${{ env.APP_NAME }}:${{ github.sha }}
docker push ${{ env.REGISTRY_NAME }}.azurecr.io/${{ env.APP_NAME }}:${{ github.sha }}
- name: Set AKS cluster context
uses: azure/aks-set-context@v1
with:
creds: '${{ secrets.AZURE_CREDS }}'
cluster-name: ${{ env.CLUSTER_NAME }}
resource-group: ${{ env.CLUSTER_RES_GROUP }}
- name: Check if namespace exists, create if not.
run: |
kubectl create namespace ${{ env.NAMESPACE }} --dry-run=client -o json | kubectl apply -f -
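        # --dry-run=client renders the Namespace manifest locally without
        # creating it; piping it into 'kubectl apply' makes the step
        # idempotent, so it succeeds whether or not the namespace exists.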
- name: Create image pull secret for ACR
uses: azure/k8s-create-secret@v1
with:
container-registry-url: ${{ env.REGISTRY_NAME }}.azurecr.io
container-registry-username: ${{ secrets.REGISTRY_USERNAME }}
container-registry-password: ${{ secrets.REGISTRY_SECRET }}
secret-name: ${{ env.SECRET_NAME }}
namespace: ${{ env.NAMESPACE }}
force: true
- name: Deploy to AKS Cluster
uses: azure/k8s-deploy@v1
with:
manifests: |
k8s-manifests/deployment.yml
k8s-manifests/service.yml
k8s-manifests/ingress.yml
images: |
${{ env.REGISTRY_NAME }}.azurecr.io/${{ env.APP_NAME }}:${{ github.sha }}
imagepullsecrets: |
${{ env.SECRET_NAME }}
namespace: ${{ env.NAMESPACE }}
|
.github/workflows/main.yml
|
---
http_interactions:
- request:
method: post
uri: https://apisandbox.zuora.com/apps/services/a/91.0
body:
encoding: UTF-8
string: |
<?xml version="1.0"?>
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:api="http://api.zuora.com/" xmlns:obj="http://object.api.zuora.com/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<soapenv:Header>
<api:SessionHeader>
<api:session>s-fs3YIbf3oHJTv_l6mJuJoIRkDQ9OHq0Rq3v3n3nnXyOJWjiD6S7hAs0TWuiHo_VG2PsnpHPk2slDNgvbCNLPvJzDenSidg24MS63QSbZGVPjbjV62zLVgQuL5HsEr5FbzxJbyFqooojOysQ1ynmtKjLMiMcFByKfIjBHoIgKWra7v8aB4loAyefZWhVV5Th3_CPYFUGHx9a1qCdbG-m9HcFgOFdilGv0nwlOD557BMDZD9wArUP_8CPkZgyUlw</api:session>
</api:SessionHeader>
</soapenv:Header>
<soapenv:Body>
<api:amend>
<api:requests>
<api:Amendments>
<obj:ContractEffectiveDate>2017-01-01</obj:ContractEffectiveDate>
<obj:Description>Renewing at customer request</obj:Description>
<obj:CustomerAcceptanceDate>2017-01-01</obj:CustomerAcceptanceDate>
<obj:EffectiveDate>2017-01-01</obj:EffectiveDate>
<obj:ServiceActivationDate>2017-01-01</obj:ServiceActivationDate>
<obj:SubscriptionId>2c92c0f95282215501528a9b494c0a48</obj:SubscriptionId>
<obj:Status>Completed</obj:Status>
<obj:Type>UpdateProduct</obj:Type>
<obj:Name>Product update</obj:Name>
<api:RatePlanData>
<api:RatePlan>
<obj:AmendmentSubscriptionRatePlanId>2c92c0f950fa763f01510cbb937812dd</obj:AmendmentSubscriptionRatePlanId>
</api:RatePlan>
</api:RatePlanData>
<api:RatePlanChargeData>
<api:RatePlanCharge>
<obj:ProductRatePlanId>2c92c0f950fa763f01510cbb937812dd</obj:ProductRatePlanId>
</api:RatePlanCharge>
</api:RatePlanChargeData>
</api:Amendments>
<api:AmendOptions>
<api:GenerateInvoice>true</api:GenerateInvoice>
<api:ProcessPayments>true</api:ProcessPayments>
</api:AmendOptions>
<api:PreviewOptions>
<api:EnablePreviewMode>false</api:EnablePreviewMode>
<api:PreviewThroughTermEnd>false</api:PreviewThroughTermEnd>
</api:PreviewOptions>
</api:requests>
</api:amend>
</soapenv:Body>
</soapenv:Envelope>
headers:
User-Agent:
- Faraday v0.9.2
Content-Type:
- text/xml
Accept-Encoding:
- gzip;q=1.0,deflate;q=0.6,identity;q=0.3
Accept:
- "*/*"
response:
status:
code: 200
message: OK
headers:
Server:
- Zuora App
Content-Type:
- text/xml;charset=UTF-8
Expires:
- Fri, 29 Jan 2016 04:24:42 GMT
Cache-Control:
- max-age=0, no-cache, no-store
Pragma:
- no-cache
Date:
- Fri, 29 Jan 2016 04:24:42 GMT
Content-Length:
- '439'
Connection:
- keep-alive
body:
encoding: UTF-8
string: <?xml version='1.0' encoding='UTF-8'?><soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"><soapenv:Body><ns1:amendResponse
xmlns:ns1="http://api.zuora.com/"><ns1:results><ns1:Errors><ns1:Code>INVALID_VALUE</ns1:Code><ns1:Message>Can't
find the RatePlan by AmendmentSubscriptionRatePlanId.</ns1:Message></ns1:Errors><ns1:Success>false</ns1:Success></ns1:results></ns1:amendResponse></soapenv:Body></soapenv:Envelope>
http_version:
recorded_at: Fri, 29 Jan 2016 04:24:42 GMT
recorded_with: VCR 3.0.1
|
spec/cassettes/soap/amend_update_product_success.yml
|
items:
- uid: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials
id: WithCredentials
artifact: com.microsoft.azure.apimanagement.v2019_01_01:azure-mgmt-apimanagement:1.0.0-beta-1
parent: com.microsoft.azure.management.apimanagement.v2019_01_01
children:
- com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials.withCredentials(com.microsoft.azure.management.apimanagement.v2019_01_01.BackendCredentialsContract)
langs:
- java
name: BackendContract.DefinitionStages.WithCredentials
nameWithType: BackendContract.DefinitionStages.WithCredentials
fullName: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials
type: Interface
package: com.microsoft.azure.management.apimanagement.v2019_01_01
summary: The stage of the backendcontract definition allowing to specify Credentials.
syntax:
content: public static interface BackendContract.DefinitionStages.WithCredentials
- uid: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials.withCredentials(com.microsoft.azure.management.apimanagement.v2019_01_01.BackendCredentialsContract)
id: withCredentials(com.microsoft.azure.management.apimanagement.v2019_01_01.BackendCredentialsContract)
artifact: com.microsoft.azure.apimanagement.v2019_01_01:azure-mgmt-apimanagement:1.0.0-beta-1
parent: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials
langs:
- java
name: withCredentials(BackendCredentialsContract credentials)
nameWithType: BackendContract.DefinitionStages.WithCredentials.withCredentials(BackendCredentialsContract credentials)
fullName: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials.withCredentials(BackendCredentialsContract credentials)
overload: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials.withCredentials*
type: Method
package: com.microsoft.azure.management.apimanagement.v2019_01_01
summary: Specifies credentials.
syntax:
content: public abstract BackendContract.DefinitionStages.WithCreate withCredentials(BackendCredentialsContract credentials)
parameters:
- id: credentials
type: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendCredentialsContract
description: Backend Credentials Contract Properties
return:
type: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCreate
description: the next definition stage
references:
- uid: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendCredentialsContract
name: BackendCredentialsContract
nameWithType: BackendCredentialsContract
fullName: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendCredentialsContract
- uid: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCreate
name: BackendContract.DefinitionStages.WithCreate
nameWithType: BackendContract.DefinitionStages.WithCreate
fullName: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCreate
- uid: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials.withCredentials*
name: withCredentials
nameWithType: BackendContract.DefinitionStages.WithCredentials.withCredentials
fullName: com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials.withCredentials
package: com.microsoft.azure.management.apimanagement.v2019_01_01
|
docs-ref-autogen/com.microsoft.azure.management.apimanagement.v2019_01_01.BackendContract.DefinitionStages.WithCredentials.yml
|
version: "3.7"
services:
sf4d-h2-proxy:
# Don't use this proxy in prod
build:
context: ./
dockerfile: docker/dev/h2-proxy/Dockerfile
container_name: sf4d-h2-proxy
hostname: sf4d-h2-proxy
depends_on:
- sf4d-nginx
ports:
- "80:80"
- "443:443"
volumes:
- "./volumes/$APP_NAME/http2-proxy:/var/log/nginx"
sf4d-nginx:
build:
context: ./
dockerfile: docker/dev/nginx/Dockerfile
args:
WORKDIR: ${WORKDIR:-var/www}
APP_NAME: ${APP_NAME:-app.test}
container_name: sf4d-nginx
hostname: sf4d-nginx
restart: on-failure
depends_on:
- sf4d-php-fpm
ports:
- "${NGINX_LOCAL_PORT:-81}:80"
volumes:
- "./apps/$APP_NAME/public:/${WORKDIR}/${APP_NAME}/public"
- "./volumes/$APP_NAME/nginx/logs:/var/log/nginx:rw"
sf4d-php-fpm:
build:
context: ./
dockerfile: docker/dev/php/7.3/fpm/Dockerfile
args:
TIMEZONE: ${TIMEZONE:-UTC}
WORKDIR: ${WORKDIR:-var/www}
APP_NAME: ${APP_NAME:-app.test}
container_name: sf4d-php-fpm
hostname: sf4d-php-fpm
volumes:
- "./apps/$APP_NAME:/$WORKDIR/$APP_NAME"
- "./volumes/$APP_NAME/php/var/cache:/$WORKDIR/$APP_NAME/var/cache:rw"
- "./volumes/$APP_NAME/php/var/sessions:/$WORKDIR/$APP_NAME/var/sessions:rw"
- "./volumes/$APP_NAME/php/var/logs:/$WORKDIR/$APP_NAME/var/logs:rw"
depends_on:
- sf4d-db
- sf4d-redis
#user: ${LOCAL_USER}
sf4d-php-cli:
build:
context: ./
dockerfile: docker/dev/php/7.3/cli/Dockerfile
args:
TIMEZONE: ${TIMEZONE:-UTC}
WORKDIR: ${WORKDIR:-var/www}
APP_NAME: ${APP_NAME:-app.test}
container_name: sf4d-php-cli
hostname: sf4d-php-cli
volumes:
- ./apps/$APP_NAME:/$WORKDIR/$APP_NAME
- composer:/root/.composer/cache:rw
links:
- sf4d-db
- sf4d-redis
#user: ${LOCAL_USER}
tty: true
sf4d-db:
image: postgres:11.2-alpine
container_name: sf4d-db
hostname: sf4d-db
restart: on-failure
environment:
POSTGRES_USER: ${POSTGRES_USER:-symfonist}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-secret}
POSTGRES_DB: ${POSTGRES_DB:-app_db}
ports:
- "${POSTGRES_LOCAL_PORT:-54321}:5432"
volumes:
- db-data:/var/lib/postgresql/data:rw
sf4d-adminer:
image: adminer
container_name: sf4d-adminer
hostname: sf4d-adminer
restart: always
links:
- sf4d-db
ports:
- '${ADMINER_LOCAL_PORT:-8080}:8080'
sf4d-redis:
image: redis:5-alpine
container_name: sf4d-redis
hostname: sf4d-redis
ports:
- "6379:6379"
volumes:
- redis:/data
command:
- 'redis-server'
- '--databases 2'
- '--save 900 1'
- '--save 300 10'
- '--save 60 10000'
- '--requirepass secret'
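    # redis-server flags: 2 logical databases, RDB snapshots at 900s/1,
    # 300s/10 and 60s/10000 changed keys, and password auth with a
    # hard-coded dev-only password.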
volumes:
db-data:
redis:
composer:
|
docker-compose.yml
|
--- !ruby/hash:SeasonHash
title: いっぽう日本昔ばなし
watchable: true
thumbnail_url: https://cs1.anime.dmkt-sp.jp/anime_kv/img/21/02/3/21023_1_6.png?1468220412000
outline: むかしむかしの日本のお話。桃太郎は鬼退治へ行き、ウサギはタヌキに敵討ち、華やかな英雄譚から涙止まらぬ悲劇まで、その物語は現代まで語り継がれ愛されています。これは、そんな物語のいっぽうで繰り広げられた、地味で醜悪でどうでもいい物語。
tags: !ruby/array:Hashie::Array
- !ruby/hash:TagHash
name: コメディ/ギャグ
type: genre
- !ruby/hash:TagHash
name: 監督:谷口崇
type: staff
- !ruby/hash:TagHash
name: 製作年代:2010年代
type: other
- !ruby/hash:TagHash
name: 製作年:2016年
type: other
episodes: !ruby/array:Hashie::Array
- !ruby/hash:EpisodeHash
episode_no: ep01~ep03
title: 桃太郎/浦島太郎/かぐや姫
description: ep01:桃から生まれた桃太郎は、犬、サル、キジをお伴に従え、鬼退治に向かいました。いっぽうその頃、家に残されたおじいさんとおばあさんは…/ep02:地上に戻り玉手箱を開けた浦島太郎は、煙を浴びて、おじいさんになってしまいました。いっぽうその頃、浦島太郎に助けられた亀は…/ep03:かぐや姫が、天の羽衣をまとい月へと帰っていくのを、おじいさんとおばあさんは、いつまでも見送っていました。いっぽうその頃、かぐや姫に求婚していた貴公子たちは…<br><br>監督:谷口崇/原作:谷口崇<br><br>次話→so32024696
length_seconds: 543
content_id: so32024695
default_thread_id: 1506820066
channel_id: 2632720
thumbnail_url: http://tn.smilevideo.jp/smile?i=32024695
- !ruby/hash:EpisodeHash
episode_no: ep04~ep06
title: 耳なし芳一/わらしべ長者/鶴の恩返し
description: ep04:魔除けのお経を耳だけ書き忘れられた芳一は、平家の怨霊から、耳を引きちぎられてしまいました。いっぽうその頃、芳一の体にお経を書いた和尚さんは…/ep05:男は、藁しべからミカン、反物、馬、ついには屋敷と交換し、裕福な暮らしを手に入れました。いっぽうその頃、屋敷を馬と交換した長者は…/ep06:おじいさんに姿を見られてしまったツウは、鶴の姿に戻り、おじいさんの元から飛び去ってしまいました。いっぽうその頃、反物が出来るのを楽しみにしているおばあさんは…<br><br>監督:谷口崇/原作:谷口崇<br><br>so32024695←前話|次話→so32024697 第一話→so32024695
length_seconds: 615
content_id: so32024696
default_thread_id: 1506820043
channel_id: 2632720
thumbnail_url: http://tn.smilevideo.jp/smile?i=32024696
- !ruby/hash:EpisodeHash
episode_no: ep07~ep09
title: カチカチ山/さるかに合戦/おむすびころりん
description: ep07:おばあさんの仇を取るため、ウサギさんは、タヌキさんの薪に火を放ちました。いっぽうその頃、ウサギさんの帰りを待っているおじいさんは…/ep08:カニさんの敵討ちのため、蜂、栗、牛の糞、臼は、力を合わせてサルをこらしめる作戦会議を開きました。いっぽうその頃、その様子を遠くから見ていた牛は…/ep09:おじいさんは、おむすびのお礼として小さいほうのつづらをもらい、ねずみたちに見送られて家へ帰りました。いっぽうその頃、穴に戻ったねずみの青年は…<br><br>監督:谷口崇/原作:谷口崇<br><br>so32024696←前話|次話→so32024698 第一話→so32024695
length_seconds: 576
content_id: so32024697
default_thread_id: 1506820072
channel_id: 2632720
thumbnail_url: http://tn.smilevideo.jp/smile?i=32024697
- !ruby/hash:EpisodeHash
episode_no: ep10~ep12
title: 笠地蔵/金太郎/一寸法師
description: ep10:おじいさんとおばあさんは、お地蔵様から笠のお礼にもらった贈り物で、無事、新年を迎えることができました。いっぽうその頃、お地蔵様が元々いた場所に戻ってくると…/ep11:金太郎は、力自慢の大きな熊と相撲をとりました。いっぽうその頃、それを見ていた動物たちは…/ep12:お腹の中を針で刺して鬼を倒した一寸法師は、鬼が落とした打出の小槌で大きくなり、長者の娘と結婚しました。いっぽうその頃、一寸法師にお腹の中を刺された鬼は…<br><br>監督:谷口崇/原作:谷口崇<br><br>so32024697←前話 第一話→so32024695
length_seconds: 511
content_id: so32024698
default_thread_id: 1506820032
channel_id: 2632720
thumbnail_url: http://tn.smilevideo.jp/smile?i=32024698
staff: "[スタッフ]<br>監督:谷口崇/原作:谷口崇"
produced_year: "[製作年]<br>2016年"
copyright: "©谷口 崇/エイベックス・ピクチャーズ"
related_seasons: !ruby/array:Hashie::Array []
|
db/fixtures/seasons/season_00107.yml
|
version: '3'
services:
jobservice:
env_file:
- ./rabbitmq.env
environment:
JOB_SERVICE_DATABASE_APPNAME: "Job Service"
JOB_SERVICE_DATABASE_URL: "jdbc:postgresql://${JOB_SERVICE_DATABASE_HOST}:${JOB_SERVICE_DATABASE_PORT}/jobservice"
JOB_SERVICE_DATABASE_USERNAME: "${JOB_SERVICE_DATABASE_USERNAME}"
JOB_SERVICE_DATABASE_PASSWORD: "${JOB_SERVICE_DATABASE_PASSWORD}"
CAF_STATUS_CHECK_INTERVAL_SECONDS: 5
CAF_TRACKING_PIPE: jobtracking-in
CAF_WEBSERVICE_URL: "http://${JOB_SERVICE_DOCKER_HOST}:${JOB_SERVICE_PORT}/job-service/v1"
CAF_JOB_SERVICE_RESUME_JOB_QUEUE: "${CAF_JOB_SERVICE_RESUME_JOB_QUEUE}"
image: jobservice/job-service:${project.version}
deploy:
mode: replicated
replicas: 2
resources:
limits:
cpus: '0.5'
memory: 1024M
update_config:
parallelism: 1
delay: 10s
ports:
- "${JOB_SERVICE_PORT}:8080"
worker-jobtracking:
env_file:
- ./rabbitmq.env
environment:
CAF_WORKER_INPUT_QUEUE: jobtracking-in
CAF_WORKER_ERROR_QUEUE: jobtracking-err
JOB_SERVICE_DATABASE_APPNAME: "Job Tracking Worker"
JOB_SERVICE_DATABASE_URL: "jdbc:postgresql://${JOB_SERVICE_DATABASE_HOST}:${JOB_SERVICE_DATABASE_PORT}/jobservice"
JOB_SERVICE_DATABASE_USERNAME: "${JOB_SERVICE_DATABASE_USERNAME}"
JOB_SERVICE_DATABASE_PASSWORD: "${JOB_SERVICE_DATABASE_PASSWORD}"
CAF_WEBSERVICE_URL: "http://${JOB_SERVICE_DOCKER_HOST}:${JOB_SERVICE_PORT}/job-service/v1"
image: jobservice/worker-jobtracking:${project.version}
deploy:
mode: replicated
replicas: 2
resources:
limits:
cpus: '0.5'
memory: 1024M
update_config:
parallelism: 1
delay: 10s
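  # This worker consumes from the same 'jobtracking-in' queue that the other
  # services point CAF_TRACKING_PIPE at, so the tracking events they emit
  # are routed back through it.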
jobservicescheduledexecutor:
env_file:
- ./rabbitmq.env
environment:
JOB_SERVICE_DATABASE_APPNAME: "Job Service Scheduled Executor"
JOB_SERVICE_DATABASE_URL: "jdbc:postgresql://${JOB_SERVICE_DATABASE_HOST}:${JOB_SERVICE_DATABASE_PORT}/jobservice"
JOB_SERVICE_DATABASE_USERNAME: "${JOB_SERVICE_DATABASE_USERNAME}"
JOB_SERVICE_DATABASE_PASSWORD: "${JOB_SERVICE_DATABASE_PASSWORD}"
CAF_WORKER_INPUT_QUEUE: jobservicescheduler-in
CAF_STATUS_CHECK_INTERVAL_SECONDS: 5
CAF_TRACKING_PIPE: jobtracking-in
CAF_WEBSERVICE_URL: "http://${JOB_SERVICE_DOCKER_HOST}:${JOB_SERVICE_PORT}/job-service/v1"
CAF_SCHEDULED_EXECUTOR_PERIOD: 10
image: jobservice/job-service-scheduled-executor:${project.version}
deploy:
mode: replicated
replicas: 2
resources:
limits:
cpus: '0.25'
memory: 512M
update_config:
parallelism: 1
delay: 10s
|
deploy/src/main/resources-filtered/production-swarm/docker-stack.yml
|
# No CI or automated runs
name: $(Date:yyyy-MM-dd)$(Rev:.rr)
trigger: none
pr: none
# NOTE! The 'shared-variables' variable group must be pre-created and populated
# - Expected variables: acr-connection, dockerhub-connection
variables:
- group: shared-variables
stages:
- stage: docker
displayName: Update Stable Dockerhub
jobs:
- job: dockerJob
displayName: Docker Job
pool:
vmImage: Ubuntu-16.04
steps:
- template: templates/acr-connection.yml
- task: Docker@2
displayName: Pull data-api from ACR
inputs:
command: pull
arguments: $(ACR_HOST)/smilr/data-api:latest
- task: Docker@2
displayName: Pull frontend from ACR
inputs:
command: pull
arguments: $(ACR_HOST)/smilr/frontend:latest
# Update tags
- bash: |
docker tag $(ACR_HOST)/smilr/frontend:latest $(ACR_HOST)/smilr/frontend:stable
docker tag $(ACR_HOST)/smilr/data-api:latest $(ACR_HOST)/smilr/data-api:stable
docker tag $(ACR_HOST)/smilr/frontend:latest smilr/frontend:latest
docker tag $(ACR_HOST)/smilr/data-api:latest smilr/data-api:latest
docker tag $(ACR_HOST)/smilr/frontend:latest smilr/frontend:stable
docker tag $(ACR_HOST)/smilr/data-api:latest smilr/data-api:stable
displayName: Tag images
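    # The bash step above only relabels images already pulled locally; the
    # Docker@2 push tasks below upload ':stable' to ACR and both ':stable'
    # and ':latest' to Dockerhub.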
- task: Docker@2
displayName: Push data-api stable image to ACR
inputs:
command: push
containerRegistry: $(acr-connection)
repository: smilr/data-api
tags: stable
- task: Docker@2
displayName: Push frontend stable image to ACR
inputs:
command: push
containerRegistry: $(acr-connection)
repository: smilr/frontend
tags: stable
- task: Docker@2
displayName: Push data-api stable/latest images to Dockerhub
inputs:
command: push
containerRegistry: $(dockerhub-connection)
repository: smilr/data-api
tags: |
stable
latest
- task: Docker@2
displayName: Push frontend stable/latest images to Dockerhub
inputs:
command: push
containerRegistry: $(dockerhub-connection)
repository: smilr/frontend
tags: |
stable
latest
# # Push to ACR
# - bash: |
# docker push $(acr-name).azurecr.io/smilr/frontend:stable
# docker push $(acr-name).azurecr.io/smilr/data-api:stable
# displayName: 'Push stable images to ACR'
# # Push to public Dockerhub
# - bash: |
# docker login -u $(dockerhub-user) -p $(dockerhub-password)
# docker push smilr/frontend:stable
# docker push smilr/data-api:stable
# docker push smilr/frontend:latest
# docker push smilr/data-api:latest
# displayName: 'Push stable/latest images to Dockerhub'
|
azure/pipelines/tag-stable.yml
|
title: Documentatie voor Azure Data Lake Storage Gen1
summary: Leer hoe u een grootschalige, met Hadoop compatibele Data Lake-opslagplaats kunt instellen, beheren en openen voor analyse van gegevens van elke grootte, soort en opnamesnelheid. Azure Data Lake Storage Gen2 is overal algemeen beschikbaar. We raden aan vandaag nog hierop over te stappen.
metadata:
title: Documentatie voor Azure Data Lake Storage Gen1
description: Leer hoe u een grootschalige, met Hadoop compatibele Data Lake-opslagplaats kunt instellen, beheren en openen voor analyse van gegevens van elke grootte, soort en opnamesnelheid.
ms.service: data-lake-store
ms.topic: landing-page
author: twooley
ms.author: twooley
ms.date: 03/11/2020
ms.openlocfilehash: 09685af7a10c8624cb8b1442189cd68057031e9d
ms.sourcegitcommit: ae6e7057a00d95ed7b828fc8846e3a6281859d40
ms.translationtype: HT
ms.contentlocale: nl-NL
ms.lasthandoff: 10/16/2020
ms.locfileid: "92106532"
landingContent:
- title: Over Azure Data Lake Storage Gen1
linkLists:
- linkListType: overview
links:
- text: Wat is Azure Data Lake Storage Gen1
url: data-lake-store-overview.md
- text: Vergelijken met Azure Storage
url: data-lake-store-comparison-with-blob-storage.md
- text: Overschakelen naar documentatie van Data Lake Storage Gen2
url: ../storage/blobs/data-lake-storage-introduction.md
- title: Aan de slag
linkLists:
- linkListType: get-started
links:
- text: Azure Portal
url: data-lake-store-get-started-portal.md
- text: Azure PowerShell
url: data-lake-store-get-started-powershell.md
- text: Azure CLI
url: data-lake-store-get-started-cli-2.0.md
- title: Gegevens laden en verplaatsen
linkLists:
- linkListType: how-to-guide
links:
- text: Azure Data Factory gebruiken
url: ../data-factory/load-azure-data-lake-store.md
- text: Storage Explorer gebruiken
url: data-lake-store-in-storage-explorer.md
- text: AdlCopy gebruiken
url: data-lake-store-copy-data-azure-storage-blob.md
- text: DistCp gebruiken
url: data-lake-store-copy-data-wasb-distcp.md
- text: Sqoop gebruiken
url: data-lake-store-data-transfer-sql-sqoop.md
- title: Beveiligde gegevens
linkLists:
- linkListType: overview
links:
- text: Beveiligingsoverzicht
url: data-lake-store-security-overview.md
- text: Toegangsbeheer
url: data-lake-store-access-control.md
- linkListType: how-to-guide
links:
- text: Opgeslagen gegevens beveiligen
url: data-lake-store-secure-data.md
- text: Versleuteling
url: data-lake-store-encryption.md
- text: Integratie van virtueel netwerk
url: data-lake-store-network-security.md
- title: Verificatie
linkLists:
- linkListType: overview
links:
- text: Verificatieopties
url: data-lakes-store-authentication-using-azure-active-directory.md
- linkListType: how-to-guide
links:
- text: Verificatie van de eindgebruiker
url: data-lake-store-end-user-authenticate-using-active-directory.md
- text: Verificatie van service-tot-service
url: data-lake-store-service-to-service-authenticate-using-active-directory.md
- title: Bestandssysteembewerkingen
linkLists:
- linkListType: how-to-guide
links:
- text: .NET SDK
url: data-lake-store-data-operations-net-sdk.md
- text: Java-SDK
url: data-lake-store-get-started-java-sdk.md
- text: REST-API
url: data-lake-store-data-operations-rest-api.md
- text: Python
url: data-lake-store-data-operations-python.md
- title: Accountbeheerbewerkingen
linkLists:
- linkListType: how-to-guide
links:
- text: .NET SDK
url: data-lake-store-get-started-net-sdk.md
- text: REST-API
url: data-lake-store-get-started-rest-api.md
- text: Python
url: data-lake-store-get-started-python.md
|
articles/data-lake-store/index.yml
|