text
stringlengths 1
1.05M
|
|---|
<reponame>qaqRose/TLog
package com.yomahub.tlog.id;
import cn.hutool.core.util.IdUtil;
/**
 * Default TLog trace-id generator: produces trace ids from a Hutool
 * snowflake id, rendered as a decimal string.
 */
public class TLogDefaultIdGenerator extends TLogIdGenerator{
    /**
     * Generate a new trace id.
     *
     * @return the next snowflake id as a string
     */
    // NOTE(review): IdUtil.getSnowflake() is invoked on every call — confirm
    // the Hutool version in use returns a cached singleton Snowflake;
    // otherwise freshly constructed instances could produce colliding ids.
    @Override
    public String generateTraceId() {
        return IdUtil.getSnowflake().nextIdStr();
    }
    // Ad-hoc manual entry point; currently prints only an empty line.
    public static void main(String[] args) {
        System.out.println();
    }
}
|
const axios = require('axios');

// Fetch the user collection from the API and print the response payload;
// any network or HTTP error is logged instead of being rethrown.
axios
  .get('/api/v1/users/')
  .then(({ data }) => {
    console.log(data);
  })
  .catch((err) => {
    console.log(err);
  });
|
#!/usr/bin/env bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# keep in sync with hack/verify-toc.sh
TOOL_VERSION=4dc3d6f908138504b02a1766f1f8ea282d6bdd7c
# cd to the root path
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)"
cd "${ROOT}"
# create a temporary directory
TMP_DIR=$(mktemp -d)
# cleanup: always remove the temp dir, even when errexit aborts the script
exitHandler() (
echo "Cleaning up..."
rm -rf "${TMP_DIR}"
)
trap exitHandler EXIT
# Install mdtoc into a temp dir so we do not create/update go.mod and go.sum
# in this repository.
# FIX: 'go install pkg@version' replaces the deprecated 'go get' for
# installing executables ('go get' stopped building binaries in Go 1.18).
cd "${TMP_DIR}"
GO111MODULE=on GOBIN="${TMP_DIR}" go install "github.com/tallclair/mdtoc@${TOOL_VERSION}"
export PATH="${TMP_DIR}:${PATH}"
cd "${ROOT}"
echo "Checking table of contents are up to date..."
# Verify tables of contents are up-to-date
grep --include='*.md' -rl keps -e '<!-- toc -->' | xargs mdtoc --inplace --dryrun
|
<reponame>fuchina/FSPassword<filename>FSPasswordSample/Pods/Headers/Public/FSJZBus/FSMultiPeerService.h<gh_stars>0
//
// FSMultiPeerService.h
// myhome
//
// Created by FudonFuchina on 2017/10/21.
// Copyright © 2017 fuhope. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <MultipeerConnectivity/MultipeerConnectivity.h>
#import "FSMultiPeerTool.h"
#import <FSTuple.h>
// Multipeer-connectivity service object: holds the advertiser, session and
// peer identity, and exposes callback blocks for incoming data, peer-list
// changes and resource transfers.
@interface FSMultiPeerService : NSObject
// Advertises this peer to nearby devices.
@property (nonatomic, strong) MCNearbyServiceAdvertiser *advertiser;
// The active multipeer session.
@property (nonatomic, strong) MCSession *session;
// This device's peer identity within the session.
@property (nonatomic, strong) MCPeerID *peerID;
// Callback carrying a received data object (bData).
@property (nonatomic,copy) void (^receivedData)(id bData);
// Callback carrying the current list of peers when membership changes.
@property (nonatomic,copy) void (^usersChanged)(NSArray<MCPeerID *> *users);
// Callback with a resource name, sender, and transfer progress —
// presumably fired while a resource is being received; confirm against impl.
@property (nonatomic,copy) void (^receiveResource)(NSString *rName,NSString *from,NSProgress *p);
// Callback with resource name, sender, local URL and error —
// presumably fired when a resource transfer completes; confirm against impl.
@property (nonatomic,copy) void (^finishedResource)(NSString *rName,NSString *from,NSURL *local,NSError *error);
@end
|
import { Enforcer } from 'casbin';
import { GraphqlModuleContext, ModuleResolvers } from '../GraphqlModule';
import { GraphqlPlugin } from './@GraphqlPlugin';
/** Options controlling how GraphQL resolver maps are wrapped with casbin checks. */
export interface WrapResolverMapWithCasbinOptions<User> {
  /** Mutable holder for the enforcer; `current` stays null until it is ready. */
  enforcer: {
    current: null | Enforcer;
  };
  /** Resolves the current user from the GraphQL context (null/undefined when anonymous). */
  getUser: (context: any) => Promise<User | null | undefined>;
  /** Maps a user (possibly anonymous) to the casbin role name to enforce with. */
  getUserRole: (user: User | null | undefined) => Promise<string>;
  /** Per-resolver action overrides, keyed by resolver name; wins over the
      "<resource>.<action>" naming convention. */
  extraActionMap?: {
    [key: string]: string;
  };
  /** Resolver names to exclude from permission checks entirely. */
  ignore?: string[];
}
/**
 * Graphql plugin that wraps query/mutation resolvers with a casbin
 * permission check: the current user's role must be allowed to perform
 * "<action>" on "<resource>" before the underlying resolver runs.
 */
export default class CasbinGraphqlPlugin<User> implements GraphqlPlugin {
  options: WrapResolverMapWithCasbinOptions<User>;

  constructor(options: WrapResolverMapWithCasbinOptions<User>) {
    this.options = options;
  }

  /**
   * Wrap one resolver so its `resolve` first enforces (role, resource,
   * action) against casbin, attaches the user to `context.req`, and only
   * then delegates to the original resolve.
   *
   * NOTE(review): the resolver object is mutated in place (its `resolve`
   * property is replaced) and also returned.
   *
   * @throws Error when the enforcer is not yet ready or permission is denied.
   */
  private wrapResolverWithCasbin(
    resolver: any,
    resource: string,
    action: string
  ) {
    const defaultResolve = resolver.resolve;
    resolver.resolve = async (...resolveParams: any[]) => {
      // graphql-compose style passes a single object carrying `context`;
      // plain graphql resolvers receive (source, args, context, info).
      const context =
        resolveParams.length === 1
          ? resolveParams[0].context
          : resolveParams[2];
      const user = await this.options.getUser(context);
      const role = await this.options.getUserRole(user);
      const { req } = context;
      if (!this.options.enforcer.current) {
        throw new Error('Enforcer is not ready');
      }
      if (
        !(await this.options.enforcer.current.enforce(role, resource, action))
      ) {
        throw new Error(`Permission denied for '${resource}' - '${action}' `);
      }
      req.user = user;
      return defaultResolve(...resolveParams);
    };
    return resolver;
  }

  /**
   * Derive the casbin action from a resolver name of the form
   * "<resource>.<action>" (e.g. "user.create" -> "create").
   *
   * @throws Error when the name does not start with the resource prefix or
   *         has no action segment after the dot (the original silently
   *         returned `undefined` in that second case).
   */
  private getResolverAction(resolverName: string, resource: string): string {
    const [prefix, action] = resolverName.split('.');
    if (prefix === resource && action !== undefined) {
      return action;
    }
    throw new Error(
      `Can not get action for resolver "${resolverName}" with resource "${resource}"`
    );
  }

  /**
   * Wrap every resolver in the map with a casbin check, honoring the
   * `ignore` list and `extraActionMap` overrides from `options`.
   *
   * Returns a shallow copy of the map; the resolver objects themselves are
   * still mutated in place by wrapResolverWithCasbin.
   */
  private wrapResolverMapWithCasbin<T extends { [key: string]: any }>(
    resource: string,
    resolverMap: T,
    options?: WrapResolverMapWithCasbinOptions<User>
  ): T {
    const data: T = { ...resolverMap };
    for (const resolverName in data) {
      // Explicitly ignored resolvers bypass the permission check entirely.
      if (options?.ignore && options.ignore.includes(resolverName)) {
        continue;
      }
      // An explicit action mapping wins over the naming convention.
      const action =
        options?.extraActionMap && resolverName in options.extraActionMap
          ? options.extraActionMap[resolverName]
          : this.getResolverAction(resolverName, resource);
      data[resolverName] = this.wrapResolverWithCasbin(
        data[resolverName],
        resource,
        action
      );
    }
    return data;
  }

  /** Wrap all query resolvers for this module's resource. */
  resolveQueries(
    context: GraphqlModuleContext,
    queries: ModuleResolvers
  ): ModuleResolvers {
    return this.wrapResolverMapWithCasbin(
      context.resource,
      queries,
      this.options
    );
  }

  /** Wrap all mutation resolvers for this module's resource. */
  resolveMutations(
    context: GraphqlModuleContext,
    mutations: ModuleResolvers
  ): ModuleResolvers {
    return this.wrapResolverMapWithCasbin(
      context.resource,
      mutations,
      this.options
    );
  }

  /** Subscriptions are intentionally passed through without casbin checks. */
  resolveSubscriptions(
    context: GraphqlModuleContext,
    subscriptions: ModuleResolvers
  ): ModuleResolvers {
    return subscriptions;
  }
}
|
-- Users table keyed by username; email carries a secondary index for lookups.
CREATE TABLE users (
    name varchar(255),
    username varchar(255),
    email varchar(255),
    PRIMARY KEY (username),
    -- The former `username_index` was removed: the PRIMARY KEY already
    -- creates an index on `username`, so a second index was redundant.
    INDEX email_index (email)
);
|
import React, { Component, Fragment } from "react";
import checkAuth from "../../checkAuth";
import { Redirect } from "react-router-dom";
class Default extends Component {
state = {
status: 2
};
refetch = () => {
checkAuth()
.then(data => {
this.setState({
...this.state,
...data
});
})
.catch(err => console.log);
};
componentDidMount() {
this.refetch();
}
render() {
let renderItem = <div className="loading">Loading...</div>;
if (!this.state.status) {
renderItem = <Redirect to="/login" />;
} else if (this.state.status === 1) {
if (this.state.admin === 1) {
renderItem = <Redirect to="/admin" />;
} else renderItem = <Redirect to="/faculty" />;
}
return <Fragment>{renderItem}</Fragment>;
}
}
export default Default;
|
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro (vincent.passaro@gmail.com)
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
#!/bin/bash
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 17-Dec-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#######################PCI INFORMATION###############################
#Risk =
#######################PCI INFORMATION###############################
#Global Variables#
# Current kernel SYN backlog (half-open TCP connection queue length).
BACKLOG=$( cat /proc/sys/net/ipv4/tcp_max_syn_backlog )
#Start-Lockdown
# Raise the SYN backlog to at least 4096.
# FIX: quote $BACKLOG — an unquoted empty value would make `[ -lt 4096 ]`
# a syntax error instead of a clean comparison failure.
if [ "$BACKLOG" -lt 4096 ]
then
echo " " >> /etc/sysctl.conf
echo "#Added by PCI" >> /etc/sysctl.conf
echo "net.ipv4.tcp_max_syn_backlog=4096" >> /etc/sysctl.conf
# Apply the new sysctl setting immediately without a reboot.
sysctl -p > /dev/null
fi
|
<filename>mango/__main__.py
import click
from pydoc import locate
@click.group()
def cli():
    """Root command group for the mango command-line interface."""
    pass
@cli.command()
@click.argument('experiment')
@click.argument('param', nargs=-1)
def train(experiment, param):
    """Locate an experiment class by dotted path, build it, and run it.

    EXPERIMENT is a dotted import path (e.g. "pkg.mod.MyExperiment")
    resolved via pydoc.locate; PARAM is the tuple of remaining CLI
    arguments, forwarded verbatim to Experiment.create().
    """
    Experiment = locate(experiment)
    # locate() returns None when the dotted path cannot be resolved.
    if Experiment is None:
        click.echo(f'Class {experiment} not found')
        return
    exp = Experiment.create(param)
    exp.run()
if __name__ == '__main__':
    cli()
|
#!/bin/bash -f
#*********************************************************************************************************
# Vivado (TM) v2018.3 (64-bit)
#
# Filename : system.sh
# Simulator : Mentor Graphics Questa Advanced Simulator
# Description : Simulation script for compiling, elaborating and verifying the project source files.
# The script will automatically create the design libraries sub-directories in the run
# directory, add the library logical mappings in the simulator setup file, create default
# 'do/prj' file, execute compilation, elaboration and simulation steps.
#
# Generated by Vivado on Tue Oct 22 17:18:43 +0800 2019
# SW Build 2405991 on Thu Dec 6 23:38:27 MST 2018
#
# Copyright 1986-2018 Xilinx, Inc. All Rights Reserved.
#
# usage: system.sh [-help]
# usage: system.sh [-lib_map_path]
# usage: system.sh [-noclean_files]
# usage: system.sh [-reset_run]
#
# Prerequisite:- To compile and run simulation, you must compile the Xilinx simulation libraries using the
# 'compile_simlib' TCL command. For more information about this command, run 'compile_simlib -help' in the
# Vivado Tcl Shell. Once the libraries have been compiled successfully, specify the -lib_map_path switch
# that points to these libraries and rerun export_simulation. For more information about this switch please
# type 'export_simulation -help' in the Tcl shell.
#
# You can also point to the simulation libraries by either replacing the <SPECIFY_COMPILED_LIB_PATH> in this
# script with the compiled library directory path or specify this path with the '-lib_map_path' switch when
# executing this script. Please type 'system.sh -help' for more information.
#
# Additional references - 'Xilinx Vivado Design Suite User Guide:Logic simulation (UG900)'
#
#*********************************************************************************************************
# Script info
echo -e "system.sh - Script generated by export_simulation (Vivado v2018.3 (64-bit)-id)\n"
# Main steps: validate CLI switches, perform setup, then run the
# compile -> elaborate -> simulate pipeline.
run()
{
# $# (arg count) and $1 are forwarded so check_args can validate the switch.
check_args $# $1
setup $1 $2
compile
elaborate
simulate
}
# RUN_STEP: <compile>
# Run the generated compile.do script, mirroring its output to compile.log.
compile()
{
# Compile design files
source compile.do 2>&1 | tee -a compile.log
}
# RUN_STEP: <elaborate>
# Run the generated elaborate.do script, mirroring its output to elaborate.log.
elaborate()
{
source elaborate.do 2>&1 | tee -a elaborate.log
}
# RUN_STEP: <simulate>
# Launch Questa in 64-bit batch mode and drive it with simulate.do.
simulate()
{
vsim -64 -c -do "do {simulate.do}" -l simulate.log
}
# STEP: setup
# Dispatch on the first CLI switch, then (re)create the design library dir.
setup()
{
case $1 in
"-lib_map_path" )
# -lib_map_path requires the compiled-library directory as a second argument.
if [[ ($2 == "") ]]; then
echo -e "ERROR: Simulation library directory path not specified (type \"./system.sh -help\" for more information)\n"
exit 1
fi
copy_setup_file $2
;;
"-reset_run" )
reset_run
echo -e "INFO: Simulation run files deleted.\n"
exit 0
;;
"-noclean_files" )
# do not remove previous data
;;
* )
# Default: no recognized switch — copy modelsim.ini from the default path.
copy_setup_file $2
esac
create_lib_dir
# Add any setup/initialization commands here:-
# <user specific commands>
}
# Copy modelsim.ini file
# Copies modelsim.ini from the compiled simulation library directory —
# either the path passed as $1 or the default Vivado-generated path.
copy_setup_file()
{
file="modelsim.ini"
if [[ ($1 != "") ]]; then
lib_map_path="$1"
else
lib_map_path="F:/ZYNQ/zynq7010/gpio_mio/gpio_mio.cache/compile_simlib/questa"
fi
if [[ ($lib_map_path != "") ]]; then
src_file="$lib_map_path/$file"
# FIX: quote the source path — library paths may contain spaces, and the
# original unquoted `cp $src_file` would word-split them.
cp "$src_file" .
fi
}
# Create design library directory
# Recreates the questa_lib directory from scratch: any existing directory
# (or file) with that name is removed first, then an empty one is made.
create_lib_dir()
{
lib_dir="questa_lib"
# rm -rf is a no-op when the path does not exist, so no existence check
# is needed before removing.
rm -rf "$lib_dir"
mkdir "$lib_dir"
}
# Delete generated data from the previous run
# Removes known log/output artifacts and recreates the library directory.
reset_run()
{
files_to_remove=(compile.log elaborate.log simulate.log vsim.wlf questa_lib)
# Idiom fix: iterate array elements directly (quoted) instead of the
# C-style index loop; rm -rf already tolerates missing paths, so the
# per-file existence check was unnecessary.
for file in "${files_to_remove[@]}"; do
rm -rf "$file"
done
create_lib_dir
}
# Check command line arguments
# $1 is the caller's argument count, $2 the first switch. With exactly one
# argument, it must be one of the recognized switches; -help/-h prints usage.
check_args()
{
if [[ ($1 == 1 ) && ($2 != "-lib_map_path" && $2 != "-noclean_files" && $2 != "-reset_run" && $2 != "-help" && $2 != "-h") ]]; then
echo -e "ERROR: Unknown option specified '$2' (type \"./system.sh -help\" for more information)\n"
exit 1
fi
if [[ ($2 == "-help" || $2 == "-h") ]]; then
usage
fi
}
# Script usage
# Prints the help text describing all supported switches, then exits 1.
usage()
{
msg="Usage: system.sh [-help]\n\
Usage: system.sh [-lib_map_path]\n\
Usage: system.sh [-reset_run]\n\
Usage: system.sh [-noclean_files]\n\n\
[-help] -- Print help information for this script\n\n\
[-lib_map_path <path>] -- Compiled simulation library directory path. The simulation library is compiled\n\
using the compile_simlib tcl command. Please see 'compile_simlib -help' for more information.\n\n\
[-reset_run] -- Recreate simulator setup files and library mappings for a clean run. The generated files\n\
from the previous run will be removed. If you don't want to remove the simulator generated files, use the\n\
-noclean_files switch.\n\n\
[-noclean_files] -- Reset previous run, but do not remove simulator generated files from the previous run.\n\n"
echo -e $msg
exit 1
}
# Launch script
# Entry point: forward up to two CLI arguments to run() (see usage()).
run $1 $2
|
def compareLength(string1, string2):
    """Return True if the two strings have the same length, else False."""
    # The comparison already yields a bool; the original if/else returning
    # True/False explicitly was redundant.
    return len(string1) == len(string2)


string1 = "Hello"
string2 = "Goodbye"
isSameLength = compareLength(string1, string2)
print(isSameLength)  # Output: False
|
#!/bin/bash
#
# snap_nt_combine.sh
#
# This script runs SNAP against the NT database
#
# Chiu Laboratory
# University of California, San Francisco
# January, 2014
#
# This script will successively run SNAP against NT partitions and then combine the results
#
# Note: for the NT database, default FASTQ headers will cause segmentation fault in SNAP
# need to change FASTQ headers to gi only
# 1/16/13
#
# Note: you need to use SNAP 0.15 or a higher version which outputs the "d" value in the SAM file
#
# Copyright (C) 2014 Charles Chiu - All Rights Reserved
# Permission to copy and modify is granted under the BSD license
expected_args=6
# Script name without its leading path, for log prefixes.
scriptname=${0##*/}
# Require all six positional arguments before doing any work.
if [ $# -lt $expected_args ]
then
echo "Usage: $scriptname <FASTQ input file> <directory containing SNAP NT indexes> <number of cores> <free cache memory cutoff in GB> <SNAP d-value cutoff> <# of simultaneous SNAP runs>"
exit 65
fi
###
# Positional arguments, bound to named variables for readability.
inputfile=$1
SNAP_NT_index_directory=$2
cores=$3
free_cache_cutoff=$4
SNAP_d_cutoff=$5
simultaneous_SNAPs=$6
###
echo -e "$(date)\t$scriptname\tStarting SNAP to NT"
START1=$(date +%s)
echo -e "$(date)\t$scriptname\tInput file: $inputfile"
# Strip the directory and the .fastq extension to get the output base name.
nopathf=${inputfile##*/} # remove the path to file
echo -e "$(date)\t$scriptname\tAfter removing path: $nopathf"
basef=${nopathf%.fastq} # remove FASTQextension
echo -e "$(date)\t$scriptname\tAfter removing FASTQ extension: $basef"
echo -e "$(date)\t$scriptname\tMapping $basef to NT..."
# rm -f $basef.NT.sam # this is removing the output file, if it is present? Should not be necessary, but commenting out for now.
# Run SNAP once per NT index partition, producing one SAM file per partition.
for snap_index in $SNAP_NT_index_directory/*; do
START2=$(date +%s)
nopathsnap_index=${snap_index##*/} # remove the path to file
echo -e "$(date)\t$scriptname\tStarting SNAP on $nopathsnap_index"
# Free memory in GB, parsed from the second line of `free -g` output.
freemem=$(free -g | awk '{print $4}' | head -n 2 | tail -1)
echo -e "$(date)\t$scriptname\tThere is $freemem GB available free memory...[cutoff=$free_cache_cutoff GB]"
if [ $freemem -lt $free_cache_cutoff ]
then
echo -e "$(date)\t$scriptname\tClearing cache..."
# NOTE(review): dropcache is a site-specific helper (not a standard tool) —
# presumably it drops the kernel page cache; confirm it is on PATH.
dropcache
fi
START_SNAP=$(date +%s)
/usr/bin/time -o $basef.$nopathsnap_index.snap.log snap single $snap_index $basef.fastq -o $basef.$nopathsnap_index.sam -t $cores -x -f -h 250 -d $SNAP_d_cutoff -n 25 > $basef.$nopathsnap_index.time.log
SNAP_DONE=$(date +%s)
snap_time=$(( SNAP_DONE - START_SNAP ))
echo -e "$(date)\t$scriptname\tCompleted running SNAP using $snap_index in $snap_time seconds."
echo -e "$(date)\t$scriptname\tRemoving headers..."
START_HEADER_REMOVAL=$(date +%s)
# Drop all SAM header lines (those beginning with '@') before comparison.
sed '/^@/d' $basef.$nopathsnap_index.sam > $basef.$nopathsnap_index.noheader.sam
END_HEADER_REMOVAL=$(date +%s)
header_removal_time=$(( END_HEADER_REMOVAL - START_HEADER_REMOVAL ))
echo -e "$(date)\t$scriptname\tCompleted removing headers in $header_removal_time seconds."
END2=$(date +%s)
diff=$(( END2 - START2 ))
echo -e "$(date)\t$scriptname\tMapping to $snap_index took $diff seconds"
done
#SNAP does not sort its results, so in order to compare files linearly, we need to sort them manually.
echo -e "$(date)\t$scriptname\tSorting..."
START_SORT=$(date +%s)
# did some testing with optimizing the number of simultaneous sorts - maximizing this number was the speediest, so no need for GNU parallel - SMF 5/22/14
# parallel -j $cores "sort {} > {}.sorted;" ::: *.noheader.sam
# Kick off one background sort per headerless SAM file.
for file in *.noheader.sam
do
sort "$file" > "$file.sorted" &
done
# Wait for every background sort to finish.
# BUG FIX: the original looped `for jobs in \`jobs -p\`` but then ran
# `wait $job` — $job was never set, so each iteration expanded to a bare
# `wait`. Loop variable and wait argument now match.
for job in $(jobs -p)
do
wait "$job"
done
END_SORT=$(date +%s)
sort_time=$(( END_SORT - START_SORT ))
echo -e "$(date)\t$scriptname\tCompleted sorting in $sort_time seconds."
# find the best alignment hit for each line
# Build the list of sorted per-partition SAM files, in index order.
FILEARRAY=()
for snap_index in $SNAP_NT_index_directory/* ; do
nopathsnap_index=${snap_index##*/} # remove the path to file
FILEARRAY=("${FILEARRAY[@]}" "$basef.$nopathsnap_index.noheader.sam.sorted")
done
echo -e "$(date)\t$scriptname\tStarting comparison of all SAM files."
START_COMPARE=$(date +%s)
# The final array element is the combined output file name.
FILEARRAY=("${FILEARRAY[@]}" "$basef.NT.sam")
# find the best alignment hit for each line
# NOTE(review): ${FILEARRAY[@]} is unquoted — file names with spaces would
# split; presumably safe for this pipeline's naming scheme.
compare_multiple_sam.py ${FILEARRAY[@]}
END1=$(date +%s)
comparison_time=$(( END1 - START_COMPARE ))
echo -e "$(date)\t$scriptname\tComparison took $comparison_time seconds."
echo -e "$(date)\t$scriptname\tDone with SNAP_NT "
diff=$(( END1 - START1 ))
echo -e "$(date)\t$scriptname\toutput written to $basef.NT.sam"
echo -e "$(date)\t$scriptname\tSNAP_NT took $diff seconds"
#delete intermediate SAM files
rm *.noheader.sam
rm *.noheader.sam.sorted
# Remove the raw per-partition SAM outputs as well.
for snap_index in $SNAP_NT_index_directory/* ; do
nopathsnap_index=${snap_index##*/} # remove the path to file
rm *.$nopathsnap_index.sam
done
|
#!/bin/bash
export PATH=$PATH:$HOME/bin
# Remove the named file, ignoring missing files.
# FIX: quote "$1" — an unquoted argument would word-split on spaces and
# expand glob characters in the file name.
function delete() {
/bin/rm -f "$1"
}
if [ "x$HOME" = "x" ] ; then
HOME=/home/naehas
fi
LOG=$HOME/cron.log
echo "`date`: ====== statsCron.sh Starting. ======" >> $LOG
if [ ! -f $HOME/bin/.statsrc ] ; then
echo "`date`: Stats settings not initialized." >> $LOG
exit 1
fi
. $HOME/bin/.statsrc
if [ "x$STATS_PASSWORD" = "x" ] ; then
echo "`date`: Stats password not initialized." >> $LOG
exit 1
fi
if [ "x$STATS_HOST" = "x" ] ; then
echo "`date`: Stats host not initialized." >> $LOG
exit 1
fi
if [ "x$STATS_DBNAME" = "x" ] ; then
STATS_DBNAME=nadb
fi
if [ "x$1" = "x" ] ; then
SKIP_JOBS=yes
else
SKIP_JOBS=no
fi
YESTERDAY=`yesterday`
DEV="base-dashboard "
# NOTE(review): $PROD and $UAT are never defined in this script — unless
# they come from .statsrc, only $DEV contributes to this loop. Also,
# DASHDIR is hard-coded to base-dashboard and ignores $DASH entirely;
# confirm whether per-dashboard processing was intended.
for DASH in $PROD $UAT $DEV; do
DASHDIR=/home/naehas/.hudson/jobs/base-dashboard/workspace/trunk
if [ -d $DASHDIR ] ; then
echo "`date`: Processing $DASH" >> $LOG
cd $DASHDIR
cd logs/
if [ "$SKIP_JOBS" = "no" ] ; then
echo "`date`: Collecting job stats." >> $LOG
collectJobStats.py $DASHDIR/conf/Catalina/localhost/dashboard.xml $YESTERDAY 2>> $LOG
JOB_STATS="N_EVENTS.jobs.$YESTERDAY"
if [ -f $JOB_STATS ] ; then
#load it!
echo "`date`: loading $JOB_STATS" >> $LOG
# Bulk-load the job stats CSV into the N_EVENTS table.
mysql -uroot -p$STATS_PASSWORD -A -h $STATS_HOST $STATS_DBNAME 2>> $LOG <<EOF
load data local infile '$JOB_STATS'
into table N_EVENTS
fields terminated by ',' optionally enclosed by '"'
lines terminated by '\n'
(LOAD_HOST,DASHBOARD,REVISION,TYPE,JOB,FINISH_DATE,FINISH_TIME,URI,ACTION,DETAILS,RESULT,CONTROLLER_MILLIS,FULL_MILLIS,SIZE)
EOF
# delete $JOB_STATS
fi
fi
YESTERDAYS_LOG=dashboard.log.$YESTERDAY
if [ -f $YESTERDAYS_LOG ] ; then
echo "`date`: Collecting UI stats." >> $LOG
parseDashboardRequests.py $YESTERDAYS_LOG 2>> $LOG
REQ_STATS="N_EVENTS.$YESTERDAYS_LOG"
if [ -f $REQ_STATS ] ; then
echo "`date`: loading $REQ_STATS" >> $LOG
# Bulk-load the parsed UI request stats into the N_EVENTS table.
mysql -uroot -p$STATS_PASSWORD -A -h $STATS_HOST $STATS_DBNAME 2>> $LOG <<EOF
load data local infile '$REQ_STATS'
into table N_EVENTS
fields terminated by ',' optionally enclosed by '"'
lines terminated by '\n'
(LOAD_HOST,DASHBOARD,REVISION,FINISH_DATE,FINISH_TIME,TYPE,URI,ACTION,RESULT,CONTROLLER_MILLIS,FULL_MILLIS)
EOF
delete $REQ_STATS
fi
# Compress the processed log so it is not re-parsed on later runs.
gzip $YESTERDAYS_LOG
fi
echo "`date`: Done." >> $LOG
else
echo "`date`: Skipping $DASHDIR" >> $LOG
fi
done
|
require File.expand_path('../../../spec_helper', __FILE__)
require 'date'
# Spec for Date#day: the day-of-month accessor (1-31).
describe "Date#day" do
  it "returns the day" do
    # 2000-07-01 — the first of the month, so #day must be 1.
    d = Date.new(2000, 7, 1).day
    d.should == 1
  end
end
|
<gh_stars>1-10
/*
Copyright 2019-2020 Netfoundry, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
const ZitiControllerChannel = require('../channel/controller-channel');
const utils = require('../utils/utils');
/**
* Module dependencies.
*/
const Q = require('q');
/**
*
* @class ZitiControllerWSClient
* @param {(string|object)} [domainOrOptions] - The project domain or options object. If object, see the object's optional properties.
* @param {string} [domainOrOptions.domain] - The project domain
* @param {object} [domainOrOptions.token] - auth token - object with value property and optional headerOrQueryName and isQuery properties
*/
let ZitiControllerWSClient = (function() {
'use strict';
/**
 * Construct a controller client and the Channel it uses to reach the
 * Ziti Controller.
 * @param {(string|object)} options - either the domain string, or an options
 *        object with ctx, domain, apiKey and logger properties.
 */
function ZitiControllerWSClient(options) {
    // NOTE(review): options.ctx is read unconditionally — when options is a
    // plain domain string this yields undefined; confirm callers always pass
    // an object when a ctx is required.
    this._ctx = options.ctx;
    let domain = (typeof options === 'object') ? options.domain : options;
    this.domain = domain ? domain : 'ws://demo.ziti.dev/ws';
    if (this.domain.length === 0) {
        throw new Error('Domain parameter must be specified as a string.');
    }
    // Split the configured domain into host and port for the Channel.
    let parsedURL = new URL(this.domain);
    this._controllerHost = parsedURL.hostname;
    this._controllerPort = parsedURL.port;
    // Create a Channel to the Controller
    this._ch = new ZitiControllerChannel({
        ctx: this._ctx,
        controllerHost: this._controllerHost,
        controllerPort: this._controllerPort,
    });
    // Fall back to an empty key object / no-op logger when not provided.
    this.apiKey = (typeof options === 'object') ? (options.apiKey ? options.apiKey : {}) : {};
    this.logger = (typeof options === 'object') ? (options.logger ? options.logger : function() { /* NOP */ }) : function() { /* NOP */ };
}
// Serialize a flat object of query parameters into a URL query string
// ("k1=v1&k2=v2"), percent-encoding both names and values. Only the
// object's own enumerable properties are included.
function serializeQueryParams(parameters) {
    const encodedPairs = Object.keys(parameters).map(
        (name) => encodeURIComponent(name) + '=' + encodeURIComponent(parameters[name])
    );
    return encodedPairs.join('&');
}
// Merge any caller-supplied $queryParameters into the query-parameter
// object, overwriting keys that already exist; the (mutated)
// queryParameters object is returned.
function mergeQueryParams(parameters, queryParameters) {
    const extra = parameters.$queryParameters;
    if (extra) {
        for (const name of Object.keys(extra)) {
            queryParameters[name] = extra[name];
        }
    }
    return queryParameters;
}
ZitiControllerWSClient.prototype.connect = async function() {
await this._ch.connect();
};
ZitiControllerWSClient.prototype.echo = async function(data) {
return await this._ch.echo(data);
};
/**
 * HTTP Request
 * @method
 * @name ZitiControllerWSClient#request
 * @param {string} method - http method
 * @param {string} url - url to do request
 * @param {object} parameters
 * @param {object} body - body parameters / object
 * @param {object} headers - header parameters
 * @param {object} queryParameters - querystring parameters
 * @param {object} form - form data object
 * @param {object} deferred - promise object
 */
ZitiControllerWSClient.prototype.request = function(method, url, parameters, body, headers, queryParameters, form, deferred) {
    const queryParams = queryParameters && Object.keys(queryParameters).length ? serializeQueryParams(queryParameters) : null;
    // urlWithParams is only used for logging below; the channel request
    // carries path + queryParams separately.
    const urlWithParams = url + (queryParams ? '?' + queryParams : '');
    let parsedURL = new URL(url);
    let path = parsedURL.pathname;
    // Treat an empty body object as "no body".
    if (body && !Object.keys(body).length) {
        body = undefined;
    }
    this.logger.debug('ZitiControllerWSClient: doing request to [%o]', urlWithParams);
    // The request is serialized to JSON and sent over the channel; the
    // channel's promise is funneled into the caller-supplied deferred.
    return deferred.resolve(this._ch.request(
        JSON.stringify(
            {
                method,
                path,
                queryParams,
                headers,
                body
            }
        )
    ));
};
/**
 * Set Api Key
 * @method
 * @name ZitiControllerWSClient#setApiKey
 * @param {string} value - apiKey's value
 * @param {string} headerOrQueryName - the header or query name to send the apiKey at
 * @param {boolean} isQuery - true if send the apiKey as query param, otherwise, send as header param
 */
ZitiControllerWSClient.prototype.setApiKey = function(value, headerOrQueryName, isQuery) {
    this.apiKey.value = value;
    this.apiKey.headerOrQueryName = headerOrQueryName;
    this.apiKey.isQuery = isQuery;
};
/**
 * Set Auth headers
 * @method
 * @name ZitiControllerWSClient#setAuthHeaders
 * @param {object} headerParams - headers object
 * @returns {object} the headers object with the api key header added when
 *          the key is configured to be sent as a header (not a query param)
 */
ZitiControllerWSClient.prototype.setAuthHeaders = function(headerParams) {
    let headers = headerParams ? headerParams : {};
    if (!this.apiKey.isQuery && this.apiKey.headerOrQueryName) {
        headers[this.apiKey.headerOrQueryName] = this.apiKey.value;
    }
    return headers;
};
/**
 * This endpoint is used during enrollments to bootstrap trust between enrolling clients and the Ziti Edge API.
 This endpoint returns a base64 encoded PKCS7 store. The content can be base64 decoded and parsed by any library
 that supports parsing PKCS7 stores.
 * @method
 * @name ZitiControllerWSClient#listWellKnownCas
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.listWellKnownCas = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/.well-known/est/cacerts';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    // This endpoint returns a PKCS7 store rather than JSON.
    headers['Accept'] = ['application/pkcs7-mime'];
    headers['Content-Type'] = ['application/json'];
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves the controller's root resource (path '/').
 * @method
 * @name ZitiControllerWSClient#listRoot
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.listRoot = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Returns version information
 * @method
 * @name ZitiControllerWSClient#listVersion
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.listVersion = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/version';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * This endpoint is useful for UIs that wish to display UI elements with counts.
 * @method
 * @name ZitiControllerWSClient#listSummary
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.listSummary = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/summary';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Returns a list of spec files embedded within the controller for consumption/documentation/code generation
 * @method
 * @name ZitiControllerWSClient#listSpecs
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.listSpecs = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/specs';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Returns single spec resource embedded within the controller for consumption/documentation/code generation
 * @method
 * @name ZitiControllerWSClient#detailSpec
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailSpec = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/specs/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Substitute the path template, then reject when the required id is missing.
    path = path.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Return the body of the specification (i.e. Swagger, OpenAPI 2.0, 3.0, etc).
 * @method
 * @name ZitiControllerWSClient#detailSpecBody
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailSpecBody = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/specs/{id}/spec';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    // The spec body may be served as YAML or JSON.
    headers['Accept'] = ['text/yaml, application/json'];
    headers['Content-Type'] = ['application/json'];
    path = path.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Returns a list of active API sessions. Supports sorting, filtering, and
 * pagination. Admin access required.
 * @method
 * @name ZitiControllerWSClient#listAPISessions
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - page size
 * @param {integer} parameters.offset - page start
 * @param {string} parameters.filter - filter expression
 */
ZitiControllerWSClient.prototype.listAPISessions = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const requestPath = this.domain + '/api-sessions';
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json; charset=utf-8'];
    headers['Content-Type'] = ['application/json'];
    // Copy the recognized paging/filtering options into the query string.
    let queryParameters = {};
    ['limit', 'offset', 'filter'].forEach(function(name) {
        if (parameters[name] !== undefined) {
            queryParameters[name] = parameters[name];
        }
    });
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', requestPath, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Retrieves a single API Session by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailAPISessions
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailAPISessions = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/api-sessions/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Deletes an API session by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#deleteAPISessions
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.deleteAPISessions = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/api-sessions/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Allows authentication. Methods include "password" and "cert".
 * @method
 * @name ZitiControllerWSClient#authenticate
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - credentials payload for the chosen method
 * @param {string} parameters.method - authentication method (required)
 */
ZitiControllerWSClient.prototype.authenticate = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // The request body is optional; fall back to an empty object.
    const body = parameters['body'] !== undefined ? parameters['body'] : {};
    let queryParameters = {};
    if (parameters['method'] !== undefined) {
        queryParameters['method'] = parameters['method'];
    }
    if (parameters['method'] === undefined) {
        deferred.reject(new Error('Missing required parameter: method'));
        return deferred.promise;
    }
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('POST', this.domain + '/authenticate', parameters, body, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Returns a list of authenticators associated to identities. Supports
 * sorting, filtering, and pagination. Admin access required.
 * @method
 * @name ZitiControllerWSClient#listAuthenticators
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.listAuthenticators = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + '/authenticators', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Creates an authenticator for a specific identity. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#createAuthenticator
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An Authenticators create object (required)
 */
ZitiControllerWSClient.prototype.createAuthenticator = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const body = parameters['body'] !== undefined ? parameters['body'] : {};
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', this.domain + '/authenticators', parameters, body, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Retrieves a single authenticator by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailAuthenticator
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailAuthenticator = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/authenticators/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update all fields on an authenticator by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#updateAuthenticator
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An authenticator put object (required)
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.updateAuthenticator = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/authenticators/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate all required parameters before using any of them.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PUT', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update the supplied fields on an authenticator by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#patchAuthenticator
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An authenticator patch object (required)
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.patchAuthenticator = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/authenticators/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate all required parameters before using any of them.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PATCH', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Delete an authenticator by id. Deleting all authenticators for an identity
 * will make it impossible to log in. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#deleteAuthenticator
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.deleteAuthenticator = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/authenticators/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of CA resources; supports filtering, sorting, and
 * pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listCas
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - page size
 * @param {integer} parameters.offset - page start
 * @param {string} parameters.filter - filter expression
 */
ZitiControllerWSClient.prototype.listCas = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const requestPath = this.domain + '/cas';
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Copy the recognized paging/filtering options into the query string.
    let queryParameters = {};
    ['limit', 'offset', 'filter'].forEach(function(name) {
        if (parameters[name] !== undefined) {
            queryParameters[name] = parameters[name];
        }
    });
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', requestPath, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Creates a CA in an unverified state. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#createCa
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A CA to create (required)
 */
ZitiControllerWSClient.prototype.createCa = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const body = parameters['body'] !== undefined ? parameters['body'] : {};
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', this.domain + '/cas', parameters, body, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Retrieves a single CA by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailCa
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailCa = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/cas/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update all fields on a CA by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#updateCa
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A CA update object (required)
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.updateCa = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/cas/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate all required parameters before using any of them.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PUT', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update only the supplied fields on a CA by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#patchCa
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A CA patch object (required)
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.patchCa = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/cas/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate all required parameters before using any of them.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PATCH', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Delete a CA by id. Deleting a CA will delete its associated certificate
 * authenticators. This can make it impossible for identities to authenticate
 * if they no longer have any valid authenticators. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#deleteCa
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.deleteCa = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/cas/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * For CA auto enrollment, the enrollment JWT is static and provided on each
 * CA resource. This endpoint provides the JWT as a text response.
 * @method
 * @name ZitiControllerWSClient#getCaJwt
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.getCaJwt = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/cas/{id}/jwt';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/jwt'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Allows a CA to become verified by submitting a certificate in PEM format
 * that has been signed by the target CA. The common name on the certificate
 * must match the verificationToken property of the CA. Unverified CAs can not
 * be used for enrollment/authentication. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#verifyCa
 * @param {} parameters.certificate - A PEM formatted certificate signed by the target CA with the common name matching the CA's validationToken
 * @param {string} parameters.id - The id of the requested resource
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.verifyCa = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/cas/{id}/verify';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    // The PEM certificate is sent verbatim as the request body.
    headers['Content-Type'] = ['text/plain'];
    // Validate all required parameters before using any of them.
    if (parameters['certificate'] === undefined) {
        deferred.reject(new Error('Missing required parameter: certificate'));
        return deferred.promise;
    }
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    body = parameters['certificate'];
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('POST', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of config-type resources; supports filtering, sorting,
 * and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listConfigTypes
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - page size
 * @param {integer} parameters.offset - page start
 * @param {string} parameters.filter - filter expression
 */
ZitiControllerWSClient.prototype.listConfigTypes = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const requestPath = this.domain + '/config-types';
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Copy the recognized paging/filtering options into the query string.
    let queryParameters = {};
    ['limit', 'offset', 'filter'].forEach(function(name) {
        if (parameters[name] !== undefined) {
            queryParameters[name] = parameters[name];
        }
    });
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', requestPath, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Create a config-type. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#createConfigType
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A config-type to create (required)
 */
ZitiControllerWSClient.prototype.createConfigType = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const body = parameters['body'] !== undefined ? parameters['body'] : {};
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', this.domain + '/config-types', parameters, body, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Retrieves a single config-type by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailConfigType
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailConfigType = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/config-types/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update all fields on a config-type by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#updateConfigType
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A config-type update object (required)
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.updateConfigType = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/config-types/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate all required parameters before using any of them.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PUT', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update the supplied fields on a config-type. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#patchConfigType
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A config-type patch object (required)
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.patchConfigType = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/config-types/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate all required parameters before using any of them.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PATCH', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Delete a config-type by id. Removing a configuration type that is in use
 * will result in a 409 conflict HTTP status code and error. All
 * configurations of a type must be removed first.
 * @method
 * @name ZitiControllerWSClient#deleteConfigType
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.deleteConfigType = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/config-types/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Lists the configs associated to a config-type. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listConfigsForConfigType
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.listConfigsForConfigType = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/config-types/{id}/configs';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of config resources; supports filtering, sorting, and
 * pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listConfigs
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - page size
 * @param {integer} parameters.offset - page start
 * @param {string} parameters.filter - filter expression
 */
ZitiControllerWSClient.prototype.listConfigs = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const requestPath = this.domain + '/configs';
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Copy the recognized paging/filtering options into the query string.
    let queryParameters = {};
    ['limit', 'offset', 'filter'].forEach(function(name) {
        if (parameters[name] !== undefined) {
            queryParameters[name] = parameters[name];
        }
    });
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', requestPath, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Create a config resource. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#createConfig
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A config to create (required)
 */
ZitiControllerWSClient.prototype.createConfig = function(parameters) {
    // Default to an empty options object when the caller passes nothing.
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const body = parameters['body'] !== undefined ? parameters['body'] : {};
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', this.domain + '/configs', parameters, body, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Retrieves a single config by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailConfig
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailConfig = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/configs/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate before substituting so a missing id never produces a path
    // containing the literal string "undefined".
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update all fields on a config by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#updateConfig
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A config update object (required)
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.updateConfig = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/configs/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Validate all required parameters before using any of them.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Encode the id so reserved URL characters cannot alter the route.
    path = path.replace('{id}', encodeURIComponent(parameters['id']));
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PUT', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Apply a partial update to the supplied fields of a config. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#patchConfig
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A config patch object
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.patchConfig = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Payload and id are both required; fail fast when either is absent.
        if (parameters['body'] === undefined) {
            deferred.reject(new Error('Missing required parameter: body'));
            return deferred.promise;
        }
        const body = parameters['body'];
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/configs/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('PATCH', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Remove a config by its id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#deleteConfig
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.deleteConfig = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/configs/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('DELETE', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Returns the API session that backs the current request.
 * @method
 * @name ZitiControllerWSClient#getCurrentAPISession
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.getCurrentAPISession = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        const queryParameters = mergeQueryParams(parameters, {});
        this.request('GET', this.domain + '/current-api-session', parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Ends (logs out) the current API session.
 * @method
 * @name ZitiControllerWSClient#deleteCurrentApiSession
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.deleteCurrentApiSession = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        const queryParameters = mergeQueryParams(parameters, {});
        this.request('DELETE', this.domain + '/current-api-session', parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Returns the identity tied to the API session used to issue the current request.
 * @method
 * @name ZitiControllerWSClient#getCurrentIdentity
 * @param {object} parameters - method options and parameters
 */
ZitiControllerWSClient.prototype.getCurrentIdentity = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        const queryParameters = mergeQueryParams(parameters, {});
        this.request('GET', this.domain + '/current-identity', parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Lists the authenticators assigned to the current API session's identity;
 * supports filtering, sorting, and pagination.
 * @method
 * @name ZitiControllerWSClient#listCurrentIdentityAuthenticators
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit -
 * @param {integer} parameters.offset -
 * @param {string} parameters.filter -
 */
ZitiControllerWSClient.prototype.listCurrentIdentityAuthenticators = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Copy the optional paging/filtering options into the query string.
        ['limit', 'offset', 'filter'].forEach(function(name) {
            if (parameters[name] !== undefined) {
                queryParameters[name] = parameters[name];
            }
        });
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + '/current-identity/authenticators', parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Fetch a single authenticator by id. Only authenticators belonging to the
 * API session's identity are visible.
 * @method
 * @name ZitiControllerWSClient#detailCurrentIdentityAuthenticator
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailCurrentIdentityAuthenticator = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/current-identity/authenticators/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Replace every field of an authenticator by id. Only authenticators
 * assigned to the API session's identity can be updated.
 * @method
 * @name ZitiControllerWSClient#updateCurrentIdentityAuthenticator
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An authenticator put object
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.updateCurrentIdentityAuthenticator = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Payload and id are both required; fail fast when either is absent.
        if (parameters['body'] === undefined) {
            deferred.reject(new Error('Missing required parameter: body'));
            return deferred.promise;
        }
        const body = parameters['body'];
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/current-identity/authenticators/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('PUT', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Partially update an authenticator by id. Only authenticators assigned to
 * the API session's identity can be updated.
 * @method
 * @name ZitiControllerWSClient#patchCurrentIdentityAuthenticator
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An authenticator patch object
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.patchCurrentIdentityAuthenticator = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Payload and id are both required; fail fast when either is absent.
        if (parameters['body'] === undefined) {
            deferred.reject(new Error('Missing required parameter: body'));
            return deferred.promise;
        }
        const body = parameters['body'];
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/current-identity/authenticators/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('PATCH', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Lists edge router policy resources; supports filtering, sorting, and
 * pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listEdgeRouterPolicies
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit -
 * @param {integer} parameters.offset -
 * @param {string} parameters.filter -
 */
ZitiControllerWSClient.prototype.listEdgeRouterPolicies = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Copy the optional paging/filtering options into the query string.
        ['limit', 'offset', 'filter'].forEach(function(name) {
            if (parameters[name] !== undefined) {
                queryParameters[name] = parameters[name];
            }
        });
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + '/edge-router-policies', parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Create an edge router policy resource. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#createEdgeRouterPolicy
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An edge router policy to create
 */
ZitiControllerWSClient.prototype.createEdgeRouterPolicy = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The request payload is mandatory.
        if (parameters['body'] === undefined) {
            deferred.reject(new Error('Missing required parameter: body'));
            return deferred.promise;
        }
        const body = parameters['body'];
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('POST', this.domain + '/edge-router-policies', parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Fetch a single edge router policy by its id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailEdgeRouterPolicy
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailEdgeRouterPolicy = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-router-policies/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Replace every field of an edge router policy by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#updateEdgeRouterPolicy
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An edge router policy update object
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.updateEdgeRouterPolicy = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Payload and id are both required; fail fast when either is absent.
        if (parameters['body'] === undefined) {
            deferred.reject(new Error('Missing required parameter: body'));
            return deferred.promise;
        }
        const body = parameters['body'];
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-router-policies/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('PUT', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Partially update the supplied fields on an edge router policy. Requires
 * admin access.
 * @method
 * @name ZitiControllerWSClient#patchEdgeRouterPolicy
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An edge router policy patch object
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.patchEdgeRouterPolicy = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Payload and id are both required; fail fast when either is absent.
        if (parameters['body'] === undefined) {
            deferred.reject(new Error('Missing required parameter: body'));
            return deferred.promise;
        }
        const body = parameters['body'];
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-router-policies/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('PATCH', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Remove an edge router policy by its id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#deleteEdgeRouterPolicy
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.deleteEdgeRouterPolicy = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-router-policies/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('DELETE', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Lists the edge routers an edge router policy affects; supports filtering,
 * sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listEdgeRouterPolicyEdgeRouters
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.listEdgeRouterPolicyEdgeRouters = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-router-policies/{id}/edge-routers'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Lists the identities an edge router policy affects; supports filtering,
 * sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listEdgeRouterPolicyIdentities
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.listEdgeRouterPolicyIdentities = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-router-policies/{id}/identities'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Lists edge router resources; supports filtering, sorting, and pagination.
 * Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listEdgeRouters
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit -
 * @param {integer} parameters.offset -
 * @param {string} parameters.filter -
 * @param {array} parameters.roleFilter -
 * @param {string} parameters.roleSemantic -
 */
ZitiControllerWSClient.prototype.listEdgeRouters = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Copy the optional paging/filtering/role options into the query string.
        ['limit', 'offset', 'filter', 'roleFilter', 'roleSemantic'].forEach(function(name) {
            if (parameters[name] !== undefined) {
                queryParameters[name] = parameters[name];
            }
        });
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + '/edge-routers', parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Create an edge router resource. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#createEdgeRouter
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An edge router to create
 */
ZitiControllerWSClient.prototype.createEdgeRouter = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The request payload is mandatory.
        if (parameters['body'] === undefined) {
            deferred.reject(new Error('Missing required parameter: body'));
            return deferred.promise;
        }
        const body = parameters['body'];
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('POST', this.domain + '/edge-routers', parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Fetch a single edge router by its id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailEdgeRouter
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.detailEdgeRouter = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-routers/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Replace every field of an edge router by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#updateEdgeRouter
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An edge router update object
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.updateEdgeRouter = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Payload and id are both required; fail fast when either is absent.
        if (parameters['body'] === undefined) {
            deferred.reject(new Error('Missing required parameter: body'));
            return deferred.promise;
        }
        const body = parameters['body'];
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-routers/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('PUT', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Partially update the supplied fields on an edge router. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#patchEdgeRouter
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - An edge router patch object
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.patchEdgeRouter = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // Payload and id are both required; fail fast when either is absent.
        if (parameters['body'] === undefined) {
            deferred.reject(new Error('Missing required parameter: body'));
            return deferred.promise;
        }
        const body = parameters['body'];
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-routers/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('PATCH', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Remove an edge router by its id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#deleteEdgeRouter
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.deleteEdgeRouter = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-routers/{id}'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('DELETE', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Lists the edge router policies that apply to the given edge router.
 * @method
 * @name ZitiControllerWSClient#listEdgeRouterEdgeRouterPolicies
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.listEdgeRouterEdgeRouterPolicies = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-routers/{id}/edge-router-policies'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Lists identities that may access services via the given edge router;
 * supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listEdgeRouterIdentities
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.listEdgeRouterIdentities = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-routers/{id}/identities'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Lists the service edge router policies that apply to the given edge router.
 * @method
 * @name ZitiControllerWSClient#listEdgeRouterServiceEdgeRouterPolicies
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.listEdgeRouterServiceEdgeRouterPolicies = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-routers/{id}/service-edge-router-policies'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Lists services that may be accessed via the given edge router; supports
 * filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listEdgeRouterServices
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource
 */
ZitiControllerWSClient.prototype.listEdgeRouterServices = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        let headers = this.setAuthHeaders({});
        headers['Accept'] = ['application/json'];
        headers['Content-Type'] = ['application/json'];
        // The id path segment is mandatory; reject without issuing a request.
        if (parameters['id'] === undefined) {
            deferred.reject(new Error('Missing required parameter: id'));
            return deferred.promise;
        }
        const path = '/edge-routers/{id}/services'.replace('{id}', parameters['id']);
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('GET', this.domain + path, parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * Generic enrollment endpoint; the controller defers to the logic of the
 * more specific `enroll/*` endpoints based on the supplied token.
 * @method
 * @name ZitiControllerWSClient#enroll
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.token -
 */
ZitiControllerWSClient.prototype.enroll = function(parameters) {
        parameters = (parameters === undefined) ? {} : parameters;
        const deferred = Q.defer();
        const body = {};
        const form = {};
        let queryParameters = {};
        // Enrollment is unauthenticated and negotiates several content types.
        const headers = {};
        headers['Accept'] = ['application/x-pem-file, application/json'];
        headers['Content-Type'] = ['application/pkcs10,application/json,application/x-pem-file,text/plain'];
        if (parameters['token'] !== undefined) {
            queryParameters['token'] = parameters['token'];
        }
        queryParameters = mergeQueryParams(parameters, queryParameters);
        this.request('POST', this.domain + '/enroll', parameters, body, headers, queryParameters, form, deferred);
        return deferred.promise;
    };
/**
 * CA auto-enrollment: the identity is created during enrollment rather than
 * beforehand. The client presents a certificate signed by a verified CA
 * (see POST /cas and POST /cas/{id}/verify), so no CSR is required.
 *
 * @param {object} parameters - call options (none required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.enrollCa = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    // Unauthenticated JSON request.
    const headers = {};
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', this.domain + '/enroll/ca', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * One-time-token enrollment. The caller supplies a PEM-encoded CSR body and
 * manages the private key backing it; the token is passed as a query param.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.token - one-time enrollment token (required)
 * @returns {Promise} resolves with the issued client certificate
 */
ZitiControllerWSClient.prototype.enrollOtt = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    // Unauthenticated; expects a PKCS#10 CSR and returns an x509 user cert.
    const headers = {};
    headers['Accept'] = ['application/x-x509-user-cert'];
    headers['Content-Type'] = ['application/pkcs10'];
    if (parameters['token'] === undefined) {
        deferred.reject(new Error('Missing required parameter: token'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, { token: parameters['token'] });
    this.request('POST', this.domain + '/enroll/ott', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * One-time-token + CA enrollment. The client must present a certificate
 * signed by the CA associated with the enrollment (see POST /cas and
 * POST /cas/{id}/verify). Similar to CA auto-enrollment except the identity
 * must be pre-created; the certificate is pre-exchanged, so no CSR is sent.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.token - one-time enrollment token (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.enrollOttCa = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    // Unauthenticated JSON request.
    const headers = {};
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['token'] === undefined) {
        deferred.reject(new Error('Missing required parameter: token'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, { token: parameters['token'] });
    this.request('POST', this.domain + '/enroll/ottca', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * One-time-token UPDB enrollment: establishes an initial username/password
 * combination for an identity.
 *
 * NOTE: the method name "ernollUpdb" is a generator typo kept for
 * backward compatibility with existing callers.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.token - one-time enrollment token (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.ernollUpdb = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    // Unauthenticated JSON request.
    const headers = {};
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['token'] === undefined) {
        deferred.reject(new Error('Missing required parameter: token'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, { token: parameters['token'] });
    this.request('POST', this.domain + '/enroll/updb', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * One-time-token enrollment for an edge router, establishing its
 * certificate-based identity.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.token - one-time enrollment token (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.enrollErOtt = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    // Unauthenticated JSON request.
    const headers = {};
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['token'] === undefined) {
        deferred.reject(new Error('Missing required parameter: token'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, { token: parameters['token'] });
    this.request('POST', this.domain + '/enroll/erott', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List outstanding enrollments (admin only). Supports filtering, sorting,
 * and pagination.
 *
 * @param {object} parameters - call options
 * @param {number} [parameters.limit] - page size
 * @param {number} [parameters.offset] - page offset
 * @param {string} [parameters.filter] - controller filter expression
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listEnrollments = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Forward only the paging/filter options the caller actually supplied.
    const query = {};
    for (const key of ['limit', 'offset', 'filter']) {
        if (parameters[key] !== undefined) {
            query[key] = parameters[key];
        }
    }
    const queryParameters = mergeQueryParams(parameters, query);
    this.request('GET', this.domain + '/enrollments', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Fetch a single outstanding enrollment by id (admin only).
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the enrollment (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.detailEnrollment = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/enrollments/{id}'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Delete an outstanding enrollment by id (admin only).
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the enrollment (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.deleteEnrollment = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/enrollments/{id}'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('DELETE', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List geo-regions (admin only). Supports filtering, sorting, and pagination.
 *
 * @param {object} parameters - call options
 * @param {number} [parameters.limit] - page size
 * @param {number} [parameters.offset] - page offset
 * @param {string} [parameters.filter] - controller filter expression
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listGeoRegions = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Forward only the paging/filter options the caller actually supplied.
    const query = {};
    for (const key of ['limit', 'offset', 'filter']) {
        if (parameters[key] !== undefined) {
            query[key] = parameters[key];
        }
    }
    const queryParameters = mergeQueryParams(parameters, query);
    this.request('GET', this.domain + '/geo-regions', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Fetch a single geo-region by id (admin only).
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the geo-region (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.detailGeoRegion = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/geo-regions/{id}'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List identities (admin only). Supports filtering, sorting, pagination,
 * and role-attribute filtering.
 *
 * @param {object} parameters - call options
 * @param {number} [parameters.limit] - page size
 * @param {number} [parameters.offset] - page offset
 * @param {string} [parameters.filter] - controller filter expression
 * @param {Array}  [parameters.roleFilter] - role attributes to match
 * @param {string} [parameters.roleSemantic] - how roleFilter entries combine
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listIdentities = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Forward only the options the caller actually supplied.
    const query = {};
    for (const key of ['limit', 'offset', 'filter', 'roleFilter', 'roleSemantic']) {
        if (parameters[key] !== undefined) {
            query[key] = parameters[key];
        }
    }
    const queryParameters = mergeQueryParams(parameters, query);
    this.request('GET', this.domain + '/identities', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Create an identity (admin only).
 *
 * @param {object} parameters - call options
 * @param {object} parameters.body - the identity to create (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.createIdentity = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', this.domain + '/identities', parameters, parameters['body'], headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Fetch a single identity by id (admin only).
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.detailIdentity = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/identities/{id}'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Replace all fields on an identity by id (admin only).
 *
 * @param {object} parameters - call options
 * @param {object} parameters.body - full identity update object (required)
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.updateIdentity = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // body is validated before the path parameter, matching the controller's
    // generated parameter order.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const path = '/identities/{id}'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('PUT', this.domain + path, parameters, parameters['body'], headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Update only the supplied fields on an identity (admin only).
 *
 * @param {object} parameters - call options
 * @param {object} parameters.body - partial identity patch object (required)
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.patchIdentity = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const path = '/identities/{id}'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('PATCH', this.domain + path, parameters, parameters['body'], headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Delete an identity by id (admin only).
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.deleteIdentity = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/identities/{id}'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('DELETE', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List the edge router policies that apply to a given identity.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listIdentitysEdgeRouterPolicies = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/identities/{id}/edge-router-policies'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List the service configs associated with a specific identity.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listIdentitysServiceConfigs = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/identities/{id}/service-configs'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Associate service configs with a specific identity.
 *
 * @param {object} parameters - call options
 * @param {object} parameters.body - service-config associations to add (required)
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.associateIdentitysServiceConfigs = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const path = '/identities/{id}/service-configs'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', this.domain + path, parameters, parameters['body'], headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Remove service-config associations from a specific identity.
 *
 * @param {object} parameters - call options
 * @param {object} [parameters.body] - array of {service, config} id pairs to
 *   remove; optional — omitting it is passed through as an empty body
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.disassociateIdentitysServiceConfigs = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const body = parameters['body'] !== undefined ? parameters['body'] : {};
    const path = '/identities/{id}/service-configs'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('DELETE', this.domain + path, parameters, body, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List the service policies that apply to a given identity.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listIdentityServicePolicies = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/identities/{id}/service-policies'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List the edge routers the given identity may use to reach services
 * (admin only). Supports filtering, sorting, and pagination.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listIdentityEdgeRouters = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/identities/{id}/edge-routers'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List the services the given identity has access to (admin only).
 * Supports filtering, sorting, and pagination.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the identity (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listIdentityServices = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/identities/{id}/services'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Analyze policies to determine whether the identity should be able to dial
 * or bind the given service (admin only). Checks service policies for
 * access, edge router policies for a common edge router so a connection can
 * be made, and whether at least one such edge router is online.
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the identity (required)
 * @param {string} parameters.serviceId - id of the service (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.getIdentityPolicyAdvice = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Both path parameters are mandatory; id is validated first to match
    // the original generated ordering of rejections.
    const path = '/identities/{id}/policy-advice/{serviceId}'
        .replace('{id}', parameters['id'])
        .replace('{serviceId}', parameters['serviceId']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    if (parameters['serviceId'] === undefined) {
        deferred.reject(new Error('Missing required parameter: serviceId'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List identity types (admin only). Supports filtering, sorting, and
 * pagination.
 *
 * @param {object} parameters - call options
 * @param {number} [parameters.limit] - page size
 * @param {number} [parameters.offset] - page offset
 * @param {string} [parameters.filter] - controller filter expression
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listIdentityTypes = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Forward only the paging/filter options the caller actually supplied.
    const query = {};
    for (const key of ['limit', 'offset', 'filter']) {
        if (parameters[key] !== undefined) {
            query[key] = parameters[key];
        }
    }
    const queryParameters = mergeQueryParams(parameters, query);
    this.request('GET', this.domain + '/identity-types', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Fetch a single identity type by id (admin only).
 *
 * @param {object} parameters - call options
 * @param {string} parameters.id - id of the identity type (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.detailIdentityType = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    const path = '/identity-types/{id}'.replace('{id}', parameters['id']);
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('GET', this.domain + path, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * List service edge router policies (admin only). Supports filtering,
 * sorting, and pagination.
 *
 * @param {object} parameters - call options
 * @param {number} [parameters.limit] - page size
 * @param {number} [parameters.offset] - page offset
 * @param {string} [parameters.filter] - controller filter expression
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.listServiceEdgeRouterPolicies = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Forward only the paging/filter options the caller actually supplied.
    const query = {};
    for (const key of ['limit', 'offset', 'filter']) {
        if (parameters[key] !== undefined) {
            query[key] = parameters[key];
        }
    }
    const queryParameters = mergeQueryParams(parameters, query);
    this.request('GET', this.domain + '/service-edge-router-policies', parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Create a service edge router policy (admin only).
 *
 * @param {object} parameters - call options
 * @param {object} parameters.body - the policy to create (required)
 * @returns {Promise} resolves with the controller response
 */
ZitiControllerWSClient.prototype.createServiceEdgeRouterPolicy = function (parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    const deferred = Q.defer();
    const headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', this.domain + '/service-edge-router-policies', parameters, parameters['body'], headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Retrieves a single service edge policy by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailServiceEdgeRouterPolicy
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.detailServiceEdgeRouterPolicy = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-edge-router-policies/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Fix: validate the required 'id' BEFORE substituting it into the URL.
    // The original called path.replace first, interpolating the literal
    // string 'undefined' into the path when the parameter was missing.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update all fields on a service edge policy by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#updateServiceEdgeRouterPolicy
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A service edge router policy update object (required)
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.updateServiceEdgeRouterPolicy = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-edge-router-policies/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Reject on missing required parameters before doing any work.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Fix: validate 'id' BEFORE interpolating it into the URL; the original
    // substituted first, writing the literal 'undefined' into the path.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PUT', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update the supplied fields on a service edge policy. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#patchServiceEdgeRouterPolicy
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A service edge router policy patch object (required)
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.patchServiceEdgeRouterPolicy = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-edge-router-policies/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Reject on missing required parameters before doing any work.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Fix: validate 'id' BEFORE interpolating it into the URL; the original
    // substituted first, writing the literal 'undefined' into the path.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PATCH', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Delete a service edge policy by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#deleteServiceEdgeRouterPolicy
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.deleteServiceEdgeRouterPolicy = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-edge-router-policies/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * List the edge routers that a service edge router policy applies to
 * @method
 * @name ZitiControllerWSClient#listServiceEdgeRouterPolicyEdgeRouters
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.listServiceEdgeRouterPolicyEdgeRouters = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-edge-router-policies/{id}/edge-routers';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * List the services that a service edge router policy applies to
 * @method
 * @name ZitiControllerWSClient#listServiceEdgeRouterPolicyServices
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.listServiceEdgeRouterPolicyServices = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-edge-router-policies/{id}/services';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of service policy resources; supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listServicePolicies
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - maximum number of results to return
 * @param {integer} parameters.offset - number of results to skip
 * @param {string} parameters.filter - server-side filter expression
 */
ZitiControllerWSClient.prototype.listServicePolicies = function(parameters) {
    parameters = parameters === undefined ? {} : parameters;
    const deferred = Q.defer();
    const url = this.domain + '/service-policies';
    // Standard JSON headers plus whatever auth headers the session requires.
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Copy the recognized pagination/filter options into the query string.
    let queryParameters = {};
    ['limit', 'offset', 'filter'].forEach(function(name) {
        if (parameters[name] !== undefined) {
            queryParameters[name] = parameters[name];
        }
    });
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', url, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Create a service policy resource. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#createServicePolicy
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A service policy to create (required)
 */
ZitiControllerWSClient.prototype.createServicePolicy = function(parameters) {
    parameters = parameters === undefined ? {} : parameters;
    const deferred = Q.defer();
    const url = this.domain + '/service-policies';
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // The request body is mandatory; reject without issuing a request.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const body = parameters['body'];
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', url, parameters, body, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Retrieves a single service policy by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailServicePolicy
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.detailServicePolicy = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-policies/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update all fields on a service policy by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#updateServicePolicy
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A service policy update object (required)
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.updateServicePolicy = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-policies/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Reject on missing required parameters before doing any work.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Fix: validate 'id' BEFORE interpolating it into the URL; the original
    // substituted first, writing the literal 'undefined' into the path.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PUT', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update the supplied fields on a service policy. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#patchServicePolicy
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A service policy patch object (required)
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.patchServicePolicy = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-policies/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Reject on missing required parameters before doing any work.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Fix: validate 'id' BEFORE interpolating it into the URL; the original
    // substituted first, writing the literal 'undefined' into the path.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PATCH', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Delete a service policy by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#deleteServicePolicy
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.deleteServicePolicy = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-policies/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of identity resources that are affected by a service policy; supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listServicePolicyIdentities
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - maximum number of results to return
 * @param {integer} parameters.offset - number of results to skip
 * @param {string} parameters.filter - server-side filter expression
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.listServicePolicyIdentities = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-policies/{id}/identities';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['limit'] !== undefined) {
        queryParameters['limit'] = parameters['limit'];
    }
    if (parameters['offset'] !== undefined) {
        queryParameters['offset'] = parameters['offset'];
    }
    if (parameters['filter'] !== undefined) {
        queryParameters['filter'] = parameters['filter'];
    }
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of service resources that are affected by a service policy; supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listServicePolicyServices
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - maximum number of results to return
 * @param {integer} parameters.offset - number of results to skip
 * @param {string} parameters.filter - server-side filter expression
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.listServicePolicyServices = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/service-policies/{id}/services';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['limit'] !== undefined) {
        queryParameters['limit'] = parameters['limit'];
    }
    if (parameters['offset'] !== undefined) {
        queryParameters['offset'] = parameters['offset'];
    }
    if (parameters['filter'] !== undefined) {
        queryParameters['filter'] = parameters['filter'];
    }
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of config resources; supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listServices
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - maximum number of results to return
 * @param {integer} parameters.offset - number of results to skip
 * @param {string} parameters.filter - server-side filter expression
 * @param {array} parameters.roleFilter - role attributes to filter by
 * @param {string} parameters.roleSemantic - semantic applied to roleFilter
 */
ZitiControllerWSClient.prototype.listServices = function(parameters) {
    parameters = parameters === undefined ? {} : parameters;
    const deferred = Q.defer();
    const url = this.domain + '/services';
    // Standard JSON headers plus whatever auth headers the session requires.
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Copy the recognized query options into the query string.
    let queryParameters = {};
    ['limit', 'offset', 'filter', 'roleFilter', 'roleSemantic'].forEach(function(name) {
        if (parameters[name] !== undefined) {
            queryParameters[name] = parameters[name];
        }
    });
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', url, parameters, {}, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Create a services resource. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#createService
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A service to create (required)
 */
ZitiControllerWSClient.prototype.createService = function(parameters) {
    parameters = parameters === undefined ? {} : parameters;
    const deferred = Q.defer();
    const url = this.domain + '/services';
    let headers = this.setAuthHeaders({});
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // The request body is mandatory; reject without issuing a request.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    const body = parameters['body'];
    const queryParameters = mergeQueryParams(parameters, {});
    this.request('POST', url, parameters, body, headers, queryParameters, {}, deferred);
    return deferred.promise;
};
/**
 * Retrieves a single service by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#detailService
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.detailService = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/services/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update all fields on a service by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#updateService
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A service update object (required)
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.updateService = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/services/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Reject on missing required parameters before doing any work.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Fix: validate 'id' BEFORE interpolating it into the URL; the original
    // substituted first, writing the literal 'undefined' into the path.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PUT', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Update the supplied fields on a service. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#patchService
 * @param {object} parameters - method options and parameters
 * @param {} parameters.body - A service patch object (required)
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.patchService = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/services/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Reject on missing required parameters before doing any work.
    if (parameters['body'] === undefined) {
        deferred.reject(new Error('Missing required parameter: body'));
        return deferred.promise;
    }
    body = parameters['body'];
    // Fix: validate 'id' BEFORE interpolating it into the URL; the original
    // substituted first, writing the literal 'undefined' into the path.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('PATCH', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Delete a service by id. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#deleteService
 * @param {object} parameters - method options and parameters
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.deleteService = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/services/{id}';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of config resources associated to a specific service; supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listServiceConfig
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - maximum number of results to return
 * @param {integer} parameters.offset - number of results to skip
 * @param {string} parameters.filter - server-side filter expression
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.listServiceConfig = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/services/{id}/configs';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['limit'] !== undefined) {
        queryParameters['limit'] = parameters['limit'];
    }
    if (parameters['offset'] !== undefined) {
        queryParameters['offset'] = parameters['offset'];
    }
    if (parameters['filter'] !== undefined) {
        queryParameters['filter'] = parameters['filter'];
    }
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of service edge router policy resources that affect a specific service; supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listServiceServiceEdgeRouterPolicies
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - maximum number of results to return
 * @param {integer} parameters.offset - number of results to skip
 * @param {string} parameters.filter - server-side filter expression
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.listServiceServiceEdgeRouterPolicies = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/services/{id}/service-edge-router-policies';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['limit'] !== undefined) {
        queryParameters['limit'] = parameters['limit'];
    }
    if (parameters['offset'] !== undefined) {
        queryParameters['offset'] = parameters['offset'];
    }
    if (parameters['filter'] !== undefined) {
        queryParameters['filter'] = parameters['filter'];
    }
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of service policy resources that affect specific service; supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listServiceServicePolicies
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - maximum number of results to return
 * @param {integer} parameters.offset - number of results to skip
 * @param {string} parameters.filter - server-side filter expression
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.listServiceServicePolicies = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/services/{id}/service-policies';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['limit'] !== undefined) {
        queryParameters['limit'] = parameters['limit'];
    }
    if (parameters['offset'] !== undefined) {
        queryParameters['offset'] = parameters['offset'];
    }
    if (parameters['filter'] !== undefined) {
        queryParameters['filter'] = parameters['filter'];
    }
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of identities that have access to this service. Supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listServiceIdentities
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - maximum number of results to return
 * @param {integer} parameters.offset - number of results to skip
 * @param {string} parameters.filter - server-side filter expression
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.listServiceIdentities = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/services/{id}/identities';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['limit'] !== undefined) {
        queryParameters['limit'] = parameters['limit'];
    }
    if (parameters['offset'] !== undefined) {
        queryParameters['offset'] = parameters['offset'];
    }
    if (parameters['filter'] !== undefined) {
        queryParameters['filter'] = parameters['filter'];
    }
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
 * Retrieves a list of edge-routers that may be used to access the given service. Supports filtering, sorting, and pagination. Requires admin access.
 * @method
 * @name ZitiControllerWSClient#listServiceEdgeRouters
 * @param {object} parameters - method options and parameters
 * @param {integer} parameters.limit - maximum number of results to return
 * @param {integer} parameters.offset - number of results to skip
 * @param {string} parameters.filter - server-side filter expression
 * @param {string} parameters.id - The id of the requested resource (required)
 */
ZitiControllerWSClient.prototype.listServiceEdgeRouters = function(parameters) {
    if (parameters === undefined) {
        parameters = {};
    }
    let deferred = Q.defer();
    let domain = this.domain,
        path = '/services/{id}/edge-routers';
    let body = {},
        queryParameters = {},
        headers = {},
        form = {};
    headers = this.setAuthHeaders(headers);
    headers['Accept'] = ['application/json'];
    headers['Content-Type'] = ['application/json'];
    if (parameters['limit'] !== undefined) {
        queryParameters['limit'] = parameters['limit'];
    }
    if (parameters['offset'] !== undefined) {
        queryParameters['offset'] = parameters['offset'];
    }
    if (parameters['filter'] !== undefined) {
        queryParameters['filter'] = parameters['filter'];
    }
    // Fix: validate the required 'id' BEFORE substituting it into the URL;
    // the original interpolated the literal 'undefined' first.
    if (parameters['id'] === undefined) {
        deferred.reject(new Error('Missing required parameter: id'));
        return deferred.promise;
    }
    path = path.replace('{id}', parameters['id']);
    queryParameters = mergeQueryParams(parameters, queryParameters);
    this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
    return deferred.promise;
};
/**
* Retrieves a list of terminator resources that are assigned specific service; supports filtering, sorting, and pagination. Requires admin access.
* @method
* @name ZitiControllerWSClient#listServiceTerminators
* @param {object} parameters - method options and parameters
* @param {integer} parameters.limit -
* @param {integer} parameters.offset -
* @param {string} parameters.filter -
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.listServiceTerminators = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/services/{id}/terminators';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['limit'] !== undefined) {
queryParameters['limit'] = parameters['limit'];
}
if (parameters['offset'] !== undefined) {
queryParameters['offset'] = parameters['offset'];
}
if (parameters['filter'] !== undefined) {
queryParameters['filter'] = parameters['filter'];
}
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Retrieves a list of active sessions resources; supports filtering, sorting, and pagination. Requires admin access.
Sessions are tied to an API session and are moved when an API session times out or logs out. Active sessions
(i.e. Ziti SDK connected to an edge router) will keep the session and API session marked as active.
* @method
* @name ZitiControllerWSClient#listSessions
* @param {object} parameters - method options and parameters
* @param {integer} parameters.limit -
* @param {integer} parameters.offset -
* @param {string} parameters.filter -
*/
ZitiControllerWSClient.prototype.listSessions = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/sessions';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['limit'] !== undefined) {
queryParameters['limit'] = parameters['limit'];
}
if (parameters['offset'] !== undefined) {
queryParameters['offset'] = parameters['offset'];
}
if (parameters['filter'] !== undefined) {
queryParameters['filter'] = parameters['filter'];
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Create a session resource. Requires admin access.
* @method
* @name ZitiControllerWSClient#createSession
* @param {object} parameters - method options and parameters
* @param {} parameters.body - A session to create
*/
ZitiControllerWSClient.prototype.createSession = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/sessions';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['body'] !== undefined) {
body = parameters['body'];
}
if (parameters['body'] === undefined) {
deferred.reject(new Error('Missing required parameter: body'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('POST', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Retrieves a single session by id. Requires admin access.
* @method
* @name ZitiControllerWSClient#detailSession
* @param {object} parameters - method options and parameters
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.detailSession = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/sessions/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Delete a session by id. Requires admin access.
* @method
* @name ZitiControllerWSClient#deleteSession
* @param {object} parameters - method options and parameters
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.deleteSession = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/sessions/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Retrieves a list of terminator resources; supports filtering, sorting, and pagination. Requires admin access.
* @method
* @name ZitiControllerWSClient#listTerminators
* @param {object} parameters - method options and parameters
* @param {integer} parameters.limit -
* @param {integer} parameters.offset -
* @param {string} parameters.filter -
*/
ZitiControllerWSClient.prototype.listTerminators = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/terminators';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['limit'] !== undefined) {
queryParameters['limit'] = parameters['limit'];
}
if (parameters['offset'] !== undefined) {
queryParameters['offset'] = parameters['offset'];
}
if (parameters['filter'] !== undefined) {
queryParameters['filter'] = parameters['filter'];
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Create a terminator resource. Requires admin access.
* @method
* @name ZitiControllerWSClient#createTerminator
* @param {object} parameters - method options and parameters
* @param {} parameters.body - A terminator to create
*/
ZitiControllerWSClient.prototype.createTerminator = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/terminators';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['body'] !== undefined) {
body = parameters['body'];
}
if (parameters['body'] === undefined) {
deferred.reject(new Error('Missing required parameter: body'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('POST', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Retrieves a single terminator by id. Requires admin access.
* @method
* @name ZitiControllerWSClient#detailTerminator
* @param {object} parameters - method options and parameters
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.detailTerminator = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/terminators/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Update all fields on a terminator by id. Requires admin access.
* @method
* @name ZitiControllerWSClient#updateTerminator
* @param {object} parameters - method options and parameters
* @param {} parameters.body - A terminator update object
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.updateTerminator = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/terminators/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['body'] !== undefined) {
body = parameters['body'];
}
if (parameters['body'] === undefined) {
deferred.reject(new Error('Missing required parameter: body'));
return deferred.promise;
}
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('PUT', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Update the supplied fields on a terminator. Requires admin access.
* @method
* @name ZitiControllerWSClient#patchTerminator
* @param {object} parameters - method options and parameters
* @param {} parameters.body - A terminator patch object
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.patchTerminator = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/terminators/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['body'] !== undefined) {
body = parameters['body'];
}
if (parameters['body'] === undefined) {
deferred.reject(new Error('Missing required parameter: body'));
return deferred.promise;
}
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('PATCH', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Delete a terminator by id. Requires admin access.
* @method
* @name ZitiControllerWSClient#deleteTerminator
* @param {object} parameters - method options and parameters
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.deleteTerminator = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/terminators/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Retrieves a list of role attributes in use by edge routers; supports filtering, sorting, and pagination. Requires admin access.
* @method
* @name ZitiControllerWSClient#listEdgeRouterRoleAttributes
* @param {object} parameters - method options and parameters
* @param {integer} parameters.limit -
* @param {integer} parameters.offset -
* @param {string} parameters.filter -
*/
ZitiControllerWSClient.prototype.listEdgeRouterRoleAttributes = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/edge-router-role-attributes';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['limit'] !== undefined) {
queryParameters['limit'] = parameters['limit'];
}
if (parameters['offset'] !== undefined) {
queryParameters['offset'] = parameters['offset'];
}
if (parameters['filter'] !== undefined) {
queryParameters['filter'] = parameters['filter'];
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Retrieves a list of role attributes in use by identities; supports filtering, sorting, and pagination. Requires admin access.
* @method
* @name ZitiControllerWSClient#listIdentityRoleAttributes
* @param {object} parameters - method options and parameters
* @param {integer} parameters.limit -
* @param {integer} parameters.offset -
* @param {string} parameters.filter -
*/
ZitiControllerWSClient.prototype.listIdentityRoleAttributes = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/identity-role-attributes';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['limit'] !== undefined) {
queryParameters['limit'] = parameters['limit'];
}
if (parameters['offset'] !== undefined) {
queryParameters['offset'] = parameters['offset'];
}
if (parameters['filter'] !== undefined) {
queryParameters['filter'] = parameters['filter'];
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Retrieves a list of role attributes in use by services; supports filtering, sorting, and pagination. Requires admin access.
* @method
* @name ZitiControllerWSClient#listServiceRoleAttributes
* @param {object} parameters - method options and parameters
* @param {integer} parameters.limit -
* @param {integer} parameters.offset -
* @param {string} parameters.filter -
*/
ZitiControllerWSClient.prototype.listServiceRoleAttributes = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/service-role-attributes';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['limit'] !== undefined) {
queryParameters['limit'] = parameters['limit'];
}
if (parameters['offset'] !== undefined) {
queryParameters['offset'] = parameters['offset'];
}
if (parameters['filter'] !== undefined) {
queryParameters['filter'] = parameters['filter'];
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Retrieves a list of transit router resources; supports filtering, sorting, and pagination. Requires admin access.
* @method
* @name ZitiControllerWSClient#listTransitRouters
* @param {object} parameters - method options and parameters
* @param {integer} parameters.limit -
* @param {integer} parameters.offset -
* @param {string} parameters.filter -
*/
ZitiControllerWSClient.prototype.listTransitRouters = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/transit-routers';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['limit'] !== undefined) {
queryParameters['limit'] = parameters['limit'];
}
if (parameters['offset'] !== undefined) {
queryParameters['offset'] = parameters['offset'];
}
if (parameters['filter'] !== undefined) {
queryParameters['filter'] = parameters['filter'];
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Create a transit router resource. Requires admin access.
* @method
* @name ZitiControllerWSClient#createTransitRouter
* @param {object} parameters - method options and parameters
* @param {} parameters.body - A transit router to create
*/
ZitiControllerWSClient.prototype.createTransitRouter = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/transit-routers';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['body'] !== undefined) {
body = parameters['body'];
}
if (parameters['body'] === undefined) {
deferred.reject(new Error('Missing required parameter: body'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('POST', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Retrieves a single transit router by id. Requires admin access.
* @method
* @name ZitiControllerWSClient#detailTransitRouter
* @param {object} parameters - method options and parameters
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.detailTransitRouter = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/transit-routers/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Update all fields on a transit router by id. Requires admin access.
* @method
* @name ZitiControllerWSClient#updateTransitRouter
* @param {object} parameters - method options and parameters
* @param {} parameters.body - A transit router update object
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.updateTransitRouter = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/transit-routers/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['body'] !== undefined) {
body = parameters['body'];
}
if (parameters['body'] === undefined) {
deferred.reject(new Error('Missing required parameter: body'));
return deferred.promise;
}
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('PUT', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Update the supplied fields on a transit router. Requires admin access.
* @method
* @name ZitiControllerWSClient#patchTransitRouter
* @param {object} parameters - method options and parameters
* @param {} parameters.body - A transit router patch object
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.patchTransitRouter = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/transit-routers/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
if (parameters['body'] !== undefined) {
body = parameters['body'];
}
if (parameters['body'] === undefined) {
deferred.reject(new Error('Missing required parameter: body'));
return deferred.promise;
}
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('PATCH', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Delete a transit router by id. Requires admin access.
* @method
* @name ZitiControllerWSClient#deleteTransitRouter
* @param {object} parameters - method options and parameters
* @param {string} parameters.id - The id of the requested resource
*/
ZitiControllerWSClient.prototype.deleteTransitRouter = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/transit-routers/{id}';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
path = path.replace('{id}', parameters['id']);
if (parameters['id'] === undefined) {
deferred.reject(new Error('Missing required parameter: id'));
return deferred.promise;
}
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('DELETE', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
* Create a new database snapshot. Requires admin access.
* @method
* @name ZitiControllerWSClient#createDatabaseSnapshot
* @param {object} parameters - method options and parameters
*/
ZitiControllerWSClient.prototype.createDatabaseSnapshot = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/database/snapshot';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('POST', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
     * Runs a data integrity scan on the datastore and returns any found issues. Requires admin access.
* @method
* @name ZitiControllerWSClient#checkDataIntegrity
* @param {object} parameters - method options and parameters
*/
ZitiControllerWSClient.prototype.checkDataIntegrity = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/database/check-data-integrity';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('GET', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
/**
     * Runs a data integrity scan on the datastore, attempts to fix any issues it can, and returns any found issues. Requires admin access.
* @method
* @name ZitiControllerWSClient#fixDataIntegrity
* @param {object} parameters - method options and parameters
*/
ZitiControllerWSClient.prototype.fixDataIntegrity = function(parameters) {
if (parameters === undefined) {
parameters = {};
}
let deferred = Q.defer();
let domain = this.domain,
path = '/database/fix-data-integrity';
let body = {},
queryParameters = {},
headers = {},
form = {};
headers = this.setAuthHeaders(headers);
headers['Accept'] = ['application/json'];
headers['Content-Type'] = ['application/json'];
queryParameters = mergeQueryParams(parameters, queryParameters);
this.request('POST', domain + path, parameters, body, headers, queryParameters, form, deferred);
return deferred.promise;
};
return ZitiControllerWSClient;
})();
module.exports = ZitiControllerWSClient;
|
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative O(n) implementation replacing the naive double recursion,
    which re-computed subproblems and ran in O(2^n) time. For n <= 1 the
    argument itself is returned, matching the original (including for
    negative inputs).
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
|
<filename>src/index.ts
import {
JupyterFrontEnd,
JupyterFrontEndPlugin
} from '@jupyterlab/application';
import { IFoo } from "provider-test";
/**
* Initialization data for the consumer-test extension.
*/
const extension: JupyterFrontEndPlugin<void> = {
  id: 'consumer-test',
  autoStart: true,
  // Hard dependency: JupyterLab resolves the IFoo token from its provider
  // extension and passes the resolved value to activate() below.
  requires: [IFoo],
  // Logs on activation to show the token was delivered by the provider.
  activate: (app: JupyterFrontEnd, token: IFoo) => {
    console.log('JupyterLab extension consumer-test is activated!');
    console.log('Token read', token);
  }
};
export default extension;
|
#!/usr/bin/env bash
set -e

source bosh-cpi-src/ci/tasks/utils.sh
source /etc/profile.d/chruby-with-ruby-2.1.2.sh

check_param release_blobs_access_key
check_param release_blobs_secret_key

# Version info. Modern $(...) substitution instead of legacy backticks,
# and expansions quoted so paths/values with spaces cannot word-split.
semver_version=$(cat release-version-semver/number)
echo "${semver_version}" > promoted/semver_version
echo "BOSH Google CPI BOSH Release v${semver_version}" > promoted/annotation_message
today=$(date +%Y-%m-%d)

cp -r bosh-cpi-src promoted/repo

# CPI vars
cpi_release_name="bosh-google-cpi"
cpi_blob="${cpi_release_name}-${semver_version}.tgz"
cpi_link="https://storage.googleapis.com/bosh-cpi-artifacts/bosh-google-cpi-${semver_version}.tgz"

# Stemcell vars (assumes exactly one stemcell tarball is present)
stemcell_path=$(basename "$(ls stemcell/*.tgz)")
stemcell_name=${stemcell_path%.*}
stemcell_version=$(cat stemcell/version)
stemcell_url=$(sed "s|gs://|https://storage.googleapis.com/|" stemcell/url)
stemcell_type=Heavy
# Light stemcells are prefixed "light-"; RHS glob must stay unquoted to match.
if [[ $stemcell_name == light* ]]; then stemcell_type=Light; fi
stemcell_sha=$(sha1sum stemcell/*.tgz | awk '{print $1}')

dev_release=$(echo "$PWD"/bosh-cpi-release/*.tgz)

pushd promoted/repo
echo "Creating config/private.yml with blobstore secrets"
# Secrets below — keep command tracing off while they are in play.
set +x
cat > config/private.yml << EOF
---
blobstore:
  s3:
    access_key_id: ${release_blobs_access_key}
    secret_access_key: ${release_blobs_secret_key}
EOF
echo "Using BOSH CLI version..."
bosh version
echo "Finalizing CPI BOSH Release..."
bosh finalize release "${dev_release}" --version "${semver_version}"
# Remove the secrets file before committing anything.
rm config/private.yml
# Insert CPI details into README.md
# Template markers in the README
cpi_marker="\[//\]: # (new-cpi)"
cpi_sha=$(sha1sum "releases/$cpi_release_name/$cpi_blob" | awk '{print $1}')
new_cpi="|[$semver_version]($cpi_link)|$cpi_sha|$today|"
sed -i "s^$cpi_marker^$new_cpi\n$cpi_marker^" README.md
git diff | cat
git add .
git config --global user.email cf-bosh-eng@pivotal.io
git config --global user.name CI
git commit -m "BOSH Google CPI BOSH Release v${semver_version}"
mv "releases/$cpi_release_name/$cpi_blob" ../
echo "$cpi_sha" > "../$cpi_blob.sha1"
popd
|
<filename>src/pages/Experience4/index.js
/**
* @module Experiences/Experience0
*/
import React, { Profiler } from 'react'
import { Observable } from 'rxjs'
// Profiler callback: log which tree rendered, in what phase, and how long it took.
const onRender = (id, phase, actualDuration) =>
  console.log(id, phase, actualDuration)
// Minimal Observer: logs each notification kind it receives.
const observer = {
  next(x) { console.log(`Observer got a next value: ${x}`) },
  error(err) { console.error(`Observer got an error: ${err}`) },
  complete() { console.log('Observer got a complete notification') }
}
// Emits 1 synchronously, then 2 after one second, then completes.
const observable = new Observable((subscriber) => {
  subscriber.next(1)
  const finishLater = () => {
    subscriber.next(2)
    subscriber.complete()
  }
  setTimeout(finishLater, 1000)
})
/**
* @function Experience
* @return {Object} Return the dom of the Experience
*/
const Experience = () => {
const handleClick = () => {
const subscription = observable.subscribe(observer)
return () => {
subscription.unsubscribe()
}
}
return (
<Profiler id="Experience" onRender={onRender}>
<button onClick={handleClick}>
Call the Observable and then look at the console
</button>
</Profiler>
)
}
export default Experience
|
import requests
from bs4 import BeautifulSoup
def get_article_detail(article_url: str, timeout: float = 10.0) -> dict:
    """Fetch an article page and scrape its metadata.

    Args:
        article_url: URL of the article page to fetch.
        timeout: seconds to wait for the HTTP response (new, defaulted
            parameter — the original call had no timeout and could block
            indefinitely on an unresponsive server).

    Returns:
        Dict with keys ``Author``, ``LinkUrl``, ``PubTime``, ``Title`` and
        ``allPics`` (list of ``<img src>`` values) on success; a partial or
        empty dict if the request fails or an expected ``<meta>`` tag is
        missing (the resulting exception is caught and reported below).
    """
    article_info = {}
    try:
        response = requests.get(article_url, timeout=timeout)
        if response.status_code == 200:
            soup = BeautifulSoup(response.content, 'html.parser')
            # A missing <meta> tag makes soup.find() return None and the
            # subscript raise TypeError, which the except clause reports.
            article_info["Author"] = soup.find("meta", {"name": "author"})["content"]
            article_info["LinkUrl"] = article_url
            article_info["PubTime"] = soup.find("meta", {"name": "pubDate"})["content"]
            article_info["Title"] = soup.find("meta", {"property": "og:title"})["content"]
            article_info["allPics"] = [img["src"] for img in soup.find_all("img")]
        else:
            print("Failed to retrieve the article. Status code:", response.status_code)
    except Exception as e:
        print("An error occurred while fetching the article details:", str(e))
    return article_info
|
# Ask the user for a commit message.
msg = input("Enter the commit message: ")

# Pretend to commit with the entered message, then pretend to push,
# then confirm the upload. (All three steps are simulated prints.)
print(f"Committing changes with message: {msg}")
print("Pushing changes to remote repository...")
print("Files uploaded...")
|
import re
def extract_module_info(code_snippet):
    """Map fully-qualified Rust module paths to the files they are exported from.

    Scans for lines shaped like
    ``pub mod NAME; // Export `path/file.rs` as Rust module `a::b::c```
    and returns ``{'a::b::c': 'path/file.rs', ...}``.

    The module-path group is ``\\w+(?:::\\w+)*`` so paths of ANY nesting depth
    match; the original pattern hard-coded exactly three ``::``-separated
    segments and silently dropped everything else.
    """
    module_info = {}
    pattern = r'pub mod (\w+);.*?// Export `(.+?)` as Rust module `(\w+(?:::\w+)*)`'
    matches = re.findall(pattern, code_snippet, re.DOTALL)
    for _mod_name, file_path, module_path in matches:
        module_info[module_path] = file_path
    return module_info
# Example usage
# Demo: the sample snippet maps `mynewt::sys::console` -> `sys/console.rs`.
code_snippet = """
<gh_stars>1-10
//! Mynewt System API for Rust
pub mod console; // Export `sys/console.rs` as Rust module `mynewt::sys::console`
"""
print(extract_module_info(code_snippet))
|
def construct_file_path(check, use_a6_conf_dir):
    """Build the config-file glob for an Agent check.

    :param check: name of the check (e.g. ``"redis"``).
    :param use_a6_conf_dir: True for the Agent-6 layout, False for Agent 5.
    :return: glob pattern pointing at the check's configuration files.
    """
    agent5_dir = '/path/to/A5_conf'
    agent6_dir = '/path/to/A6_conf'
    if use_a6_conf_dir:
        return f'{agent6_dir}/{check}.d/conf*'
    return f'{agent5_dir}/{check}*'
|
import {Injectable} from '@angular/core';
import {
HttpRequest,
HttpHandler,
HttpEvent,
HttpInterceptor, HttpErrorResponse
} from '@angular/common/http';
import {Observable, of} from 'rxjs';
import {Router} from "@angular/router";
import {catchError} from "rxjs/internal/operators";
import {ToastrService} from 'ngx-toastr';
@Injectable()
export class InterceptService implements HttpInterceptor {
  constructor(private router: Router, private toastr: ToastrService) {
  }

  /**
   * Intercept every outgoing XHR request and route failures through
   * {@link handleAuthError}.
   *
   * @param request outgoing HTTP request
   * @param next next handler in the interceptor chain
   * @returns the response stream
   */
  intercept(request: HttpRequest<any>, next: HttpHandler): Observable<HttpEvent<any>> {
    return next.handle(request).pipe(catchError((error, caught) => {
      this.handleAuthError(error);
      // NOTE(review): handleAuthError rethrows non-401 errors, but 401 errors
      // fall through here and the raw error object is re-emitted on the
      // *success* channel — confirm that subscribers expect this.
      return of(error);
    }) as any);
  }

  /**
   * Handle authentication failures.
   *
   * On HTTP 401: show an error toast, clear local storage and redirect to
   * /login after one second. Any other status is rethrown for the caller.
   *
   * @param err the HTTP error response
   * @returns an observable of the error message (401 only)
   * @throws the original error for non-401 statuses
   */
  private handleAuthError(err: HttpErrorResponse): Observable<any> {
    let self = this;
    if (err.status === 401) {
      // BUGFIX: the toast previously read "Unauthorrized" (typo).
      this.toastr.error('Unauthorized', 'Error!');
      localStorage.clear();
      setTimeout(function () {
        self.router.navigate(['/login']);
      }, 1000);
      return of(err.message);
    }
    throw err;
  }
}
|
#!/bin/sh
# GET
# Simon Hugh Moore
#
# Retrieve meta data from a pass file.
#
# Meta data must be organized in the file like so:
#   meta_name: data
# for example:
#   login: user_name
#
# Usage: get META_NAME ENTRY        print the value
#        get META_NAME -c ENTRY     copy the value to the clipboard

# Print the value stored under meta key $1 in pass entry $2.
get(){
    # BUGFIX: use -f2- (not -f2) so values containing spaces are kept whole
    # instead of being truncated after their first word.
    pass show "$2" | rg "$1: " | cut -d' ' -f2-
}
case "$2" in
    -c) get "$1" "$3" | xclip -selection clipboard;;
    *) get "$@";;
esac
|
<gh_stars>0
/**
* Copyright 2015 Flipkart Internet Pvt. Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.flipkart.ranger.model;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.flipkart.ranger.ServiceFinderBuilders;
import com.flipkart.ranger.ServiceProviderBuilders;
import com.flipkart.ranger.finder.unsharded.UnshardedClusterFinder;
import com.flipkart.ranger.finder.unsharded.UnshardedClusterInfo;
import com.flipkart.ranger.healthcheck.Healthchecks;
import com.flipkart.ranger.serviceprovider.ServiceProvider;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import org.apache.curator.test.TestingCluster;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
/**
 * Integration tests for unsharded service discovery, backed by an embedded
 * 3-node ZooKeeper {@link TestingCluster}. Three service nodes are registered
 * up front and a finder is exercised against them.
 */
public class SimpleServiceProviderTest {
    private TestingCluster testingCluster;
    private ObjectMapper objectMapper;
    @Before
    public void startTestCluster() throws Exception {
        objectMapper = new ObjectMapper();
        // Spin up an in-process 3-member ZooKeeper ensemble for each test.
        testingCluster = new TestingCluster(3);
        testingCluster.start();
        registerService("localhost-1", 9000, 1);
        registerService("localhost-2", 9000, 1);
        registerService("localhost-3", 9000, 2);
    }
    @After
    public void stopTestCluster() throws Exception {
        if(null != testingCluster) {
            testingCluster.close();
        }
    }
    /**
     * Looks up nodes through an unsharded finder, then performs a large number
     * of lookups as a rough throughput check while counting how often each
     * host is returned.
     */
    @Test
    public void testBasicDiscovery() throws Exception {
        UnshardedClusterFinder serviceFinder = ServiceFinderBuilders.unshardedFinderBuilder()
                .withConnectionString(testingCluster.getConnectString())
                .withNamespace("test")
                .withServiceName("test-service")
                .withDeserializer(new Deserializer<UnshardedClusterInfo>() {
                    @Override
                    public ServiceNode<UnshardedClusterInfo> deserialize(byte[] data) {
                        try {
                            return objectMapper.readValue(data,
                                    new TypeReference<ServiceNode<UnshardedClusterInfo>>() {
                                    });
                        } catch (IOException e) {
                            // Deserialization failures surface as a null node.
                            e.printStackTrace();
                        }
                        return null;
                    }
                })
                .withDisableWatchers()
                .build();
        serviceFinder.start();
        {
            ServiceNode node = serviceFinder.get(null);
            Assert.assertNotNull(node);
            System.out.println(node.getHost());
        }
        // Rough benchmark: one million lookups, tallying host selection counts.
        Multiset<String> frequency = HashMultiset.create();
        long startTime = System.currentTimeMillis();
        for(long i = 0; i <1000000; i++)
        {
            ServiceNode node = serviceFinder.get(null);
            Assert.assertNotNull(node);
            frequency.add(node.getHost());
        }
        System.out.println("1 Million lookups and freq counting took (ms):" + (System.currentTimeMillis() -startTime));
        System.out.println("Frequency: " + frequency);
        //while (true);
    }
    /**
     * Registers one service node (host/port) under namespace "test" and
     * service "test-service" with a default always-healthy healthcheck.
     */
    private void registerService(String host, int port, int shardId) throws Exception {
        ServiceProvider<UnshardedClusterInfo> serviceProvider = ServiceProviderBuilders.unshardedServiceProviderBuilder()
                .withConnectionString(testingCluster.getConnectString())
                .withNamespace("test")
                .withServiceName("test-service")
                .withSerializer(new Serializer<UnshardedClusterInfo>() {
                    @Override
                    public byte[] serialize(ServiceNode<UnshardedClusterInfo> data) {
                        try {
                            return objectMapper.writeValueAsBytes(data);
                        } catch (JsonProcessingException e) {
                            e.printStackTrace();
                        }
                        return null;
                    }
                })
                .withHostname(host)
                .withPort(port)
                .withHealthcheck(Healthchecks.defaultHealthyCheck())
                .buildServiceDiscovery();
        serviceProvider.start();
    }
}
|
def find_invalid_values(arr):
    """Replace every "N/A" entry in a 2-D list with 0, in place.

    :param arr: list of lists to sanitize (mutated in place).
    :return: the same list object, for chaining.
    """
    for row in arr:
        for idx, value in enumerate(row):
            if value == "N/A":
                row[idx] = 0
    return arr
|
'use strict';
var superagent = require('superagent');
var path = require('path');
var fs = require('fs');
var resolutionCache = {};
module.exports = class Resolver {
constructor(options, github) {
this.options = {
organizations: [
'PolymerElements',
'Polymer'
],
cacheValidTime: 3600,
cacheFile: options.cacheFile || path.resolve(__dirname, '../cache/registry.json')
};
this.github = github;
this.readCache()
.then(() => {
console.log('using registry cache (' + this.itemCount + ' items)');
})
.catch((e) => {
let orgs = this.options.organizations;
(new Promise((resolve, reject) => {
let poll_then, poll_next;
poll_then = () => {
if(!poll_next(poll_then)) {
console.log('Resolver finished loading');
resolve();
}
}
poll_next = (cb) => {
if(orgs && orgs.length) {
console.log('Polling GitHub organization repositories for', orgs[0]);
this.pollRepos(orgs.shift())
.then(cb);
return true;
} else {
return false;
}
}
if(!orgs || !orgs.length) {
throw new Error('No organizations defined');
}
poll_next(poll_then);
}))
.then(() => {
this.writeCache();
})
.catch((e) => {
console.errror('caching failed');
console.error(e.stack);
});
});
}
lookup(assetName) {
var asset;
Object.keys(resolutionCache).forEach((registryItemName) => {
if(!asset && (assetName.indexOf(registryItemName) === 0)) {
asset = {
name: registryItemName,
file: assetName.slice(registryItemName.length + 1),
clone_url: resolutionCache[registryItemName].clone_url,
updated_at: resolutionCache[registryItemName].updated_at
};
}
});
return asset;
}
pollRepos(organization, page) {
page = page || 1;
return new Promise((resolve, reject) => {
try {
this.github.repos.getFromOrg({
org: organization,
page: page,
per_page: 100 // max 100, see https://developer.github.com/guides/traversing-with-pagination/
}, (err, res) => {
if(err) {
console.log(err);
reject(err);
return;
}
res.forEach((repo) => {
if(!resolutionCache.hasOwnProperty(repo.name)) {
console.log('indexing ' + repo.name + ' => ' + repo.clone_url);
resolutionCache[repo.name] = {
updated_at: (new Date()).getTime(),
clone_url: repo.clone_url
}
}
});
if(res.length === 100) {
this.pollRepos(organization, page + 1)
.then(resolve);
} else {
resolve();
}
});
} catch(e) {
reject(e);
}
});
}
readCache() {
return new Promise((resolve, reject) => {
var stat, content;
try {
stat = fs.statSync(this.options.cacheFile);
if((stat.atime.getTime() + this.options.cacheValidTime) > (new Date()).getTime()) {
return reject(new Error('registry cache outdated'));
}
content = fs.readFileSync(this.options.cacheFile);
resolutionCache = JSON.parse(content);
console.log('read ' + this.itemCount + ' items from cache');
} catch(e) {
return reject(e);
}
resolve();
});
}
writeCache() {
fs.writeFileSync(this.options.cacheFile, JSON.stringify(resolutionCache));
console.log('written ' + Object.keys(resolutionCache).length + ' entries to ' + this.options.cacheFile);
}
resolveVersion(assetName, version) {
version = version || 'master';
return version;
}
get itemCount() {
return Object.keys(resolutionCache).length;
}
}
|
<reponame>emersonbrs/desafio-frontend<filename>src/components/Button/styles.ts
import styled from 'styled-components';
// Primary action button: full-width yellow pill that dims while hovered.
// (The template literal below is CSS consumed by styled-components at
// runtime — do not edit it casually.)
export const Container = styled.button`
  width: 100%;
  background: var(--yellow);
  color: var(--blue-grey);
  height: 3.75rem;
  border-radius: 0.5rem;
  border: 0;
  font-weight: bold;
  margin-top: 1rem;
  transition: filter 0.2s;
  &:hover {
    filter: brightness(0.7);
  }
`;
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# Builds, verifies and pushes a released Airflow PROD image from PyPI
# packages, tagging it as apache/airflow:<version>-python<py> (and
# apache/airflow:<version> for the default Python).

# shellcheck disable=SC2030,SC2031
: "${PYTHON_MAJOR_MINOR_VERSION:?"ERROR: PYTHON_MAJOR_MINOR_VERSION not set !!!!"}"
: "${INSTALL_AIRFLOW_VERSION:?"ERROR: INSTALL_AIRFLOW_VERSION not set !!!!"}"
export FORCE_ANSWER_TO_QUESTIONS="yes"
export VERBOSE="true"
# This is an image built from the "release" tag (either RC or final one).
# In this case all packages are taken from PyPI rather than from locally built sources
export INSTALL_FROM_PYPI="true"
export INSTALL_FROM_DOCKER_CONTEXT_FILES="false"
export INSTALL_PROVIDERS_FROM_SOURCES="false"
export AIRFLOW_PRE_CACHED_PIP_PACKAGES="false"
export DOCKER_CACHE="local"
export DOCKER_TAG=${INSTALL_AIRFLOW_VERSION}-python${PYTHON_MAJOR_MINOR_VERSION}
export AIRFLOW_CONSTRAINTS_REFERENCE="constraints-${INSTALL_AIRFLOW_VERSION}"
export AIRFLOW_CONSTRAINTS="constraints"
# shellcheck source=scripts/ci/libraries/_script_init.sh
. "$( dirname "${BASH_SOURCE[0]}" )/../libraries/_script_init.sh"
echo
echo "Building and pushing ${INSTALL_AIRFLOW_VERSION} Airflow PROD image for ${PYTHON_MAJOR_MINOR_VERSION}"
echo
rm -rf "${BUILD_CACHE_DIR}"
# BUGFIX: the glob must stay outside the quotes — quoting it made rm look for
# a file literally named '*' and leave docker-context-files untouched.
rm -rf "${AIRFLOW_SOURCES}/docker-context-files/"*
build_images::prepare_prod_build
build_images::build_prod_images
verify_image::verify_prod_image "${AIRFLOW_PROD_IMAGE}"
export RELEASE_IMAGE="apache/airflow:${INSTALL_AIRFLOW_VERSION}-python${PYTHON_MAJOR_MINOR_VERSION}"
echo
echo "Pushing airflow PROD image as ${RELEASE_IMAGE}"
echo
# Re-tag the image to be published in "apache/airflow"
docker tag "${AIRFLOW_PROD_IMAGE}" "${RELEASE_IMAGE}"
docker push "${RELEASE_IMAGE}"
if [[ ${PYTHON_MAJOR_MINOR_VERSION} == "${DEFAULT_PYTHON_MAJOR_MINOR_VERSION}" ]]; then
    export DEFAULT_VERSION_IMAGE="apache/airflow:${INSTALL_AIRFLOW_VERSION}"
    echo
    echo "Pushing default airflow image as ${DEFAULT_VERSION_IMAGE}"
    echo
    # In case of default Python version we also push ":version" tag
    docker tag "${RELEASE_IMAGE}" "${DEFAULT_VERSION_IMAGE}"
    docker push "${DEFAULT_VERSION_IMAGE}"
fi
|
# Copy the prebuilt kanji example data and the fonts into the source tree.
cp -r graded-kanji-examples/dist/* src/data
cp -r fonts src
# Publish: mirror src/ into docs/ (site root), then write minified copies.
mkdir -p docs
cp -r src/* docs
minify -r src -o docs
|
#!/bin/bash
source includes/core.sh
:: "Start"
# Drop previously installed dependencies so composer performs a clean install.
rm -rf vendor/
# Run composer inside the php-fpm container, then restore web-root ownership
# to the www-data user (composer ran as root).
warden env exec -u root -T php-fpm bash -c "composer clearcache && composer install"
warden env exec -u root -T php-fpm bash -c "chown www-data:www-data -R /var/www/html/"
:: "Finished."
|
import App from "../src/app";
import * as assert from "power-assert";
// Verifies that App only installs its __import_* hooks on the (fake) global
// window object in development mode, and that each hook aliases the
// corresponding App method.
describe("app", () => {
    context("production mode", () => {
        // Fresh fake window so hooks from other tests cannot leak in.
        let window:any = {};
        window.__import_view_component__ = null;
        window.__import_user_attr__ = null;
        window.__import_user_attrs_value__ = null;
        global.window = window;
        const app = new App("production");
        it("does not exist __import_view_component__", () => {
            assert(!window.__import_view_component__);
        });
        it("does not exist __import_user_attr__", () => {
            assert(!window.__import_user_attr__);
        });
        it("does not exist __import_user_attrs_value__", () => {
            assert(!window.__import_user_attrs_value__);
        });
    });
    context("development mode", () => {
        let window:any = {};
        window.__import_view_component__ = null;
        window.__import_user_attr__ = null;
        window.__import_user_attrs_value__ = null;
        global.window = window;
        // No argument: App defaults to development mode.
        const app = new App();
        describe("__import_view_component__", () => {
            it("is alias of app.view.import", () => {
                app.__import_view_component__("test", ()=>{});
                assert(app.view.components[0].id === "dev-test");
            });
            it("exists __import_view_component__ in window", () => {
                assert(window.__import_view_component__);
            });
        });
        describe("__import_user_attr__", () => {
            it("is alias of app.user.import", () => {
                const attr = { key : "value" };
                app.__import_user_attr__("test", attr);
                assert.deepEqual(app.user.attrs["dev-test"], attr);
            });
            it("exists __import_user_attr__", () => {
                assert(window.__import_user_attr__);
            });
        });
        describe("__import_user_attrs_value__", () => {
            it("is alias of app.user.setAttrs", () => {
                const attr = { key : "value" };
                const attrs = { attr : attr };
                app.__import_user_attrs_value__(attrs);
                assert.deepEqual(app.user.attrs.attr, attr);
            });
            it("exists __import_user_attrs_value__", () => {
                assert(window.__import_user_attrs_value__);
            });
        });
    });
});
|
#!/bin/bash
# Rebuild min.js by concatenating the site scripts in dependency order.
# The leading blank line reproduces the historical `echo "" > min.js` output.
printf '\n' > min.js
cat js/animateRotate.js \
    js/data.js \
    js/dropmenu.js \
    js/hiring.js \
    js/stat.js \
    js/upgrades.js \
    js/science.js >> min.js
|
// Generator that produces the integers 1 through 5, one per next() call.
function* gen() {
  yield* [1, 2, 3, 4, 5];
}
// for (let g of gen()) {
//   console.log(g);
// }
// Drive the generator manually: the first five calls log
// { value: 1..5, done: false }; the remaining two log
// { value: undefined, done: true } because the generator is exhausted.
var geniter = gen();
console.log(geniter.next());
console.log(geniter.next());
console.log(geniter.next());
console.log(geniter.next());
console.log(geniter.next());
console.log(geniter.next());
console.log(geniter.next());
|
<gh_stars>100-1000
/*
* Copyright © 2021 Lisk Foundation
*
* See the LICENSE file at the top-level directory of this distribution
* for licensing information.
*
* Unless otherwise agreed in a custom licensing agreement with the Lisk Foundation,
* no part of this software, including this file, may be copied, modified,
* propagated, or distributed except according to the terms contained in the
* LICENSE file.
*
* Removal or modification of this copyright notice is prohibited.
*/
import * as React from 'react';
import styles from './Dialog.module.scss';
export interface DialogProps {
	// Whether the dialog should be shown; changes are mirrored to local state.
	open: boolean;
	// Invoked after the dialog transitions to open.
	onOpen?: () => void;
	// Invoked after the dialog transitions to closed.
	onClose?: () => void;
}

// Lets children (e.g. a close button) request the dialog to close without
// prop-drilling.
// eslint-disable-next-line @typescript-eslint/no-empty-function
export const DialogContext = React.createContext({ closeDialog: () => {} });

/**
 * Modal dialog shell. Visibility is driven by the `open` prop; opening and
 * closing fire the optional callbacks. Children can close the dialog via
 * DialogContext.
 *
 * NOTE(review): the effect runs on mount too, so onOpen/onClose fires once
 * with the initial `open` value — confirm callers expect that.
 */
const Dialog: React.FC<DialogProps> = props => {
	const [open, setOpen] = React.useState(props.open);

	const triggerClose = () => {
		setOpen(false);
		if (props.onClose) {
			props.onClose();
		}
	};

	const triggerOpen = () => {
		setOpen(true);
		if (props.onOpen) {
			props.onOpen();
		}
	};

	// Keep local state in sync with the controlled `open` prop.
	React.useEffect(() => {
		if (props.open) {
			triggerOpen();
		} else {
			triggerClose();
		}
	}, [props.open]);

	return (
		<div className={`${styles.root} ${open ? styles.open : styles.close}`}>
			<div className={styles.background}>
				<div className={styles.modal}>
					<DialogContext.Provider value={{ closeDialog: triggerClose }}>
						{props.children}
					</DialogContext.Provider>
				</div>
			</div>
		</div>
	);
};

export default Dialog;
|
<reponame>ooooo-youwillsee/leetcode
//
// Created by ooooo on 2020/1/25.
//
#ifndef CPP_0067__SOLUTION1_H_
#define CPP_0067__SOLUTION1_H_
#include <algorithm>
#include <iostream>
#include <string>
using namespace std;
/**
 * LeetCode 67 — Add Binary.
 * Walks both strings from the least-significant end, adding digit by digit
 * with a carry, then reverses the accumulated result.
 *
 * Replaces the original O(n^2) approach (repeated insert(0, ...) plus substr
 * allocations) with O(n) push_back + reverse, and per-character arithmetic
 * instead of temporary one-character strings.
 */
class Solution {
 public:
  // Returns the binary sum of the binary strings a and b.
  std::string addBinary(std::string a, std::string b) {
    std::string ans;
    int i = static_cast<int>(a.size()) - 1;
    int j = static_cast<int>(b.size()) - 1;
    int carry = 0;  // pending carry from the previous column
    while (i >= 0 || j >= 0 || carry != 0) {
      int sum = carry;
      if (i >= 0) sum += a[i--] - '0';
      if (j >= 0) sum += b[j--] - '0';
      ans.push_back(static_cast<char>('0' + (sum & 1)));
      carry = sum >> 1;
    }
    // Digits were produced least-significant first.
    std::reverse(ans.begin(), ans.end());
    return ans;
  }
};
#endif //CPP_0067__SOLUTION1_H_
|
#! /bin/bash

##########################################################################
# >> SETUP DEFAULT VALUES
#
# Every feature toggle / tunable below honours a value provided by the
# container environment and falls back to the shown default otherwise.
##########################################################################

DOVECOT_MAILBOX_FORMAT="${DOVECOT_MAILBOX_FORMAT:=maildir}"
DOVECOT_TLS="${DOVECOT_TLS:=no}"
ENABLE_CLAMAV="${ENABLE_CLAMAV:=0}"
ENABLE_FAIL2BAN="${ENABLE_FAIL2BAN:=0}"
ENABLE_FETCHMAIL="${ENABLE_FETCHMAIL:=0}"
ENABLE_LDAP="${ENABLE_LDAP:=0}"
ENABLE_MANAGESIEVE="${ENABLE_MANAGESIEVE:=0}"
ENABLE_POP3="${ENABLE_POP3:=0}"
ENABLE_POSTGREY="${ENABLE_POSTGREY:=0}"
ENABLE_QUOTAS="${ENABLE_QUOTAS:=1}"
ENABLE_SASLAUTHD="${ENABLE_SASLAUTHD:=0}"
ENABLE_SPAMASSASSIN="${ENABLE_SPAMASSASSIN:=0}"
ENABLE_SRS="${ENABLE_SRS:=0}"
FETCHMAIL_POLL="${FETCHMAIL_POLL:=300}"
FETCHMAIL_PARALLEL="${FETCHMAIL_PARALLEL:=0}"
LDAP_START_TLS="${LDAP_START_TLS:=no}"
# REPORT_INTERVAL is honoured as a legacy fallback for LOGROTATE_INTERVAL.
LOGROTATE_INTERVAL="${LOGROTATE_INTERVAL:=${REPORT_INTERVAL:-daily}}"
LOGWATCH_INTERVAL="${LOGWATCH_INTERVAL:=none}"
MOVE_SPAM_TO_JUNK="${MOVE_SPAM_TO_JUNK:=1}"
NETWORK_INTERFACE="${NETWORK_INTERFACE:=eth0}"
ONE_DIR="${ONE_DIR:=0}"
OVERRIDE_HOSTNAME="${OVERRIDE_HOSTNAME}"
POSTGREY_AUTO_WHITELIST_CLIENTS="${POSTGREY_AUTO_WHITELIST_CLIENTS:=5}"
POSTGREY_DELAY="${POSTGREY_DELAY:=300}"
POSTGREY_MAX_AGE="${POSTGREY_MAX_AGE:=35}"
POSTGREY_TEXT="${POSTGREY_TEXT:=Delayed by Postgrey}"
POSTFIX_INET_PROTOCOLS="${POSTFIX_INET_PROTOCOLS:=all}"
POSTFIX_MAILBOX_SIZE_LIMIT="${POSTFIX_MAILBOX_SIZE_LIMIT:=0}" # no limit by default
POSTFIX_MESSAGE_SIZE_LIMIT="${POSTFIX_MESSAGE_SIZE_LIMIT:=10240000}" # ~10 MB by default
POSTSCREEN_ACTION="${POSTSCREEN_ACTION:=enforce}"
REPORT_RECIPIENT="${REPORT_RECIPIENT:="0"}"
SMTP_ONLY="${SMTP_ONLY:=0}"
# Record whether the variable was set at all (even to empty) before defaulting,
# so later logic can distinguish "unset" from "explicitly 0".
SPAMASSASSIN_SPAM_TO_INBOX_IS_SET="$( if [[ -n ${SPAMASSASSIN_SPAM_TO_INBOX+'set'} ]]; then echo true ; else echo false ; fi )"
SPAMASSASSIN_SPAM_TO_INBOX="${SPAMASSASSIN_SPAM_TO_INBOX:=0}"
SPOOF_PROTECTION="${SPOOF_PROTECTION:=0}"
SRS_SENDER_CLASSES="${SRS_SENDER_CLASSES:=envelope_sender}"
SSL_TYPE="${SSL_TYPE:=''}"
TLS_LEVEL="${TLS_LEVEL:=modern}"
VIRUSMAILS_DELETE_DELAY="${VIRUSMAILS_DELETE_DELAY:=7}"

##########################################################################
# >> GLOBAL VARIABLES
##########################################################################

# FQDN and domain of the container; may be overridden by OVERRIDE_HOSTNAME
# later in _check_hostname.
HOSTNAME="$(hostname -f)"
DOMAINNAME="$(hostname -d)"
# Checksums of the monitored config files, used for change detection.
CHKSUM_FILE=/tmp/docker-mailserver-config-chksum
##########################################################################
# >> REGISTER FUNCTIONS
#
# Add your new functions/methods here.
#
# NOTE: Position matters when registering a function in stacks.
# First in First out
#
# Execution Logic:
# > check functions
# > setup functions
# > fix functions
# > misc functions
# > start-daemons
#
# Example:
#
# if [[ CONDITION IS MET ]]
# then
# _register_{setup,fix,check,start}_{functions,daemons} "${FUNCNAME}"
# fi
#
# Implement them in the section-group: {check, setup, fix, start}
#
##########################################################################
# Populate the check/setup/fix/misc/daemon stacks in execution order.
# Registration order matters: stacks are consumed first-in-first-out, and
# several entries carry explicit ordering constraints (see inline comments).
function register_functions
{
  _notify 'tasklog' 'Initializing setup'
  _notify 'task' 'Registering check, setup, fix, misc and start-daemons functions'

  ################### >> check funcs

  _register_check_function "_check_environment_variables"
  _register_check_function "_check_hostname"

  ################### >> setup funcs

  _register_setup_function "_setup_default_vars"
  _register_setup_function "_setup_file_permissions"

  # Dovecot serves IMAP/POP3 and is irrelevant for an SMTP-only relay.
  if [[ ${SMTP_ONLY} -ne 1 ]]
  then
    _register_setup_function "_setup_dovecot"
    _register_setup_function "_setup_dovecot_dhparam"
    _register_setup_function "_setup_dovecot_quota"
    _register_setup_function "_setup_dovecot_local_user"
  fi

  [[ ${ENABLE_LDAP} -eq 1 ]] && _register_setup_function "_setup_ldap"
  [[ ${ENABLE_SASLAUTHD} -eq 1 ]] && _register_setup_function "_setup_saslauthd"
  [[ ${ENABLE_POSTGREY} -eq 1 ]] && _register_setup_function "_setup_postgrey"
  _register_setup_function "_setup_dkim"
  _register_setup_function "_setup_ssl"
  [[ ${POSTFIX_INET_PROTOCOLS} != "all" ]] && _register_setup_function "_setup_inet_protocols"
  _register_setup_function "_setup_docker_permit"
  _register_setup_function "_setup_mailname"
  _register_setup_function "_setup_amavis"
  _register_setup_function "_setup_dmarc_hostname"
  _register_setup_function "_setup_postfix_hostname"
  _register_setup_function "_setup_dovecot_hostname"
  _register_setup_function "_setup_postfix_smtputf8"
  _register_setup_function "_setup_postfix_sasl"
  _register_setup_function "_setup_postfix_sasl_password"
  _register_setup_function "_setup_security_stack"
  _register_setup_function "_setup_postfix_aliases"
  _register_setup_function "_setup_postfix_vhost"
  _register_setup_function "_setup_postfix_dhparam"
  _register_setup_function "_setup_postfix_postscreen"
  _register_setup_function "_setup_postfix_sizelimits"

  # needs to come after _setup_postfix_aliases
  [[ ${SPOOF_PROTECTION} -eq 1 ]] && _register_setup_function "_setup_spoof_protection"

  if [[ ${ENABLE_SRS} -eq 1  ]]
  then
    _register_setup_function "_setup_SRS"
    _register_start_daemon "_start_daemons_postsrsd"
  fi

  _register_setup_function "_setup_postfix_access_control"

  [[ -n ${DEFAULT_RELAY_HOST:-''} ]] && _register_setup_function "_setup_postfix_default_relay_host"
  [[ -n ${RELAY_HOST:-''} ]] && _register_setup_function "_setup_postfix_relay_hosts"
  [[ ${ENABLE_POSTFIX_VIRTUAL_TRANSPORT:-0} -eq 1 ]] && _register_setup_function "_setup_postfix_virtual_transport"

  _register_setup_function "_setup_postfix_override_configuration"
  _register_setup_function "_setup_environment"
  _register_setup_function "_setup_logrotate"
  _register_setup_function "_setup_mail_summary"
  _register_setup_function "_setup_logwatch"
  _register_setup_function "_setup_user_patches"

  # compute last as the config files are modified in-place
  _register_setup_function "_setup_chksum_file"

  ################### >> fix funcs

  _register_fix_function "_fix_var_mail_permissions"
  _register_fix_function "_fix_var_amavis_permissions"
  [[ ${ENABLE_CLAMAV} -eq 0 ]] && _register_fix_function "_fix_cleanup_clamav"
  [[ ${ENABLE_SPAMASSASSIN} -eq 0 ]] &&	_register_fix_function "_fix_cleanup_spamassassin"

  ################### >> misc funcs

  _register_misc_function "_misc_save_states"

  ################### >> daemon funcs

  _register_start_daemon "_start_daemons_cron"
  _register_start_daemon "_start_daemons_rsyslog"

  [[ ${SMTP_ONLY} -ne 1 ]] && _register_start_daemon "_start_daemons_dovecot"

  # needs to be started before saslauthd
  _register_start_daemon "_start_daemons_opendkim"
  _register_start_daemon "_start_daemons_opendmarc"

  #postfix uses postgrey, needs to be started before postfix
  [[ ${ENABLE_POSTGREY} -eq 1 ]] &&	_register_start_daemon "_start_daemons_postgrey"

  _register_start_daemon "_start_daemons_postfix"

  [[ ${ENABLE_SASLAUTHD} -eq 1 ]] && _register_start_daemon "_start_daemons_saslauthd"

  # care :: needs to run after postfix
  [[ ${ENABLE_FAIL2BAN} -eq 1 ]] &&	_register_start_daemon "_start_daemons_fail2ban"
  [[ ${ENABLE_FETCHMAIL} -eq 1 ]] && _register_start_daemon "_start_daemons_fetchmail"
  [[ ${ENABLE_CLAMAV} -eq 1 ]] &&	_register_start_daemon "_start_daemons_clamav"
  [[ ${ENABLE_LDAP} -eq 0 ]] && _register_start_daemon "_start_changedetector"

  _register_start_daemon "_start_daemons_amavis"
}
##########################################################################
# << REGISTER FUNCTIONS
##########################################################################
# ! ––––––––––––––––––––––––––––––––––––––––––––––
# ! ––– CARE – BEGIN –––––––––––––––––––––––––––––
# ! ––––––––––––––––––––––––––––––––––––––––––––––
##########################################################################
# >> CONSTANTS
##########################################################################
# Global registration stacks. Populated by the _register_* helpers below and
# consumed in order by check(), setup(), the fix/misc runners and the daemon
# starter.
declare -a FUNCS_SETUP
declare -a FUNCS_FIX
declare -a FUNCS_CHECK
declare -a FUNCS_MISC
declare -a DAEMONS_START
##########################################################################
# << CONSTANTS
##########################################################################
##########################################################################
# >> protected register_functions
##########################################################################
# Queue a daemon-start function (by name) for the start phase.
_register_start_daemon() {
  DAEMONS_START+=("${1}")
  _notify 'inf' "${1}() registered"
}
# Queue a setup function (by name) for the setup phase.
_register_setup_function() {
  FUNCS_SETUP+=("${1}")
  _notify 'inf' "${1}() registered"
}
# Queue a fix function (by name) for the fix phase.
_register_fix_function() {
  FUNCS_FIX+=("${1}")
  _notify 'inf' "${1}() registered"
}
# Queue a check function (by name) for the pre-flight check phase.
_register_check_function() {
  FUNCS_CHECK+=("${1}")
  _notify 'inf' "${1}() registered"
}
# Queue a miscellaneous function (by name) for the misc phase.
_register_misc_function() {
  FUNCS_MISC+=("${1}")
  _notify 'inf' "${1}() registered"
}
##########################################################################
# << protected register_functions
##########################################################################
# Abort container start-up after a failed configuration check.
_defunc() {
  _notify 'fatal' "Please fix your configuration. Exiting..."
  exit 1
}
# Run the daemon-start command given in $1 (all output discarded) and, when
# DMS_DEBUG is enabled, report OK / STARTUP FAILED.
# Returns the command's exit status.
function display_startup_daemon
{
  ${1} &>/dev/null
  local RES=${?}
  if [[ ${DMS_DEBUG} -eq 1 ]]
  then
    if [[ ${RES} -eq 0 ]]
    then
      _notify 'inf' " OK"
    else
      _notify 'err' " STARTUP FAILED"
    fi
  fi
  return "${RES}"
}
# ! ––––––––––––––––––––––––––––––––––––––––––––––
# ! ––– CARE – END –––––––––––––––––––––––––––––––
# ! ––––––––––––––––––––––––––––––––––––––––––––––
##########################################################################
# >> Check Stack
#
# Description: Place functions for initial check of container sanity
##########################################################################
# Run every registered check function; abort start-up on the first failure.
check() {
  _notify 'tasklog' 'Checking configuration'
  for FUNC in "${FUNCS_CHECK[@]}" ; do
    ${FUNC} || _defunc
  done
}
# Ensure HOSTNAME/DOMAINNAME are a fully-qualified name, honouring an
# OVERRIDE_HOSTNAME supplied by the environment. When no FQDN can be derived,
# supervisord is killed (terminating the container) and 1 is returned.
function _check_hostname
{
  _notify "task" "Check that hostname/domainname is provided or overridden (no default docker hostname/kubernetes) [in ${FUNCNAME[0]}]"
  if [[ -n ${OVERRIDE_HOSTNAME} ]]
  then
    export HOSTNAME=${OVERRIDE_HOSTNAME}
    # strip the first label to obtain the domain part
    export DOMAINNAME="${HOSTNAME#*.}"
  fi
  _notify 'inf' "Domain has been set to ${DOMAINNAME}"
  _notify 'inf' "Hostname has been set to ${HOSTNAME}"
  # require at least one dot, i.e. a fully-qualified hostname
  if ( ! grep -E '^(\S+[.]\S+)$' <<< "${HOSTNAME}" >/dev/null )
  then
    _notify 'err' "Setting hostname/domainname is required"
    kill "$(< /var/run/supervisord.pid)" && return 1
  else
    return 0
  fi
}
# Placeholder check for conflicting environment variables; currently always
# succeeds.
_check_environment_variables() {
  _notify "task" "Check that there are no conflicts with env variables [in ${FUNCNAME[0]}]"
  return 0
}
##########################################################################
# << Check Stack
##########################################################################
##########################################################################
# >> Setup Stack
#
# Description: Place functions for functional configurations here
##########################################################################
# Run every registered setup function in registration order.
setup() {
  _notify 'tasklog' 'Configuring mail server'
  for FUNC in "${FUNCS_SETUP[@]}" ; do
    ${FUNC}
  done
}
# Derive the hostname-dependent defaults (postmaster/report addresses, report
# triggers) and persist the effective configuration into /root/.bashrc so
# child shells see the same settings.
function _setup_default_vars
{
  _notify 'task' "Setting up default variables"
  # update POSTMASTER_ADDRESS - must be done after _check_hostname
  POSTMASTER_ADDRESS="${POSTMASTER_ADDRESS:="postmaster@${DOMAINNAME}"}"
  # update REPORT_SENDER - must be done after _check_hostname
  REPORT_SENDER="${REPORT_SENDER:="mailserver-report@${HOSTNAME}"}"
  PFLOGSUMM_SENDER="${PFLOGSUMM_SENDER:=${REPORT_SENDER}}"
  # set PFLOGSUMM_TRIGGER here for backwards compatibility
  # when REPORT_RECIPIENT is on the old method should be used
  # ! needs to be a string comparison
  if [[ ${REPORT_RECIPIENT} == "0" ]]
  then
    PFLOGSUMM_TRIGGER="${PFLOGSUMM_TRIGGER:="none"}"
  else
    PFLOGSUMM_TRIGGER="${PFLOGSUMM_TRIGGER:="logrotate"}"
  fi
  # expand address to simplify the rest of the script
  # BUGFIX: the second condition previously re-tested "0", so the "1" shortcut
  # never expanded to the postmaster address; the redundant self-assignment of
  # REPORT_RECIPIENT has been dropped as a no-op.
  if [[ ${REPORT_RECIPIENT} == "0" ]] || [[ ${REPORT_RECIPIENT} == "1" ]]
  then
    REPORT_RECIPIENT="${POSTMASTER_ADDRESS}"
  fi
  PFLOGSUMM_RECIPIENT="${PFLOGSUMM_RECIPIENT:=${REPORT_RECIPIENT}}"
  LOGWATCH_RECIPIENT="${LOGWATCH_RECIPIENT:=${REPORT_RECIPIENT}}"
  # Persist the effective configuration for interactive/root shells.
  {
    echo "DOVECOT_MAILBOX_FORMAT=${DOVECOT_MAILBOX_FORMAT}"
    echo "DOVECOT_TLS=${DOVECOT_TLS}"
    echo "ENABLE_CLAMAV=${ENABLE_CLAMAV}"
    echo "ENABLE_FAIL2BAN=${ENABLE_FAIL2BAN}"
    echo "ENABLE_FETCHMAIL=${ENABLE_FETCHMAIL}"
    echo "ENABLE_LDAP=${ENABLE_LDAP}"
    echo "ENABLE_MANAGESIEVE=${ENABLE_MANAGESIEVE}"
    echo "ENABLE_POP3=${ENABLE_POP3}"
    echo "ENABLE_POSTGREY=${ENABLE_POSTGREY}"
    echo "ENABLE_QUOTAS=${ENABLE_QUOTAS}"
    echo "ENABLE_SASLAUTHD=${ENABLE_SASLAUTHD}"
    echo "ENABLE_SPAMASSASSIN=${ENABLE_SPAMASSASSIN}"
    echo "ENABLE_SRS=${ENABLE_SRS}"
    echo "FETCHMAIL_POLL=${FETCHMAIL_POLL}"
    echo "FETCHMAIL_PARALLEL=${FETCHMAIL_PARALLEL}"
    echo "LDAP_START_TLS=${LDAP_START_TLS}"
    echo "LOGROTATE_INTERVAL=${LOGROTATE_INTERVAL}"
    echo "LOGWATCH_INTERVAL=${LOGWATCH_INTERVAL}"
    echo "MOVE_SPAM_TO_JUNK=${MOVE_SPAM_TO_JUNK}"
    echo "NETWORK_INTERFACE=${NETWORK_INTERFACE}"
    echo "ONE_DIR=${ONE_DIR}"
    echo "OVERRIDE_HOSTNAME=${OVERRIDE_HOSTNAME}"
    echo "POSTGREY_AUTO_WHITELIST_CLIENTS=${POSTGREY_AUTO_WHITELIST_CLIENTS}"
    echo "POSTGREY_DELAY=${POSTGREY_DELAY}"
    echo "POSTGREY_MAX_AGE=${POSTGREY_MAX_AGE}"
    echo "POSTGREY_TEXT=${POSTGREY_TEXT}"
    echo "POSTFIX_INET_PROTOCOLS=${POSTFIX_INET_PROTOCOLS}"
    echo "POSTFIX_MAILBOX_SIZE_LIMIT=${POSTFIX_MAILBOX_SIZE_LIMIT}"
    echo "POSTFIX_MESSAGE_SIZE_LIMIT=${POSTFIX_MESSAGE_SIZE_LIMIT}"
    echo "POSTSCREEN_ACTION=${POSTSCREEN_ACTION}"
    echo "REPORT_RECIPIENT=${REPORT_RECIPIENT}"
    echo "SMTP_ONLY=${SMTP_ONLY}"
    echo "SPAMASSASSIN_SPAM_TO_INBOX=${SPAMASSASSIN_SPAM_TO_INBOX}"
    echo "SPOOF_PROTECTION=${SPOOF_PROTECTION}"
    echo "SRS_SENDER_CLASSES=${SRS_SENDER_CLASSES}"
    echo "SSL_TYPE=${SSL_TYPE}"
    echo "TLS_LEVEL=${TLS_LEVEL}"
    echo "VIRUSMAILS_DELETE_DELAY=${VIRUSMAILS_DELETE_DELAY}"
    echo "DMS_DEBUG=${DMS_DEBUG}"
  } >>/root/.bashrc
}
# File/folder permissions are fine when using docker volumes, but may be wrong
# when file system folders are mounted into the container.
# Set the expected values and create missing folders/files just in case.
function _setup_file_permissions
{
  _notify 'task' "Setting file/folder permissions"
  mkdir -p /var/log/supervisor
  mkdir -p /var/log/mail
  chown syslog:root /var/log/mail
  # NOTE(review): the ClamAV log files are created and chowned unconditionally,
  # even when ENABLE_CLAMAV=0 — confirm whether that is intended.
  touch /var/log/mail/clamav.log
  chown clamav:adm /var/log/mail/clamav.log
  chmod 640 /var/log/mail/clamav.log
  touch /var/log/mail/freshclam.log
  chown clamav:adm /var/log/mail/freshclam.log
  chmod 640 /var/log/mail/freshclam.log
}
# Write the initial checksums of the monitored configuration files to
# CHKSUM_FILE so the change detector can spot later edits. An empty file is
# created when no config directory is mounted.
function _setup_chksum_file
{
  _notify 'task' "Setting up configuration checksum file"
  if [[ -d /tmp/docker-mailserver ]]
  then
    _notify 'inf' "Creating ${CHKSUM_FILE}"
    _monitored_files_checksums >"${CHKSUM_FILE}"
  else
    # We could just skip the file, but perhaps config can be added later?
    # If so it must be processed by the check for changes script
    _notify 'inf' "Creating empty ${CHKSUM_FILE} (no config)"
    touch "${CHKSUM_FILE}"
  fi
}
# Write the container's domain into /etc/mailname (used by Debian MTAs).
_setup_mailname() {
  _notify 'task' 'Setting up Mailname'
  _notify 'inf' "Creating /etc/mailname"
  echo "${DOMAINNAME}" > /etc/mailname
}
# Substitute the container's FQDN for the commented-out example hostname in
# the Amavis node-id config.
function _setup_amavis
{
  _notify 'task' 'Setting up Amavis'
  _notify 'inf' "Applying hostname to /etc/amavis/conf.d/05-node_id"
  # shellcheck disable=SC2016
  sed -i 's/^#\$myhostname = "mail.example.com";/\$myhostname = "'"${HOSTNAME}"'";/' /etc/amavis/conf.d/05-node_id
}
function _setup_dmarc_hostname
{
  # Configure OpenDMARC to sign/validate as this host: AuthservID is the
  # identifier written into Authentication-Results headers, and
  # TrustedAuthservIDs lists identifiers whose results we accept.
  _notify 'task' 'Setting up dmarc'
  _notify 'inf' "Applying hostname to /etc/opendmarc.conf"
  sed -i -e 's/^AuthservID.*$/AuthservID '"${HOSTNAME}"'/g' \
    -e 's/^TrustedAuthservIDs.*$/TrustedAuthservIDs '"${HOSTNAME}"'/g' /etc/opendmarc.conf
}
function _setup_postfix_hostname
{
  _notify 'task' 'Applying hostname and domainname to Postfix'
  _notify 'inf' "Applying hostname to /etc/postfix/main.cf"

  # Write both identity settings into main.cf via postconf.
  local SETTING
  for SETTING in "myhostname = ${HOSTNAME}" "mydomain = ${DOMAINNAME}"
  do
    postconf -e "${SETTING}"
  done
}
function _setup_dovecot_hostname
{
  # Set the hostname used by Dovecot's LDA (e.g. in Message-ID and
  # auto-reply headers); the option ships commented out, so the sed
  # uncomments it while substituting the value.
  _notify 'task' 'Applying hostname to Dovecot'
  _notify 'inf' "Applying hostname to /etc/dovecot/conf.d/15-lda.conf"
  sed -i 's/^#hostname =.*$/hostname = '"${HOSTNAME}"'/g' /etc/dovecot/conf.d/15-lda.conf
}
function _setup_dovecot
{
  # Configure Dovecot: default TLS cert, enabled protocols/ports,
  # mailbox storage format, and global sieve scripts.
  # NOTE(review): order matters here — the protocols.d copy must happen
  # before the managesieve re-enable, and the 10-ssl.conf sed relies on
  # the '#ssl = yes' line still being commented.
  _notify 'task' 'Setting up Dovecot'

  # moved from docker file, copy or generate default self-signed cert
  # Reuse a persisted self-signed cert from the mail-state volume when
  # ONE_DIR=1 and one exists.
  if [[ -f /var/mail-state/lib-dovecot/dovecot.pem ]] && [[ ${ONE_DIR} -eq 1 ]]
  then
    _notify 'inf' "Copying default dovecot cert"
    cp /var/mail-state/lib-dovecot/dovecot.key /etc/dovecot/ssl/
    cp /var/mail-state/lib-dovecot/dovecot.pem /etc/dovecot/ssl/
  fi

  # Otherwise generate a fresh self-signed cert, persisting it to the
  # mail-state volume (when ONE_DIR=1) so restarts reuse it.
  if [[ ! -f /etc/dovecot/ssl/dovecot.pem ]]
  then
    _notify 'inf' "Generating default dovecot cert"

    pushd /usr/share/dovecot || return 1
    ./mkcert.sh
    popd || return 1

    if [[ ${ONE_DIR} -eq 1 ]]
    then
      mkdir -p /var/mail-state/lib-dovecot
      cp /etc/dovecot/ssl/dovecot.key /var/mail-state/lib-dovecot/
      cp /etc/dovecot/ssl/dovecot.pem /var/mail-state/lib-dovecot/
    fi
  fi

  cp -a /usr/share/dovecot/protocols.d /etc/dovecot/
  # disable pop3 (it will be eventually enabled later in the script, if requested)
  # managesieve is likewise disabled here and re-enabled below when
  # ENABLE_MANAGESIEVE=1.
  mv /etc/dovecot/protocols.d/pop3d.protocol /etc/dovecot/protocols.d/pop3d.protocol.disab
  mv /etc/dovecot/protocols.d/managesieved.protocol /etc/dovecot/protocols.d/managesieved.protocol.disab

  # Enable SSL and the standard secure ports (993 IMAPS / 995 POP3S);
  # 10-ssl.conf gets 'ssl = required' to forbid plaintext logins.
  sed -i -e 's/#ssl = yes/ssl = yes/g' /etc/dovecot/conf.d/10-master.conf
  sed -i -e 's/#port = 993/port = 993/g' /etc/dovecot/conf.d/10-master.conf
  sed -i -e 's/#port = 995/port = 995/g' /etc/dovecot/conf.d/10-master.conf
  sed -i -e 's/#ssl = yes/ssl = required/g' /etc/dovecot/conf.d/10-ssl.conf
  sed -i 's/^postmaster_address = .*$/postmaster_address = '"${POSTMASTER_ADDRESS}"'/g' /etc/dovecot/conf.d/15-lda.conf

  # set mail_location according to mailbox format
  case "${DOVECOT_MAILBOX_FORMAT}" in
    sdbox|mdbox )
      _notify 'inf' "Dovecot ${DOVECOT_MAILBOX_FORMAT} format configured"
      sed -i -e 's/^mail_location = .*$/mail_location = '"${DOVECOT_MAILBOX_FORMAT}"':\/var\/mail\/%d\/%n/g' /etc/dovecot/conf.d/10-mail.conf

      # dbox formats keep deleted mail around until purged, so enable
      # the periodic purge cron job.
      _notify 'inf' "Enabling cron job for dbox purge"
      mv /etc/cron.d/dovecot-purge.disabled /etc/cron.d/dovecot-purge
      chmod 644 /etc/cron.d/dovecot-purge
      ;;
    * )
      _notify 'inf' "Dovecot maildir format configured (default)"
      sed -i -e 's/^mail_location = .*$/mail_location = maildir:\/var\/mail\/%d\/%n/g' /etc/dovecot/conf.d/10-mail.conf
      ;;
  esac

  # enable Managesieve service by setting the symlink
  # to the configuration file Dovecot will actually find
  if [[ ${ENABLE_MANAGESIEVE} -eq 1 ]]
  then
    _notify 'inf' "Sieve management enabled"
    mv /etc/dovecot/protocols.d/managesieved.protocol.disab /etc/dovecot/protocols.d/managesieved.protocol
  fi

  # copy pipe and filter programs, if any
  # (wipe first so scripts removed from config disappear too)
  rm -f /usr/lib/dovecot/sieve-filter/*
  rm -f /usr/lib/dovecot/sieve-pipe/*
  [[ -d /tmp/docker-mailserver/sieve-filter ]] && cp /tmp/docker-mailserver/sieve-filter/* /usr/lib/dovecot/sieve-filter/
  [[ -d /tmp/docker-mailserver/sieve-pipe ]] && cp /tmp/docker-mailserver/sieve-pipe/* /usr/lib/dovecot/sieve-pipe/

  # create global sieve directories
  mkdir -p /usr/lib/dovecot/sieve-global/before
  mkdir -p /usr/lib/dovecot/sieve-global/after

  # Install + compile the user-provided global 'before' sieve script,
  # or remove stale copies (including the compiled .svbin) if absent.
  if [[ -f /tmp/docker-mailserver/before.dovecot.sieve ]]
  then
    cp /tmp/docker-mailserver/before.dovecot.sieve /usr/lib/dovecot/sieve-global/before/50-before.dovecot.sieve
    sievec /usr/lib/dovecot/sieve-global/before/50-before.dovecot.sieve
  else
    rm -f /usr/lib/dovecot/sieve-global/before/50-before.dovecot.sieve /usr/lib/dovecot/sieve-global/before/50-before.dovecot.svbin
  fi

  # Same handling for the global 'after' sieve script.
  if [[ -f /tmp/docker-mailserver/after.dovecot.sieve ]]
  then
    cp /tmp/docker-mailserver/after.dovecot.sieve /usr/lib/dovecot/sieve-global/after/50-after.dovecot.sieve
    sievec /usr/lib/dovecot/sieve-global/after/50-after.dovecot.sieve
  else
    rm -f /usr/lib/dovecot/sieve-global/after/50-after.dovecot.sieve /usr/lib/dovecot/sieve-global/after/50-after.dovecot.svbin
  fi

  # sieve will move spams to .Junk folder when SPAMASSASSIN_SPAM_TO_INBOX=1 and MOVE_SPAM_TO_JUNK=1
  if [[ ${SPAMASSASSIN_SPAM_TO_INBOX} -eq 1 ]] && [[ ${MOVE_SPAM_TO_JUNK} -eq 1 ]]
  then
    _notify 'inf' "Spam messages will be moved to the Junk folder."
    cp /etc/dovecot/sieve/before/60-spam.sieve /usr/lib/dovecot/sieve-global/before/
    sievec /usr/lib/dovecot/sieve-global/before/60-spam.sieve
  else
    rm -f /usr/lib/dovecot/sieve-global/before/60-spam.sieve /usr/lib/dovecot/sieve-global/before/60-spam.svbin
  fi

  chown docker:docker -R /usr/lib/dovecot/sieve*
  chmod 550 -R /usr/lib/dovecot/sieve*
  # pipe scripts must be executable for dovecot to invoke them
  chmod -f +x /usr/lib/dovecot/sieve-pipe/*
}
function _setup_dovecot_quota
{
  # Enable or disable Dovecot's quota plugin and the matching Postfix
  # quota-status policy check, driven by ENABLE_QUOTAS / ENABLE_LDAP /
  # SMTP_ONLY.
  _notify 'task' 'Setting up Dovecot quota'

  # Dovecot quota is disabled when using LDAP or SMTP_ONLY or when explicitly disabled.
  if [[ ${ENABLE_LDAP} -eq 1 ]] || [[ ${SMTP_ONLY} -eq 1 ]] || [[ ${ENABLE_QUOTAS} -eq 0 ]]
  then
    # disable dovecot quota in dovecot confs
    if [[ -f /etc/dovecot/conf.d/90-quota.conf ]]
    then
      mv /etc/dovecot/conf.d/90-quota.conf /etc/dovecot/conf.d/90-quota.conf.disab
      sed -i "s/mail_plugins = \$mail_plugins quota/mail_plugins = \$mail_plugins/g" /etc/dovecot/conf.d/10-mail.conf
      sed -i "s/mail_plugins = \$mail_plugins imap_quota/mail_plugins = \$mail_plugins/g" /etc/dovecot/conf.d/20-imap.conf
    fi

    # disable quota policy check in postfix
    sed -i "s/check_policy_service inet:localhost:65265//g" /etc/postfix/main.cf
  else
    if [[ -f /etc/dovecot/conf.d/90-quota.conf.disab ]]
    then
      mv /etc/dovecot/conf.d/90-quota.conf.disab /etc/dovecot/conf.d/90-quota.conf
      sed -i "s/mail_plugins = \$mail_plugins/mail_plugins = \$mail_plugins quota/g" /etc/dovecot/conf.d/10-mail.conf
      # FIX: the pattern previously read '\$mail_plugin' (missing the
      # trailing 's'), which left a stray 's' after the replacement and
      # produced the invalid plugin name 'imap_quotas'.
      sed -i "s/mail_plugins = \$mail_plugins/mail_plugins = \$mail_plugins imap_quota/g" /etc/dovecot/conf.d/20-imap.conf
    fi

    # Convert the byte limits to MB for dovecot; a value of 0 means
    # "unlimited" and is written without the 'M' suffix.
    local MESSAGE_SIZE_LIMIT_MB=$((POSTFIX_MESSAGE_SIZE_LIMIT / 1000000))
    local MAILBOX_LIMIT_MB=$((POSTFIX_MAILBOX_SIZE_LIMIT / 1000000))

    sed -i "s/quota_max_mail_size =.*/quota_max_mail_size = ${MESSAGE_SIZE_LIMIT_MB}$([[ ${MESSAGE_SIZE_LIMIT_MB} -eq 0 ]] && echo "" || echo "M")/g" /etc/dovecot/conf.d/90-quota.conf
    sed -i "s/quota_rule = \*:storage=.*/quota_rule = *:storage=${MAILBOX_LIMIT_MB}$([[ ${MAILBOX_LIMIT_MB} -eq 0 ]] && echo "" || echo "M")/g" /etc/dovecot/conf.d/90-quota.conf

    if [[ ! -f /tmp/docker-mailserver/dovecot-quotas.cf ]]
    then
      _notify 'inf' "'config/docker-mailserver/dovecot-quotas.cf' is not provided. Using default quotas."
      : >/tmp/docker-mailserver/dovecot-quotas.cf
    fi

    # enable quota policy check in postfix
    sed -i "s/reject_unknown_recipient_domain, reject_rbl_client zen.spamhaus.org/reject_unknown_recipient_domain, check_policy_service inet:localhost:65265, reject_rbl_client zen.spamhaus.org/g" /etc/postfix/main.cf
  fi
}
function _setup_dovecot_local_user
{
  # Build /etc/postfix/vmailbox and /etc/dovecot/userdb from
  # config/postfix-accounts.cf (format: login@domain|encrypted-pass),
  # create each user's maildir, and collect domains into /tmp/vhost.tmp.
  _notify 'task' 'Setting up Dovecot Local User'

  : >/etc/postfix/vmailbox
  : >/etc/dovecot/userdb

  if [[ -f /tmp/docker-mailserver/postfix-accounts.cf ]] && [[ ${ENABLE_LDAP} -ne 1 ]]
  then
    # Strip CRs so Windows-edited config files parse correctly.
    _notify 'inf' "Checking file line endings"
    sed -i 's/\r//g' /tmp/docker-mailserver/postfix-accounts.cf

    _notify 'inf' "Regenerating postfix user list"
    echo "# WARNING: this file is auto-generated. Modify config/postfix-accounts.cf to edit user list." > /etc/postfix/vmailbox

    # checking that /tmp/docker-mailserver/postfix-accounts.cf ends with a newline
    # shellcheck disable=SC1003
    sed -i -e '$a\' /tmp/docker-mailserver/postfix-accounts.cf

    chown dovecot:dovecot /etc/dovecot/userdb
    chmod 640 /etc/dovecot/userdb

    # Switch Dovecot auth to the passwd-file backend (comment out LDAP,
    # uncomment passwdfile include).
    sed -i -e '/\!include auth-ldap\.conf\.ext/s/^/#/' /etc/dovecot/conf.d/10-auth.conf
    sed -i -e '/\!include auth-passwdfile\.inc/s/^#//' /etc/dovecot/conf.d/10-auth.conf

    # creating users ; 'pass' is encrypted
    # comments and empty lines are ignored
    while IFS=$'|' read -r LOGIN PASS
    do
      # Setting variables for better readability
      USER=$(echo "${LOGIN}" | cut -d @ -f1)
      DOMAIN=$(echo "${LOGIN}" | cut -d @ -f2)

      USER_ATTRIBUTES=""

      # test if user has a defined quota (dovecot-quotas.cf lines look
      # like 'user@domain:SIZE')
      if [[ -f /tmp/docker-mailserver/dovecot-quotas.cf ]]
      then
        declare -a USER_QUOTA
        IFS=':' ; read -r -a USER_QUOTA < <(grep "${USER}@${DOMAIN}:" -i /tmp/docker-mailserver/dovecot-quotas.cf)
        unset IFS
        [[ ${#USER_QUOTA[@]} -eq 2 ]] && USER_ATTRIBUTES="${USER_ATTRIBUTES}userdb_quota_rule=*:bytes=${USER_QUOTA[1]}"
      fi

      # Let's go!
      _notify 'inf' "user '${USER}' for domain '${DOMAIN}' with password '********', attr=${USER_ATTRIBUTES}"

      echo "${LOGIN} ${DOMAIN}/${USER}/" >> /etc/postfix/vmailbox

      # User database for dovecot has the following format:
      # user:password:uid:gid:(gecos):home:(shell):extra_fields
      # Example :
      # ${LOGIN}:${PASS}:5000:5000::/var/mail/${DOMAIN}/${USER}::userdb_mail=maildir:/var/mail/${DOMAIN}/${USER}
      echo "${LOGIN}:${PASS}:5000:5000::/var/mail/${DOMAIN}/${USER}::${USER_ATTRIBUTES}" >> /etc/dovecot/userdb

      mkdir -p "/var/mail/${DOMAIN}/${USER}"

      # Copy user provided sieve file, if present
      if [[ -e "/tmp/docker-mailserver/${LOGIN}.dovecot.sieve" ]]
      then
        cp "/tmp/docker-mailserver/${LOGIN}.dovecot.sieve" "/var/mail/${DOMAIN}/${USER}/.dovecot.sieve"
      fi

      echo "${DOMAIN}" >> /tmp/vhost.tmp
    done < <(grep -v "^\s*$\|^\s*\#" /tmp/docker-mailserver/postfix-accounts.cf)
  else
    _notify 'inf' "'config/docker-mailserver/postfix-accounts.cf' is not provided. No mail account created."
  fi

  # FIX: added '-s' so a missing postfix-accounts.cf no longer prints a
  # 'No such file or directory' error to stderr; the exit status (and
  # therefore the fatal no-account check) is unchanged.
  if ! grep -s '@' /tmp/docker-mailserver/postfix-accounts.cf | grep -q '|'
  then
    if [[ ${ENABLE_LDAP} -eq 0 ]]
    then
      _notify 'fatal' "Unless using LDAP, you need at least 1 email account to start Dovecot."
      _defunc
    fi
  fi
}
function _setup_ldap
{
  # Wire Postfix and Dovecot to an LDAP backend: install custom ldap-*.cf
  # overrides, template the query filters, configure dovecot-ldap, and
  # point the Postfix virtual maps at the LDAP tables.
  _notify 'task' 'Setting up Ldap'
  _notify 'inf' 'Checking for custom configs'

  for i in 'users' 'groups' 'aliases' 'domains'
  do
    local FPATH="/tmp/docker-mailserver/ldap-${i}.cf"
    if [[ -f ${FPATH} ]]
    then
      cp "${FPATH}" "/etc/postfix/ldap-${i}.cf"
    fi
  done

  _notify 'inf' 'Starting to override configs'

  local FILES=(
    /etc/postfix/ldap-users.cf
    /etc/postfix/ldap-groups.cf
    /etc/postfix/ldap-aliases.cf
    /etc/postfix/ldap-domains.cf
    /etc/postfix/maps/sender_login_maps.ldap
  )

  # Pick the matching query filter per file, then let configomat expand
  # all LDAP_* variables into it.
  for FILE in "${FILES[@]}"
  do
    [[ ${FILE} =~ ldap-user ]] && export LDAP_QUERY_FILTER="${LDAP_QUERY_FILTER_USER}"
    [[ ${FILE} =~ ldap-group ]] && export LDAP_QUERY_FILTER="${LDAP_QUERY_FILTER_GROUP}"
    [[ ${FILE} =~ ldap-aliases ]] && export LDAP_QUERY_FILTER="${LDAP_QUERY_FILTER_ALIAS}"
    [[ ${FILE} =~ ldap-domains ]] && export LDAP_QUERY_FILTER="${LDAP_QUERY_FILTER_DOMAIN}"
    configomat.sh "LDAP_" "${FILE}"
  done

  _notify 'inf' "Configuring dovecot LDAP"

  # DOVECOT_* settings default to their LDAP_* counterparts unless
  # explicitly overridden.
  declare -A _dovecot_ldap_mapping

  _dovecot_ldap_mapping["DOVECOT_BASE"]="${DOVECOT_BASE:="${LDAP_SEARCH_BASE}"}"
  _dovecot_ldap_mapping["DOVECOT_DN"]="${DOVECOT_DN:="${LDAP_BIND_DN}"}"
  _dovecot_ldap_mapping["DOVECOT_DNPASS"]="${DOVECOT_DNPASS:="${LDAP_BIND_PW}"}"
  _dovecot_ldap_mapping["DOVECOT_HOSTS"]="${DOVECOT_HOSTS:="${LDAP_SERVER_HOST}"}"
  # Not sure whether this can be the same or not
  # _dovecot_ldap_mapping["DOVECOT_PASS_FILTER"]="${DOVECOT_PASS_FILTER:="${LDAP_QUERY_FILTER_USER}"}"
  # _dovecot_ldap_mapping["DOVECOT_USER_FILTER"]="${DOVECOT_USER_FILTER:="${LDAP_QUERY_FILTER_USER}"}"

  for VAR in "${!_dovecot_ldap_mapping[@]}"
  do
    export "${VAR}=${_dovecot_ldap_mapping[${VAR}]}"
  done

  configomat.sh "DOVECOT_" "/etc/dovecot/dovecot-ldap.conf.ext"

  # add domainname to vhost
  echo "${DOMAINNAME}" >>/tmp/vhost.tmp

  _notify 'inf' "Enabling dovecot LDAP authentification"

  # Uncomment the LDAP auth include, comment out the passwdfile one.
  sed -i -e '/\!include auth-ldap\.conf\.ext/s/^#//' /etc/dovecot/conf.d/10-auth.conf
  sed -i -e '/\!include auth-passwdfile\.inc/s/^/#/' /etc/dovecot/conf.d/10-auth.conf

  _notify 'inf' "Configuring LDAP"

  # FIX: the warning below previously named 'ldap-user.cf' while the
  # file actually checked and used is 'ldap-users.cf'.
  if [[ -f /etc/postfix/ldap-users.cf ]]
  then
    postconf -e "virtual_mailbox_maps = ldap:/etc/postfix/ldap-users.cf" || \
      _notify 'inf' "==> Warning: /etc/postfix/ldap-users.cf not found"
  fi

  if [[ -f /etc/postfix/ldap-domains.cf ]]
  then
    postconf -e "virtual_mailbox_domains = /etc/postfix/vhost, ldap:/etc/postfix/ldap-domains.cf" || \
      _notify 'inf' "==> Warning: /etc/postfix/ldap-domains.cf not found"
  fi

  if [[ -f /etc/postfix/ldap-aliases.cf ]] && [[ -f /etc/postfix/ldap-groups.cf ]]
  then
    postconf -e "virtual_alias_maps = ldap:/etc/postfix/ldap-aliases.cf, ldap:/etc/postfix/ldap-groups.cf" || \
      _notify 'inf' "==> Warning: /etc/postfix/ldap-aliases.cf or /etc/postfix/ldap-groups.cf not found"
  fi

  # shellcheck disable=SC2016
  sed -i -E 's+mydestination = $myhostname, +mydestination = +' /etc/postfix/main.cf

  return 0
}
function _setup_postgrey
{
  # Enable the postgrey greylisting policy daemon: hook it into Postfix
  # recipient restrictions, apply tuning flags, and install any
  # user-supplied whitelists.
  _notify 'inf' "Configuring postgrey"

  # Insert the policy check right after the spamcop RBL entry.
  sed -i -e 's/, reject_rbl_client bl.spamcop.net$/, reject_rbl_client bl.spamcop.net, check_policy_service inet:127.0.0.1:10023/' /etc/postfix/main.cf
  # Append delay/max-age/auto-whitelist tuning to postgrey's daemon args.
  sed -i -e "s/\"--inet=127.0.0.1:10023\"/\"--inet=127.0.0.1:10023 --delay=${POSTGREY_DELAY} --max-age=${POSTGREY_MAX_AGE} --auto-whitelist-clients=${POSTGREY_AUTO_WHITELIST_CLIENTS}\"/" /etc/default/postgrey

  # Only append POSTGREY_TEXT once (idempotent across restarts).
  TEXT_FOUND=$(grep -c -i "POSTGREY_TEXT" /etc/default/postgrey)

  if [[ ${TEXT_FOUND} -eq 0 ]]
  then
    printf "POSTGREY_TEXT=\"%s\"\n\n" "${POSTGREY_TEXT}" >> /etc/default/postgrey
  fi

  # Install optional user-provided whitelists.
  if [[ -f /tmp/docker-mailserver/whitelist_clients.local ]]
  then
    cp -f /tmp/docker-mailserver/whitelist_clients.local /etc/postgrey/whitelist_clients.local
  fi

  if [[ -f /tmp/docker-mailserver/whitelist_recipients ]]
  then
    cp -f /tmp/docker-mailserver/whitelist_recipients /etc/postgrey/whitelist_recipients
  fi
}
function _setup_postfix_postscreen
{
  _notify 'inf' "Configuring postscreen"

  # Replace the default 'enforce' action with POSTSCREEN_ACTION for all
  # three postscreen checks in main.cf.
  local OPTION
  for OPTION in postscreen_dnsbl_action postscreen_greet_action postscreen_bare_newline_action
  do
    sed -i -e "s/${OPTION} = enforce/${OPTION} = ${POSTSCREEN_ACTION}/" /etc/postfix/main.cf
  done
}
function _setup_postfix_sizelimits
{
  # Apply the configured size caps. Note that the virtual mailbox limit
  # intentionally reuses POSTFIX_MAILBOX_SIZE_LIMIT.
  local -a LABELS=('message' 'mailbox' 'virtual mailbox')
  local -a SETTINGS=(
    "message_size_limit = ${POSTFIX_MESSAGE_SIZE_LIMIT}"
    "mailbox_size_limit = ${POSTFIX_MAILBOX_SIZE_LIMIT}"
    "virtual_mailbox_limit = ${POSTFIX_MAILBOX_SIZE_LIMIT}"
  )

  local I
  for I in "${!SETTINGS[@]}"
  do
    _notify 'inf' "Configuring postfix ${LABELS[I]} size limit"
    postconf -e "${SETTINGS[I]}"
  done
}
function _setup_postfix_smtputf8
{
  # SMTPUTF8 is turned off globally; not every component in the stack
  # handles internationalised addresses, and mixed support causes
  # delivery failures.
  _notify 'inf' "Configuring postfix smtputf8 support (disable)"
  postconf -e "smtputf8_enable = no"
}
function _setup_spoof_protection
{
  # Reject mail where the authenticated login does not own the MAIL FROM
  # address, using sender-login maps built from the LDAP tables or from
  # the local virtual/aliases/pcre maps.
  _notify 'inf' "Configuring Spoof Protection"

  sed -i \
    's+smtpd_sender_restrictions =+smtpd_sender_restrictions = reject_authenticated_sender_login_mismatch,+' \
    /etc/postfix/main.cf

  if [[ ${ENABLE_LDAP} -eq 1 ]]
  then
    postconf -e "smtpd_sender_login_maps = ldap:/etc/postfix/ldap-users.cf ldap:/etc/postfix/ldap-aliases.cf ldap:/etc/postfix/ldap-groups.cf"
  else
    # Include the regexp alias map only when one was generated.
    if [[ -f /etc/postfix/regexp ]]
    then
      postconf -e "smtpd_sender_login_maps = unionmap:{ texthash:/etc/postfix/virtual, hash:/etc/aliases, pcre:/etc/postfix/maps/sender_login_maps.pcre, pcre:/etc/postfix/regexp }"
    else
      postconf -e "smtpd_sender_login_maps = texthash:/etc/postfix/virtual, hash:/etc/aliases, pcre:/etc/postfix/maps/sender_login_maps.pcre"
    fi
  fi
}
function _setup_postfix_access_control
{
  _notify 'inf' "Configuring user access"

  # Hook optional user-supplied access tables into the matching Postfix
  # restriction list; each entry is 'restriction;lookup;config-file'.
  local ENTRY RESTRICTION ACCESS CONFIG
  for ENTRY in \
    'smtpd_sender_restrictions;check_sender_access;postfix-send-access.cf' \
    'smtpd_recipient_restrictions;check_recipient_access;postfix-receive-access.cf'
  do
    IFS=';' read -r RESTRICTION ACCESS CONFIG <<< "${ENTRY}"
    if [[ -f /tmp/docker-mailserver/${CONFIG} ]]
    then
      sed -i "s|${RESTRICTION} =|${RESTRICTION} = ${ACCESS} texthash:/tmp/docker-mailserver/${CONFIG},|" /etc/postfix/main.cf
    fi
  done
}
function _setup_postfix_sasl
{
  # Enable/disable SMTP AUTH in Postfix. With saslauthd a minimal Cyrus
  # smtpd.conf is written (unless one already exists); otherwise Dovecot
  # provides SASL when the image is not running in SMTP_ONLY mode.
  if [[ ${ENABLE_SASLAUTHD} -eq 1 ]]
  then
    [[ ! -f /etc/postfix/sasl/smtpd.conf ]] && cat > /etc/postfix/sasl/smtpd.conf << EOF
pwcheck_method: saslauthd
mech_list: plain login
EOF
  fi

  # cyrus sasl or dovecot sasl
  if [[ ${ENABLE_SASLAUTHD} -eq 1 ]] || [[ ${SMTP_ONLY} -eq 0 ]]
  then
    sed -i -e 's|^smtpd_sasl_auth_enable[[:space:]]\+.*|smtpd_sasl_auth_enable = yes|g' /etc/postfix/main.cf
  else
    sed -i -e 's|^smtpd_sasl_auth_enable[[:space:]]\+.*|smtpd_sasl_auth_enable = no|g' /etc/postfix/main.cf
  fi

  return 0
}
function _setup_saslauthd
{
  # Configure the saslauthd daemon (LDAP or PAM backend) and strip the
  # Dovecot-SASL settings that would conflict with it from Postfix.
  _notify 'task' "Setting up Saslauthd"
  _notify 'inf' "Configuring Cyrus SASL"

  # checking env vars and setting defaults
  [[ -z ${SASLAUTHD_MECHANISMS:-} ]] && SASLAUTHD_MECHANISMS=pam
  # LDAP without a search base cannot work; fall back to PAM.
  [[ ${SASLAUTHD_MECHANISMS:-} == ldap ]] && [[ -z ${SASLAUTHD_LDAP_SEARCH_BASE} ]] && SASLAUTHD_MECHANISMS=pam
  [[ -z ${SASLAUTHD_LDAP_SERVER} ]] && SASLAUTHD_LDAP_SERVER=localhost
  [[ -z ${SASLAUTHD_LDAP_FILTER} ]] && SASLAUTHD_LDAP_FILTER='(&(uniqueIdentifier=%u)(mailEnabled=TRUE))'

  # FIX: this previously read
  #   SASLAUTHD_LDAP_PROTO='ldap://' || SASLAUTHD_LDAP_PROTO='ldaps://'
  # inside the non-SSL branch only. The '||' arm was dead code (an
  # assignment always succeeds) and the SSL-enabled case never set the
  # protocol at all, yielding an 'ldap_servers:' entry with no scheme.
  if [[ -z ${SASLAUTHD_LDAP_SSL} ]] || [[ ${SASLAUTHD_LDAP_SSL} -eq 0 ]]
  then
    SASLAUTHD_LDAP_PROTO='ldap://'
  else
    SASLAUTHD_LDAP_PROTO='ldaps://'
  fi

  [[ -z ${SASLAUTHD_LDAP_START_TLS} ]] && SASLAUTHD_LDAP_START_TLS=no
  [[ -z ${SASLAUTHD_LDAP_TLS_CHECK_PEER} ]] && SASLAUTHD_LDAP_TLS_CHECK_PEER=no
  [[ -z ${SASLAUTHD_LDAP_AUTH_METHOD} ]] && SASLAUTHD_LDAP_AUTH_METHOD=bind

  # Optional settings become either an empty line or a full 'key: value'
  # line in the generated config.
  if [[ -z ${SASLAUTHD_LDAP_TLS_CACERT_FILE} ]]
  then
    SASLAUTHD_LDAP_TLS_CACERT_FILE=""
  else
    SASLAUTHD_LDAP_TLS_CACERT_FILE="ldap_tls_cacert_file: ${SASLAUTHD_LDAP_TLS_CACERT_FILE}"
  fi

  if [[ -z ${SASLAUTHD_LDAP_TLS_CACERT_DIR} ]]
  then
    SASLAUTHD_LDAP_TLS_CACERT_DIR=""
  else
    SASLAUTHD_LDAP_TLS_CACERT_DIR="ldap_tls_cacert_dir: ${SASLAUTHD_LDAP_TLS_CACERT_DIR}"
  fi

  if [[ -z ${SASLAUTHD_LDAP_PASSWORD_ATTR} ]]
  then
    SASLAUTHD_LDAP_PASSWORD_ATTR=""
  else
    SASLAUTHD_LDAP_PASSWORD_ATTR="ldap_password_attr: ${SASLAUTHD_LDAP_PASSWORD_ATTR}"
  fi

  if [[ -z ${SASLAUTHD_LDAP_MECH} ]]
  then
    SASLAUTHD_LDAP_MECH=""
  else
    SASLAUTHD_LDAP_MECH="ldap_mech: ${SASLAUTHD_LDAP_MECH}"
  fi

  # Generate /etc/saslauthd.conf unless the user mounted their own.
  if [[ ! -f /etc/saslauthd.conf ]]
  then
    _notify 'inf' "Creating /etc/saslauthd.conf"
    cat > /etc/saslauthd.conf << EOF
ldap_servers: ${SASLAUTHD_LDAP_PROTO}${SASLAUTHD_LDAP_SERVER}
ldap_auth_method: ${SASLAUTHD_LDAP_AUTH_METHOD}
ldap_bind_dn: ${SASLAUTHD_LDAP_BIND_DN}
ldap_bind_pw: ${SASLAUTHD_LDAP_PASSWORD}
ldap_search_base: ${SASLAUTHD_LDAP_SEARCH_BASE}
ldap_filter: ${SASLAUTHD_LDAP_FILTER}
ldap_start_tls: ${SASLAUTHD_LDAP_START_TLS}
ldap_tls_check_peer: ${SASLAUTHD_LDAP_TLS_CHECK_PEER}
${SASLAUTHD_LDAP_TLS_CACERT_FILE}
${SASLAUTHD_LDAP_TLS_CACERT_DIR}
${SASLAUTHD_LDAP_PASSWORD_ATTR}
${SASLAUTHD_LDAP_MECH}
ldap_referrals: yes
log_level: 10
EOF
  fi

  # Comment out Dovecot-SASL settings in master.cf and drop them from
  # main.cf — Cyrus saslauthd is handling SASL now.
  sed -i \
    -e "/^[^#].*smtpd_sasl_type.*/s/^/#/g" \
    -e "/^[^#].*smtpd_sasl_path.*/s/^/#/g" \
    /etc/postfix/master.cf

  sed -i \
    -e "/smtpd_sasl_path =.*/d" \
    -e "/smtpd_sasl_type =.*/d" \
    -e "/dovecot_destination_recipient_limit =.*/d" \
    /etc/postfix/main.cf

  # postfix needs membership in the 'sasl' group to reach the saslauthd
  # mux socket.
  gpasswd -a postfix sasl
}
function _setup_postfix_aliases
{
  # Build /etc/postfix/virtual, /etc/postfix/regexp and /etc/aliases from
  # the user-supplied config files, and collect alias domains into
  # /tmp/vhost.tmp for later vhost generation.
  _notify 'task' 'Setting up Postfix Aliases'

  : >/etc/postfix/virtual
  : >/etc/postfix/regexp

  if [[ -f /tmp/docker-mailserver/postfix-virtual.cf ]]
  then
    # fixing old virtual user file (normalize ', ' separators and strip
    # trailing commas)
    if grep -q ",$" /tmp/docker-mailserver/postfix-virtual.cf
    then
      sed -i -e "s/, /,/g" -e "s/,$//g" /tmp/docker-mailserver/postfix-virtual.cf
    fi

    cp -f /tmp/docker-mailserver/postfix-virtual.cf /etc/postfix/virtual

    # the `to` is important, don't delete it
    # shellcheck disable=SC2034
    while read -r FROM TO
    do
      UNAME=$(echo "${FROM}" | cut -d @ -f1)
      DOMAIN=$(echo "${FROM}" | cut -d @ -f2)

      # if they are equal it means the line looks like: "user1 other@domain.tld"
      [[ ${UNAME} != "${DOMAIN}" ]] && echo "${DOMAIN}" >>/tmp/vhost.tmp
    done < <(grep -v "^\s*$\|^\s*\#" /tmp/docker-mailserver/postfix-virtual.cf || true)
  else
    _notify 'inf' "Warning 'config/postfix-virtual.cf' is not provided. No mail alias/forward created."
  fi

  # Optional PCRE alias map; when present it is appended to
  # virtual_alias_maps in main.cf.
  if [[ -f /tmp/docker-mailserver/postfix-regexp.cf ]]
  then
    _notify 'inf' "Adding regexp alias file postfix-regexp.cf"

    cp -f /tmp/docker-mailserver/postfix-regexp.cf /etc/postfix/regexp
    sed -i -E \
      's+virtual_alias_maps(.*)+virtual_alias_maps\1 pcre:/etc/postfix/regexp+g' \
      /etc/postfix/main.cf
  fi

  # Route root's mail to the postmaster, then append any user aliases.
  _notify 'inf' "Configuring root alias"

  echo "root: ${POSTMASTER_ADDRESS}" > /etc/aliases

  if [[ -f /tmp/docker-mailserver/postfix-aliases.cf ]]
  then
    cat /tmp/docker-mailserver/postfix-aliases.cf >> /etc/aliases
  else
    _notify 'inf' "'config/postfix-aliases.cf' is not provided and will be auto created."
    : >/tmp/docker-mailserver/postfix-aliases.cf
  fi

  # Rebuild the binary alias database from /etc/aliases.
  postalias /etc/aliases
}
function _setup_SRS
{
  _notify 'task' 'Setting up SRS'

  # Route sender/recipient rewriting through the local postsrsd daemon,
  # which listens on TCP ports 10001 (forward) and 10002 (reverse).
  local SETTING
  for SETTING in \
    "sender_canonical_maps = tcp:localhost:10001" \
    "sender_canonical_classes = ${SRS_SENDER_CLASSES}" \
    "recipient_canonical_maps = tcp:localhost:10002" \
    "recipient_canonical_classes = envelope_recipient,header_recipient"
  do
    postconf -e "${SETTING}"
  done
}
function _setup_dkim
{
  # Install user-provided OpenDKIM keys/tables (or create empty ones)
  # and make sure opendkim has nameservers configured.
  _notify 'task' 'Setting up DKIM'

  mkdir -p /etc/opendkim && touch /etc/opendkim/SigningTable

  # Check if keys are already available
  if [[ -e "/tmp/docker-mailserver/opendkim/KeyTable" ]]
  then
    cp -a /tmp/docker-mailserver/opendkim/* /etc/opendkim/

    _notify 'inf' "DKIM keys added for: $(ls -C /etc/opendkim/keys/)"
    _notify 'inf' "Changing permissions on /etc/opendkim"

    chown -R opendkim:opendkim /etc/opendkim/
    chmod -R 0700 /etc/opendkim/keys/ # make sure permissions are right
  else
    _notify 'warn' "No DKIM key provided. Check the documentation to find how to get your keys."

    local KEYTABLE_FILE="/etc/opendkim/KeyTable"
    [[ ! -f ${KEYTABLE_FILE} ]] && touch "${KEYTABLE_FILE}"
  fi

  # setup nameservers parameter from /etc/resolv.conf if not defined
  # FIX: added '-q' — the bare grep printed any matching line to stdout;
  # only the exit status is relevant to this check.
  if ! grep -q '^Nameservers' /etc/opendkim.conf
  then
    echo "Nameservers $(grep '^nameserver' /etc/resolv.conf | awk -F " " '{print $2}' | paste -sd ',' -)" >> /etc/opendkim.conf

    _notify 'inf' "Nameservers added to /etc/opendkim.conf"
  fi
}
function _setup_ssl
{
  # Configure TLS for Postfix and Dovecot in two stages: first the
  # protocol/cipher policy (TLS_LEVEL), then the certificate source
  # (SSL_TYPE: letsencrypt | custom | manual | self-signed | '' ).
  _notify 'task' 'Setting up SSL'

  # TLS strength/level configuration
  case "${TLS_LEVEL}" in
    "modern" )
      # Postfix configuration — TLS 1.2+ only, modern cipher list.
      sed -i -r 's/^smtpd_tls_mandatory_protocols =.*$/smtpd_tls_mandatory_protocols = !SSLv2,!SSLv3,!TLSv1,!TLSv1.1/' /etc/postfix/main.cf
      sed -i -r 's/^smtpd_tls_protocols =.*$/smtpd_tls_protocols = !SSLv2,!SSLv3,!TLSv1,!TLSv1.1/' /etc/postfix/main.cf
      sed -i -r 's/^smtp_tls_protocols =.*$/smtp_tls_protocols = !SSLv2,!SSLv3,!TLSv1,!TLSv1.1/' /etc/postfix/main.cf
      sed -i -r 's/^tls_high_cipherlist =.*$/tls_high_cipherlist = ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256/' /etc/postfix/main.cf

      # Dovecot configuration (secure by default though)
      sed -i -r 's/^ssl_min_protocol =.*$/ssl_min_protocol = TLSv1.2/' /etc/dovecot/conf.d/10-ssl.conf
      sed -i -r 's/^ssl_cipher_list =.*$/ssl_cipher_list = ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256/' /etc/dovecot/conf.d/10-ssl.conf

      _notify 'inf' "TLS configured with 'modern' ciphers"
      ;;
    "intermediate" )
      # Postfix configuration — TLS 1.0+ with a broad compatibility
      # cipher list (Mozilla 'intermediate' profile).
      sed -i -r 's/^smtpd_tls_mandatory_protocols =.*$/smtpd_tls_mandatory_protocols = !SSLv2,!SSLv3/' /etc/postfix/main.cf
      sed -i -r 's/^smtpd_tls_protocols =.*$/smtpd_tls_protocols = !SSLv2,!SSLv3/' /etc/postfix/main.cf
      sed -i -r 's/^smtp_tls_protocols =.*$/smtp_tls_protocols = !SSLv2,!SSLv3/' /etc/postfix/main.cf
      sed -i -r 's/^tls_high_cipherlist =.*$/tls_high_cipherlist = ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS/' /etc/postfix/main.cf

      # Dovecot configuration
      sed -i -r 's/^ssl_min_protocol = .*$/ssl_min_protocol = TLSv1/' /etc/dovecot/conf.d/10-ssl.conf
      sed -i -r 's/^ssl_cipher_list = .*$/ssl_cipher_list = ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS/' /etc/dovecot/conf.d/10-ssl.conf

      _notify 'inf' "TLS configured with 'intermediate' ciphers"
      ;;
    * )
      _notify 'err' 'TLS_LEVEL not found [ in _setup_ssl ]'
      ;;
  esac

  # SSL certificate Configuration
  case "${SSL_TYPE}" in
    "letsencrypt" )
      _notify 'inf' "Configuring SSL using 'letsencrypt'"

      # letsencrypt folders and files mounted in /etc/letsencrypt
      local LETSENCRYPT_DOMAIN=""
      local LETSENCRYPT_KEY=""

      # Traefik-style acme.json: extract certs, trying SSL_DOMAIN, then
      # the full hostname, then the bare domain.
      if [[ -f /etc/letsencrypt/acme.json ]]
      then
        if ! _extract_certs_from_acme "${SSL_DOMAIN}"
        then
          if ! _extract_certs_from_acme "${HOSTNAME}"
          then
            _extract_certs_from_acme "${DOMAINNAME}"
          fi
        fi
      fi

      # first determine the letsencrypt domain by checking both the full hostname or just the domainname if a SAN is used in the cert
      if [[ -e /etc/letsencrypt/live/${HOSTNAME}/fullchain.pem ]]
      then
        LETSENCRYPT_DOMAIN=${HOSTNAME}
      elif [[ -e /etc/letsencrypt/live/${DOMAINNAME}/fullchain.pem ]]
      then
        LETSENCRYPT_DOMAIN=${DOMAINNAME}
      else
        _notify 'err' "Cannot access '/etc/letsencrypt/live/${HOSTNAME}/fullchain.pem' or '/etc/letsencrypt/live/${DOMAINNAME}/fullchain.pem'"
        return 1
      fi

      # then determine the keyfile to use (certbot uses privkey.pem,
      # other tooling may produce key.pem)
      if [[ -n ${LETSENCRYPT_DOMAIN} ]]
      then
        if [[ -e /etc/letsencrypt/live/${LETSENCRYPT_DOMAIN}/privkey.pem ]]
        then
          LETSENCRYPT_KEY="privkey"
        elif [[ -e /etc/letsencrypt/live/${LETSENCRYPT_DOMAIN}/key.pem ]]
        then
          LETSENCRYPT_KEY="key"
        else
          _notify 'err' "Cannot access '/etc/letsencrypt/live/${LETSENCRYPT_DOMAIN}/privkey.pem' nor 'key.pem'"
          return 1
        fi
      fi

      # finally, make the changes to the postfix and dovecot configurations
      if [[ -n ${LETSENCRYPT_KEY} ]]
      then
        _notify 'inf' "Adding ${LETSENCRYPT_DOMAIN} SSL certificate to the postfix and dovecot configuration"

        # Postfix configuration
        sed -i -r 's~smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem~smtpd_tls_cert_file=/etc/letsencrypt/live/'"${LETSENCRYPT_DOMAIN}"'/fullchain.pem~g' /etc/postfix/main.cf
        sed -i -r 's~smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key~smtpd_tls_key_file=/etc/letsencrypt/live/'"${LETSENCRYPT_DOMAIN}"'/'"${LETSENCRYPT_KEY}"'\.pem~g' /etc/postfix/main.cf

        # Dovecot configuration
        sed -i -e 's~ssl_cert = </etc/dovecot/ssl/dovecot\.pem~ssl_cert = </etc/letsencrypt/live/'"${LETSENCRYPT_DOMAIN}"'/fullchain\.pem~g' /etc/dovecot/conf.d/10-ssl.conf
        sed -i -e 's~ssl_key = </etc/dovecot/ssl/dovecot\.key~ssl_key = </etc/letsencrypt/live/'"${LETSENCRYPT_DOMAIN}"'/'"${LETSENCRYPT_KEY}"'\.pem~g' /etc/dovecot/conf.d/10-ssl.conf

        _notify 'inf' "SSL configured with 'letsencrypt' certificates"
      fi
      return 0
      ;;
    "custom" )
      # Adding CA signed SSL certificate if provided in 'postfix/ssl' folder
      # (a single combined full-chain file serves both daemons)
      if [[ -e /tmp/docker-mailserver/ssl/${HOSTNAME}-full.pem ]]
      then
        _notify 'inf' "Adding ${HOSTNAME} SSL certificate"

        mkdir -p /etc/postfix/ssl
        cp "/tmp/docker-mailserver/ssl/${HOSTNAME}-full.pem" /etc/postfix/ssl

        # Postfix configuration
        sed -i -r 's~smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem~smtpd_tls_cert_file=/etc/postfix/ssl/'"${HOSTNAME}"'-full.pem~g' /etc/postfix/main.cf
        sed -i -r 's~smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key~smtpd_tls_key_file=/etc/postfix/ssl/'"${HOSTNAME}"'-full.pem~g' /etc/postfix/main.cf

        # Dovecot configuration
        sed -i -e 's~ssl_cert = </etc/dovecot/ssl/dovecot\.pem~ssl_cert = </etc/postfix/ssl/'"${HOSTNAME}"'-full\.pem~g' /etc/dovecot/conf.d/10-ssl.conf
        sed -i -e 's~ssl_key = </etc/dovecot/ssl/dovecot\.key~ssl_key = </etc/postfix/ssl/'"${HOSTNAME}"'-full\.pem~g' /etc/dovecot/conf.d/10-ssl.conf

        _notify 'inf' "SSL configured with 'CA signed/custom' certificates"
      fi
      ;;
    "manual" )
      # Lets you manually specify the location of the SSL Certs to use. This gives you some more control over this whole processes (like using kube-lego to generate certs)
      if [[ -n ${SSL_CERT_PATH} ]] && [[ -n ${SSL_KEY_PATH} ]]
      then
        _notify 'inf' "Configuring certificates using cert ${SSL_CERT_PATH} and key ${SSL_KEY_PATH}"

        mkdir -p /etc/postfix/ssl
        cp "${SSL_CERT_PATH}" /etc/postfix/ssl/cert
        cp "${SSL_KEY_PATH}" /etc/postfix/ssl/key
        # private material must not be world-readable
        chmod 600 /etc/postfix/ssl/cert
        chmod 600 /etc/postfix/ssl/key

        # Postfix configuration
        sed -i -r 's~smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem~smtpd_tls_cert_file=/etc/postfix/ssl/cert~g' /etc/postfix/main.cf
        sed -i -r 's~smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key~smtpd_tls_key_file=/etc/postfix/ssl/key~g' /etc/postfix/main.cf

        # Dovecot configuration
        sed -i -e 's~ssl_cert = </etc/dovecot/ssl/dovecot\.pem~ssl_cert = </etc/postfix/ssl/cert~g' /etc/dovecot/conf.d/10-ssl.conf
        sed -i -e 's~ssl_key = </etc/dovecot/ssl/dovecot\.key~ssl_key = </etc/postfix/ssl/key~g' /etc/dovecot/conf.d/10-ssl.conf

        _notify 'inf' "SSL configured with 'Manual' certificates"
      fi
      ;;
    "self-signed" )
      # Adding self-signed SSL certificate if provided in 'postfix/ssl' folder
      # (requires cert, key, combined bundle AND the demo CA cert)
      if [[ -e /tmp/docker-mailserver/ssl/${HOSTNAME}-cert.pem ]] \
      && [[ -e /tmp/docker-mailserver/ssl/${HOSTNAME}-key.pem ]] \
      && [[ -e /tmp/docker-mailserver/ssl/${HOSTNAME}-combined.pem ]] \
      && [[ -e /tmp/docker-mailserver/ssl/demoCA/cacert.pem ]]
      then
        _notify 'inf' "Adding ${HOSTNAME} SSL certificate"

        mkdir -p /etc/postfix/ssl
        cp "/tmp/docker-mailserver/ssl/${HOSTNAME}-cert.pem" /etc/postfix/ssl
        cp "/tmp/docker-mailserver/ssl/${HOSTNAME}-key.pem" /etc/postfix/ssl

        # Force permission on key file
        chmod 600 "/etc/postfix/ssl/${HOSTNAME}-key.pem"

        cp "/tmp/docker-mailserver/ssl/${HOSTNAME}-combined.pem" /etc/postfix/ssl
        cp /tmp/docker-mailserver/ssl/demoCA/cacert.pem /etc/postfix/ssl

        # Postfix configuration — also trust the demo CA for both the
        # server (smtpd) and client (smtp) sides.
        sed -i -r 's~smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem~smtpd_tls_cert_file=/etc/postfix/ssl/'"${HOSTNAME}"'-cert.pem~g' /etc/postfix/main.cf
        sed -i -r 's~smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key~smtpd_tls_key_file=/etc/postfix/ssl/'"${HOSTNAME}"'-key.pem~g' /etc/postfix/main.cf
        sed -i -r 's~#smtpd_tls_CAfile=~smtpd_tls_CAfile=/etc/postfix/ssl/cacert.pem~g' /etc/postfix/main.cf
        sed -i -r 's~#smtp_tls_CAfile=~smtp_tls_CAfile=/etc/postfix/ssl/cacert.pem~g' /etc/postfix/main.cf

        ln -s /etc/postfix/ssl/cacert.pem "/etc/ssl/certs/cacert-${HOSTNAME}.pem"

        # Dovecot configuration
        sed -i -e 's~ssl_cert = </etc/dovecot/ssl/dovecot\.pem~ssl_cert = </etc/postfix/ssl/'"${HOSTNAME}"'-combined\.pem~g' /etc/dovecot/conf.d/10-ssl.conf
        sed -i -e 's~ssl_key = </etc/dovecot/ssl/dovecot\.key~ssl_key = </etc/postfix/ssl/'"${HOSTNAME}"'-key\.pem~g' /etc/dovecot/conf.d/10-ssl.conf

        _notify 'inf' "SSL configured with 'self-signed' certificates"
      fi
      ;;
    '' )
      # no SSL certificate, plain text access
      # Dovecot configuration
      sed -i -e 's~#disable_plaintext_auth = yes~disable_plaintext_auth = no~g' /etc/dovecot/conf.d/10-auth.conf
      sed -i -e 's~ssl = required~ssl = yes~g' /etc/dovecot/conf.d/10-ssl.conf

      _notify 'inf' "SSL configured with plain text access"
      ;;
    * )
      # Unknown option, default behavior, no action is required
      _notify 'warn' "SSL configured by default"
      ;;
  esac
}
# Build /etc/postfix/vhost, the table of virtual mail domains.
function _setup_postfix_vhost
{
  _notify 'task' "Setting up Postfix vhost"

  local TMP_VHOST=/tmp/vhost.tmp
  local VHOST=/etc/postfix/vhost

  if [[ -f ${TMP_VHOST} ]]
  then
    # Collapse duplicates collected during earlier account/alias parsing
    # into the final table, then drop the scratch file.
    sort < "${TMP_VHOST}" | uniq > "${VHOST}" && rm "${TMP_VHOST}"
  elif [[ ! -f ${VHOST} ]]
  then
    # No domains collected: Postfix still expects the file to exist.
    touch "${VHOST}"
  fi
}
# Restrict Postfix to the configured IP protocol family
# (POSTFIX_INET_PROTOCOLS, e.g. "all", "ipv4", "ipv6").
function _setup_inet_protocols
{
  _notify 'task' 'Setting up POSTFIX_INET_PROTOCOLS option'
  postconf -e "inet_protocols = ${POSTFIX_INET_PROTOCOLS}"
}
# Decide which client networks Postfix (and OpenDKIM/OpenDMARC) trust,
# driven by the PERMIT_DOCKER environment variable:
#   host               - trust the /16 derived from the container's IP
#   network            - trust the docker address pool 172.16.0.0/12
#   connected-networks - trust every network attached to this container
#   (anything else)    - trust only this container's own IP (/32)
function _setup_docker_permit
{
  _notify 'task' 'Setting up PERMIT_DOCKER Option'

  local CONTAINER_IP CONTAINER_NETWORK

  unset CONTAINER_NETWORKS
  declare -a CONTAINER_NETWORKS

  # Container IP on the primary interface, with the CIDR suffix stripped
  CONTAINER_IP=$(ip addr show "${NETWORK_INTERFACE}" | grep 'inet ' | sed 's/[^0-9\.\/]*//g' | cut -d '/' -f 1)
  # Coarse /16 network built from the first two octets of the container IP
  CONTAINER_NETWORK="$(echo "${CONTAINER_IP}" | cut -d '.' -f1-2).0.0"

  # Collect one IPv4 CIDR per veth interface (one per attached docker network)
  while read -r IP
  do
    CONTAINER_NETWORKS+=("${IP}")
  done < <(ip -o -4 addr show type veth | grep -E -o '[0-9\.]+/[0-9]+')

  case ${PERMIT_DOCKER} in
    "host" )
      _notify 'inf' "Adding ${CONTAINER_NETWORK}/16 to my networks"
      # Append to Postfix's existing mynetworks value rather than replacing it
      postconf -e "$(postconf | grep '^mynetworks =') ${CONTAINER_NETWORK}/16"
      echo "${CONTAINER_NETWORK}/16" >> /etc/opendmarc/ignore.hosts
      echo "${CONTAINER_NETWORK}/16" >> /etc/opendkim/TrustedHosts
      ;;

    "network" )
      _notify 'inf' "Adding docker network in my networks"
      postconf -e "$(postconf | grep '^mynetworks =') 172.16.0.0/12"
      echo 172.16.0.0/12 >> /etc/opendmarc/ignore.hosts
      echo 172.16.0.0/12 >> /etc/opendkim/TrustedHosts
      ;;

    "connected-networks" )
      for NETWORK in "${CONTAINER_NETWORKS[@]}"
      do
        # Normalize each collected address to its subnet CIDR before trusting it
        NETWORK=$(_sanitize_ipv4_to_subnet_cidr "${NETWORK}")
        _notify 'inf' "Adding docker network ${NETWORK} in my networks"
        postconf -e "$(postconf | grep '^mynetworks =') ${NETWORK}"
        echo "${NETWORK}" >> /etc/opendmarc/ignore.hosts
        echo "${NETWORK}" >> /etc/opendkim/TrustedHosts
      done
      ;;

    * )
      # Default: most restrictive — trust only this container itself
      _notify 'inf' "Adding container ip in my networks"
      postconf -e "$(postconf | grep '^mynetworks =') ${CONTAINER_IP}/32"
      echo "${CONTAINER_IP}/32" >> /etc/opendmarc/ignore.hosts
      echo "${CONTAINER_IP}/32" >> /etc/opendkim/TrustedHosts
      ;;
  esac
}
# Point Postfix's virtual_transport at the configured delivery agent.
# POSTFIX_DAGENT is mandatory for this setup step; if it is missing we stop
# the whole container (via supervisord) because mail could not be delivered.
function _setup_postfix_virtual_transport
{
  _notify 'task' 'Setting up Postfix virtual transport'

  # Fixed: the previous message interpolated the (empty) value itself,
  # printing just " not set." — name the variable so the operator knows
  # what to configure.
  if [[ -z ${POSTFIX_DAGENT} ]]
  then
    echo "POSTFIX_DAGENT not set."
    kill -15 "$(< /var/run/supervisord.pid)"
    return 1
  fi

  postconf -e "virtual_transport = ${POSTFIX_DAGENT}"
}
# Apply user-supplied overrides to Postfix: lines from config/postfix-main.cf
# go through `postconf -e` (main.cf), lines from config/postfix-master.cf
# through `postconf -P` (master.cf service parameters).
function _setup_postfix_override_configuration
{
  _notify 'task' 'Setting up Postfix Override configuration'

  if [[ -f /tmp/docker-mailserver/postfix-main.cf ]]
  then
    while read -r LINE
    do
      # all valid postfix options start with a lower case letter
      # http://www.postfix.org/postconf.5.html
      if [[ ${LINE} =~ ^[a-z] ]]
      then
        postconf -e "${LINE}"
      fi
    done < /tmp/docker-mailserver/postfix-main.cf
    _notify 'inf' "Loaded 'config/postfix-main.cf'"
  else
    _notify 'inf' "No extra postfix settings loaded because optional '/tmp/docker-mailserver/postfix-main.cf' not provided."
  fi

  if [[ -f /tmp/docker-mailserver/postfix-master.cf ]]
  then
    while read -r LINE
    do
      # master.cf overrides may also start with a digit (e.g. a "587/..." service)
      if [[ ${LINE} =~ ^[0-9a-z] ]]
      then
        postconf -P "${LINE}"
      fi
    done < /tmp/docker-mailserver/postfix-master.cf
    _notify 'inf' "Loaded 'config/postfix-master.cf'"
  else
    _notify 'inf' "No extra postfix settings loaded because optional '/tmp/docker-mailserver/postfix-master.cf' not provided."
  fi

  _notify 'inf' "set the compatibility level to 2"
  postconf compatibility_level=2
}
# Rebuild /etc/postfix/sasl_passwd from the SASL_PASSWD environment variable.
function _setup_postfix_sasl_password
{
  _notify 'task' 'Setting up Postfix SASL Password'

  # Start from a clean slate on every container start
  rm -f /etc/postfix/sasl_passwd

  [[ -n ${SASL_PASSWD} ]] && echo "${SASL_PASSWD}" >> /etc/postfix/sasl_passwd

  if [[ -f /etc/postfix/sasl_passwd ]]
  then
    # Credentials file must only ever be readable by root
    chown root:root /etc/postfix/sasl_passwd
    chmod 0600 /etc/postfix/sasl_passwd
    _notify 'inf' "Loaded SASL_PASSWD"
  else
    _notify 'inf' "Warning: 'SASL_PASSWD' is not provided. /etc/postfix/sasl_passwd not created."
  fi
}
# Route all outbound mail through DEFAULT_RELAY_HOST via Postfix's relayhost.
function _setup_postfix_default_relay_host
{
  _notify 'task' 'Applying default relay host to Postfix'

  _notify 'inf' "Applying default relay host ${DEFAULT_RELAY_HOST} to /etc/postfix/main.cf"
  postconf -e "relayhost = ${DEFAULT_RELAY_HOST}"
}
# Configure authenticated outbound relaying: build /etc/postfix/sasl_passwd
# from the optional per-domain config file plus RELAY_USER/RELAY_PASSWORD,
# then enable SASL + mandatory TLS for the smtp client.
function _setup_postfix_relay_hosts
{
  _notify 'task' 'Setting up Postfix Relay Hosts'

  # Default to the standard SMTP port when not given
  [[ -z ${RELAY_PORT} ]] && RELAY_PORT=25

  _notify 'inf' "Setting up outgoing email relaying via ${RELAY_HOST}:${RELAY_PORT}"

  # setup /etc/postfix/sasl_passwd
  # --
  # @domain1.com postmaster@domain1.com:your-password-1
  # @domain2.com postmaster@domain2.com:your-password-2
  # @domain3.com postmaster@domain3.com:your-password-3
  #
  # [smtp.mailgun.org]:587 postmaster@domain2.com:your-password-2
  if [[ -f /tmp/docker-mailserver/postfix-sasl-password.cf ]]
  then
    _notify 'inf' "Adding relay authentication from postfix-sasl-password.cf"

    # Append every non-comment line of the user-supplied file verbatim
    while read -r LINE
    do
      if ! echo "${LINE}" | grep -q -e "^\s*#"
      then
        echo "${LINE}" >> /etc/postfix/sasl_passwd
      fi
    done < /tmp/docker-mailserver/postfix-sasl-password.cf
  fi

  # add default relay
  if [[ -n ${RELAY_USER} ]] && [[ -n ${RELAY_PASSWORD} ]]
  then
    echo "[${RELAY_HOST}]:${RELAY_PORT} ${RELAY_USER}:${RELAY_PASSWORD}" >> /etc/postfix/sasl_passwd
  else
    # Neither a credentials file nor default credentials were given
    if [[ ! -f /tmp/docker-mailserver/postfix-sasl-password.cf ]]
    then
      _notify 'warn' "No relay auth file found and no default set"
    fi
  fi

  if [[ -f /etc/postfix/sasl_passwd ]]
  then
    # Credentials must only be readable by root
    chown root:root /etc/postfix/sasl_passwd
    chmod 0600 /etc/postfix/sasl_passwd
  fi
  # end /etc/postfix/sasl_passwd

  _populate_relayhost_map

  postconf -e \
    "smtp_sasl_auth_enable = yes" \
    "smtp_sasl_security_options = noanonymous" \
    "smtp_sasl_password_maps = texthash:/etc/postfix/sasl_passwd" \
    "smtp_use_tls = yes" \
    "smtp_tls_security_level = encrypt" \
    "smtp_tls_note_starttls_offer = yes" \
    "smtp_tls_CAfile = /etc/ssl/certs/ca-certificates.crt" \
    "sender_dependent_relayhost_maps = texthash:/etc/postfix/relayhost_map" \
    "smtp_sender_dependent_authentication = yes"
}
# Install DH parameters for Postfix TLS. Preference order:
#   ONE_DIR=1 : persisted /var/mail-state copy, else the built-in ffdhe4096
#   otherwise : existing file, dovecot's copy, user-supplied copy, ffdhe4096
function _setup_postfix_dhparam
{
  _notify 'task' 'Setting up Postfix dhparam'

  if [[ ${ONE_DIR} -eq 1 ]]
  then
    DHPARAMS_FILE=/var/mail-state/lib-shared/dhparams.pem

    if [[ ! -f ${DHPARAMS_FILE} ]]
    then
      # No persisted parameters: use the standardized RFC 7919 group
      _notify 'inf' "Use ffdhe4096 for dhparams (postfix)"
      cp -f /etc/postfix/shared/ffdhe4096.pem /etc/postfix/dhparams.pem
    else
      _notify 'inf' "Use postfix dhparams that was generated previously"
      _notify 'warn' "Using self-generated dhparams is considered as insecure."
      _notify 'warn' "Unless you know what you are doing, please remove /var/mail-state/lib-shared/dhparams.pem."
      # Copy from the state directory to the working location
      cp -f "${DHPARAMS_FILE}" /etc/postfix/dhparams.pem
    fi
  else
    if [[ ! -f /etc/postfix/dhparams.pem ]]
    then
      if [[ -f /etc/dovecot/dh.pem ]]
      then
        _notify 'inf' "Copy dovecot dhparams to postfix"
        cp /etc/dovecot/dh.pem /etc/postfix/dhparams.pem
      elif [[ -f /tmp/docker-mailserver/dhparams.pem ]]
      then
        _notify 'inf' "Copy pre-generated dhparams to postfix"
        _notify 'warn' "Using self-generated dhparams is considered as insecure."
        # Fixed: this branch consumes the user-supplied file under
        # /tmp/docker-mailserver, so the removal hint must name that path
        # (it previously pointed at /var/mail-state/lib-shared/dhparams.pem).
        _notify 'warn' "Unless you know what you are doing, please remove /tmp/docker-mailserver/dhparams.pem."
        cp /tmp/docker-mailserver/dhparams.pem /etc/postfix/dhparams.pem
      else
        _notify 'inf' "Use ffdhe4096 for dhparams (postfix)"
        cp /etc/postfix/shared/ffdhe4096.pem /etc/postfix/dhparams.pem
      fi
    else
      _notify 'inf' "Use existing postfix dhparams"
      _notify 'warn' "Using self-generated dhparams is considered insecure."
      _notify 'warn' "Unless you know what you are doing, please remove /etc/postfix/dhparams.pem."
    fi
  fi
}
# Install DH parameters for Dovecot TLS; mirrors _setup_postfix_dhparam.
# NOTE(review): "you known" in several warnings below should read "you know";
# the strings are reproduced untouched here.
function _setup_dovecot_dhparam
{
  _notify 'task' 'Setting up Dovecot dhparam'

  if [[ ${ONE_DIR} -eq 1 ]]
  then
    DHPARAMS_FILE=/var/mail-state/lib-shared/dhparams.pem

    if [[ ! -f ${DHPARAMS_FILE} ]]
    then
      # No persisted parameters: use the built-in RFC 7919 ffdhe4096 group
      _notify 'inf' "Use ffdhe4096 for dhparams (dovecot)"
      cp -f /etc/postfix/shared/ffdhe4096.pem /etc/dovecot/dh.pem
    else
      _notify 'inf' "Use dovecot dhparams that was generated previously"
      _notify 'warn' "Using self-generated dhparams is considered as insecure."
      _notify 'warn' "Unless you known what you are doing, please remove /var/mail-state/lib-shared/dhparams.pem."
      # Copy from the state directory to the working location
      cp -f "${DHPARAMS_FILE}" /etc/dovecot/dh.pem
    fi
  else
    if [[ ! -f /etc/dovecot/dh.pem ]]
    then
      # Prefer reusing postfix's parameters, then a user-supplied file,
      # falling back to the built-in ffdhe4096 group
      if [[ -f /etc/postfix/dhparams.pem ]]
      then
        _notify 'inf' "Copy postfix dhparams to dovecot"
        cp /etc/postfix/dhparams.pem /etc/dovecot/dh.pem
      elif [[ -f /tmp/docker-mailserver/dhparams.pem ]]
      then
        _notify 'inf' "Copy pre-generated dhparams to dovecot"
        _notify 'warn' "Using self-generated dhparams is considered as insecure."
        _notify 'warn' "Unless you known what you are doing, please remove /tmp/docker-mailserver/dhparams.pem."
        cp /tmp/docker-mailserver/dhparams.pem /etc/dovecot/dh.pem
      else
        _notify 'inf' "Use ffdhe4096 for dhparams (dovecot)"
        cp /etc/postfix/shared/ffdhe4096.pem /etc/dovecot/dh.pem
      fi
    else
      _notify 'inf' "Use existing dovecot dhparams"
      _notify 'warn' "Using self-generated dhparams is considered as insecure."
      _notify 'warn' "Unless you known what you are doing, please remove /etc/dovecot/dh.pem."
    fi
  fi
}
# Configure the amavis/spamassassin/clamav/fail2ban security stack based on
# the ENABLE_* environment switches and optional user-provided config files.
function _setup_security_stack
{
  _notify 'task' "Setting up Security Stack"

  # recreate auto-generated file
  local DMS_AMAVIS_FILE=/etc/amavis/conf.d/61-dms_auto_generated

  echo "# WARNING: this file is auto-generated." >"${DMS_AMAVIS_FILE}"
  echo "use strict;" >>"${DMS_AMAVIS_FILE}"

  # Spamassassin
  if [[ ${ENABLE_SPAMASSASSIN} -eq 0 ]]
  then
    _notify 'warn' "Spamassassin is disabled. You can enable it with 'ENABLE_SPAMASSASSIN=1'"
    echo "@bypass_spam_checks_maps = (1);" >>"${DMS_AMAVIS_FILE}"
  elif [[ ${ENABLE_SPAMASSASSIN} -eq 1 ]]
  then
    _notify 'inf' "Enabling and configuring spamassassin"

    # Patch the amavis debian defaults with the SA_* score thresholds
    # shellcheck disable=SC2016
    SA_TAG=${SA_TAG:="2.0"} && sed -i -r 's/^\$sa_tag_level_deflt (.*);/\$sa_tag_level_deflt = '"${SA_TAG}"';/g' /etc/amavis/conf.d/20-debian_defaults

    # shellcheck disable=SC2016
    SA_TAG2=${SA_TAG2:="6.31"} && sed -i -r 's/^\$sa_tag2_level_deflt (.*);/\$sa_tag2_level_deflt = '"${SA_TAG2}"';/g' /etc/amavis/conf.d/20-debian_defaults

    # shellcheck disable=SC2016
    SA_KILL=${SA_KILL:="6.31"} && sed -i -r 's/^\$sa_kill_level_deflt (.*);/\$sa_kill_level_deflt = '"${SA_KILL}"';/g' /etc/amavis/conf.d/20-debian_defaults

    SA_SPAM_SUBJECT=${SA_SPAM_SUBJECT:="***SPAM*** "}

    # The literal value "undef" disables subject-line rewriting entirely
    if [[ ${SA_SPAM_SUBJECT} == "undef" ]]
    then
      # shellcheck disable=SC2016
      sed -i -r 's/^\$sa_spam_subject_tag (.*);/\$sa_spam_subject_tag = undef;/g' /etc/amavis/conf.d/20-debian_defaults
    else
      # shellcheck disable=SC2016
      sed -i -r 's/^\$sa_spam_subject_tag (.*);/\$sa_spam_subject_tag = '"'${SA_SPAM_SUBJECT}'"';/g' /etc/amavis/conf.d/20-debian_defaults
    fi

    # activate short circuits when SA BAYES is certain it has spam or ham.
    if [[ ${SA_SHORTCIRCUIT_BAYES_SPAM} -eq 1 ]]
    then
      # automatically activate the Shortcircuit Plugin
      sed -i -r 's/^# loadplugin Mail::SpamAssassin::Plugin::Shortcircuit/loadplugin Mail::SpamAssassin::Plugin::Shortcircuit/g' /etc/spamassassin/v320.pre
      sed -i -r 's/^# shortcircuit BAYES_99/shortcircuit BAYES_99/g' /etc/spamassassin/local.cf
    fi

    if [[ ${SA_SHORTCIRCUIT_BAYES_HAM} -eq 1 ]]
    then
      # automatically activate the Shortcircuit Plugin
      sed -i -r 's/^# loadplugin Mail::SpamAssassin::Plugin::Shortcircuit/loadplugin Mail::SpamAssassin::Plugin::Shortcircuit/g' /etc/spamassassin/v320.pre
      sed -i -r 's/^# shortcircuit BAYES_00/shortcircuit BAYES_00/g' /etc/spamassassin/local.cf
    fi

    # Optional user-supplied extra spamassassin rules
    if [[ -e /tmp/docker-mailserver/spamassassin-rules.cf ]]
    then
      cp /tmp/docker-mailserver/spamassassin-rules.cf /etc/spamassassin/
    fi

    # D_PASS delivers tagged spam to the inbox; D_BOUNCE rejects it
    if [[ ${SPAMASSASSIN_SPAM_TO_INBOX} -eq 1 ]]
    then
      _notify 'inf' "Configure Spamassassin/Amavis to put SPAM inbox"

      sed -i "s/\$final_spam_destiny.*=.*$/\$final_spam_destiny = D_PASS;/g" /etc/amavis/conf.d/49-docker-mailserver
      sed -i "s/\$final_bad_header_destiny.*=.*$/\$final_bad_header_destiny = D_PASS;/g" /etc/amavis/conf.d/49-docker-mailserver
    else
      sed -i "s/\$final_spam_destiny.*=.*$/\$final_spam_destiny = D_BOUNCE;/g" /etc/amavis/conf.d/49-docker-mailserver
      sed -i "s/\$final_bad_header_destiny.*=.*$/\$final_bad_header_destiny = D_BOUNCE;/g" /etc/amavis/conf.d/49-docker-mailserver

      # Warn when the bounce behavior was chosen implicitly rather than by the user
      if ! ${SPAMASSASSIN_SPAM_TO_INBOX_IS_SET}
      then
        _notify 'warn' "Spam messages WILL NOT BE DELIVERED, you will NOT be notified of ANY message bounced. Please define SPAMASSASSIN_SPAM_TO_INBOX explicitly."
      fi
    fi
  fi

  # Clamav
  if [[ ${ENABLE_CLAMAV} -eq 0 ]]
  then
    _notify 'warn' "Clamav is disabled. You can enable it with 'ENABLE_CLAMAV=1'"
    echo "@bypass_virus_checks_maps = (1);" >>"${DMS_AMAVIS_FILE}"
  elif [[ ${ENABLE_CLAMAV} -eq 1 ]]
  then
    _notify 'inf' "Enabling clamav"
  fi

  # A required perl config snippet must end with a true value
  echo "1; # ensure a defined return" >>"${DMS_AMAVIS_FILE}"
  chmod 444 "${DMS_AMAVIS_FILE}"

  # Fail2ban
  if [[ ${ENABLE_FAIL2BAN} -eq 1 ]]
  then
    _notify 'inf' "Fail2ban enabled"

    # Optional user overrides for fail2ban itself and its jail definitions
    if [[ -e /tmp/docker-mailserver/fail2ban-fail2ban.cf ]]
    then
      cp /tmp/docker-mailserver/fail2ban-fail2ban.cf /etc/fail2ban/fail2ban.local
    fi

    if [[ -e /tmp/docker-mailserver/fail2ban-jail.cf ]]
    then
      cp /tmp/docker-mailserver/fail2ban-jail.cf /etc/fail2ban/jail.local
    fi
  else
    # disable logrotate config for fail2ban if not enabled
    rm -f /etc/logrotate.d/fail2ban
  fi

  # fix cron.daily for spamassassin
  sed -i -e 's~invoke-rc.d spamassassin reload~/etc/init\.d/spamassassin reload~g' /etc/cron.daily/spamassassin

  # copy user provided configuration files if provided
  if [[ -f /tmp/docker-mailserver/amavis.cf ]]
  then
    cp /tmp/docker-mailserver/amavis.cf /etc/amavis/conf.d/50-user
  fi
}
# Generate /etc/logrotate.d/maillog for the mail log, with the rotation
# cadence chosen by LOGROTATE_INTERVAL (daily/weekly/monthly).
function _setup_logrotate
{
  _notify 'inf' "Setting up logrotate"

  # Config text is accumulated with literal \n escapes and expanded by echo -e
  LOGROTATE='/var/log/mail/mail.log\n{\n compress\n copytruncate\n delaycompress\n'

  case "${LOGROTATE_INTERVAL}" in
    "daily" )
      _notify 'inf' "Setting postfix logrotate interval to daily"
      LOGROTATE="${LOGROTATE} rotate 4\n daily\n"
      ;;
    "weekly" )
      _notify 'inf' "Setting postfix logrotate interval to weekly"
      LOGROTATE="${LOGROTATE} rotate 4\n weekly\n"
      ;;
    "monthly" )
      _notify 'inf' "Setting postfix logrotate interval to monthly"
      LOGROTATE="${LOGROTATE} rotate 4\n monthly\n"
      ;;
    * ) _notify 'warn' 'LOGROTATE_INTERVAL not found in _setup_logrotate' ;;
  esac

  # Close the logrotate stanza and write the file
  LOGROTATE="${LOGROTATE}}"
  echo -e "${LOGROTATE}" > /etc/logrotate.d/maillog
}
# Wire up periodic pflogsumm mail-traffic reports, either as a daily cron
# job or as a postrotate hook in the maillog logrotate config.
function _setup_mail_summary
{
  _notify 'inf' "Enable postfix summary with recipient ${PFLOGSUMM_RECIPIENT}"

  case "${PFLOGSUMM_TRIGGER}" in
    "daily_cron" )
      _notify 'inf' "Creating daily cron job for pflogsumm report"
      echo "#! /bin/bash" > /etc/cron.daily/postfix-summary
      echo "/usr/local/bin/report-pflogsumm-yesterday ${HOSTNAME} ${PFLOGSUMM_RECIPIENT} ${PFLOGSUMM_SENDER}" >> /etc/cron.daily/postfix-summary
      chmod +x /etc/cron.daily/postfix-summary
      ;;

    "logrotate" )
      _notify 'inf' "Add postrotate action for pflogsumm report"
      # Inject a postrotate hook into the config written by _setup_logrotate
      sed -i "s|}| postrotate\n /usr/local/bin/postfix-summary ${HOSTNAME} ${PFLOGSUMM_RECIPIENT} ${PFLOGSUMM_SENDER}\n endscript\n}\n|" /etc/logrotate.d/maillog
      ;;

    "none" ) _notify 'inf' "Postfix log summary reports disabled. You can enable them with 'PFLOGSUMM_TRIGGER=daily_cron' or 'PFLOGSUMM_TRIGGER=logrotate'" ;;

    # Fixed: the error message previously misspelled this function's name
    # as '_setup_mail_summery'.
    * ) _notify 'err' 'PFLOGSUMM_TRIGGER not found in _setup_mail_summary' ;;
  esac
}
# Install a daily or weekly logwatch report cron job, per LOGWATCH_INTERVAL.
function _setup_logwatch
{
  _notify 'inf' "Enable logwatch reports with recipient ${LOGWATCH_RECIPIENT}"

  # Teach logwatch where freshclam logs live in this image
  echo "LogFile = /var/log/mail/freshclam.log" >> /etc/logwatch/conf/logfiles/clam-update.conf

  case "${LOGWATCH_INTERVAL}" in
    "daily" )
      _notify 'inf' "Creating daily cron job for logwatch reports"
      echo "#! /bin/bash" > /etc/cron.daily/logwatch
      echo "/usr/sbin/logwatch --range Yesterday --hostname ${HOSTNAME} --mailto ${LOGWATCH_RECIPIENT}" \
        >> /etc/cron.daily/logwatch
      chmod 744 /etc/cron.daily/logwatch
      ;;
    "weekly" )
      _notify 'inf' "Creating weekly cron job for logwatch reports"
      echo "#! /bin/bash" > /etc/cron.weekly/logwatch
      echo "/usr/sbin/logwatch --range 'between -7 days and -1 days' --hostname ${HOSTNAME} --mailto ${LOGWATCH_RECIPIENT}" \
        >> /etc/cron.weekly/logwatch
      chmod 744 /etc/cron.weekly/logwatch
      ;;
    "none" ) _notify 'inf' "Logwatch reports disabled. You can enable them with 'LOGWATCH_INTERVAL=daily' or 'LOGWATCH_INTERVAL=weekly'" ;;
    * ) _notify 'warn' 'LOGWATCH_INTERVAL not found in _setup_logwatch' ;;
  esac
}
# Run the optional user-supplied patch script (config/user-patches.sh).
function _setup_user_patches
{
  local PATCH_SCRIPT=/tmp/docker-mailserver/user-patches.sh

  # Nothing to do when the optional script was not mounted
  if [[ ! -f ${PATCH_SCRIPT} ]]
  then
    _notify 'inf' "No user patches executed because optional '/tmp/docker-mailserver/user-patches.sh' is not provided."
    return 0
  fi

  _notify 'inf' 'Executing user-patches.sh'

  # Best effort: the mount may be read-only, so ignore chmod failures
  chmod +x "${PATCH_SCRIPT}" &>/dev/null || true

  if [[ -x ${PATCH_SCRIPT} ]]
  then
    "${PATCH_SCRIPT}"
    _notify 'inf' "Executed 'config/user-patches.sh'"
  else
    _notify 'err' "Could not execute user-patches.sh. Not executable!"
  fi
}
# Append selected container environment variables to /etc/environment once,
# so that cron-launched processes also see them.
function _setup_environment
{
  _notify 'task' 'Setting up /etc/environment'

  local BANNER="# Docker Environment"

  # The banner marks that we already appended; prevents duplicate entries
  # across container restarts
  if ! grep -q "${BANNER}" /etc/environment
  then
    echo "${BANNER}" >> /etc/environment
    echo "VIRUSMAILS_DELETE_DELAY=${VIRUSMAILS_DELETE_DELAY}" >> /etc/environment
  fi
}
##########################################################################
# << Setup Stack
##########################################################################
##########################################################################
# >> Fix Stack
#
# Description: Place functions for temporary workarounds and fixes here
##########################################################################
# Run every registered post-configuration fix function (FUNCS_FIX), then
# clear pid/socket leftovers from a previous container stop/start.
function fix
{
  # NOTE(review): notify level 'taskgrg' looks like a typo — the rest of the
  # script uses 'tasklog'/'task'; confirm against _notify's accepted levels.
  _notify 'taskgrg' "Post-configuration checks..."
  for FUNC in "${FUNCS_FIX[@]}"
  do
    if ! ${FUNC}
    then
      # _defunc handles the failure of a registered function
      _defunc
    fi
  done

  _notify 'taskgrg' "Remove leftover pid files from a stop/start"
  rm -rf /var/run/*.pid /var/run/*/*.pid
  # Pre-create the supervisor socket path — presumably so early supervisorctl
  # calls do not fail before supervisord recreates it; verify intent.
  touch /dev/shm/supervisor.sock
}
# Ensure the mail store is owned by uid/gid 5000 (the docker mail user).
function _fix_var_mail_permissions
{
  _notify 'task' 'Checking /var/mail permissions'

  # fix permissions, but skip this if 3 levels deep the user id is already set
  if [[ $(find /var/mail -maxdepth 3 -a \( \! -user 5000 -o \! -group 5000 \) | grep -c .) -ne 0 ]]
  then
    _notify 'inf' "Fixing /var/mail permissions"
    chown -R 5000:5000 /var/mail
  else
    _notify 'inf' "Permissions in /var/mail look OK"
    return 0
  fi
}
# Ensure the amavis state directory (location depends on ONE_DIR) is owned
# by amavis:amavis.
function _fix_var_amavis_permissions
{
  if [[ ${ONE_DIR} -eq 0 ]]
  then
    amavis_state_dir=/var/lib/amavis
  else
    amavis_state_dir=/var/mail-state/lib-amavis
  fi

  # shellcheck disable=SC2016
  _notify 'task' 'Checking $amavis_state_dir permissions'

  # find -H follows the command-line argument if it is itself a symlink
  # (the state dir may be a link into /var/mail-state)
  amavis_permissions_status=$(find -H "${amavis_state_dir}" -maxdepth 3 -a \( \! -user amavis -o \! -group amavis \))

  if [[ -n ${amavis_permissions_status} ]]
  then
    _notify 'inf' "Fixing ${amavis_state_dir} permissions"
    # -h: do not dereference symlinks while recursing
    chown -hR amavis:amavis "${amavis_state_dir}"
  else
    _notify 'inf' "Permissions in ${amavis_state_dir} look OK"
    return 0
  fi
}
# Remove ClamAV logrotate/cron leftovers (invoked when ClamAV is disabled).
function _fix_cleanup_clamav
{
  _notify 'task' 'Cleaning up disabled Clamav'
  rm -f /etc/logrotate.d/clamav-*
  rm -f /etc/cron.d/clamav-freshclam
}
# Remove the spamassassin daily cron job (invoked when SA is disabled).
function _fix_cleanup_spamassassin
{
  _notify 'task' 'Cleaning up disabled spamassassin'
  rm -f /etc/cron.daily/spamassassin
}
##########################################################################
# << Fix Stack
##########################################################################
##########################################################################
# >> Misc Stack
#
# Description: Place functions that do not fit in the sections above here
##########################################################################
# Run every registered misc-stage function (FUNCS_MISC), delegating any
# failure to _defunc.
function misc
{
  # Fixed: log message previously read 'Startin misc'.
  _notify 'tasklog' 'Starting misc'
  for FUNC in "${FUNCS_MISC[@]}"
  do
    if ! ${FUNC}
    then
      _defunc
    fi
  done
}
# Consolidate all service state into /var/mail-state so one docker volume can
# persist it. Only active when ONE_DIR=1 and the directory exists (mounted).
function _misc_save_states
{
  # consolidate all states into a single directory (`/var/mail-state`) to allow persistence using docker volumes
  statedir=/var/mail-state

  if [[ ${ONE_DIR} -eq 1 ]] && [[ -d ${statedir} ]]
  then
    _notify 'inf' "Consolidating all state onto ${statedir}"

    local FILES=(
      /var/spool/postfix
      /var/lib/postfix
      /var/lib/amavis
      /var/lib/clamav
      /var/lib/spamassassin
      /var/lib/fail2ban
      /var/lib/postgrey
      /var/lib/dovecot
    )

    for d in "${FILES[@]}"
    do
      # e.g. /var/lib/amavis -> /var/mail-state/lib-amavis
      dest="${statedir}/$(echo "${d}" | sed -e 's/.var.//; s/\//-/g')"

      if [[ -d ${dest} ]]
      then
        # State already persisted on the volume: replace local dir with a symlink
        _notify 'inf' " Destination ${dest} exists, linking ${d} to it"
        rm -rf "${d}"
        ln -s "${dest}" "${d}"
      elif [[ -d ${d} ]]
      then
        # First run: move existing local state onto the volume, then link back
        _notify 'inf' " Moving contents of ${d} to ${dest}:" "$(ls "${d}")"
        mv "${d}" "${dest}"
        ln -s "${dest}" "${d}"
      else
        # Neither side exists yet: create the destination and link to it
        _notify 'inf' " Linking ${d} to ${dest}"
        mkdir -p "${dest}"
        ln -s "${dest}" "${d}"
      fi
    done

    _notify 'inf' 'Fixing /var/mail-state/* permissions'
    chown -R clamav /var/mail-state/lib-clamav
    chown -R postfix /var/mail-state/lib-postfix
    chown -R postgrey /var/mail-state/lib-postgrey
    chown -R debian-spamd /var/mail-state/lib-spamassassin
    chown -R postfix /var/mail-state/spool-postfix
  fi
}
##########################################################################
# >> Start Daemons
##########################################################################
# Start every registered daemon (DAEMONS_START) in order, delegating any
# failure to _defunc.
function start_daemons
{
  _notify 'tasklog' 'Starting mail server'
  for FUNC in "${DAEMONS_START[@]}"
  do
    if ! ${FUNC}
    then
      _defunc
    fi
  done
}
# Each _start_daemons_* helper below launches one supervised service; they
# are invoked in registry order from start_daemons.

function _start_daemons_cron
{
  _notify 'task' 'Starting cron' 'n'
  supervisorctl start cron
}

function _start_daemons_rsyslog
{
  _notify 'task' 'Starting rsyslog ' 'n'
  supervisorctl start rsyslog
}

function _start_daemons_saslauthd
{
  # The supervisor program name encodes the auth mechanism
  # (e.g. saslauthd_ldap, saslauthd_pam)
  _notify 'task' 'Starting saslauthd' 'n'
  supervisorctl start "saslauthd_${SASLAUTHD_MECHANISMS}"
}

function _start_daemons_fail2ban
{
  _notify 'task' 'Starting fail2ban ' 'n'
  # fail2ban's sshd jail expects this log file to exist
  touch /var/log/auth.log

  # delete fail2ban.sock that probably was left here after container restart
  if [[ -e /var/run/fail2ban/fail2ban.sock ]]
  then
    rm /var/run/fail2ban/fail2ban.sock
  fi

  supervisorctl start fail2ban
}

function _start_daemons_opendkim
{
  _notify 'task' 'Starting opendkim ' 'n'
  supervisorctl start opendkim
}

function _start_daemons_opendmarc
{
  _notify 'task' 'Starting opendmarc ' 'n'
  supervisorctl start opendmarc
}

function _start_daemons_postsrsd
{
  _notify 'task' 'Starting postsrsd ' 'n'
  supervisorctl start postsrsd
}
function _start_daemons_postfix
{
  _notify 'task' 'Starting postfix' 'n'
  supervisorctl start postfix
}

function _start_daemons_dovecot
{
  # Here we are starting sasl and imap, not pop3 because it's disabled by default
  _notify 'task' 'Starting dovecot services' 'n'

  if [[ ${ENABLE_POP3} -eq 1 ]]
  then
    # Re-enable the pop3 protocol stub that ships disabled in the image
    _notify 'task' 'Starting pop3 services' 'n'
    mv /etc/dovecot/protocols.d/pop3d.protocol.disab /etc/dovecot/protocols.d/pop3d.protocol
  fi

  # Optional user-supplied dovecot overrides
  if [[ -f /tmp/docker-mailserver/dovecot.cf ]]
  then
    cp /tmp/docker-mailserver/dovecot.cf /etc/dovecot/local.conf
  fi

  supervisorctl start dovecot

  # TODO fix: on integration test
  # doveadm: Error: userdb lookup: connect(/var/run/dovecot/auth-userdb) failed: No such file or directory
  # doveadm: Fatal: user listing failed
  # if [[ ${ENABLE_LDAP} -ne 1 ]]
  # then
  #   echo "Listing users"
  #   /usr/sbin/dovecot user '*'
  # fi
}
# Start fetchmail. With FETCHMAIL_PARALLEL=1 the single fetchmailrc is split
# into one rc file per remote account, each run as its own supervisor program.
function _start_daemons_fetchmail
{
  _notify 'task' 'Preparing fetchmail config'
  /usr/local/bin/setup-fetchmail

  if [[ ${FETCHMAIL_PARALLEL} -eq 1 ]]
  then
    mkdir /etc/fetchmailrc.d/
    /usr/local/bin/fetchmailrc_split

    # Generate one supervisor program definition per split rc file
    COUNTER=0
    for RC in /etc/fetchmailrc.d/fetchmail-*.rc
    do
      COUNTER=$((COUNTER+1))
      cat <<EOF > "/etc/supervisor/conf.d/fetchmail-${COUNTER}.conf"
[program:fetchmail-${COUNTER}]
startsecs=0
autostart=false
autorestart=true
stdout_logfile=/var/log/supervisor/%(program_name)s.log
stderr_logfile=/var/log/supervisor/%(program_name)s.log
user=fetchmail
command=/usr/bin/fetchmail -f ${RC} -v --nodetach --daemon %(ENV_FETCHMAIL_POLL)s -i /var/lib/fetchmail/.fetchmail-UIDL-cache --pidfile /var/run/fetchmail/%(program_name)s.pid
EOF
      # fetchmail refuses rc files that are world/group accessible
      chmod 700 "${RC}"
      chown fetchmail:root "${RC}"
    done

    # Make supervisord pick up the newly generated program definitions
    supervisorctl reread
    supervisorctl update

    # Start one instance per rc file, numbered in the same order as above
    COUNTER=0
    for _ in /etc/fetchmailrc.d/fetchmail-*.rc
    do
      COUNTER=$((COUNTER+1))
      _notify 'task' "Starting fetchmail instance ${COUNTER}" 'n'
      supervisorctl start "fetchmail-${COUNTER}"
    done
  else
    _notify 'task' 'Starting fetchmail' 'n'
    supervisorctl start fetchmail
  fi
}
function _start_daemons_clamav
{
  _notify 'task' 'Starting clamav' 'n'
  supervisorctl start clamav
}

function _start_daemons_postgrey
{
  _notify 'task' 'Starting postgrey' 'n'
  # A stale pid file from a previous run would prevent startup
  rm -f /var/run/postgrey/postgrey.pid
  supervisorctl start postgrey
}

function _start_daemons_amavis
{
  _notify 'task' 'Starting amavis' 'n'
  supervisorctl start amavis
}
##########################################################################
# << Start Daemons
##########################################################################
##########################################################################
# Start check for update postfix-accounts and postfix-virtual
##########################################################################
# Start the supervised watcher that reacts to postfix-accounts/postfix-virtual
# config changes at runtime.
function _start_changedetector
{
  _notify 'task' 'Starting changedetector' 'n'
  supervisorctl start changedetector
}
# ! ––––––––––––––––––––––––––––––––––––––––––––––
# ! ––– CARE – BEGIN –––––––––––––––––––––––––––––
# ! ––––––––––––––––––––––––––––––––––––––––––––––
# shellcheck source=./helper-functions.sh
. /usr/local/bin/helper-functions.sh

# With DMS_DEBUG=1, dump the full environment before doing anything else
if [[ ${DMS_DEBUG:-0} -eq 1 ]]
then
  _notify 'none'
  _notify 'tasklog' 'ENVIRONMENT'
  _notify 'none'
  printenv
fi

_notify 'none'
_notify 'tasklog' 'Welcome to docker-mailserver!'
_notify 'none'

# Main boot sequence: register all stage functions, then run each stage in order
register_functions

check
setup
fix
misc
start_daemons

_notify 'none'
_notify 'tasklog' "${HOSTNAME} is up and running"
_notify 'none'

# Keep PID 1 alive by following the mail log (touched first so tail cannot fail)
touch /var/log/mail/mail.log
tail -fn 0 /var/log/mail/mail.log

# ! ––––––––––––––––––––––––––––––––––––––––––––––
# ! ––– CARE – END –––––––––––––––––––––––––––––––
# ! ––––––––––––––––––––––––––––––––––––––––––––––

# Unreachable while tail -f runs; exit cleanly if it ever returns
exit 0
|
import sounddevice as sd
def process_audio(input_device, output_device, sample_rate, duration):
    """Run a full-duplex pass-through audio stream for `duration` seconds.

    Opens a sounddevice stream on (input_device, output_device) and copies
    every captured block straight to the output.

    Args:
        input_device: Name or index of the capture device.
        output_device: Name or index of the playback device.
        sample_rate: Stream sample rate in Hz.
        duration: How long to keep the stream open, in seconds.
    """

    def callback(indata, outdata, frames, time, status):
        # `status` reports stream problems (e.g. over-/underruns);
        # log them but keep streaming.
        if status:
            print(f"Error: {status}")
        outdata[:] = indata  # pass-through: no processing applied

    with sd.Stream(device=(input_device, output_device), samplerate=sample_rate, callback=callback):
        sd.sleep(int(duration * 1000))  # sd.sleep expects milliseconds


if __name__ == "__main__":
    # Fixed: the example previously ran at import time with placeholder
    # device names, which would fail for any importer of this module.
    input_device = 'input_device_name'  # Replace with actual input device name or index
    output_device = 'output_device_name'  # Replace with actual output device name or index
    sample_rate = 44100  # Replace with desired sample rate
    duration = 5  # Replace with desired duration in seconds
    process_audio(input_device, output_device, sample_rate, duration)
|
#!/bin/bash
# Input list of packages to install
packages=("package1" "package2" "package3")

# install_package NAME
# Attempt to install one CPAN distribution via cpanm; log loudly on failure
# but keep going so the remaining packages are still attempted.
install_package() {
  # Fixed: variable is now local and quoted (names with spaces or shell
  # metacharacters no longer break the call), and the `$?` test was replaced
  # with the direct `if ! command` form.
  local package_name=$1
  echo "--"
  echo "-- Attempting to install $package_name"
  echo "--"
  if ! cpanm "$package_name"; then
    echo "!!! !!!"
    echo "!!! !!! Final attempt to install $package_name failed!"
  fi
}

# Loop through the list of packages and attempt installation
for package in "${packages[@]}"; do
  install_package "$package"
done
|
<reponame>lananh265/social-network<filename>node_modules/react-icons-kit/md/ic_filter_2_twotone.js
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.ic_filter_2_twotone = void 0;
var ic_filter_2_twotone = {
"viewBox": "0 0 24 24",
"children": [{
"name": "path",
"attribs": {
"d": "M0 0h24v24H0V0z",
"fill": "none"
},
"children": []
}, {
"name": "path",
"attribs": {
"d": "M7 17h14V3H7v14zm4-6c0-1.11.9-2 2-2h2V7h-4V5h4c1.1 0 2 .89 2 2v2c0 1.11-.9 2-2 2h-2v2h4v2h-6v-4z",
"opacity": ".3"
},
"children": []
}, {
"name": "path",
"attribs": {
"d": "M17 13h-4v-2h2c1.1 0 2-.89 2-2V7c0-1.11-.9-2-2-2h-4v2h4v2h-2c-1.1 0-2 .89-2 2v4h6v-2zm4-12H7c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V3c0-1.1-.9-2-2-2zm0 16H7V3h14v14zM1 21c0 1.1.9 2 2 2h16v-2H3V5H1v16z"
},
"children": []
}]
};
exports.ic_filter_2_twotone = ic_filter_2_twotone;
|
<reponame>mmvvpp123/Reminderse-API<gh_stars>1-10
import * as React from "react";
import { SkeletonCard } from "./SkeletonCard";
const DashboardLoading = () => {
  // Render four placeholder cards while the dashboard data loads.
  const placeholders = Array.from({ length: 4 }, (_, index) => (
    <SkeletonCard key={index} />
  ));

  return <>{placeholders}</>;
};

export default DashboardLoading;
|
<reponame>Baranov-Ivan/towel-sort
// Flattens a 2-D matrix in "towel" (boustrophedon) order: even-indexed rows
// are read left-to-right, odd-indexed rows right-to-left.
// Returns [] for a non-array or empty input.
function towelSort(matrix) {
  if (!Array.isArray(matrix) || matrix.length === 0) {
    return [];
  }

  // Fixed: the original abused reduce/reduceRight purely for their push
  // side effects (and carried a redundant `arguments.length` check).
  // flatMap flattens one level; odd rows are copied before reversing so
  // the caller's data is never mutated.
  return matrix.flatMap(function (row, index) {
    return index % 2 ? [...row].reverse() : row;
  });
}

// Preserve the original CommonJS export, guarded so the file also loads in
// an ES-module context where `module` is undefined.
if (typeof module !== 'undefined' && module.exports) {
  module.exports = towelSort;
}
|
// Barrel file: re-export the contact page header under a named binding.
export { default as ContactHeader } from '@authenticator/contact/components/ContactHeader';
|
import { createTestEvent } from './create-test-event'
import { Destination } from './destination-kit'
import { mapValues } from './map-values'
import type { DestinationDefinition } from './destination-kit'
import type { JSONObject } from './json-object'
import type { SegmentEvent } from './segment-event'
import { AuthTokens } from './destination-kit/parse-settings'
/** Loosely-specified input for exercising a destination action in tests. */
interface InputData<Settings> {
  /**
   * The Segment event. You can use `createTestEvent` if you want
   * to construct an event from partial data.
   */
  event?: Partial<SegmentEvent>
  /**
   * The raw input - this is what customers define. It may include
   * literal values as well as mapping-kit directives.
   */
  mapping?: JSONObject
  /**
   * The settings for a destination instance. Includes things like
   * `apiKey` or `subdomain`. Any fields that are used across all actions
   * in a destination.
   */
  settings?: Settings
  /**
   * Whether or not to use default mappings in the test.
   * Set to `false` or omit if you want to explicitly provide the raw input.
   * Set to `true` if you want to test the defaultMappings (along with any mapping passed in)
   */
  useDefaultMappings?: boolean
  /** Auth tokens forwarded unchanged to `executeAction`. */
  auth?: AuthTokens
}
/**
 * A `Destination` wrapper for unit tests: records the responses produced by
 * an action run so tests can assert on them, and accepts loosely-specified
 * input data (partial events, optional mappings, default mappings).
 */
class TestDestination<T> extends Destination<T> {
  /** Responses captured during the most recent `testAction` call. */
  responses: Destination['responses'] = []

  // Fixed: the explicit pass-through constructor was removed — a subclass
  // without a constructor forwards its arguments to `super` automatically,
  // so `constructor(d) { super(d) }` was redundant.

  /** Testing method that runs an action e2e while allowing slightly more flexible inputs */
  async testAction(
    action: string,
    { event, mapping, settings, useDefaultMappings, auth }: InputData<T>
  ): Promise<Destination['responses']> {
    mapping = mapping ?? {}

    if (useDefaultMappings) {
      // Merge the action's declared field defaults under any explicit mapping
      const fields = this.definition.actions[action].fields
      const defaultMappings = mapValues(fields, 'default')
      mapping = { ...defaultMappings, ...mapping } as JSONObject
    }

    await super.executeAction(action, {
      event: createTestEvent(event),
      mapping,
      settings: settings ?? ({} as T),
      auth
    })

    // Hand back the captured responses and reset for the next invocation
    const responses = this.responses
    this.responses = []

    return responses
  }
}
/** Creates a {@link TestDestination} for exercising a destination definition in tests. */
export function createTestIntegration<T>(destination: DestinationDefinition<T>): TestDestination<T> {
  return new TestDestination(destination)
}
|
class E:
    """Minimal container for Weierstrass curve coefficients / point coordinates."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    @staticmethod
    def a4():
        # Coefficient A of y^2 = x^3 + A*x + B
        return 0  # Replace with the actual coefficient A

    @staticmethod
    def a6():
        # Coefficient B of y^2 = x^3 + A*x + B
        return 0  # Replace with the actual coefficient B


def hensel_lift(curve, p, point):
    """Lift `point` on `curve` from coordinates mod p to coordinates mod p**2.

    Fixed for Python 3: the `long` type no longer exists (plain `int` is
    arbitrary precision), and the original `map(long, point.x, point.y)` was
    wrong even conceptually — `map` with two iterables zips them into
    two-argument calls instead of converting each coordinate.

    Returns a new E holding the lifted (x, y) pair.
    """
    A, B = int(curve.a4()), int(curve.a6())
    x, y = int(point.x), int(point.y)

    modulus = p ** 2

    # Compute the lifted x coordinate
    x_lifted = (x ** 2 - A) % modulus

    # Compute the lifted y coordinate
    y_lifted = (y + B * x + A * B) % modulus

    return E(x_lifted, y_lifted)
|
import android.app.NotificationChannel;
import android.app.NotificationManager;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.os.Build;
import androidx.core.app.NotificationCompat;
/**
 * Helpers for registering the booking notification channel and posting
 * booking notifications.
 */
public class NotificationUtils {

    /** Channel id shared by all notifications posted from this helper. */
    private static final String CHANNEL_ID = "BookingChannel";

    /**
     * Registers the booking notification channel with the system.
     * Creating an already-existing channel is a no-op, so this is safe to
     * call on every app start.
     *
     * @param context context used to resolve strings and the system service
     */
    public static void createNotificationChannel(Context context) {
        // Create the NotificationChannel, but only on API 26+ because
        // the NotificationChannel class is new and not in the support library
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
            CharSequence name = context.getString(R.string.channel_name);
            String description = context.getString(R.string.channel_description);
            int importance = NotificationManager.IMPORTANCE_DEFAULT;
            NotificationChannel channel = new NotificationChannel(CHANNEL_ID, name, importance);
            channel.setDescription(description);

            // Register the channel with the system; you can't change the importance
            // or other notification behaviors after this
            NotificationManager notificationManager = context.getSystemService(NotificationManager.class);
            if (notificationManager != null) {
                notificationManager.createNotificationChannel(channel);
            }
        }
    }

    /**
     * Builds and posts a notification that launches {@code intent} when tapped.
     *
     * @param context context used to build and post the notification
     * @param title   notification title
     * @param message notification body (also shown expanded via BigTextStyle)
     * @param intent  activity intent launched when the notification is tapped
     */
    public static void showNotification(Context context, String title, String message, Intent intent) {
        // Fixed: Android 12 (targetSdkVersion 31+) rejects PendingIntents
        // created without an explicit mutability flag. FLAG_IMMUTABLE exists
        // since API 23 and is correct here because the intent is not modified
        // after creation.
        int flags = Build.VERSION.SDK_INT >= Build.VERSION_CODES.M
                ? PendingIntent.FLAG_IMMUTABLE
                : 0;
        PendingIntent pendingIntent = PendingIntent.getActivity(context, 0, intent, flags);

        NotificationCompat.Builder builder = new NotificationCompat.Builder(context, CHANNEL_ID)
                .setSmallIcon(R.drawable.ic_restaurant_notification)
                .setContentTitle(title)
                .setStyle(new NotificationCompat.BigTextStyle().bigText(message))
                .setContentText(message)
                .setPriority(NotificationCompat.PRIORITY_DEFAULT)
                .setContentIntent(pendingIntent);

        NotificationManager notificationManager = (NotificationManager) context
                .getSystemService(Context.NOTIFICATION_SERVICE);

        // NOTE: a fixed id of 0 means each new notification replaces the
        // previous one; pass distinct ids if multiple should stack.
        if (notificationManager != null) {
            notificationManager.notify(0, builder.build());
        }
    }
}
|
<gh_stars>0
import { Subscriber } from '../Subscriber';
import { EmptyObservable } from '../observable/EmptyObservable';
/**
* Returns an Observable that repeats the stream of items emitted by the source Observable at most count times,
* on a particular Scheduler.
*
* <img src="./img/repeat.png" width="100%">
*
* an empty Observable.
* count times.
* @owner Observable
* @this {?}
* @param {?=} count
* @return {?}
*/
export function repeat(count = -1) {
    // Repeating zero times means the result completes immediately with no values.
    if (count === 0) {
        return new EmptyObservable();
    }
    else if (count < 0) {
        // Negative count: repeat indefinitely (-1 is the operator's sentinel).
        return this.lift(new RepeatOperator(-1, this));
    }
    else {
        // The initial subscription counts as the first pass, so the operator
        // only needs count - 1 resubscriptions.
        return this.lift(new RepeatOperator(count - 1, this));
    }
}
/**
 * Operator that resubscribes a RepeatSubscriber to the source a bounded
 * (or unbounded, when count is -1) number of times.
 */
class RepeatOperator {
    /**
     * @param {?} count remaining repetitions (-1 for unlimited)
     * @param {?} source observable to resubscribe to
     */
    constructor(count, source) {
        this.count = count;
        this.source = source;
    }
    /**
     * @param {?} subscriber
     * @param {?} source
     * @return {?}
     */
    call(subscriber, source) {
        const repeatSubscriber = new RepeatSubscriber(subscriber, this.count, this.source);
        return source.subscribe(repeatSubscriber);
    }
}
/**
 * We need this JSDoc comment for affecting ESDoc.
 */
class RepeatSubscriber extends Subscriber {
    /**
     * @param {?} destination downstream subscriber
     * @param {?} count remaining repetitions (-1 for unlimited)
     * @param {?} source observable to resubscribe to on completion
     */
    constructor(destination, count, source) {
        super(destination);
        this.count = count;
        this.source = source;
    }
    /**
     * On source completion: either propagate completion (repeat budget
     * exhausted) or reset this subscriber and resubscribe for another pass.
     * @return {?}
     */
    complete() {
        if (!this.isStopped) {
            const { source, count } = this;
            if (count === 0) {
                // No repeats left: complete downstream for real.
                return super.complete();
            }
            else if (count > -1) {
                // Finite budget: consume one repetition; -1 repeats forever.
                this.count = count - 1;
            }
            // Detach from the finished subscription, then un-stop/re-open this
            // subscriber so the same instance can be reused for the next pass.
            this.unsubscribe();
            this.isStopped = false;
            this.closed = false;
            source.subscribe(this);
        }
    }
}
|
<filename>xmlEnumeration.go
// Copyright 2020 The xgen Authors. All rights reserved. Use of this source
// code is governed by a BSD-style license that can be found in the LICENSE
// file.
//
// Package xgen written in pure Go providing a set of functions that allow you
// to parse XSD (XML schema files). This library needs Go version 1.10 or
// later.
package xgen
import "encoding/xml"
// OnEnumeration handles parsing event on the enumeration start elements.
// Each "value" attribute contributes one entry to the restriction
// enumeration of the simple type currently on the stack.
func (opt *Options) OnEnumeration(ele xml.StartElement, protoTree []interface{}) (err error) {
	for _, attr := range ele.Attr {
		if attr.Name.Local != "value" {
			continue
		}
		if opt.SimpleType.Peek() == nil {
			continue
		}
		st := opt.SimpleType.Peek().(*SimpleType)
		st.Restriction.Enum = append(st.Restriction.Enum, attr.Value)
	}
	return nil
}
// EndEnumeration handles parsing event on the enumeration end elements.
// Enumeration defines a list of acceptable values.
func (opt *Options) EndEnumeration(ele xml.EndElement, protoTree []interface{}) (err error) {
	// If an attribute is currently being parsed, resolve its Go value type
	// from the base of the simple type on top of the stack.
	if opt.Attribute.Len() > 0 && opt.SimpleType.Peek() != nil {
		if opt.Attribute.Peek().(*Attribute).Type, err = opt.GetValueType(opt.SimpleType.Peek().(*SimpleType).Base, opt.ProtoTree); err != nil {
			return
		}
		opt.CurrentEle = ""
	}
	// Likewise for an element under parse.
	// NOTE(review): this branch guards with SimpleType.Len() > 0 while the one
	// above uses SimpleType.Peek() != nil — presumably equivalent for this
	// stack type; confirm against the stack implementation.
	if opt.SimpleType.Len() > 0 && opt.Element.Len() > 0 {
		if opt.Element.Peek().(*Element).Type, err = opt.GetValueType(opt.SimpleType.Peek().(*SimpleType).Base, opt.ProtoTree); err != nil {
			return
		}
		opt.CurrentEle = ""
	}
	return
}
|
#!/usr/bin/env bash
## Complete the following steps to get Docker running locally

# Step 1:
# Build image and add a descriptive tag
docker build . --tag qasibeat/project4attempt2

# Step 2:
# List docker images
docker image ls

# Step 3:
# Run flask app, mapping host port 8000 to container port 80.
# FIX: container names may not contain '/', so --name uses a plain name;
# the repository/tag form is only valid for the image reference.
sudo docker run --name project4attempt2 -p 8000:80 qasibeat/project4attempt2
|
import { Router } from "express";
import cController from "../controllers/mensajeController";
/** Express router exposing the message (mensaje) endpoints. */
class MensajeRoutes {
    public router: Router = Router();

    constructor() {
        this.config();
    }

    config(): void {
        // Register the message endpoints.  The two-segment literal path
        // '/enviar/destinatario' cannot be shadowed by the single-segment
        // '/:destinatario' parameter route, so registration order is safe.
        const router = this.router;
        router.get('/', cController.obtenerMisUltimosMensajes);
        router.get('/:destinatario', cController.leerMisMensajesCon);
        router.post('/', cController.NuevoMensajesDeMiPara);
        router.delete('/:idmensaje', cController.BorrarMensaje);
        router.get('/enviar/destinatario', cController.leerUsuarioNombre);
    }
}

// Export a single shared router instance.
const mensajeRoutes = new MensajeRoutes();
export default mensajeRoutes.router;
|
<filename>src/components/CommentCard/index.js
// Wire the CommentCard component to its connector and re-export the result
// as this directory's default module.
import component from './CommentCard'
import connector from './CommentCard.connector'
export default connector(component)
|
# Download all .gz files from the PubMed annual baseline into the current
# directory, in the background (nohup keeps the job alive after logout).
# Flags: -r -l1 recurse one level; -H span hosts; -t1 single retry;
# -nd flat output (no directory tree); -N timestamping; -np no parent dirs;
# -A.gz accept only .gz files; -erobots=off ignore robots.txt.
nohup wget -r -l1 -H -t1 -nd -N -np -A.gz -erobots=off https://ftp.ncbi.nlm.nih.gov/pubmed/baseline &
# Same for the daily update files, saved under ./daily (-P daily).
nohup wget -r -l1 -H -t1 -nd -N -np -A.gz -erobots=off -P daily https://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/ &
|
import {
actions,
} from '../constants';
// Reducer over a list of asset entries (exported from this module as `user`).
const user = (state = [], action) => {
  if (action.type === actions.ADD_ASSET) {
    // Append the new asset without mutating the previous state array.
    return [...state, action.assetDetails];
  }
  if (action.type === actions.REMOVE_ASSET) {
    // Drop the asset whose id matches the one being removed.
    return state.filter((asset) => asset.id !== action.assetDetails.id);
  }
  return state;
};
export default user;
|
#! /bin/bash
# Source the chips_1200_map.sh generation setup for every production sample
# directory under $PRODDIR, then return to the starting directory.
CURRENTDIR=$(pwd)

# All sample directories share the same layout: $PRODDIR/<sample>/scripts/gen
# (the duplicated cd/source lines of the original are folded into one loop).
SAMPLES=(
    numu_all_numuflux
    nuel_all_numuflux
    numu_cccoh_numuflux
    numu_nccoh_numuflux
    numu_ccdis_numuflux
    numu_ncdis_numuflux
    numu_ccqel_numuflux
    numu_ccres_numuflux
    numu_ncres_numuflux
    numu_ccmec_numuflux
    nuel_cccoh_numuflux
    nuel_nccoh_numuflux
    nuel_ccdis_numuflux
    nuel_ncdis_numuflux
    nuel_ccqel_numuflux
    nuel_ccres_numuflux
    nuel_ncres_numuflux
    nuel_ccmec_numuflux
    nuel_all_nuelflux
    anumu_all_anumuflux
    anuel_all_anuelflux
    cosmics
)

for sample in "${SAMPLES[@]}"; do
    # As in the original, the script is only sourced if the cd succeeds.
    cd "$PRODDIR/$sample/scripts/gen/" && source ../chips_1200_map.sh
done

cd "$CURRENTDIR"
|
#!/bin/bash
# Abort on the first failure (and on unset variables / pipe failures) so that
# `yarn dev` never starts against a partially generated or unmigrated database.
set -euo pipefail

yarn db:generate
yarn db:migrate
yarn dev
|
<filename>homeassistant/components/directv/media_player.py
"""Support for the DirecTV receivers."""
import logging
from typing import Callable, List
from directv import DIRECTV
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_PAUSED, STATE_PLAYING
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from . import DIRECTVEntity
from .const import (
ATTR_MEDIA_CURRENTLY_RECORDING,
ATTR_MEDIA_RATING,
ATTR_MEDIA_RECORDED,
ATTR_MEDIA_START_TIME,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
# Feature flags for a main DirecTV receiver.
SUPPORT_DTV = (
    SUPPORT_PAUSE
    | SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_STOP
    | SUPPORT_NEXT_TRACK
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_PLAY
)
# Feature flags for a client box: same as above minus TURN_ON/TURN_OFF
# (async_turn_on/async_turn_off below raise NotImplementedError for clients).
SUPPORT_DTV_CLIENT = (
    SUPPORT_PAUSE
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_STOP
    | SUPPORT_NEXT_TRACK
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_PLAY
)
async def async_setup_entry(
    hass: HomeAssistantType,
    entry: ConfigEntry,
    async_add_entities: Callable[[List, bool], None],
) -> bool:
    """Set up the DirecTV config entry.

    Creates one media player entity per receiver location reported by the
    DirecTV device associated with this config entry.
    """
    dtv = hass.data[DOMAIN][entry.entry_id]
    entities = [
        DIRECTVMediaPlayer(
            dtv=dtv, name=str.title(location.name), address=location.address,
        )
        for location in dtv.device.locations
    ]
    async_add_entities(entities, True)
class DIRECTVMediaPlayer(DIRECTVEntity, MediaPlayerDevice):
    """Representation of a DirecTV receiver on the network.

    An address of "0" denotes the main receiver (see unique_id); any other
    address is a client location behind it.
    """
    def __init__(self, *, dtv: DIRECTV, name: str, address: str = "0") -> None:
        """Initialize DirecTV media player."""
        super().__init__(
            dtv=dtv, name=name, address=address,
        )
        # Cached state, refreshed by async_update(); None/False until the
        # first successful update.
        self._assumed_state = None
        self._available = False
        self._is_recorded = None
        self._is_standby = True
        self._last_position = None
        self._last_update = None
        self._paused = None
        self._program = None
        self._state = None
    async def async_update(self):
        """Retrieve latest state."""
        self._state = await self.dtv.state(self._address)
        self._available = self._state.available
        self._is_standby = self._state.standby
        self._program = self._state.program
        if self._is_standby:
            # In standby there is no program playing; clear derived fields.
            self._assumed_state = False
            self._is_recorded = None
            self._last_position = None
            self._last_update = None
            self._paused = None
        elif self._program is not None:
            # If the position did not move since the last poll, treat the
            # player as paused.
            self._paused = self._last_position == self._program.position
            self._is_recorded = self._program.recorded
            self._last_position = self._program.position
            self._last_update = self._state.at
            # Pause detection is only reliable for recorded media, so the
            # state is "assumed" exactly when the program is recorded.
            self._assumed_state = self._is_recorded
    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        attributes = {}
        if not self._is_standby:
            attributes[ATTR_MEDIA_CURRENTLY_RECORDING] = self.media_currently_recording
            attributes[ATTR_MEDIA_RATING] = self.media_rating
            attributes[ATTR_MEDIA_RECORDED] = self.media_recorded
            attributes[ATTR_MEDIA_START_TIME] = self.media_start_time
        return attributes
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def unique_id(self):
        """Return a unique ID to use for this media player."""
        # Address "0" is the main receiver, identified by its receiver id;
        # client boxes are identified by their own address.
        if self._address == "0":
            return self.dtv.device.info.receiver_id
        return self._address
    # MediaPlayerDevice properties and methods
    @property
    def state(self):
        """Return the state of the device."""
        if self._is_standby:
            return STATE_OFF
        # For recorded media we can determine if it is paused or not.
        # For live media we're unable to determine and will always return
        # playing instead.
        if self._paused:
            return STATE_PAUSED
        return STATE_PLAYING
    @property
    def available(self):
        """Return if able to retrieve information from DVR or not."""
        return self._available
    @property
    def assumed_state(self):
        """Return if we assume the state or not."""
        return self._assumed_state
    @property
    def media_content_id(self):
        """Return the content ID of current playing media."""
        if self._is_standby or self._program is None:
            return None
        return self._program.program_id
    @property
    def media_content_type(self):
        """Return the content type of current playing media."""
        if self._is_standby or self._program is None:
            return None
        # Fall back to MOVIE for program types Home Assistant does not know.
        known_types = [MEDIA_TYPE_MOVIE, MEDIA_TYPE_TVSHOW]
        if self._program.program_type in known_types:
            return self._program.program_type
        return MEDIA_TYPE_MOVIE
    @property
    def media_duration(self):
        """Return the duration of current playing media in seconds."""
        if self._is_standby or self._program is None:
            return None
        return self._program.duration
    @property
    def media_position(self):
        """Position of current playing media in seconds."""
        if self._is_standby:
            return None
        return self._last_position
    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid."""
        if self._is_standby:
            return None
        return self._last_update
    @property
    def media_title(self):
        """Return the title of current playing media."""
        if self._is_standby or self._program is None:
            return None
        return self._program.title
    @property
    def media_series_title(self):
        """Return the title of current episode of TV show."""
        if self._is_standby or self._program is None:
            return None
        return self._program.episode_title
    @property
    def media_channel(self):
        """Return the channel current playing media."""
        if self._is_standby or self._program is None:
            return None
        return f"{self._program.channel_name} ({self._program.channel})"
    @property
    def source(self):
        """Name of the current input source."""
        if self._is_standby or self._program is None:
            return None
        return self._program.channel
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        # NOTE(review): _is_client is not set in this class — presumably
        # provided by DIRECTVEntity; confirm.
        return SUPPORT_DTV_CLIENT if self._is_client else SUPPORT_DTV
    @property
    def media_currently_recording(self):
        """If the media is currently being recorded or not."""
        if self._is_standby or self._program is None:
            return None
        return self._program.recording
    @property
    def media_rating(self):
        """TV Rating of the current playing media."""
        if self._is_standby or self._program is None:
            return None
        return self._program.rating
    @property
    def media_recorded(self):
        """If the media was recorded or live."""
        if self._is_standby:
            return None
        return self._is_recorded
    @property
    def media_start_time(self):
        """Start time the program aired."""
        if self._is_standby or self._program is None:
            return None
        return dt_util.as_local(self._program.start_time)
    async def async_turn_on(self):
        """Turn on the receiver.

        Raises NotImplementedError for client boxes, which cannot be
        powered on remotely.
        """
        if self._is_client:
            raise NotImplementedError()
        _LOGGER.debug("Turn on %s", self._name)
        await self.dtv.remote("poweron", self._address)
    async def async_turn_off(self):
        """Turn off the receiver.

        Raises NotImplementedError for client boxes, which cannot be
        powered off remotely.
        """
        if self._is_client:
            raise NotImplementedError()
        _LOGGER.debug("Turn off %s", self._name)
        await self.dtv.remote("poweroff", self._address)
    async def async_media_play(self):
        """Send play command."""
        _LOGGER.debug("Play on %s", self._name)
        await self.dtv.remote("play", self._address)
    async def async_media_pause(self):
        """Send pause command."""
        _LOGGER.debug("Pause on %s", self._name)
        await self.dtv.remote("pause", self._address)
    async def async_media_stop(self):
        """Send stop command."""
        _LOGGER.debug("Stop on %s", self._name)
        await self.dtv.remote("stop", self._address)
    async def async_media_previous_track(self):
        """Send rewind command."""
        _LOGGER.debug("Rewind on %s", self._name)
        await self.dtv.remote("rew", self._address)
    async def async_media_next_track(self):
        """Send fast forward command."""
        _LOGGER.debug("Fast forward on %s", self._name)
        await self.dtv.remote("ffwd", self._address)
    async def async_play_media(self, media_type, media_id, **kwargs):
        """Select input source.

        Only channel tuning is supported; other media types are rejected
        with an error log.
        """
        if media_type != MEDIA_TYPE_CHANNEL:
            _LOGGER.error(
                "Invalid media type %s. Only %s is supported",
                media_type,
                MEDIA_TYPE_CHANNEL,
            )
            return
        _LOGGER.debug("Changing channel on %s to %s", self._name, media_id)
        await self.dtv.tune(media_id, self._address)
|
<filename>src/utils/contextUtils.ts
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
import * as fse from "fs-extra";
import * as os from "os";
import * as path from "path";
import { ExtensionContext, extensions } from "vscode";
// Module-level cache of package.json fields, populated once by
// loadPackageInfo() and read by the getters below.
let EXTENSION_PUBLISHER: string;
let EXTENSION_NAME: string;
let EXTENSION_VERSION: string;
let EXTENSION_AI_KEY: string;
/**
 * Read the extension's package.json and cache the publisher, name, version
 * and AI key for the accessor functions in this module.
 */
export async function loadPackageInfo(context: ExtensionContext): Promise<void> {
    const manifestPath: string = context.asAbsolutePath("./package.json");
    const manifest = await fse.readJSON(manifestPath);
    EXTENSION_AI_KEY = manifest.aiKey;
    EXTENSION_PUBLISHER = manifest.publisher;
    EXTENSION_NAME = manifest.name;
    EXTENSION_VERSION = manifest.version;
}
/** Publisher field from package.json (requires loadPackageInfo first). */
export function getExtensionPublisher(): string {
    return EXTENSION_PUBLISHER;
}
/** Name field from package.json (requires loadPackageInfo first). */
export function getExtensionName(): string {
    return EXTENSION_NAME;
}
/** Fully-qualified extension id, `publisher.name`. */
export function getExtensionId(): string {
    return `${EXTENSION_PUBLISHER}.${EXTENSION_NAME}`;
}
/** Version field from package.json (requires loadPackageInfo first). */
export function getExtensionVersion(): string {
    return EXTENSION_VERSION;
}
/** Application Insights key from package.json (requires loadPackageInfo first). */
export function getAiKey(): string {
    return EXTENSION_AI_KEY;
}
/** Resolve the given path segments beneath the OS temporary directory. */
export function getPathToTempFolder(...args: string[]): string {
    const segments: string[] = [os.tmpdir(), ...args];
    return path.join(...segments);
}
export function getPathToExtensionRoot(...args: string[]): string {
return path.join(extensions.getExtension(getExtensionId()).extensionPath, ...args);
}
|
# Load fastqc module
module add fastqc/0.11.7
# Set input and output variables
OUTDIR=results/fastqc_untrimmed_reads
# The glob is stored literally here; it expands when $INPUT is used
# unquoted on the fastqc command line below.
INPUT=data/untrimmed_fastq/*.fastq.gz
# Create output directory if necessary
mkdir -p $OUTDIR
# Run fastqc ($INPUT deliberately unquoted so the glob expands to every file)
fastqc -o $OUTDIR $INPUT
|
printf "installing curl... "
sudo apt install curl -y
|
import random
def randomElement(arr):
    """Return one element of ``arr`` chosen uniformly at random."""
    chosen = random.choice(arr)
    return chosen
|
<reponame>lgoldstein/communitychest<filename>development/src/main/java/net/community/chest/svn/ui/filesmgr/SVNLocalCopyFileNameRenderer.java
/*
*
*/
package net.community.chest.svn.ui.filesmgr;
import java.awt.Component;
import java.util.Map;
import javax.swing.Icon;
import javax.swing.JTable;
import javax.swing.filechooser.FileSystemView;
import net.community.chest.awt.attributes.AttrUtils;
import net.community.chest.awt.image.ImageUtils;
import net.community.chest.awt.layout.border.BorderLayoutPosition;
import net.community.chest.svnkit.core.wc.SVNLocalCopyData;
import net.community.chest.svnkit.core.wc.SVNStatusTypeEnum;
import net.community.chest.ui.components.table.file.FileNameCellRenderer;
/**
* <P>Copyright 2009 as per GPLv2</P>
*
* @author <NAME>.
* @since Aug 6, 2009 11:28:05 AM
*/
public class SVNLocalCopyFileNameRenderer extends FileNameCellRenderer {
    /**
     *
     */
    private static final long serialVersionUID = -3997347297375953795L;
    // Maps an SVN working-copy status to the overlay icon rendered on top of
    // the file's normal icon; may be null (no overlays).
    private Map<SVNStatusTypeEnum,? extends Icon> _statusIconMap;
    public Map<SVNStatusTypeEnum,? extends Icon> getStatusIconsMap ()
    {
        return _statusIconMap;
    }
    public void setStatusIconsMap (Map<SVNStatusTypeEnum,? extends Icon> m)
    {
        _statusIconMap = m;
    }
    public SVNLocalCopyFileNameRenderer (FileSystemView v, Map<SVNStatusTypeEnum,? extends Icon> m)
    {
        super(v);
        _statusIconMap = m;
    }
    public SVNLocalCopyFileNameRenderer (Map<SVNStatusTypeEnum,? extends Icon> m)
    {
        this(FileSystemView.getFileSystemView(), m);
    }
    public SVNLocalCopyFileNameRenderer (FileSystemView v)
    {
        this(v, null);
    }
    public SVNLocalCopyFileNameRenderer ()
    {
        this(FileSystemView.getFileSystemView());
    }
    /*
     * Renders SVNLocalCopyData cells as their underlying file, overlaying a
     * status icon (from the status map) on the file icon when available.
     * Other value types fall through to the superclass unchanged.
     * @see net.community.chest.ui.components.table.file.AbstractFileDisplayNameCellRenderer#getTableCellRendererComponent(javax.swing.JTable, java.lang.Object, boolean, boolean, int, int)
     */
    @Override
    public Component getTableCellRendererComponent (JTable table, Object value,
            boolean isSelected, boolean hasFocus, int row, int column)
    {
        final Component c;
        if (value instanceof SVNLocalCopyData)
        {
            // Delegate rendering of the wrapped file to the base renderer.
            final SVNLocalCopyData    ld=(SVNLocalCopyData) value;
            c = super.getTableCellRendererComponent(table, ld.getFile(), isSelected, hasFocus, row, column);
            // Only look up a status overlay if the component can carry an icon.
            final SVNStatusTypeEnum    st=
                AttrUtils.isIconableComponent(c) ? ld.getStatus() : null;
            final Map<SVNStatusTypeEnum,? extends Icon>    im=
                (null == st) ? null : getStatusIconsMap();
            final Icon    addIcon=
                ((null == im) || (im.size() <= 0)) ? null : im.get(st);
            if (addIcon != null)
            {
                // Compose the overlay at 75% size in the bottom corner; only
                // replace the icon if composition produced a new one.
                final Icon    orgIcon=
                    AttrUtils.getComponentIcon(c),
                            newIcon=
                    ImageUtils.getOverlayIcon(orgIcon, addIcon, BorderLayoutPosition.AFTER_LAST_LINE, 75, 75, c);
                if (newIcon != orgIcon)
                    AttrUtils.setComponentIcon(c, newIcon);
            }
        }
        else
            c = super.getTableCellRendererComponent(table, value, isSelected, hasFocus, row, column);
        return c;
    }
}
|
#shellcheck disable=SC2034
#shellcheck disable=SC2154
# --- Habitat package metadata ---
pkg_name=infra-proxy-service
pkg_description="Automate infra views"
pkg_origin=chef
pkg_version="0.1.0"
pkg_maintainer="Chef Software Inc. <support@chef.io>"
pkg_license=('Chef-MLSA')
pkg_upstream_url="http://github.com/chef/automate/components/infra-proxy-service"
# Runtime dependencies.
pkg_deps=(
  core/bash
  "${local_platform_tools_origin:-chef}/automate-platform-tools"
  chef/mlsa
)
# Configuration values exported to dependent services.
pkg_exports=(
  [port]=service.port # default service is grpc
  [host]=service.host
)
pkg_exposes=(
  port
)
# Bindings consumed from other Automate services.
pkg_binds=(
  [automate-pg-gateway]="port"
  [pg-sidecar-service]="port"
  [authz-service]="port"
  [secrets-service]="port"
)
pkg_bin_dirs=(bin)
# --- Go scaffolding configuration ---
pkg_scaffolding="${local_scaffolding_origin:-chef}/automate-scaffolding-go"
scaffolding_go_base_path=github.com/chef
scaffolding_go_repo_name=automate
scaffolding_go_import_path="${scaffolding_go_base_path}/${scaffolding_go_repo_name}/components/${pkg_name}"
scaffolding_go_build_tags=(prod)
scaffolding_go_binary_list=(
  "${scaffolding_go_import_path}/cmd/${pkg_name}"
)
do_install() {
  do_default_install
  build_line "Copying migration files"
  # Ship the SQL migration files with the package so the service can find
  # them under $pkg_prefix/migrations at runtime.
  cp -r storage/postgres/migration/sql "${pkg_prefix}/migrations"
}
do_strip() {
  # Intentionally a no-op: skip Habitat's default binary stripping step.
  return 0
}
|
/*
* Copyright (C) 2017 The Dagger Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dagger.functional.gwt;
import dagger.functional.gwt.GwtIncompatibles.GwtIncompatible;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Tests for {@code @GwtIncompatible} bindings. */
@RunWith(JUnit4.class)
public class GwtIncompatiblesTest {
  @Test
  public void testIncompatible() {
    // Every generated factory/injector for a @GwtIncompatible binding must
    // itself carry the @GwtIncompatible annotation.
    Class<?>[] generatedTypes = {
      GwtIncompatibles_OnClass_Factory.class,
      GwtIncompatibles_OnConstructor_Factory.class,
      GwtIncompatibles_OuterClass_OnOuterClass_Factory.class,
      GwtIncompatibles_MembersInjectedType_MembersInjector.class,
      GwtIncompatibles_OnModule_OnModuleFactory.class,
      GwtIncompatibles_OnMethod_OnMethodFactory.class,
    };
    for (Class<?> generated : generatedTypes) {
      assertGwtIncompatible(generated);
    }
  }

  private void assertGwtIncompatible(Class<?> clazz) {
    if (!clazz.isAnnotationPresent(GwtIncompatible.class)) {
      throw new AssertionError(clazz.getCanonicalName() + " is not @GwtIncompatible");
    }
  }
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbac
import (
"reflect"
"testing"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)
// TestSubjectLocator verifies that SubjectAccessEvaluator.AllowedSubjects
// returns, for each action, every subject granted access via cluster roles,
// namespaced roles, and (when configured) the super-user.
func TestSubjectLocator(t *testing.T) {
	type actionToSubjects struct {
		action   authorizer.Attributes
		subjects []rbac.Subject
	}
	tests := []struct {
		name                string
		roles               []*rbac.Role
		roleBindings        []*rbac.RoleBinding
		clusterRoles        []*rbac.ClusterRole
		clusterRoleBindings []*rbac.ClusterRoleBinding
		superUser           string
		actionsToSubjects   []actionToSubjects
	}{
		{
			name: "no super user, star matches star",
			clusterRoles: []*rbac.ClusterRole{
				newClusterRole("admin", newRule("*", "*", "*", "*")),
			},
			clusterRoleBindings: []*rbac.ClusterRoleBinding{
				newClusterRoleBinding("admin", "User:super-admin", "Group:super-admins"),
			},
			roleBindings: []*rbac.RoleBinding{
				newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"),
			},
			actionsToSubjects: []actionToSubjects{
				{
					&defaultAttributes{"", "", "get", "Pods", "", "ns1", ""},
					[]rbac.Subject{
						{Kind: rbac.GroupKind, Name: user.SystemPrivilegedGroup},
						{Kind: rbac.UserKind, Name: "super-admin"},
						{Kind: rbac.GroupKind, Name: "super-admins"},
						{Kind: rbac.UserKind, Name: "admin"},
						{Kind: rbac.GroupKind, Name: "admins"},
					},
				},
				{
					// cluster role matches star in namespace
					&defaultAttributes{"", "", "*", "Pods", "", "*", ""},
					[]rbac.Subject{
						{Kind: rbac.GroupKind, Name: user.SystemPrivilegedGroup},
						{Kind: rbac.UserKind, Name: "super-admin"},
						{Kind: rbac.GroupKind, Name: "super-admins"},
					},
				},
				{
					// empty ns
					&defaultAttributes{"", "", "*", "Pods", "", "", ""},
					[]rbac.Subject{
						{Kind: rbac.GroupKind, Name: user.SystemPrivilegedGroup},
						{Kind: rbac.UserKind, Name: "super-admin"},
						{Kind: rbac.GroupKind, Name: "super-admins"},
					},
				},
			},
		},
		{
			name:      "super user, local roles work",
			superUser: "foo",
			clusterRoles: []*rbac.ClusterRole{
				newClusterRole("admin", newRule("*", "*", "*", "*")),
			},
			clusterRoleBindings: []*rbac.ClusterRoleBinding{
				newClusterRoleBinding("admin", "User:super-admin", "Group:super-admins"),
			},
			roles: []*rbac.Role{
				newRole("admin", "ns1", newRule("get", "*", "Pods", "*")),
			},
			roleBindings: []*rbac.RoleBinding{
				newRoleBinding("ns1", "admin", bindToRole, "User:admin", "Group:admins"),
			},
			actionsToSubjects: []actionToSubjects{
				{
					&defaultAttributes{"", "", "get", "Pods", "", "ns1", ""},
					[]rbac.Subject{
						{Kind: rbac.GroupKind, Name: user.SystemPrivilegedGroup},
						{Kind: rbac.UserKind, APIVersion: "v1alpha1", Name: "foo"},
						{Kind: rbac.UserKind, Name: "super-admin"},
						{Kind: rbac.GroupKind, Name: "super-admins"},
						{Kind: rbac.UserKind, Name: "admin"},
						{Kind: rbac.GroupKind, Name: "admins"},
					},
				},
				{
					// verb matches correctly
					&defaultAttributes{"", "", "create", "Pods", "", "ns1", ""},
					[]rbac.Subject{
						{Kind: rbac.GroupKind, Name: user.SystemPrivilegedGroup},
						{Kind: rbac.UserKind, APIVersion: "v1alpha1", Name: "foo"},
						{Kind: rbac.UserKind, Name: "super-admin"},
						{Kind: rbac.GroupKind, Name: "super-admins"},
					},
				},
				{
					// binding only works in correct ns
					&defaultAttributes{"", "", "get", "Pods", "", "ns2", ""},
					[]rbac.Subject{
						{Kind: rbac.GroupKind, Name: user.SystemPrivilegedGroup},
						{Kind: rbac.UserKind, APIVersion: "v1alpha1", Name: "foo"},
						{Kind: rbac.UserKind, Name: "super-admin"},
						{Kind: rbac.GroupKind, Name: "super-admins"},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		// Build a resolver/lister over the test fixtures and evaluate each
		// action, comparing the returned subject list (order-sensitive).
		ruleResolver, lister := rbacregistryvalidation.NewTestRuleResolver(tt.roles, tt.roleBindings, tt.clusterRoles, tt.clusterRoleBindings)
		a := SubjectAccessEvaluator{tt.superUser, lister, lister, ruleResolver}
		for i, action := range tt.actionsToSubjects {
			actualSubjects, err := a.AllowedSubjects(action.action)
			if err != nil {
				t.Errorf("case %q %d: error %v", tt.name, i, err)
			}
			if !reflect.DeepEqual(actualSubjects, action.subjects) {
				t.Errorf("case %q %d: expected %v actual %v", tt.name, i, action.subjects, actualSubjects)
			}
		}
	}
}
|
# File: T (Python 2.4)
from pandac.PandaModules import *
from direct.showbase.DirectObject import *
from direct.interval.IntervalGlobal import *
from direct.actor import Actor
from pirates.piratesbase import PiratesGlobals
from PooledEffect import PooledEffect
from EffectController import EffectController
import random
class ThunderBallGlow(PooledEffect, EffectController):
    """Pooled billboard glow effect that pulses color and scale.

    Decompiled Python 2 source; the long float literals are decompiler
    artifacts and are preserved as-is.
    """
    def __init__(self, effectParent = None, billboardOffset = 1.0):
        PooledEffect.__init__(self)
        EffectController.__init__(self)
        self.setColorScaleOff()
        self.setBillboardPointEye(billboardOffset)
        # Intervals are built lazily by createTrack() before each play/loop.
        self.fadePulse = None
        self.track1 = None
        if effectParent:
            self.reparentTo(effectParent)
        # Additive-blended glow billboard, drawn late (bin 'fixed', 50) and
        # unaffected by fog/lighting/depth writes.
        self.glow = loader.loadModel('models/effects/gypsyBallGlow')
        self.glow.node().setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd))
        self.glow.setDepthWrite(0)
        self.glow.setFogOff()
        self.glow.setLightOff()
        self.glow.setBin('fixed', 50)
        self.glow.reparentTo(self)
        self.glow.setScale(2.5)
    def createTrack(self):
        # Randomize the pulse period slightly so multiple instances desync.
        randomness = random.random() / 4.0
        fadeIn = self.glow.colorInterval(0.25 + randomness, Vec4(0.5, 0.80000000000000004, 0.90000000000000002, 0.75), startColor = Vec4(0.5, 0.90000000000000002, 1, 1), blendType = 'easeInOut')
        fadeOut = self.glow.colorInterval(0.25 + randomness, Vec4(0.5, 0.90000000000000002, 1, 1), startColor = Vec4(0.5, 0.80000000000000004, 0.90000000000000002, 0.75), blendType = 'easeInOut')
        self.fadePulse = Sequence(fadeIn, fadeOut)
        # The aura sub-part additionally pulses in scale.
        glowPart = self.glow.find('**/glow_aura')
        glowPart.setColorScale(0.25, 0.20000000000000001, 0.29999999999999999, 0.75)
        scaleUp = glowPart.scaleInterval(0.25 + randomness, 1.0, startScale = 0.80000000000000004, blendType = 'easeInOut')
        scaleDown = glowPart.scaleInterval(0.25 + randomness, 0.75, startScale = 1.0, blendType = 'easeInOut')
        self.track1 = Sequence(scaleDown, scaleUp)
    def play(self, rate = 1):
        # One-shot playback of both pulse tracks.
        self.createTrack()
        self.fadePulse.start()
        self.track1.start()
    def stop(self):
        if self.fadePulse:
            self.fadePulse.finish()
        if self.track1:
            self.track1.finish()
    def startLoop(self, rate = 1):
        # Continuous playback until stopLoop() is called.
        self.createTrack()
        self.fadePulse.loop()
        self.track1.loop()
    def stopLoop(self):
        if self.fadePulse:
            self.fadePulse.finish()
        if self.track1:
            self.track1.finish()
        self.cleanUpEffect()
    def cleanUpEffect(self):
        EffectController.cleanUpEffect(self)
        # Return this instance to its pool for reuse, if pooled.
        if self.pool:
            self.checkInEffect(self)
    def destroy(self):
        self.fadePulse = None
        self.track1 = None
        if hasattr(self, 'glow'):
            self.glow.removeNode()
            self.glow = None
        EffectController.destroy(self)
        PooledEffect.destroy(self)
|
#include <Arduino.h>
#include "DharmaIO_Button.h"
/*
*
*
*
*/
// Construct a Button bound to the given pin.  digitalTrigger is the logic
// level (HIGH/LOW) that counts as "pressed"; enableInternalPullup requests
// the MCU's internal pull-up resistor.
Button::Button(byte init_pin, bool digitalTrigger, bool enableInternalPullup)
{
    this->pin = init_pin;
    this->digitalTrigger = digitalTrigger;
    // if button is set to react to logic level high then disable the internal pullup
    // (a pull-up is incompatible with a HIGH-triggered button)
    if (digitalTrigger == HIGH)
    {
        enableInternalPullup = DISABLE_INTERNAL_PULLUP;
    }
    // set the pinMode based on whether or not to use the internal pullup
    if (enableInternalPullup)
    {
        pinMode(init_pin, INPUT_PULLUP);
    }
    else
    {
        pinMode(init_pin, INPUT);
    }
}
/*
*
*
*
*/
// Sample the button and classify the transition since the previous sample.
// Returns NO_EVENT (0), PRESSED (1), HELD (2) or RELEASED (3).
byte Button::currentEvent(bool requireDebounce)
{
    // Honor the debounce window so one physical actuation cannot fire twice.
    if (requireDebounce == DEBOUNCE_REQUIRED && checkEventDebounce() == INVALID_EVENT)
    {
        return NO_EVENT;
    }

    this->updateState();

    byte detectedEvent = NO_EVENT;
    if (this->state == TRIGGERED)
    {
        // NOT_TRIGGERED -> TRIGGERED is a press; TRIGGERED -> TRIGGERED is a hold.
        detectedEvent = (this->prevState == NOT_TRIGGERED) ? PRESSED : HELD;
    }
    else if (this->prevState == TRIGGERED)
    {
        // TRIGGERED -> NOT_TRIGGERED is a release.
        detectedEvent = RELEASED;
    }

    if (detectedEvent != NO_EVENT)
    {
        this->updateEvent(detectedEvent);
    }

    return detectedEvent;
}
/*
*
*
*
*/
// updates the objects 'state' property to either TRIGGERED (true) or NOT_TRIGGERED (false)
bool Button::currentState()
{
    // TRIGGERED exactly when the pin reads the configured active logic level.
    this->state = (digitalRead(this->pin) == this->digitalTrigger) ? TRIGGERED : NOT_TRIGGERED;
    return this->state;
}
/*
*
*
*
*/
bool Button::updateState()
{
    // Remember what the state was before sampling the pin again, then
    // refresh and return the current state.
    this->prevState = this->state;
    return this->currentState();
}
// requireDebounce = DEBOUNCE_REQUIRED (true) by default
// debounce is "buffer time" between events and prevents double-executing functions
bool Button::isPressed(bool requireDebounce)
{
    // Within the debounce window, report no press to avoid double-firing.
    if (requireDebounce == DEBOUNCE_REQUIRED && checkEventDebounce() == INVALID_EVENT)
    {
        return false;
    }

    this->updateState();

    // A press is exactly the NOT_TRIGGERED -> TRIGGERED transition
    // (TRIGGERED -> TRIGGERED would be a held button, not a press).
    return this->prevState == NOT_TRIGGERED && this->state == TRIGGERED;
}
// Reports whether a release edge (TRIGGERED -> NOT_TRIGGERED) just occurred.
// requireDebounce defaults to DEBOUNCE_REQUIRED; when set, releases inside
// the debounce "buffer time" are ignored to prevent double-executing handlers.
bool Button::wasReleased(bool requireDebounce)
{
    if (requireDebounce == DEBOUNCE_REQUIRED && checkEventDebounce() == INVALID_EVENT)
    {
        return false;
    }

    this->updateState();

    // A release is exactly the triggered -> not-triggered transition.
    return this->prevState == TRIGGERED && this->state == NOT_TRIGGERED;
}
/*
 * Returns VALID_EVENT when at least 'debounceTime' milliseconds have elapsed
 * since the last recorded event, INVALID_EVENT otherwise.
 *
 * Fixes vs. previous version:
 *  - removed the empty `if (lastEventMillis > currentMillis) {}` branch
 *    (a leftover, never-finished rollover guard that did nothing);
 *  - millis() is now read exactly once, so the comparison uses a single
 *    consistent snapshot of the clock instead of two separate reads.
 * The unsigned subtraction is rollover-safe on its own, so no special
 * handling is needed when millis() wraps (~49 days).
 */
bool Button::checkEventDebounce()
{
    unsigned long currentMillis = millis();
    return ((unsigned long)(currentMillis - this->lastEventMillis) >= this->debounceTime)
               ? VALID_EVENT
               : INVALID_EVENT;
}
// Records timing bookkeeping for an event that just occurred:
// resets the hold timer on a press, accumulates elapsed time on a hold,
// and always stamps lastEventMillis (which checkEventDebounce() reads).
void Button::updateEvent(byte currentEvent)
{
    unsigned long currentEventTime = millis();
    if (currentEvent == PRESSED)
    {
        // on a press, reset the time held to 0 since this is the beginning of a hold
        this->timeHeld = 0;
    }
    else if (currentEvent == HELD)
    {
        // since we track every button event, simply add the time since the last event to the time held
        this->timeHeld += (unsigned long)(currentEventTime - this->lastEventMillis);
    }
    // store the current time as the time of "the last event"
    this->lastEventMillis = currentEventTime;
}
|
<gh_stars>1-10
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by <NAME>, <EMAIL>, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RNmf(RPackage):
    """Provides a framework to perform Non-negative Matrix Factorization (NMF).

    The package implements a set of already published algorithms and seeding
    methods, and provides a framework to test, develop and plug new/custom
    algorithms. Most of the built-in algorithms have been optimized in C++, and
    the main interface function provides an easy way of performing parallel
    computations on multicore machines.
    """

    homepage = "http://renozao.github.io/NMF"
    url = "https://cran.r-project.org/src/contrib/NMF_0.20.6.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/NMF"

    # Spack version directive: tarball version and its checksum digest.
    version('0.20.6', '81df07b3bf710a611db5af24730ff3d0')

    # Runtime R package dependencies, needed both to build and to run.
    depends_on('r-pkgmaker', type=('build', 'run'))
    depends_on('r-registry', type=('build', 'run'))
    depends_on('r-rngtools', type=('build', 'run'))
    depends_on('r-cluster', type=('build', 'run'))
    depends_on('r-stringr', type=('build', 'run'))
    depends_on('r-digest', type=('build', 'run'))
    depends_on('r-gridbase', type=('build', 'run'))
    depends_on('r-colorspace', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
    depends_on('r-foreach', type=('build', 'run'))
    depends_on('r-doparallel', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-reshape2', type=('build', 'run'))
|
#! /bin/sh
# Keep (re)connecting to a Bluetooth device.
# Usage: script [MAC-address]
#   With a MAC argument, keep (re)authenticating that one device forever.
#   With no argument, scan for devices and connect to each in a loop.
#
# Fixes vs. previous version:
#  - `if [ $UNAME | grep 'linux' ]`, `else;` and `while not 1` were all
#    shell syntax errors; replaced with a `case` on uname and `while true`.
#  - OS detection used `uname -r` (kernel release, e.g. "5.15.0-...") which
#    does not contain "linux"; use `uname -s` (kernel name, "Linux").
#  - BT_SEARCHLINE was assigned but $BT_SEARCH was used; unified to BT_SEARCH.
#  - The MAC regex uses ERE features ({5}, groups) which plain grep (BRE)
#    does not honor unescaped; use `grep -E`.

# Pick the Bluetooth tooling for the current OS.
case "$(uname -s)" in
    Linux)
        BT_CLIENT="hcitool"
        BT_SEARCH="scan"
        BT_CONNECT="auth"
        ;;
    *) # if not on linux, assume OSX for now.
        BT_CLIENT="blued"
        BT_SEARCH="listall"
        BT_CONNECT="join"
        ;;
esac

# Extended regex for a MAC address: six hex pairs separated by ':' or '-'.
MAC_RE='([[:xdigit:]]{1,2}[:-]){5}[[:xdigit:]]{1,2}'

ADR=$(printf '%s\n' "${1:-}" | grep -oiE "$MAC_RE")
if [ -n "$ADR" ]; then
    # A specific address was given: keep (re)connecting it forever.
    while true; do
        $BT_CLIENT $BT_CONNECT "$ADR" && sleep 3
    done
else
    # No address given: discover devices once, then cycle through them.
    BT_MACHINES=$($BT_CLIENT $BT_SEARCH | grep -oiE "$MAC_RE")
    while true; do
        for ADR in $BT_MACHINES; do
            $BT_CLIENT $BT_CONNECT "$ADR"
        done
        sleep 3
    done
fi
|
import styled from 'styled-components/native'
import { RectButton } from 'react-native-gesture-handler'
import getColorFromType from '../../utils/getColorFromType'
// Tappable card for one Pokémon list entry; background tinted by its type.
export const Container = styled(RectButton) <{ type: string }>`
height: 130px;
padding: 20px;
overflow: hidden;
margin-bottom: 15px;
border-radius: 15px;
background: ${({ type }) => getColorFromType(type)};
`

// Pokédex-style numeric label, rendered faintly.
export const ID = styled.Text`
font-size: 15px;
font-weight: bold;
color: rgba(23, 23, 27, 0.3);
`

// Pokémon display name.
export const Name = styled.Text`
margin-bottom: 5px;
color: #fff;
font-size: 22px;
font-weight: bold;
`

// Horizontal row container for the type badges.
export const Types = styled.View`
flex-direction: row;
`

// Artwork image pinned to the card's bottom-right corner.
export const Pokemon = styled.Image`
width: 115px;
height: 115px;
position: absolute;
right: 5px;
bottom: 5px;
`

// Faint oversized pokéball watermark behind the artwork.
export const Pokeball = styled.Image`
opacity: 0.15;
width: 150px;
height: 150px;
position: absolute;
right: -15px;
bottom: -15px;
tint-color: #fff;
`
|
<gh_stars>1-10
package com.krrrr38.mackerel4s.model
import com.krrrr38.mackerel4s.model.Types.{ HostID, MonitorID, AlertID }
object AlertStatus {
  // Lookup table from the Mackerel API status strings to their ADT values.
  private val byName: Map[String, AlertStatus] = Map(
    "OK" -> AlertStatusOK,
    "CRITICAL" -> AlertStatusCritical,
    "WARNING" -> AlertStatusWarning,
    "UNKNOWN" -> AlertStatusUnknown)

  /** Parses an API status string; None for any unrecognized value. */
  def fromString(status: String): Option[AlertStatus] = byName.get(status)
}
/** Alert severity as reported by the Mackerel API. */
sealed trait AlertStatus {
  // Renders the status back to its exact API string — the inverse of
  // AlertStatus.fromString, so the two must stay in sync.
  override def toString = this match {
    case AlertStatusOK => "OK"
    case AlertStatusCritical => "CRITICAL"
    case AlertStatusWarning => "WARNING"
    case AlertStatusUnknown => "UNKNOWN"
  }
}

case object AlertStatusOK extends AlertStatus
case object AlertStatusCritical extends AlertStatus
case object AlertStatusWarning extends AlertStatus
case object AlertStatusUnknown extends AlertStatus

/**
 * A single Mackerel alert as returned by the alerts API.
 *
 * hostId/value/message/reason are optional because not every monitor type
 * populates them. closedAt is None while the alert is still open.
 * NOTE(review): openedAt/closedAt look like epoch timestamps in seconds —
 * confirm against the Mackerel API documentation.
 */
case class Alert(
  id: AlertID,
  status: AlertStatus,
  monitorId: MonitorID,
  `type`: MonitorType,
  hostId: Option[HostID],
  value: Option[Long],
  message: Option[String],
  reason: Option[String],
  openedAt: Int,
  closedAt: Option[Int]) extends APIResponse
|
#!/bin/sh
# Git pre-commit hook: run Prettier on the staged .js/.jsx files under src/
# and re-stage the reformatted files before the commit is recorded.
cwd="$(pwd)"
root="$(git rev-parse --show-toplevel)"
cd "$root" || exit 1
# Staged (Added/Copied/Modified) JS sources, newline list flattened to one line.
# NOTE(review): filenames containing spaces would break the xargs calls below —
# verify this cannot happen, or switch to `-z` / `xargs -0`.
jsfiles=$(git diff --cached --name-only --diff-filter=ACM "src/**.js" "src/**.jsx" | tr '\n' ' ')
# Nothing relevant staged: allow the commit immediately.
[ -z "$jsfiles" ] && exit 0
# Prettify all staged .js files
echo "$jsfiles" | xargs ./node_modules/.bin/prettier --write
# Add back the modified/prettified files to staging
echo "$jsfiles" | xargs git add
cd "$cwd" || exit 1
exit 0
|
#!/bin/bash
# Generate c?d.doc and cguru.doc from *.docsrc, from the docs/ directory.
# Contains some of their text content here too.
# Barnett 7/24/20.

# local expansions done before insertion
NU="nonuniform point"
NF="nonuniform frequency target"
CO=coordinates
LM="length M real array"
LN="length N real array"
PI="in [-3pi,3pi)"

# stage 1: flesh out *.docsrc (input i) to *.doc (output o)...
# Each line of the source is either passed through verbatim or, when it is
# (or contains) one of the @-tags below, replaced by canned documentation text.
for i in *.docsrc
do
  o=${i/.docsrc/.doc}
  #echo "$o"
  # create or overwrite output as 0-length file (not needed): echo -n "" > $o
  while IFS= read -r line; do
    # define all tags (case-sens) and their actions here:
    case $line in
      *@F*) # declarations: bash string replacement gets 4 combos...
        # (simple/vectorized "many") x (double/float) variants of one prototype.
        simp=${line//@F/finufft}
        many=${simp//\(/many\(int ntr, }    # insert new 1st arg; esc (
        echo "::"    # parsed-literal not good
        echo ""
        echo "$simp"
        simp=${simp//finufft/finufftf}
        echo "${simp//double/float}"
        echo ""
        echo "$many"
        many=${many//finufft/finufftf}
        echo "${many//double/float}"
        ;;
      *@G*) # guru declarations: only double/float variants, no "many" form.
        line=${line//@G/finufft}
        echo "::"
        echo ""
        echo "$line"
        line=${line//finufft/finufftf}    # catches both instances
        echo "${line//double/float}"
        ;;
      # rest are exact matches for whole line...
      # (note: the ""s below simply ends the quoted part so the next character
      # is not parsed as part of the preceding variable name)
      @t)
        echo ""
        echo "  Computes to precision eps, via a fast algorithm, one or more transforms of the form:"
        echo ""
        ;;
      @nt)
        echo "  ntr    how many transforms (only for vectorized \"many\" functions, else ntr=1)"
        ;;
      @mi)
        echo "  M      number of $NU sources"
        ;;
      @mo)
        echo "  M      number of $NU targets"
        ;;
      @n)
        echo "  N      number of $NF""s"
        ;;
      @xr)
        echo "  x      $NU""s in R ($LM)"
        ;;
      @x2r)
        echo "  x,y    $NU $CO in R^2 ($LM""s)"
        ;;
      @x3r)
        echo "  x,y,z  $NU $CO in R^3 ($LM""s)"
        ;;
      @x)
        echo "  x      $NU""s $PI ($LM)"
        ;;
      @x2)
        echo "  x,y    $NU $CO $PI""^2 ($LM""s)"
        ;;
      @x3)
        echo "  x,y,z  $NU $CO $PI""^3 ($LM""s)"
        ;;
      @s)
        echo "  s      $NF""s in R ($LN)"
        ;;
      @s2)
        echo "  s,t    $NF $CO in R^2 ($LN""s)"
        ;;
      @s3)
        echo "  s,t,u  $NF $CO in R^3 ($LN""s)"
        ;;
      @ci)
        echo "  c      source strengths (size M*ntr complex array)"
        ;;
      @co)
        echo "  c      values at $NU targets (size M*ntr complex array)"
        ;;
      @f)
        echo "  iflag  if >=0, uses +i in complex exponential, otherwise -i"
        ;;
      @e)
        echo "  eps    desired relative precision; smaller is slower. This can be chosen"
        echo "         from 1e-1 down to ~ 1e-14 (in double precision) or 1e-6 (in single)"
        ;;
      @o)
        echo "  opts   pointer to options struct (see opts.rst), or NULL for defaults"
        ;;
      @r)
        echo "  return value  0: success, 1: success but warning, >1: error (see error.rst)"
        ;;
      @no)
        echo ""
        echo "  Notes:"
        echo "  * complex arrays interleave Re, Im values, and their size is stated with"
        echo "    dimensions ordered fastest to slowest."
        ;;
      @notes12)  # specific to type 1 & 2
        echo "  * Fourier frequency indices in each dimension i are the integers lying"
        echo "    in [-Ni/2, (Ni-1)/2]. See above, and modeord in opts.rst for possible orderings."
        ;;
      *)
        # all else is passed through
        echo "$line"
        ;;
    esac
  done < $i | fold -s -w 90 | sed -e '/::/! s/^/    /' > $o
  # (note sneaky use of pipes above, filters lines from $i, output to $o,
  # also wraps and adds initial space unless line has ::, to get .rst right)
  # sed -e '/../!s/^/  /'
done
# debug note: to debug, best to echo "$stuff" 1>&2 so it goes to stderr.
|
<reponame>glameyzhou/training
package org.glamey.training.codes.hash.consistent;
/**
* 节点相关信息
*
* @author yang.zhou 2019.11.04.17
*/
/**
 * Describes one shard (node) participating in consistent hashing.
 * <p>
 * Subclasses supply a name identifying the node and a factory for the
 * resource {@code R} (e.g. a client or connection) backing it.
 *
 * @param <R> type of the resource created for this shard
 */
public abstract class ShardInfo<R> {
    // Relative weight of this node. NOTE(review): presumably scales the
    // node's share of virtual nodes on the hash ring — confirm against the
    // ring construction code.
    private final int weight;

    public ShardInfo(int weight) {
        this.weight = weight;
    }

    /** @return this shard's relative weight */
    public int getWeight() {
        return weight;
    }

    /** @return the identifying name of this shard */
    public abstract String getName();

    /** Creates the underlying resource (e.g. client/connection) for this shard. */
    public abstract R createResource();
}
|
-- Names of employees who worked overtime (strictly more than 40 hours).
SELECT name
FROM employees
WHERE hours_worked > 40;
|
# Build and install expect 5.45 from source (BLFS-style recipe).
# -e aborts on any failing command; -o nounset rejects unset variables,
# so the unchained `make` below still stops the script on failure.
set -eo nounset

cd /sources

# Download the tarball only if it is not already cached in /sources.
test -f expect5.45.tar.gz || \
wget --no-check-certificate \
https://downloads.sourceforge.net/expect/expect5.45.tar.gz

# Always unpack into a fresh source tree.
rm -rf expect5.45
tar xf expect5.45.tar.gz
pushd expect5.45
./configure --prefix=/usr \
--with-tcl=/usr/lib \
--enable-shared \
--mandir=/usr/share/man \
--with-tclinclude=/usr/include &&
make
# Expose the versioned shared library at the standard library path.
make install &&
ln -svf expect5.45/libexpect5.45.so /usr/lib
popd
# Clean up the build tree; the tarball is kept for future runs.
rm -rf expect5.45
|
<gh_stars>0
// Attach jQuery-validate rules to the department edit form: department name
// and sort order are both mandatory. On successful validation the form is
// not submitted natively; update() posts the data via ajax instead.
$("#form-dept-edit").validate({
    rules:{
        deptName:{
            required:true,
        },
        orderNum:{
            required:true,
        },
    },
    submitHandler:function(form){
        update();
    }
});
// Collects the department edit form's fields and posts them to the save
// endpoint. Shows a toast and closes the modal on success, or an error
// dialog on failure. Uses a synchronous request, matching the original.
function update() {
    // Helper: read the value of the form input with the given name.
    var readField = function (name) {
        return $("input[name='" + name + "']").val();
    };

    var payload = {
        "deptId": readField("deptId"),
        "parentId": readField("parentId"),
        "deptName": readField("deptName"),
        "leader": readField("leader"),
        "phone": readField("phone"),
        "orderNum": readField("orderNum"),
        "email": readField("email"),
        // checked -> 0, unchecked -> 1 (same mapping as before)
        "status": $("input[name='status']").is(':checked') ? 0 : 1
    };

    $.ajax({
        cache : true,
        type : "POST",
        url : "/system/dept/save",
        data : payload,
        async : false,
        error : function(request) {
            $.modalAlert("系统错误", "error");
        },
        success : function(data) {
            if (data.code == 0) {
                parent.layer.msg('修改成功',{icon:1,time:1000});
                $.modalClose();
                parent.loading();
            } else {
                $.modalAlert(data.msg, "error");
            }
        }
    });
}
|
#!/usr/bin/bash
# Build-time package installation for a TripleO-based container image.
# Expects PKGS_LIST (and optionally EXTRA_PKGS_LIST / PATCH_LIST) to name
# files under /tmp listing packages / patches, one item per line.
set -euxo pipefail

# Keep the image small: skip weak dependencies and documentation.
echo "install_weak_deps=False" >> /etc/dnf/dnf.conf
# Tell RPM to skip installing documentation
echo "tsflags=nodocs" >> /etc/dnf/dnf.conf

dnf install -y python3 python3-requests epel-release 'dnf-command(config-manager)'
# EPEL stays configured but disabled; it is enabled per-install below.
dnf config-manager --set-disabled epel
# Configure the current-tripleo repos via the upstream helper script.
curl https://raw.githubusercontent.com/openstack/tripleo-repos/master/tripleo_repos/main.py | python3 - -b master current-tripleo --no-stream
dnf upgrade -y
# Install the main package list (xargs -r: no-op if the list is empty).
xargs -rtd'\n' dnf install -y < /tmp/${PKGS_LIST}
if [[ ! -z ${EXTRA_PKGS_LIST:-} ]]; then
    if [[ -s /tmp/${EXTRA_PKGS_LIST} ]]; then
        xargs -rtd'\n' dnf install -y < /tmp/${EXTRA_PKGS_LIST}
    fi
fi
dnf install -y --enablerepo=epel inotify-tools
# Trim caches so they do not bloat the image layer.
dnf clean all
rm -rf /var/cache/{yum,dnf}/*
# Apply image patches when a non-empty patch list was provided.
if [[ ! -z ${PATCH_LIST:-} ]]; then
    if [[ -s "/tmp/${PATCH_LIST}" ]]; then
        /bin/patch-image.sh;
    fi
fi
rm -f /bin/patch-image.sh
|
<reponame>ErikWegner/imoin<gh_stars>1-10
describe('options html', () => {
const getFormTextValueStub = sinon.stub(window, 'getFormTextValue');
const getCheckboxValueStub = sinon.stub(window, 'getCheckboxValue');
const setCheckboxValueStub = sinon.stub(window, 'setCheckboxValue');
const documentQuerySelectorStub = sinon.stub(document, 'querySelector');
const documentGetElementByIdStub = sinon.stub(document, 'getElementById');
const saveOptionsSpy = sinon.spy(window, 'saveOptions');
const updateDOMforFiltersSpy = sinon.spy(window, 'updateDOMforFilters');
const updateDOMforPanelFieldsetSpy = sinon.spy(window, 'updateDOMforPanelFieldset');
const documentGetElementsByClassNameStub = sinon.stub(document, 'getElementsByClassName');
const filterOptionsNames = [
'filterOutAcknowledged',
'filterOutSoftStates',
'filterOutDisabledNotifications',
'filterOutDisabledChecks',
'filterOutServicesOnDownHosts',
'filterOutServicesOnAcknowledgedHosts',
'filterOutDowntime',
];
const otherCheckboxNames = [
'inlineresults',
]
const numberOfFilterOptions = filterOptionsNames.length;
beforeEach(() => {
// Re-Init global variables
instances = [];
selectedInstance = -1;
// Stub browser interactions
updateDOMforInstances = sinon.spy();
// Reset all spies and stubs
saveOptionsSpy.resetHistory();
updateDOMforFiltersSpy.resetHistory();
updateDOMforPanelFieldsetSpy.resetHistory();
getFormTextValueStub.reset();
getCheckboxValueStub.reset();
setCheckboxValueStub.reset();
documentQuerySelectorStub.reset();
documentGetElementsByClassNameStub.reset();
documentGetElementByIdStub.withArgs('fontsize').returns({ value: "100" });
documentGetElementByIdStub.withArgs('paneldesign1').returns({ checked: false });
documentQuerySelectorStub.withArgs('input[name = "paneldesign"]:checked').returns({ value: 1 });
loadOptions = sinon.stub().resolves({ instance: createInstance('Unit test default') });
port = {
postMessage: sinon.spy(),
disconnect: sinon.spy()
}
host = {
storage: {
local: {
set: sinon.spy()
}
},
runtime: {
connect: function () {
return port;
}
}
};
});
it('should add instance', () => {
const l = instances.length;
addInstance();
expect(instances.length).toBe(l + 1);
expect(selectedInstance).toBe(l);
expect(updateDOMforInstances.calledOnce).toBe(true);
});
it('should add three instances', () => {
const l = instances.length;
addInstance();
addInstance();
addInstance();
expect(instances.length).toBe(l + 3);
expect(selectedInstance).toBe(2);
expect(updateDOMforInstances.calledThrice).toBe(true);
});
it('should update instance', () => {
// create 4 instances
for (let i = 0; i < 4; i++) { addInstance(); }
// select instance 2
selectedInstance = 1;
let callCounter = 0;
getFormTextValueStub.callsFake((selector, defaultValue) => {
callCounter++;
if ('#timerPeriod' === selector) {
return '17'
}
return "Call " + selector;
});
updateInstance();
const probe = instances[selectedInstance];
expect(probe.instancelabel).toBe('Call #instancelabel');
expect(probe.timerPeriod).toBe(17);
expect(probe.icingaversion).toBe('Call #icingaversion');
expect(probe.url).toBe('Call #url');
expect(probe.username).toBe('Call #username');
expect(probe.password).toBe('<PASSWORD>');
});
it('should not remove last instance', () => {
const l1 = instances.length;
addInstance();
const l2 = instances.length;
updateDOMforInstances.resetHistory(); // addInstance will call updateDOM
removeInstance();
const l3 = instances.length;
expect(l1).toBe(0); // no instance when the test starts
expect(l2).toBe(1); // one instance added
expect(l3).toBe(1); // still one instance available
expect(updateDOMforInstances.notCalled).toBe(true);
});
it('should remove selected instance', () => {
// create 4 instances
for (let i = 0; i < 4; i++) { addInstance(); }
// select instance 2
selectedInstance = 1;
const removedInstance = instances[selectedInstance];
updateDOMforInstances.resetHistory(); // addInstance will call updateDOM
removeInstance();
expect(instances.indexOf(removedInstance)).toBe(-1);
expect(updateDOMforInstances.calledOnce).toBe(true);
});
it('should remove last instance and update selectedInstance', () => {
// create 4 instances
for (let i = 0; i < 4; i++) { addInstance(); }
// select instance 2
const l1 = selectedInstance;
const removedInstance = instances[selectedInstance];
updateDOMforInstances.resetHistory(); // addInstance will call updateDOM
removeInstance();
expect(instances.indexOf(removedInstance)).toBe(-1);
expect(l1).toBe(3);
expect(selectedInstance).toBe(2);
});
it('should restore options and update DOM', () => {
return restoreOptions().then(() => {
expect(updateDOMforInstances.calledOnce).toBe(true);
});
});
it('should restore with null argument', () => {
loadOptions = sinon.stub().resolves(null);
return restoreOptions().then(() => {
expect(selectedInstance).toBe(0);
expect(instances.length).toBe(1);
expect(instances[0].instancelabel).toBe('Default');
});
});
it('should restore with null value', () => {
loadOptions = sinon.stub().resolves({ instances: null });
return restoreOptions().then(() => {
expect(selectedInstance).toBe(0);
expect(instances.length).toBe(1);
expect(instances[0].instancelabel).toBe('Default');
});
});
it('should restore with empty storage', () => {
loadOptions = sinon.stub().resolves({ instances: '[]' });
return restoreOptions().then(() => {
expect(selectedInstance).toBe(0);
expect(instances.length).toBe(1);
expect(instances[0].instancelabel).toBe('Default');
});
});
it('should restore with filled storage', () => {
const r = [createInstance('should restore with filled storage instance')];
loadOptions = sinon.stub().resolves({ instances: JSON.stringify(r) });
return restoreOptions().then(() => {
expect(selectedInstance).toBe(0);
expect(instances.length).toBe(1);
expect(instances[0].instancelabel).toBe('should restore with filled storage instance');
});
});
it('should restore 4 instances', () => {
loadOptions = sinon.stub().resolves({ instances: '[{"instancelabel":"Instance 0","timerPeriod":5,"icingaversion":"cgi","url":"","username":"","password":""},{"instancelabel":"Instance 1","timerPeriod":5,"icingaversion":"cgi","url":"","username":"","password":""},{"instancelabel":"Instance 2","timerPeriod":5,"icingaversion":"cgi","url":"","username":"","password":""},{"instancelabel":"Instance 3","timerPeriod":5,"icingaversion":"cgi","url":"","username":"","password":""}]' });
return restoreOptions().then(() => {
expect(selectedInstance).toBe(0);
expect(instances.length).toBe(4);
expect(instances[2].instancelabel).toBe('Instance 2');
});
});
it('should save instances object', () => {
const setSpy = host.storage.local.set;
// create 4 instances
for (let i = 0; i < 4; i++) { addInstance(); }
saveOptions();
expect(setSpy.callCount).toBe(1);
const arg = setSpy.args[0];
expect(arg[0].instances).toBe(JSON.stringify(instances));
});
filterOptionsNames.forEach((optionname, index) => {
it('should save ' + optionname + ' value', () => {
const setSpy = host.storage.local.set;
addInstance();
const elementId = '#' + optionname;
getCheckboxValueStub.withArgs(elementId).returns(1);
saveOptions();
expect(setSpy.callCount).toBe(1);
expect(getCheckboxValueStub.callCount).toBe(numberOfFilterOptions + otherCheckboxNames.length);
expect(getCheckboxValueStub.args[index][0]).toBe(elementId);
const arg = setSpy.args[0];
const i = JSON.parse(arg[0].instances);
expect(i.length).toBe(1);
expect(i[0].filtersettings).toBeDefined();
expect(i[0].filtersettings[optionname]).toBe(true);
});
});
it('should restore options and update DOM for filters', () => {
return restoreOptions().then(() => {
expect(updateDOMforFilters.calledOnce).toBe(true);
});
});
it('should restore panel settings and update DOM', () => {
return restoreOptions().then(() => {
expect(updateDOMforPanelFieldset.calledOnce).toBe(true);
});
});
filterOptionsNames.forEach((optionname, index) => {
it('should set ' + optionname + ' checkbox when updating DOM for filters', () => {
const a0 = setCheckboxValueStub.callCount;
const elementId = '#' + optionname;
updateDOMforFilters();
expect(a0).toBe(0);
expect(setCheckboxValueStub.callCount).toBe(numberOfFilterOptions);
expect(setCheckboxValueStub.args[index][0]).toBe(elementId);
});
});
/**
* getFormTextValue uses _selector_ to find the element
*/
it('should get form text value', () => {
const selector = '#jfdsalkfds98';
getFormTextValueStub.callThrough();
documentQuerySelectorStub.onFirstCall().returns('p');
getFormTextValue(selector, 'k');
expect(documentQuerySelectorStub.callCount).toBe(1);
expect(documentQuerySelectorStub.args[0][0]).toBe(selector);
});
/**
* getFormTextValue returns value
*/
it('should get form text value', () => {
const selector = '#jfdsalkfds98';
getFormTextValueStub.callThrough();
documentQuerySelectorStub.onFirstCall().returns({ value: null });
const result = getFormTextValue(selector, 'k');
expect(documentQuerySelectorStub.callCount).toBe(1);
expect(result).toBe('k');
});
it('should get 1 (true) value for checked checkbox', () => {
const selector = '#box1';
getCheckboxValueStub.callThrough();
documentQuerySelectorStub.onFirstCall().returns({ checked: true });
const result = getCheckboxValue(selector, 5);
expect(result).toBe(1);
});
it('should get 0 (false) value for unchecked checkbox', () => {
const selector = '#box1';
getCheckboxValueStub.callThrough();
documentQuerySelectorStub.onFirstCall().returns({ checked: false });
const result = getCheckboxValue(selector, 8);
expect(result).toBe(0);
});
it('should get default value for not existing checkbox', () => {
const selector = '#box1';
getCheckboxValueStub.callThrough();
documentQuerySelectorStub.onFirstCall().returns(null);
const result = getCheckboxValue(selector, 7);
expect(result).toBe(7);
});
it('should update selected instance', () => {
// create 4 instances
for (let i = 0; i < 4; i++) { addInstance(); }
const sel1 = selectedInstance;
selectionChanged({ target: { value: '2' } });
const sel2 = selectedInstance;
// last added instance is selected
expect(sel1).toBe(3);
// selected instance has changed
expect(sel2).toBe(2);
});
it('should update dom after selected instance changes', () => {
// create 4 instances
for (let i = 0; i < 4; i++) { addInstance(); }
const dc1 = updateDOMforInstances.callCount;
selectionChanged({ target: { value: '2' } });
const dc2 = updateDOMforInstances.callCount;
// adding an instance calls updateDOM
expect(dc1).toBe(4);
// changing selected instance triggers updateDOM
expect(dc2).toBe(5);
});
it('should update instance on saving options', () => {
const setSpy = host.storage.local.set;
// create 4 instances
for (let i = 0; i < 4; i++) { addInstance(); }
// select instance 2
selectedInstance = 1;
getFormTextValueStub.callsFake((selector, defaultValue) => {
if ('#timerPeriod' === selector) {
return '17'
}
return "Call " + selector;
});
updateSettings();
expect(setSpy.callCount).toBe(1);
const arg = setSpy.args[0];
const emptyFiltersettings = {};
filterOptionsNames.forEach(
(optionname) => {
emptyFiltersettings[optionname] = false;
});
expect(arg[0].instances).toBe(JSON.stringify(
[
{
"instancelabel": "Instance 0",
"timerPeriod": 5,
"icingaversion": "api1",
"url": "",
"username": "",
"password": "",
filtersettings: emptyFiltersettings
}, {
"instancelabel": "Call #instancelabel",
"timerPeriod": 17,
"icingaversion": "Call #icingaversion",
"url": "Call #url",
"username": "Call #username",
"password": "<PASSWORD>",
filtersettings: emptyFiltersettings
}, {
"instancelabel": "Instance 2",
"timerPeriod": 5,
"icingaversion": "api1",
"url": "",
"username": "",
"password": "",
filtersettings: emptyFiltersettings
}, {
"instancelabel": "Instance 3",
"timerPeriod": 5,
"icingaversion": "api1",
"url": "",
"username": "",
"password": "",
filtersettings: emptyFiltersettings
}]));
});
describe('Sound selection', () => {
beforeEach(() => {
documentGetElementsByClassNameStub.reset();
});
it('should provide functions', () => {
expect(window.SoundFileSelectors).toBeDefined();
expect(typeof (window.SoundFileSelectors.setFiles)).toBe('function');
expect(typeof (window.SoundFileSelectors.getFiles)).toBe('function');
expect(typeof (window.SoundFileSelectors.init)).toBe('function');
});
/** When querying for dom elements, return something usable */
function fakeSoundFileSelectors(arr) {
documentGetElementsByClassNameStub
.withArgs('soundfileselector')
.returns({
length: arr.length,
item: (index) => {
return {
getAttribute: (attributeName) => {
if (attributeName == 'data-soundevent') {
return arr[index];
}
},
appendChild: () => { }
}
}
});
SoundFileSelectors.init();
}
it('should set and get files', () => {
const filedata = {
'a': {
id: 'a',
filename: 'FILE-A',
data: '1234'
},
'x': {
id: 'x',
filename: 'FILE-X',
data: '1a2b'
}
};
fakeSoundFileSelectors(['a', 'x']);
window.SoundFileSelectors.init();
window.SoundFileSelectors.setFiles(filedata);
const r = window.SoundFileSelectors.getFiles();
expect(r).toEqual(filedata);
});
it('should update filenameTextNode', () => {
const c = new SoundFileSelectorControl('a');
const m = { textContent: '' };
c.setUIFilename(m);
c.setFilename('jewp');
expect(m.textContent).toBe('jewp');
});
it('should update filenameTextNode on restore', () => {
const c = new SoundFileSelectorControl('a');
const m = { textContent: 'gfdg8' };
c.setUIFilename(m);
c.restore({ filename: '5g9d', data: 'l' })
expect(m.textContent).toBe('5g9d');
});
it('should update filename on delete', () => {
const c = new SoundFileSelectorControl('a');
const m = { textContent: 'kdlsa' };
c.setUIFilename(m);
c.setFilename('jewp');
c.deleteFile();
expect(m.textContent).toBe(SoundFileSelectorControl.noFileSetText);
});
it('should update filedata', () => {
const c = new SoundFileSelectorControl('a');
c.filedata = 'jkluoi';
const r = c.toObject();
expect(r.data).toBe('jkluoi');
});
it('should update filedata on restore', () => {
const c = new SoundFileSelectorControl('a');
c.restore({ filename: '5g9d', data: 'djlsajdlsal' });
const r = c.toObject();
expect(r.data).toBe('djlsajdlsal');
});
it('should respond false to hasAudioData', () => {
const c = new SoundFileSelectorControl('a');
expect(c.hasAudioData).toBeFalsy();
});
it('should respond true to hasAudioData', () => {
const c = new SoundFileSelectorControl('a');
c.filedata = 'abbd';
expect(c.hasAudioData).toBeTruthy();
});
it('should respond false to hasAudioData after delete', () => {
const c = new SoundFileSelectorControl('a');
c.filedata = 'abbd';
c.deleteFile();
expect(c.hasAudioData).toBeFalsy();
});
it('should restore settings from options', () => {
loadOptions = sinon.stub().resolves({ sounds: '{"sGREEN":{"id":"sGREEN","filename":"","data":null},"xRED":{"id":"xRED","filename":"","data":null}}' });
const stub = sinon.stub(SoundFileSelectors, 'setFiles');
return restoreOptions().then(() => {
expect(stub.callCount).toBe(1);
expect(stub.args[0][0]).toEqual({ sGREEN: Object({ id: 'sGREEN', filename: '', data: null }), xRED: Object({ id: 'xRED', filename: '', data: null }) });
stub.restore();
});
});
it('should update empty settings from options', () => {
const setSpy = host.storage.local.set;
addInstance();
getFormTextValueStub.callsFake((selector, defaultValue) => {
if ('#timerPeriod' === selector) {
return '17'
}
return "Call " + selector;
});
fakeSoundFileSelectors(['sGREEN', 'xRED']);
updateSettings();
expect(setSpy.callCount).toBe(1);
const arg = setSpy.args[0];
expect(arg[0].sounds).toEqual(JSON.stringify({ sGREEN: Object({ id: 'sGREEN', filename: '', data: null }), xRED: Object({ id: 'xRED', filename: '', data: null }) }));
});
});
});
|
from lxml import etree

# Build a tiny <table> document containing two <column> definitions.
root = etree.Element('table')

# Add one <column name="..."> child per column.
for column_name in ('FirstName', 'LastName'):
    column = etree.SubElement(root, 'column')
    column.set('name', column_name)

# Emit the whole document, indented for readability.
print(etree.tostring(root, pretty_print=True).decode())
|
<gh_stars>0
import { routeInfo } from '../../router';

export default {
  data() {
    return {
      // Routes offered as navigation targets (everything except the current page).
      routeInfo: [],
    };
  },
  methods: {
    // Jump to the route stored at position `idx` of routeInfo.
    navigate(idx) {
      this.$router.push(this.routeInfo[idx].path);
    },
  },
  mounted() {
    // Offer every configured route except the one we are already on.
    this.routeInfo = this.$router.options.routes.filter(
      (route) => route.path !== this.$route.path,
    );
  },
};
|
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail

## Get the directory of the build script
scriptDir=$(realpath $(dirname "${BASH_SOURCE[0]}"))
## cd to the parent directory, i.e. the root of the git repo
cd ${scriptDir}/..

# include common functions
source ${scriptDir}/common.sh

# Print out Maven & Java version
mvn -version
echo ${JOB_TYPE}

# attempt to install 3 times with exponential backoff (starting with 10 seconds)
retry_with_backoff 3 10 \
  mvn install -B -V -ntp \
    -DskipTests=true \
    -Dclirr.skip=true \
    -Denforcer.skip=true \
    -Dmaven.javadoc.skip=true \
    -Dgcloud.download.skip=true \
    -T 1C

# if GOOGLE_APPLICATION_CREDENTIALS is specified as a relative path, prepend Kokoro root directory onto it
if [[ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" && "${GOOGLE_APPLICATION_CREDENTIALS}" != /* ]]; then
  export GOOGLE_APPLICATION_CREDENTIALS=$(realpath ${KOKORO_GFILE_DIR}/${GOOGLE_APPLICATION_CREDENTIALS})
fi

# Disable errexit from here on: each job's exit status is captured in
# RETURN_CODE so logs/flakybot reporting below still run before exiting.
RETURN_CODE=0
set +e

# Dispatch on the Kokoro job type; unknown types fall through as success.
case ${JOB_TYPE} in
test)
    mvn test -B -Dclirr.skip=true -Denforcer.skip=true
    RETURN_CODE=$?
    ;;
lint)
    mvn com.coveo:fmt-maven-plugin:check
    RETURN_CODE=$?
    ;;
javadoc)
    mvn javadoc:javadoc javadoc:test-javadoc
    RETURN_CODE=$?
    ;;
integration)
    mvn -B ${INTEGRATION_TEST_ARGS} \
      -ntp \
      -Penable-integration-tests \
      -DtrimStackTrace=false \
      -Dclirr.skip=true \
      -Denforcer.skip=true \
      -fae \
      verify
    RETURN_CODE=$?
    ;;
graalvm)
    # Run Unit and Integration Tests with Native Image
    mvn -ntp -Pnative -Penable-integration-tests test
    RETURN_CODE=$?
    ;;
samples)
    SAMPLES_DIR=samples
    # only run ITs in snapshot/ on presubmit PRs. run ITs in all 3 samples/ subdirectories otherwise.
    if [[ ! -z ${KOKORO_GITHUB_PULL_REQUEST_NUMBER} ]]
    then
      SAMPLES_DIR=samples/snapshot
    fi
    if [[ -f ${SAMPLES_DIR}/pom.xml ]]
    then
        # Source every per-sample secrets file provided by Kokoro, if any.
        for FILE in ${KOKORO_GFILE_DIR}/secret_manager/*-samples-secrets; do
          [[ -f "$FILE" ]] || continue
          source "$FILE"
        done

        pushd ${SAMPLES_DIR}
        mvn -B \
          -ntp \
          -DtrimStackTrace=false \
          -Dclirr.skip=true \
          -Denforcer.skip=true \
          -fae \
          verify
        RETURN_CODE=$?
        popd
    else
        echo "no sample pom.xml found - skipping sample tests"
    fi
    ;;
clirr)
    mvn -B -Denforcer.skip=true clirr:check
    RETURN_CODE=$?
    ;;
*)
    ;;
esac

if [ "${REPORT_COVERAGE}" == "true" ]
then
  bash ${KOKORO_GFILE_DIR}/codecov.sh
fi

# fix output location of logs
bash .kokoro/coerce_logs.sh

# Report flaky-test results to flakybot when enabled for this job.
if [[ "${ENABLE_FLAKYBOT}" == "true" ]]
then
    chmod +x ${KOKORO_GFILE_DIR}/linux_amd64/flakybot
    ${KOKORO_GFILE_DIR}/linux_amd64/flakybot -repo=googleapis/java-assured-workloads
fi

# Propagate the captured per-job status as the script's exit code.
echo "exiting with ${RETURN_CODE}"
exit ${RETURN_CODE}
|
package com.padcmyanmar.charleskeith.activities;
import android.content.Intent;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.GridLayoutManager;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.LinearLayout;
import com.padcmyanmar.charleskeith.R;
import com.padcmyanmar.charleskeith.adapters.NewDetailsAdapter;
import com.padcmyanmar.charleskeith.adapters.NewsInAdapter;
import com.padcmyanmar.charleskeith.data.vos.GetNewsVO;
import com.padcmyanmar.charleskeith.delegates.CharlesNewsDelegate;
import butterknife.BindView;
public class NewsDetailsActivity extends BaseActivity implements CharlesNewsDelegate
{
    // Adapter backing the details list; kept as a field so data can be pushed
    // to it later (currently only assigned once in onCreate).
    private NewDetailsAdapter charlesNewAdapter;

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_new_in_details);

        // Vertical list of news details. LinearLayout.VERTICAL happens to equal
        // LinearLayoutManager.VERTICAL, so this works, though the
        // LinearLayoutManager constant would be the conventional choice.
        RecyclerView rvDetailsNewsIn = findViewById(R.id.rv_news_in_detail);
        charlesNewAdapter = new NewDetailsAdapter(this);
        rvDetailsNewsIn.setAdapter(charlesNewAdapter);
        rvDetailsNewsIn.setLayoutManager(new LinearLayoutManager(getApplicationContext(), LinearLayout.VERTICAL,false));

        // Back button launches a fresh NewInActivity rather than calling
        // finish(). NOTE(review): if this screen is always entered from
        // NewInActivity, this grows the back stack on every tap - confirm
        // whether finish() was intended here.
        ImageView btnBack=findViewById(R.id.btn_back);
        btnBack.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                Intent intent=new Intent(NewsDetailsActivity.this,NewInActivity.class);
                startActivity(intent);
            }
        });
    }

    // CharlesNewsDelegate callbacks - no-ops on this screen.
    @Override
    public void onTapNews(GetNewsVO newsVO) {
    }

    @Override
    public void onTapDetailNews() {
    }
}
|
#!/bin/bash
# Build the runtime crate to WebAssembly and post-process it for Substrate.
# Fail fast: without this, wasm-build would run (and could package a stale
# artifact) even when the cargo build fails.
set -euo pipefail

# Compile in release mode for the wasm32 target, including the feature that
# emits the API description used by downstream tooling.
cargo build --release --features generate-api-description --target=wasm32-unknown-unknown

# Package the compiled wasm for the Substrate runtime and keep a raw copy of
# the deployed artifact for inspection.
wasm-build target {{project-name}} --target-runtime=substrate --final={{project-name}} --save-raw=./target/{{project-name}}-deployed.wasm --target wasm32-unknown-unknown
|
<reponame>Rayissach/news-2-you
// Root router: serves the landing page.
const express = require("express");
const router = express.Router();
const db = require("../models");
// const controller = require('../')

// GET / - render the index view.
router.get("/", (req, res) => {
  res.render("index", { title: 'Express' });
});

// Kept for reference (not yet wired up):
// router.get("/articles", (req, res) => res.render("index"));
// router.post("/articles", (req, res) => res.render('index'));

module.exports = router;
|
#! /bin/bash

# Run every tests/in<N>.v<2|3> case through run.sh and diff its output
# against the matching tests/out<N>.v<2|3>. Output directories of passing
# tests are deleted; failing tests keep theirs for inspection.
#
# Fixes over the previous revision: the deprecated `[ ... -a ... ]` test
# operator is replaced with `[[ ... && ... ]]`, and variable expansions
# ($1, ${OUT}, loop bound) are quoted so paths with unusual characters
# cannot word-split or glob.

CMD="./run.sh"

# Exactly one argument selecting the format version is required.
if [ $# != 1 ]; then
  echo "Usage: ${0} [--v2|--v3]" >&2
  exit 1
elif [[ "${1}" != "--v2" && "${1}" != "--v3" ]]; then
  echo "Usage: ${0} [--v2|--v3]" >&2
  exit 1
fi

version=${1#*v}   # "--v2" -> "2"

# Number of input test directories for the selected version.
NB_TESTS=$(find tests -maxdepth 1 -type d -regex '.*/in[0-9]*\.v'"${version}"'$' | wc -l)

mkdir -p tests_tmp
for i in $(seq 1 "${NB_TESTS}"); do
  IN="tests/in${i}.v${version}"
  REFERENCE_OUT="tests/out${i}.v${version}"
  OUT=$(mktemp --tmpdir=. -d tests_tmp/out.XXX)
  echo "${IN}"
  ${CMD} "--v${version}" -i "${IN}" -o "${OUT}"
  # Compare recursively, ignoring README files, blank lines and whitespace.
  diff -x README -ruBw "${REFERENCE_OUT}" "${OUT}"
  ret_diff=$?
  if [ "${ret_diff}" -ne 0 ]; then
    echo "FAILED: ${IN}"
  else
    rm -r "${OUT}"
  fi
done
|
/**
* Copyright (c) 2000-2013 Liferay, Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the Free
* Software Foundation; either version 2.1 of the License, or (at your option)
* any later version.
*
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*/
package org.politaktiv.map.infrastructure.model.impl;
import com.liferay.portal.kernel.util.StringBundler;
import com.liferay.portal.kernel.util.StringPool;
import com.liferay.portal.model.CacheModel;
import org.politaktiv.map.infrastructure.model.Picture;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
/**
* The cache model class for representing Picture in entity cache.
*
* @author eichi
* @see Picture
* @generated
*/
public class PictureCacheModel implements CacheModel<Picture>, Externalizable {
    /**
     * Renders the cached fields as "{key=value, ...}" for logging/debugging.
     * Capacity 33 = 16 field-name prefixes + 16 values + closing brace.
     */
    @Override
    public String toString() {
        StringBundler sb = new StringBundler(33);

        sb.append("{pictureId=");
        sb.append(pictureId);
        sb.append(", companyId=");
        sb.append(companyId);
        sb.append(", groupId=");
        sb.append(groupId);
        sb.append(", userId=");
        sb.append(userId);
        sb.append(", name=");
        sb.append(name);
        sb.append(", description=");
        sb.append(description);
        sb.append(", referenceUrl=");
        sb.append(referenceUrl);
        sb.append(", backgroundId=");
        sb.append(backgroundId);
        sb.append(", rotation=");
        sb.append(rotation);
        sb.append(", width=");
        sb.append(width);
        sb.append(", height=");
        sb.append(height);
        sb.append(", resolution=");
        sb.append(resolution);
        sb.append(", ocupacy=");
        sb.append(ocupacy);
        sb.append(", longitude=");
        sb.append(longitude);
        sb.append(", latitude=");
        sb.append(latitude);
        sb.append(", fileEntryUuid=");
        sb.append(fileEntryUuid);
        sb.append("}");

        return sb.toString();
    }

    /**
     * Rehydrates a {@link Picture} entity from this cache model.
     * Null string fields are materialized as {@code StringPool.BLANK} so the
     * resulting entity never carries null strings.
     */
    @Override
    public Picture toEntityModel() {
        PictureImpl pictureImpl = new PictureImpl();

        pictureImpl.setPictureId(pictureId);
        pictureImpl.setCompanyId(companyId);
        pictureImpl.setGroupId(groupId);
        pictureImpl.setUserId(userId);

        if (name == null) {
            pictureImpl.setName(StringPool.BLANK);
        }
        else {
            pictureImpl.setName(name);
        }

        if (description == null) {
            pictureImpl.setDescription(StringPool.BLANK);
        }
        else {
            pictureImpl.setDescription(description);
        }

        if (referenceUrl == null) {
            pictureImpl.setReferenceUrl(StringPool.BLANK);
        }
        else {
            pictureImpl.setReferenceUrl(referenceUrl);
        }

        pictureImpl.setBackgroundId(backgroundId);
        pictureImpl.setRotation(rotation);
        pictureImpl.setWidth(width);
        pictureImpl.setHeight(height);
        pictureImpl.setResolution(resolution);
        pictureImpl.setOcupacy(ocupacy);
        pictureImpl.setLongitude(longitude);
        pictureImpl.setLatitude(latitude);

        if (fileEntryUuid == null) {
            pictureImpl.setFileEntryUuid(StringPool.BLANK);
        }
        else {
            pictureImpl.setFileEntryUuid(fileEntryUuid);
        }

        // Treat the freshly built entity's state as its baseline for dirty checking.
        pictureImpl.resetOriginalValues();

        return pictureImpl;
    }

    /**
     * Deserializes fields in the exact order written by
     * {@link #writeExternal(ObjectOutput)}; the two methods must stay in sync.
     */
    @Override
    public void readExternal(ObjectInput objectInput) throws IOException {
        pictureId = objectInput.readLong();
        companyId = objectInput.readLong();
        groupId = objectInput.readLong();
        userId = objectInput.readLong();
        name = objectInput.readUTF();
        description = objectInput.readUTF();
        referenceUrl = objectInput.readUTF();
        backgroundId = objectInput.readLong();
        rotation = objectInput.readLong();
        width = objectInput.readDouble();
        height = objectInput.readDouble();
        resolution = objectInput.readDouble();
        ocupacy = objectInput.readDouble();
        longitude = objectInput.readDouble();
        latitude = objectInput.readDouble();
        fileEntryUuid = objectInput.readUTF();
    }

    /**
     * Serializes fields in a fixed order. {@code writeUTF} cannot encode null,
     * hence null strings are written as {@code StringPool.BLANK}.
     */
    @Override
    public void writeExternal(ObjectOutput objectOutput)
        throws IOException {
        objectOutput.writeLong(pictureId);
        objectOutput.writeLong(companyId);
        objectOutput.writeLong(groupId);
        objectOutput.writeLong(userId);

        if (name == null) {
            objectOutput.writeUTF(StringPool.BLANK);
        }
        else {
            objectOutput.writeUTF(name);
        }

        if (description == null) {
            objectOutput.writeUTF(StringPool.BLANK);
        }
        else {
            objectOutput.writeUTF(description);
        }

        if (referenceUrl == null) {
            objectOutput.writeUTF(StringPool.BLANK);
        }
        else {
            objectOutput.writeUTF(referenceUrl);
        }

        objectOutput.writeLong(backgroundId);
        objectOutput.writeLong(rotation);
        objectOutput.writeDouble(width);
        objectOutput.writeDouble(height);
        objectOutput.writeDouble(resolution);
        objectOutput.writeDouble(ocupacy);
        objectOutput.writeDouble(longitude);
        objectOutput.writeDouble(latitude);

        if (fileEntryUuid == null) {
            objectOutput.writeUTF(StringPool.BLANK);
        }
        else {
            objectOutput.writeUTF(fileEntryUuid);
        }
    }

    // Public cached fields mirroring the Picture entity's columns.
    public long pictureId;
    public long companyId;
    public long groupId;
    public long userId;
    public String name;
    public String description;
    public String referenceUrl;
    public long backgroundId;
    public long rotation;
    public double width;
    public double height;
    public double resolution;
    public double ocupacy;
    public double longitude;
    public double latitude;
    public String fileEntryUuid;
}
|
<filename>one_offs/dodd_frank/dump.py
import subprocess
import sys

# Read one docket ID per line from the file named by the first CLI argument.
# The context manager guarantees the handle is closed (the previous revision
# leaked it), and blank lines are skipped so an empty docket ID is never
# passed to the dump command.
with open(sys.argv[1]) as docket_file:
    dockets = [line.strip() for line in docket_file if line.strip()]

# Dump each docket sequentially via the run.py helper, waiting for each run
# to finish before starting the next (equivalent to Popen + communicate()).
for docket in dockets:
    subprocess.run(["./run.py", "rdg_dump_api", "-d", docket])
|
#!/bin/bash

# Launch the Tetris game_manager with parameters derived from a level preset.

## default values
VALUE_L="1"                   # game level (0-3)
IS_MANUAL_CONTROLL="n"        # y: keyboard control
IS_SAMPLE_CONTROLL="n"        # y: use the sample controller
GAME_TIME="180"               # game time (s); -1 means unlimited
RESULT_LOG_JSON="result.json" # result log file

## get args: -l level, -m manual, -s sample, -t time, -f result file
while getopts l:m:s:t:f: OPT
do
  case $OPT in
    "l" ) VALUE_L="$OPTARG" ;;
    "m" ) IS_MANUAL_CONTROLL="$OPTARG" ;;
    "s" ) IS_SAMPLE_CONTROLL="$OPTARG" ;;
    "t" ) GAME_TIME="$OPTARG" ;;
    "f" ) RESULT_LOG_JSON="$OPTARG" ;;
  esac
done

echo "level: $VALUE_L"
echo "is_manual_controll: $IS_MANUAL_CONTROLL"

## field parameters default to the level-1 preset
DROP_SPEED="1000"        # drop speed (s)
RANDOM_SEED="0"          # random seed for field
OBSTACLE_HEIGHT="0"      # obstacle height (blocks)
OBSTACLE_PROBABILITY="0" # obstacle probability (percent)

## per-level overrides
case $VALUE_L in
  "0" ) GAME_TIME="-1" ;;
  "1" ) RANDOM_SEED="0" ;;
  "2" ) RANDOM_SEED="-1" ;;
  "3" ) RANDOM_SEED="-1"; OBSTACLE_HEIGHT="10"; OBSTACLE_PROBABILITY="40"; ;;
  * ) echo "invalid level: $VALUE_L"; exit 1;;
esac

echo "game_time: $GAME_TIME"
echo "RANDOM_SEED: $RANDOM_SEED"
echo "OBSTACLE_HEIGHT: ${OBSTACLE_HEIGHT}"
echo "OBSTACLE_PROBABILITY: ${OBSTACLE_PROBABILITY}"

## start game
python3 game_manager/game_manager.py --game_time ${GAME_TIME} --seed ${RANDOM_SEED} --obstacle_height ${OBSTACLE_HEIGHT} --obstacle_probability ${OBSTACLE_PROBABILITY} --drop_speed ${DROP_SPEED} --manual ${IS_MANUAL_CONTROLL} --use_sample ${IS_SAMPLE_CONTROLL} --resultlogjson ${RESULT_LOG_JSON}

## archive the result log; create result/ first so the mv cannot fail when
## the directory does not exist yet (previous revision assumed it existed).
## NOTE(review): the literal "result.json" here ignores a custom -f value -
## confirm whether "${RESULT_LOG_JSON}" should be moved instead.
mkdir -p result
echo mv result.json result/result.json
mv result.json result/result.json
|
<reponame>TomminMC/Eris
/*
* Command Handler
*/
module.exports = (client, message) => {
// Ignore Direct Messages
if (message.channel.type !== 'text') return
require('./messageCounter')(message)
require('./reactions')(message)
const prefix = client.eris.config.prefix
// Ignore messages that are not starting with the bot prefix or were sent by a bot
if (!message.content.startsWith(prefix) || message.author.bot) return
// While in development mode, only accept messages from the super secret #bot-test-channel
if (client.eris.config.devmode && message.channel.id !== client.eris.config.guild.channels.devtest) return
const args = message.content.slice(prefix.length).split(/ +/)
const commandName = args.shift().toLowerCase()
const command = client.eris.commands.get(commandName) ||
client.eris.commands.find(cmd => cmd.aliases && cmd.aliases.includes(commandName))
if (!command) return
try {
command.execute(message, args)
} catch (error) {
console.error(error)
message.reply('There was an error trying to execute that command!')
}
}
|
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for configuring kubernetes master and node instances. It is
# uploaded in the manifests tar ball.
# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files.
# Fail fast: abort on any command failure, unset variable, or pipeline error.
set -o errexit
set -o nounset
set -o pipefail

### Hardcoded constants
# Link-local address of the GCE metadata server (overridable for testing).
METADATA_SERVER_IP="${METADATA_SERVER_IP:-169.254.169.254}"
function convert-manifest-params {
  # Convert a space-separated flag string ("--a=1 --b=2") into a
  # comma-separated list of quoted arguments, each preceded by a literal
  # "\n" escape sequence (for later substitution into manifest JSON):
  #   \n"--a=1",\n"--b=2"
  # Prints nothing for empty input. Used when rewriting old-style
  # command=["/bin/sh", "-c", "exec BIN --p=v"] manifest entries into the
  # new command=["BIN"] / args=["--p=v"] form.
  local -a flag_list
  IFS=' ' read -ra flag_list <<< "$1"
  params=""   # deliberately global, matching the original behavior
  local flag
  for flag in "${flag_list[@]}"; do
    params+="\n\"${flag}\","
  done
  if [ -n "${params}" ]; then
    # Emit without the trailing comma (the last char is always a comma here).
    echo "${params%,}"
  fi
}
function append-param-if-not-present {
  # Append " --<flag>=<value>" to a flag string unless the flag is already
  # present (matched as "--flag=" or "--flag "). Echoes the result.
  local current="$1"
  local -r flag_name="$2"
  local -r flag_value="$3"

  if [[ "${current}" =~ "--${flag_name}"[=\ ] ]]; then
    # Flag already set by the caller - leave the string untouched.
    echo "${current}"
  else
    echo "${current} --${flag_name}=${flag_value}"
  fi
}
function setup-os-params {
  # Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
  # /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
  # now, set a generic core_pattern that users can work with.
  # Pattern: /core.<executable>.<pid>.<timestamp>
  echo "/core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}
# secure_random generates a secure random string of bytes. This function accepts
# a number of secure bytes desired and returns a base64 encoded string with at
# least the requested entropy. Rather than directly reading from /dev/urandom,
# we use uuidgen which calls getrandom(2). getrandom(2) verifies that the
# entropy pool has been initialized sufficiently for the desired operation
# before reading from /dev/urandom.
#
# ARGS:
# #1: number of secure bytes to generate. We round up to the nearest factor of 32.
function secure_random {
  # Number of random bytes requested; output entropy is rounded UP to the
  # nearest multiple of 32 bytes.
  local infobytes="${1}"
  if ((infobytes <= 0)); then
    echo "Invalid argument to secure_random: infobytes='${infobytes}'" 1>&2
    return 1
  fi

  local out=""
  for (( i = 0; i < "${infobytes}"; i += 32 )); do
    # uuids have 122 random bits, sha256 sums have 256 bits, so concatenate
    # three uuids and take their sum. The sum is encoded in ASCII hex, hence the
    # 64 character cut.
    out+="$(
      (
        uuidgen --random;
        uuidgen --random;
        uuidgen --random;
      ) | sha256sum \
        | head -c 64
    )";
  done
  # Finally, convert the ASCII hex to base64 to increase the density.
  echo -n "${out}" | xxd -r -p | base64 -w 0
}
# Helper for configuring iptables rules for metadata server.
#
# $1 is the command flag (-I or -D).
# $2 is the firewall action (LOG or REJECT).
# $3 is the prefix for log output.
# $4 is "!" to optionally invert the uid range.
function gce-metadata-fw-helper {
  local -r command="$1"     # iptables command flag (-I or -D)
  local -r prefix="$3"      # log prefix text (only used for LOG actions)
  local -r invert="${4:-}"  # "!" to invert the uid-owner match, or empty
  local action="$2"         # firewall action (LOG or REJECT)

  # Expand rule action to include relevant option flags.
  case "${action}" in
    LOG)
      # Note: the inner double quotes end/restart the string, so ${action}
      # becomes one long fragment that is deliberately word-split below.
      action="LOG --log-prefix "${prefix}:" --log-uid --log-tcp-options --log-ip-option"
      ;;
  esac

  # Deliberately allow word split here
  # shellcheck disable=SC2086
  iptables -w ${command} OUTPUT -p tcp --dport 80 -d ${METADATA_SERVER_IP} -m owner ${invert:-} --uid-owner=${METADATA_SERVER_ALLOWED_UID_RANGE:-0-2999} -j ${action}
}
function config-ip-firewall {
  echo "Configuring IP firewall rules"

  # Do not consider loopback addresses as martian source or destination while
  # routing. This enables the use of 127/8 for local routing purposes.
  sysctl -w net.ipv4.conf.all.route_localnet=1

  # The GCI image has host firewall which drop most inbound/forwarded packets.
  # We need to add rules to accept all TCP/UDP/ICMP/SCTP packets.
  # NOTE(review): "-w" appears twice in each rule below; redundant but harmless.
  if iptables -w -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
    echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
    iptables -w -A INPUT -w -p TCP -j ACCEPT
    iptables -w -A INPUT -w -p UDP -j ACCEPT
    iptables -w -A INPUT -w -p ICMP -j ACCEPT
    iptables -w -A INPUT -w -p SCTP -j ACCEPT
  fi
  if iptables -w -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
    echo "Add rules to accept all forwarded TCP/UDP/ICMP/SCTP packets"
    iptables -w -A FORWARD -w -p TCP -j ACCEPT
    iptables -w -A FORWARD -w -p UDP -j ACCEPT
    iptables -w -A FORWARD -w -p ICMP -j ACCEPT
    iptables -w -A FORWARD -w -p SCTP -j ACCEPT
  fi

  # Flush iptables nat table
  iptables -w -t nat -F || true

  # Masquerade everything except link-local, RFC-reserved, and private ranges.
  if [[ "${NON_MASQUERADE_CIDR:-}" == "0.0.0.0/0" ]]; then
    echo "Add rules for ip masquerade"
    iptables -w -t nat -N IP-MASQ
    iptables -w -t nat -A POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! --dst-type LOCAL -j IP-MASQ
    iptables -w -t nat -A IP-MASQ -d 169.254.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 10.0.0.0/8 -m comment --comment "ip-masq: RFC 1918 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 172.16.0.0/12 -m comment --comment "ip-masq: RFC 1918 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.168.0.0/16 -m comment --comment "ip-masq: RFC 1918 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 240.0.0.0/4 -m comment --comment "ip-masq: RFC 5735 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.0.2.0/24 -m comment --comment "ip-masq: RFC 5737 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 198.51.100.0/24 -m comment --comment "ip-masq: RFC 5737 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 203.0.113.0/24 -m comment --comment "ip-masq: RFC 5737 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 100.64.0.0/10 -m comment --comment "ip-masq: RFC 6598 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 198.18.0.0/15 -m comment --comment "ip-masq: RFC 6815 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.0.0.0/24 -m comment --comment "ip-masq: RFC 6890 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.88.99.0/24 -m comment --comment "ip-masq: RFC 7526 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE
  fi

  # If METADATA_CONCEALMENT_NO_FIREWALL is set, don't create a firewall on this
  # node because we don't expect the daemonset to run on this node.
  if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]] && [[ ! "${METADATA_CONCEALMENT_NO_FIREWALL:-}" == "true" ]]; then
    echo "Add rule for metadata concealment"
    ip addr add dev lo 169.254.169.252/32
    iptables -w -t nat -I PREROUTING -p tcp ! -i eth0 -d "${METADATA_SERVER_IP}" --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 169.254.169.252:988
    iptables -w -t nat -I PREROUTING -p tcp ! -i eth0 -d "${METADATA_SERVER_IP}" --dport 8080 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 169.254.169.252:987
  fi

  # Drop locally generated packets that claim the metadata server as source.
  iptables -w -t mangle -I OUTPUT -s 169.254.169.254 -j DROP

  # Log all metadata access not from approved processes.
  case "${METADATA_SERVER_FIREWALL_MODE:-off}" in
    log)
      echo "Installing metadata firewall logging rules"
      gce-metadata-fw-helper -I LOG "MetadataServerFirewallReject" !
      gce-metadata-fw-helper -I LOG "MetadataServerFirewallAccept"
      ;;
  esac
}
function create-dirs {
  # Pre-create the directories kubelet (and, on nodes, kube-proxy) expect.
  echo "Creating required directories"
  mkdir -p /var/lib/kubelet /etc/kubernetes/manifests
  if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
    mkdir -p /var/lib/kube-proxy
  fi
}
# Gets the total number of $(1) and $(2) type disks specified
# by the user in ${NODE_LOCAL_SSDS_EXT}
function get-local-disk-num() {
  # Count the disks requested in NODE_LOCAL_SSDS_EXT that match the given
  # interface ($1: scsi|nvme) and format ($2: fs|block), case-insensitively.
  # NODE_LOCAL_SSDS_EXT is ';'-separated groups of "<count>,<interface>,<format>".
  # The result is reported in the global ${localdisknum} (this script's
  # convention for returning values from bash functions).
  local -r want_interface="${1,,}"
  local -r want_format="${2,,}"

  localdisknum=0
  if [[ -z "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
    return
  fi

  local -a groups
  IFS=";" read -r -a groups <<< "${NODE_LOCAL_SSDS_EXT:-}"
  local group
  for group in "${groups[@]}"; do
    local -a opts
    IFS="," read -r -a opts <<< "${group}"
    local count="${opts[0]}"
    if [[ "${opts[2],,}" == "${want_format}" && "${opts[1],,}" == "${want_interface}" ]]; then
      localdisknum=$((localdisknum + count))
    fi
  done
}
# Creates a symlink for a ($1) so that it may be used as block storage
function safe-block-symlink(){
  # Expose ${device} for raw-block use by creating a world-writable symlink
  # ${symdir}/local-ssd-<uuid>, where the uuid is stable across reboots
  # (persisted via get-or-generate-uuid).
  local device="${1}"
  local symdir="${2}"

  mkdir -p "${symdir}"

  get-or-generate-uuid "${device}"
  # get-or-generate-uuid returns its result via the global ${retuuid}.
  local myuuid="${retuuid}"

  local sym="${symdir}/local-ssd-${myuuid}"
  # Do not "mkdir -p ${sym}" as that will cause unintended symlink behavior
  ln -s "${device}" "${sym}"
  # NOTE(review): ${ssd} below is not defined in this function - it leaks in
  # from the caller's loop variable; confirm this is intentional.
  echo "Created a symlink for SSD $ssd at ${sym}"
  chmod a+w "${sym}"
}
# Gets a pregenerated UUID from ${ssdmap} if it exists, otherwise generates a new
# UUID and places it inside ${ssdmap}
function get-or-generate-uuid(){
  # Return (via the global ${retuuid}) a persistent UUID for ${device}.
  # The device->uuid mapping is stored in localssdmap.txt so the same device
  # keeps the same UUID across reboots.
  local device="${1}"
  local ssdmap="/home/kubernetes/localssdmap.txt"
  echo "Generating or getting UUID from ${ssdmap}"

  if [[ ! -e "${ssdmap}" ]]; then
    touch "${ssdmap}"
    chmod +w "${ssdmap}"
  fi

  # each line of the ssdmap looks like "${device} persistent-uuid"
  local myuuid
  if grep -q "${device}" "${ssdmap}"; then
    # device already mapped: reuse the saved uuid
    myuuid=$(grep "${device}" "${ssdmap}" | cut -d ' ' -f 2)
  else
    # generate new uuid and add it to the map
    if ! myuuid=$(uuidgen); then
      echo "Failed to generate valid UUID with uuidgen" >&2
      exit 2
    fi
    echo "${device} ${myuuid}" >> "${ssdmap}"
  fi

  if [[ -z "${myuuid}" ]]; then
    echo "Failed to get a uuid for device ${device} when symlinking." >&2
    exit 2
  fi

  retuuid="${myuuid}"
}
#Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
function safe-format-and-mount() {
  # Format ${device} ($1) as ext4 if it does not already carry an ext
  # filesystem, then mount it world-writable at ${mountpoint} ($2).
  local device
  local mountpoint
  device="$1"
  mountpoint="$2"

  # Format only if the disk is not already formatted.
  # NOTE(review): tune2fs only recognizes ext* filesystems - a disk holding a
  # non-ext filesystem would fail this probe and be reformatted; confirm that
  # is acceptable for all callers.
  if ! tune2fs -l "${device}" ; then
    echo "Formatting '${device}'"
    mkfs.ext4 -F "${device}"
  fi

  mkdir -p "${mountpoint}"
  echo "Mounting '${device}' at '${mountpoint}'"
  mount -o discard,defaults "${device}" "${mountpoint}"
  chmod a+w "${mountpoint}"
}
# Gets a devices UUID and bind mounts the device to mount location in
# /mnt/disks/by-id/
function unique-uuid-bind-mount(){
  # Bind-mount ${mountpoint} ($1) for device ${actual_device} ($2) at a
  # filesystem-uuid-derived path under ${UUID_MNT_PREFIX}-<interface>-fs/.
  local mountpoint
  local actual_device
  mountpoint="$1"
  actual_device="$2"

  # Trigger udev refresh so that newly formatted devices are propagated in by-uuid
  udevadm control --reload-rules
  udevadm trigger
  udevadm settle

  # find uuid for actual_device
  local myuuid
  myuuid=$(find -L /dev/disk/by-uuid -maxdepth 1 -samefile /dev/"${actual_device}" -printf '%P')
  # myuuid should be the uuid of the device as found in /dev/disk/by-uuid/
  if [[ -z "${myuuid}" ]]; then
    echo "Failed to get a uuid for device ${actual_device} when mounting." >&2
    exit 2
  fi

  # bindpoint should be the full path of the to-be-bound device
  # NOTE(review): ${interface} is not a parameter of this function - it leaks
  # in from the caller's (mount-ext) scope; confirm this is intentional.
  local bindpoint="${UUID_MNT_PREFIX}-${interface}-fs/local-ssd-${myuuid}"

  safe-bind-mount "${mountpoint}" "${bindpoint}"
}
# Bind mounts device at mountpoint to bindpoint
function safe-bind-mount(){
  # Bind-mount ${mountpoint} ($1) at ${bindpoint} ($2), creating the target
  # directory first and leaving it world-writable.
  local mountpoint="${1}"
  local bindpoint="${2}"

  # Mount device to the mountpoint
  mkdir -p "${bindpoint}"
  echo "Binding '${mountpoint}' at '${bindpoint}'"
  mount --bind "${mountpoint}" "${bindpoint}"
  chmod a+w "${bindpoint}"
}
# Mounts, bindmounts, or symlinks depending on the interface and format
# of the incoming device
function mount-ext(){
  # Prepare one local SSD ($1: device path) according to its requested
  # interface ($3: scsi|nvme) and format ($4: fs|block):
  #   fs    -> format/mount at /mnt/disks/ssd[-nvme]<n> (and uuid bind-mount
  #            when NODE_LOCAL_SSDS_EXT is in use)
  #   block -> symlink into a uuid-named path for raw-block consumption
  local ssd="${1}"
  local devicenum="${2}"
  local interface="${3}"
  local format="${4}"

  if [[ -z "${devicenum}" ]]; then
    echo "Failed to get the local disk number for device ${ssd}" >&2
    exit 2
  fi

  # TODO: Handle partitioned disks. Right now this code just ignores partitions
  if [[ "${format}" == "fs" ]]; then
    if [[ "${interface}" == "scsi" ]]; then
      local actual_device
      # Resolve the by-id symlink to the kernel device name (e.g. "sdb").
      actual_device=$(readlink -f "${ssd}" | cut -d '/' -f 3)
      # Error checking
      if [[ "${actual_device}" != sd* ]]; then
        echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
        exit 1
      fi
      local mountpoint="/mnt/disks/ssd${devicenum}"
    else
      # This path is required because the existing Google images do not
      # expose NVMe devices in /dev/disk/by-id so we are using the /dev/nvme instead
      local actual_device
      actual_device=$(echo "${ssd}" | cut -d '/' -f 3)
      # Error checking
      if [[ "${actual_device}" != nvme* ]]; then
        echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
        exit 1
      fi
      local mountpoint="/mnt/disks/ssd-nvme${devicenum}"
    fi
    safe-format-and-mount "${ssd}" "${mountpoint}"
    # We only do the bindmount if users are using the new local ssd request method
    # see https://github.com/kubernetes/kubernetes/pull/53466#discussion_r146431894
    if [[ -n "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
      unique-uuid-bind-mount "${mountpoint}" "${actual_device}"
    fi
  elif [[ "${format}" == "block" ]]; then
    local symdir="${UUID_BLOCK_PREFIX}-${interface}-block"
    safe-block-symlink "${ssd}" "${symdir}"
  else
    echo "Disk format must be either fs or block, got ${format}"
  fi
}
# Local ssds, if present, are mounted or symlinked to their appropriate
# locations
function ensure-local-ssds() {
  # Entry point for local SSD setup: either dedicate all SSDs to ephemeral
  # storage (RAID, see below), or mount/symlink each SCSI and NVMe SSD
  # according to the counts requested in NODE_LOCAL_SSDS_EXT.
  if [ "${NODE_LOCAL_SSDS_EPHEMERAL:-false}" == "true" ]; then
    ensure-local-ssds-ephemeral-storage
    return
  fi

  # SCSI SSDs: the first ${scsiblocknum} devices become raw block, the rest fs.
  get-local-disk-num "scsi" "block"
  local scsiblocknum="${localdisknum}"
  local i=0
  for ssd in /dev/disk/by-id/google-local-ssd-*; do
    if [ -e "${ssd}" ]; then
      local devicenum
      devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
      if [[ "${i}" -lt "${scsiblocknum}" ]]; then
        mount-ext "${ssd}" "${devicenum}" "scsi" "block"
      else
        # GKE does not set NODE_LOCAL_SSDS so all non-block devices
        # are assumed to be filesystem devices
        mount-ext "${ssd}" "${devicenum}" "scsi" "fs"
      fi
      i=$((i+1))
    else
      echo "No local SCSI SSD disks found."
    fi
  done

  # The following mounts or symlinks NVMe devices
  get-local-disk-num "nvme" "block"
  local nvmeblocknum="${localdisknum}"
  get-local-disk-num "nvme" "fs"
  local nvmefsnum="${localdisknum}"
  # Check if NVMe SSD specified.
  if [ "${nvmeblocknum}" -eq "0" ] && [ "${nvmefsnum}" -eq "0" ]; then
    echo "No local NVMe SSD specified."
    return
  fi
  local i=0
  for ssd in /dev/nvme*; do
    if [ -e "${ssd}" ]; then
      # This workaround to find if the NVMe device is a disk is required because
      # the existing Google images does not expose NVMe devices in /dev/disk/by-id
      if [[ $(udevadm info --query=property --name="${ssd}" | grep DEVTYPE | sed "s/DEVTYPE=//") == "disk" ]]; then
        # NOTE(review): this sed assumes all namespaces live on controller
        # nvme0 (pattern "nvme0n<N>") - confirm for multi-controller machines.
        # shellcheck disable=SC2155
        local devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/nvme0n\([0-9]*\)/\1/')
        if [[ "${i}" -lt "${nvmeblocknum}" ]]; then
          mount-ext "${ssd}" "${devicenum}" "nvme" "block"
        else
          mount-ext "${ssd}" "${devicenum}" "nvme" "fs"
        fi
        i=$((i+1))
      fi
    else
      echo "No local NVMe SSD disks found."
    fi
  done
}
# Local SSDs, if present, are used in a single RAID 0 array and directories that
# back ephemeral storage are mounted on them (kubelet root, container runtime
# root and pod logs).
function ensure-local-ssds-ephemeral-storage() {
  # Combine all local NVMe SSDs into one filesystem (RAID 0 when more than
  # one) and move ephemeral-storage directories onto it: the container
  # runtime root, the kubelet root, and the pod log directory.
  local devices=()
  # Get nvme devices
  for ssd in /dev/nvme*n*; do
    if [ -e "${ssd}" ]; then
      # This workaround to find if the NVMe device is a local SSD is required
      # because the existing Google images does not them in /dev/disk/by-id
      if [[ "$(lsblk -o MODEL -dn "${ssd}")" == "nvme_card" ]]; then
        devices+=("${ssd}")
      fi
    fi
  done
  if [ "${#devices[@]}" -eq 0 ]; then
    echo "No local NVMe SSD disks found."
    return
  fi

  local device="${devices[0]}"
  if [ "${#devices[@]}" -ne 1 ]; then
    # Reuse an existing array if one was already assembled on a prior boot.
    seen_arrays=(/dev/md/*)
    device=${seen_arrays[0]}
    echo "Setting RAID array with local SSDs on device ${device}"
    if [ ! -e "$device" ]; then
      device="/dev/md/0"
      echo "y" | mdadm --create "${device}" --level=0 --raid-devices=${#devices[@]} "${devices[@]}"
    fi
  fi

  local ephemeral_mountpoint="/mnt/stateful_partition/kube-ephemeral-ssd"
  safe-format-and-mount "${device}" "${ephemeral_mountpoint}"

  # mount container runtime root dir on SSD
  local container_runtime="${CONTAINER_RUNTIME:-docker}"
  systemctl stop "$container_runtime"
  # Some images remount the container runtime root dir.
  umount "/var/lib/${container_runtime}" || true
  # Move the container runtime's directory to the new location to preserve
  # preloaded images.
  if [ ! -d "${ephemeral_mountpoint}/${container_runtime}" ]; then
    mv "/var/lib/${container_runtime}" "${ephemeral_mountpoint}/${container_runtime}"
  fi
  safe-bind-mount "${ephemeral_mountpoint}/${container_runtime}" "/var/lib/${container_runtime}"
  systemctl start "$container_runtime"

  # mount kubelet root dir on SSD
  mkdir -p "${ephemeral_mountpoint}/kubelet"
  safe-bind-mount "${ephemeral_mountpoint}/kubelet" "/var/lib/kubelet"

  # mount pod logs root dir on SSD
  mkdir -p "${ephemeral_mountpoint}/log_pods"
  safe-bind-mount "${ephemeral_mountpoint}/log_pods" "/var/log/pods"
}
# Installs logrotate configuration files
function setup-logrotate() {
mkdir -p /etc/logrotate.d/
if [[ "${ENABLE_LOGROTATE_FILES:-true}" = "true" ]]; then
# Configure log rotation for all logs in /var/log, which is where k8s services
# are configured to write their log files. Whenever logrotate is ran, this
# config will:
# * rotate the log file if its size is > 100Mb OR if one day has elapsed
# * save rotated logs into a gzipped timestamped backup
# * log file timestamp (controlled by 'dateformat') includes seconds too. This
# ensures that logrotate can generate unique logfiles during each rotation
# (otherwise it skips rotation if 'maxsize' is reached multiple times in a
# day).
# * keep only 5 old (rotated) logs, and will discard older logs.
cat > /etc/logrotate.d/allvarlogs <<EOF
/var/log/*.log {
rotate ${LOGROTATE_FILES_MAX_COUNT:-5}
copytruncate
missingok
notifempty
compress
maxsize ${LOGROTATE_MAX_SIZE:-100M}
daily
dateext
dateformat -%Y%m%d-%s
create 0644 root root
}
EOF
fi
if [[ "${ENABLE_POD_LOG:-false}" = "true" ]]; then
# Configure log rotation for pod logs in /var/log/pods/NAMESPACE_NAME_UID.
cat > /etc/logrotate.d/allpodlogs <<EOF
/var/log/pods/*/*.log {
rotate ${POD_LOG_MAX_FILE:-5}
copytruncate
missingok
notifempty
compress
maxsize ${POD_LOG_MAX_SIZE:-5M}
daily
dateext
dateformat -%Y%m%d-%s
create 0644 root root
}
EOF
fi
}
# Finds the master PD device; returns it in MASTER_PD_DEVICE
# Finds the master PD device; returns it in MASTER_PD_DEVICE
function find-master-pd {
  # Reports the master persistent disk in the global MASTER_PD_DEVICE
  # (empty when no master PD symlink is present).
  MASTER_PD_DEVICE=""
  if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
    return
  fi
  # Resolve the symlink target directly with readlink instead of parsing
  # `ls -l` output, which is fragile. readlink prints the same relative
  # target that the last field of `ls -l` contained.
  relative_path=$(readlink /dev/disk/by-id/google-master-pd)
  MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
# safe-format-and-mount only formats an unformatted disk, and mkdir -p will
# leave a directory be if it already exists.
function mount-master-pd {
  find-master-pd
  # Nothing to do when no master PD is attached (e.g. on nodes).
  if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
    return
  fi

  echo "Mounting master-pd"
  local -r pd_path="/dev/disk/by-id/google-master-pd"
  local -r mount_point="/mnt/disks/master-pd"
  # Format and mount the disk, create directories on it for all of the master's
  # persistent data, and link them to where they're used.
  mkdir -p "${mount_point}"
  safe-format-and-mount "${pd_path}" "${mount_point}"
  echo "Mounted master-pd '${pd_path}' at '${mount_point}'"

  # NOTE: These locations on the PD store persistent data, so to maintain
  # upgradeability, these locations should not change.  If they do, take care
  # to maintain a migration path from these locations to whatever new
  # locations.

  # Contains all the data stored in etcd.
  mkdir -p "${mount_point}/var/etcd"
  chmod 700 "${mount_point}/var/etcd"
  ln -s -f "${mount_point}/var/etcd" /var/etcd
  mkdir -p /etc/srv
  # Contains the dynamically generated apiserver auth certs and keys.
  mkdir -p "${mount_point}/srv/kubernetes"
  ln -s -f "${mount_point}/srv/kubernetes" /etc/srv/kubernetes
  # Directory for kube-apiserver to store SSH key (if necessary).
  mkdir -p "${mount_point}/srv/sshproxy"
  ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy

  # etcd runs as the etcd user and must own its data directory.
  chown -R etcd "${mount_point}/var/etcd"
  chgrp -R etcd "${mount_point}/var/etcd"
}
# append_or_replace_prefixed_line ensures:
# 1. the specified file exists
# 2. existing lines with the specified ${prefix} are removed
# 3. a new line with the specified ${prefix}${suffix} is appended
function append_or_replace_prefixed_line {
  # Rewrite ${1} so that exactly one line starts with ${2}: every existing
  # line with that prefix is dropped, then "${2}${3}" is appended.
  local -r target="${1:-}"
  local -r pfx="${2:-}"
  local -r sfx="${3:-}"
  local -r target_dir=$(dirname "${target}")
  # Build the filtered copy in the same directory so the final mv is an
  # atomic rename on the same filesystem.
  local -r scratch=$(mktemp "${target_dir}/filtered.XXXX")
  touch "${target}"
  # awk compares the raw prefix bytes (no regex interpretation), so tokens
  # containing regex metacharacters are handled safely.
  awk -v p="${pfx}" 'substr($0, 1, length(p)) != p { print }' "${target}" > "${scratch}"
  echo "${pfx}${sfx}" >> "${scratch}"
  mv "${scratch}" "${target}"
}
function write-pki-data {
  # Decode base64 PKI material ($1) into file $2 with restrictive modes.
  local contents="${1}"
  local dest="${2}"
  if [[ -z "${KUBE_PKI_READERS_GROUP:-}" ]]; then
    # No reader group configured: keep the file private to the owner.
    (umask 077; echo "${contents}" | base64 --decode > "${dest}")
  else
    # A PKI readers group is configured: create the file group-readable
    # (umask 027) and hand group ownership to that group.
    (umask 027; echo "${contents}" | base64 --decode > "${dest}")
    chgrp "${KUBE_PKI_READERS_GROUP:-}" "${dest}"
    chmod g+r "${dest}"
  fi
}
function create-node-pki {
  # Materialize the node's PKI files (CA bundle plus optional kubelet
  # client cert/key) under /etc/srv/kubernetes/pki, exporting the
  # *_PATH globals that later config generation reads.
  echo "Creating node pki files"
  local -r pki_dir="/etc/srv/kubernetes/pki"
  mkdir -p "${pki_dir}"
  # Fall back to the plain cluster CA when no explicit bundle was provided.
  CA_CERT_BUNDLE="${CA_CERT_BUNDLE:-${CA_CERT}}"
  CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
  write-pki-data "${CA_CERT_BUNDLE}" "${CA_CERT_BUNDLE_PATH}"
  # Kubelet client credentials are optional; only write them when both
  # halves of the pair are present.
  if [[ -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
    KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
    write-pki-data "${KUBELET_CERT}" "${KUBELET_CERT_PATH}"
    KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
    write-pki-data "${KUBELET_KEY}" "${KUBELET_KEY_PATH}"
  fi
}
function create-master-pki {
# Materialize all master-side PKI files under /etc/srv/kubernetes/pki and
# export the *_PATH globals consumed later when assembling component
# configuration. Most files are written via write-pki-data, which applies
# a restrictive umask and optional group-read for KUBE_PKI_READERS_GROUP.
echo "Creating master pki files"
local -r pki_dir="/etc/srv/kubernetes/pki"
mkdir -p "${pki_dir}"
CA_CERT_PATH="${pki_dir}/ca.crt"
write-pki-data "${CA_CERT}" "${CA_CERT_PATH}"
# this is not true on GKE
if [[ -n "${CA_KEY:-}" ]]; then
CA_KEY_PATH="${pki_dir}/ca.key"
write-pki-data "${CA_KEY}" "${CA_KEY_PATH}"
fi
# Apiserver serving cert/key default to the legacy MASTER_CERT/MASTER_KEY
# pair when not provided explicitly.
if [[ -z "${APISERVER_SERVER_CERT:-}" || -z "${APISERVER_SERVER_KEY:-}" ]]; then
APISERVER_SERVER_CERT="${MASTER_CERT}"
APISERVER_SERVER_KEY="${MASTER_KEY}"
fi
APISERVER_SERVER_CERT_PATH="${pki_dir}/apiserver.crt"
write-pki-data "${APISERVER_SERVER_CERT}" "${APISERVER_SERVER_CERT_PATH}"
APISERVER_SERVER_KEY_PATH="${pki_dir}/apiserver.key"
write-pki-data "${APISERVER_SERVER_KEY}" "${APISERVER_SERVER_KEY_PATH}"
# Client credential the apiserver presents (e.g. to kubelets); defaults to
# the KUBEAPISERVER_* pair.
if [[ -z "${APISERVER_CLIENT_CERT:-}" || -z "${APISERVER_CLIENT_KEY:-}" ]]; then
APISERVER_CLIENT_CERT="${KUBEAPISERVER_CERT}"
APISERVER_CLIENT_KEY="${KUBEAPISERVER_KEY}"
fi
APISERVER_CLIENT_CERT_PATH="${pki_dir}/apiserver-client.crt"
write-pki-data "${APISERVER_CLIENT_CERT}" "${APISERVER_CLIENT_CERT_PATH}"
APISERVER_CLIENT_KEY_PATH="${pki_dir}/apiserver-client.key"
write-pki-data "${APISERVER_CLIENT_KEY}" "${APISERVER_CLIENT_KEY_PATH}"
# Service-account signing pair defaults to the legacy master pair as well.
if [[ -z "${SERVICEACCOUNT_CERT:-}" || -z "${SERVICEACCOUNT_KEY:-}" ]]; then
SERVICEACCOUNT_CERT="${MASTER_CERT}"
SERVICEACCOUNT_KEY="${MASTER_KEY}"
fi
# NOTE(review): the old master cert/key are written with plain base64
# decode rather than write-pki-data, so they get the current umask instead
# of the tightened permissions — presumably intentional for legacy
# compatibility, but worth confirming.
if [[ -n "${OLD_MASTER_CERT:-}" && -n "${OLD_MASTER_KEY:-}" ]]; then
OLD_MASTER_CERT_PATH="${pki_dir}/oldapiserver.crt"
echo "${OLD_MASTER_CERT}" | base64 --decode > "${OLD_MASTER_CERT_PATH}"
OLD_MASTER_KEY_PATH="${pki_dir}/oldapiserver.key"
echo "${OLD_MASTER_KEY}" | base64 --decode > "${OLD_MASTER_KEY_PATH}"
fi
SERVICEACCOUNT_CERT_PATH="${pki_dir}/serviceaccount.crt"
write-pki-data "${SERVICEACCOUNT_CERT}" "${SERVICEACCOUNT_CERT_PATH}"
SERVICEACCOUNT_KEY_PATH="${pki_dir}/serviceaccount.key"
write-pki-data "${SERVICEACCOUNT_KEY}" "${SERVICEACCOUNT_KEY_PATH}"
# Aggregation-layer credentials: requestheader CA plus the proxy client
# cert/key the apiserver uses to talk to extension apiservers.
if [[ -n "${REQUESTHEADER_CA_CERT:-}" ]]; then
REQUESTHEADER_CA_CERT_PATH="${pki_dir}/aggr_ca.crt"
write-pki-data "${REQUESTHEADER_CA_CERT}" "${REQUESTHEADER_CA_CERT_PATH}"
PROXY_CLIENT_KEY_PATH="${pki_dir}/proxy_client.key"
write-pki-data "${PROXY_CLIENT_KEY}" "${PROXY_CLIENT_KEY_PATH}"
PROXY_CLIENT_CERT_PATH="${pki_dir}/proxy_client.crt"
write-pki-data "${PROXY_CLIENT_CERT}" "${PROXY_CLIENT_CERT_PATH}"
fi
}
# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.) One exception is if METADATA_CLOBBERS_CONFIG is
# enabled. In that case the basic_auth.csv file will be rewritten to make
# sure it matches the metadata source of truth.
function create-master-auth {
# Writes the master's static auth material (basic_auth.csv, known_tokens.csv),
# the GCE cloud-provider config (/etc/gce.conf), and the various webhook /
# egress / admission configuration files consumed by kube-apiserver.
echo "Creating master auth files"
local -r auth_dir="/etc/srv/kubernetes"
local -r basic_auth_csv="${auth_dir}/basic_auth.csv"
if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
if [[ -e "${basic_auth_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
# If METADATA_CLOBBERS_CONFIG is true, we want to rewrite the file
# completely, because if we're changing KUBE_USER and KUBE_PASSWORD, we
# have nothing to match on. The file is replaced just below with
# append_or_replace_prefixed_line.
rm "${basic_auth_csv}"
fi
append_or_replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters"
fi
# known_tokens.csv maps static bearer tokens to component identities
# (token,user,uid[,groups...]); one line per control-plane component.
local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
if [[ -e "${known_tokens_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
rm "${known_tokens_csv}"
fi
if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters"
fi
if [[ -n "${KUBE_BOOTSTRAP_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BOOTSTRAP_TOKEN}," "gcp:kube-bootstrap,uid:gcp:kube-bootstrap,system:masters"
fi
if [[ -n "${CLOUD_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${CLOUD_CONTROLLER_MANAGER_TOKEN}," "system:cloud-controller-manager,uid:system:cloud-controller-manager"
fi
if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
fi
if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
fi
if [[ -n "${KUBE_CLUSTER_AUTOSCALER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}," "cluster-autoscaler,uid:cluster-autoscaler"
fi
if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy"
fi
if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${NODE_PROBLEM_DETECTOR_TOKEN}," "system:node-problem-detector,uid:node-problem-detector"
fi
if [[ -n "${GCE_GLBC_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${GCE_GLBC_TOKEN}," "system:controller:glbc,uid:system:controller:glbc"
fi
if [[ -n "${ADDON_MANAGER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${ADDON_MANAGER_TOKEN}," "system:addon-manager,uid:system:addon-manager,system:masters"
fi
# Konnectivity additionally needs a kubeconfig, not just a token entry.
if [[ -n "${KONNECTIVITY_SERVER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KONNECTIVITY_SERVER_TOKEN}," "system:konnectivity-server,uid:system:konnectivity-server"
create-kubeconfig "konnectivity-server" "${KONNECTIVITY_SERVER_TOKEN}"
fi
if [[ -n "${MONITORING_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${MONITORING_TOKEN}," "system:monitoring,uid:system:monitoring,system:monitoring"
fi
if [[ -n "${EXTRA_STATIC_AUTH_COMPONENTS:-}" ]]; then
# Create a static Bearer token and kubeconfig for extra, comma-separated components.
# secure_random is defined elsewhere in this script.
IFS="," read -r -a extra_components <<< "${EXTRA_STATIC_AUTH_COMPONENTS:-}"
for extra_component in "${extra_components[@]}"; do
local token
token="$(secure_random 32)"
append_or_replace_prefixed_line "${known_tokens_csv}" "${token}," "system:${extra_component},uid:system:${extra_component}"
create-kubeconfig "${extra_component}" "${token}"
done
fi
# Assemble /etc/gce.conf for the GCE cloud provider. use_cloud_config is
# flipped to "true" whenever at least one meaningful option is emitted;
# otherwise the file is deleted again at the end of this section.
local use_cloud_config="false"
cat <<EOF >/etc/gce.conf
[global]
EOF
if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then
cat <<EOF >>/etc/gce.conf
api-endpoint = ${GCE_API_ENDPOINT}
EOF
fi
if [[ -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
fi
if [[ -n "${CONTAINER_API_ENDPOINT:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
container-api-endpoint = ${CONTAINER_API_ENDPOINT}
EOF
fi
if [[ -n "${PROJECT_ID:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
project-id = ${PROJECT_ID}
EOF
fi
if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
network-project-id = ${NETWORK_PROJECT_ID}
EOF
fi
if [[ -n "${STACK_TYPE:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
stack-type = ${STACK_TYPE}
EOF
fi
if [[ -n "${NODE_NETWORK:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
network-name = ${NODE_NETWORK}
EOF
fi
if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
EOF
fi
if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
use_cloud_config="true"
if [[ -n "${NODE_TAGS:-}" ]]; then
# split NODE_TAGS into an array by comma.
IFS=',' read -r -a node_tags <<< "${NODE_TAGS}"
else
local -r node_tags=("${NODE_INSTANCE_PREFIX}")
fi
cat <<EOF >>/etc/gce.conf
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
# gce.conf takes one node-tags line per tag.
for tag in "${node_tags[@]}"; do
cat <<EOF >>/etc/gce.conf
node-tags = ${tag}
EOF
done
fi
if [[ -n "${MULTIZONE:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
fi
# Multimaster indicates that the cluster is HA.
# Currently the only HA clusters are regional.
# If we introduce zonal multimaster this will need to be revisited.
if [[ -n "${MULTIMASTER:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
regional = ${MULTIMASTER}
EOF
fi
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
use_cloud_config="true"
# split GCE_ALPHA_FEATURES into an array by comma.
IFS=',' read -r -a alpha_features <<< "${GCE_ALPHA_FEATURES}"
for feature in "${alpha_features[@]}"; do
cat <<EOF >>/etc/gce.conf
alpha-features = ${feature}
EOF
done
fi
if [[ -n "${SECONDARY_RANGE_NAME:-}" ]]; then
use_cloud_config="true"
cat <<EOF >> /etc/gce.conf
secondary-range-name = ${SECONDARY_RANGE_NAME}
EOF
fi
# Only the bare "[global]" header was written: no cloud config needed.
if [[ "${use_cloud_config}" != "true" ]]; then
rm -f /etc/gce.conf
fi
# Kubeconfig for kube-apiserver to call the GCP authentication webhook.
if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authn.config
clusters:
- name: gcp-authentication-server
cluster:
server: ${GCP_AUTHN_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authentication-server
user: kube-apiserver
name: webhook
EOF
fi
# Kubeconfig for kube-apiserver to call the GCP authorization webhook.
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authz.config
clusters:
- name: gcp-authorization-server
cluster:
server: ${GCP_AUTHZ_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authorization-server
user: kube-apiserver
name: webhook
EOF
fi
# Egress selector configuration: route "cluster" traffic through the
# konnectivity-server UDS (GRPC or HTTPConnect), leave master/etcd direct.
if [[ "${PREPARE_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'grpc' ]]; then
cat <<EOF >/etc/srv/kubernetes/egress_selector_configuration.yaml
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
- name: cluster
connection:
proxyProtocol: GRPC
transport:
uds:
udsName: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket
- name: master
connection:
proxyProtocol: Direct
- name: etcd
connection:
proxyProtocol: Direct
EOF
elif [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
cat <<EOF >/etc/srv/kubernetes/egress_selector_configuration.yaml
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
- name: cluster
connection:
proxyProtocol: HTTPConnect
transport:
uds:
udsName: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket
- name: master
connection:
proxyProtocol: Direct
- name: etcd
connection:
proxyProtocol: Direct
EOF
else
echo "KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE must be set to either grpc or http-connect"
exit 1
fi
fi
if [[ -n "${WEBHOOK_GKE_EXEC_AUTH:-}" ]]; then
if [[ -z "${EXEC_AUTH_PLUGIN_URL:-}" ]]; then
1>&2 echo "You requested GKE exec auth support for webhooks, but EXEC_AUTH_PLUGIN_URL was not specified. This configuration depends on gke-exec-auth-plugin for authenticating to the webhook endpoint."
exit 1
fi
if [[ -z "${TOKEN_URL:-}" || -z "${TOKEN_BODY:-}" || -z "${TOKEN_BODY_UNQUOTED:-}" ]]; then
1>&2 echo "You requested GKE exec auth support for webhooks, but TOKEN_URL, TOKEN_BODY, and TOKEN_BODY_UNQUOTED were not provided. gke-exec-auth-plugin requires these values for its configuration."
exit 1
fi
# kubeconfig to be used by webhooks with GKE exec auth support. Note that
# the path to gke-exec-auth-plugin is the path when mounted inside the
# kube-apiserver pod.
cat <<EOF >/etc/srv/kubernetes/webhook.kubeconfig
apiVersion: v1
kind: Config
users:
- name: '*.googleapis.com'
user:
exec:
apiVersion: "client.authentication.k8s.io/v1alpha1"
command: /usr/bin/gke-exec-auth-plugin
args:
- --mode=alt-token
- --alt-token-url=${TOKEN_URL}
- --alt-token-body=${TOKEN_BODY_UNQUOTED}
EOF
fi
if [[ -n "${ADMISSION_CONTROL:-}" ]]; then
# Emit a basic admission control configuration file, with no plugins specified.
cat <<EOF >/etc/srv/kubernetes/admission_controller_config.yaml
apiVersion: apiserver.k8s.io/v1alpha1
kind: AdmissionConfiguration
plugins:
EOF
# Add resourcequota config to limit critical pods to kube-system by default
cat <<EOF >>/etc/srv/kubernetes/admission_controller_config.yaml
- name: "ResourceQuota"
configuration:
apiVersion: apiserver.config.k8s.io/v1
kind: ResourceQuotaConfiguration
limitedResources:
- resource: pods
matchScopes:
- scopeName: PriorityClass
operator: In
values: ["system-node-critical", "system-cluster-critical"]
EOF
if [[ "${ADMISSION_CONTROL:-}" == *"ImagePolicyWebhook"* ]]; then
if [[ -z "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
1>&2 echo "The ImagePolicyWebhook admission control plugin was requested, but GCP_IMAGE_VERIFICATION_URL was not provided."
exit 1
fi
1>&2 echo "ImagePolicyWebhook admission control plugin requested. Configuring it to point at ${GCP_IMAGE_VERIFICATION_URL}"
# ImagePolicyWebhook does not use gke-exec-auth-plugin for authenticating
# to the webhook endpoint. Emit its special kubeconfig.
cat <<EOF >/etc/srv/kubernetes/gcp_image_review.kubeconfig
clusters:
- name: gcp-image-review-server
cluster:
server: ${GCP_IMAGE_VERIFICATION_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-image-review-server
user: kube-apiserver
name: webhook
EOF
# Append config for ImagePolicyWebhook to the shared admission controller
# configuration file.
cat <<EOF >>/etc/srv/kubernetes/admission_controller_config.yaml
- name: ImagePolicyWebhook
configuration:
imagePolicy:
kubeConfigFile: /etc/srv/kubernetes/gcp_image_review.kubeconfig
allowTTL: 30
denyTTL: 30
retryBackoff: 500
defaultAllow: true
EOF
fi
# If GKE exec auth for webhooks has been requested, then
# ValidatingAdmissionWebhook should use it. Otherwise, run with the default
# config.
if [[ -n "${WEBHOOK_GKE_EXEC_AUTH:-}" ]]; then
1>&2 echo "ValidatingAdmissionWebhook requested, and WEBHOOK_GKE_EXEC_AUTH specified. Configuring ValidatingAdmissionWebhook to use gke-exec-auth-plugin."
# Append config for ValidatingAdmissionWebhook to the shared admission
# controller configuration file.
cat <<EOF >>/etc/srv/kubernetes/admission_controller_config.yaml
- name: ValidatingAdmissionWebhook
configuration:
apiVersion: apiserver.config.k8s.io/v1alpha1
kind: WebhookAdmission
kubeConfigFile: /etc/srv/kubernetes/webhook.kubeconfig
EOF
fi
fi
}
# Write the config for the audit policy.
function create-master-audit-policy {
# Write the apiserver audit policy to ${1}. If a policy is passed as ${2},
# it is written verbatim; otherwise the default GCE policy below is emitted.
local -r path="${1}"
local -r policy="${2:-}"
if [[ -n "${policy}" ]]; then
echo "${policy}" > "${path}"
return
fi
# Known api groups
local -r known_apis='
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "node.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "storage.k8s.io"'
# Default policy: rules are evaluated in order, first match wins; the
# ${known_apis} list above is spliced into the heredoc twice below.
cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
# Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
# TODO(#46983): Change this to the ingress controller service account.
users: ["system:unsecured"]
namespaces: ["kube-system"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["configmaps"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
users:
- system:kube-controller-manager
- system:cloud-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
- level: None
users: ["cluster-autoscaler"]
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["configmaps", "endpoints"]
# Don't log HPA fetching metrics.
- level: None
users:
- system:kube-controller-manager
- system:cloud-controller-manager
verbs: ["get", "list"]
resources:
- group: "metrics.k8s.io"
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
# Don't log events requests because of performance impact.
- level: None
resources:
- group: "" # core
resources: ["events"]
# node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
- level: Request
users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
- level: Request
userGroups: ["system:nodes"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
# deletecollection calls can be large, don't log responses for expected namespace deletions
- level: Request
users: ["system:serviceaccount:kube-system:namespace-controller"]
verbs: ["deletecollection"]
omitStages:
- "RequestReceived"
# Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
omitStages:
- "RequestReceived"
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources: ${known_apis}
omitStages:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources: ${known_apis}
omitStages:
- "RequestReceived"
# Default level for all other requests.
- level: Metadata
omitStages:
- "RequestReceived"
EOF
}
# Writes the configuration file used by the webhook advanced auditing backend.
function create-master-audit-webhook-config {
# Write the kubeconfig ${1} that the apiserver's webhook audit backend
# uses to reach GCP_AUDIT_URL. No-op when GCP_AUDIT_URL is unset.
local -r path="${1}"
if [[ -n "${GCP_AUDIT_URL:-}" ]]; then
# The webhook config file is a kubeconfig file describing the webhook endpoint.
cat <<EOF >"${path}"
clusters:
- name: gcp-audit-server
cluster:
server: ${GCP_AUDIT_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-audit-server
user: kube-apiserver
name: webhook
EOF
fi
}
function create-kubeconfig {
# Create /etc/srv/kubernetes/<component>/kubeconfig authenticating with the
# given static bearer token against the local apiserver.
#   $1 - component name (also used as user/context name)
#   $2 - bearer token for that component
local component=$1
local token=$2
local path="/etc/srv/kubernetes/${component}/kubeconfig"
mkdir -p "/etc/srv/kubernetes/${component}"
# On GKE-internal images, delegate to the internal helper instead of
# writing the generic kubeconfig below.
if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
gke-internal-create-kubeconfig "${component}" "${token}" "${path}"
else
echo "Creating kubeconfig file for component ${component}"
cat <<EOF >"${path}"
apiVersion: v1
kind: Config
users:
- name: ${component}
user:
token: ${token}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: ${component}
name: ${component}
current-context: ${component}
EOF
fi
}
# Arg 1: the IP address of the API server
function create-kubelet-kubeconfig() {
# Create the kubelet's (bootstrap-)kubeconfig. Depending on flags, either
# generate one locally from the node PKI paths, or fetch a prebuilt one
# from the GCE instance metadata server.
#   $1 - the IP address or hostname of the API server
local apiserver_address="${1}"
if [[ -z "${apiserver_address}" ]]; then
echo "Must provide API server address to create Kubelet kubeconfig file!"
exit 1
fi
if [[ "${CREATE_BOOTSTRAP_KUBECONFIG:-true}" == "true" ]]; then
echo "Creating kubelet bootstrap-kubeconfig file"
# Uses KUBELET_CERT_PATH / KUBELET_KEY_PATH / CA_CERT_BUNDLE_PATH, which
# are exported by create-node-pki.
cat <<EOF >/var/lib/kubelet/bootstrap-kubeconfig
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate: ${KUBELET_CERT_PATH}
client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
cluster:
server: https://${apiserver_address}
certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF
elif [[ "${FETCH_BOOTSTRAP_KUBECONFIG:-false}" == "true" ]]; then
echo "Fetching kubelet bootstrap-kubeconfig file from metadata"
# get-metadata-value is defined elsewhere in this script.
get-metadata-value "instance/attributes/bootstrap-kubeconfig" >/var/lib/kubelet/bootstrap-kubeconfig
else
echo "Fetching kubelet kubeconfig file from metadata"
get-metadata-value "instance/attributes/kubeconfig" >/var/lib/kubelet/kubeconfig
fi
}
# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY
# to generate a kubeconfig file for the kubelet to securely connect to the apiserver.
# Set REGISTER_MASTER_KUBELET to true if kubelet on the master node
# should register to the apiserver.
function create-master-kubelet-auth {
  # Configure the kubelet on the master only when the apiserver endpoint
  # and the kubelet client credentials are all present in the environment;
  # otherwise leave the master kubelet unregistered.
  if [[ -z "${KUBELET_APISERVER:-}" || -z "${KUBELET_CERT:-}" || -z "${KUBELET_KEY:-}" ]]; then
    return
  fi
  REGISTER_MASTER_KUBELET="true"
  create-kubelet-kubeconfig "${KUBELET_APISERVER}"
}
function create-kubeproxy-user-kubeconfig {
# Write /var/lib/kube-proxy/kubeconfig authenticating with the static
# KUBE_PROXY_TOKEN and trusting CA_CERT_BUNDLE (inline, base64-encoded).
# Note: no server address is set here; presumably supplied elsewhere
# (flag or default) — confirm against the kube-proxy manifest.
echo "Creating kube-proxy user kubeconfig file"
cat <<EOF >/var/lib/kube-proxy/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF
}
function create-kube-scheduler-config {
# Dump the KUBE_SCHEDULER_CONFIG env payload verbatim to the scheduler's
# componentconfig file.
echo "Creating kube-scheduler config file"
mkdir -p /etc/srv/kubernetes/kube-scheduler
cat <<EOF >/etc/srv/kubernetes/kube-scheduler/config
${KUBE_SCHEDULER_CONFIG}
EOF
}
# TODO(#92143): Remove legacy policy config creation once kube-scheduler config is GA.
function create-kubescheduler-policy-config {
# Dump the legacy SCHEDULER_POLICY_CONFIG env payload verbatim to the
# scheduler's policy-config file (see the deprecation TODO above).
echo "Creating kube-scheduler policy config file"
mkdir -p /etc/srv/kubernetes/kube-scheduler
cat <<EOF >/etc/srv/kubernetes/kube-scheduler/policy-config
${SCHEDULER_POLICY_CONFIG}
EOF
}
function create-node-problem-detector-kubeconfig {
# Write /var/lib/node-problem-detector/kubeconfig authenticating with the
# static NODE_PROBLEM_DETECTOR_TOKEN against the given apiserver address.
#   $1 - the IP address or hostname of the API server
local apiserver_address="${1}"
if [[ -z "${apiserver_address}" ]]; then
echo "Must provide API server address to create node-problem-detector kubeconfig file!"
exit 1
fi
echo "Creating node-problem-detector kubeconfig file"
mkdir -p /var/lib/node-problem-detector
cat <<EOF >/var/lib/node-problem-detector/kubeconfig
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
user:
token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: local
cluster:
server: https://${apiserver_address}
certificate-authority-data: ${CA_CERT}
contexts:
- context:
cluster: local
user: node-problem-detector
name: service-account-context
current-context: service-account-context
EOF
}
function create-node-problem-detector-kubeconfig-from-kubelet {
  # Reuse the kubelet's existing credentials for node-problem-detector by
  # copying its kubeconfig into NPD's state directory.
  local -r npd_dir="/var/lib/node-problem-detector"
  echo "Creating node-problem-detector kubeconfig from /var/lib/kubelet/kubeconfig"
  mkdir -p "${npd_dir}"
  cp /var/lib/kubelet/kubeconfig "${npd_dir}/kubeconfig"
}
function create-master-etcd-auth {
# Write the etcd peer-to-peer TLS material when all three pieces are
# present. The CA and peer certs are stored gzipped+base64 in the env;
# the peer key is plain base64.
if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
local -r auth_dir="/etc/srv/kubernetes"
echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt"
echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key"
echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt"
fi
}
function create-master-etcd-apiserver-auth {
  # Write the TLS material for the apiserver<->etcd connection: the CA
  # key/cert, etcd's serving pair, and the apiserver's client pair, all
  # exported via *_PATH globals for the manifests that reference them.
  # Certs are stored gzipped+base64 in the env; keys are plain base64.
  #
  # BUGFIX: the guard now also checks ETCD_APISERVER_CA_KEY — the body
  # decodes "${ETCD_APISERVER_CA_KEY}" unconditionally, which previously
  # aborted the script under `set -o nounset` when only the CA key was
  # absent from the environment.
  if [[ -n "${ETCD_APISERVER_CA_KEY:-}" && -n "${ETCD_APISERVER_CA_CERT:-}" && -n "${ETCD_APISERVER_SERVER_KEY:-}" && -n "${ETCD_APISERVER_SERVER_CERT:-}" && -n "${ETCD_APISERVER_CLIENT_KEY:-}" && -n "${ETCD_APISERVER_CLIENT_CERT:-}" ]]; then
    local -r auth_dir="/etc/srv/kubernetes/pki"
    ETCD_APISERVER_CA_KEY_PATH="${auth_dir}/etcd-apiserver-ca.key"
    echo "${ETCD_APISERVER_CA_KEY}" | base64 --decode > "${ETCD_APISERVER_CA_KEY_PATH}"
    # Keep in sync with add-replica-to-etcd/remove-replica-from-etcd in util.sh.
    ETCD_APISERVER_CA_CERT_PATH="${auth_dir}/etcd-apiserver-ca.crt"
    echo "${ETCD_APISERVER_CA_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_CA_CERT_PATH}"
    ETCD_APISERVER_SERVER_KEY_PATH="${auth_dir}/etcd-apiserver-server.key"
    echo "${ETCD_APISERVER_SERVER_KEY}" | base64 --decode > "${ETCD_APISERVER_SERVER_KEY_PATH}"
    ETCD_APISERVER_SERVER_CERT_PATH="${auth_dir}/etcd-apiserver-server.crt"
    echo "${ETCD_APISERVER_SERVER_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_SERVER_CERT_PATH}"
    # Keep in sync with add-replica-to-etcd/remove-replica-from-etcd in util.sh.
    ETCD_APISERVER_CLIENT_KEY_PATH="${auth_dir}/etcd-apiserver-client.key"
    echo "${ETCD_APISERVER_CLIENT_KEY}" | base64 --decode > "${ETCD_APISERVER_CLIENT_KEY_PATH}"
    # Keep in sync with add-replica-to-etcd/remove-replica-from-etcd in util.sh.
    ETCD_APISERVER_CLIENT_CERT_PATH="${auth_dir}/etcd-apiserver-client.crt"
    echo "${ETCD_APISERVER_CLIENT_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_CLIENT_CERT_PATH}"
  fi
}
function docker-installed {
  # Report whether a docker systemd unit exists on this host.
  # `systemctl cat` succeeds only when it can render the unit file; its
  # exit status is returned directly, replacing the redundant if/else
  # that mapped it to 0/1 (identical truthiness for all callers).
  systemctl cat docker.service &> /dev/null
}
# util function to add a docker option to daemon.json file only if the daemon.json file is present.
# accepts only one argument (docker options)
function addockeropt {
  # Append one docker option line to /etc/docker/daemon.json, but only if
  # that file already exists (silently a no-op otherwise).
  DOCKER_OPTS_FILE=/etc/docker/daemon.json
  # Exactly one argument (the JSON fragment) is accepted.
  if [ "$#" -lt 1 ]; then
    echo "No arguments are passed while adding docker options. Expect one argument"
    exit 1
  fi
  if [ "$#" -gt 1 ]; then
    echo "Only one argument is accepted"
    exit 1
  fi
  if [ -f "$DOCKER_OPTS_FILE" ]; then
    # %s keeps backslashes in the fragment literal, matching the former
    # heredoc behavior exactly.
    printf '%s\n' "$1" >> "${DOCKER_OPTS_FILE}"
  fi
}
function set_docker_options_non_ubuntu() {
  # Set docker options (MTU, storage driver, live-restore) for non-Ubuntu
  # distros; Ubuntu images already carry suitable defaults.
  if [[ -n "$(command -v lsb_release)" && $(lsb_release -si) == "Ubuntu" ]]; then
    echo "Not adding docker options on ubuntu, as these are default on ubuntu. Bailing out..."
    return
  fi
  addockeropt "\"mtu\": 1460,"
  addockeropt "\"storage-driver\": \"overlay2\","
  echo "setting live restore"
  # Disable live-restore if the environment variable is set.
  if [[ "${DISABLE_DOCKER_LIVE_RESTORE:-false}" == "true" ]]; then
    # BUGFIX: live-restore must be a JSON boolean in daemon.json; the
    # previous quoted string "false" is a type error that dockerd rejects
    # when parsing its configuration, so the disable never took effect.
    addockeropt "\"live-restore\": false,"
  fi
}
function assemble-docker-flags {
# Build /etc/docker/daemon.json from scratch (plus /etc/default/docker for
# the registry mirror flag), raise docker's systemd TasksMax, and restart
# docker to apply. Option lines are appended via addockeropt, so they must
# carry their own trailing commas to form valid JSON between the braces.
echo "Assemble docker options"
# log the contents of the /etc/docker/daemon.json if already exists
if [ -f /etc/docker/daemon.json ]; then
echo "Contents of the old docker config"
cat /etc/docker/daemon.json
fi
# Start a fresh daemon.json; everything below appends into the braces.
cat <<EOF >/etc/docker/daemon.json
{
EOF
# iptables/ip-masq are disabled because kube-proxy / the CNI setup own
# those rules on these images.
addockeropt "\"pidfile\": \"/var/run/docker.pid\",
\"iptables\": false,
\"ip-masq\": false,"
echo "setting log-level"
if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
addockeropt "\"log-level\": \"debug\","
else
addockeropt "\"log-level\": \"warn\","
fi
echo "setting network bridge"
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
# set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
addockeropt "\"bip\": \"169.254.123.1/24\","
else
addockeropt "\"bridge\": \"cbr0\","
fi
echo "setting registry mirror"
# TODO (vteratipally) move the registry-mirror completely to /etc/docker/daemon.json
local docker_opts=""
# Decide whether to enable a docker registry mirror. This is taken from
# the "kube-env" metadata value.
if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
docker_opts+="--registry-mirror=${DOCKER_REGISTRY_MIRROR_URL} "
fi
set_docker_options_non_ubuntu
echo "setting docker logging options"
# Configure docker logging
addockeropt "\"log-driver\": \"${DOCKER_LOG_DRIVER:-json-file}\","
# log-opts is the final entry, so it carries no trailing comma.
addockeropt "\"log-opts\": {
\"max-size\": \"${DOCKER_LOG_MAX_SIZE:-10m}\",
\"max-file\": \"${DOCKER_LOG_MAX_FILE:-5}\"
}"
cat <<EOF >>/etc/docker/daemon.json
}
EOF
echo "DOCKER_OPTS=\"${docker_opts}${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker
# Ensure TasksMax is sufficient for docker.
# (https://github.com/kubernetes/kubernetes/issues/51977)
echo "Extend the docker.service configuration to set a higher pids limit"
mkdir -p /etc/systemd/system/docker.service.d
cat <<EOF >/etc/systemd/system/docker.service.d/01tasksmax.conf
[Service]
TasksMax=infinity
EOF
# daemon-reload picks up the drop-in above before the restart applies it.
systemctl daemon-reload
echo "Docker command line is updated. Restart docker to pick it up"
systemctl restart docker
}
# This function assembles the kubelet systemd service file and starts it
# using systemctl.
function start-kubelet {
# Pick the kubelet binary, write its environment file and systemd unit,
# and start the service.
echo "Start kubelet"
local kubelet_bin="${KUBE_HOME}/bin/kubelet"
local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
local -r builtin_kubelet="/usr/bin/kubelet"
if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
# Determine which binary to use on test clusters. We use the built-in
# version only if the downloaded version is the same as the built-in
# version. This allows GCI to run some of the e2e tests to qualify the
# built-in kubelet.
if [[ -x "${builtin_kubelet}" ]]; then
local -r builtin_version="$("${builtin_kubelet}" --version=true | cut -f2 -d " ")"
if [[ "${builtin_version}" == "${version}" ]]; then
kubelet_bin="${builtin_kubelet}"
fi
fi
fi
echo "Using kubelet binary at ${kubelet_bin}"
# The env file feeds $KUBELET_OPTS into ExecStart below.
local -r kubelet_env_file="/etc/default/kubelet"
local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-}"
echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}"
echo "KUBE_COVERAGE_FILE=\"/var/log/kubelet.cov\"" >> "${kubelet_env_file}"
# Write the systemd service file for kubelet.
cat <<EOF >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes kubelet
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS
[Install]
WantedBy=multi-user.target
EOF
# create-sidecar-config is defined elsewhere in this script.
if [[ ${ENABLE_CREDENTIAL_SIDECAR:-false} == "true" ]]; then
create-sidecar-config
fi
systemctl daemon-reload
systemctl start kubelet.service
}
# This function assembles the node problem detector systemd service file and
# starts it using systemctl.
function start-node-problem-detector {
echo "Start node problem detector"
local -r npd_bin="${KUBE_HOME}/bin/node-problem-detector"
echo "Using node problem detector binary at ${npd_bin}"
# Operator-supplied flags are used verbatim when provided; otherwise the
# default flag set is assembled from the bundled monitor configs below.
local flags="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}"
if [[ -z "${flags}" ]]; then
local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
# TODO(random-liu): Handle this for alternative container runtime.
local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
local -r sm_config="${KUBE_HOME}/node-problem-detector/config/systemd-monitor.json"
local -r ssm_config="${KUBE_HOME}/node-problem-detector/config/system-stats-monitor.json"
local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json"
local -r custom_sm_config="${KUBE_HOME}/node-problem-detector/config/systemd-monitor-counter.json"
flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
flags+=" --logtostderr"
flags+=" --config.system-log-monitor=${km_config},${dm_config},${sm_config}"
flags+=" --config.system-stats-monitor=${ssm_config}"
flags+=" --config.custom-plugin-monitor=${custom_km_config},${custom_sm_config}"
local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
flags+=" --port=${npd_port}"
if [[ -n "${EXTRA_NPD_ARGS:-}" ]]; then
flags+=" ${EXTRA_NPD_ARGS}"
fi
fi
# The apiserver override is appended even when custom flags were supplied:
# NPD talks to the master directly using the kubeconfig staged on the node.
flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
# Write the systemd service file for node problem detector.
cat <<EOF >/etc/systemd/system/node-problem-detector.service
[Unit]
Description=Kubernetes node problem detector
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
ExecStart=${npd_bin} ${flags}
[Install]
WantedBy=multi-user.target
EOF
# NOTE(review): unlike start-kubelet, no "systemctl daemon-reload" is issued
# before starting the freshly written unit — confirm this is intentional.
systemctl start node-problem-detector.service
}
# Create the log file and set its properties.
#
# $1 is the file to create.
# $2: the log owner uid to set for the log file.
# $3: the log owner gid to set for the log file. If $KUBE_POD_LOG_READERS_GROUP
# is set then this value will not be used.
function prepare-log-file {
  # Create log file $1 and set its mode/ownership.
  #
  # $1: path of the log file to create.
  # $2: uid that should own the file (root-ish defaults apply).
  # $3: gid that should own the file; ignored entirely when
  #     $KUBE_POD_LOG_READERS_GROUP is set.
  local -r logfile="$1"
  touch "${logfile}"
  if [[ -n "${KUBE_POD_LOG_READERS_GROUP:-}" ]]; then
    # Pod log readers get group read access; the group overrides $3.
    chmod 640 "${logfile}"
    chown "${2:-root}":"${KUBE_POD_LOG_READERS_GROUP}" "${logfile}"
  else
    chmod 644 "${logfile}"
    chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" "${logfile}"
  fi
}
# Prepares parameters for kube-proxy manifest.
# $1 source path of kube-proxy manifest.
# Assumptions: HOST_PLATFORM and HOST_ARCH are specified by calling detect_host_info.
function prepare-kube-proxy-manifest-variables {
local -r src_file=$1;
# Flag/registry values substituted into the manifest template below.
local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
local kube_docker_registry="k8s.gcr.io"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
kube_docker_registry=${KUBE_DOCKER_REGISTRY}
fi
local -r kube_proxy_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-proxy.docker_tag)
local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ "${KUBE_PROXY_MODE:-}" == "ipvs" ]];then
# use 'nf_conntrack' instead of 'nf_conntrack_ipv4' for linux kernel >= 4.19
# https://github.com/kubernetes/kubernetes/pull/70398
local -r kernel_version=$(uname -r | cut -d\. -f1,2)
local conntrack_module="nf_conntrack"
# sort -V picks the larger version; if 4.18 is the max, kernel is <= 4.18.
if [[ $(printf '%s\n4.18\n' "${kernel_version}" | sort -V | tail -1) == "4.18" ]]; then
conntrack_module="nf_conntrack_ipv4"
fi
if sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh ${conntrack_module}; then
params+=" --proxy-mode=ipvs"
else
# If IPVS modules are not present, make sure the node does not come up as
# healthy.
exit 1
fi
fi
params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s"
if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
params+=" ${KUBEPROXY_TEST_ARGS}"
fi
if [[ -n "${DETECT_LOCAL_MODE:-}" ]]; then
params+=" --detect-local-mode=${DETECT_LOCAL_MODE}"
fi
# Optional cache-mutation-detector env var, rendered as YAML fragments.
local container_env=""
local kube_cache_mutation_detector_env_name=""
local kube_cache_mutation_detector_env_value=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="env:"
kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
fi
# Substitute the computed values into the manifest template in place.
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" "${src_file}"
# TODO(#99245): Use multi-arch image and get rid of this.
sed -i -e "s@{{pillar\['host_arch'\]}}@${HOST_ARCH}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" "${src_file}"
sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" "${src_file}"
sed -i -e "s@{{ cpurequest }}@100m@g" "${src_file}"
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" "${src_file}"
sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" "${src_file}"
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" "${src_file}"
fi
}
# Starts kube-proxy static pod.
function start-kube-proxy {
  # Render the kube-proxy manifest template and install it as a static pod.
  echo "Start kube-proxy static pod"
  prepare-log-file /var/log/kube-proxy.log
  local -r manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-proxy.manifest"
  prepare-kube-proxy-manifest-variables "${manifest}"
  cp "${manifest}" /etc/kubernetes/manifests
}
# Replaces the variables in the etcd manifest file with the real values, and then
# copy the file to the manifest dir
# $1: value for variable 'suffix'
# $2: value for variable 'port'
# $3: value for variable 'server_port'
# $4: value for variable 'cpulimit'
# $5: pod name, which should be either etcd or etcd-events
function prepare-etcd-manifest {
local host_name=${ETCD_HOSTNAME:-$(hostname -s)}
# Resolve this member's IP address using the interpreter in ${PYTHON}.
local -r host_ip=$(${PYTHON} -c "import socket;print(socket.gethostbyname(\"${host_name}\"))")
local etcd_cluster=""
local cluster_state="new"
local etcd_protocol="http"
local etcd_apiserver_protocol="http"
local etcd_creds=""
local etcd_apiserver_creds="${ETCD_APISERVER_CREDS:-}"
local etcd_extra_args="${ETCD_EXTRA_ARGS:-}"
local suffix="$1"
local etcd_listen_metrics_port="$2"
local etcdctl_certs=""
if [[ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]]; then
cluster_state="${INITIAL_ETCD_CLUSTER_STATE}"
fi
# Enable peer TLS when the peer CA cert and key pair are all provided.
if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth "
etcd_protocol="https"
fi
# mTLS should only be enabled for etcd server but not etcd-events. if $1 suffix is empty, it's etcd server.
if [[ -z "${suffix}" && -n "${ETCD_APISERVER_CA_KEY:-}" && -n "${ETCD_APISERVER_CA_CERT:-}" && -n "${ETCD_APISERVER_SERVER_KEY:-}" && -n "${ETCD_APISERVER_SERVER_CERT:-}" && -n "${ETCD_APISERVER_CLIENT_KEY:-}" && -n "${ETCD_APISERVER_CLIENT_CERT:-}" ]]; then
etcd_apiserver_creds=" --client-cert-auth --trusted-ca-file ${ETCD_APISERVER_CA_CERT_PATH} --cert-file ${ETCD_APISERVER_SERVER_CERT_PATH} --key-file ${ETCD_APISERVER_SERVER_KEY_PATH} "
etcdctl_certs="--cacert ${ETCD_APISERVER_CA_CERT_PATH} --cert ${ETCD_APISERVER_CLIENT_CERT_PATH} --key ${ETCD_APISERVER_CLIENT_KEY_PATH}"
etcd_apiserver_protocol="https"
# With client TLS on the main port, serve metrics over plain HTTP on a
# dedicated port instead.
etcd_listen_metrics_port="2382"
etcd_extra_args+=" --listen-metrics-urls=http://${ETCD_LISTEN_CLIENT_IP:-127.0.0.1}:${etcd_listen_metrics_port} "
fi
if [[ -n "${ETCD_PROGRESS_NOTIFY_INTERVAL:-}" ]]; then
etcd_extra_args+=" --experimental-watch-progress-notify-interval=${ETCD_PROGRESS_NOTIFY_INTERVAL}"
fi
# Build the comma-separated --initial-cluster membership list; $3 is the
# peer (server) port.
for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do
etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3"
if [[ -n "${etcd_cluster}" ]]; then
etcd_cluster+=","
fi
etcd_cluster+="${etcd_host}"
done
# Work on a temp copy named after the pod ($5), then move it into place.
local -r temp_file="/tmp/$5"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
sed -i -e "s@{{ *listen_client_ip *}}@${ETCD_LISTEN_CLIENT_IP:-127.0.0.1}@g" "${temp_file}"
# Get default storage backend from manifest file.
local -r default_storage_backend=$( \
grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" "${temp_file}" | \
sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
# etcd3 supports a backend size quota; default 4294967296 (4 GiB).
if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then
sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=${ETCD_QUOTA_BACKEND_BYTES:-4294967296}@g" "${temp_file}"
else
sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}"
fi
sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}"
if [[ -n "${ETCD_IMAGE:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
if [[ -n "${ETCD_DOCKER_REPOSITORY:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@${ETCD_DOCKER_REPOSITORY}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}"
sed -i -e "s@{{ *etcd_apiserver_protocol *}}@$etcd_apiserver_protocol@g" "${temp_file}"
sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}"
sed -i -e "s@{{ *etcdctl_certs *}}@$etcdctl_certs@g" "${temp_file}"
sed -i -e "s@{{ *etcd_apiserver_creds *}}@$etcd_apiserver_creds@g" "${temp_file}"
sed -i -e "s@{{ *etcd_extra_args *}}@$etcd_extra_args@g" "${temp_file}"
if [[ -n "${ETCD_VERSION:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
# Replace the volume host path.
sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}"
# Replace the run as user and run as group
container_security_context=""
if [[ -n "${ETCD_RUNASUSER:-}" && -n "${ETCD_RUNASGROUP:-}" ]]; then
container_security_context="\"securityContext\": {\"runAsUser\": ${ETCD_RUNASUSER}, \"runAsGroup\": ${ETCD_RUNASGROUP}, \"allowPrivilegeEscalation\": false, \"capabilities\": {\"drop\": [\"all\"]}},"
fi
sed -i -e "s@{{security_context}}@${container_security_context}@g" "${temp_file}"
mv "${temp_file}" /etc/kubernetes/manifests
}
# Starts etcd server pod (and etcd-events pod if needed).
# More specifically, it prepares dirs and files, sets the variable value
# in the manifests, and copies them to /etc/kubernetes/manifests.
function start-etcd-servers {
  # Remove any legacy etcd configuration left on disk, fix up data-dir
  # ownership when etcd runs as non-root, then render and install the
  # etcd and etcd-events static-pod manifests.
  echo "Start etcd pods"
  if [[ -d /etc/etcd ]]; then
    rm -rf /etc/etcd
  fi
  local stale
  for stale in /etc/default/etcd /etc/systemd/system/etcd.service /etc/init.d/etcd; do
    if [[ -e "${stale}" ]]; then
      rm -f "${stale}"
    fi
  done
  if [[ -n "${ETCD_RUNASUSER:-}" && -n "${ETCD_RUNASGROUP:-}" ]]; then
    chown -R "${ETCD_RUNASUSER}":"${ETCD_RUNASGROUP}" /mnt/disks/master-pd/var/etcd
  fi
  # Main etcd: client 2379, peer 2380; etcd-events: client 4002, peer 2381.
  prepare-log-file /var/log/etcd.log "${ETCD_RUNASUSER:-0}"
  prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"
  prepare-log-file /var/log/etcd-events.log "${ETCD_RUNASUSER:-0}"
  prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
}
# Replaces the variables in the konnectivity-server manifest file with the real values, and then
# copy the file to the manifest dir
# $1: value for variable "agent_port"
# $2: value for variable "health_port"
# $3: value for variable "admin_port"
function prepare-konnectivity-server-manifest {
  # Renders the konnectivity-server static-pod manifest and installs it
  # into /etc/kubernetes/manifests.
  #
  # $1: value for variable "agent_port"
  # $2: value for variable "health_port"
  # $3: value for variable "admin_port"
  local -r temp_file="/tmp/konnectivity-server.yaml"
  # Declare the scratch variables local so they neither leak into nor pick
  # up stale state from the global shell environment (they were previously
  # implicit globals).
  local -a params=()
  local konnectivity_args=""
  local param
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/konnectivity-server.yaml" "${temp_file}"
  params+=("--log-file=/var/log/konnectivity-server.log")
  params+=("--logtostderr=false")
  params+=("--log-file-max-size=0")
  params+=("--uds-name=/etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket")
  params+=("--cluster-cert=/etc/srv/kubernetes/pki/apiserver.crt")
  params+=("--cluster-key=/etc/srv/kubernetes/pki/apiserver.key")
  # The proxy protocol mode must be either grpc or http-connect; fail the
  # node bootstrap loudly on anything else.
  if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'grpc' ]]; then
    params+=("--mode=grpc")
  elif [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
    params+=("--mode=http-connect")
  else
    echo "KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE must be set to either grpc or http-connect"
    exit 1
  fi
  params+=("--server-port=0")
  params+=("--agent-port=$1")
  params+=("--health-port=$2")
  params+=("--admin-port=$3")
  params+=("--agent-namespace=kube-system")
  params+=("--agent-service-account=konnectivity-agent")
  params+=("--kubeconfig=/etc/srv/kubernetes/konnectivity-server/kubeconfig")
  params+=("--authentication-audience=system:konnectivity-server")
  # Join the flags into a ', "flag"' fragment for the manifest's args array.
  for param in "${params[@]}"; do
    konnectivity_args+=", \"${param}\""
  done
  sed -i -e "s@{{ *konnectivity_args *}}@${konnectivity_args}@g" "${temp_file}"
  sed -i -e "s@{{ *agent_port *}}@$1@g" "${temp_file}"
  sed -i -e "s@{{ *health_port *}}@$2@g" "${temp_file}"
  sed -i -e "s@{{ *admin_port *}}@$3@g" "${temp_file}"
  sed -i -e "s@{{ *liveness_probe_initial_delay *}}@30@g" "${temp_file}"
  mv "${temp_file}" /etc/kubernetes/manifests
}
# Starts konnectivity server pod.
# More specifically, it prepares dirs and files, sets the variable value
# in the manifests, and copies them to /etc/kubernetes/manifests.
function start-konnectivity-server {
  # Install the konnectivity-server static pod using the standard port
  # layout: agent=8132, health=8133, admin=8134.
  echo "Start konnectivity server pods"
  prepare-log-file /var/log/konnectivity-server.log
  prepare-konnectivity-server-manifest "8132" "8133" "8134"
}
# Calculates the following variables based on env variables, which will be used
# by the manifests of several kube-master components.
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
# FLEXVOLUME_HOSTPATH_MOUNT
# FLEXVOLUME_HOSTPATH_VOLUME
# INSECURE_PORT_MAPPING
function compute-master-manifest-variables {
# All variables assigned here are globals consumed by the start-* functions
# when they substitute values into the master component manifests.
CLOUD_CONFIG_OPT=""
CLOUD_CONFIG_VOLUME=""
CLOUD_CONFIG_MOUNT=""
# When a GCE cloud config exists, mount it read-only into the components.
if [[ -f /etc/gce.conf ]]; then
CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf"
CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
fi
DOCKER_REGISTRY="k8s.gcr.io"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}"
fi
FLEXVOLUME_HOSTPATH_MOUNT=""
FLEXVOLUME_HOSTPATH_VOLUME=""
# Expose a custom flexvolume plugin dir to the components when configured.
if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
FLEXVOLUME_HOSTPATH_MOUNT="{ \"name\": \"flexvolumedir\", \"mountPath\": \"${VOLUME_PLUGIN_DIR}\", \"readOnly\": true},"
FLEXVOLUME_HOSTPATH_VOLUME="{ \"name\": \"flexvolumedir\", \"hostPath\": {\"path\": \"${VOLUME_PLUGIN_DIR}\"}},"
fi
INSECURE_PORT_MAPPING=""
if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" == "true" ]]; then
# INSECURE_PORT_MAPPING is used by sed
# shellcheck disable=SC2089
INSECURE_PORT_MAPPING='{ "name": "local", "containerPort": 8080, "hostPort": 8080},'
fi
# shellcheck disable=SC2090
export INSECURE_PORT_MAPPING
}
# A helper function that bind mounts kubelet dirs for running mount in a chroot
function prepare-mounter-rootfs {
echo "Prepare containerized mounter"
# Self-bind-mount first so the exec remount below affects only this subtree.
mount --bind "${CONTAINERIZED_MOUNTER_HOME}" "${CONTAINERIZED_MOUNTER_HOME}"
mount -o remount,exec "${CONTAINERIZED_MOUNTER_HOME}"
CONTAINERIZED_MOUNTER_ROOTFS="${CONTAINERIZED_MOUNTER_HOME}/rootfs"
# Recursively bind kubelet's state dir into the chroot and share mount
# events back out (rshared) so mounts done inside are visible to the host.
mount --rbind /var/lib/kubelet/ "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
# Expose /proc and /dev read-only inside the chroot.
mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc"
mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev"
# Give the chroot working DNS resolution.
cp /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/"
}
# Updates node labels used by addons.
function update-legacy-addon-node-labels() {
  # Block until the apiserver is reachable, then migrate the legacy
  # beta.kubernetes.io addon-readiness labels to their GA replacements
  # on every node that still carries only the old label.
  while ! kubectl get nodes; do
    sleep 5
  done
  update-node-label "beta.kubernetes.io/metadata-proxy-ready=true,cloud.google.com/metadata-proxy-ready!=true" "cloud.google.com/metadata-proxy-ready=true"
  update-node-label "beta.kubernetes.io/kube-proxy-ds-ready=true,node.kubernetes.io/kube-proxy-ds-ready!=true" "node.kubernetes.io/kube-proxy-ds-ready=true"
  update-node-label "beta.kubernetes.io/masq-agent-ds-ready=true,node.kubernetes.io/masq-agent-ds-ready!=true" "node.kubernetes.io/masq-agent-ds-ready=true"
}
# A helper function for labeling all nodes matching a given selector.
# Runs: kubectl label --overwrite nodes -l "${1}" "${2}"
# Retries on failure
#
# $1: label selector of nodes
# $2: label to apply
function update-node-label() {
  # Apply label $2 to all nodes matching selector $1, retrying up to five
  # times (3s apart) to ride out transient apiserver unavailability.
  local -r selector="$1"
  local -r label="$2"
  local attempt
  for (( attempt = 0; attempt < 5; attempt++ )); do
    if kubectl label --overwrite nodes -l "${selector}" "${label}"; then
      return 0
    fi
    sleep 3
  done
}
# A helper function that sets file permissions for kube-controller-manager to
# run as non root.
# User and group should never contain characters that need to be quoted
# shellcheck disable=SC2086
function run-kube-controller-manager-as-non-root {
  # Give the kube-controller-manager service user ownership of its log file
  # and read access (via ACL) to the certificate/key material it consumes.
  prepare-log-file /var/log/kube-controller-manager.log ${KUBE_CONTROLLER_MANAGER_RUNASUSER}
  local cred
  for cred in "${CA_CERT_BUNDLE_PATH}" "${SERVICEACCOUNT_CERT_PATH}" "${SERVICEACCOUNT_KEY_PATH}"; do
    setfacl -m u:${KUBE_CONTROLLER_MANAGER_RUNASUSER}:r "${cred}"
  done
}
# A helper function that sets file permissions for cloud-controller-manager to
# run as non root.
# User and group should never contain characters that need to be quoted
# shellcheck disable=SC2086
function run-cloud-controller-manager-as-non-root {
  # Give the cloud-controller-manager service user ownership of its log file
  # and read access (via ACL) to the certificate/key material it consumes.
  prepare-log-file /var/log/cloud-controller-manager.log ${CLOUD_CONTROLLER_MANAGER_RUNASUSER}
  local cred
  for cred in "${CA_CERT_BUNDLE_PATH}" "${SERVICEACCOUNT_CERT_PATH}" "${SERVICEACCOUNT_KEY_PATH}"; do
    setfacl -m u:${CLOUD_CONTROLLER_MANAGER_RUNASUSER}:r "${cred}"
  done
}
# Starts kubernetes controller manager.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function start-kube-controller-manager {
# GKE-internal deployments may opt out of deploying KCM through kube-up.
if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
if ! deploy-kube-controller-manager-via-kube-up; then
echo "kube-controller-manager is configured to not be deployed through kube-up."
return
fi
fi
echo "Start kubernetes controller-manager"
create-kubeconfig "kube-controller-manager" "${KUBE_CONTROLLER_MANAGER_TOKEN}"
prepare-log-file /var/log/kube-controller-manager.log
# Calculate variables and assemble the command line.
local params=("${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"}" "${CONTROLLER_MANAGER_TEST_ARGS:-}" "${CLOUD_CONFIG_OPT}")
local config_path='/etc/srv/kubernetes/kube-controller-manager/kubeconfig'
params+=("--use-service-account-credentials")
params+=("--cloud-provider=external")
params+=("--external-cloud-volume-plugin=gce")
params+=("--kubeconfig=${config_path}" "--authentication-kubeconfig=${config_path}" "--authorization-kubeconfig=${config_path}")
params+=("--root-ca-file=${CA_CERT_BUNDLE_PATH}")
params+=("--service-account-private-key-file=${SERVICEACCOUNT_KEY_PATH}")
params+=("--volume-host-allow-local-loopback=false")
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=("--enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}")
fi
if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
params+=("--cluster-name=${INSTANCE_PREFIX}")
fi
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
params+=("--cluster-cidr=${CLUSTER_IP_RANGE}")
fi
# CSR signing flags are only meaningful when the cluster CA key is present.
if [[ -n "${CA_KEY:-}" ]]; then
params+=("--cluster-signing-cert-file=${CA_CERT_PATH}")
params+=("--cluster-signing-key-file=${CA_KEY_PATH}")
fi
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=("--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}")
fi
if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then
params+=("--concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}")
fi
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
params+=("--allocate-node-cidrs=true")
elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
params+=("--allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}")
fi
if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
params+=("--terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}")
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
params+=("--cidr-allocator-type=${NODE_IPAM_MODE}")
params+=("--configure-cloud-routes=false")
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=("--feature-gates=${FEATURE_GATES}")
fi
if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
params+=("--flex-volume-plugin-dir=${VOLUME_PLUGIN_DIR}")
fi
if [[ -n "${CLUSTER_SIGNING_DURATION:-}" ]]; then
params+=("--cluster-signing-duration=$CLUSTER_SIGNING_DURATION")
fi
# Disable using HPA metrics REST clients if metrics-server isn't enabled,
# or if we want to explicitly disable it by setting HPA_USE_REST_CLIENT.
if [[ "${ENABLE_METRICS_SERVER:-}" != "true" ]] ||
[[ "${HPA_USE_REST_CLIENTS:-}" == "false" ]]; then
params+=("--horizontal-pod-autoscaler-use-rest-clients=false")
fi
if [[ -n "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
params+=("--pv-recycler-pod-template-filepath-nfs=$PV_RECYCLER_OVERRIDE_TEMPLATE")
params+=("--pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE")
fi
if [[ -n "${RUN_CONTROLLERS:-}" ]]; then
params+=("--controllers=${RUN_CONTROLLERS}")
fi
local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
# Optional cache-mutation-detector env var rendered as a JSON fragment.
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
fi
local paramstring
paramstring="$(convert-manifest-params "${params[*]}")"
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
# Evaluate variables.
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${paramstring}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
sed -i -e "s@{{pv_recycler_mount}}@${PV_RECYCLER_MOUNT}@g" "${src_file}"
sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"
# Run as a dedicated non-root user/group when both are configured.
if [[ -n "${KUBE_CONTROLLER_MANAGER_RUNASUSER:-}" && -n "${KUBE_CONTROLLER_MANAGER_RUNASGROUP:-}" ]]; then
run-kube-controller-manager-as-non-root
sed -i -e "s@{{runAsUser}}@${KUBE_CONTROLLER_MANAGER_RUNASUSER}@g" "${src_file}"
sed -i -e "s@{{runAsGroup}}@${KUBE_CONTROLLER_MANAGER_RUNASGROUP}@g" "${src_file}"
else
sed -i -e "s@{{runAsUser}}@0@g" "${src_file}"
sed -i -e "s@{{runAsGroup}}@0@g" "${src_file}"
fi
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts cloud controller manager.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function start-cloud-controller-manager {
  # Starts the GCE cloud-controller-manager as a static pod: stages its
  # addon manifests, kubeconfig and log file, assembles the command line
  # from the cluster environment, substitutes everything into the manifest
  # template, and installs it in /etc/kubernetes/manifests.
  #
  # Assumed vars (computed by compute-master-manifest-variables):
  #   CLOUD_CONFIG_OPT, CLOUD_CONFIG_VOLUME, CLOUD_CONFIG_MOUNT, DOCKER_REGISTRY
  echo "Start cloud provider controller-manager"
  setup-addon-manifests "addons" "cloud-controller-manager"
  # Quote the token so it is always passed as a single argument, matching
  # the sibling start-kube-controller-manager.
  create-kubeconfig "cloud-controller-manager" "${CLOUD_CONTROLLER_MANAGER_TOKEN}"
  prepare-log-file /var/log/cloud-controller-manager.log
  # Calculate variables and assemble the command line.
  local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=4"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
  params+=" --port=10253"
  params+=" --use-service-account-credentials"
  params+=" --cloud-provider=gce"
  params+=" --kubeconfig=/etc/srv/kubernetes/cloud-controller-manager/kubeconfig"
  if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
    params+=" --cluster-name=${INSTANCE_PREFIX}"
  fi
  if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
    params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
  fi
  if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then
    params+=" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}"
  fi
  if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
    params+=" --allocate-node-cidrs=true"
  elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
    params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
  fi
  if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
    params+=" --cidr-allocator-type=${NODE_IPAM_MODE}"
    params+=" --configure-cloud-routes=false"
  fi
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
  if [[ -n "${RUN_CONTROLLERS:-}" ]]; then
    params+=" --controllers=${RUN_CONTROLLERS}"
  fi
  params="$(convert-manifest-params "${params}")"
  # Split declaration from assignment so a failing command substitution is
  # not masked by 'local' (SC2155).
  local kube_rc_docker_tag
  kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/cloud-controller-manager.docker_tag)
  # Docker image tags may not contain '+'; normalize semver build metadata.
  kube_rc_docker_tag=$(echo "${kube_rc_docker_tag}" | sed 's/+/-/g')
  # Optional cache-mutation-detector env var rendered as a JSON fragment.
  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
  fi
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cloud-controller-manager.manifest"
  # Evaluate variables; ${src_file} is quoted on every sed call for
  # consistency (the container_env call was previously unquoted).
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['cloud-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
  sed -i -e "s@{{pv_recycler_mount}}@${PV_RECYCLER_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{cpurequest}}@${CLOUD_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"
  # Run as a dedicated non-root user/group when both are configured;
  # otherwise fall back to root (uid/gid 0).
  if [[ -n "${CLOUD_CONTROLLER_MANAGER_RUNASUSER:-}" && -n "${CLOUD_CONTROLLER_MANAGER_RUNASGROUP:-}" ]]; then
    run-cloud-controller-manager-as-non-root
    sed -i -e "s@{{runAsUser}}@${CLOUD_CONTROLLER_MANAGER_RUNASUSER}@g" "${src_file}"
    sed -i -e "s@{{runAsGroup}}@${CLOUD_CONTROLLER_MANAGER_RUNASGROUP}@g" "${src_file}"
  else
    sed -i -e "s@{{runAsUser}}@0@g" "${src_file}"
    sed -i -e "s@{{runAsGroup}}@0@g" "${src_file}"
  fi
  cp "${src_file}" /etc/kubernetes/manifests
}
# Starts kubernetes scheduler.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in compute-master-manifest-variables)
# DOCKER_REGISTRY
function start-kube-scheduler {
# GKE-internal deployments may opt out of deploying the scheduler via kube-up.
if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
if ! deploy-kube-scheduler-via-kube-up; then
echo "kube-scheduler is configured to not be deployed through kube-up."
return
fi
fi
echo "Start kubernetes scheduler"
create-kubeconfig "kube-scheduler" "${KUBE_SCHEDULER_TOKEN}"
# User and group should never contain characters that need to be quoted
# shellcheck disable=SC2086
prepare-log-file /var/log/kube-scheduler.log ${KUBE_SCHEDULER_RUNASUSER:-2001}
# Calculate variables and set them in the manifest.
params=("${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"}" "${SCHEDULER_TEST_ARGS:-}")
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=("--feature-gates=${FEATURE_GATES}")
fi
# Scheduler Component Config takes precedence over some flags.
if [[ -n "${KUBE_SCHEDULER_CONFIG:-}" ]]; then
create-kube-scheduler-config
params+=("--config=/etc/srv/kubernetes/kube-scheduler/config")
else
params+=("--kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig")
if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}"  ]]; then
params+=("--algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}")
fi
if [[ -n "${SCHEDULER_POLICY_CONFIG:-}" ]]; then
create-kubescheduler-policy-config
params+=("--use-legacy-policy-config")
params+=("--policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config")
fi
fi
# Auth kubeconfigs are always passed, even in component-config mode.
local config_path
config_path='/etc/srv/kubernetes/kube-scheduler/kubeconfig'
params+=("--authentication-kubeconfig=${config_path}" "--authorization-kubeconfig=${config_path}")
local paramstring
paramstring="$(convert-manifest-params "${params[*]}")"
local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
sed -i -e "s@{{params}}@${paramstring}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_SCHEDULER_CPU_REQUEST}@g" "${src_file}"
# uid/gid default to 2001 when no dedicated scheduler user is configured.
sed -i -e "s@{{runAsUser}}@${KUBE_SCHEDULER_RUNASUSER:-2001}@g" "${src_file}"
sed -i -e "s@{{runAsGroup}}@${KUBE_SCHEDULER_RUNASGROUP:-2001}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts cluster autoscaler.
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
# No-op unless ENABLE_CLUSTER_AUTOSCALER == "true".
function start-cluster-autoscaler {
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Start kubernetes cluster autoscaler"
setup-addon-manifests "addons" "rbac/cluster-autoscaler"
create-kubeconfig "cluster-autoscaler" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}"
prepare-log-file /var/log/cluster-autoscaler.log
# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
local params
# AUTOSCALER_MIG_CONFIG is a space-separated flag list; read splits it into
# one array element per flag.
read -r -a params <<< "${AUTOSCALER_MIG_CONFIG}"
# Default expander is "price" unless AUTOSCALER_EXPANDER_CONFIG overrides it.
params+=("${CLOUD_CONFIG_OPT}" "${AUTOSCALER_EXPANDER_CONFIG:---expander=price}")
params+=("--kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig")
# Render the params as a quoted, comma-separated list ("a","b",...) for the
# JSON-style manifest; the trailing comma is trimmed just below.
local params_split
params_split=$(eval 'for param in "${params[@]}"; do echo -n \""$param"\",; done')
params_split=${params_split%?}
sed -i -e "s@{{params}}@${params_split}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
# Drop any leftover salt-style {% ... %} directives.
sed -i -e "s@{%.*%}@@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
fi
}
# A helper function for setting up addon manifests.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: (optional) auxiliary manifest source dir; defaults to $2
function setup-addon-manifests {
  local -r category="$1"
  local -r manifest_dir="$2"
  local -r aux_dir="${3:-$2}"
  local -r base_src="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
  local -r target_dir="/etc/kubernetes/${category}/${manifest_dir}"
  copy-manifests "${base_src}/${manifest_dir}" "${target_dir}"
  # If the PodSecurityPolicy admission controller is enabled,
  # set up the corresponding addon policies.
  if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
    local -r psp_src="${base_src}/${aux_dir}/podsecuritypolicies"
    if [[ -d "${psp_src}" ]]; then
      copy-manifests "${psp_src}" "${target_dir}"
    fi
  fi
  # Likewise for the node-termination-handler companion manifests.
  if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
    local -r nth_src="${base_src}/${aux_dir}/node-termination-handler"
    if [[ -d "${nth_src}" ]]; then
      copy-manifests "${nth_src}" "${target_dir}"
    fi
  fi
}
# A function that downloads extra addons from a URL and puts them in the GCI
# manifests directory.
# Assumed vars: KUBE_HOME, EXTRA_ADDONS_URL; optional: EXTRA_ADDONS_HEADER.
function download-extra-addons {
  local -r extras_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/gce-extras"
  mkdir -p "${extras_dir}"
  # Build the curl argument list; retries cover transient network failures.
  local fetch_args=(
    --fail
    --retry 5
    --retry-delay 3
    --silent
    --show-error
    --retry-connrefused
  )
  if [[ -n "${EXTRA_ADDONS_HEADER:-}" ]]; then
    fetch_args+=(-H "${EXTRA_ADDONS_HEADER}")
  fi
  curl "${fetch_args[@]}" -o "${extras_dir}/extras.json" "${EXTRA_ADDONS_URL}"
}
# A function that fetches a GCE metadata value and echoes it out.
# Args:
#   $1 : URL path after /computeMetadata/v1/ (without heading slash).
#   $2 : An optional default value to echo out if the fetch fails.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function get-metadata-value {
local default="${2:-}"
local status
# The `|| status="$?"` captures curl's exit code while preventing errexit
# (if enabled in the caller) from aborting the script on fetch failure.
curl \
--retry 5 \
--retry-delay 3 \
--retry-connrefused \
--fail \
--silent \
-H 'Metadata-Flavor: Google' \
"http://metadata/computeMetadata/v1/${1}" \
|| status="$?"
# status is unset when curl succeeded; normalize to 0.
status="${status:-0}"
# Propagate curl's status when it succeeded or no default exists; otherwise
# echo the default (and return echo's status, i.e. success).
if [[ "${status}" -eq 0 || -z "${default}" ]]; then
return "${status}"
else
echo "${default}"
fi
}
# A helper function for copying manifests and setting dir/files
# permissions.
#
# $1: absolute source dir
# $2: absolute destination dir
function copy-manifests {
  local -r from_dir="$1"
  local -r to_dir="$2"
  # mkdir -p is a no-op when the destination already exists.
  mkdir -p "${to_dir}"
  local pattern
  for pattern in '*.yaml' '*.json' '*.yaml.in'; do
    # Copy only when the glob matches at least one file; ${pattern} is left
    # unquoted on purpose so the shell expands it.
    if [[ -n "$(ls "${from_dir}"/${pattern} 2>/dev/null)" ]]; then
      cp "${from_dir}"/${pattern} "${to_dir}"
    fi
  done
  chown -R root:root "${to_dir}"
  chmod 755 "${to_dir}"
  chmod 644 "${to_dir}"/*
}
# Fluentd resources are modified using ScalingPolicy CR, which may not be
# available at this point. Run this as a background process.
# Reads FLUENTD_GCP_{CPU_REQUEST,MEMORY_REQUEST,MEMORY_LIMIT}; does nothing
# when none of them are set.
function wait-for-apiserver-and-update-fluentd {
local any_overrides=false
if [[ -n "${FLUENTD_GCP_MEMORY_LIMIT:-}" ]]; then
any_overrides=true
fi
if [[ -n "${FLUENTD_GCP_CPU_REQUEST:-}" ]]; then
any_overrides=true
fi
if [[ -n "${FLUENTD_GCP_MEMORY_REQUEST:-}" ]]; then
any_overrides=true
fi
if ! $any_overrides; then
# Nothing to do here.
# `exit` (not `return`) is safe here: this function is only run
# backgrounded (see start-fluentd-resource-update), i.e. in a subshell.
exit
fi
# Wait until ScalingPolicy CRD is in place.
until kubectl get scalingpolicies.scalingpolicy.kope.io
do
sleep 10
done
# Single-shot, not managed by addon manager. Can be later modified or removed
# at will.
cat <<EOF | kubectl apply -f -
apiVersion: scalingpolicy.kope.io/v1alpha1
kind: ScalingPolicy
metadata:
name: fluentd-gcp-scaling-policy
namespace: kube-system
spec:
containers:
- name: fluentd-gcp
resources:
requests:
- resource: cpu
base: ${FLUENTD_GCP_CPU_REQUEST:-}
- resource: memory
base: ${FLUENTD_GCP_MEMORY_REQUEST:-}
limits:
- resource: memory
base: ${FLUENTD_GCP_MEMORY_LIMIT:-}
EOF
}
# Trigger background process that will ultimately update fluentd resource
# requirements.
# NOTE(review): callers pass a yaml path argument, but the backgrounded
# worker takes no parameters — confirm the argument is intentionally unused.
function start-fluentd-resource-update {
wait-for-apiserver-and-update-fluentd &
}
# VolumeSnapshot CRDs and controller are installed by cluster addon manager,
# which may not be available at this point. Run this as a background process.
function wait-for-volumesnapshot-crd-and-controller {
  local crd
  echo "Wait until volume snapshot CRDs are installed"
  # Poll each snapshot CRD in turn until the apiserver serves it.
  for crd in volumesnapshotclasses volumesnapshotcontents volumesnapshots; do
    until kubectl get "${crd}.snapshot.storage.k8s.io"; do
      sleep 10
    done
  done
  echo "Wait until volume snapshot RBAC rules are installed"
  until kubectl get clusterrolebinding volume-snapshot-controller-role; do
    sleep 10
  done
  echo "Wait until volume snapshot controller is installed"
  # "1/1" in the statefulset listing means the controller pod is ready.
  until kubectl get statefulset volume-snapshot-controller | grep volume-snapshot-controller | grep "1/1"; do
    sleep 10
  done
}
# Trigger background process that will wait for volumesnapshot CRDs
# and snapshot-controller to be installed
# Backgrounded because the wait loops can block indefinitely.
function start-volumesnapshot-crd-and-controller {
wait-for-volumesnapshot-crd-and-controller &
}
# Update {{ fluentd_container_runtime_service }} with actual container runtime name,
# and {{ container_runtime_endpoint }} with actual container runtime
# endpoint.
# $1: configmap/manifest file to edit in place.
function update-container-runtime {
local -r file="$1"
local -r container_runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}"
# ${container_runtime_endpoint#unix://} strips the scheme prefix, leaving a
# bare socket path for the manifest.
sed -i \
-e "s@{{ *fluentd_container_runtime_service *}}@${FLUENTD_CONTAINER_RUNTIME_SERVICE:-${CONTAINER_RUNTIME_NAME:-docker}}@g" \
-e "s@{{ *container_runtime_endpoint *}}@${container_runtime_endpoint#unix://}@g" \
"${file}"
}
# Strips the node-journal section from a configmap yaml when the node
# journal integration is not enabled.
# $1: configmap yaml to edit in place.
function update-node-journal {
  local -r configmap_yaml="$1"
  if [[ "${ENABLE_NODE_JOURNAL:-}" == "true" ]]; then
    return 0
  fi
  # Delete everything between the node-journal markers, inclusive.
  sed -i -e "/# BEGIN_NODE_JOURNAL/,/# END_NODE_JOURNAL/d" "${configmap_yaml}"
}
# Updates parameters in yaml file for prometheus-to-sd configuration, or
# removes component if it is disabled.
# $1: manifest file to edit in place.
function update-prometheus-to-sd-parameters {
  local -r manifest="$1"
  if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" != "true" ]]; then
    # Removes all lines between two patterns (throws away prometheus-to-sd)
    sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "${manifest}"
    return 0
  fi
  sed -i \
    -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" \
    -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" \
    "${manifest}"
}
# Updates parameters in yaml file for prometheus-to-sd configuration in daemon sets, or
# removes component if it is disabled.
# $1: daemonset yaml to edit in place.
function update-daemon-set-prometheus-to-sd-parameters {
if [[ "${DISABLE_PROMETHEUS_TO_SD_IN_DS:-}" == "true" ]]; then
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
else
# Delegate to the generic substitution/removal logic.
update-prometheus-to-sd-parameters "$1"
fi
}
# Updates parameters in yaml file for event-exporter configuration
# $1: manifest file to edit in place.
function update-event-exporter {
  local -r manifest="$1"
  # Resource model defaults to the legacy ("old") Stackdriver types.
  local -r resource_model="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
  sed -i \
    -e "s@{{ exporter_sd_resource_model }}@${resource_model}@g" \
    -e "s@{{ exporter_sd_endpoint }}@${STACKDRIVER_ENDPOINT:-}@g" \
    "${manifest}"
}
# Injects dashboard banner flags in place of the placeholder comment line.
# The \( \+\) group captures that line's indentation so the inserted flags
# keep the same yaml indentation.
# $1: dashboard deployment yaml to edit in place.
function update-dashboard-deployment {
if [ -n "${CUSTOM_KUBE_DASHBOARD_BANNER:-}" ]; then
sed -i -e "s@\( \+\)# PLATFORM-SPECIFIC ARGS HERE@\1- --system-banner=${CUSTOM_KUBE_DASHBOARD_BANNER}\n\1- --system-banner-severity=WARNING@" "$1"
fi
}
# Sets up the manifests of coreDNS for k8s addons.
# NOTE(review): reads ${dst_dir} from the caller's scope (start-kube-addons);
# it is not defined locally — confirm this function is only called from there.
function setup-coredns-manifest {
setup-addon-manifests "addons" "0-dns/coredns"
local -r coredns_file="${dst_dir}/0-dns/coredns/coredns.yaml"
# Promote the template (.yaml.in) to the final manifest name before editing.
mv "${dst_dir}/0-dns/coredns/coredns.yaml.in" "${coredns_file}"
# Replace the salt configurations with variable values.
sed -i -e "s@dns_domain@${DNS_DOMAIN}@g" "${coredns_file}"
sed -i -e "s@dns_server@${DNS_SERVER_IP}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}"
sed -i -e "s@dns_memory_limit@${DNS_MEMORY_LIMIT:-170Mi}@g" "${coredns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
sed -i'' -e "s@{{.Target}}@${COREDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
fi
}
# Sets up the manifests of Fluentd configmap and yamls for k8s addons.
# $1: destination addons dir (e.g. /etc/kubernetes/addons).
function setup-fluentd {
local -r dst_dir="$1"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
local -r fluentd_gcp_scaler_yaml="${dst_dir}/fluentd-gcp/scaler-deployment.yaml"
# Ingest logs against new resources like "k8s_container" and "k8s_node" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
# Ingest logs against old resources like "gke_container" and "gce_instance" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "old".
if [[ "${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}" == "new" ]]; then
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
fluentd_gcp_configmap_name="fluentd-gcp-config"
else
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap-old.yaml"
fluentd_gcp_configmap_name="fluentd-gcp-config-old"
fi
sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.2.0}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-1.6.17}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-daemon-set-prometheus-to-sd-parameters "${fluentd_gcp_yaml}"
# Kicks off a background job that later patches fluentd resource requests.
start-fluentd-resource-update "${fluentd_gcp_yaml}"
update-container-runtime "${fluentd_gcp_configmap_yaml}"
update-node-journal "${fluentd_gcp_configmap_yaml}"
}
# Sets up the manifests of kube-dns for k8s addons.
# NOTE(review): reads ${dst_dir} from the caller's scope (start-kube-addons).
function setup-kube-dns-manifest {
setup-addon-manifests "addons" "0-dns/kube-dns"
local -r kubedns_file="${dst_dir}/0-dns/kube-dns/kube-dns.yaml"
# Promote the template (.yaml.in) to the final manifest name before editing.
mv "${dst_dir}/0-dns/kube-dns/kube-dns.yaml.in" "${kubedns_file}"
if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
# Replace with custom GKE kube-dns deployment.
cat > "${kubedns_file}" <<EOF
$CUSTOM_KUBE_DNS_YAML
EOF
update-prometheus-to-sd-parameters "${kubedns_file}"
fi
# Replace the salt configurations with variable values.
# (These run on the custom yaml too, when one was substituted above.)
sed -i -e "s@dns_domain@${DNS_DOMAIN}@g" "${kubedns_file}"
sed -i -e "s@dns_server@${DNS_SERVER_IP}@g" "${kubedns_file}"
sed -i -e "s@dns_memory_limit@${DNS_MEMORY_LIMIT:-170Mi}@g" "${kubedns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
sed -i'' -e "s@{{.Target}}@${KUBEDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
fi
}
# Sets up the manifests of local dns cache agent for k8s addons.
# NOTE(review): reads ${dst_dir} from the caller's scope (start-kube-addons).
function setup-nodelocaldns-manifest {
setup-addon-manifests "addons" "0-dns/nodelocaldns"
local -r localdns_file="${dst_dir}/0-dns/nodelocaldns/nodelocaldns.yaml"
# Optionally replace the manifest with a custom yaml before substitution.
setup-addon-custom-yaml "addons" "0-dns/nodelocaldns" "nodelocaldns.yaml" "${CUSTOM_NODELOCAL_DNS_YAML:-}"
# eventually all the __PILLAR__ stuff will be gone, but theyre still in nodelocaldns for backward compat.
sed -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" "${localdns_file}"
}
# Sets up the manifests of netd for k8s addons.
# NOTE(review): reads ${dst_dir} from the caller's scope (start-kube-addons).
function setup-netd-manifest {
  local -r netd_dir="${dst_dir}/netd"
  local -r netd_file="${netd_dir}/netd.yaml"
  mkdir -p "${netd_dir}"
  # Ensure the manifest exists even when no custom yaml is supplied.
  touch "${netd_file}"
  if [[ -n "${CUSTOM_NETD_YAML:-}" ]]; then
    # Replace with custom GCP netd deployment.
    printf '%s\n' "${CUSTOM_NETD_YAML}" > "${netd_file}"
  fi
}
# A helper function to set up a custom yaml for a k8s addon.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: manifest file
# $4: custom yaml
function setup-addon-custom-yaml {
  local -r target="/etc/kubernetes/$1/$2/$3"
  local -r payload="$4"
  # Leave the stock manifest alone when no override is provided.
  if [[ -n "${payload:-}" ]]; then
    # Replace with custom manifest.
    printf '%s\n' "${payload}" > "${target}"
  fi
}
# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
#   CLUSTER_NAME
# Ordering matters below: the 0-dns directory is renamed so addon manager
# creates the DNS service before anything else can claim its clusterIP.
function start-kube-addons {
echo "Prepare kube-addons manifests and start kube addon manager"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
local -r dst_dir="/etc/kubernetes/addons"
create-kubeconfig "addon-manager" "${ADDON_MANAGER_TOKEN}"
# User and group should never contain characters that need to be quoted
# shellcheck disable=SC2086
prepare-log-file /var/log/kube-addon-manager.log ${KUBE_ADDON_MANAGER_RUNASUSER:-2002}
# prep addition kube-up specific rbac objects
setup-addon-manifests "addons" "rbac/kubelet-api-auth"
setup-addon-manifests "addons" "rbac/kubelet-cert-rotation"
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
setup-addon-manifests "addons" "rbac/legacy-kubelet-user"
else
setup-addon-manifests "addons" "rbac/legacy-kubelet-user-disable"
fi
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
setup-addon-manifests "addons" "podsecuritypolicies"
fi
# Set up manifests of other addons.
if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then
# Replace with custom GKE kube proxy.
cat > "$src_dir/kube-proxy/kube-proxy-ds.yaml" <<EOF
$CUSTOM_KUBE_PROXY_YAML
EOF
update-daemon-set-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
fi
prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
setup-addon-manifests "addons" "kube-proxy"
fi
# Stackdriver metadata agent (only with gcp cluster logging).
if [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]]; then
metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
metadata_agent_cluster_level_cpu_request="${METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST:-40m}"
metadata_agent_cluster_level_memory_request="${METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST:-50Mi}"
setup-addon-manifests "addons" "metadata-agent/stackdriver"
metadata_agent_yaml="${dst_dir}/metadata-agent/stackdriver/metadata-agent.yaml"
sed -i -e "s@{{ metadata_agent_cpu_request }}@${metadata_agent_cpu_request}@g" "${metadata_agent_yaml}"
sed -i -e "s@{{ metadata_agent_memory_request }}@${metadata_agent_memory_request}@g" "${metadata_agent_yaml}"
sed -i -e "s@{{ metadata_agent_cluster_level_cpu_request }}@${metadata_agent_cluster_level_cpu_request}@g" "${metadata_agent_yaml}"
sed -i -e "s@{{ metadata_agent_cluster_level_memory_request }}@${metadata_agent_cluster_level_memory_request}@g" "${metadata_agent_yaml}"
fi
fi
# Metrics server, with optional tighter resource defaults.
if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
setup-addon-manifests "addons" "metrics-server"
base_metrics_server_cpu="40m"
base_metrics_server_memory="40Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="16"
if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
base_metrics_server_cpu="40m"
base_metrics_server_memory="35Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="5"
fi
local -r metrics_server_yaml="${dst_dir}/metrics-server/metrics-server-deployment.yaml"
sed -i -e "s@{{ base_metrics_server_cpu }}@${base_metrics_server_cpu}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ base_metrics_server_memory }}@${base_metrics_server_memory}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_memory_per_node }}@${metrics_server_memory_per_node}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_min_cluster_size }}@${metrics_server_min_cluster_size}@g" "${metrics_server_yaml}"
fi
if [[ "${ENABLE_NVIDIA_GPU_DEVICE_PLUGIN:-}" == "true" ]]; then
setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
setup-addon-manifests "addons" "node-termination-handler"
setup-node-termination-handler-manifest ''
fi
# Setting up the konnectivity-agent daemonset
if [[ "${RUN_KONNECTIVITY_PODS:-false}" == "true" ]]; then
setup-addon-manifests "addons" "konnectivity-agent"
setup-konnectivity-agent-manifest
fi
if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
# Create a new directory for the DNS addon and prepend a "0" on the name.
# Prepending "0" to the directory ensures that add-on manager
# creates the dns service first. This ensures no other add-on
# can "steal" the designated DNS clusterIP.
BASE_ADDON_DIR=${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty
BASE_DNS_DIR=${BASE_ADDON_DIR}/dns
NEW_DNS_DIR=${BASE_ADDON_DIR}/0-dns
mkdir "${NEW_DNS_DIR}" && mv "${BASE_DNS_DIR}"/* "${NEW_DNS_DIR}" && rm -r "${BASE_DNS_DIR}"
if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
setup-coredns-manifest
else
setup-kube-dns-manifest
fi
if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
setup-nodelocaldns-manifest
fi
fi
if [[ "${ENABLE_NETD:-}" == "true" ]]; then
setup-netd-manifest
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
[[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
setup-addon-manifests "addons" "fluentd-elasticsearch"
local -r fluentd_es_configmap_yaml="${dst_dir}/fluentd-elasticsearch/fluentd-es-configmap.yaml"
update-container-runtime ${fluentd_es_configmap_yaml}
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
setup-addon-manifests "addons" "fluentd-gcp"
setup-fluentd ${dst_dir}
local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
update-event-exporter ${event_exporter_yaml}
update-prometheus-to-sd-parameters ${event_exporter_yaml}
fi
if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
setup-addon-manifests "addons" "dashboard"
local -r dashboard_deployment_yaml="${dst_dir}/dashboard/dashboard-deployment.yaml"
update-dashboard-deployment ${dashboard_deployment_yaml}
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]]; then
setup-addon-manifests "addons" "node-problem-detector"
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
# Setup role binding(s) for standalone node problem detector.
if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
setup-addon-manifests "addons" "node-problem-detector/standalone"
fi
setup-addon-manifests "addons" "node-problem-detector/kubelet-user-standalone" "node-problem-detector"
fi
if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
setup-addon-manifests "admission-controls" "limit-range" "gce"
fi
setup-addon-manifests "addons" "admission-resource-quota-critical-pods"
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
setup-addon-custom-yaml "addons" "calico-policy-controller" "calico-node-daemonset.yaml" "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
setup-addon-custom-yaml "addons" "calico-policy-controller" "typha-deployment.yaml" "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
# Configure Calico CNI directory.
local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
fi
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"
fi
if [[ "${ENABLE_VOLUME_SNAPSHOTS:-}" == "true" ]]; then
setup-addon-manifests "addons" "volumesnapshots/crd"
setup-addon-manifests "addons" "volumesnapshots/volume-snapshot-controller"
start-volumesnapshot-crd-and-controller
fi
if [[ "${ENABLE_IP_MASQ_AGENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "ip-masq-agent"
fi
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "metadata-proxy/gce"
local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
update-daemon-set-prometheus-to-sd-parameters ${metadata_proxy_yaml}
fi
if [[ "${ENABLE_ISTIO:-}" == "true" ]]; then
if [[ "${ISTIO_AUTH_TYPE:-}" == "MUTUAL_TLS" ]]; then
setup-addon-manifests "addons" "istio/auth"
else
setup-addon-manifests "addons" "istio/noauth"
fi
fi
if [[ -n "${EXTRA_ADDONS_URL:-}" ]]; then
download-extra-addons
setup-addon-manifests "addons" "gce-extras"
fi
# Place addon manager pod manifest.
src_file="${src_dir}/kube-addon-manager.yaml"
sed -i -e "s@{{kubectl_prune_whitelist_override}}@${KUBECTL_PRUNE_WHITELIST_OVERRIDE:-}@g" "${src_file}"
sed -i -e "s@{{kubectl_extra_prune_whitelist}}@${ADDON_MANAGER_PRUNE_WHITELIST:-}@g" "${src_file}"
sed -i -e "s@{{runAsUser}}@${KUBE_ADDON_MANAGER_RUNASUSER:-2002}@g" "${src_file}"
sed -i -e "s@{{runAsGroup}}@${KUBE_ADDON_MANAGER_RUNASGROUP:-2002}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Points the node-termination-handler daemonset at a custom image, when one
# is configured.
#
# $1: addon category under /etc/kubernetes
# $2: manifest dir under that category
# NOTE(review): the only caller in this file invokes this with a single empty
# argument, yielding the path "/etc/kubernetes///daemonset.yaml" — confirm the
# intended call-site arguments.
function setup-node-termination-handler-manifest {
  # ${1:-}/${2:-} and ${NODE_TERMINATION_HANDLER_IMAGE:-} keep this safe under
  # `set -o nounset` when args / the image variable are not provided.
  local -r nth_manifest="/etc/kubernetes/${1:-}/${2:-}/daemonset.yaml"
  if [[ -n "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then
    sed -i "s|image:.*|image: ${NODE_TERMINATION_HANDLER_IMAGE}|" "${nth_manifest}"
  fi
}
# Replaces the __APISERVER_IP__ placeholder in the konnectivity-agent
# daemonset with the master name/address (KUBERNETES_MASTER_NAME).
function setup-konnectivity-agent-manifest {
local -r manifest="/etc/kubernetes/addons/konnectivity-agent/konnectivity-agent-ds.yaml"
sed -i "s|__APISERVER_IP__|${KUBERNETES_MASTER_NAME}|g" "${manifest}"
}
# Setups manifests for ingress controller and gce-specific policies for service controller.
function start-lb-controller {
setup-addon-manifests "addons" "loadbalancing"
# Starts a l7 loadbalancing controller for ingress.
if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
echo "Start GCE L7 pod"
prepare-log-file /var/log/glbc.log
setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
setup-addon-manifests "addons" "rbac/cluster-loadbalancing/glbc"
create-kubeconfig "l7-lb-controller" "${GCE_GLBC_TOKEN}"
local -r src_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest"
local -r dest_manifest="/etc/kubernetes/manifests/glbc.manifest"
# CUSTOM_INGRESS_YAML, when set, replaces the stock glbc manifest entirely.
if [[ -n "${CUSTOM_INGRESS_YAML:-}" ]]; then
echo "${CUSTOM_INGRESS_YAML}" > "${dest_manifest}"
else
cp "${src_manifest}" "${dest_manifest}"
fi
# Override the glbc image if GCE_GLBC_IMAGE is specified.
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
sed -i "s|image:.*|image: ${GCE_GLBC_IMAGE}|" "${dest_manifest}"
fi
fi
}
# Setup working directory for kubelet.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
# Bind-mount the directory onto itself so it can then be remounted with
# exec/suid/dev regardless of the parent filesystem's mount options.
mount -B /var/lib/kubelet /var/lib/kubelet/
mount -B -o remount,exec,suid,dev /var/lib/kubelet
# TODO(#60123): The kubelet should create the cert-dir directory if it doesn't exist
mkdir -p /var/lib/kubelet/pki/
# Mount /var/lib/kubelet/pki on a tmpfs so it doesn't persist across
# reboots. This can help avoid some rare instances of corrupt cert files
# (e.g. created but not written during a shutdown). Kubelet crash-loops
# in these cases. Do this after above mount calls so it isn't overwritten.
echo "Mounting /var/lib/kubelet/pki on tmpfs"
mount -t tmpfs tmpfs /var/lib/kubelet/pki
}
# Override for GKE custom master setup scripts (no-op outside of GKE).
# When the GKE helper script exists it is sourced and takes over; otherwise,
# if a bearer token is available, a local admin kubeconfig is created and
# advertised via /etc/profile.d.
function gke-master-start {
if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
echo "Running GKE internal configuration script"
. "${KUBE_HOME}/bin/gke-internal-configure-helper.sh"
gke-internal-master-start
elif [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
echo "setting up local admin kubeconfig"
create-kubeconfig "local-admin" "${KUBE_BEARER_TOKEN}"
echo "export KUBECONFIG=/etc/srv/kubernetes/local-admin/kubeconfig" > /etc/profile.d/kubeconfig.sh
fi
}
# Rewrites /etc/motd with the running Kubernetes version and source pointers.
function reset-motd {
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
# or the git hash that's in the build info.
local gitref
gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=""
if [[ "${gitref}" != "${version}" ]]; then
devel="
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
"
# ${version//*+/} strips everything through the last '+', leaving the git
# SHA embedded in the build metadata.
gitref="${version//*+/}"
fi
cat > /etc/motd <<EOF
Welcome to Kubernetes ${version}!
You can find documentation for Kubernetes at:
http://docs.kubernetes.io/
The source for this release can be found at:
/home/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz
It is based on the Kubernetes source at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
/home/kubernetes/LICENSES
EOF
}
# Puts ${KUBE_HOME}/bin first on PATH (login shells and sudo) so the bundled
# kubectl shadows any system-installed one.
function override-kubectl {
echo "overriding kubectl"
echo "export PATH=${KUBE_HOME}/bin:\$PATH" > /etc/profile.d/kube_env.sh
# source the file explicitly otherwise we have
# issues on a ubuntu OS image finding the kubectl
# shellcheck disable=SC1091
source /etc/profile.d/kube_env.sh
# Add ${KUBE_HOME}/bin into sudoer secure path.
local sudo_path
sudo_path=$(sudo env | grep "^PATH=")
if [[ -n "${sudo_path}" ]]; then
sudo_path=${sudo_path#PATH=}
# Subshell confines the umask change to writing the sudoers drop-in.
(
umask 027
echo "Defaults secure_path=\"${KUBE_HOME}/bin:${sudo_path}\"" > /etc/sudoers.d/kube_secure_path
)
fi
}
# Writes the PV recycler pod-template override file and exports the
# PV_RECYCLER_VOLUME / PV_RECYCLER_MOUNT JSON snippets consumed by the
# kube-controller-manager manifest.
# Requires PV_RECYCLER_OVERRIDE_TEMPLATE to name the template file path.
function override-pv-recycler {
  if [[ -z "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
    echo "PV_RECYCLER_OVERRIDE_TEMPLATE is not set"
    exit 1
  fi
  PV_RECYCLER_VOLUME="{\"name\": \"pv-recycler-mount\",\"hostPath\": {\"path\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"type\": \"FileOrCreate\"}},"
  PV_RECYCLER_MOUNT="{\"name\": \"pv-recycler-mount\",\"mountPath\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"readOnly\": true},"
  # BUG FIX: the heredoc delimiter is unquoted, so an unescaped $(ls -A /scrub)
  # would be command-substituted HERE, at template-creation time, instead of
  # running inside the recycler pod. Escape it so the literal command
  # substitution lands in the pod args.
  # NOTE(review): "version: v1" looks like it should be "apiVersion: v1" —
  # confirm against the recycler pod-template parser before changing it.
  cat > "${PV_RECYCLER_OVERRIDE_TEMPLATE}" <<EOF
version: v1
kind: Pod
metadata:
  generateName: pv-recycler-
  namespace: default
spec:
  activeDeadlineSeconds: 60
  restartPolicy: Never
  volumes:
  - name: vol
  containers:
  - name: pv-recycler
    image: k8s.gcr.io/busybox:1.27
    command:
    - /bin/sh
    args:
    - -c
    - test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \$(ls -A /scrub) || exit 1
    volumeMounts:
    - name: vol
      mountPath: /scrub
EOF
}
# Blocks until the apiserver answers a basic `kubectl get nodes` query.
function wait-till-apiserver-ready() {
  while ! kubectl get nodes; do
    sleep 5
  done
}
# Ensures kubectl run during bootstrap can authenticate to the apiserver.
function ensure-master-bootstrap-kubectl-auth {
# By default, `kubectl` uses http://localhost:8080
# If the insecure port is disabled, kubectl will need to use an admin-authenticated kubeconfig.
if [[ -n "${KUBE_BOOTSTRAP_TOKEN:-}" ]]; then
create-kubeconfig "kube-bootstrap" "${KUBE_BOOTSTRAP_TOKEN}"
# Exported so subsequent kubectl invocations in this process pick it up.
export KUBECONFIG=/etc/srv/kubernetes/kube-bootstrap/kubeconfig
fi
}
# Generates the containerd config (and a CNI config template where
# applicable), then restarts containerd to load it.
# Assumed vars: KUBE_HOME. Optional: CONTAINERD_CONFIG_PATH,
# CONTAINERD_LOG_LEVEL, CONTAINERD_MAX_CONTAINER_LOG_LINE, KUBERNETES_MASTER,
# NETWORK_POLICY_PROVIDER, ENABLE_NETD, CONTAINER_RUNTIME_TEST_HANDLER.
function setup-containerd {
echo "Generate containerd config"
local config_path="${CONTAINERD_CONFIG_PATH:-"/etc/containerd/config.toml"}"
mkdir -p "$(dirname "${config_path}")"
local cni_template_path="${KUBE_HOME}/cni.template"
cat > "${cni_template_path}" <<EOF
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "ptp",
"mtu": 1460,
"ipam": {
"type": "host-local",
"subnet": "{{.PodCIDR}}",
"routes": [
{
"dst": "0.0.0.0/0"
}
]
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
EOF
# On nodes (not masters) the CNI config is owned by a daemonset when a
# network policy provider or netd is in play, so the local template is
# disabled by blanking its path.
if [[ "${KUBERNETES_MASTER:-}" != "true" ]]; then
if [[ "${NETWORK_POLICY_PROVIDER:-"none"}" != "none" || "${ENABLE_NETD:-}" == "true" ]]; then
# Use Kubernetes cni daemonset on node if network policy provider is specified
# or netd is enabled.
cni_template_path=""
fi
fi
cat > "${config_path}" <<EOF
version = 2
# Kubernetes requires the cri plugin.
required_plugins = ["io.containerd.grpc.v1.cri"]
# Kubernetes doesn't use containerd restart manager.
disabled_plugins = ["io.containerd.internal.v1.restart"]
oom_score = -999
[debug]
level = "${CONTAINERD_LOG_LEVEL:-"info"}"
[plugins."io.containerd.grpc.v1.cri"]
stream_server_address = "127.0.0.1"
max_container_log_line_size = ${CONTAINERD_MAX_CONTAINER_LOG_LINE:-262144}
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "${KUBE_HOME}/bin"
conf_dir = "/etc/cni/net.d"
conf_template = "${cni_template_path}"
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://mirror.gcr.io","https://registry-1.docker.io"]
EOF
if [[ "${CONTAINER_RUNTIME_TEST_HANDLER:-}" == "true" ]]; then
cat >> "${config_path}" <<EOF
# Setup a runtime with the magic name ("test-handler") used for Kubernetes
# runtime class tests ...
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.test-handler]
runtime_type = "io.containerd.runc.v2"
EOF
fi
# Reuse docker group for containerd.
local -r containerd_gid="$(grep ^docker: /etc/group | cut -d: -f 3)"
if [[ -n "${containerd_gid:-}" ]]; then
cat >> "${config_path}" <<EOF
# reuse id of the docker group
[grpc]
gid = ${containerd_gid}
EOF
fi
chmod 644 "${config_path}"
echo "Restart containerd to load the config change"
systemctl restart containerd
}
# Writes the kubelet credential-provider config for the GCP auth sidecar.
# NOTE(review): uses ">>" (append) — running this twice would produce a
# duplicated document; confirm whether truncation (">") was intended.
function create-sidecar-config {
cat >> "/etc/srv/kubernetes/cri_auth_config.yaml" << EOF
kind: CredentialProviderConfig
apiVersion: kubelet.config.k8s.io/v1alpha1
providers:
- name: auth-provider-gcp
apiVersion: credentialprovider.kubelet.k8s.io/v1alpha1
matchImages:
- "container.cloud.google.com"
- "gcr.io"
- "*.gcr.io"
- "*.pkg.dev"
args:
- --v=3
defaultCacheDuration: 1m
EOF
}
# This function detects the platform/arch of the machine where the script runs,
# and sets the HOST_PLATFORM and HOST_ARCH environment variables accordingly.
# Callers can specify HOST_PLATFORM_OVERRIDE and HOST_ARCH_OVERRIDE to skip the detection.
# This function is adapted from the detect_client_info function in cluster/get-kube-binaries.sh
# and kube::util::host_os, kube::util::host_arch functions in hack/lib/util.sh
# This function should be synced with detect_host_info in ./configure.sh
function detect_host_info() {
  HOST_PLATFORM=${HOST_PLATFORM_OVERRIDE:-"$(uname -s)"}
  case "${HOST_PLATFORM}" in
    Linux|linux)
      HOST_PLATFORM="linux"
      ;;
    *)
      echo "Unknown, unsupported platform: ${HOST_PLATFORM}." >&2
      echo "Supported platform(s): linux." >&2
      echo "Bailing out." >&2
      exit 2
  esac

  HOST_ARCH=${HOST_ARCH_OVERRIDE:-"$(uname -m)"}
  case "${HOST_ARCH}" in
    x86_64*|i?86_64*|amd64*)
      HOST_ARCH="amd64"
      ;;
    # BUG FIX: the first alternative here was the garbled pattern
    # "aHOST_arch64*" — a botched search-and-replace of "aarch64" that could
    # never match a real machine string. Restore plain aarch64/arm64 matching.
    aarch64*|arm64*)
      HOST_ARCH="arm64"
      ;;
    *)
      echo "Unknown, unsupported architecture (${HOST_ARCH})." >&2
      echo "Supported architecture(s): amd64 and arm64." >&2
      echo "Bailing out." >&2
      exit 2
      ;;
  esac
}
# Initializes variables used by the log-* functions.
#
# get-metadata-value must be defined before calling this function.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
# Initializes the globals used by the log-* bootstrap-logging functions:
# cluster id (from GCE metadata), hostname, current boot id, the per-step
# start-time map, the EXIT-trap stack, and the status constants.
function log-init {
# Used by log-* functions.
LOG_CLUSTER_ID=$(get-metadata-value 'instance/attributes/cluster-uid' 'get-metadata-value-error')
LOG_INSTANCE_NAME=$(hostname)
# Boot index 0 in 'journalctl --list-boots' is the current boot; field 2 is its id.
LOG_BOOT_ID=$(journalctl --list-boots | grep -E '^ *0' | awk '{print $2}')
# -Ag: global associative array (step name -> start time, seconds.nanos).
declare -Ag LOG_START_TIMES
# -ag: global indexed array used as a stack of EXIT-trap commands.
declare -ag LOG_TRAP_STACK
LOG_STATUS_STARTED='STARTED'
LOG_STATUS_COMPLETED='COMPLETED'
LOG_STATUS_ERROR='ERROR'
}
# Sets an EXIT trap.
# Args:
# $1:... : the trap command.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
# Pushes an EXIT trap: records the trap command on LOG_TRAP_STACK (so
# log-trap-pop can restore the previous one) and installs it.
# Args:
#   $1...: the trap command (all arguments are joined into one string).
function log-trap-push {
local t="${*:1}"
LOG_TRAP_STACK+=("${t}")
# shellcheck disable=2064
trap "${t}" EXIT
}
# Removes and restores an EXIT trap.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
# Pops the current EXIT trap off LOG_TRAP_STACK and reinstalls the previous
# one; if the stack becomes empty, resets the EXIT trap entirely.
function log-trap-pop {
# Remove current trap.
unset 'LOG_TRAP_STACK[-1]'
# Restore previous trap.
if [ ${#LOG_TRAP_STACK[@]} -ne 0 ]; then
local t="${LOG_TRAP_STACK[-1]}"
# shellcheck disable=2064
trap "${t}" EXIT
else
# If no traps in stack, clear.
# (In bash, 'trap' with a lone signal name resets it to its default.)
trap EXIT
fi
}
# Logs the end of a bootstrap step that errored.
# Args:
# $1 : bootstrap step name.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
# Logs the end of a bootstrap step that errored.
# Args:
#   $1 : bootstrap step name.
# Intended to run from an EXIT trap: BASH_COMMAND is then the command that
# was executing when the trap fired.
function log-error {
local bootstep="$1"
log-proto "${bootstep}" "${LOG_STATUS_ERROR}" "error calling '${BASH_COMMAND}'"
}
# Wraps a command with bootstrap logging.
# Args:
# $1 : bootstrap step name.
# $2... : the command to run.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
# Wraps a command with bootstrap logging: emits STARTED, runs the command
# (with a pushed error trap so failures emit ERROR), then emits COMPLETED.
# Args:
#   $1  : bootstrap step name.
#   $2...: the command to run.
function log-wrap {
local bootstep="$1"
local command="${*:2}"
log-trap-push "log-error ${bootstep}"
log-proto "${bootstep}" "${LOG_STATUS_STARTED}"
# Deliberately unquoted: the joined string must word-split back into the
# command and its arguments.
$command
log-proto "${bootstep}" "${LOG_STATUS_COMPLETED}"
log-trap-pop
}
# Logs a bootstrap step start. Prefer log-wrap.
# Args:
# $1 : bootstrap step name.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
# Logs a bootstrap step start and arms an error trap. Prefer log-wrap;
# use this (paired with log-end) only for multi-statement steps.
# Args:
#   $1 : bootstrap step name.
function log-start {
local bootstep="$1"
log-trap-push "log-error ${bootstep}"
log-proto "${bootstep}" "${LOG_STATUS_STARTED}"
}
# Logs a bootstrap step end. Prefer log-wrap.
# Args:
# $1 : bootstrap step name.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
# Logs a bootstrap step end and disarms the error trap pushed by log-start.
# Args:
#   $1 : bootstrap step name (must match the log-start call).
function log-end {
local bootstep="$1"
log-proto "${bootstep}" "${LOG_STATUS_COMPLETED}"
log-trap-pop
}
# Writes a log proto to stdout.
# Args:
# $1: bootstrap step name.
# $2: status. Either 'STARTED', 'COMPLETED', or 'ERROR'.
# $3: optional status reason.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
# Writes one SerialportLog JSON line to stdout for monitoring to scrape.
# Args:
#   $1: bootstrap step name.
#   $2: status. Either 'STARTED', 'COMPLETED', or 'ERROR'.
#   $3: optional status reason.
# On STARTED the step's start time is recorded; on any other status the
# latency since that start is computed and the entry is removed.
function log-proto {
local bootstep="$1"
local status="$2"
local status_reason="${3:-}"
# Get current time.
local current_time
current_time="$(date --utc '+%s.%N')"
# ...formatted as UTC RFC 3339.
local timestamp
timestamp="$(date --utc --date="@${current_time}" '+%FT%T.%NZ')"
# Calculate latency.
local latency='null'
if [ "${status}" == "${LOG_STATUS_STARTED}" ]; then
LOG_START_TIMES["${bootstep}"]="${current_time}"
else
local start_time="${LOG_START_TIMES["${bootstep}"]}"
unset 'LOG_START_TIMES['"${bootstep}"']'
# Bash cannot do non-integer math, shell out to awk.
latency="$(echo "${current_time} ${start_time}" | awk '{print $1 - $2}')s"
# The default latency is null which cannot be wrapped as a string so we must
# do it here instead of the printf.
latency="\"${latency}\""
fi
printf '[cloud.kubernetes.monitoring.proto.SerialportLog] {"cluster_hash":"%s","vm_instance_name":"%s","boot_id":"%s","timestamp":"%s","bootstrap_status":{"step_name":"%s","status":"%s","status_reason":"%s","latency":%s}}\n' \
"${LOG_CLUSTER_ID}" "${LOG_INSTANCE_NAME}" "${LOG_BOOT_ID}" "${timestamp}" "${bootstep}" "${status}" "${status_reason}" "${latency}"
}
########### Main Function ###########
# Entry point for instance bootstrap: sources kube-env, generates auth
# tokens, sets up OS/firewall/disks, configures the container runtime, then
# starts the kubelet and — on masters — the control-plane components.
# Every step is wrapped with the log-* bootstrap-logging helpers.
function main() {
echo "Start to configure instance for kubernetes"
log-wrap 'DetectHostInfo' detect_host_info
# Well-known paths/names consumed by later steps.
readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds"
readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
readonly COREDNS_AUTOSCALER="Deployment/coredns"
readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"
# Resource requests of master components.
CLOUD_CONTROLLER_MANAGER_CPU_REQUEST="${CLOUD_CONTROLLER_MANAGER_CPU_REQUEST:-50m}"
KUBE_CONTROLLER_MANAGER_CPU_REQUEST="${KUBE_CONTROLLER_MANAGER_CPU_REQUEST:-200m}"
KUBE_SCHEDULER_CPU_REQUEST="${KUBE_SCHEDULER_CPU_REQUEST:-75m}"
KUBE_HOME="/home/kubernetes"
KUBE_BIN=${KUBE_HOME}/bin
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
PV_RECYCLER_OVERRIDE_TEMPLATE="${KUBE_HOME}/kube-manifests/kubernetes/pv-recycler-template.yaml"
# Pick an interpreter: python2 preferred, python3 as fallback.
log-start 'SetPythonVersion'
if [[ "$(python -V 2>&1)" =~ "Python 2" ]]; then
# found python2, just use that
PYTHON="python"
elif [[ -f "/usr/bin/python2.7" ]]; then
# System python not defaulted to python 2 but using 2.7 during migration
PYTHON="/usr/bin/python2.7"
else
# No python2 either by default, let's see if we can find python3
PYTHON="python3"
if ! command -v ${PYTHON} >/dev/null 2>&1; then
echo "ERROR Python not found. Aborting."
exit 2
fi
fi
echo "Version : $(${PYTHON} -V 2>&1)"
log-end 'SetPythonVersion'
# kube-env supplies the cluster configuration; it is mandatory.
log-start 'SourceKubeEnv'
if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
exit 1
fi
source "${KUBE_HOME}/kube-env"
log-end 'SourceKubeEnv'
if [[ -f "${KUBE_HOME}/kubelet-config.yaml" ]]; then
echo "Found Kubelet config file at ${KUBE_HOME}/kubelet-config.yaml"
KUBELET_CONFIG_FILE_ARG="--config ${KUBE_HOME}/kubelet-config.yaml"
fi
if [[ -e "${KUBE_HOME}/kube-master-certs" ]]; then
log-wrap 'SourceKubeMasterCerts' source "${KUBE_HOME}/kube-master-certs"
fi
log-start 'VerifyKubeUser'
if [[ -n "${KUBE_USER:-}" ]]; then
if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
echo "Bad KUBE_USER format."
exit 1
fi
fi
log-end 'VerifyKubeUser'
# Fresh random bearer tokens for each control-plane client.
log-start 'GenerateTokens'
KUBE_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
CLOUD_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
KUBE_SCHEDULER_TOKEN="$(secure_random 32)"
KUBE_CLUSTER_AUTOSCALER_TOKEN="$(secure_random 32)"
if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
GCE_GLBC_TOKEN="$(secure_random 32)"
fi
ADDON_MANAGER_TOKEN="$(secure_random 32)"
if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" != "true" ]]; then
KUBE_BOOTSTRAP_TOKEN="$(secure_random 32)"
fi
if [[ "${PREPARE_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
KONNECTIVITY_SERVER_TOKEN="$(secure_random 32)"
fi
if [[ "${ENABLE_MONITORING_TOKEN:-false}" == "true" ]]; then
MONITORING_TOKEN="$(secure_random 32)"
fi
log-end 'GenerateTokens'
log-wrap 'SetupOSParams' setup-os-params
log-wrap 'ConfigIPFirewall' config-ip-firewall
log-wrap 'CreateDirs' create-dirs
log-wrap 'EnsureLocalSSDs' ensure-local-ssds
log-wrap 'SetupKubeletDir' setup-kubelet-dir
log-wrap 'SetupLogrotate' setup-logrotate
# PKI/auth setup differs between master and node.
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
log-wrap 'MountMasterPD' mount-master-pd
log-wrap 'CreateNodePKI' create-node-pki
log-wrap 'CreateMasterPKI' create-master-pki
log-wrap 'CreateMasterAuth' create-master-auth
log-wrap 'EnsureMasterBootstrapKubectlAuth' ensure-master-bootstrap-kubectl-auth
log-wrap 'CreateMasterKubeletAuth' create-master-kubelet-auth
log-wrap 'CreateMasterEtcdAuth' create-master-etcd-auth
log-wrap 'CreateMasterEtcdApiserverAuth' create-master-etcd-apiserver-auth
log-wrap 'OverridePVRecycler' override-pv-recycler
log-wrap 'GKEMasterStart' gke-master-start
else
log-wrap 'CreateNodePKI' create-node-pki
log-wrap 'CreateKubeletKubeconfig' create-kubelet-kubeconfig "${KUBERNETES_MASTER_NAME}"
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
log-wrap 'CreateKubeproxyUserKubeconfig' create-kubeproxy-user-kubeconfig
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
log-wrap 'CreateNodeProblemDetectorKubeconfig' create-node-problem-detector-kubeconfig "${KUBERNETES_MASTER_NAME}"
elif [[ -f "/var/lib/kubelet/kubeconfig" ]]; then
log-wrap 'CreateNodeProblemDetectorKubeconfigFromKubelet' create-node-problem-detector-kubeconfig-from-kubelet
else
echo "Either NODE_PROBLEM_DETECTOR_TOKEN or /var/lib/kubelet/kubeconfig must be set"
exit 1
fi
fi
fi
log-wrap 'OverrideKubectl' override-kubectl
container_runtime="${CONTAINER_RUNTIME:-docker}"
# Run the containerized mounter once to pre-cache the container image.
if [[ "${container_runtime}" == "docker" ]]; then
log-wrap 'AssembleDockerFlags' assemble-docker-flags
elif [[ "${container_runtime}" == "containerd" ]]; then
if docker-installed; then
# We still need to configure docker so it wouldn't reserve the 172.17.0/16 subnet
# And if somebody will start docker to build or pull something, logging will also be set up
log-wrap 'AssembleDockerFlags' assemble-docker-flags
# stop docker if it is present as we want to use just containerd
log-wrap 'StopDocker' systemctl stop docker || echo "unable to stop docker"
fi
log-wrap 'SetupContainerd' setup-containerd
fi
log-start 'SetupKubePodLogReadersGroupDir'
if [[ -n "${KUBE_POD_LOG_READERS_GROUP:-}" ]]; then
mkdir -p /var/log/pods/
chgrp -R "${KUBE_POD_LOG_READERS_GROUP:-}" /var/log/pods/
# setgid so new pod log directories inherit the readers group.
chmod -R g+s /var/log/pods/
fi
log-end 'SetupKubePodLogReadersGroupDir'
log-wrap 'StartKubelet' start-kubelet
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
log-wrap 'ComputeMasterManifestVariables' compute-master-manifest-variables
if [[ -z "${ETCD_SERVERS:-}" ]]; then
log-wrap 'StartEtcdServers' start-etcd-servers
fi
log-wrap 'SourceConfigureKubeApiserver' source ${KUBE_BIN}/configure-kubeapiserver.sh
log-wrap 'StartKubeApiserver' start-kube-apiserver
if [[ "${RUN_KONNECTIVITY_PODS:-false}" == "true" ]]; then
log-wrap 'StartKonnectivityServer' start-konnectivity-server
fi
log-wrap 'StartKubeControllerManager' start-kube-controller-manager
log-wrap 'StartCloudControllerManager' start-cloud-controller-manager
log-wrap 'StartKubeScheduler' start-kube-scheduler
log-wrap 'WaitTillApiserverReady' wait-till-apiserver-ready
log-wrap 'StartKubeAddons' start-kube-addons
log-wrap 'StartClusterAutoscaler' start-cluster-autoscaler
log-wrap 'StartLBController' start-lb-controller
# Backgrounded; reaped by 'wait' below.
log-wrap 'UpdateLegacyAddonNodeLabels' update-legacy-addon-node-labels &
else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
log-wrap 'StartKubeProxy' start-kube-proxy
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
log-wrap 'StartNodeProblemDetector' start-node-problem-detector
fi
fi
log-wrap 'ResetMotd' reset-motd
log-wrap 'PrepareMounterRootfs' prepare-mounter-rootfs
# Wait for all background jobs to finish.
wait
echo "Done for the configuration for kubernetes"
}
# Only run when executed directly (not when sourced for its functions).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
log-init
log-wrap 'ConfigureHelperMain' main "${@}"
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
# Give kube-bootstrap-logs-forwarder.service some time to write all logs.
sleep 3
fi
fi
|
#!/bin/bash
# Runs remove-empty-directories.js from two directories above this script,
# passing all command-line arguments joined into one ';'-separated string,
# then returns to the directory the caller started in.
_mydir="$(pwd)"
BASEDIR=$(dirname "$0")
cd "$BASEDIR/../.."
old="$IFS"
IFS=';'
# "$*" joins the positional parameters with the first character of IFS (';');
# the surrounding single quotes are passed literally to the node script.
str="'$*'"
node remove-empty-directories.js "$str"
IFS=$old
# BUG FIX: was unquoted ('cd $_mydir'), which word-splits and breaks for
# starting directories containing spaces.
cd "$_mydir"
|
<reponame>googleapis/googleapis-gen<gh_stars>1-10
# frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
module Google
module Ads
module GoogleAds
module V8
module Enums
# Container for enum describing possible validation statuses of a feed item.
class FeedItemValidationStatusEnum
# Mixes in protobuf message behavior: this enum wrapper is itself a
# proto message, matching the generated Google Ads API surface.
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# The possible validation statuses of a feed item.
module FeedItemValidationStatus
# No value has been specified.
UNSPECIFIED = 0
# Used for return value only. Represents value unknown in this version.
UNKNOWN = 1
# Validation pending.
PENDING = 2
# An error was found.
INVALID = 3
# Feed item is semantically well-formed.
VALID = 4
end
end
end
end
end
end
end
|
#!/usr/bin/env bash
# Builds or updates a Helm chart repository index for the packages in
# .helm-release-packages, merging with the remote index.yaml when one exists.
#
# Args:
#   $1 REMOTE_URL_BASE  - base URL the packaged charts will be served from.
#   $2 REMOTE_INDEX_URL - URL of the existing remote index.yaml (may 404).
set -e

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# BUG FIX: every expansion below is now quoted; the originals were unquoted
# and would word-split on URLs/paths containing spaces or glob characters.
cd "$DIR/.."

REMOTE_URL_BASE=$1
REMOTE_INDEX_URL=$2
CHARTS_DIR=".helm-release-packages"

EXISTING_INDEX=$(mktemp /tmp/index.yaml.XXXXXX)
# -w "%{http_code}" prints only the status code; the body goes to the temp file.
REMOTE_CODE=$(curl -s -L -w "%{http_code}" "$REMOTE_INDEX_URL" -o "$EXISTING_INDEX")

if [ "$REMOTE_CODE" -eq 200 ]; then
    echo "Adding new packages to existing index"
    helm repo index --merge "$EXISTING_INDEX" --url "$REMOTE_URL_BASE" "$CHARTS_DIR"
else
    echo "Creating new index"
    helm repo index --url "$REMOTE_URL_BASE" "$CHARTS_DIR"
fi

rm "${EXISTING_INDEX}"
|
package healthchart.ui
import javax.swing.JFileChooser
import javax.swing.filechooser.{FileNameExtensionFilter, FileSystemView}
/** Utility for prompting the user to pick a file via a Swing dialog. */
object FileChooser:

  /**
   * Opens a modal file-open dialog rooted at the user's home directory and
   * restricted to the given extensions.
   *
   * @param frame                   parent frame the dialog is modal to
   * @param dialogTitle             title shown on the dialog
   * @param fileExtensionFilterDesc human-readable description of the filter
   * @param fileExtensions          allowed file extensions (without dots)
   * @return Some(absolute path) if the user confirmed a selection, None otherwise
   */
  def chooseFile(frame: Frame,
                 dialogTitle: String,
                 fileExtensionFilterDesc: String,
                 fileExtensions: Array[String]): Option[String] =
    val chooser = new JFileChooser(FileSystemView.getFileSystemView.getHomeDirectory)
    chooser.setDialogTitle(dialogTitle)
    // Hide the built-in "All files" filter so only our extensions are offered.
    chooser.setAcceptAllFileFilterUsed(false)
    chooser.addChoosableFileFilter(
      new FileNameExtensionFilter(fileExtensionFilterDesc, fileExtensions: _*))
    if chooser.showOpenDialog(frame) == JFileChooser.APPROVE_OPTION then
      Some(chooser.getSelectedFile.getAbsolutePath)
    else
      None
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This script is used in Gradle to run single or a set of Python integration tests
# locally or on Jenkins. Note, this script doesn't setup python environment which is
# required for integration test. In order to do so, run Gradle tasks defined in
# :sdks:python:test-suites instead.
#
# In order to run test with customer options, use following commandline flags:
#
# Pipeline related flags:
# runner -> Runner that execute pipeline job.
# e.g. TestDataflowRunner, TestDirectRunner
# project -> Project name of the cloud service.
# gcs_location -> Base location on GCS. Some pipeline options are
# derived from it including output, staging_location
# and temp_location.
# sdk_location -> Python tar ball location. Glob is accepted.
# num_workers -> Number of workers.
# sleep_secs -> Number of seconds to wait before verification.
# streaming -> True if a streaming job.
# worker_jar -> Customized worker jar for dataflow runner.
# kms_key_name -> Name of Cloud KMS encryption key to use in some tests.
# pipeline_opts -> List of space separated pipeline options. If this
# flag is specified, all above flag will be ignored.
# Please include all required pipeline options when
# using this flag.
#
# Test related flags:
# test_opts -> List of space separated options to configure Nose test
# during execution. Commonly used options like `--attr`,
# `--tests`, `--nologcapture`. More can be found in
# https://nose.readthedocs.io/en/latest/man.html#options
# suite -> Namespace for this run of tests. Required if running
# under Jenkins. Used to differentiate runs of the same
# tests with different interpreters/dependencies/etc.
#
# Example usages:
# - Run full set of PostCommit tests with default pipeline options:
# `$ ./run_integration_test.sh`
#
# - Run single integration test with default pipeline options:
# `$ ./run_integration_test.sh --test_opts --tests=apache_beam.examples.wordcount_it_test:WordCountIT.test_wordcount_it`
#
# - Run full set of PostCommit tests with customized pipeline options:
# `$ ./run_integration_test.sh --project my-project --gcs_location gs://my-location`
###########################################################################
# Get pipeline options specified from commandline arguments.
# Default pipeline options
PROJECT=apache-beam-testing
RUNNER=TestDataflowRunner
GCS_LOCATION=gs://temp-storage-for-end-to-end-tests
SDK_LOCATION=build/apache-beam.tar.gz
NUM_WORKERS=1
SLEEP_SECS=20
STREAMING=false
WORKER_JAR=""
KMS_KEY_NAME="projects/apache-beam-testing/locations/global/keyRings/beam-it/cryptoKeys/test"
SUITE=""
# Default test (nose) options.
# Run WordCountIT.test_wordcount_it by default if no test options are
# provided.
TEST_OPTS="--tests=apache_beam.examples.wordcount_it_test:WordCountIT.test_wordcount_it --nocapture"
# Flag parsing: every flag takes a value; the paired shifts consume
# "--flag value". Unknown flags abort the run.
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
--runner)
RUNNER="$2"
shift # past argument
shift # past value
;;
--project)
PROJECT="$2"
shift # past argument
shift # past value
;;
--gcs_location)
GCS_LOCATION="$2"
shift # past argument
shift # past value
;;
--sdk_location)
SDK_LOCATION="$2"
shift # past argument
shift # past value
;;
--num_workers)
NUM_WORKERS="$2"
shift # past argument
shift # past value
;;
--sleep_secs)
SLEEP_SECS="$2"
shift # past argument
shift # past value
;;
--streaming)
STREAMING="$2"
shift # past argument
shift # past value
;;
--worker_jar)
WORKER_JAR="$2"
shift # past argument
shift # past value
;;
--runner_v2)
RUNNER_V2="$2"
shift # past argument
shift # past value
;;
--kms_key_name)
KMS_KEY_NAME="$2"
shift # past argument
shift # past value
;;
--dataflow_endpoint)
DATAFLOW_ENDPOINT="$2"
shift # past argument
shift # past value
;;
--pipeline_opts)
PIPELINE_OPTS="$2"
shift # past argument
shift # past value
;;
--test_opts)
TEST_OPTS="$2"
shift # past argument
shift # past value
;;
--suite)
SUITE="$2"
shift # past argument
shift # past value
;;
*) # unknown option
echo "Unknown option: $1"
exit 1
;;
esac
done
# A suite name is mandatory under Jenkins so result files don't collide.
if [[ "$JENKINS_HOME" != "" && "$SUITE" == "" ]]; then
echo "Argument --suite is required in a Jenkins environment."
exit 1
fi
XUNIT_FILE="nosetests-$SUITE.xml"
# errexit is enabled only after flag parsing on purpose: the loop relies on
# non-fatal behavior of its helpers.
set -o errexit
###########################################################################
# Check that the script is running in a known directory.
if [[ $PWD != *sdks/python* ]]; then
echo 'Unable to locate Apache Beam Python SDK root directory'
exit 1
fi
# Go to the Apache Beam Python SDK root
if [[ $PWD != *sdks/python ]]; then
cd $(pwd | sed 's/sdks\/python.*/sdks\/python/')
fi
###########################################################################
# Build pipeline options if not provided in --pipeline_opts from commandline
if [[ -z $PIPELINE_OPTS ]]; then
# Get tar ball path
if [[ $(find ${SDK_LOCATION} 2> /dev/null) ]]; then
# SDK_LOCATION may be a glob; take the last match.
SDK_LOCATION=$(find ${SDK_LOCATION} | tail -n1)
else
echo "[WARNING] Could not find SDK tarball in SDK_LOCATION: $SDK_LOCATION."
fi
# Install test dependencies for ValidatesRunner tests.
# pyhamcrest==1.10.0 doesn't work on Py2.
# See: https://github.com/hamcrest/PyHamcrest/issues/131.
echo "pyhamcrest!=1.10.0,<2.0.0" > postcommit_requirements.txt
echo "mock<3.0.0" >> postcommit_requirements.txt
echo "parameterized>=0.7.1,<0.8.0" >> postcommit_requirements.txt
# Options used to run testing pipeline on Cloud Dataflow Service. Also used for
# running on DirectRunner (some options ignored).
opts=(
"--runner=$RUNNER"
"--project=$PROJECT"
"--staging_location=$GCS_LOCATION/staging-it"
"--temp_location=$GCS_LOCATION/temp-it"
"--output=$GCS_LOCATION/py-it-cloud/output"
"--sdk_location=$SDK_LOCATION"
"--requirements_file=postcommit_requirements.txt"
"--num_workers=$NUM_WORKERS"
"--sleep_secs=$SLEEP_SECS"
)
# Add --streaming if provided
if [[ "$STREAMING" = true ]]; then
opts+=("--streaming")
fi
# Add --dataflow_worker_jar if provided
if [[ ! -z "$WORKER_JAR" ]]; then
opts+=("--dataflow_worker_jar=$WORKER_JAR")
fi
# Add --runner_v2 if provided
if [[ "$RUNNER_V2" = true ]]; then
opts+=("--experiments=use_runner_v2")
# Cleanup jira BEAM-9391
opts+=("--experiments=runner_harness_container_image=gcr.io/cloud-dataflow/v1beta3/unified-harness:20200227-rc01")
if [[ "$STREAMING" = true ]]; then
# Dataflow Runner V2 only supports streaming engine.
opts+=("--enable_streaming_engine")
else
opts+=("--experiments=beam_fn_api")
fi
fi
if [[ ! -z "$KMS_KEY_NAME" ]]; then
opts+=(
"--kms_key_name=$KMS_KEY_NAME"
"--dataflow_kms_key=$KMS_KEY_NAME"
)
fi
if [[ ! -z "$DATAFLOW_ENDPOINT" ]]; then
opts+=("--dataflow_endpoint=$DATAFLOW_ENDPOINT")
fi
# Join the array into one space-separated string for --test-pipeline-options.
PIPELINE_OPTS=$(IFS=" " ; echo "${opts[*]}")
fi
###########################################################################
# Run tests and validate that jobs finish successfully.
echo ">>> RUNNING integration tests with pipeline options: $PIPELINE_OPTS"
echo ">>> test options: $TEST_OPTS"
# TODO(BEAM-3713): Pass $SUITE once migrated to pytest. xunitmp doesn't support
# suite names.
python setup.py nosetests \
--test-pipeline-options="$PIPELINE_OPTS" \
--with-xunitmp --xunitmp-file=$XUNIT_FILE \
--ignore-files '.*py3\d?\.py$' \
$TEST_OPTS
|
#!/bin/bash
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Prints a "Compass Installer" banner with figlet, then pauses briefly
# (xtrace is suppressed around the sleep to keep the console output clean).
function print_logo()
{
figlet -ctf slant Compass Installer
set +x; sleep 2; set -x
}
# Installs docker-ce from Docker's official apt repository on Ubuntu and
# configures the daemon to use the devicemapper storage driver.
function install_docker()
{
    sudo apt-get install -y linux-image-extra-$(uname -r) linux-image-extra-virtual
    sudo apt-get install -y apt-transport-https ca-certificates curl \
         software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo apt-key fingerprint 0EBFCD88
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
    sudo apt-get update
    sudo apt-get install -y docker-ce
    sleep 5
    # BUG FIX: 'sudo cat << EOF > file' runs only cat under sudo — the
    # redirection is performed by the unprivileged shell and fails for the
    # root-owned /etc/docker. 'sudo tee' makes the privileged process write.
    sudo tee /etc/docker/daemon.json > /dev/null << EOF
{
"storage-driver": "devicemapper"
}
EOF
    sudo service docker start
    sudo service docker restart
}
# Downloads the installer tarball ($TAR_URL) into $WORK_DIR/cache and unpacks
# it into $WORK_DIR/installer. TAR_URL and WORK_DIR must be set by the caller
# and the cache/installer directories must already exist.
function extract_tar()
{
    # BUG FIX: replaced backticks and unquoted expansions with $(...) and
    # quoting so URLs or paths containing spaces cannot word-split.
    tar_name=$(basename "$TAR_URL")
    rm -f "$WORK_DIR/cache/$tar_name"
    curl --connect-timeout 10 -o "$WORK_DIR/cache/$tar_name" "$TAR_URL"
    tar -zxf "$WORK_DIR/cache/$tar_name" -C "$WORK_DIR/installer"
}
# Prepares the host for deployment: lets libvirt/qemu run guests as root,
# ensures openvswitch is up, recreates the $WORK_DIR layout, fetches the
# installer tarball, and installs the libvirt qemu hook script.
function prepare_env() {
# Run qemu guests as root (required by the deployment's disk/network setup).
sudo sed -i -e 's/^#user =.*/user = "root"/g' /etc/libvirt/qemu.conf
sudo sed -i -e 's/^#group =.*/group = "root"/g' /etc/libvirt/qemu.conf
sudo service libvirt-bin restart
if sudo service openvswitch-switch status|grep stop; then
sudo service openvswitch-switch start
fi
# prepare work dir
# Note: cache/ is deliberately NOT removed so downloads can be reused.
sudo rm -rf $WORK_DIR/{installer,vm,network,iso,docker}
mkdir -p $WORK_DIR/installer
mkdir -p $WORK_DIR/vm
mkdir -p $WORK_DIR/network
mkdir -p $WORK_DIR/iso
mkdir -p $WORK_DIR/cache
mkdir -p $WORK_DIR/docker
extract_tar
chmod 755 $WORK_DIR -R
if [[ ! -d /etc/libvirt/hooks ]]; then
sudo mkdir -p /etc/libvirt/hooks
fi
sudo cp ${COMPASS_DIR}/deploy/qemu_hook.sh /etc/libvirt/hooks/qemu
}
# Recreates the $WORK_DIR/venv virtualenv and installs all system and Python
# prerequisites. On first deploy, installs OS packages (apt on Debian-family,
# yum on RedHat-family) and docker; when $JHPKG_URL is set, everything comes
# from the offline jump-host package instead of the network.
function _prepare_python_env() {
rm -rf $WORK_DIR/venv
mkdir -p $WORK_DIR/venv
if [[ "$DEPLOY_FIRST_TIME" == "true" ]]; then
if [[ ! -z "$JHPKG_URL" ]]; then
# Offline mode: OS packages come from the bundled local PPA.
_pre_env_setup
else
# /etc/redhat-release distinguishes yum-based from apt-based hosts.
if [[ ! -f /etc/redhat-release ]]; then
sudo apt-get update -y
sudo apt-get install -y --force-yes mkisofs bc curl ipmitool openvswitch-switch
sudo apt-get install -y --force-yes git python-dev python-pip figlet sshpass
sudo apt-get install -y --force-yes libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev
sudo apt-get install -y --force-yes libffi-dev libssl-dev
else
sudo yum install -y centos-release-openstack-queens
sudo yum install -y epel-release
sudo yum install openvswitch -y --nogpgcheck
sudo yum install -y git python-devel python-pip figlet sshpass mkisofs bc curl ipmitool
sudo yum install -y libxslt-devel libxml2-devel libvirt-devel libmysqld-devel
sudo yum install -y qemu-kvm qemu-img virt-manager libvirt libvirt-python libvirt-client virt-install virt-viewer
sudo yum install -y libffi libffi-devel openssl-devel
sudo yum groupinstall -y 'Development Tools'
fi
# Install docker only if no working docker is already present.
sudo docker version >/dev/null 2>&1
if [[ $? -ne 0 ]]; then
install_docker
fi
fi
fi
if [[ ! -z "$JHPKG_URL" ]]; then
# Offline mode: pip packages come from the bundled local mirror.
_pre_pip_setup
else
sudo pip install --upgrade virtualenv
virtualenv $WORK_DIR/venv
source $WORK_DIR/venv/bin/activate
pip install cffi==1.10.0
pip install MarkupSafe==1.0
pip install pip==9.0.1
pip install cheetah==2.4.4
pip install pyyaml==3.12
pip install requests==2.18.1
pip install netaddr==0.7.19
pip install oslo.config==4.6.0
pip install ansible==2.3.1.0
# For sudo use
sudo pip install docker-compose==1.14.0
fi
}
# Bootstraps the jump-host environment from a local offline package
# ($JHPKG_URL, a file:// URL): backs up the apt configuration, serves the
# bundled PPA over localhost HTTP on :9998, installs build/virtualization
# prerequisites and docker, then restores the original apt configuration.
function _pre_env_setup()
{
    rm -rf $WORK_DIR/prepare
    mkdir -p $WORK_DIR/prepare
    # Strip the leading "file://" (7 chars) to get the on-disk path.
    jhpkg_url=${JHPKG_URL:7}
    echo $jhpkg_url
    if [[ ! -f "$jhpkg_url" ]]; then
        echo "There is no jh_env_package."
        exit 1
    fi
    tar -zxvf $jhpkg_url -C $WORK_DIR/prepare/
    cd $WORK_DIR/prepare/jh_env_package
    tar -zxvf jh-ppa.tar.gz
    # BUG FIX: was '[[ ! -z /etc/apt/sources.list.d ]]', which tests a
    # non-empty literal string and is always true; test for the directory.
    if [[ -d /etc/apt/sources.list.d ]]; then
        mv /etc/apt/sources.list.d /etc/apt/sources.list.d.bak
    fi
    if [[ -f /etc/apt/apt.conf ]]; then
        mv /etc/apt/apt.conf /etc/apt/apt.conf.bak
    fi
    # BUG FIX: 'sudo cat << EOF > file' redirects as the invoking user, not
    # root; 'sudo tee' lets the privileged process perform the write.
    sudo tee /etc/apt/apt.conf > /dev/null << EOF
APT::Get::Assume-Yes "true";
APT::Get::force-yes "true";
Acquire::http::Proxy::127.0.0.1:9998 DIRECT;
EOF
    if [[ -f /etc/apt/sources.list ]]; then
        mv /etc/apt/sources.list /etc/apt/sources.list.bak
    fi
    sudo tee /etc/apt/sources.list > /dev/null << EOF
deb [arch=amd64] http://127.0.0.1:9998/jh-ppa $(lsb_release -cs) main
EOF
    # Serve the extracted PPA over HTTP on :9998 (python2 module on trusty).
    if [[ $(lsb_release -cs) == "trusty" ]]; then
        nohup python -m SimpleHTTPServer 9998 &
    else
        nohup python3 -m http.server 9998 &
    fi
    http_ppa_pid=$!
    cd -
    sleep 5
    apt-get update
    apt-get install -y mkisofs bc curl ipmitool openvswitch-switch \
        git python-pip python-dev figlet \
        libxslt-dev libxml2-dev libvirt-dev \
        build-essential qemu-utils qemu-kvm libvirt-bin \
        virtinst libmysqld-dev \
        libssl-dev libffi-dev python-cffi
    # Install docker from the PPA only if no working docker is present;
    # otherwise insist on the expected storage driver.
    sudo docker version >/dev/null 2>&1
    if [[ $? -ne 0 ]]; then
        sudo apt-get install -y docker-ce
        sleep 5
        sudo tee /etc/docker/daemon.json > /dev/null << EOF
{
"storage-driver": "devicemapper"
}
EOF
        sudo service docker start
        sudo service docker restart
    else
        StorageDriver=$(sudo docker info | grep "Storage Driver" | awk '{print $3}')
        if [[ $StorageDriver != "devicemapper" ]]; then
            echo "The storage driver of docker currently only supports 'devicemapper'."
            exit 1
        fi
    fi
    kill -9 $http_ppa_pid
    if [[ ! -d /etc/libvirt/hooks ]]; then
        sudo mkdir -p /etc/libvirt/hooks
    fi
    sudo cp -f ${COMPASS_DIR}/deploy/qemu_hook.sh /etc/libvirt/hooks/qemu
    # Restore the original apt configuration.
    rm -rf /etc/apt/sources.list
    if [[ -f /etc/apt/sources.list.bak ]]; then
        mv /etc/apt/sources.list.bak /etc/apt/sources.list
    fi
    rm -rf /etc/apt/apt.conf
    if [[ -f /etc/apt/apt.conf.bak ]]; then
        mv /etc/apt/apt.conf.bak /etc/apt/apt.conf
    fi
    # BUG FIX: same always-true string test; only restore when the backup
    # directory actually exists.
    if [[ -d /etc/apt/sources.list.d.bak ]]; then
        mv /etc/apt/sources.list.d.bak /etc/apt/sources.list.d
    fi
}
# Installs the pinned Python dependencies from the offline jump-host package:
# backs up ~/.pip/pip.conf, points pip at a local find-links mirror served on
# :9999, builds the virtualenv, installs the pins, then restores pip.conf.
function _pre_pip_setup()
{
# Back up any existing user pip configuration before overriding it.
if [[ -d ~/.pip ]]; then
if [[ -f ~/.pip/pip.conf ]]; then
mv ~/.pip/pip.conf ~/.pip/pip.conf.bak
fi
else
mkdir -p ~/.pip
fi
rm -rf $WORK_DIR/prepare
mkdir -p $WORK_DIR/prepare
# Strip the leading "file://" (7 chars) to get the on-disk path.
jhpkg_url=${JHPKG_URL:7}
echo $jhpkg_url
if [[ ! -f "$jhpkg_url" ]]; then
echo "There is no jh_env_package."
exit 1
fi
tar -zxvf $jhpkg_url -C $WORK_DIR/prepare/
cd $WORK_DIR/prepare/jh_env_package
tar -zxvf jh_pip.tar.gz
# Route all pip traffic to the local mirror; no-index disables PyPI.
cat << EOF > ~/.pip/pip.conf
[global]
find-links = http://127.0.0.1:9999/jh_pip
no-index = true
[install]
trusted-host=127.0.0.1
EOF
# Serve the extracted wheel/sdist mirror on :9999 (python2 module on trusty).
if [[ $(lsb_release -cs) == "trusty" ]]; then
nohup python -m SimpleHTTPServer 9999 &
else
nohup python3 -m http.server 9999 &
fi
http_pip_pid=$!
echo $http_pip_pid
sleep 5
cd -
pip install --upgrade virtualenv
virtualenv $WORK_DIR/venv
source $WORK_DIR/venv/bin/activate
pip install cffi==1.10.0
pip install MarkupSafe==1.0
pip install pip==9.0.1
pip install cheetah==2.4.4
pip install pyyaml==3.12
pip install requests==2.18.1
pip install netaddr==0.7.19
pip install oslo.config==4.6.0
pip install ansible==2.3.1.0
# Installed outside the venv so docker-compose is available under sudo.
sudo pip install docker-compose==1.14.0
if [[ $(lsb_release -cs) == "xenial" ]]; then
sudo pip install -U pyOpenSSL
fi
kill -9 $http_pip_pid
# Restore the user's original pip configuration.
if [[ -f ~/.pip/pip.conf.bak ]]; then
mv ~/.pip/pip.conf.bak ~/.pip/pip.conf
else
rm -rf ~/.pip/pip.conf
fi
}
# Activates the $WORK_DIR/venv virtualenv, (re)building it on first deploy
# or when activation fails, and prints which python ends up on PATH.
function prepare_python_env()
{
if [[ "$DEPLOY_FIRST_TIME" == "true" ]]; then
_prepare_python_env
else
source $WORK_DIR/venv/bin/activate
# NOTE(review): checking $? after 'source' catches a failing activate
# script, but with 'set -e' in a caller a missing file would abort before
# reaching this fallback — confirm intended behavior.
if [[ $? -ne 0 ]]; then
_prepare_python_env
fi
fi
# Show whether the venv interpreter or the system one is active.
which python
}
|
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Usage banner printed on every invocation.
echo "=============================================================================================================="
echo "Please run the script as: "
echo "sh run_eval.sh OUTPUT_PATH DATANAME MODEL_CKPT DEVICE_ID"
echo "for example: bash run_eval.sh output ljspeech device0/ckpt0/tacotron2-5-118.ckpt 0"
echo "It is better to use absolute path."
echo "=============================================================================================================="
# Resolves $1 to an absolute path: absolute inputs are echoed unchanged;
# relative inputs are anchored at $PWD (realpath -m also normalizes paths
# that do not exist yet instead of failing).
get_real_path(){
  if [ "${1:0:1}" == "/" ]; then
    echo "$1"
  else
    # BUG FIX: the argument to realpath was unquoted ($PWD/$1), so paths
    # containing spaces word-split into multiple arguments.
    echo "$(realpath -m "$PWD/$1")"
  fi
}
# Positional arguments (see the banner above for usage).
OUTPUT_PATH=$1
echo $PWD/eval/$OUTPUT_PATH
DATANAME=$2
MODEL_CKPT=$(get_real_path $3)
DEVICEID=$4
# Single-device evaluation: one rank with rank id 0 on the chosen device.
export DEVICE_NUM=1
export DEVICE_ID=$DEVICEID
export RANK_ID=0
export RANK_SIZE=1
config_path="./${DATANAME}_config.yaml"
echo "config path is : ${config_path}"
# Recreate a clean ./eval working copy of sources, configs and scripts.
if [ -d "eval" ];
then
rm -rf ./eval
fi
mkdir -p ./eval/$OUTPUT_PATH
cp ../*.py ./eval
cp ../*.yaml ./eval
cp -r ../src ./eval
cp -r ../model_utils ./eval
cp -r ../scripts/*.sh ./eval
cd ./eval || exit
echo "start evaling for device $DEVICE_ID"
env > env.log
# Runs in the background; stdout/stderr captured in eval_tacotron2.log.
# NOTE(review): this executes ../../eval.py (the original repo copy), not the
# eval.py just copied into ./eval — confirm which copy is intended.
python ../../eval.py --config_path $config_path --output_path $PWD/$OUTPUT_PATH --model_ckpt $MODEL_CKPT> eval_tacotron2.log 2>&1 &
cd ..
|
-- Year-partitioned reporting view: exposes only calendar-year-2004 rows of
-- SALES with the same column list as the base table.
-- NOTE(review): BETWEEN is inclusive at both endpoints; if sale_date carries
-- a time component, rows on 2004-12-31 with a time later than 00:00:00 may
-- fall outside the range — confirm the column's type.
CREATE OR REPLACE VIEW v_sales_2004
(sales_id,customer_id,product_id,sale_date,
quantity,sale_value,department_id,sales_rep_id,gst_flag) AS
SELECT sales_id,customer_id,product_id,sale_date,
quantity,sale_value,department_id,sales_rep_id,gst_flag
FROM sales
WHERE sale_date BETWEEN '2004-01-01' AND '2004-12-31'
|
// Generated by script, don't edit it please.
import createSvgIcon from '../../createSvgIcon';
import CutSvg from '@rsuite/icon-font/lib/legacy/Cut';
// Icon component for the legacy "cut" glyph, built by the shared
// createSvgIcon factory from the raw SVG and its metadata.
const Cut = createSvgIcon({
as: CutSvg,
ariaLabel: 'cut',
category: 'legacy',
displayName: 'Cut'
});
export default Cut;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.