text stringlengths 1 1.05M |
|---|
<gh_stars>1-10
package libs.trustconnector.ble.pursesdk;
import android.bluetooth.BluetoothDevice;
import android.content.Context;
import libs.trustconnector.scdp.crypto.DES;
import libs.general.bluetooth.le.BluetoothDeviceWrapper;
import libs.general.bluetooth.le.GattError;
import libs.general.bluetooth.le.RfcommGatt;
import java.io.IOException;
import java.security.GeneralSecurityException;
/**
 * Static facade for talking to a BLE "purse" (hardware wallet) peripheral over
 * an RFCOMM-over-GATT channel.
 *
 * Usage contract (visible from the code): callers must call {@link #initKey}
 * first, then {@link #connectPeripheral}; all other operations require the
 * mutual-authentication handshake performed during connect to have succeeded
 * ({@code isConnetSucess == true}).
 *
 * NOTE(review): the handshake in checkBle/checkDevice (key diversification,
 * sequence counter, card/host cryptograms, session keys) looks like a
 * GlobalPlatform SCP02-style mutual authentication — confirm against the
 * card spec before relying on that interpretation.
 *
 * Thread-safety: all state is held in mutable static fields; this class is
 * NOT safe for concurrent use from multiple threads.
 */
public class BlePurseSDK {
    private static final String TAG = "BlePurseSDK";
    // Active GATT channel; null until connectPeripheral() succeeds.
    private static RfcommGatt gatt;
    // Default transmit/receive timeout in milliseconds (see setDefaultTime).
    private static int time = 25000;
    // Last raw response received from the peripheral.
    private static byte[] response;
    // Human-readable status of the last operation (Chinese messages; see getErrMsg).
    private static String errMsg = "";
    // Rolling MAC value chained across secure commands (updated by commandEnc).
    private static byte[] res3;
    // First 8 bytes of skCMAC, used as a DES key inside checkDevice/commandEnc.
    private static byte[] encKey;
    // Session C-MAC key derived during the handshake.
    private static byte[] skCMAC;
    // True only after the full connect + mutual-auth sequence succeeds.
    private static boolean isConnetSucess = false;
    // Host challenge generated at connect time.
    private static byte[] randomValue;
    // Initial chaining vector for MAC computation (derived from res3).
    private static byte[] icv;
    // Session decryption key (used to decrypt responses, e.g. getId/sign).
    private static byte[] skDec;
    // Session encryption key (used for cryptograms and PIN encryption).
    private static byte[] skEnc;
    // Return codes used by the int-returning operations below.
    public static final int CONNECT_TIME_OUT = -1;
    public static final int CONNECT_FAILED = -2;
    public static final int PIN_ERROR = -3;
    public static final int PUK_ERROR = -4;

    public BlePurseSDK() {
    }

    /**
     * Stores the three static master keys (hex strings) used to derive the
     * session keys during the connect handshake. Must be called before
     * {@link #connectPeripheral}.
     */
    public static void initKey(String enckey, String macKey, String decKey) {
        BleCommand.ENCkey = HexString.parseHexString(enckey);
        BleCommand.MACkey = HexString.parseHexString(macKey);
        BleCommand.DECkey = HexString.parseHexString(decKey);
    }

    /**
     * Connects to the peripheral and runs the mutual-authentication handshake.
     *
     * Sequence: open GATT -> send select/connect command (expects "9000") ->
     * send host challenge (GET_BLE_CHECK_CODE + random) -> verify the card's
     * cryptogram via {@link #checkBle}.
     *
     * @return true only when every step, including device verification, passed.
     *         On failure {@link #getErrMsg()} describes the step that failed.
     */
    public static boolean connectPeripheral(Context context, BluetoothDevice device) {
        if (BleCommand.MACkey != null && BleCommand.ENCkey != null && BleCommand.DECkey != null) {
            isConnetSucess = false;
            gatt = (new BluetoothDeviceWrapper(device)).createRfcommGatt(context);
            gatt.setRecvTimeout(time);
            RfcommGatt.CONNECTION_PARAM_UPDATE_REQ_DELAY = 500;
            try {
                int retCode = gatt.connect(time);
                if (retCode == 0) {
                    response = gatt.transmit(BleCommand.connetCommand, time);
                    String result = HexString.toHexString(response);
                    if (result.equals("9000")) {
                        randomValue = BleCommand.getRandom_Value();
                        // Challenge exchange uses a longer 30s timeout than the default.
                        response = gatt.transmit(Utils.addBytes(BleCommand.GET_BLE_CHECK_CODE, randomValue), 30000);
                        // A valid check-code response carries at least 28 bytes
                        // (10 diversification + 1 keyVer + 1 SCPI + 2 counter + 6 challenge + 8 cryptogram).
                        if (response.length >= 28) {
                            return checkBle(gatt, response);
                        }
                        // "100000" appears to be the library's timeout marker — TODO confirm in RfcommGatt.
                        if (HexString.toHexString(response).equals("100000")) {
                            errMsg = "蓝牙钱包连接超时";
                        } else {
                            errMsg = "钱包返回数据解析失败";
                        }
                        isConnetSucess = false;
                    } else {
                        errMsg = "蓝牙钱包选择失败";
                        isConnetSucess = false;
                    }
                } else {
                    // Connect failed: release the channel before reporting.
                    gatt.close(time);
                    isConnetSucess = false;
                    if (retCode == 100000) {
                        errMsg = "蓝牙钱包连接超时";
                    } else {
                        errMsg = "蓝牙钱包连接失败";
                        LogUtils.e("ble", "connect failed, retCode=" + retCode);
                    }
                }
                return isConnetSucess;
            } catch (InterruptedException var4) {
                var4.printStackTrace();
            } catch (IllegalArgumentException var5) {
                var5.printStackTrace();
            } catch (IOException var6) {
                var6.printStackTrace();
            } catch (Exception var7) {
                var7.printStackTrace();
            }
            return isConnetSucess;
        } else {
            errMsg = "请初始化key";
            return false;
        }
    }

    /**
     * Verifies the card's cryptogram from the check-code response and, on
     * success, answers with the host cryptogram via {@link #checkDevice}.
     *
     * NOTE(review): Utils.addBytes(src, offset, length) is used here as a
     * sub-array/slice helper — confirm that overload's semantics in Utils.
     * The numeric mode arguments to DES.doCrypto (289, 801, ...) are opaque
     * cipher-mode flags interpreted inside the DES class — TODO confirm.
     */
    private static boolean checkBle(RfcommGatt gatt, byte[] checkCode) throws Exception {
        // Parse the fixed-layout check-code response (offsets/lengths per the 28-byte minimum above).
        byte[] KeyDiversificationData = Utils.addBytes(response, 0, 10);
        byte[] KeyVer = Utils.addBytes(response, 10, 1);
        byte[] SCPI = Utils.addBytes(response, 11, 1);
        byte[] SequenceCounter = Utils.addBytes(response, 12, 2);
        byte[] CardChallenge = Utils.addBytes(response, 14, 6);
        byte[] CardCryptogram = Utils.addBytes(response, 20, 8);
        // Derive the four session keys from the sequence counter and master keys.
        skCMAC = DES.doCrypto(BleCommand.getSessionData(SequenceCounter), BleCommand.MACkey, 289);
        byte[] skRMAC = DES.doCrypto(BleCommand.getSessionData2(SequenceCounter), BleCommand.MACkey, 289);
        skDec = DES.doCrypto(BleCommand.getSessionData3(SequenceCounter), BleCommand.DECkey, 289);
        skEnc = DES.doCrypto(BleCommand.getSessionData4(SequenceCounter), BleCommand.ENCkey, 289);
        // Recompute the card cryptogram locally: enc(hostRandom || counter || cardChallenge).
        byte[] card_auth_crypoto_org = Utils.addBytes(randomValue, SequenceCounter, CardChallenge);
        byte[] result = DES.doCrypto(card_auth_crypoto_org, skEnc, 801);
        // Only the trailing 8 bytes of the encryption result are compared.
        if (HexString.toHexString(Utils.addBytes(result, result.length - 8, 8)).equals(HexString.toHexString(CardCryptogram))) {
            return checkDevice(SequenceCounter, CardChallenge, skCMAC, skEnc);
        } else {
            isConnetSucess = false;
            errMsg = "蓝牙钱包数据比对失败";
            return isConnetSucess;
        }
    }

    /**
     * Second half of mutual authentication: builds the host cryptogram and its
     * MAC, sends the check-device command, and expects "9000".
     * Side effects: seeds {@code encKey} and the rolling {@code res3} MAC used
     * by subsequent secure commands.
     */
    private static boolean checkDevice(byte[] sequenceCounter, byte[] cardChallenge, byte[] skCMAC, byte[] skEnc) throws Exception {
        byte[] hostAuthCrypto = DES.doCrypto(Utils.addBytes(sequenceCounter, cardChallenge, randomValue), skEnc, 801);
        byte[] macData = BleCommand.getMacData(hostAuthCrypto);
        // Split skCMAC into two single-DES halves for the retail-MAC style computation below.
        encKey = Utils.addBytes(skCMAC, 0, 8);
        byte[] decKey = Utils.addBytes(skCMAC, 8, 8);
        byte[] res1 = DES.doCrypto(macData, encKey, 801);
        byte[] res2 = DES.doCrypto(Utils.addBytes(res1, res1.length - 8, 8), decKey, 290);
        res3 = DES.doCrypto(res2, encKey, 289);
        byte[] checkDeviceCode = BleCommand.getCheckDeviceCode(hostAuthCrypto, res3);
        response = gatt.transmit(checkDeviceCode, time);
        String result = HexString.toHexString(response);
        if (result.equals("9000")) {
            isConnetSucess = true;
            errMsg = "设备校验成功";
        } else {
            errMsg = "设备校验失败";
            if (result.equals("100000")) {
                errMsg = "蓝牙钱包连接超时";
            }
            isConnetSucess = false;
        }
        return isConnetSucess;
    }

    /**
     * Wraps a data-less APDU in secure messaging: sets CLA to 0x04, Lc to 8
     * (the MAC length), computes the chained MAC, and appends it.
     * NOTE: mutates the caller's {@code command} array in place, and advances
     * the rolling MAC state ({@code icv}/{@code res3}).
     */
    private static byte[] commandEnc(byte[] command) throws GeneralSecurityException {
        command[0] = 4;
        command[command.length - 1] = 8;
        icv = DES.doCrypto(res3, encKey, 289);
        res3 = DES.calcMAC(command, 8, skCMAC, icv, 13089);
        return Utils.addBytes(command, res3);
    }

    /**
     * Wraps an APDU that carries a data field: the MAC is computed over the
     * command with Lc = plainDataLen + MAC length, then Lc is rewritten to
     * encryptedDataLen + MAC length before transmission — the two Lc writes
     * are both intentional, not a dead store.
     * NOTE: mutates the caller's {@code command} array in place.
     */
    private static byte[] commandEnc(byte[] command, byte[] data) {
        command[0] = 4;
        // Encrypt the data field (e.g. the PIN) under the session encryption key.
        byte[] pin = DES.doCrypto(data, skEnc, 801);
        byte b = (byte)(data.length + res3.length);
        command[command.length - 1] = b;
        // MAC is calculated over header + PLAINTEXT data.
        byte[] newCommand = Utils.addBytes(command, data);
        icv = DES.doCrypto(res3, encKey, 289);
        res3 = DES.calcMAC(newCommand, 8, skCMAC, icv, 13089);
        // Transmitted Lc reflects the ENCRYPTED data length + MAC length.
        byte b2 = (byte)(pin.length + icv.length);
        command[command.length - 1] = b2;
        return Utils.addBytes(Utils.addBytes(command, pin), res3);
    }

    /**
     * Reads the wallet ID (APDU 00B5000006), decrypts the response body with
     * the session decryption key, and returns the first 6 bytes.
     *
     * @return the 6-byte ID, or null when not connected/verified or on error.
     */
    public static byte[] getId() {
        try {
            errMsg = "";
            if (!isConnetSucess || gatt == null) {
                errMsg = "蓝牙钱包未校验";
                return null;
            }
            byte[] command = HexString.parseHexString("00B5000006");
            byte[] commandEnc = commandEnc(command);
            // Local 'response' intentionally shadows the static field here.
            byte[] response = gatt.transmit(commandEnc, time);
            LogUtils.e("BlePurseSDK", "ID解密前:" + HexString.toHexString(response));
            // Strip the trailing 2-byte status word before decrypting.
            byte[] bytes = DES.doCrypto(Utils.addBytes(response, 0, response.length - 2), skDec, 274);
            LogUtils.e("BlePurseSDK", "skDec:" + HexString.toHexString(skDec));
            if (bytes.length == 8) {
                bytes = Utils.addBytes(bytes, 0, 6);
                LogUtils.e("BlePurseSDK", "ID解密后:" + HexString.toHexString(bytes));
                return bytes;
            }
        } catch (InterruptedException var4) {
            var4.printStackTrace();
        } catch (GeneralSecurityException var5) {
            var5.printStackTrace();
        } catch (GattError var6) {
            var6.printStackTrace();
        }
        return null;
    }

    /**
     * Verifies the wallet PIN (APDU 0020000008) under secure messaging.
     *
     * @param pin exactly 8 bytes; otherwise returns {@link #PIN_ERROR}.
     * @return the card's status word parsed as hex (success "9000" -> 36864,
     *         NOT 0), -1 on timeout, -2 when not connected or on exception,
     *         -3 on bad PIN length.
     */
    public static int verifyPIN(byte[] pin) {
        try {
            errMsg = "";
            if (isConnetSucess && gatt != null) {
                if (pin.length != 8) {
                    errMsg = "传入的pin长度不对";
                    return -3;
                }
                byte[] pinInstruct = HexString.parseHexString("0020000008");
                byte[] commandEnc = commandEnc(pinInstruct, pin);
                LogUtils.e("commandEnc:" + HexString.toHexString(commandEnc));
                byte[] response = gatt.transmit(commandEnc, time);
                String result = HexString.toHexString(response);
                LogUtils.e("BlePurseSDK", result);
                if (result.equals("9000")) {
                    errMsg = "校验成功";
                } else {
                    if (result.equals("100000")) {
                        errMsg = "蓝牙钱包连接超时";
                        return -1;
                    }
                    errMsg = "校验失败";
                }
                return Integer.parseInt(result, 16);
            }
            errMsg = "卡片未连接";
            return -2;
        } catch (InterruptedException var5) {
            var5.printStackTrace();
        } catch (GattError var6) {
            var6.printStackTrace();
        }
        return -2;
    }

    /**
     * Unblocks the PIN using the PUK (APDU 002C000010, data = puk || pin).
     * Both puk and pin must be exactly 8 bytes. Return-code semantics match
     * {@link #verifyPIN}, plus {@link #PUK_ERROR} (-4) for a bad PUK length.
     */
    public static int unblockPIN(byte[] puk, byte[] pin) {
        try {
            errMsg = "";
            if (isConnetSucess && gatt != null) {
                if (puk.length != 8) {
                    errMsg = "传入的puk长度不对";
                    return -4;
                }
                if (pin.length != 8) {
                    errMsg = "传入的pin长度不对";
                    return -3;
                }
                byte[] pinInstruct = HexString.parseHexString("002C000010");
                byte[] data = Utils.addBytes(puk, pin);
                byte[] commandEnc = commandEnc(pinInstruct, data);
                LogUtils.e("commandEnc:" + HexString.toHexString(commandEnc));
                byte[] response = gatt.transmit(commandEnc, time);
                String result = HexString.toHexString(response);
                LogUtils.e("BlePurseSDK", result);
                if (result.equals("9000")) {
                    errMsg = "解锁成功";
                } else {
                    if (result.equals("100000")) {
                        errMsg = "蓝牙钱包连接超时";
                        return -1;
                    }
                    errMsg = "解锁失败";
                }
                return Integer.parseInt(result, 16);
            }
            errMsg = "卡片未连接";
            return -2;
        } catch (InterruptedException var7) {
            var7.printStackTrace();
        } catch (GattError var8) {
            var8.printStackTrace();
        }
        return -2;
    }

    /**
     * Changes the wallet PIN (APDU 0024000008). Return-code semantics match
     * {@link #verifyPIN}.
     */
    public static int changePIN(byte[] pin) {
        try {
            errMsg = "";
            if (isConnetSucess && gatt != null) {
                if (pin.length != 8) {
                    errMsg = "传入的pin长度不对";
                    return -3;
                }
                byte[] pinInstruct = HexString.parseHexString("0024000008");
                byte[] commandEnc = commandEnc(pinInstruct, pin);
                byte[] response = gatt.transmit(commandEnc, time);
                String result = HexString.toHexString(response);
                LogUtils.e("BlePurseSDK", result);
                if (result.equals("9000")) {
                    errMsg = "修改pin成功";
                } else {
                    if (result.equals("100000")) {
                        errMsg = "蓝牙钱包连接超时";
                        return -1;
                    }
                    errMsg = "修改pin失败";
                }
                return Integer.parseInt(result, 16);
            }
            errMsg = "蓝牙钱包未连接";
            return -2;
        } catch (InterruptedException var5) {
            var5.printStackTrace();
        } catch (GattError var6) {
            var6.printStackTrace();
        }
        return -2;
    }

    /**
     * Asks the wallet to generate a key pair on-card (APDU 00A3000000).
     * Return-code semantics match {@link #verifyPIN}.
     */
    public static int generateKey() {
        try {
            errMsg = "";
            if (isConnetSucess && gatt != null) {
                byte[] priKeyInstruct = HexString.parseHexString("00A3000000");
                byte[] commandEnc = commandEnc(priKeyInstruct);
                byte[] response = gatt.transmit(commandEnc, time);
                String result = HexString.toHexString(response);
                LogUtils.e("BlePurseSDK", result);
                if (result.equals("9000")) {
                    errMsg = "秘钥生成成功";
                } else {
                    if (result.equals("100000")) {
                        errMsg = "蓝牙钱包连接超时";
                        return -1;
                    }
                    errMsg = "秘钥生成失败";
                }
                return Integer.parseInt(result, 16);
            }
            errMsg = "蓝牙钱包未连接";
            return -2;
        } catch (InterruptedException var4) {
            var4.printStackTrace();
        } catch (GeneralSecurityException var5) {
            var5.printStackTrace();
        } catch (GattError var6) {
            var6.printStackTrace();
        }
        return -2;
    }

    /**
     * Resets the on-card key material (APDU 00A7000000).
     * Return-code semantics match {@link #verifyPIN}.
     */
    public static int resetKey() {
        try {
            errMsg = "";
            if (isConnetSucess && gatt != null) {
                byte[] resetKey = HexString.parseHexString("00A7000000");
                byte[] commandEnc = commandEnc(resetKey);
                byte[] response = gatt.transmit(commandEnc, time);
                String result = HexString.toHexString(response);
                LogUtils.e("BlePurseSDK", result);
                if (result.equals("9000")) {
                    errMsg = "秘钥重置成功";
                } else {
                    if (result.equals("100000")) {
                        errMsg = "蓝牙钱包连接超时";
                        return -1;
                    }
                    errMsg = "秘钥重置失败";
                }
                return Integer.parseInt(result, 16);
            }
            errMsg = "蓝牙钱包未连接";
            return -2;
        } catch (InterruptedException var4) {
            var4.printStackTrace();
        } catch (GeneralSecurityException var5) {
            var5.printStackTrace();
        } catch (GattError var6) {
            var6.printStackTrace();
        }
        return -2;
    }

    /**
     * Imports key material (APDU 00A9000080, data = privateKey [|| publicKey]).
     * When {@code publicKey} is null only the private key is sent.
     * Return-code semantics match {@link #verifyPIN}.
     */
    public static int importKey(byte[] privateKey, byte[] publicKey) {
        try {
            errMsg = "";
            if (isConnetSucess && gatt != null) {
                byte[] resetKeyInstruct = HexString.parseHexString("00A9000080");
                byte[] seretKey;
                if (publicKey != null) {
                    seretKey = Utils.addBytes(privateKey, publicKey);
                } else {
                    seretKey = privateKey;
                }
                byte[] commandEnc = commandEnc(resetKeyInstruct, seretKey);
                LogUtils.e("BlePurseSDK", "commandEnc:" + HexString.toHexString(commandEnc));
                byte[] response = gatt.transmit(commandEnc, time);
                String result = HexString.toHexString(response);
                LogUtils.e("BlePurseSDK", result);
                if (result.equals("9000")) {
                    errMsg = "秘钥导入成功";
                } else {
                    if (result.equals("100000")) {
                        errMsg = "蓝牙钱包连接超时";
                        return -1;
                    }
                    errMsg = "秘钥导入失败";
                }
                return Integer.parseInt(result, 16);
            }
            errMsg = "蓝牙钱包未连接";
            return -2;
        } catch (InterruptedException var7) {
            var7.printStackTrace();
        } catch (GattError var8) {
            var8.printStackTrace();
        }
        return -2;
    }

    /**
     * Fetches the wallet's public key (APDU 00A5000040) and decrypts the
     * response body under the session decryption key.
     *
     * @return the decrypted key bytes, or null when disconnected or on error.
     */
    public static byte[] getPublicKey() {
        try {
            errMsg = "";
            if (!isConnetSucess || gatt == null) {
                errMsg = "蓝牙钱包未连接";
                return null;
            }
            byte[] publicKeyInstruct = HexString.parseHexString("00A5000040");
            byte[] commandEnc = commandEnc(publicKeyInstruct);
            byte[] response = gatt.transmit(commandEnc, time);
            String result = HexString.toHexString(response);
            LogUtils.e("BlePurseSDK", "公钥解密前:" + result);
            // More than just a 2-byte status word means a key body is present.
            if (result.length() > 4) {
                byte[] bytes = DES.doCrypto(Utils.addBytes(response, 0, response.length - 2), skDec, 274);
                result = HexString.toHexString(bytes);
                LogUtils.e("BlePurseSDK", "skDec:" + HexString.toHexString(skDec));
                LogUtils.e("BlePurseSDK", "公钥解密后:" + result);
                errMsg = "公钥获取成功";
                return bytes;
            }
            errMsg = "未获取到公钥";
        } catch (InterruptedException var5) {
            var5.printStackTrace();
        } catch (GeneralSecurityException var6) {
            var6.printStackTrace();
        } catch (GattError var7) {
            var7.printStackTrace();
        }
        return null;
    }

    /**
     * Signs a hash on-card (APDU 00D5000040) and decrypts the returned
     * signature under the session decryption key.
     *
     * @param hash the digest to sign (P3 suggests 0x40 bytes — TODO confirm).
     * @return the decrypted signature bytes, or null on failure.
     */
    public static byte[] sign(byte[] hash) {
        try {
            errMsg = "";
            if (isConnetSucess && gatt != null) {
                byte[] signInstruct = HexString.parseHexString("00D5000040");
                byte[] commandEnc = commandEnc(signInstruct, hash);
                byte[] response = gatt.transmit(commandEnc, time);
                String result = HexString.toHexString(response);
                LogUtils.e("BlePurseSDK", "签名解密前:" + result);
                if (result.length() > 4) {
                    byte[] bytes = DES.doCrypto(Utils.addBytes(response, 0, response.length - 2), skDec, 274);
                    result = HexString.toHexString(bytes);
                    LogUtils.e("BlePurseSDK", "skDec:" + HexString.toHexString(skDec));
                    LogUtils.e("BlePurseSDK", "签名解密后:" + result);
                    errMsg = "签名成功";
                    return bytes;
                }
                errMsg = "加密失败";
                return null;
            }
            errMsg = "蓝牙钱包未校验";
            return null;
        } catch (InterruptedException var6) {
            var6.printStackTrace();
        } catch (GattError var7) {
            var7.printStackTrace();
        }
        return null;
    }

    /**
     * Closes the GATT channel.
     *
     * @return 0 on success (or when nothing was connected), the library's
     *         close code on failure, -2 when interrupted. On failed close the
     *         thread sleeps 5s — presumably to let the stack settle; confirm.
     */
    public static int closeBlePurse() {
        try {
            errMsg = "";
            if (isConnetSucess && gatt != null) {
                int retCode = gatt.close(time);
                if (retCode == 0) {
                    errMsg = "蓝牙钱包已关闭";
                    isConnetSucess = false;
                    return retCode;
                } else {
                    errMsg = "蓝牙钱包关闭失败";
                    LogUtils.e("ble", "disconnect failed, retCode=" + retCode);
                    Thread.sleep(5000L);
                    return retCode;
                }
            } else {
                errMsg = "蓝牙钱包未连接";
                return 0;
            }
        } catch (InterruptedException var1) {
            var1.printStackTrace();
            return -2;
        }
    }

    /** Human-readable status message of the most recent operation. */
    public static String getErrMsg() {
        return errMsg;
    }

    /** Overrides the default 25s transmit/receive timeout (milliseconds). */
    public static void setDefaultTime(int timeout) {
        time = timeout;
    }
}
|
package ru.zahara.fluidapi.block;
import net.minecraft.block.BlockState;
import net.minecraft.fluid.FlowingFluid;
import net.minecraft.fluid.Fluid;
import net.minecraft.fluid.Fluids;
import net.minecraft.util.IStringSerializable;
import ru.zahara.fluidapi.util.EnumUtil;
import java.util.Optional;
import java.util.function.Predicate;
public enum LoggableFluidsEnum implements IStringSerializable
{
EMPTY(null, "empty", state -> true),
LAVA(Fluids.LAVA, "lava", state -> state.getMaterial().isFlammable());
public final Optional<FlowingFluid> contains;
public final String name;
public final Predicate<BlockState> isNotAllowed;
LoggableFluidsEnum(FlowingFluid contains, String name, Predicate<BlockState> isNotAllowed)
{
this.contains = Optional.ofNullable(contains);
this.name = name;
this.isNotAllowed = isNotAllowed;
}
public static boolean canContainFluid(BlockState state, Fluid fluid)
{
return !getByFluid(fluid).isNotAllowed.test(state);
}
public static LoggableFluidsEnum getByFluid(Fluid fluid)
{
for (LoggableFluidsEnum i : LoggableFluidsEnum.values())
if (i.contains.isPresent() && i.contains.get() == fluid)
return i;
return EMPTY;
}
public static boolean hasFluid(Fluid fluid)
{
return getByFluid(fluid) != EMPTY;
}
public static LoggableFluidsEnum addValue(FlowingFluid contains, String name, Predicate<BlockState> isNotAllowed)
{
return EnumUtil.addEnum(LoggableFluidsEnum.class, name, new Class[]{FlowingFluid.class, String.class, Predicate.class}, contains, name, isNotAllowed);
}
@Override
public String getName() {
return name;
}
} |
import os
from dotenv import load_dotenv
from dotenv import dotenv_values
def load_env_vars(filename):
    """Load environment variables from *filename* into ``os.environ``.

    ``load_dotenv`` populates the environment but, by default, does not
    override variables that are already set, so the explicit loop below
    re-applies every value from the file to guarantee the file wins.

    NOTE(review): python-dotenv appears to silently no-op on a missing file
    rather than raising, so the original ``except ... raise e`` wrapper was
    dead code and has been removed — it only re-raised the same exception.

    :param filename: path to a dotenv-format file (e.g. ``'.env'``).
    :raises OSError: propagated unchanged from underlying file access.
    """
    load_dotenv(filename)
    for key, value in dotenv_values(filename).items():
        os.environ[key] = value
# Example usage: load the .env file and echo a few expected variables.
# NOTE(review): printing DB credentials is fine for a demo but should never
# ship in real code — secrets do not belong in logs/stdout.
try:
    load_env_vars('.env')
    print(os.environ['DB_HOST'])  # Output: localhost
    print(os.environ['DB_USER'])  # Output: admin
    print(os.environ['DB_PASSWORD'])  # Output: secretpassword
except (FileNotFoundError, IOError) as e:
    # KeyError from os.environ (variable missing from the file) is NOT
    # caught here — only file-access errors are.
    print(f"Error loading environment variables: {e}")
// Public API barrel: everything consumers of this package may import.

// GraphQL layer — schema, types and resolver modules.
export * from './graphql/session'
export * from './graphql/navigation'
export * from './graphql/author'
export * from './graphql/image'
export * from './graphql/blocks'
export * from './graphql/article'
export * from './graphql/page'
export * from './graphql/peer'
export * from './graphql/token'
export * from './graphql/richText'
export * from './graphql/slug'
export * from './graphql/mutation'
export * from './graphql/query'
export * from './graphql/schema'
export * from './graphql/permissions'

// Database model/adapter layer.
export * from './db/user'
export * from './db/userRole'
export * from './db/session'
export * from './db/navigation'
export * from './db/author'
export * from './db/image'
export * from './db/block'
export * from './db/article'
export * from './db/page'
export * from './db/common'
export * from './db/adapter'
export * from './db/peer'
export * from './db/token'

// Infrastructure adapters and shared runtime pieces.
export * from './mediaAdapter'
export * from './urlAdapter'
export * from './utility'
export * from './error'
export * from './context'
export * from './server'
|
from flask import Flask, request, jsonify
from flask_restful import Resource, Api

app = Flask(__name__)
api = Api(app)

# In-memory store of employee dicts ({'id', 'name', 'salary'}).
# Module-level state: not persisted and not safe under concurrent writes.
employees = []


class EmployeeList(Resource):
    """Collection endpoint: GET /employees returns every stored employee."""

    def get(self):
        return {'employees': employees}


api.add_resource(EmployeeList, '/employees')


class Employee(Resource):
    """Single-employee endpoint: GET/POST /employees/<employee_id>."""

    def get(self, employee_id):
        # next() avoids building a throwaway list just to test emptiness.
        employee = next((e for e in employees if e['id'] == employee_id), None)
        if employee is None:
            return {'message': 'No employee found'}, 404
        return {'employee': employee}

    def post(self, employee_id):
        # silent=True: malformed/missing JSON yields None instead of a 400
        # abort mid-handler, so we can produce a consistent error body.
        data = request.get_json(silent=True) or {}
        # Validate before mutating the store; the original indexed data['name']
        # directly, turning a missing field into a KeyError -> HTTP 500.
        missing = [field for field in ('name', 'salary') if field not in data]
        if missing:
            return {'message': 'Missing fields: ' + ', '.join(missing)}, 400
        if any(e['id'] == employee_id for e in employees):
            return {'message': 'Employee already exists'}, 409
        employee = {
            'id': employee_id,
            'name': data['name'],
            'salary': data['salary']
        }
        employees.append(employee)
        return {'message': 'Employee added successfully', 'employee': employee}, 201


api.add_resource(Employee, '/employees/<int:employee_id>')

if __name__ == '__main__':
    app.run(debug=True)
#!/bin/bash
# Rebuild the 'zaim' Elasticsearch index: drop, recreate, bulk-load.

# Absolute directory of this script, so relative data files resolve
# regardless of the caller's working directory.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Drop any existing index; a 404 from a missing index is harmless here.
curl -XDELETE 'http://elasticsearch:9200/zaim'

# Recreate the index/mapping. $(...) replaces the legacy backtick form and
# nests quoting cleanly.
eval "$(cat "$DIR/command")"

# Bulk-load the scraped documents; the @path is quoted so a DIR containing
# spaces no longer breaks the curl invocation.
curl -XPOST "http://elasticsearch:9200/zaim/scrape-money/_bulk?pretty" --data-binary "@$DIR/data.json"
|
#!/bin/bash
# Container entrypoint: optionally creates a user/group matching the host's
# UID/GID, initializes the mounted workspace, sources the Xilinx ISE
# environment, then execs the requested command (default: interactive bash).
set -e
# Process arguments
# Flags taking a value shift twice (flag + value); boolean flags shift once.
# The loop exits at the first unrecognized token, which together with any
# remaining arguments becomes the command to exec at the bottom.
while [ 1 ]; do
case $1 in
"--uid")
shift
HOST_UID="$1"
shift
;;
"--user")
shift
HOST_USER="$1"
shift
;;
"--gid")
shift
HOST_GID="$1"
shift
;;
"--group")
shift
HOST_GROUP="$1"
shift
;;
"--version")
shift
VERSION="$1"
shift
;;
"--install")
shift
INSTALL="YES"
;;
"--root")
shift
ROOT_LOGIN="YES"
;;
"--create-user")
shift
CREATE_USER="YES"
;;
"--init-workspace")
shift
INIT_WORKSPACE="YES"
;;
*)
break
;;
esac
done
# protects xilinx settings file from our shell arguments
# (the settings script is sourced, so `set -e` is suspended around it because
# vendor scripts commonly return nonzero from harmless commands).
# The arch|sed pipeline maps x86_64 -> settings64.sh and i386 -> settings32.sh.
function apply_xilinx_settings {
set +e
. /opt/Xilinx/${VERSION}/ISE_DS/settings$(arch | sed s/x86_64/64/ | sed s/i386/32/).sh
set -e
}
# Create a group/user mirroring the host identity so files written to the
# bind-mounted workspace keep the host's ownership.
if [[ -n $CREATE_USER ]]; then
groupadd --gid "${HOST_GID}" "${HOST_GROUP}"
useradd --gid "${HOST_GID}" --uid "${HOST_UID}" --home-dir /home/workspace --no-create-home "${HOST_USER}"
fi
# Seed the workspace with /etc/skel contents, chowned to the host identity.
# The copy goes through /tmp so the chown never touches files already in the
# workspace.
if [[ -n $INIT_WORKSPACE ]]; then
echo "initializing workspace"
chown "${HOST_UID}:${HOST_GID}" /home/workspace
cp -R /etc/skel /tmp/skel
chown -R "${HOST_UID}:${HOST_GID}" /tmp/skel
cp -pR /tmp/skel/. /home/workspace/
rm -rf /tmp/skel
fi
# Install mode exposes the mounted installer instead of sourcing the (not yet
# installed) Xilinx environment.
if [[ -n $INSTALL ]]; then
export PATH="$PATH:/media/install"
else
apply_xilinx_settings;
fi
# No command given -> drop into a shell.
if [[ $# == 0 ]]; then
set -- "/bin/bash"
fi
# exec replaces this script so signals reach the real process; gosu drops
# privileges to the host user unless --root was requested.
if [[ -n $ROOT_LOGIN ]]; then
exec "$@"
else
exec gosu "${HOST_USER}" "$@"
fi
|
<filename>lib/sunrise/config/show.rb<gh_stars>1-10
# frozen_string_literal: true
require 'sunrise/config/base'
require 'sunrise/config/has_fields'
module Sunrise
  module Config
    # Configuration object for the admin "show" (record detail) page.
    # All behaviour comes from Base plus the HasFields field-declaration DSL;
    # this class only ties the two together.
    class Show < Base
      include Sunrise::Config::HasFields
    end
  end
end
|
<filename>rest-service-provider-demo/src/main/java/com/darian/restserviceproviderdemo/reactive/web/ReactiveWebEndpointConfiguration.java
package com.darian.restserviceproviderdemo.reactive.web;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.reactive.function.server.RouterFunction;
import org.springframework.web.reactive.function.server.ServerResponse;
import reactor.core.publisher.Mono;
import java.sql.Time;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import static org.springframework.web.reactive.function.server.RequestPredicates.GET;
import static org.springframework.web.reactive.function.server.RouterFunctions.route;
/***
*
*
* @author <a href="mailto:<EMAIL>">Darian</a>
* @date 2020/5/27 10:13
*/
@Configuration
@Slf4j
public class ReactiveWebEndpointConfiguration {
@GetMapping("/helloWorld_1")
@ResponseBody
public String helloWorld() {
return "hello, world!";
}
/**
* Response
* -- status = "200"
* -- body = "hello, world";
* HTTP Method = GET
* HTTP URI = /helloWorld
* Q: 这比传统 Controller 优势在哪里
* --
* return ServerResponse
* Reactor
* Publish -> Mono(0,1) | Flux(0,n)
*/
@Bean
public RouterFunction<ServerResponse> helloWorldRouteFunction() {
return route(GET("/helloWorld"),
request -> {
waitAWhile(200);
return ServerResponse.ok() // 200
.body(Mono.just("Hello, world"), String.class);
});
}
private void waitAWhile(int time) {
long randomInt = new Random().nextInt(time);
try {
TimeUnit.MILLISECONDS.sleep(randomInt);
log.info("sleep: " + randomInt + " ms");
} catch (InterruptedException e) {
e.printStackTrace();
}
}
} |
<filename>app/helpers/gql_helper.rb
# View/controller helper exposing a thin convenience layer over the GQL
# query interface: look up nodes/carriers on either the present or the
# future graph.
module GqlHelper
  # The per-request GQL instance; @gql is assigned elsewhere (presumably a
  # controller before-filter — confirm against the including controller).
  def gql
    @gql
  end

  # Returns the graph for the given period: :present selects the present
  # graph, anything else falls through to the future graph.
  def graph(period)
    if period == :present
      gql.present_graph
    else
      gql.future_graph
    end
  end

  # Runs a raw GQL query string against the gql instance.
  def gql_query(q)
    gql.query(q)
  end

  # Single node lookup by key on the chosen period's graph.
  def node(key, period = :present)
    graph(period).node(key)
  end

  # NOTE(review): returns graph.nodes, byte-for-byte identical to #nodes
  # below — presumably a legacy alias kept for older templates; confirm
  # before consolidating.
  def carriers(period = :present)
    graph(period).nodes
  end

  # All nodes of the chosen period's graph.
  def nodes(period = :present)
    graph(period).nodes
  end

  # Single carrier lookup by key on the chosen period's graph.
  def carrier(key, period = :present)
    graph(period).carrier(key)
  end
end
|
# Generated by Django 2.2.17 on 2021-03-31 15:44
from django.db import migrations
def remove_UserAdmin_group_from_security_if_not_FIU(apps, schema_editor):
    """Data migration: strip the 'UserAdmin' group from every non-superuser
    in the 'Security' group who is NOT also in the 'FIU' group.

    Uses apps.get_model() so the historical (migration-time) model classes
    are used rather than the live ones.
    """
    user_model = apps.get_model('auth', 'user')
    group_model = apps.get_model('auth', 'group')
    security_users = user_model.objects.filter(
        groups__name='Security',
        is_superuser=False
    ).exclude(
        groups__name='FIU'
    ).all()
    for security_user in security_users:
        # Defensive re-check: the .exclude() above should already guarantee
        # this; the assert documents (and enforces) that invariant.
        assert not security_user.groups.filter(name='FIU').exists()
        if security_user.groups.filter(name='UserAdmin').exists():
            # print() is the conventional progress channel inside migrations.
            print(f'Removing UserAdmin group from user with id {security_user.pk}')
            security_user.groups.remove(group_model.objects.get(name='UserAdmin'))
class Migration(migrations.Migration):
    # NOTE(review): RunPython has no reverse_code, so this migration is
    # irreversible (migrating backwards will fail) — confirm that is intended.

    dependencies = [
        ('mtp_auth', '0018_Add_UserAdmin_group_to_FIU'),
    ]
    operations = [
        migrations.RunPython(code=remove_UserAdmin_group_from_security_if_not_FIU)
    ]
|
import JSONAPIAdapter from '@ember-data/adapter/json-api';
import JSONAPISerializer from '@ember-data/serializer/json-api';
import Model, { attr, hasMany } from '@ember-data/model';
import { module, test } from 'qunit';
import { setupRenderingTest } from 'ember-qunit';
import '@ember/test-helpers';
import RSVP from 'rsvp';
import AdapterMixin from 'ember-resource-metadata/adapter-mixin';
// Canned server responses (shifted off per request) and a log of every
// request the fake adapter sees — reset before each test.
let answers;
let requests;

// Integration tests for ember-resource-metadata's AdapterMixin: verifies
// that per-resource `meta` from JSON:API payloads is captured into the
// resource-metadata service across find/query/save/create and includes.
module('Integration | Adapter | adapter', function(hooks) {
setupRenderingTest(hooks);
hooks.beforeEach(function() {
answers = [];
requests = [];
this.owner.register('serializer:application', JSONAPISerializer);
this.owner.register('model:example', Model.extend({
title: attr('string'),
references: hasMany('references', { async: false })
}));
this.owner.register('model:reference', Model.extend({
source: attr('string')
}));
// Fake network layer: record the request, reply with the next canned answer.
this.owner.register('adapter:example', JSONAPIAdapter.extend(AdapterMixin, {
ajax(url, type, options) {
requests.push({ url, type, options });
return RSVP.resolve(answers.shift());
}
}));
this.metadata = this.owner.lookup('service:resource-metadata');
this.store = this.owner.lookup('service:store');
});
// findRecord: meta on the primary resource lands in the metadata service.
test('it sets meta when loading a record for the first time', function(assert) {
answers.push({
data: {
type: 'examples',
id: 1,
meta: {
something: 42
}
}
});
return RSVP.resolve().then(() => {
return this.get('store').findRecord('example', 1);
}).then(record => {
assert.equal(this.get('metadata').read(record).get('something'), 42);
});
});
// findRecord options (e.g. include) must be forwarded to the network layer.
test('it passes options through to super findRecord', function(assert) {
answers.push({
data: {
type: 'examples',
id: 1,
meta: {
something: 42
}
}
});
return RSVP.resolve().then(() => {
return this.get('store').findRecord('example', 1, { include: 'other' });
}).then(() => {
assert.deepEqual(requests[0].options, { data: { include: 'other'} });
});
});
// Same capture behaviour via queryRecord.
test('it sets meta when loading a record for the first time via queryRecord', function(assert) {
answers.push({
data: {
type: 'examples',
id: 1,
meta: {
something: 42
}
}
});
return RSVP.resolve().then(() => {
return this.get('store').queryRecord('example', {});
}).then(record => {
assert.equal(this.get('metadata').read(record).get('something'), 42);
});
});
// Same capture behaviour via query (array payload).
test('it sets meta when loading a record for the first time via query', function(assert) {
answers.push({
data: [{
type: 'examples',
id: 1,
meta: {
something: 42
}
}]
});
return RSVP.resolve().then(() => {
return this.get('store').query('example', {});
}).then(records => {
assert.equal(this.get('metadata').read(records.get('firstObject')).get('something'), 42);
});
});
// save() on an existing record refreshes the stored meta (42 -> 43).
test('it updates meta when a record is updated', function(assert) {
answers.push({
data: {
type: 'examples',
id: 1,
meta: {
something: 42
}
}
});
answers.push({
data: {
type: 'examples',
id: 1,
meta: {
something: 43
}
}
});
return RSVP.resolve().then(() => {
return this.get('store').findRecord('example', 1);
}).then(record => {
return record.save();
}).then(record => {
assert.equal(this.get('metadata').read(record).get('something'), 43);
});
});
// createRecord + save captures meta from the creation response.
test('it updates meta when a record is created', function(assert) {
answers.push({
data: {
type: 'examples',
id: 1,
meta: {
something: 43
}
}
});
return RSVP.resolve().then(() => {
return this.get('store').createRecord('example');
}).then(record => {
return record.save();
}).then(record => {
assert.equal(this.get('metadata').read(record).get('something'), 43);
});
});
// Meta on `included` side-loaded resources is captured too, keyed by
// { id, type } rather than by record instance.
test('it sets meta in included records', function(assert) {
answers.push({
data: {
type: 'examples',
id: 1,
meta: {
something: 42
}
},
included: [
{
id: 1,
type: 'references',
meta: {
something: 24
}
},
{
id: 2,
type: 'references',
meta: {
something: 20
}
},
]
});
return RSVP.resolve().then(() => {
return this.get('store').findRecord('example', 1);
}).then(record => {
assert.equal(this.get('metadata').read(record).get('something'), 42);
assert.equal(this.get('metadata').read({ id: 1, type: 'reference' }).get('something'), 24);
assert.equal(this.get('metadata').read({ id: 2, type: 'reference' }).get('something'), 20);
});
});
});
|
<reponame>joelgtsantos/BestBuyApp
// Kony controller for the main (category-browsing) form: maintains a
// navigation stack of category lists, handles drill-down/back, search
// overlay animation, and hand-off to the product-list form.
define("userfrmMainController", {
//Local variables
// Stack of category lists; one entry per drill-down level ("Home" at index 0).
categoriesNav: [],
// The category list currently rendered in sgmCategories.
currentCategories: [],
HOME_CATEGORY_ID: "cat00000",
currentIndex: 0,
txtNavHistory: "Home",
//Local Methods
// Form entry point: loads the root category list unless we navigated here
// from the product list (in which case the current state is kept).
onNavigate: async function(params) {
let skipFirstLoad = false;
//Skip the initial root-category load when returning from productList.
if (params !== null && params !== undefined) {
if (params.from === "productList") {
skipFirstLoad = true;
}
}
if (skipFirstLoad === false) {
// NOTE(review): hard-codes the root id instead of using HOME_CATEGORY_ID
// declared above — presumably the same value; consider consolidating.
this.currentCategories = await setupSync("cat00000");
this.view.sgmCategories.setData(this.currentCategories);
this.categoriesNav.push(this.currentCategories);
}
},
/*
* To get information about a specific category whenever a user clicks on it
* @Param row Number integer
*/
// Drill into a category: show its subcategories if it has any, otherwise
// navigate to the product list for that (leaf) category.
onCategoryClick: async function onCategoryClick(indexCat) {
//Get subcategory list by id
const tmpCategories = await setupSync(this.currentCategories[indexCat].id);
if (tmpCategories.length > 0) {
this.view.sgmCategories.removeAll();
this.view.sgmCategories.setData(tmpCategories);
this.categoriesNav.push(tmpCategories);
// Append breadcrumb text for the level we just entered.
this.view.lblNavHistory.text += `-> ${this.currentCategories[indexCat].lblCategoryName}`;
this.currentCategories = tmpCategories;
} else {
// Leaf category: hand off to the products form.
const ntf = new kony.mvc.Navigation("frmProductsList");
const params = {};
params.categoryId = this.currentCategories[indexCat].id;
params.categoryName = this.currentCategories[indexCat].lblCategoryName;
ntf.navigate(params);
kony.application.dismissLoadingScreen();
}
//Enable back button once there is somewhere to go back to.
if (this.categoriesNav.length > 1) {
this.view.HeaderJoel.isVisibleBtnBack = true;
}
},
/*
* Pops one level off the category navigation stack and restores the
* previous list and breadcrumb text.
*/
onBtnBackClick: function onBtnBackClick() {
//Remove last subcategory list
this.categoriesNav.pop();
let txtNav = "";
//Disable back button when we are back at the root.
if (this.categoriesNav.length <= 1) {
this.view.HeaderJoel.isVisibleBtnBack = false;
}
//Remove last nav text (everything after the final "->").
let navHistory = this.view.lblNavHistory.text;
navHistory = navHistory.slice(0, navHistory.lastIndexOf("->"));
this.view.lblNavHistory.text = navHistory;
//Get Previous subcategory list
this.currentCategories = this.categoriesNav[this.categoriesNav.length - 1];
this.view.sgmCategories.removeAll();
this.view.sgmCategories.setData(this.currentCategories);
},
/*
* Slides the search overlay (flxHMenu) into view from the top.
*/
onClickBtnSearch: function onClickBtnSearch() {
try {
this.view.flxHMenu.animate(kony.ui.createAnimation({
"100": {
"top": "0%",
"stepConfig": {
"timingFunction": kony.anim.EASE
}
}
}), {
"delay": 0,
"iterationCount": 1,
"fillMode": kony.anim.FILL_MODE_FORWARDS,
"duration": 0.7
}, {
"animationEnd": function() {}
});
} catch (e) {}
},
/*
* Slides the search overlay back off-screen (top: -100%).
*/
onClickBtnClose: function onClickBtnClose() {
try {
this.view.flxHMenu.animate(kony.ui.createAnimation({
"100": {
"top": "-100%",
"stepConfig": {
"timingFunction": kony.anim.EASE
}
}
}), {
"delay": 0,
"iterationCount": 1,
"fillMode": kony.anim.FILL_MODE_FORWARDS,
"duration": 0.7
}, {
"animationEnd": function() {}
});
} catch (e) {}
},
/*
* Navigates to the product list in search mode with the entered text.
*/
onClickBtnSearchP: function onClickBtnSearchP() {
const searchText = this.view.txtSearch.text;
const ntf = new kony.mvc.Navigation("frmProductsList");
const params = {};
params.from = "search";
params.searchText = searchText;
ntf.navigate(params);
kony.application.dismissLoadingScreen();
},
/*
* Registers a slide-in (translate 250px -> 0) animation that plays whenever
* the category segment becomes visible.
*/
moveSegmentAnimation: function moveSegmentAnimation() {
let transformObj1 = kony.ui.makeAffineTransform();
transformObj1.translate(250, 0);
let transformObj2 = kony.ui.makeAffineTransform();
transformObj2.translate(0, 0);
let animationObject = kony.ui.createAnimation({
"0": {
"transform": transformObj1,
"stepConfig": {
"timingFunction": kony.anim.LINEAR
}
},
"100": {
"transform": transformObj2,
"stepConfig": {
"timingFunction": kony.anim.LINEAR
}
}
});
let animationConfig = {
duration: 1,
fillMode: kony.anim.FILL_MODE_FORWARDS
};
let animationDefObject = {
definition: animationObject,
config: animationConfig
};
this.view.sgmCategories.setAnimations({
visible: animationDefObject
});
}
});
// Auto-generated widget->handler action map for frmMain: each generated
// AS_* entry simply forwards a widget event to the matching method on the
// user controller above. Edit the user controller, not this file.
define("frmMainControllerActions", {
/*
This is an auto generated file and any modifications to it may result in corruption of the action sequence.
*/
/** onClickBtnBack defined for HeaderJoel **/
AS_UWI_g705f89ccccf4d8ca6bf0b005c9b2501: function AS_UWI_g705f89ccccf4d8ca6bf0b005c9b2501(eventobject) {
var self = this;
this.onBtnBackClick();
},
/** onClickBtnSearch defined for HeaderJoel **/
AS_UWI_a1289c20276d4b4ab5e22e65a85101ff: function AS_UWI_a1289c20276d4b4ab5e22e65a85101ff(eventobject) {
var self = this;
this.onClickBtnSearch();
},
/** onRowClick defined for sgmCategories **/
AS_Segment_e216b94e64b140a0b33d3495915c6005: function AS_Segment_e216b94e64b140a0b33d3495915c6005(eventobject, sectionNumber, rowNumber) {
var self = this;
this.onCategoryClick(rowNumber);
},
/** onTouchStart defined for imgReturn **/
AS_Image_f95c257016484171b47c03c2f035d9a0: function AS_Image_f95c257016484171b47c03c2f035d9a0(eventobject, x, y) {
var self = this;
this.onClickBtnClose();
},
/** onTouchStart defined for btnSearch **/
AS_Button_a9914bca47c64c99990bb086d18ffb1d: function AS_Button_a9914bca47c64c99990bb086d18ffb1d(eventobject, x, y) {
var self = this;
this.onClickBtnSearchP();
},
/** postShow defined for frmMain **/
AS_Form_dda301d161704c0594d8f54dc594af84: function AS_Form_dda301d161704c0594d8f54dc594af84(eventobject) {
var self = this;
this.moveSegmentAnimation();
}
});
// Compose the hand-written controller with the generated action map so the
// final form controller exposes both sets of handlers.
define("frmMainController", ["userfrmMainController", "frmMainControllerActions"], function() {
    var controller = require("userfrmMainController");
    var controllerActions = ["frmMainControllerActions"];
    return kony.visualizer.mixinControllerActions(controller, controllerActions);
});
|
<filename>src/common/dna/ArrayRenderer.ts<gh_stars>0
import { Shader } from './Shader'
import { Texture2D } from './Texture2D'
import { Vec3, Mat4 } from './tglm'
declare var gl: WebGL2RenderingContext;
// Default sprite tint: pure white, i.e. no tint applied.
const WHITE: Float32Array = Vec3.Create(1, 1, 1)

/** Axis-aligned rectangle: top-left corner plus size, in pixels. */
export class Rect {
    public constructor(
        public x: number,
        public y: number,
        public width: number,
        public height: number
    ) {}
}
/**
 * Draws textured, tinted, optionally rotated quads using a single shared
 * unit-quad vertex buffer (two triangles, interleaved pos/tex coords).
 */
export class ArrayRenderer {
    shader: Shader
    VAO: WebGLVertexArrayObject
    VBO: WebGLBuffer
    /**
     * Builds the unit-quad VAO/VBO once; `Draw` reuses it for every sprite.
     * @param shader shader providing `model` and `spriteColor` uniforms
     */
    public constructor(shader: Shader) {
        this.shader = shader
        // Unit quad in [0,1]^2; each vertex is vec4(pos.xy, tex.xy).
        const vertices = new Float32Array([
            // Pos      // Tex
            0.0, 1.0, 0.0, 1.0,
            1.0, 0.0, 1.0, 0.0,
            0.0, 0.0, 0.0, 0.0,

            0.0, 1.0, 0.0, 1.0,
            1.0, 1.0, 1.0, 1.0,
            1.0, 0.0, 1.0, 0.0
        ])
        // NOTE(review): createVertexArray/createBuffer may return null on a
        // lost context; assignment relies on strictNullChecks being off — confirm.
        this.VAO = gl.createVertexArray()
        this.VBO = gl.createBuffer()
        gl.bindBuffer(gl.ARRAY_BUFFER, this.VBO)
        gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW)
        gl.bindVertexArray(this.VAO)
        gl.enableVertexAttribArray(0)
        // One interleaved vec4 attribute: stride = 4 floats * 4 bytes.
        gl.vertexAttribPointer(0, 4, gl.FLOAT, false, 4 * 4, 0)
        gl.bindBuffer(gl.ARRAY_BUFFER, null)
        gl.bindVertexArray(null)
    }
    /**
     * Draws `texture` into `bounds`, rotated by `rotate` radians around the
     * quad center and tinted by `color` (defaults to white = untinted).
     */
    public Draw(texture: Texture2D, bounds: Rect, rotate: number = 0, color: Float32Array = WHITE) {
        this.shader.Use()
        var model = Mat4.Create(1)
        /**
         * transformations are:
         * scale happens first,
         * then rotation and
         * then finally translation happens;
         *
         * reversed the order to get:
         */
        // 1) translate position:
        Mat4.Translate(model, model, Vec3.Create(bounds.x, bounds.y, 0))
        // 2) rotate:
        // a. Move origin of rotation to center of quad
        Mat4.Translate(model, model, Vec3.Create(0.5 * bounds.width, 0.5 * bounds.height, 0))
        // b. Then rotate
        Mat4.Rotate(model, model, rotate, Vec3.Create(0, 0, 1))
        // c. Move origin back
        Mat4.Translate(model, model, Vec3.Create(-0.5 * bounds.width, -0.5 * bounds.height, 0))
        // 3) scale
        Mat4.Scale(model, model, Vec3.Create(bounds.width, bounds.height, 1))
        this.shader.SetMatrix("model", model)
        this.shader.SetVector3("spriteColor", color)
        gl.activeTexture(gl.TEXTURE0)
        texture.Bind()
        gl.bindVertexArray(this.VAO)
        gl.drawArrays(gl.TRIANGLES, 0, 6)
        gl.bindVertexArray(null)
    }
}
|
#!/bin/bash -e
# The script does automatic checking on a Go package and its sub-packages, including:
# 1. gofmt (http://golang.org/cmd/gofmt/)
# 2. goimports (https://github.com/bradfitz/goimports)
# 3. golint (https://github.com/golang/lint)
# 4. go vet (http://golang.org/cmd/vet)
# 5. race detector (http://blog.golang.org/race-detector)
# 6. test coverage (http://blog.golang.org/cover)
# Capture what test we should run
TEST_SUITE=$1
if [[ $TEST_SUITE == "unit" ]]; then
# Fetch tooling and test dependencies.
go get github.com/axw/gocov/gocov
go get github.com/mattn/goveralls
go get -u github.com/golang/lint/golint
go get golang.org/x/tools/cmd/goimports
go get github.com/smartystreets/goconvey/convey
go get golang.org/x/tools/cmd/cover
go get github.com/fsouza/go-dockerclient
go get github.com/stretchr/testify/mock
# NOTE(review): repo token committed in plain text — rotate it and move it to
# CI secret storage instead of the script.
COVERALLS_TOKEN=t47LG6BQsfLwb9WxB56hXUezvwpED6D11
TEST_DIRS="main.go ./docker ./client"
VET_DIRS=". ./docker/... ./client/..."
# Redundant with `#!/bin/bash -e` above, but harmless.
set -e
# Automatic checks: fail if gofmt/goimports would produce any diff.
echo "gofmt"
test -z "$(gofmt -l -d $TEST_DIRS | tee /dev/stderr)"
echo "goimports"
test -z "$(goimports -l -d $TEST_DIRS | tee /dev/stderr)"
# Useful but should not fail on link per: https://github.com/golang/lint
# "The suggestions made by golint are exactly that: suggestions. Golint is not perfect,
# and has both false positives and false negatives. Do not treat its output as a gold standard.
# We will not be adding pragmas or other knobs to suppress specific warnings, so do not expect
# or require code to be completely "lint-free". In short, this tool is not, and will never be,
# trustworthy enough for its suggestions to be enforced automatically, for example as part of
# a build process"
# echo "golint"
# golint ./...
echo "go vet"
go vet $VET_DIRS
# go test -race ./... - Lets disable for now
# Run test coverage on each subdirectories and merge the coverage profile.
echo "mode: count" > profile.cov
# Standard go tooling behavior is to ignore dirs with leading underscores
for dir in $(find . -maxdepth 10 -not -path './.git*' -not -path '*/_*' -not -path './examples/*' -not -path './scripts/*' -not -path './build/*' -not -path './Godeps/*' -type d);
do
if ls $dir/*.go &> /dev/null; then
    go test --tags=unit -covermode=count -coverprofile=$dir/profile.tmp $dir
    if [ -f $dir/profile.tmp ]
    then
        # Strip each per-package profile's "mode:" header before merging.
        cat $dir/profile.tmp | tail -n +2 >> profile.cov
        rm $dir/profile.tmp
    fi
fi
done
go tool cover -func profile.cov
# Disabled Coveralls.io for now
# To submit the test coverage result to coveralls.io,
# use goveralls (https://github.com/mattn/goveralls)
# goveralls -coverprofile=profile.cov -service=travis-ci -repotoken t47LG6BQsfLwb9WxB56hXUezvwpED6D11
#
# If running inside Travis we update coveralls. We don't want his happening on Macs
# if [ "$TRAVIS" == "true" ]
# then
#   n=1
#   until [ $n -ge 6 ]
#   do
#     echo "posting to coveralls attempt $n of 5"
#     goveralls -v -coverprofile=profile.cov -service travis.ci -repotoken $COVERALLS_TOKEN && break
#     n=$[$n+1]
#     sleep 30
#   done
# fi
fi
|
<filename>lib/tasks/seed.rake
# Generates one rake task per file in db/seeds/*.rb, e.g.
# `rake db:seed:users` for db/seeds/users.rb. Each task loads its seed file
# and then also invokes the standard db:seed task.
namespace :db do
  namespace :seed do
    Dir[Rails.root.join('db', 'seeds', '*.rb')].each do |filename|
      desc "Loads the seed data from db/seeds/#{File.basename(filename)}"
      task File.basename(filename, '.rb').to_sym => :environment do
        load(filename)
        # NOTE(review): this also runs db/seeds.rb after every per-file task —
        # confirm that is intended rather than loading only the one file.
        Rake::Task["db:seed"].invoke
      end
    end
  end
end
|
package com.example.android;
import android.os.AsyncTask;
import android.os.Bundle;
import android.view.View;
import android.widget.ListView;
import androidx.appcompat.app.AppCompatActivity;
import androidx.swiperefreshlayout.widget.SwipeRefreshLayout;
import org.json.JSONArray;
import org.json.JSONException;
import java.io.IOException;
import java.util.ArrayList;
public class MainActivity extends AppCompatActivity {
private SwipeRefreshLayout swipeRefreshLayout;
private ListView listView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
listView = findViewById(R.id.list_view);
swipeRefreshLayout = findViewById(R.id.swipe_refresh_layout);
swipeRefreshLayout.setOnRefreshListener(new SwipeRefreshLayout.OnRefreshListener() {
@Override
public void onRefresh() {
new ApiAsyncTask().execute();
}
});
}
class ApiAsyncTask extends AsyncTask<Void, Void, String> {
@Override
protected String doInBackground(Void... voids) {
// Get data from REST API
try {
// ...
} catch (IOException e) {
e.printStackTrace();
}
return data; // Return response data.
}
@Override
protected void onPostExecute(String data) {
// Parse the response data and update the list.
ArrayList<String> list = new ArrayList<>();
try {
JSONArray jsonArray = new JSONArray(data);
for (int i = 0; i < jsonArray.length(); i++) {
list.add(jsonArray.getString(i));
}
} catch (JSONException e) {
e.printStackTrace();
}
// Set the list view adapter.
listView.setAdapter(new ListViewAdapter(list));
// Stop the swipe refresh layout.
swipeRefreshLayout.setRefreshing(false);
}
}
} |
# Evaluate checkpoint 7 of the 1024+0+512-shuffled-N model on the WikiText-103
# validation set, with batch size 1 and the noun/verb fill augmentation applied,
# scoring only the penultimate sixth of each sequence.
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-shuffled-N/7-model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-shuffled-N/7-1024+0+512-N-VB-FILL-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function remove_all_but_nouns_and_verbs_fill_first_two_thirds_sixth --eval_function penultimate_sixth_eval
export * from "./cellMaterial";
|
#!/bin/bash
# Entry point for running the service: `service.sh {start|develop|test}`.
#   start   - run the app directly via yarn
#   develop - build and run the development docker-compose stack
#   test    - build and run the test docker-compose stack
case "$1" in
    start)
        yarn start
        ;;
    develop)
        # Fail fast with a clear message when docker-compose is not installed.
        type docker-compose >/dev/null 2>&1 || { echo >&2 "docker-compose is required but it's not installed. Aborting."; exit 1; }
        docker-compose -f docker-compose-develop.yml build && docker-compose -f docker-compose-develop.yml up
        ;;
    test)
        type docker-compose >/dev/null 2>&1 || { echo >&2 "docker-compose is required but it's not installed. Aborting."; exit 1; }
        docker-compose -f docker-compose-test.yml build && docker-compose -f docker-compose-test.yml up
        ;;
    *)
        # Fix: the usage line advertised `test-e2e` and `test-unit`, which have
        # no corresponding case branches; list only the real commands.
        echo "Usage: service.sh {start|develop|test}" >&2
        exit 1
        ;;
esac
exit 0
|
<reponame>ender8282/jython
/* Copyright (c) Jython Developers */
package org.python.core;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
import java.util.logging.Level;
import org.python.core.util.FileUtil;
import org.python.core.util.StringUtil;
import org.python.core.util.importer;
import org.python.expose.ExposedMethod;
import org.python.expose.ExposedNew;
import org.python.expose.ExposedType;
import org.python.util.Generic;
@Untraversable
@ExposedType(name="ClasspathPyImporter")
public class ClasspathPyImporter extends importer<String> {

    /** Path prefix that marks a path as belonging to this importer. */
    public static final String PYCLASSPATH_PREFIX = "__pyclasspath__/";
    public static final PyType TYPE = PyType.fromClass(ClasspathPyImporter.class);

    /** Subtype constructor, used when Python code subclasses this type. */
    public ClasspathPyImporter(PyType subType) {
        super(subType);
    }

    public ClasspathPyImporter() {
        super();
    }

    /**
     * Python-level __init__: accepts a single "path" argument, which must
     * start with {@link #PYCLASSPATH_PREFIX}; a trailing "/" is appended if
     * missing. Raises ImportError for any other path.
     */
    @ExposedNew
    @ExposedMethod
    final void ClasspathPyImporter___init__(PyObject[] args, String[] kwds) {
        ArgParser ap = new ArgParser("__init__", args, kwds, new String[] {"path"});
        String path = ap.getString(0);
        if (path == null || !path.startsWith(PYCLASSPATH_PREFIX)) {
            throw Py.ImportError("path isn't for classpath importer");
        }
        if (!path.endsWith("/")) {
            path += "/";
        }
        this.path = path;
    }

    /**
     * Return the contents of the jarred file at the specified path
     * as bytes.
     *
     * @param path a String path name within the archive
     * @return a String of data in binary mode (no CRLF)
     */
    @Override
    public String get_data(String path) {
        return ClasspathPyImporter_get_data(path);
    }

    @ExposedMethod
    final String ClasspathPyImporter_get_data(String path) {
        // Strip any leading occurrence of the hook string
        int len = PYCLASSPATH_PREFIX.length();
        if (len < path.length() && path.startsWith(PYCLASSPATH_PREFIX)) {
            path = path.substring(len);
        }
        // Bundle wraps the stream together with a close operation
        try (Bundle bundle = makeBundle(path, makeEntry(path))) {
            byte[] data = FileUtil.readBytes(bundle.inputStream);
            return StringUtil.fromBytes(data);
        } catch (IOException ioe) {
            throw Py.IOError(ioe);
        }
    }

    /**
     * Return the source code for the module as a string (using
     * newline characters for line endings)
     *
     * @param fullname the fully qualified name of the module
     * @return a String of the module's source code or null
     */
    public String get_source(String fullname) {
        return ClasspathPyImporter_get_source(fullname);
    }

    @ExposedMethod
    final String ClasspathPyImporter_get_source(String fullname) {
        ModuleInfo moduleInfo = getModuleInfo(fullname);

        if (moduleInfo == ModuleInfo.ERROR) {
            return null;
        } else if (moduleInfo == ModuleInfo.NOT_FOUND) {
            throw Py.ImportError(String.format("can't find module '%s'", fullname));
        } else {
            // Turn the module name into a source file name
            String path = makeFilename(fullname);
            if (moduleInfo == ModuleInfo.PACKAGE) {
                path += File.separator + "__init__.py";
            } else {
                path += ".py";
            }

            // Bundle wraps the stream together with a close operation
            try (Bundle bundle = makeBundle(path, makeEntry(path))) {
                InputStream is = bundle.inputStream;
                if (is != null) {
                    byte[] data = FileUtil.readBytes(is);
                    return StringUtil.fromBytes(data);
                } else {
                    // we have the module, but no source
                    return null;
                }
            } catch (IOException ioe) {
                throw Py.IOError(ioe);
            }
        }
    }

    /**
     * Find the module for the fully qualified name.
     *
     * @param fullname the fully qualified name of the module
     * @param path if not installed on the meta-path None or a module path
     * @return a loader instance if this importer can load the module, None
     *         otherwise
     */
    @ExposedMethod(defaults = "null")
    final PyObject ClasspathPyImporter_find_module(String fullname, String path) {
        return importer_find_module(fullname, path);
    }

    /**
     * Determine whether a module is a package.
     *
     * @param fullname the fully qualified name of the module
     * @return whether the module is a package
     */
    @ExposedMethod
    final boolean ClasspathPyImporter_is_package(String fullname) {
        return importer_is_package(fullname);
    }

    /**
     * Return the code object associated with the module.
     *
     * @param fullname the fully qualified name of the module
     * @return the module's PyCode object or None
     */
    @ExposedMethod
    final PyObject ClasspathPyImporter_get_code(String fullname) {
        ModuleCodeData moduleCodeData = getModuleCode(fullname);
        if (moduleCodeData != null) {
            return moduleCodeData.code;
        }
        return Py.None;
    }

    /**
     * Load a module for the fully qualified name.
     *
     * @param fullname the fully qualified name of the module
     * @return a loaded PyModule
     */
    @ExposedMethod
    final PyObject ClasspathPyImporter_load_module(String fullname) {
        return importer_load_module(fullname);
    }

    @Override
    protected long getSourceMtime(String path) {
        // Can't determine this easily
        return -1;
    }

    /**
     * Wrap the InputStream previously cached under {@code entry} in a Bundle
     * whose close() closes the stream; the entry is removed from the cache.
     */
    @Override
    protected Bundle makeBundle(String fullFilename, String entry) {
        InputStream is = entries.remove(entry);
        return new Bundle(is) {
            @Override
            public void close() {
                try {
                    inputStream.close();
                } catch (IOException e) {
                    throw Py.JavaError(e);
                }
            }
        };
    }

    /**
     * Resolve {@code filename} to a classpath resource, caching the opened
     * stream in {@code entries}; returns the cache key or null if not found.
     */
    @Override
    protected String makeEntry(String filename) {
        // In some contexts, the resource string arrives as from os.path.join(*parts)
        if (!getSeparator().equals(File.separator)) {
            filename = filename.replace(File.separator, getSeparator());
        }
        if (entries.containsKey(filename)) {
            return filename;
        }
        InputStream is;
        if (Py.getSystemState().getClassLoader() != null) {
            is = tryClassLoader(filename, Py.getSystemState().getClassLoader(), "sys");
        } else {
            is = tryClassLoader(filename, imp.getParentClassLoader(), "parent");
        }
        if (is != null) {
            entries.put(filename, is);
            return filename;
        }
        return null;
    }

    /** Look up {@code fullFilename} as a resource of the given class loader. */
    private InputStream tryClassLoader(String fullFilename, ClassLoader loader, String place) {
        if (loader != null) {
            logger.log(Level.FINE, "# trying {0} in {1} class loader",
                    new Object[] {fullFilename, place});
            return loader.getResourceAsStream(fullFilename);
        }
        return null;
    }

    /** Module name -> relative file path (dots become slashes, prefix stripped). */
    @Override
    protected String makeFilename(String fullname) {
        return path.replace(PYCLASSPATH_PREFIX, "") + fullname.replace('.', '/');
    }

    /** Module name -> full importer path (keeps the __pyclasspath__/ prefix). */
    @Override
    protected String makeFilePath(String fullname) {
        return path + fullname.replace('.', '/');
    }

    @Override
    protected String makePackagePath(String fullname) {
        return path;
    }

    @Override
    protected String getSeparator() {
        return "/";
    }

    // Streams opened by makeEntry, consumed (and removed) by makeBundle.
    private Map<String, InputStream> entries = Generic.map();
    // Importer root path; always ends with "/" (see __init__).
    private String path;
}
|
#!/usr/bin/env bash
# Bootstrap a Riemann development environment: install Leiningen and a JDK,
# then fetch dependencies and compile the Java sources.
set -e
# Get/update Leiningen
sudo wget https://raw.github.com/technomancy/leiningen/stable/bin/lein -O /usr/bin/lein
sudo chmod a+x /usr/bin/lein
# Make sure we have a javac
sudo aptitude install openjdk-6-jdk
cd riemann
lein deps
lein javac
echo "Riemann is ready to go. To start the server:"
echo
echo "cd ./riemann"
echo "lein run /etc/riemann/riemann.config.clj"
|
/* global expect:false, jest:false, test:false */
// Unit tests for src/markdown/setup-markdown-it: option passthrough,
// highlight.js-based code highlighting, and plugin registration.
const hljs = require("highlight.js");
const markdownIt = require("markdown-it");
const mdAnchor = require("markdown-it-anchor");
const mdToc = require("markdown-it-table-of-contents");
const setupMarkdownIt = require("../../src/markdown/setup-markdown-it");

// Mock the highlighter and plugins so tests only observe how they are wired.
jest.mock("highlight.js");
jest.mock("markdown-it-anchor");
jest.mock("markdown-it-table-of-contents");

test("it returns MarkdownIt instance", () => {
    const md = setupMarkdownIt();
    expect(md).toBeInstanceOf(markdownIt);
});

test("it can receive markdown-it options", () => {
    const highlight = () => "foo";
    const md = setupMarkdownIt({
        html: true,
        xhtmlOut: true,
        langPrefix: "foo",
        highlight
    });
    expect(md.options).toMatchObject({
        html: true,
        xhtmlOut: true,
        langPrefix: "foo",
        highlight
    });
});

test("it has default markdown-it highlight function", () => {
    const md = setupMarkdownIt();
    expect(md.options.highlight).toBeInstanceOf(Function);
});

test("it uses highlight.js for code highlighting", () => {
    hljs.getLanguage = jest.fn().mockReturnValue("js");
    hljs.highlight = jest.fn().mockReturnValue({
        language: "js",
        value: "foo"
    });
    const md = setupMarkdownIt();
    const result = md.options.highlight("12", "js");
    expect(hljs.getLanguage).toHaveBeenCalledWith("js");
    // NOTE(review): highlight(lang, code, true) is the pre-v11 highlight.js
    // signature; v11 uses highlight(code, {language}) — confirm pinned version.
    expect(hljs.highlight).toHaveBeenCalledWith("js", "12", true);
    expect(result).toMatch(/<pre[\s\S]*><code[\s\S]*>[\s\S]*foo/i);
});

test("it uses highlight.js highlightAuto if lang is empty", () => {
    hljs.highlightAuto = jest.fn().mockReturnValue({
        language: "js",
        value: "foo"
    });
    const md = setupMarkdownIt();
    const result = md.options.highlight("12");
    expect(hljs.highlightAuto).toHaveBeenCalledWith("12");
    expect(result).toMatch(/<pre[\s\S]*><code[\s\S]*>[\s\S]*foo/i);
});

test("it registers markdown-it-anchor plugin", () => {
    setupMarkdownIt();
    expect(mdAnchor).toHaveBeenCalled();
});

test("it registers markdown-it-table-of-contents plugin", () => {
    setupMarkdownIt();
    expect(mdToc).toHaveBeenCalled();
});
|
<filename>ax-boot-initialzr/src/main/resources/templates/java/domain/user/auth/UserAuthRepository.java
package ${basePackage}.domain.user.auth;
import com.chequer.axboot.core.domain.base.AXBootJPAQueryDSLRepository;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * QueryDSL-backed JPA repository for {@link UserAuth} entities keyed by
 * {@link UserAuthId}.
 */
@Repository
public interface UserAuthRepository extends AXBootJPAQueryDSLRepository<UserAuth, UserAuthId> {

    /**
     * Returns all auth rows belonging to the given user code.
     *
     * @param userCd user code to filter by
     * @return matching auth entries (empty list when none)
     */
    List<UserAuth> findByUserCd(String userCd);
}
|
<gh_stars>10-100
(page, done) =>
{
    // GSC availability check: when no Google API access token is connected,
    // report a warning result pointing the user at the settings page;
    // otherwise signal completion with no result.
    const { googleApiAccessToken } = this.getGlobals();
    if (!googleApiAccessToken) {
        const message =
            "To get the most out of this app, connect it with Google Search Console and Google Analytics. " +
            '<a href="' + this.getGlobals().rulesUrl + '" target="_blank">Settings</a>.';
        done(this.createResult("GSC", message, "warning", null, null));
        return null;
    }
    done();
    return null;
}
|
#!/usr/bin/env bash
# Build a Corteza RPM inside Docker.
# BUILD_VERSION like "2021.6.0-rc.1" is split into an RPM version ("2021.6.0")
# and an RPM release string derived from the pre-release suffix.
BUILD_VERSION=${BUILD_VERSION:-"2021.6.0-rc.1"}
ARRAY=(${BUILD_VERSION//-/ })
RPM_VERSION=${ARRAY[0]}
WITHOUT_RPM_VERSION="${BUILD_VERSION/$RPM_VERSION/}"

if [[ "$WITHOUT_RPM_VERSION" == "" ]]; then
    # Final release.
    RPM_RELEASE="2"
elif [[ "$WITHOUT_RPM_VERSION" =~ -rc.[0-9]+ ]]; then
    # Release candidate, e.g. "-rc.1" -> release "1.rc.1".
    RPM_RELEASE="1.${WITHOUT_RPM_VERSION/-/}"
else
    # Any other pre-release suffix sorts before rc builds.
    RPM_RELEASE="0.${WITHOUT_RPM_VERSION/-/}"
fi

ENDPOINT="https://releases.cortezaproject.org/files"

docker build --build-arg CORTEZA_PATH="$ENDPOINT/corteza-$BUILD_VERSION-linux-amd64.tar.gz" -t corteza-rpm .

# Fix: quote the $(pwd) bind mounts so the build works from a working
# directory containing spaces.
docker run -v "$(pwd)/BUILD:/root/rpmbuild/BUILD" \
           -v "$(pwd)/RPMS:/root/rpmbuild/RPMS" \
           -v "$(pwd)/SPECS:/root/rpmbuild/SPECS" \
           corteza-rpm rpmbuild -bb --define "_version $BUILD_VERSION" \
                                    --define "_rpm_version $RPM_VERSION" \
                                    --define "_rpm_release $RPM_RELEASE" \
                                    SPECS/corteza.spec
|
<filename>day_fresh/df_order/views.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,HttpResponse,redirect
from df_user import user_decorator
from django.db import transaction #这一个是处理事物的
from models import *
import datetime
from decimal import Decimal
from df_cart.models import *
from df_goods.models import *
from df_user.models import *
# Create your views here.
@user_decorator.login
def order(request):
    # Render the "place order" page for the cart rows the user selected.
    # Python 2 / Django view: cart ids arrive as the 'carts_id' query list.
    listId = request.GET.getlist('carts_id')
    uid = request.session.get('user_id')
    user_obj = UserInfo.objects.get(pk=uid)
    if listId == []:
        # Nothing selected: render the page without a cart list.
        context = {
            'val': "1",
            'user_obj': user_obj,
        }
        return render(request, 'df_order/place_order.html', context)
    else:
        # Load each selected cart row for display.
        cart_list = []
        for id in listId:
            cart_obj = CartInfo.objects.get(id=id)
            cart_list.append(cart_obj)
        context = {
            'val': "1",
            'cart_list': cart_list,
            'user_obj': user_obj,
        }
        print cart_list  # debug output (Python 2 print statement)
        return render(request, 'df_order/place_order.html', context)
"""
用事物来完成:一旦操作失败则全部回退
处理订单的思路:
1. 创建订单对象
2.判断商品的库存
3.创建详单对象
4.修改商品库存
5.删除购物车
"""
@transaction.atomic()
@user_decorator.login
def order_handle(request):
    # Turn the selected cart rows into an order inside one transaction:
    # create the order, check stock, create order-detail rows, decrement
    # stock, delete the cart rows. Any failure rolls back to the savepoint.
    carts_ids = request.GET.getlist('cart_id')
    total = request.GET.get('total')
    # Django transaction savepoint: on any error we roll back to this point.
    tran_id = transaction.savepoint()
    try:
        # Create the order object; oid is timestamp + user id.
        order = OrderInfo()
        now = datetime.datetime.now()
        uid = request.session.get('user_id')
        order.oid = "%s%d" % (now.strftime("%Y%m%d%H%M%S"), uid)
        order.user_id = uid
        order.odate = now
        order.ototal = Decimal(total)
        order.save()
        # Create one order-detail row per selected cart row.
        cart_ids1 = [int(item) for item in carts_ids]
        for id1 in cart_ids1:
            # Create the detail object for this cart row.
            detail = OrderDetailInfo()
            # detail.order is the FK object: ties the detail to this order.
            detail.order = order
            # Look up the cart row.
            cart = CartInfo.objects.get(id=id1)
            # Check stock for the cart's goods.
            goods = cart.goods
            if goods.gkucun >= cart.count:  # enough stock for this quantity
                print '-----------------1-------------'
                # Decrement the goods stock.
                print "开始"+str(goods.gkucun)
                goods.gkucun = cart.goods.gkucun-cart.count
                print "开始" + str(goods.gkucun)
                goods.save()
                # Fill in the detail row (snapshot price and count).
                detail.goods_id = goods.id
                detail.price = goods.gprice
                detail.count = cart.count
                # Save the detail row.
                detail.save()
                # Remove the cart row now that it is ordered.
                cart.delete()
            else:
                # Insufficient stock: roll back everything and return to cart.
                print '------------------2---------'
                transaction.savepoint_rollback(tran_id)
                return redirect('/cart/')
        # All rows processed: commit the savepoint.
        transaction.savepoint_commit(tran_id)
    except Exception as e:
        # Any unexpected failure: log and roll back the whole order.
        print "=================%s"%e
        transaction.savepoint_rollback(tran_id)
    # return HttpResponse('ok')
    return redirect('/user/user_order/')
|
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This contains util code for testing kubectl.
set -o errexit
set -o nounset
set -o pipefail

# Set locale to ensure english responses from kubectl commands
export LANG=C

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# Expects the following has already been done by whatever sources this script
# source "${KUBE_ROOT}/hack/lib/init.sh"
# source "${KUBE_ROOT}/hack/lib/test.sh"

# Host/port defaults for the test control plane components.
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
API_PORT=${API_PORT:-8080}
SECURE_API_PORT=${SECURE_API_PORT:-6443}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.

# Container images used by the test fixtures.
IMAGE_NGINX="k8s.gcr.io/nginx:1.7.9"
IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd"  # deployment-revision1.yaml
IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX"  # deployment-revision2.yaml
IMAGE_PERL="k8s.gcr.io/perl"
IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0"
IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest"
IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd"  # rollingupdate-daemonset-rv2.yaml
IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7"
IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8"

# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH

# Define variables for resource types to prevent typos.
clusterroles="clusterroles"
configmaps="configmaps"
csr="csr"
deployments="deployments"
horizontalpodautoscalers="horizontalpodautoscalers"
metrics="metrics"
namespaces="namespaces"
nodes="nodes"
persistentvolumeclaims="persistentvolumeclaims"
persistentvolumes="persistentvolumes"
pods="pods"
podtemplates="podtemplates"
replicasets="replicasets"
replicationcontrollers="replicationcontrollers"
roles="roles"
secrets="secrets"
serviceaccounts="serviceaccounts"
services="services"
statefulsets="statefulsets"
static="static"
storageclass="storageclass"
subjectaccessreviews="subjectaccessreviews"
selfsubjectaccessreviews="selfsubjectaccessreviews"
customresourcedefinitions="customresourcedefinitions"
daemonsets="daemonsets"
controllerrevisions="controllerrevisions"
job="jobs"

# include shell2junit library
sh2ju="${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh"
if [[ -f "${sh2ju}" ]]; then
  source "${sh2ju}"
else
  echo "failed to find third_party/forked/shell2junit/sh2ju.sh"
  exit 1
fi
# record_command runs the command and records its output/error messages in junit format
# it expects the first to be the name of the command
# Example:
# record_command run_kubectl_tests
#
# WARNING: Variable changes in the command will NOT be effective after record_command returns.
# This is because the command runs in subshell.
function record_command() {
    # Relax strict mode so a failing test is recorded rather than aborting.
    set +o nounset
    set +o errexit

    local name="$1"
    local output="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
    echo "Recording: ${name}"
    echo "Running command: $@"
    # juLog (from sh2ju.sh) runs the command and writes junit XML to $output.
    juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"
    if [[ $? -ne 0 ]]; then
      echo "Error when running ${name}"
      # Accumulate failed test names; inspected by the caller at the end.
      foundError="${foundError}""${name}"", "
    fi

    set -o nounset
    set -o errexit
}
# Stops the running kubectl proxy, if there is one.
function stop-proxy()
{
  # Kill the proxy process (if any) and clear the tracking variables.
  [[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}"
  [[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null
  [[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE}
  PROXY_PID=
  PROXY_PORT=
  PROXY_PORT_FILE=
}
# Starts "kubect proxy" to test the client proxy. $1: api_prefix
function start-proxy()
{
  # Start `kubectl proxy` on a random port, discover the port from its output
  # file, and wait until its healthz endpoint responds. $1: optional api prefix.
  stop-proxy

  PROXY_PORT_FILE=$(mktemp proxy-port.out.XXXXX)
  kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}"


  if [ $# -eq 0 ]; then
    kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
  else
    kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
  fi
  PROXY_PID=$!
  PROXY_PORT=

  # Poll the output file (up to 10 tries, 0.5s apart) for the chosen port.
  local attempts=0
  while [[ -z ${PROXY_PORT} ]]; do
    if (( ${attempts} > 9 )); then
      kill "${PROXY_PID}"
      kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
    fi
    sleep .5
    kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
    PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
    attempts=$((attempts+1))
  done

  kube::log::status "kubectl proxy running on port ${PROXY_PORT}"

  # We try checking kubectl proxy 30 times with 1s delays to avoid occasional
  # failures.
  if [ $# -eq 0 ]; then
    kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy"
  else
    kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1"
  fi
}
function cleanup()
{
  # Tear down everything started by setup(): control-plane processes, the
  # proxy, etcd, and the temp dir. Registered as an EXIT/SIGINT trap.
  [[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null
  [[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null
  [[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null
  stop-proxy

  kube::etcd::cleanup
  rm -rf "${KUBE_TEMP}"

  local junit_dir="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
  echo "junit report dir:" ${junit_dir}

  kube::log::status "Clean up complete"
}
# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
function check-curl-proxy-code()
{
  local status
  local -r address=$1
  local -r desired=$2
  local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}"
  # -w "%{http_code}" prints only the HTTP status; the body is discarded.
  status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}")
  if [ "${status}" == "${desired}" ]; then
    return 0
  fi
  echo "For address ${full_address}, got ${status} but wanted ${desired}"
  return 1
}
# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
function kubectl-with-retry()
{
  # Retry a kubectl command (up to 4 attempts, exponential backoff) when it
  # fails with the optimistic-concurrency "object has been modified" error.
  ERROR_FILE="${KUBE_TEMP}/kubectl-error"
  preserve_err_file=${PRESERVE_ERR_FILE-false}
  for count in {0..3}; do
    kubectl "$@" 2> ${ERROR_FILE} || true
    if grep -q "the object has been modified" "${ERROR_FILE}"; then
      kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
      rm "${ERROR_FILE}"
      sleep $((2**count))
    else
      # Success or a non-retryable error: optionally keep the error file
      # (PRESERVE_ERR_FILE=true) so the caller can inspect it.
      if [ "$preserve_err_file" != true ] ; then
        rm "${ERROR_FILE}"
      fi
      break
    fi
  done
}
# Waits for the pods with the given label to match the list of names. Don't call
# this function unless you know the exact pod names, or expect no pods.
# $1: label to match
# $2: list of pod names sorted by name
# Example invocation:
# wait-for-pods-with-label "app=foo" "nginx-0nginx-1"
function wait-for-pods-with-label()
{
  # Polls up to 10 times (with growing 1s..10s sleeps) until the concatenated,
  # name-sorted pod names matching label $1 equal the expected string $2.
  # Exits the test run on timeout.
  local i
  for i in $(seq 1 10); do
    # --sort-by keeps the concatenation deterministic so it can be compared
    # directly against the caller-supplied expectation.
    kubeout=$(kubectl get po -l "$1" --output=go-template --template='{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}")
    # Quote the right-hand side: inside [[ ]] an unquoted RHS is treated as a
    # glob pattern, but this function promises an exact-name comparison.
    if [[ "${kubeout}" = "$2" ]]; then
      return
    fi
    echo "Waiting for pods: $2, found ${kubeout}"
    sleep "$i"
  done
  kube::log::error_exit "Timeout waiting for pods with label $1"
}
# Code to be run before running the tests.
setup() {
  # Guarantee cleanup of background processes/temp state on exit or Ctrl-C.
  kube::util::trap_add cleanup EXIT SIGINT
  kube::util::ensure-temp-dir
  # ensure ~/.kube/config isn't loaded by tests
  HOME="${KUBE_TEMP}"
  # Bring up the etcd instance backing the test apiserver.
  kube::etcd::start
  # Find a standard sed instance for use with edit scripts
  kube::util::ensure-gnu-sed
  kube::log::status "Building kubectl"
  make -C "${KUBE_ROOT}" WHAT="cmd/kubectl"
  # Check kubectl
  kube::log::status "Running kubectl with no options"
  "${KUBE_OUTPUT_HOSTBIN}/kubectl"
  # TODO: we need to note down the current default namespace and set back to this
  # namespace after the tests are done.
  kubectl config view
  # Run the whole suite under a dedicated kubeconfig context named "test".
  CONTEXT="test"
  kubectl config set-context "${CONTEXT}"
  kubectl config use-context "${CONTEXT}"
  kube::log::status "Setup complete"
}
########################################################
# Kubectl version (--short, --client, --output) #
########################################################
run_kubectl_version_tests() {
  set -o nounset
  set -o errexit
  kube::log::status "Testing kubectl version"
  TEMP="${KUBE_TEMP}"
  # Sanity check: the raw /version endpoint must be reachable.
  kubectl get "${kube_flags[@]}" --raw /version
  # create version files, one for the client, one for the server.
  # these are the files we will use to ensure that the remainder output is correct
  kube::test::version::object_to_file "Client" "" "${TEMP}/client_version_test"
  kube::test::version::object_to_file "Server" "" "${TEMP}/server_version_test"
  kube::log::status "Testing kubectl version: check client only output matches expected output"
  kube::test::version::object_to_file "Client" "--client" "${TEMP}/client_only_version_test"
  kube::test::version::object_to_file "Client" "--client" "${TEMP}/server_client_only_version_test"
  # "eq"/"ne" select whether diff_assert requires the two files to match or differ.
  kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_version_test" "the flag '--client' shows correct client info"
  kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_version_test" "the flag '--client' correctly has no server version info"
  kube::log::status "Testing kubectl version: verify json output"
  kube::test::version::json_client_server_object_to_file "" "clientVersion" "${TEMP}/client_json_version_test"
  kube::test::version::json_client_server_object_to_file "" "serverVersion" "${TEMP}/server_json_version_test"
  kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_json_version_test" "--output json has correct client info"
  kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_json_version_test" "--output json has correct server info"
  kube::log::status "Testing kubectl version: verify json output using additional --client flag does not contain serverVersion"
  kube::test::version::json_client_server_object_to_file "--client" "clientVersion" "${TEMP}/client_only_json_version_test"
  kube::test::version::json_client_server_object_to_file "--client" "serverVersion" "${TEMP}/server_client_only_json_version_test"
  kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_json_version_test" "--client --output json has correct client info"
  kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_json_version_test" "--client --output json has no server info"
  kube::log::status "Testing kubectl version: compare json output using additional --short flag"
  kube::test::version::json_client_server_object_to_file "--short" "clientVersion" "${TEMP}/client_short_json_version_test"
  kube::test::version::json_client_server_object_to_file "--short" "serverVersion" "${TEMP}/server_short_json_version_test"
  kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_short_json_version_test" "--short --output client json info is equal to non short result"
  kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_short_json_version_test" "--short --output server json info is equal to non short result"
  kube::log::status "Testing kubectl version: compare json output with yaml output"
  # The json and yaml renderings must carry identical information.
  kube::test::version::json_object_to_file "" "${TEMP}/client_server_json_version_test"
  kube::test::version::yaml_object_to_file "" "${TEMP}/client_server_yaml_version_test"
  kube::test::version::diff_assert "${TEMP}/client_server_json_version_test" "eq" "${TEMP}/client_server_yaml_version_test" "--output json/yaml has identical information"
  # Restore default shell options for subsequent test groups.
  set +o nounset
  set +o errexit
}
# Runs all pod related tests.
run_pod_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:pods)"
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
# Repeat above test using jsonpath template
kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
# Describe command should print detailed information
kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:"
# Describe command should print events information by default
kube::test::describe_object_events_assert pods 'valid-pod'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert pods 'valid-pod' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert pods 'valid-pod' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert pods
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert pods false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert pods true
### Validate Export ###
kube::test::get_object_assert 'pods/valid-pod' "{{.metadata.namespace}} {{.metadata.name}}" '<no value> valid-pod' "--export=true"
### Dump current valid-pod POD
output_pod=$(kubectl get pod valid-pod -o yaml "${kube_flags[@]}")
### Delete POD valid-pod by id
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --now
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --now
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --grace-period=0
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command succeeds without --force by waiting
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from dumped YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
echo "${output_pod}" | ${SED} '/namespace:/d' | kubectl create -f - "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod from JSON
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod with label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
### Create POD valid-pod from YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
output_message=$(kubectl get pods --field-selector metadata.name=valid-pod "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "valid-pod"
# Command
phase=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .status.phase }}')
output_message=$(kubectl get pods --field-selector status.phase="${phase}" "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "valid-pod"
### Delete PODs with no parameter mustn't kill everything
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete pods "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete PODs with --all and a label selector is not permitted
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete all PODs
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 --force # --all remove all the pods
# Post-condition: no POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
# Detailed tests for describe pod output
### Create a new namespace
# Pre-condition: the test-secrets namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-kubectl-describe-pod
# Post-condition: namespace 'test-secrets' is created.
kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod'
### Create a generic secret
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$secret_type}}" 'test-type'
### Create a generic configmap
# Pre-condition: no CONFIGMAP exists
kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap'
### Create a pod disruption budget with minAvailable
# Command
kubectl create pdb test-pdb-1 --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-1 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '2'
# Command
kubectl create pdb test-pdb-2 --selector=app=rails --min-available=50% --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-2 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '50%'
### Create a pod disruption budget with maxUnavailable
# Command
kubectl create pdb test-pdb-3 --selector=app=rails --max-unavailable=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-3 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '2'
# Command
kubectl create pdb test-pdb-4 --selector=app=rails --max-unavailable=50% --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-4 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '50%'
### Fail creating a pod disruption budget if both maxUnavailable and minAvailable specified
! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod
# Create a pod that consumes secret, configmap, and downward API keys as envs
kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod
kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Describe command (resource only) should print detailed information about environment variables
kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Clean-up
kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod
kubectl delete secret test-secret --namespace=test-kubectl-describe-pod
kubectl delete configmap test-configmap --namespace=test-kubectl-describe-pod
kubectl delete pdb/test-pdb-1 pdb/test-pdb-2 pdb/test-pdb-3 pdb/test-pdb-4 --namespace=test-kubectl-describe-pod
kubectl delete namespace test-kubectl-describe-pod
### Create two PODs
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl create -f test/e2e/testing-manifests/kubectl/redis-master-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod and redis-master PODs are created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
### Delete multiple PODs at once
# Pre-condition: valid-pod and redis-master PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
# Command
kubectl delete pods valid-pod redis-master "${kube_flags[@]}" --grace-period=0 --force # delete multiple pods at once
# Post-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create valid-pod POD
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Label the valid-pod POD
# Pre-condition: valid-pod is not labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:'
# Command
kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
# Post-condition: valid-pod is labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
### Label the valid-pod POD with empty label value
# Pre-condition: valid-pod does not have label "emptylabel"
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
# Command
kubectl label pods valid-pod emptylabel="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptylabel" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" ''
### Annotate the valid-pod POD with empty annotation value
# Pre-condition: valid-pod does not have annotation "emptyannotation"
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" '<no value>'
# Command
kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptyannotation" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''
### Record label change
# Pre-condition: valid-pod does not have record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range.items}}{{$annotations_field}}:{{end}}" ''
# Command
kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}"
# Post-condition: valid-pod has record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Do not record label change
# Command
kubectl label pods valid-pod no-record-change=true --record=false "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation still contains command with --record=true
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Record label change with specified flag and previous change already recorded
### we are no longer tricked by data from another user into revealing more information about our client
# Command
kubectl label pods valid-pod new-record-change=true --record=true "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation contains new change
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*new-record-change=true.*"
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create pod-with-precision POD
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/pod-with-precision.json "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:'
## Patch preserves precision
# Command
kubectl patch "${kube_flags[@]}" pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}'
# Post-condition: pod-with-precision POD has patched annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue'
# Command
kubectl label pods pod-with-precision labelkey=labelvalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has label
kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue'
# Command
kubectl annotate pods pod-with-precision annotatekey=annotatevalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Cleanup
kubectl delete pod pod-with-precision "${kube_flags[@]}"
  ### Annotate POD YAML file locally without affecting the live pod.
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Command
kubectl annotate -f hack/testdata/pod.yaml annotatekey=annotatevalue "${kube_flags[@]}"
# Pre-condition: annotationkey is annotationvalue
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Command
output_message=$(kubectl annotate --local -f hack/testdata/pod.yaml annotatekey=localvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: annotationkey is still annotationvalue in the live pod, but command output is the new value
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
kube::test::if_has_string "${output_message}" "localvalue"
# Cleanup
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
### Create valid-pod POD
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  ## kubectl create --edit can update the label field of multiple resources. tmp-editor.sh is a fake editor
TEMP=$(mktemp /tmp/tmp-editor-XXXXXXXX.sh)
echo -e "#!/usr/bin/env bash\n${SED} -i \"s/mock/modified/g\" \$1" > ${TEMP}
chmod +x ${TEMP}
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-json.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-list.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
rm ${TEMP}
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
## kubectl create --edit won't create anything if user makes no changes
[ "$(EDITOR=cat kubectl create --edit -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json 2>&1 | grep 'Edit cancelled')" ]
## Create valid-pod POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## Patch can modify a local object
kubectl patch --local -f pkg/kubectl/validation/testdata/v1/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o yaml | grep -q "Never"
## Patch fails with error message "not patched" and exit code 1
output_message=$(! kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"replicas":7}}' 2>&1)
kube::test::if_has_string "${output_message}" 'not patched'
## Patch pod can change image
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# Post-condition: valid-pod has the record annotation
kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" "${change_cause_annotation}"
# prove that patch can use different types
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:'
# prove that patch can use different types
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# prove that yaml input works too
YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n'
kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}"
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
## Patch pod from JSON can change image
# Command
kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "k8s.gcr.io/pause:3.1"}]}}'
# Post-condition: valid-pod POD has expected image
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause:3.1:'
## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected
ERROR_FILE="${KUBE_TEMP}/conflict-error"
## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
# Command
# Needs to retry because other party may change the resource.
for count in {0..3}; do
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
rm "${ERROR_FILE}"
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
break
fi
done
## If the resourceVersion is the different from the one stored in the server, the patch will be rejected.
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
((resourceVersion+=100))
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the conflict
if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat ${ERROR_FILE})"
else
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat ${ERROR_FILE})"
exit 1
fi
rm "${ERROR_FILE}"
## --force replace pod can change other field, e.g., spec.container.name
# Command
kubectl get "${kube_flags[@]}" pod valid-pod -o json | ${SED} 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json
# Post-condition: spec.container.name = "replaced-k8s-serve-hostname"
kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
## check replace --grace-period requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified'
## check replace --timeout requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --timeout=1s -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-timeout must have \-\-force specified'
# Clean up
rm /tmp/tmp-valid-pod.json
## replace of a cluster scoped resource can succeed
# Pre-condition: a node exists
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test"
}
}
__EOF__
kubectl replace -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test",
"annotations": {"a":"b"},
"resourceVersion": "0"
}
}
__EOF__
# Post-condition: the node command succeeds
kube::test::get_object_assert "node node-v1-test" "{{.metadata.annotations.a}}" 'b'
kubectl delete node node-v1-test "${kube_flags[@]}"
## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
echo -e "#!/usr/bin/env bash\n${SED} -i \"s/nginx/k8s.gcr.io\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
# Pre-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
[[ "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true | grep Patch:)" ]]
# Post-condition: valid-pod POD has image k8s.gcr.io/serve_hostname
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/serve_hostname:'
# cleaning
rm /tmp/tmp-editor.sh
## kubectl edit should work on Windows
[ "$(EDITOR=cat kubectl edit pod/valid-pod 2>&1 | grep 'Edit cancelled')" ]
[ "$(EDITOR=cat kubectl edit pod/valid-pod | grep 'name: valid-pod')" ]
[ "$(EDITOR=cat kubectl edit --windows-line-endings pod/valid-pod | file - | grep CRLF)" ]
[ ! "$(EDITOR=cat kubectl edit --windows-line-endings=false pod/valid-pod | file - | grep CRLF)" ]
[ "$(EDITOR=cat kubectl edit ns | grep 'kind: List')" ]
### Label POD YAML file locally without affecting the live pod.
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
output_message=$(kubectl label --local --overwrite -f hack/testdata/pod.yaml name=localonlyvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: name is still valid-pod in the live pod, but command output is the new value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
kube::test::if_has_string "${output_message}" "localonlyvalue"
### Overwriting an existing label is not permitted
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is still valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
### --overwrite must be used to overwrite existing label, can be applied to all resources
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is valid-pod-super-sayan
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two PODs from 1 yaml file
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: redis-master and valid-pod PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
### Delete two PODs from 1 yaml file
# Pre-condition: redis-master and valid-pod PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
# Command
kubectl delete -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: no PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply should update configuration annotations only if apply is already called
## 1. kubectl create doesn't set the annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 2. kubectl replace doesn't set the annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | ${SED} 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 3. kubectl apply does set the annotation
# Command: apply the pod "test-pod"
kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is applied
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration
## 4. kubectl replace updates an existing annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | ${SED} 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]]
# Clean up
rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
kubectl delete pods test-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# Runs tests for kubectl create secret / configmap (dry-run, namespace handling, --raw).
run_create_secret_tests() {
set -o nounset
set -o errexit
### Create generic secret with explicit namespace
# Pre-condition: secret 'mysecret' does not exist
output_message=$(! kubectl get secrets mysecret 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'secrets "mysecret" not found'
# Command: dry-run create with an explicit --namespace; nothing should be persisted
output_message=$(kubectl create "${kube_flags[@]}" secret generic mysecret --dry-run --from-literal=foo=bar -o jsonpath='{.metadata.namespace}' --namespace=user-specified)
# Post-condition: mysecret still not created since --dry-run was used
# Output from 'create' command should contain the specified --namespace value
failure_message=$(! kubectl get secrets mysecret 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${failure_message}" 'secrets "mysecret" not found'
kube::test::if_has_string "${output_message}" 'user-specified'
# Command: dry-run create without --namespace
output_message=$(kubectl create "${kube_flags[@]}" secret generic mysecret --dry-run --from-literal=foo=bar -o jsonpath='{.metadata.namespace}')
# Post-condition: jsonpath for .metadata.namespace should be empty for object since --namespace was not explicitly specified
kube::test::if_empty_string "${output_message}"
# Pipe a dry-run configmap manifest into a raw REST POST to verify `create --raw -f -` works
kubectl create configmap tester-create-cm -o json --dry-run | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps -f -
# Clean up the configmap created via the raw endpoint (it landed in the 'default' namespace)
kubectl delete -ndefault "${kube_flags[@]}" configmap tester-create-cm
set +o nounset
set +o errexit
}
# Runs tests related to kubectl apply.
run_kubectl_apply_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl apply"
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"
## kubectl apply should be able to clear defaulted fields.
# Pre-Condition: no deployment exists
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a deployment "test-deployment-retainkeys" (doesn't exist) should create this deployment
kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]}"
# Post-Condition: deployment "test-deployment-retainkeys" created
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}{{end}}" 'test-deployment-retainkeys'
# Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields
# (RollingUpdate strategy with maxSurge/maxUnavailable, and an emptyDir volume)
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxSurge)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxUnavailable)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
# Command: apply a deployment "test-deployment-retainkeys" should clear
# defaulted fields and successfully update the deployment
[[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]}")" ]]
# Post-Condition: deployment "test-deployment-retainkeys" has updated fields
# (strategy switched to Recreate, volume switched to hostPath; the old
# defaulted RollingUpdate/emptyDir keys must be gone)
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep Recreate)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep hostPath)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
# Clean up
kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]}"
## kubectl apply -f with label selector should only apply matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply
kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
## kubectl apply --prune
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b; with --prune, pod a (matching the selector but absent from b.yaml) should be removed
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "a" not found'
# cleanup
kubectl delete pods b
# same thing without prune for a sanity check
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b; without --prune, pod a must survive
kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check both pods exist
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
# cleanup
kubectl delete pod/a pod/b
## kubectl apply --prune requires a --all flag to select everything
output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" \
'all resources selected for prune without explicitly passing --all'
# should apply everything
kubectl apply --all --prune -f hack/testdata/prune
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
kubectl delete pod/a pod/b
## kubectl apply --prune should fallback to delete for non reapable types
# (PVCs have no reaper; pruning a-pvc when applying b.yml must still remove it)
kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"
## kubectl apply --prune --prune-whitelist
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and don't prune pod a by overwriting whitelist
# (whitelist restricted to core/v1/Service, so pods are out of prune scope)
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and prune pod a with default whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# cleanup
kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# Runs tests related to kubectl create --filename(-f) --selector(-l).
run_kubectl_create_filter_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl create filter"
## kubectl create -f with label selector should only create matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# create only the objects in hack/testdata/filter whose labels match the selector
kubectl create -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist (it was filtered out by the selector)
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
set +o nounset
set +o errexit
}
# Runs tests for kubectl apply against Deployments: null-value propagation in
# templates/selectors, and conflict handling with --force/--overwrite.
run_kubectl_apply_deployments_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl apply deployments"
## kubectl apply should propagate user defined null values
# Pre-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply base deployment
kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
# check right labels exists
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
# apply new deployment with new template labels
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
# check right labels exists: l1 must be cleared ('<no value>') and l2 set everywhere
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'
# cleanup
# need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
# Post-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# kubectl apply deployment --overwrite=true --force=true
# Pre-Condition: no deployment exists
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
# apply deployment nginx
kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployment nginx' "{{${id_field}}}" 'nginx'
# apply deployment with new labels and a conflicting resourceVersion
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
# apply deployment with --force and --overwrite will succeed
kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
# check the changed deployment
output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]}" |grep nginx2)
kube::test::if_has_string "${output_message}" '"name": "nginx2"'
# applying a resource (with --force) that is both conflicting and invalid will
# cause the server to only return a "Conflict" error when we attempt to patch.
# This means that we will delete the existing resource after receiving 5 conflict
# errors in a row from the server, and will attempt to create the modified
# resource that we are passing to "apply". Since the modified resource is also
# invalid, we will receive an invalid error when we attempt to create it, after
# having deleted the old resource. Ensure that when this case is reached, the
# old resource is restored once again, and the validation error is printed.
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'Invalid value'
# Ensure that the old object has been restored
kube::test::get_object_assert 'deployment nginx' "{{${template_labels}}}" 'nginx2'
# cleanup
kubectl delete deployments --all --grace-period=10
set +o nounset
set +o errexit
}
# Runs tests for --save-config tests.
run_save_config_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl --save-config"
## Configuration annotations should be set when --save-config is enabled
## 1. kubectl create --save-config should generate configuration annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 2. kubectl edit --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: edit the pod "test-pod" using a scripted fake editor that rewrites the label
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/usr/bin/env bash\n${SED} -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 3. kubectl replace --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: replace the pod "test-pod"
kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 4. kubectl run --save-config should generate configuration annotation
# Pre-Condition: no RC exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create the rc "nginx" with image nginx
kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
# Post-Condition: rc "nginx" has configuration annotation
[[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 5. kubectl expose --save-config should generate configuration annotation
# Pre-Condition: no service exists
kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: expose the rc "nginx"
kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
# Post-Condition: service "nginx" has configuration annotation
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,svc nginx
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: autoscale rc "frontend"
kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs
output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
output_message=$(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# tests kubectl group prefix matching ("autoscal" should resolve to "autoscaling")
output_message=$(kubectl get hpa.autoscal -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the rc reaper.
kubectl delete hpa frontend "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# Runs tests for kubectl run generators (job/v1, deployment, cronjob/v1beta1)
# and for kubectl set image against a cronjob.
run_kubectl_run_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl run"
## kubectl run should create deployments, jobs or cronjob
# Pre-Condition: no Job exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a Job via the job/v1 generator
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: Job "pi" is created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
# Clean up
kubectl delete jobs pi "${kube_flags[@]}"
# Post-condition: no pods exist.
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Pre-Condition: no Deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: default generator (no --generator flag) creates an extensions Deployment
kubectl run nginx-extensions "--image=$IMAGE_NGINX" "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$id_field}}:{{end}}" 'nginx-extensions:'
# new generator was used (revisionHistoryLimit defaults to 2 with the new generator)
output_message=$(kubectl get deployment.extensions/nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Clean up
kubectl delete deployment nginx-extensions "${kube_flags[@]}"
# Command: explicit apps.v1beta1 deployment generator
kubectl run nginx-apps "--image=$IMAGE_NGINX" --generator=deployment/apps.v1beta1 "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment.apps "{{range.items}}{{$id_field}}:{{end}}" 'nginx-apps:'
# and new generator was used, iow. new defaults are applied
output_message=$(kubectl get deployment/nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Clean up
kubectl delete deployment nginx-apps "${kube_flags[@]}"
# Pre-Condition: no Job exists
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: --schedule plus the cronjob generator creates a CronJob
kubectl run pi --schedule="*/5 * * * *" --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: CronJob "pi" is created
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Pre-condition: cronjob has perl image, not custom image
output_message=$(kubectl get cronjob/pi -o jsonpath='{..image}')
kube::test::if_has_not_string "${output_message}" "custom-image"
kube::test::if_has_string "${output_message}" "${IMAGE_PERL}"
# Set cronjob image ('*' targets every container in the pod template)
kubectl set image cronjob/pi '*=custom-image'
# Post-condition: cronjob has custom image, not perl image
output_message=$(kubectl get cronjob/pi -o jsonpath='{..image}')
kube::test::if_has_string "${output_message}" "custom-image"
kube::test::if_has_not_string "${output_message}" "${IMAGE_PERL}"
# Clean up
kubectl delete cronjobs pi "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_kubectl_old_print_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl get --server-print=false"
### Test retrieval of all types in discovery
# Pre-condition: no resources exist
output_message=$(kubectl get pods --server-print=false 2>&1 "${kube_flags[@]}")
# Post-condition: Expect text indicating no resources were found
kube::test::if_has_string "${output_message}" 'No resources found.'
### Test retrieval of pods against server-side printing
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get pod "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get pod --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of daemonsets against server-side printing
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Post-condition: daemonset is created
kube::test::get_object_assert ds "{{range.items}}{{$id_field}}:{{end}}" 'bind:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get ds "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get ds --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of replicationcontrollers against server-side printing
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get rc "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get rc --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of replicasets against server-side printing
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get rs "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get rs --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of jobs against server-side printing
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: assertion object exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get jobs/pi "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get jobs/pi --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of clusterroles against server-side printing
kubectl create "${kube_flags[@]}" clusterrole sample-role --verb=* --resource=pods
# Post-Condition: assertion object exists
kube::test::get_object_assert clusterrole/sample-role "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get clusterroles/sample-role "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get clusterroles/sample-role --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of crds against server-side printing
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "foos.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"names": {
"plural": "foos",
"kind": "Foo"
}
}
}
__EOF__
# Post-Condition: assertion object exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'foos.company.com:'
# Test that we can list this new CustomResource
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Compare "old" output with experimental output and ensure both are the same
expected_output=$(kubectl get foos "${kube_flags[@]}")
actual_output=$(kubectl get foos --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
# teardown
kubectl delete customresourcedefinitions/foos.company.com "${kube_flags_with_token[@]}"
kubectl delete clusterroles/sample-role "${kube_flags_with_token[@]}"
kubectl delete jobs pi "${kube_flags[@]}"
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
kubectl delete ds bind "${kube_flags[@]}"
kubectl delete pod valid-pod "${kube_flags[@]}"
}
# Exercises 'kubectl get' behavior end to end: errors for missing resources,
# output-format handling for empty lists, --ignore-not-found, 'get all',
# server-side list chunking (--chunk-size / limit+continue),
# --allow-missing-template-keys for jsonpath and go-template printers,
# watch mode (-w / --watch-only), and multi-item file input (-f ... -o).
# Requires from the caller's environment: create_and_use_new_namespace,
# the kube::test::* assertion helpers, $id_field, and ${kube_flags[@]}.
run_kubectl_get_tests() {
# Abort on unset variables and on any failing command for the duration of
# this test; both are reset at the end of the function.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl get"
### Test retrieval of non-existing pods
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# NOTE: the leading '!' inverts the exit code, so errexit does not kill the
# test when kubectl (expectedly) fails; stderr is captured via 2>&1.
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}")
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of non-existing POD with output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
### Test retrieval of pods when none exist with non-human readable output format flag specified
# Machine-readable printers (json/yaml/name/jsonpath/go-template/custom-columns)
# must NOT emit the human-oriented "No resources found" notice.
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o yaml)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o jsonpath='{.items}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o go-template='{{.items}}')
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o custom-columns=NAME:.metadata.name)
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
### Test retrieval of pods when none exist, with human-readable output format flag specified
# Human-readable printers (default table, -o wide) SHOULD show the notice,
# unless --ignore-not-found is given.
# Pre-condition: no pods exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods --ignore-not-found 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should not be part of the output
kube::test::if_has_not_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o wide)
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
### Test retrieval of non-existing POD with json output flag specified
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json)
# Post-condition: POD abc should error since it doesn't exist
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
# Post-condition: make sure we don't display an empty List
# A not-found error for a named resource must not fall back to printing an
# empty List object.
if kube::test::if_has_string "${output_message}" 'List'; then
echo 'Unexpected List output'
echo "${LINENO} $(basename $0)"
exit 1
fi
### Test kubectl get all
# --v=6 makes kubectl log each HTTP request URL and status code, which is
# what the assertions below grep for.
output_message=$(kubectl --v=6 --namespace default get all --chunk-size=0 2>&1 "${kube_flags[@]}")
# Post-condition: Check if we get 200 OK from all the url(s)
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/daemonsets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/deployments 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/replicasets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/statefulsets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200"
kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK"
# 'get all' must hit the apps/v1 endpoints, not the deprecated
# extensions/v1beta1 ones.
kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/daemonsets 200 OK"
kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"
### Test kubectl get chunk size
output_message=$(kubectl --v=6 get clusterrole --chunk-size=10 2>&1 "${kube_flags[@]}")
# Post-condition: Check if we get a limit and continue
kube::test::if_has_string "${output_message}" "/clusterroles?limit=10 200 OK"
kube::test::if_has_string "${output_message}" "/v1/clusterroles?continue="
### Test kubectl get chunk size defaults to 500
output_message=$(kubectl --v=6 get clusterrole 2>&1 "${kube_flags[@]}")
# Post-condition: Check if we get a limit and continue
kube::test::if_has_string "${output_message}" "/clusterroles?limit=500 200 OK"
### Test kubectl get chunk size does not result in a --watch error when resource list is served in multiple chunks
# Pre-condition: no ConfigMaps exist
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" ''
# Post-condition: Create three configmaps and ensure that we can --watch them with a --chunk-size of 1
kubectl create cm one "${kube_flags[@]}"
kubectl create cm two "${kube_flags[@]}"
kubectl create cm three "${kube_flags[@]}"
# --request-timeout=1s ends the watch quickly so the test doesn't hang.
output_message=$(kubectl get configmap --chunk-size=1 --watch --request-timeout=1s 2>&1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
output_message=$(kubectl get configmap --chunk-size=1 --watch-only --request-timeout=1s 2>&1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
### Test --allow-missing-template-keys
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --allow-missing-template-keys defaults to true for jsonpath templates
# (errexit is active, so these succeed only if kubectl exits 0 despite the
# missing key)
kubectl get "${kube_flags[@]}" pod valid-pod -o jsonpath='{.missing}'
## check --allow-missing-template-keys defaults to true for go templates
kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{.missing}}'
## check --allow-missing-template-keys=false results in an error for a missing key with jsonpath
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o jsonpath='{.missing}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'missing is not found'
## check --allow-missing-template-keys=false results in an error for a missing key with go
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o go-template='{{.missing}}' "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'map has no entry for key "missing"'
### Test kubectl get watch
output_message=$(kubectl get pods -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'STATUS' # headers
kube::test::if_has_string "${output_message}" 'valid-pod' # pod details
output_message=$(kubectl get pods/valid-pod -o name -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
kube::test::if_has_string "${output_message}" 'pod/valid-pod' # resource name
output_message=$(kubectl get pods/valid-pod -o yaml -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
kube::test::if_has_string "${output_message}" 'name: valid-pod' # yaml
output_message=$(! kubectl get pods/invalid-pod -w --request-timeout=1 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" '"invalid-pod" not found'
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
### Test 'kubectl get -f <file> -o <non default printer>' prints all the items in the file's list
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: PODs redis-master and valid-pod exist
# Check that all items in the list are printed
output_message=$(kubectl get -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml -o jsonpath="{..metadata.name}" "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "redis-master valid-pod"
# cleanup
kubectl delete pods redis-master valid-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# Verifies the global --request-timeout flag on 'kubectl get': a plain value,
# the value combined with --watch (the watch is expected to be cut off by a
# client-side read timeout), a unit-less value (treated as seconds), and an
# invalid time unit (expected to produce an error). Requires $id_field,
# ${kube_flags[@]}, create_and_use_new_namespace, and the kube::test::*
# helpers from the caller's environment.
run_kubectl_request_timeout_tests() {
# Abort on unset variables and on any failing command for the duration of
# this test; both are reset at the end of the function.
set -o nounset
set -o errexit
kube::log::status "Testing kubectl request timeout"
### Test global request timeout option
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## check --request-timeout on 'get pod'
output_message=$(kubectl get pod valid-pod --request-timeout=1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout on 'get pod' with --watch
# The watch outlives the 1s timeout, so the client reports a read timeout.
output_message=$(kubectl get pod valid-pod --request-timeout=1 --watch 2>&1)
kube::test::if_has_string "${output_message}" 'Timeout exceeded while reading body'
## check --request-timeout value with no time unit
output_message=$(kubectl get pod valid-pod --request-timeout=1 2>&1)
kube::test::if_has_string "${output_message}" 'valid-pod'
## check --request-timeout value with invalid time unit
# NOTE: the leading '!' inverts the exit code so errexit doesn't kill the
# test when kubectl (expectedly) rejects the flag value.
output_message=$(! kubectl get pod valid-pod --request-timeout="1p" 2>&1)
kube::test::if_has_string "${output_message}" 'Invalid timeout value'
# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# Creates three CustomResourceDefinitions (foos.company.com, bars.company.com,
# and resources.mygroup.example.com — the last with a kind, "Kind", that does
# not match its resource name), asserts they are registered, then delegates to
# run_non_native_resource_tests for the behavioral tests, and finally tears
# the CRDs down. Requires $id_field, ${kube_flags[@]},
# ${kube_flags_with_token[@]}, create_and_use_new_namespace, and the
# kube::test::* helpers from the caller's environment.
run_crd_tests() {
# Abort on unset variables and on any failing command for the duration of
# this test; both are reset at the end of the function.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl crd"
# CRD creation requires elevated privileges, hence kube_flags_with_token.
# The definition is piped in via a heredoc.
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "foos.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"names": {
"plural": "foos",
"kind": "Foo"
}
}
}
__EOF__
# Post-Condition: assertion object exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'foos.company.com:'
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "bars.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"names": {
"plural": "bars",
"kind": "Bar"
}
}
}
__EOF__
# Post-Condition: both CRDs now exist (listed in name order)
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'bars.company.com:foos.company.com:'
# This test ensures that the name printer is able to output a resource
# in the proper "kind.group/resource_name" format, and that the
# resource builder is able to resolve a GVK when a kind.group pair is given.
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "resources.mygroup.example.com"
},
"spec": {
"group": "mygroup.example.com",
"version": "v1alpha1",
"scope": "Namespaced",
"names": {
"plural": "resources",
"singular": "resource",
"kind": "Kind",
"listKind": "KindList"
}
}
}
__EOF__
# Post-Condition: assertion crd with non-matching kind and resource exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:'
# Run the behavioral tests against the CRDs created above.
run_non_native_resource_tests
# teardown
# NOTE(review): resources.mygroup.example.com appears to be cleaned up
# elsewhere — only the two company.com CRDs are deleted here; confirm.
kubectl delete customresourcedefinitions/foos.company.com "${kube_flags_with_token[@]}"
kubectl delete customresourcedefinitions/bars.company.com "${kube_flags_with_token[@]}"
set +o nounset
set +o errexit
}
# Waits for the company.com/v1 custom-resource endpoints (the group discovery
# document plus the foos and bars lists) to be served by the apiserver.
# Polls up to $times attempts; all three raw GETs must succeed within the
# same attempt. Sleeps $wait seconds between attempts, but not after the
# final one (the original slept once more before giving up, wasting a full
# interval on the timeout path).
# Requires: kubectl, ${kube_flags[@]}, and kube::log::error.
# Returns: 0 once all endpoints respond, 1 on timeout.
kube::util::non_native_resources() {
  local times=30
  local wait=10
  local failed
  local i
  for i in $(seq 1 "${times}"); do
    failed=""
    # '|| failed=true' keeps these probes from killing the caller when it
    # runs under 'set -o errexit'.
    kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1' || failed=true
    kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/foos' || failed=true
    kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/bars' || failed=true
    if [ -z "${failed}" ]; then
      return 0
    fi
    # Don't sleep after the last failed attempt; report the timeout promptly.
    if [ "${i}" -lt "${times}" ]; then
      sleep "${wait}"
    fi
  done
  kube::log::error "Timed out waiting for non-native-resources; tried ${times} waiting ${wait}s between each"
  return 1
}
run_non_native_resource_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl non-native resources"
kube::util::non_native_resources
# Test that we can list this new CustomResource (foos)
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can list this new CustomResource (bars)
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can list this new CustomResource (resources)
kube::test::get_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Kind
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}"
# Test that -o name returns kind.group/resourcename
output_message=$(kubectl "${kube_flags[@]}" get resource/myobj -o name)
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
output_message=$(kubectl "${kube_flags[@]}" get resources/myobj -o name)
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
output_message=$(kubectl "${kube_flags[@]}" get kind.mygroup.example.com/myobj -o name)
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
# Delete the resource with cascade.
kubectl "${kube_flags[@]}" delete resources myobj --cascade=true
# Make sure it's gone
kube::test::get_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Foo
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}"
# Test that we can list this new custom resource
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test alternate forms
kube::test::get_object_assert foo "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert foos.v1.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test all printers, with lists and individual items
kube::log::status "Testing CustomResource printing"
kubectl "${kube_flags[@]}" get foos
kubectl "${kube_flags[@]}" get foos/test
kubectl "${kube_flags[@]}" get foos -o name
kubectl "${kube_flags[@]}" get foos/test -o name
kubectl "${kube_flags[@]}" get foos -o wide
kubectl "${kube_flags[@]}" get foos/test -o wide
kubectl "${kube_flags[@]}" get foos -o json
kubectl "${kube_flags[@]}" get foos/test -o json
kubectl "${kube_flags[@]}" get foos -o yaml
kubectl "${kube_flags[@]}" get foos/test -o yaml
kubectl "${kube_flags[@]}" get foos -o "jsonpath={.items[*].someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "jsonpath={.someField}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos -o "go-template={{range .items}}{{.someField}}{{end}}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "go-template={{.someField}}" --allow-missing-template-keys=false
output_message=$(kubectl "${kube_flags[@]}" get foos/test -o name)
kube::test::if_has_string "${output_message}" 'foo.company.com/test'
# Test patching
kube::log::status "Testing CustomResource patching"
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value1"}' --type=merge
kube::test::get_object_assert foos/test "{{.patched}}" 'value1'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value2"}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" 'value2'
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":null}' --type=merge --record
kube::test::get_object_assert foos/test "{{.patched}}" '<no value>'
# Get local version
CRD_RESOURCE_FILE="${KUBE_TEMP}/crd-foos-test.json"
kubectl "${kube_flags[@]}" get foos/test -o json > "${CRD_RESOURCE_FILE}"
# cannot apply strategic patch locally
CRD_PATCH_ERROR_FILE="${KUBE_TEMP}/crd-foos-test-error"
! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}"
if grep -q "try --type merge" "${CRD_PATCH_ERROR_FILE}"; then
kube::log::status "\"kubectl patch --local\" returns error as expected for CustomResource: $(cat ${CRD_PATCH_ERROR_FILE})"
else
kube::log::status "\"kubectl patch --local\" returns unexpected error or non-error: $(cat ${CRD_PATCH_ERROR_FILE})"
exit 1
fi
# can apply merge patch locally
kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
# can apply merge patch remotely
kubectl "${kube_flags[@]}" patch --record -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
kube::test::get_object_assert foos/test "{{.patched}}" 'value3'
rm "${CRD_RESOURCE_FILE}"
rm "${CRD_PATCH_ERROR_FILE}"
# Test labeling
kube::log::status "Testing CustomResource labeling"
kubectl "${kube_flags[@]}" label foos --all listlabel=true
kubectl "${kube_flags[@]}" label foo/test itemlabel=true
# Test annotating
kube::log::status "Testing CustomResource annotating"
kubectl "${kube_flags[@]}" annotate foos --all listannotation=true
kubectl "${kube_flags[@]}" annotate foo/test itemannotation=true
# Test describing
kube::log::status "Testing CustomResource describing"
kubectl "${kube_flags[@]}" describe foos
kubectl "${kube_flags[@]}" describe foos/test
kubectl "${kube_flags[@]}" describe foos | grep listlabel=true
kubectl "${kube_flags[@]}" describe foos | grep itemlabel=true
# Delete the resource with cascade.
kubectl "${kube_flags[@]}" delete foos test --cascade=true
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Bar
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}"
# Test that we can list this new custom resource
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test that we can watch the resource.
# Start watcher in background with process substitution,
# so we can read from stdout asynchronously.
kube::log::status "Testing CustomResource watching"
exec 3< <(kubectl "${kube_flags[@]}" get bars --request-timeout=1m --watch-only -o name & echo $! ; wait)
local watch_pid
read <&3 watch_pid
# We can't be sure when the watch gets established,
# so keep triggering events (in the background) until something comes through.
local tries=0
while [ ${tries} -lt 10 ]; do
tries=$((tries+1))
kubectl "${kube_flags[@]}" patch bars/test -p "{\"patched\":\"${tries}\"}" --type=merge
sleep 1
done &
local patch_pid=$!
# Wait up to 30s for a complete line of output.
local watch_output
read <&3 -t 30 watch_output
# Stop the watcher and the patch loop.
kill -9 ${watch_pid}
kill -9 ${patch_pid}
kube::test::if_has_string "${watch_output}" 'bar.company.com/test'
# Delete the resource without cascade.
kubectl "${kube_flags[@]}" delete bars test --cascade=false
# Make sure it's gone
kube::test::wait_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create single item via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo.yaml
# Test that we have create a foo named test
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that apply an empty patch doesn't change fields
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
# Test that apply has updated the subfield
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'subfield1'
# Update a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-updated-subfield.yaml
# Test that apply has updated the subfield
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'modifiedSubfield'
# Test that the field has the expected value
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' 'subfield2'
# Delete a subfield and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-deleted-subfield.yaml
# Test that apply has deleted the field
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' '<no value>'
# Test that the field does not exist
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-added-subfield.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' 'subfield3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/foo.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create list via apply
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list.yaml
# Test that we have create a foo and a bar from a list
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Test that re-apply an list doesn't change anything
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list.yaml
# Test that the field has the same value after re-apply
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Test that the fields have the expected value
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
# Update fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-updated-field.yaml
# Test that apply has updated the fields
kube::test::get_object_assert foos/test-list '{{.someField}}' 'modifiedField'
kube::test::get_object_assert bars/test-list '{{.someField}}' 'modifiedField'
# Test that the field has the expected value
kube::test::get_object_assert foos/test-list '{{.otherField}}' 'field2'
kube::test::get_object_assert bars/test-list '{{.otherField}}' 'field2'
# Delete fields and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-deleted-field.yaml
# Test that apply has deleted the fields
kube::test::get_object_assert foos/test-list '{{.otherField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.otherField}}' '<no value>'
# Test that the fields does not exist
kube::test::get_object_assert foos/test-list '{{.newField}}' '<no value>'
kube::test::get_object_assert bars/test-list '{{.newField}}' '<no value>'
# Add a field and then apply the change
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-added-field.yaml
# Test that apply has added the field
kube::test::get_object_assert foos/test-list '{{.newField}}' 'field3'
kube::test::get_object_assert bars/test-list '{{.newField}}' 'field3'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/multi-crd-list.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply --prune
# Test that no foo or bar exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on foo.yaml that has foo/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# check right crds exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# apply --prune on bar.yaml that has bar/test
kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
# check right crds exist
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
# Delete the resource
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/bar.yaml
# Make sure it's gone
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test 'kubectl create' with namespace, and namespace cleanup.
kubectl "${kube_flags[@]}" create namespace non-native-resources
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml --namespace=non-native-resources
kube::test::get_object_assert bars '{{len .items}}' '1' --namespace=non-native-resources
kubectl "${kube_flags[@]}" delete namespace non-native-resources
# Make sure objects go away.
kube::test::wait_object_assert bars '{{len .items}}' '0' --namespace=non-native-resources
# Make sure namespace goes away.
local tries=0
while kubectl "${kube_flags[@]}" get namespace non-native-resources && [ ${tries} -lt 10 ]; do
tries=$((tries+1))
sleep ${tries}
done
set +o nounset
set +o errexit
}
# Exercises kubectl's --recursive flag against hack/testdata/recursive/*,
# where each directory deliberately contains two valid objects
# (busybox0/busybox1 or nginx0/nginx1) plus one malformed file (busybox2 /
# nginx2 with no Kind). Every command is therefore expected to succeed on
# the valid objects AND report an error for the malformed one, which is
# asserted via kube::test::if_has_string on the captured output.
run_recursive_resources_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing recursive resources"
### Create multiple busybox PODs recursively from directory of YAML files
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Edit multiple busybox PODs by updating the image field of multiple PODs recursively from a directory. tmp-editor.sh is a fake editor
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
echo -e '#!/usr/bin/env bash\nsed -i "s/image: busybox/image: prom\/busybox/g" $1' > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
output_message=$(! EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are not edited, and since busybox2 is malformed, it should error
# The reason why busybox0 & busybox1 PODs are not edited is because the editor tries to load all objects in
# a list but since it contains invalid objects, it will never open.
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'busybox:busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# cleaning
rm /tmp/tmp-editor.sh
## Replace multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Describe multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "app=busybox0"
kube::test::if_has_string "${output_message}" "app=busybox1"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Annotate multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Apply multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
### Convert deployment YAML file locally without affecting the live deployment.
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Command
output_message=$(kubectl convert --local -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1beta1 -o yaml "${kube_flags[@]}")
# Post-condition: apiVersion is still extensions/v1beta1 in the live deployment, but command output is the new value
kube::test::get_object_assert 'deployment nginx' "{{ .apiVersion }}" 'extensions/v1beta1'
kube::test::if_has_string "${output_message}" "apps/v1beta1"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
## Convert multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Get multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
# Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
kube::test::if_has_string "${output_message}" "busybox0:busybox1:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Label multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
# Quoted to avoid word splitting / glob expansion of the captured output.
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Patch multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
# Quoted to avoid word splitting / glob expansion of the captured output.
echo "${output_message}"
# Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Create replication controller recursively from directory of YAML files
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
### Autoscale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox replication controllers are autoscaled
# with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kubectl delete hpa busybox0 "${kube_flags[@]}"
kubectl delete hpa busybox1 "${kube_flags[@]}"
### Expose multiple replication controllers as service recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Scale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Rollout on multiple deployments recursively
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create deployments (revision 1) recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
## Rollback the deployments to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should be a no-op, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Pause the deployments recursively
# PRESERVE_ERR_FILE makes kubectl-with-retry keep its stderr capture so we
# can assert on it via ${ERROR_FILE} below.
PRESERVE_ERR_FILE=true
kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Resume the deployments recursively
kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "<no value>:<no value>:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Retrieve the rollout history of the deployments recursively
output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
kube::test::if_has_string "${output_message}" "nginx0-deployment"
kube::test::if_has_string "${output_message}" "nginx1-deployment"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# Clean up
unset PRESERVE_ERR_FILE
rm "${ERROR_FILE}"
! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create replication controllers recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
## Attempt to rollback the replication controllers to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for {"" "ReplicationController"}'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Attempt to pause the replication controllers recursively
output_message=$(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" pausing is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" pausing is not supported'
## Attempt to resume the replication controllers recursively
output_message=$(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
# Fixed: previously this line duplicated the busybox0 check; it must assert
# that busybox1 also reports "resuming is not supported" (mirrors the pause
# assertions above).
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" resuming is not supported'
# Clean up
! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
set +o nounset
set +o errexit
}
# Tests kubectl namespace CRUD plus namespaced pod operations:
# 1. create/verify/delete a standalone namespace ("my-namespace");
# 2. if the pod resource is supported, create the "other" namespace,
#    create/get/delete a pod inside it (including the `-n` shorthand and
#    the all-namespaces-by-name error case), then delete the namespace.
run_namespace_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:namespaces)"
### Create a new namespace
# Pre-condition: only the "default" namespace exists
# The Pre-condition doesn't hold anymore after we create and switch namespaces before creating pods with same name in the test.
# kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:'
# Command
kubectl create namespace my-namespace
# Post-condition: namespace 'my-namespace' is created.
kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
# Clean up
kubectl delete namespace my-namespace
######################
# Pods in Namespaces #
######################
if kube::test::if_supports_resource "${pods}" ; then
### Create a new namespace
# Pre-condition: the other namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace other
# Post-condition: namespace 'other' is created.
kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'
### Create POD valid-pod in specific namespace
# Pre-condition: no POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" --namespace=other -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: verify shorthand `-n other` has the same results as `--namespace=other`
kube::test::get_object_assert 'pods -n other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: a resource cannot be retrieved by name across all namespaces
output_message=$(! kubectl get "${kube_flags[@]}" pod valid-pod --all-namespaces 2>&1)
kube::test::if_has_string "${output_message}" "a resource cannot be retrieved by name across all namespaces"
### Delete POD valid-pod in specific namespace
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Clean up
kubectl delete namespace other
fi
set +o nounset
set +o errexit
}
# Tests `kubectl create secret` variants inside a dedicated "test-secrets"
# namespace: dry-run (no server contact), generic (with custom --type),
# docker-registry, tls, a stringData merge via a heredoc manifest, and
# output-flag templating. Each secret is deleted after its assertions and
# the namespace is removed at the end.
run_secrets_test() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing secrets"
# Ensure dry run succeeds and includes kind, apiVersion and data, and doesn't require a server connection
# --server points at a non-existent host on purpose: with --dry-run the
# command must never contact it ('example.com' absent from --v=6 trace).
output_message=$(kubectl create secret generic test --from-literal=key1=value1 --dry-run -o yaml --server=example.com --v=6)
kube::test::if_has_string "${output_message}" 'kind: Secret'
kube::test::if_has_string "${output_message}" 'apiVersion: v1'
# 'dmFsdWUx' is base64("value1") — secrets store data base64-encoded.
kube::test::if_has_string "${output_message}" 'key1: dmFsdWUx'
kube::test::if_has_not_string "${output_message}" 'example.com'
### Create a new namespace
# Pre-condition: the test-secrets namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-secrets
# Post-condition: namespace 'test-secrets' is created.
kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'
### Create a generic secret in a specific namespace
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep 'key1: dmFsdWUx')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a docker-registry secret in a specific namespace
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='test-user@test.com' --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockerconfigjson'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockerconfigjson:')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a tls secret
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Create a secret using stringData
# The manifest sets data.k2="" and stringData.k2="v2"; stringData must win.
kubectl create --namespace=test-secrets -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
"name": "secret-string-data"
},
"data": {
"k1":"djE=",
"k2":""
},
"stringData": {
"k2":"v2"
}
}
__EOF__
# Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' '<no value>'
# Clean up
kubectl delete secret secret-string-data --namespace=test-secrets
### Create a secret using output flags
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no secret exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
[[ "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template=\"{{.metadata.name}}:\" | grep 'test-secret:')" ]]
## Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Clean up
kubectl delete namespace test-secrets
set +o nounset
set +o errexit
}
# Tests `kubectl create configmap`: from a YAML fixture, then — inside a
# dedicated "test-configmaps" namespace — from a literal and from a binary
# file (256 random bytes via process substitution, which must land in the
# 'binaryData' field rather than 'data'). Cleans up all objects at the end.
run_configmap_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing configmaps"
kubectl create -f test/fixtures/doc-yaml/user-guide/configmap/configmap.yaml
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}{{end}}" 'test-configmap'
kubectl delete configmap test-configmap "${kube_flags[@]}"
### Create a new namespace
# Pre-condition: the test-configmaps namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-configmaps
# Post-condition: namespace 'test-configmaps' is created.
kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'
### Create a generic configmap in a specific namespace
# Pre-condition: no configmaps namespace exists
kube::test::get_object_assert 'configmaps --namespace=test-configmaps' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
kubectl create configmap test-binary-configmap --from-file <( head -c 256 /dev/urandom ) --namespace=test-configmaps
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
kube::test::get_object_assert 'configmap/test-binary-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-binary-configmap'
[[ "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'key1: value1')" ]]
[[ "$(kubectl get configmap/test-binary-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'binaryData')" ]]
# Clean-up
kubectl delete configmap test-configmap --namespace=test-configmaps
kubectl delete configmap test-binary-configmap --namespace=test-configmaps
kubectl delete namespace test-configmaps
set +o nounset
set +o errexit
}
run_service_tests() {
set -o nounset
set -o errexit
# switch back to the default namespace
kubectl config set-context "${CONTEXT}" --namespace=""
kube::log::status "Testing kubectl(v1:services)"
### Create redis-master service from JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Describe command should print detailed information
kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_object_events_assert services 'redis-master'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert services 'redis-master' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert services 'redis-master' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert services
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert services false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert services true
### set selector
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Set selector of a local file without talking to the server
kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}"
! kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}"
# Set command to change the selector.
kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan
# prove role=padawan
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "padawan:"
# Set command to reset the selector back to the original one.
kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml app=redis,role=master,tier=backend
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Show dry-run works on running selector
kubectl set selector services redis-master role=padawan --dry-run -o yaml "${kube_flags[@]}"
! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}"
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
### Dump current redis-master service
output_service=$(kubectl get service redis-master -o json "${kube_flags[@]}")
### Delete redis-master-service by id
# Pre-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create redis-master-service from dumped JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
# ${output_service} holds service JSON captured earlier (outside this excerpt);
# round-trip it back through `kubectl create` to prove dumped output is re-creatable.
echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
# Post-condition: redis-master service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
### Create redis-master-v1-test service
# Pre-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
# Create a second service inline from a heredoc manifest on stdin.
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "service-v1-test"
},
"spec": {
"ports": [
{
"protocol": "TCP",
"port": 80,
"targetPort": 80
}
]
}
}
__EOF__
# Post-condition: service-v1-test service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
### Identity
# Round-trip: fetch the service as JSON and `replace` it with itself (a no-op update must succeed).
kubectl get service "${kube_flags[@]}" service-v1-test -o json | kubectl replace "${kube_flags[@]}" -f -
### Delete services by id
# Pre-condition: service-v1-test exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
kubectl delete service "service-v1-test" "${kube_flags[@]}"
# Deletion may be asynchronous; optionally wait until the list settles.
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create two services
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml "${kube_flags[@]}"
kubectl create -f test/e2e/testing-manifests/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master and redis-slave services are created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
### Custom columns can be specified
# Pre-condition: generate output using custom columns
output_message=$(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
# Post-condition: should contain name column
kube::test::if_has_string "${output_message}" 'redis-master'
### Delete multiple services at once
# Pre-condition: redis-master and redis-slave services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
# Command
kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create an ExternalName service
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create service externalname beep-boop --external-name bar.com
# Post-condition: beep-boop service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
### Delete beep-boop service by id
# Pre-condition: beep-boop service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
# Command
kubectl delete service beep-boop "${kube_flags[@]}"
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
set +o nounset
set +o errexit
}
# Exercises kubectl against v1 replication controllers in a fresh namespace:
# create/delete (including leak checks), describe, scale (rc / job / deployment),
# expose, autoscale, and `kubectl set resources`. Commands prefixed with `!`
# are EXPECTED to fail; the test would abort (errexit) if they succeeded.
run_rc_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:replicationcontrollers)"
### Create and stop controller, make sure it doesn't leak pods
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend controller
kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replication controller frontend from JSON
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rc 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rc 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rc 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rc 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rc "Name:" "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rc
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rc false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rc true
### Scale replication controller frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with (wrong) current-replicas and replicas
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
# Expected to fail: --current-replicas=3 does not match the actual 2 replicas.
! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: nothing changed
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with replicas only
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
### Scale replication controller from JSON with replicas only
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
# Target resolved from the manifest file rather than by name.
kubectl scale --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Clean-up
kubectl delete rc frontend "${kube_flags[@]}"
### Scale multiple replication controllers
kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Command
kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
# Post-condition: 4 replicas each
kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
# Clean-up
kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
### Scale a job
kubectl create -f test/fixtures/doc-yaml/user-guide/job.yaml "${kube_flags[@]}"
# Command
kubectl scale --replicas=2 job/pi
# Post-condition: 2 replicas for pi
kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
# Clean-up
kubectl delete job/pi "${kube_flags[@]}"
### Scale a deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Command
kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# Post-condition: 1 replica for nginx-deployment
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
### Expose a deployment as a service
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '3'
# Command
kubectl expose deployment/nginx-deployment
# Post-condition: service exists and exposes deployment port (80)
kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
# Clean-up
kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
### Expose replication controller as service
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl expose rc frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Command
kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
# Create a service using service/v1 generator
kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
# Verify that expose service works without specifying a port.
kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
# Post-condition: service exists with the same port as the original service.
kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
# Cleanup services
kubectl delete pod valid-pod "${kube_flags[@]}"
kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"
### Expose negative invalid resource test
# Pre-condition: don't need
# Command
# Expected to fail: nodes are not an exposable resource type.
output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
# Post-condition: the error message has "cannot expose" string
kube::test::if_has_string "${output_message}" 'cannot expose'
### Try to generate a service with invalid name (exceeding maximum valid size)
# Pre-condition: use --name flag
output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name-that-has-more-than-sixty-three-characters --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: should fail due to invalid name
kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
# Pre-condition: default run without --name flag; should succeed by truncating the inherited name
output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: inherited name from pod has been truncated
kube::test::if_has_string "${output_message}" 'kubernetes-serve-hostname-testing-sixty-three-characters-in-len exposed'
# Clean-up
kubectl delete svc kubernetes-serve-hostname-testing-sixty-three-characters-in-len "${kube_flags[@]}"
### Expose multiport object as a new service
# Pre-condition: don't use --port flag
output_message=$(kubectl expose -f test/fixtures/doc-yaml/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
# Post-condition: expose succeeded
kube::test::if_has_string "${output_message}" 'etcd-server exposed'
# Post-condition: generated service has both ports from the exposed pod
kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
kube::test::get_object_assert 'service etcd-server' "{{$second_port_name}} {{$second_port_field}}" 'port-2 2379'
# Clean-up
kubectl delete svc etcd-server "${kube_flags[@]}"
### Delete replication controller with id
# Pre-condition: frontend replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replication controllers
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple controllers at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Auto scale replication controller
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, rc specified by file
kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, rc specified by name
# (the '80' asserted below is the CPU target the HPA ends up with when none is given)
kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rc frontend "${kube_flags[@]}"
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"
## Set resource limits/request of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Set resources of a local file without talking to the server
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
# Expected to fail: same invocation with --dry-run instead of --local
# (presumably --dry-run needs a live object on the server — TODO confirm)
! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's cpu limits (no -c: applies to every container)
kubectl set resources deployment nginx-deployment-resources --limits=cpu=100m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Set a non-existing container should fail
! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
# Set the limit of a specific container in deployment
kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Set limits/requests of a deployment specified by a file
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Show dry-run works on running deployments
kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml "${kube_flags[@]}"
# Expected to fail: --local cannot be combined with a live resource by name.
! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}"
# Neither dry-run nor the failed --local call should have mutated the server object.
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Clean up
kubectl delete deployment nginx-deployment-resources "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# Exercises kubectl deployment workflows in a fresh namespace: old/new
# generators, cascading vs. non-cascading deletion, autoscale, rollout
# undo/pause/resume, `set image`, `set env`, and initializer handling.
# Commands prefixed with `!` are EXPECTED to fail.
run_deployment_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing deployments"
# Test kubectl create deployment (using default - old generator)
kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
# and old generator was used, i.e. old defaults are applied
output_message=$(kubectl get deployment.extensions/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_not_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1'
# Clean up
kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"
# Test kubectl create deployment
kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
# and new generator was used, i.e. new defaults are applied
output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
# Clean up
kubectl delete deployment test-nginx-apps "${kube_flags[@]}"
### Test kubectl create deployment should not fail validation
# Pre-Condition: No deployment exists.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}"
# Post-Condition: Deployment "deployment-with-unixuserid" is created.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
# Clean up
kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}"
### Test cascading deletion
## Test that rs is deleted when deployment is deleted.
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
# Deleting the deployment should delete the rs.
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
## Test that rs is not deleted when deployment is deleted with cascade set to false.
# Pre-condition: no deployment and rs exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Delete the deployment with cascade set to false (orphans the rs).
kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
# Wait for the deployment to be deleted and then verify that rs is not
# deleted.
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Cleanup
# Find the name of the rs to be deleted.
output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
kubectl delete rs ${output_message} "${kube_flags[@]}"
### Auto scale deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}"
### Rollback a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Update the deployment (revision 2)
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1 with dry-run - should be no-op; grep checks the
# would-be template is printed without applying it.
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
# Brief pause to let the rollback propagate before asserting.
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1000000 - should be no-op (revision does not exist)
kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to last revision
kubectl rollout undo deployment nginx "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Pause the deployment
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
# A paused deployment cannot be rolled back
! kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Resume the deployment
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
# The resumed deployment can now be rolled back
kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Check that the new replica set has all old revisions stored in an annotation
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
# Check that trying to watch the status of a superseded revision returns an error
! kubectl rollout status deployment/nginx --revision=3
# Create a second deployment by renaming the manifest on the fly.
cat hack/testdata/deployment-revision1.yaml | ${SED} "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}"
# Deletion of both deployments should not be blocked
kubectl delete deployment nginx2 "${kube_flags[@]}"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
### Set image of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set non-existing container should fail
! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a deployment specified by file
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a local file without talking to the server
# (--local prints the change but must NOT modify the live object — asserted below)
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of all containers of the deployment ("*" targets every container)
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Set image of all containers of the deployment again when the image does not change
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
### Set env of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-config:'
kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:'
# Set env of deployments for all container
kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}"
# Set env of deployments for specific container
kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]}"
# Set env of deployments by configmap
kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}"
# Set env of deployments by secret
kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}"
# Remove specific env of deployment (trailing '-' removes the variable)
kubectl set env deployment nginx-deployment env-
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete configmap test-set-env-config "${kube_flags[@]}"
kubectl delete secret test-set-env-secret "${kube_flags[@]}"
### Delete a deployment with initializer
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
# || true: with a 1s request timeout the create call is allowed to fail/time out
# (presumably because the uninitialized object is not returned — TODO confirm);
# the next assert checks the object exists regardless.
kubectl create --request-timeout=1 -f hack/testdata/deployment-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert 'deployment web' "{{$id_field}}" 'web'
# Delete a deployment
kubectl delete deployment web "${kube_flags[@]}"
# Check Deployment web doesn't exist
output_message=$(! kubectl get deployment web 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" '"web" not found'
set +o nounset
set +o errexit
}
run_rs_tests() {
# Exercises kubectl against ReplicaSets end-to-end: create/delete (cascading
# and --cascade=false), describe, scale (--current-replicas, --selector,
# --all), expose as a service, `kubectl set` mutations (image/env/resources),
# multi-delete, initializer deletion, and autoscale (HPA).
# Relies on helpers defined elsewhere in this file: kube::test::*,
# create_and_use_new_namespace, and the $kube_flags / template-field globals.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:replicasets)"
### Create and stop a replica set, make sure it doesn't leak pods
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
# --cascade=false orphans the pods instead of deleting them with the rs
kubectl delete rs frontend "${kube_flags[@]}" --cascade=false
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replica set frontend from YAML
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rs 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rs 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rs 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rs
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rs false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rs true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
# --current-replicas acts as a precondition: the scale only applies if the
# live replica count matches 3 at the time of the request.
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
# Set up three deploy, two deploy have same label
kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}"
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
# Test kubectl scale --selector
# Only scale-1 and scale-2 carry the run=hello label, so scale-3 stays at 1.
kubectl scale deploy --replicas=2 -l run=hello
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
# Test kubectl scale --all
kubectl scale deploy --replicas=3 --all
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
# Clean-up
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}"
### Expose replica set as service
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl expose rs frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Create a service using service/v1 generator
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
# Cleanup services
kubectl delete service frontend{,-2} "${kube_flags[@]}"
# Test set commands
# Each `kubectl set` mutation below is expected to bump
# metadata.generation by exactly one.
# Pre-condition: frontend replica set exists at generation 1
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
kubectl set env rs/frontend "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
kubectl set resources rs/frontend "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4'
### Delete replica set with id
# Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replica sets
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple replica sets at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete a rs with initializer
# Pre-condition: no rs exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a rs
# The create is expected to hang on the uninitialized object, hence the
# short --request-timeout and the `|| true` to tolerate the timeout error.
kubectl create --request-timeout=1 -f hack/testdata/replicaset-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert 'rs nginx' "{{$id_field}}" 'nginx'
# Delete a rs
kubectl delete rs nginx "${kube_flags[@]}"
# check rs nginx doesn't exist
output_message=$(! kubectl get rs nginx 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" '"nginx" not found'
if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
### Auto scale replica set
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
# (the asserted '80' is the server-side default CPU target)
kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rs frontend "${kube_flags[@]}"
# Clean up
kubectl delete rs frontend "${kube_flags[@]}"
fi
set +o nounset
set +o errexit
}
run_daemonset_tests() {
# Exercises kubectl against DaemonSets: verifies that a no-op re-apply does
# not bump spec.templateGeneration, while each `kubectl set` mutation
# (image/env/resources) bumps it by exactly one.
# Relies on kube::test::* helpers and globals defined elsewhere in this file.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets)"
### Create a rolling update DaemonSet
# Pre-condition: no DaemonSet exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should be 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
# Re-applying the identical manifest must be a no-op
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should stay 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
# Test set commands
kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4'
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_daemonset_history_tests() {
# Exercises `kubectl rollout undo` for DaemonSets against their
# controllerrevision history: no-op rollback to the current revision,
# dry-run rollback, real rollback to revision 1, rollback to a
# non-existent revision (must fail), and rollback to the previous revision.
# Relies on kube::test::* helpers and IMAGE_* / template-field globals
# defined elsewhere in this file.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"
### Test rolling back a DaemonSet
# Pre-condition: no DaemonSet or its pods exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a DaemonSet (revision 1); --record stores the command in the
# change-cause annotation, which the controllerrevision assertion greps for.
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the DaemonSet (revision 2) - rv2 manifest has two containers
kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
# A failed rollback must leave the DaemonSet untouched
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo daemonset "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
run_statefulset_history_tests() {
# Exercises `kubectl rollout undo` for StatefulSets against their
# controllerrevision history — the StatefulSet analogue of
# run_daemonset_history_tests: no-op rollback, dry-run, rollback to
# revision 1, rollback to a non-existent revision (must fail), and
# rollback to the previous revision.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"
### Test rolling back a StatefulSet
# Pre-condition: no statefulset or its pods exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a StatefulSet (revision 1); --record stores the command in the
# change-cause annotation, which the controllerrevision assertion greps for.
kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the statefulset (revision 2) - rv2 manifest has two containers
kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
# A failed rollback must leave the StatefulSet untouched
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo statefulset "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up - delete newest configuration
kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
set +o nounset
set +o errexit
}
run_multi_resources_tests() {
# Exercises kubectl verbs (create/get/watch/describe/replace/edit/label/
# annotate/delete) against multi-resource manifests in five formats, then
# against a manifest fetched from a URL.
# Fixes vs. the previous revision: all $file / $replace_file / temp-file
# expansions are now double-quoted (ShellCheck SC2086) so the function is
# safe if KUBE_TEMP or testdata paths ever contain spaces; a duplicated
# "# Command" comment was removed. No behavior change for current inputs.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:multiple resources)"
FILES="hack/testdata/multi-resource-yaml
hack/testdata/multi-resource-list
hack/testdata/multi-resource-json
hack/testdata/multi-resource-rclist
hack/testdata/multi-resource-svclist"
YAML=".yaml"
JSON=".json"
# Intentionally unquoted: relies on word splitting to iterate the list above.
for file in $FILES; do
# Each base name exists with either a .yaml or a .json extension; pick
# whichever is present and derive the matching "-modify" replacement file.
if [ -f "${file}${YAML}" ]
then
file="${file}${YAML}"
replace_file="${file%.yaml}-modify.yaml"
else
file="${file}${JSON}"
replace_file="${file%.json}-modify.json"
fi
# Which objects each fixture is expected to contain:
has_svc=true
has_rc=true
two_rcs=false
two_svcs=false
if [[ "${file}" == *rclist* ]]; then
has_svc=false
two_rcs=true
fi
if [[ "${file}" == *svclist* ]]; then
has_rc=false
two_svcs=true
fi
### Create, get, describe, replace, label, annotate, and then delete service nginxsvc and replication controller my-nginx from 5 types of files:
### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation
### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type
echo "Testing with file ${file} and replace with file ${replace_file}"
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f "${file}" "${kube_flags[@]}"
# Post-condition: mock service (and mock2) exists
if [ "$has_svc" = true ]; then
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
else
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
fi
fi
# Post-condition: mock rc (and mock2) exists
if [ "$has_rc" = true ]; then
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
else
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
fi
fi
# Command
kubectl get -f "${file}" "${kube_flags[@]}"
# Command: watching multiple resources should return "not supported" error
WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error"
kubectl get -f "${file}" "${kube_flags[@]}" "--watch" 2> "${WATCH_ERROR_FILE}" || true
if ! grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then
kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat "${WATCH_ERROR_FILE}")" "1"
fi
kubectl describe -f "${file}" "${kube_flags[@]}"
# Command
kubectl replace -f "${replace_file}" --force --cascade "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
# Command: kubectl edit multiple resources
# Use a scripted EDITOR that rewrites "status: replaced" -> "status: edited"
# so `kubectl edit` runs non-interactively.
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/usr/bin/env bash\n${SED} -i \"s/status\:\ replaced/status\:\ edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are edited
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
# cleaning
rm "${temp_editor}"
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing labels.
kubectl-with-retry label -f "${file}" labeled=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service and mock rc (and mock2) are labeled
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing annotations.
kubectl-with-retry annotate -f "${file}" annotated=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
# Cleanup resources created
kubectl delete -f "${file}" "${kube_flags[@]}"
done
#############################
# Multiple Resources via URL#
#############################
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: service(mock) and rc(mock) exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
# Clean up
kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
# Post-condition: no service (other than default kubernetes services) or replication controller exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
run_kubectl_config_set_tests() {
# Exercises `kubectl config set` for certificate-authority-data: setting the
# value from raw bytes (--set-raw-bytes) and from a pre-base64-encoded
# string must produce identical kubeconfig contents.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:config set)"
kubectl config set-cluster test-cluster --server="https://does-not-work"
# Get the api cert and add a comment to avoid flag parsing problems
# (TMPDIR falls back to /tmp when unset; apiserver.crt is written there by
# the test harness)
cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt")
kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes
r_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
# Now set the same data pre-encoded (echo -n avoids a trailing newline in
# the encoded payload)
encoded=$(echo -n "$cert_data" | base64)
kubectl config set clusters.test-cluster.certificate-authority-data "$encoded"
e_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
# Both write paths must round-trip to the same stored value
test "$e_written" == "$r_written"
set +o nounset
set +o errexit
}
run_kubectl_local_proxy_tests() {
# Exercises `kubectl proxy` via curl: default paths, the /apis discovery
# endpoints, and a custom API prefix (--api-prefix). Uses start-proxy /
# stop-proxy / check-curl-proxy-code helpers defined elsewhere in this file.
set -o nounset
set -o errexit
kube::log::status "Testing kubectl local proxy"
start-proxy
# /api/kubernetes is not a real group -> 404; the core v1 API -> 200
check-curl-proxy-code /api/kubernetes 404
check-curl-proxy-code /api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /metrics 200
fi
if kube::test::if_supports_resource "${static}" ; then
check-curl-proxy-code /static/ 200
fi
stop-proxy
# Make sure the in-development api is accessible by default
start-proxy
check-curl-proxy-code /apis 200
check-curl-proxy-code /apis/extensions/ 200
stop-proxy
# Custom paths let you see everything.
start-proxy /custom
check-curl-proxy-code /custom/api/kubernetes 404
check-curl-proxy-code /custom/api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /custom/metrics 200
fi
# NOTE(review): this repeats the /custom/api/v1/namespaces check from a few
# lines above — possibly intended to be a different endpoint; confirm.
check-curl-proxy-code /custom/api/v1/namespaces 200
stop-proxy
set +o nounset
set +o errexit
}
run_RESTMapper_evaluation_tests() {
# Verifies that requesting an unknown resource type produces the expected
# RESTMapper error ("the server doesn't have a resource type") rather than
# some other failure mode.
# Fix vs. the previous revision: the error-file expansions in the
# redirection and the `cat` calls are now double-quoted (ShellCheck SC2086)
# so the function is safe if KUBE_TEMP ever contains spaces. No behavior
# change for current inputs.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing RESTMapper"
RESTMAPPER_ERROR_FILE="${KUBE_TEMP}/restmapper-error"
### Non-existent resource type should give a recognizeable error
# Pre-condition: None
# Command
# `|| true` keeps errexit from aborting on the (expected) kubectl failure.
kubectl get "${kube_flags[@]}" unknownresourcetype 2>"${RESTMAPPER_ERROR_FILE}" || true
if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then
kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat "${RESTMAPPER_ERROR_FILE}")"
else
kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat "${RESTMAPPER_ERROR_FILE}")"
exit 1
fi
rm "${RESTMAPPER_ERROR_FILE}"
# Post-condition: None
set +o nounset
set +o errexit
}
run_clusterroles_tests() {
# Exercises `kubectl create clusterrole` (verbs, resources, groups,
# resource names, non-resource URLs), `kubectl create clusterrolebinding` /
# `rolebinding` and `kubectl set subject` for users, groups, and service
# accounts. Assertions read back rules/subjects via go-templates.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing clusterroles"
# make sure the server was properly bootstrapped with clusterroles and bindings
kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'
# test `kubectl create clusterrole`
kubectl create "${kube_flags[@]}" clusterrole pod-admin --verb=* --resource=pods
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
# empty apiGroup means the core ("") API group
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
# resource.group syntax pins a resource to a specific API group; two
# resources yield two rules, hence verbs appear twice below
kubectl create "${kube_flags[@]}" clusterrole resource-reader --verb=get,list --resource=pods,deployments.extensions
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
kubectl create "${kube_flags[@]}" clusterrole resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
kubectl create "${kube_flags[@]}" clusterrole url-reader --verb=get --non-resource-url=/logs/* --non-resource-url=/healthz/*
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:'
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}" '/logs/\*:/healthz/\*:'
# test `kubectl create clusterrolebinding`
# test `kubectl set subject clusterrolebinding`
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --clusterrole=admin --user=super-admin
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# `set subject` appends to the existing subject list
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding multi-users --clusterrole=admin --user=user-1 --user=user-2
kube::test::get_object_assert clusterrolebinding/multi-users "{{range.subjects}}{{.name}}:{{end}}" 'user-1:user-2:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-group --clusterrole=admin --group=the-group
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-group --group=foo
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding multi-groups --clusterrole=admin --group=group-1 --group=group-2
kube::test::get_object_assert clusterrolebinding/multi-groups "{{range.subjects}}{{.name}}:{{end}}" 'group-1:group-2:'
# service-account subjects are namespace:name pairs
kubectl create "${kube_flags[@]}" clusterrolebinding super-sa --clusterrole=admin --serviceaccount=otherns:sa-name
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-sa --serviceaccount=otherfoo:foo
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'
# test `kubectl create rolebinding`
# test `kubectl set subject rolebinding`
# --clusterrole vs --role selects the roleRef kind asserted below
kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin
kube::test::get_object_assert rolebinding/admin "{{.roleRef.kind}}" 'ClusterRole'
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
kubectl set subject "${kube_flags[@]}" rolebinding admin --user=foo
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:foo:'
kubectl create "${kube_flags[@]}" rolebinding localrole --role=localrole --group=the-group
kube::test::get_object_assert rolebinding/localrole "{{.roleRef.kind}}" 'Role'
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" rolebinding localrole --group=foo
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
kubectl create "${kube_flags[@]}" rolebinding sarole --role=localrole --serviceaccount=otherns:sa-name
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" rolebinding sarole --serviceaccount=otherfoo:foo
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'
set +o nounset
set +o errexit
}
# run_role_tests exercises `kubectl create role`, covering the supported
# permutations of --verb / --resource (bare, group-qualified, subresource,
# group+subresource, --resource-name, and multiple resources), and asserts
# on the rules generated in the stored Role object. Error paths for unknown
# resources/groups are checked via server discovery failures.
run_role_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing role"
# Create Role from command (only resource)
kubectl create "${kube_flags[@]}" role pod-admin --verb=* --resource=pods
# The '*' verb must round-trip literally (escaped in the expected pattern).
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
# Bare resources land in the core ("") API group.
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
# Unknown resource types must be rejected by server-side discovery.
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-pod-admin --verb=* --resource=invalid-resource 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"invalid-resource\""
# Create Role from command (resource + group)
kubectl create "${kube_flags[@]}" role group-reader --verb=get,list --resource=deployments.extensions
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'deployments:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=deployments.invalid-group 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"deployments\" in group \"invalid-group\""
# Create Role from command (resource / subresource)
kubectl create "${kube_flags[@]}" role subresource-reader --verb=get,list --resource=pods/status
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
# Create Role from command (resource + group / subresource)
kubectl create "${kube_flags[@]}" role group-subresource-reader --verb=get,list --resource=replicasets.extensions/scale
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'replicasets/scale:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=rs.invalid-group/scale 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"rs\" in group \"invalid-group\""
# Create Role from command (resource + resourcename)
kubectl create "${kube_flags[@]}" role resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
# Create Role from command (multi-resources)
# Two --resource values produce two rules; the assertions concatenate
# across rules, hence the doubled 'get:list:' expectation.
kubectl create "${kube_flags[@]}" role resource-reader --verb=get,list --resource=pods/status,deployments.extensions
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:deployments:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
set +o nounset
set +o errexit
}
# run_assert_short_name_tests verifies that resource short names (e.g. "cm"
# for configmaps) are exported in the discovery document served at /api/v1.
run_assert_short_name_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing assert short name"
kube::log::status "Testing propagation of short names for resources"
# --raw returns the unprocessed discovery JSON from the apiserver.
output_message=$(kubectl get --raw=/api/v1)
## test if a short name is exported during discovery
# NOTE(review): this matches the full serialized APIResource entry, so any
# change to field order or the configmap verb set will break this assertion.
kube::test::if_has_string "${output_message}" '{"name":"configmaps","singularName":"","namespaced":true,"kind":"ConfigMap","verbs":\["create","delete","deletecollection","get","list","patch","update","watch"\],"shortNames":\["cm"\]}'
set +o nounset
set +o errexit
}
# run_assert_categories_tests verifies that resource categories (the "all"
# grouping for pods) are exported in the /api/v1 discovery document.
run_assert_categories_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing propagation of categories for resources"
# Extract just the pods APIResource object from the discovery JSON.
output_message=$(kubectl get --raw=/api/v1 | grep -o '"name":"pods"[^}]*}')
kube::test::if_has_string "${output_message}" '"categories":\["all"\]'
set +o nounset
set +o errexit
}
# run_kubectl_create_error_tests verifies `kubectl create` error paths:
# no arguments, templates containing empty string lists (must error, not
# panic), and invalid combinations of --raw with piped input / --edit.
#
# Fixes relative to the previous version: both kube::log::status messages
# opened with a stray escaped quote (\") that was never closed, leaving a
# dangling '"' in the log output; and ${ERROR_FILE} was expanded unquoted
# inside $(cat ...), which would break if KUBE_TEMP contained whitespace.
run_kubectl_create_error_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl create with error"
# Passing no arguments to create is an error
! kubectl create
## kubectl create should not panic on empty string lists in a template
ERROR_FILE="${KUBE_TEMP}/validation-error"
kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml "${kube_flags[@]}" 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the empty string
if grep -q "unknown object type \"nil\" in ReplicationController" "${ERROR_FILE}"; then
kube::log::status "kubectl create with empty string list returns error as expected: $(cat "${ERROR_FILE}")"
else
kube::log::status "kubectl create with empty string list returns unexpected error or non-error: $(cat "${ERROR_FILE}")"
exit 1
fi
rm "${ERROR_FILE}"
# Posting a pod to namespaces should fail. Also tests --raw forcing the post location
[ "$( kubectl convert -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces -f - --v=8 2>&1 | grep 'cannot be handled as a Namespace: converting (v1.Pod)')" ]
# --raw and --edit are mutually exclusive flags.
[ "$( kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml --edit 2>&1 | grep 'raw and --edit are mutually exclusive')" ]
set +o nounset
set +o errexit
}
# run_cmd_with_img_tests verifies client-side validation of the --image flag
# on `kubectl run`: a valid image reference creates a deployment, while an
# image name violating the reference format is rejected before any API call.
run_cmd_with_img_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing cmd with image"
# Test that a valid image reference value is provided as the value of --image in `kubectl run <name> --image`
output_message=$(kubectl run test1 --image=validname)
kube::test::if_has_string "${output_message}" 'deployment.apps/test1 created'
# Clean up the deployment created above so later tests start clean.
kubectl delete deployments test1
# test invalid image name
# Uppercase letters are not allowed in image repository names, so this
# fails image-reference parsing.
output_message=$(! kubectl run test2 --image=InvalidImageName 2>&1)
kube::test::if_has_string "${output_message}" 'error: Invalid image name "InvalidImageName": invalid reference format'
set +o nounset
set +o errexit
}
# run_client_config_tests verifies kubeconfig loading error paths: missing
# config files, references to contexts/clusters/users that do not exist in
# the loaded config, and syntactically invalid config content.
run_client_config_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing client config"
# Command
# Pre-condition: kubeconfig "missing" is not a file or directory
output_message=$(! kubectl get pod --context="" --kubeconfig=missing 2>&1)
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: kubeconfig "missing" is not a file or directory
# Command
output_message=$(! kubectl get pod --user="" --kubeconfig=missing 2>&1)
# Post-condition: --user contains a valid / empty value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Command
output_message=$(! kubectl get pod --cluster="" --kubeconfig=missing 2>&1)
# Post-condition: --cluster contains a "valid" value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: context "missing-context" does not exist
# Command
output_message=$(! kubectl get pod --context="missing-context" 2>&1)
kube::test::if_has_string "${output_message}" 'context was not found for specified context: missing-context'
# Post-condition: invalid or missing context returns error
# Pre-condition: cluster "missing-cluster" does not exist
# Command
output_message=$(! kubectl get pod --cluster="missing-cluster" 2>&1)
kube::test::if_has_string "${output_message}" 'no server found for cluster "missing-cluster"'
# Post-condition: invalid or missing cluster returns error
# Pre-condition: user "missing-user" does not exist
# Command
output_message=$(! kubectl get pod --user="missing-user" 2>&1)
kube::test::if_has_string "${output_message}" 'auth info "missing-user" does not exist'
# Post-condition: invalid or missing user returns error
# test invalid config
# Corrupt the apiVersion of the current config and confirm loading fails.
kubectl config view | sed -E "s/apiVersion: .*/apiVersion: v-1/g" > "${TMPDIR:-/tmp}"/newconfig.yaml
# NOTE(review): this invokes the built kubectl binary directly rather than
# the test wrapper — presumably so the config-load failure is exercised
# without wrapper-injected flags; confirm against the harness setup.
output_message=$(! "${KUBE_OUTPUT_HOSTBIN}/kubectl" get pods --context="" --user="" --kubeconfig="${TMPDIR:-/tmp}"/newconfig.yaml 2>&1)
kube::test::if_has_string "${output_message}" "Error loading config file"
output_message=$(! kubectl get pod --kubeconfig=missing-config 2>&1)
kube::test::if_has_string "${output_message}" 'no such file or directory'
set +o nounset
set +o errexit
}
# run_service_accounts_tests verifies creating and deleting a service account
# in a dedicated namespace, then tears the namespace down.
# Note: $id_field is a templating expression defined by the test harness
# (elsewhere in this file) that selects the object name.
run_service_accounts_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing service accounts"
### Create a new namespace
# Pre-condition: the test-service-accounts namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-service-accounts
# Post-condition: namespace 'test-service-accounts' is created.
kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'
### Create a service account in a specific namespace
# Command
kubectl create serviceaccount test-service-account --namespace=test-service-accounts
# Post-condition: secret exists and has expected values
# (extra flags ride inside the object argument; the assert helper splits them)
kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
# Clean-up
kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
# Clean up
kubectl delete namespace test-service-accounts
set +o nounset
set +o errexit
}
# run_job_tests verifies cronjob creation via `kubectl run --schedule`, job
# creation from a cronjob with `kubectl create job --from`, and that
# --dry-run reports the object name without persisting it.
run_job_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing job"
### Create a new namespace
# Pre-condition: the test-jobs namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-jobs\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-jobs
# Post-condition: namespace 'test-jobs' is created.
kube::test::get_object_assert 'namespaces/test-jobs' "{{$id_field}}" 'test-jobs'
### Create a cronjob in a specific namespace
# Schedule "59 23 31 2 *" can never fire (Feb 31), so no pods are spawned.
kubectl run pi --schedule="59 23 31 2 *" --namespace=test-jobs --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: assertion object exists
kube::test::get_object_assert 'cronjob/pi --namespace=test-jobs' "{{$id_field}}" 'pi'
kubectl get cronjob/pi --namespace=test-jobs
kubectl describe cronjob/pi --namespace=test-jobs
### Create a job in dry-run mode
output_message=$(kubectl create job test-job --from=cronjob/pi --dry-run=true --namespace=test-jobs -o name)
# Post-condition: The text 'job.batch/test-job' should be part of the output
kube::test::if_has_string "${output_message}" 'job.batch/test-job'
# Post-condition: The test-job wasn't created actually
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}{{end}}" ''
### Create a job in a specific namespace
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs
# Post-Condition: assertion object exists
kube::test::get_object_assert 'job/test-job --namespace=test-jobs' "{{$id_field}}" 'test-job'
kubectl get job/test-job --namespace=test-jobs
kubectl describe job/test-job --namespace=test-jobs
#Clean up
kubectl delete job test-job --namespace=test-jobs
kubectl delete cronjob pi --namespace=test-jobs
kubectl delete namespace test-jobs
set +o nounset
set +o errexit
}
# run_pod_templates_tests verifies create / get / delete of PodTemplate
# objects from a JSON fixture.
run_pod_templates_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing pod templates"
### Create PODTEMPLATE
# Pre-condition: no PODTEMPLATE
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
# Post-condition: nginx PODTEMPLATE is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
### Printing pod templates works
kubectl get podtemplates "${kube_flags[@]}"
# Sanity check that the human-readable/yaml output mentions the template.
[[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]
### Delete nginx pod template by name
# Pre-condition: nginx pod template is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
# Command
kubectl delete podtemplate nginx "${kube_flags[@]}"
# Post-condition: No templates exist
# ("podtemplate" singular here vs "podtemplates" above — kubectl accepts both)
kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
set +o nounset
set +o errexit
}
# run_stateful_set_tests verifies creating, scaling, and deleting a
# StatefulSet, asserting on replica count and observedGeneration.
# ($statefulset_replicas_field / $statefulset_observed_generation are
# template expressions defined by the harness elsewhere in this file.)
run_stateful_set_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:statefulsets)"
### Create and stop statefulset, make sure it doesn't leak pods
# Pre-condition: no statefulset exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create statefulset
kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
### Scale statefulset test with current-replicas and replicas
# Pre-condition: 0 replicas
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
# Command: Scale up
# --current-replicas makes the scale conditional on the current size.
kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
# Post-condition: 1 replica, named nginx-0
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
# Typically we'd wait and confirm that N>1 replicas are up, but this framework
# doesn't start the scheduler, so pet-0 will block all others.
# TODO: test robust scaling in an e2e.
wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"
### Clean up
kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
set +o nounset
set +o errexit
}
# run_lists_tests verifies that a v1 List manifest containing objects of
# multiple API versions can be created and its members deleted individually.
run_lists_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:lists)"
### Create a List with objects from multiple versions
# Command
kubectl create -f hack/testdata/list.yaml "${kube_flags[@]}"
### Delete the List with objects from multiple versions
# Command
# Deletion targets the individual members, not the List wrapper itself.
kubectl delete service/list-service-test deployment/list-deployment-test
set +o nounset
set +o errexit
}
# run_persistent_volumes_tests verifies create/delete round-trips for three
# PersistentVolume example manifests (two local volumes and one GCE PD).
run_persistent_volumes_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing persistent volumes"
### Create and delete persistent volume examples
# Pre-condition: no persistent volumes currently exist
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
kubectl delete pv pv0001 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
kubectl delete pv pv0002 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
kubectl delete pv pv0003 "${kube_flags[@]}"
# Post-condition: no PVs
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
# run_persistent_volume_claims_tests verifies create/delete round-trips for
# three PersistentVolumeClaim example manifests (two YAML, one JSON).
run_persistent_volume_claims_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing persistent volumes claims"
### Create and delete persistent volume claim examples
# Pre-condition: no persistent volume claims currently exist
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
kubectl delete pvc myclaim-1 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
kubectl delete pvc myclaim-2 "${kube_flags[@]}"
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
kubectl delete pvc myclaim-3 "${kube_flags[@]}"
# Post-condition: no PVCs
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
# run_storage_class_tests verifies creating a StorageClass from inline JSON
# on stdin, listing it via both the full name and the "sc" short name, and
# deleting it.
run_storage_class_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing storage class"
### Create and delete storage class
# Pre-condition: no storage classes currently exist
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# The StorageClass manifest is fed via a heredoc on stdin (-f -).
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "StorageClass",
"apiVersion": "storage.k8s.io/v1",
"metadata": {
"name": "storage-class-name"
},
"provisioner": "kubernetes.io/fake-provisioner-type",
"parameters": {
"zone":"us-east-1b",
"type":"ssd"
}
}
__EOF__
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
# Same listing through the short name "sc" must agree.
kube::test::get_object_assert sc "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
kubectl delete storageclass storage-class-name "${kube_flags[@]}"
# Post-condition: no storage classes
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
# run_nodes_tests verifies describe output for nodes (with and without
# events), toggling schedulability via `kubectl patch`, and that the
# TokenReview webhook endpoints accept POSTed fixtures.
run_nodes_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:nodes)"
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
# Describe command should print events information by default
kube::test::describe_object_events_assert nodes "127.0.0.1"
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert nodes "127.0.0.1" false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert nodes "127.0.0.1" true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert nodes
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert nodes false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert nodes true
### kubectl patch update can mark node unschedulable
# Pre-condition: node is schedulable ('<no value>' = field unset)
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
# Post-condition: node is unschedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
# Patching the field to null clears it entirely.
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
# Post-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
# check webhook token authentication endpoint, kubectl doesn't actually display the returned object so this isn't super useful
# but it proves that works
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1beta1.json --validate=false
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1.json --validate=false
set +o nounset
set +o errexit
}
# run_authorization_tests verifies the authorization.k8s.io
# SubjectAccessReview endpoints: once through kubectl create (output is not
# inspected; success alone proves the endpoint works), and once by POSTing
# the fixture directly with curl and asserting the review comes back
# '"allowed": true'.
#
# Fix relative to the previous version: the curl/grep verification block
# was duplicated verbatim for v1beta1 and v1, differing only in the API
# version embedded in the URL and fixture path — it is now a single loop.
run_authorization_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing authorization"
# check remote authorization endpoint, kubectl doesn't actually display the returned object so this isn't super useful
# but it proves that works
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1.json --validate=false
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json --validate=false
# POST the matching fixture at each API version and check the decision.
local version
for version in v1beta1 v1; do
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
curl -k -H "Content-Type:" "http://localhost:8080/apis/authorization.k8s.io/${version}/subjectaccessreviews" -XPOST -d "@test/fixtures/pkg/kubectl/cmd/create/sar-${version}.json" > "${SAR_RESULT_FILE}"
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
else
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
exit 1
fi
rm "${SAR_RESULT_FILE}"
done
set +o nounset
set +o errexit
}
# run_retrieve_multiple_tests verifies that a single `kubectl get` can fetch
# heterogeneous objects (a node and a service) in one request.
run_retrieve_multiple_tests() {
set -o nounset
set -o errexit
# switch back to the default namespace
kubectl config set-context "${CONTEXT}" --namespace=""
kube::log::status "Testing kubectl(v1:multiget)"
kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'
set +o nounset
set +o errexit
}
# run_resource_aliasing_tests verifies the "all" resource alias with a label
# selector against the cassandra example (RC + service).
run_resource_aliasing_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing resource aliasing"
kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/controller.yaml "${kube_flags[@]}"
kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/service.yaml "${kube_flags[@]}"
object="all -l'app=cassandra'"
request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}"
# all 4 cassandra's might not be in the request immediately...
# Accept 4, 3, or 2 label hits since replica pods may still be appearing.
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:' || \
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:'
kubectl delete all -l app=cassandra "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# run_kubectl_explain_tests smoke-tests `kubectl explain` for a full resource
# name, a short name, a nested field path, and a non-core resource. Only
# exit status is checked (errexit aborts on failure); output is not asserted.
run_kubectl_explain_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:explain)"
kubectl explain pods
# shortcuts work
kubectl explain po
kubectl explain po.status.message
# cronjob work
kubectl explain cronjob
set +o nounset
set +o errexit
}
# run_swagger_tests fetches the swagger 1.2 schema for /api/v1 and checks
# for a few expected description strings.
run_swagger_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing swagger"
# Verify schema
file="${KUBE_TEMP}/schema-v1.json"
curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/v1" > "${file}"
# [[ "$(grep ...)" ]] is true iff grep produced output; under errexit a
# missing string aborts the test run.
[[ "$(grep "list of returned" "${file}")" ]]
[[ "$(grep "List of services" "${file}")" ]]
[[ "$(grep "Watch for changes to the described resources" "${file}")" ]]
set +o nounset
set +o errexit
}
# run_kubectl_sort_by_tests verifies `kubectl get --sort-by`: it must not
# panic on an empty list, must include existing pods in output, and must
# order rows correctly when sorting by name and by a label value.
run_kubectl_sort_by_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl --sort-by"
### sort-by should not panic if no pod exists
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl get pods --sort-by="{metadata.name}"
kubectl get pods --sort-by="{metadata.creationTimestamp}"
### sort-by should works if pod exists
# Create POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Check output of sort-by
output_message=$(kubectl get pods --sort-by="{metadata.name}")
kube::test::if_has_string "${output_message}" "valid-pod"
### Clean up
# Pre-condition: valid-pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
# Post-condition: valid-pod doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### sort-by should works by sorting by name
# Create three PODs
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod1.yaml
# Post-condition: sorted-pod1 is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:'
# Command
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod2.yaml
# Post-condition: sorted-pod1 is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:'
# Command
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod3.yaml
# Post-condition: sorted-pod1 is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
# Check output of sort-by '{metadata.name}'
output_message=$(kubectl get pods --sort-by="{metadata.name}")
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod1:sorted-pod2:sorted-pod3:"
# Check output of sort-by '{metadata.labels.name}'
# NOTE(review): the expected order inverts, so the fixtures presumably set
# their "name" labels in reverse of the object names — verify against
# hack/testdata/sorted-pods/*.yaml.
output_message=$(kubectl get pods --sort-by="{metadata.labels.name}")
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod3:sorted-pod2:sorted-pod1:"
### Clean up
# Pre-condition: valid-pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
# Command
kubectl delete "${kube_flags[@]}" pod --grace-period=0 --force --all
# Post-condition: valid-pod doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
set +o nounset
set +o errexit
}
# run_kubectl_all_namespace_tests verifies --all-namespaces behavior:
# it overrides an explicit --namespace, adds a NAMESPACE column for
# namespaced resources, and is a no-op for cluster-scoped resources.
run_kubectl_all_namespace_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl --all-namespace"
# Pre-condition: the "default" namespace exists
kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:'
### Create POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Verify a specific namespace is ignored when all-namespaces is provided
# Command
kubectl get pods --all-namespaces --namespace=default
### Check --all-namespaces option shows namespaces
# Create objects in multiple namespaces
kubectl create "${kube_flags[@]}" namespace all-ns-test-1
kubectl create "${kube_flags[@]}" serviceaccount test -n all-ns-test-1
kubectl create "${kube_flags[@]}" namespace all-ns-test-2
kubectl create "${kube_flags[@]}" serviceaccount test -n all-ns-test-2
# Ensure listing across namespaces displays the namespace
output_message=$(kubectl get serviceaccounts --all-namespaces "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "all-ns-test-1"
kube::test::if_has_string "${output_message}" "all-ns-test-2"
# Clean up
kubectl delete "${kube_flags[@]}" namespace all-ns-test-1
kubectl delete "${kube_flags[@]}" namespace all-ns-test-2
### Clean up
# Pre-condition: valid-pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
# Post-condition: valid-pod doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Verify flag all-namespaces is ignored for rootScoped resources
# Pre-condition: node exists
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
# Command
output_message=$(kubectl get nodes --all-namespaces 2>&1)
# Post-condition: output with no NAMESPACE field
kube::test::if_has_not_string "${output_message}" "NAMESPACE"
set +o nounset
set +o errexit
}
# run_certificates_tests verifies `kubectl certificate approve|deny`, each
# addressed both by CSR name and by manifest file (-f). Every cycle:
# create CSR -> condition empty -> approve/deny -> condition set -> delete.
run_certificates_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing certificates"
# approve
# Approve by name.
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
# Approve by file reference.
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate approve -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
# deny
# Deny by name.
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny foo "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
# Deny by file reference.
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
kubectl certificate deny -f hack/testdata/csr.yml "${kube_flags[@]}"
kubectl get csr "${kube_flags[@]}" -o json
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
set +o nounset
set +o errexit
}
# Exercises node lifecycle commands against the single test node "127.0.0.1":
# taint/untaint, cordon/uncordon (including --dry-run and --selector forms),
# and drain (--dry-run, --pod-selector, and invalid flag combinations).
# Requires: kube_flags, id_field, and the kube::test helpers from this file.
run_cluster_management_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing cluster-management commands"
# Pre-condition: exactly one node, named 127.0.0.1
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
# create test pods we can work with
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "test-pod-1",
"labels": {
"e": "f"
}
},
"spec": {
"containers": [
{
"name": "container-1",
"resources": {},
"image": "test-image"
}
]
}
}
__EOF__
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "test-pod-2",
"labels": {
"c": "d"
}
},
"spec": {
"containers": [
{
"name": "container-1",
"resources": {},
"image": "test-image"
}
]
}
}
__EOF__
# taint/untaint
# Pre-condition: node has no taints
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.taints}}" '<no value>'
# taint can add a taint
kubectl taint node 127.0.0.1 dedicated=foo:PreferNoSchedule
kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{.effect}}{{end}}' 'PreferNoSchedule'
# taint can remove a taint (trailing "-" on the key removes it)
kubectl taint node 127.0.0.1 dedicated-
# Post-condition: node has no taints
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.taints}}" '<no value>'
### kubectl cordon update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl cordon "127.0.0.1" --dry-run
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl drain update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl drain "127.0.0.1" --dry-run
# Post-condition: node still exists, node is still schedulable
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl drain with --pod-selector only evicts pods that match the given selector
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
# Pre-condition: test-pod-1 and test-pod-2 exist
kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
# Selector 'e in (f)' matches only test-pod-1 (labeled e=f above)
kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
# only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
# delete pod no longer in use
kubectl delete pod/test-pod-2
# Post-condition: node is schedulable
kubectl uncordon "127.0.0.1"
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl uncordon update with --dry-run is a no-op
# Pre-condition: node is already schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
response=$(kubectl uncordon "127.0.0.1" --dry-run)
kube::test::if_has_string "${response}" 'already uncordoned'
# Post-condition: node is still schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl drain command fails when both --selector and a node argument are given
# Pre-condition: node exists and contains label test=label
kubectl label node "127.0.0.1" "test=label"
kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
# "!" inverts the exit status so errexit tolerates the expected failure
response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
kube::test::if_has_string "${response}" 'cannot specify both a node name'
### kubectl cordon command fails when no arguments are passed
# Pre-condition: node exists
response=$(! kubectl cordon 2>&1)
kube::test::if_has_string "${response}" 'error\: USAGE\: cordon NODE'
### kubectl cordon selects no nodes with an empty --selector=
# Pre-condition: node "127.0.0.1" is uncordoned
kubectl uncordon "127.0.0.1"
response=$(! kubectl cordon --selector= 2>&1)
kube::test::if_has_string "${response}" 'must provide one or more resources'
# test=label matches our node
response=$(kubectl cordon --selector test=label)
kube::test::if_has_string "${response}" 'node/127.0.0.1 cordoned'
# invalid=label does not match any nodes
response=$(kubectl cordon --selector invalid=label)
kube::test::if_has_not_string "${response}" 'cordoned'
# Post-condition: node "127.0.0.1" is cordoned
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
set +o nounset
set +o errexit
}
# Exercises the legacy kubectl plugin mechanism driven by KUBECTL_PLUGINS_PATH:
# plugin discovery (single and multiple search paths), help output, execution,
# command trees, and the KUBECTL_PLUGINS_* environment passed to plugins.
# Requires: test fixtures under test/fixtures/pkg/kubectl/plugins{,2}.
run_plugins_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl plugins"
# top-level plugin command
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl -h 2>&1)
kube::test::if_has_string "${output_message}" 'plugin\s\+Runs a command-line plugin'
# no plugins
output_message=$(! kubectl plugin 2>&1)
kube::test::if_has_string "${output_message}" 'no plugins installed'
# single plugins path
output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin 2>&1)
kube::test::if_has_string "${output_message}" 'echo\s\+Echoes for test-cmd'
kube::test::if_has_string "${output_message}" 'get\s\+The wonderful new plugin-based get!'
kube::test::if_has_string "${output_message}" 'error\s\+The tremendous plugin that always fails!'
kube::test::if_has_not_string "${output_message}" 'The hello plugin'
kube::test::if_has_not_string "${output_message}" 'Incomplete plugin'
kube::test::if_has_not_string "${output_message}" 'no plugins installed'
# multiple plugins path
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin -h 2>&1)
kube::test::if_has_string "${output_message}" 'echo\s\+Echoes for test-cmd'
kube::test::if_has_string "${output_message}" 'get\s\+The wonderful new plugin-based get!'
kube::test::if_has_string "${output_message}" 'error\s\+The tremendous plugin that always fails!'
kube::test::if_has_string "${output_message}" 'hello\s\+The hello plugin'
kube::test::if_has_not_string "${output_message}" 'Incomplete plugin'
# don't override existing commands
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl get -h 2>&1)
kube::test::if_has_string "${output_message}" 'Display one or many resources'
# FIX: was "$output_message{output_message}" — a broken expansion that appended
# the literal text "{output_message}", so the negative assertion was run
# against a corrupted string instead of the actual command output.
kube::test::if_has_not_string "${output_message}" 'The wonderful new plugin-based get'
# plugin help
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin hello -h 2>&1)
kube::test::if_has_string "${output_message}" 'The hello plugin is a new plugin used by test-cmd to test multiple plugin locations.'
kube::test::if_has_string "${output_message}" 'Usage:'
# run plugin
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin hello 2>&1)
kube::test::if_has_string "${output_message}" '#hello#'
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin echo 2>&1)
kube::test::if_has_string "${output_message}" 'This plugin works!'
# "hello" lives only in plugins2/, so it is unknown with a single path
output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/ kubectl plugin hello 2>&1)
kube::test::if_has_string "${output_message}" 'unknown command'
output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/ kubectl plugin error 2>&1)
kube::test::if_has_string "${output_message}" 'error: exit status 1'
# plugin tree
output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree 2>&1)
kube::test::if_has_string "${output_message}" 'Plugin with a tree of commands'
kube::test::if_has_string "${output_message}" 'child1\s\+The first child of a tree'
kube::test::if_has_string "${output_message}" 'child2\s\+The second child of a tree'
kube::test::if_has_string "${output_message}" 'child3\s\+The third child of a tree'
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 --help 2>&1)
kube::test::if_has_string "${output_message}" 'The first child of a tree'
kube::test::if_has_not_string "${output_message}" 'The second child'
kube::test::if_has_not_string "${output_message}" 'child2'
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 2>&1)
kube::test::if_has_string "${output_message}" 'child one'
kube::test::if_has_not_string "${output_message}" 'child1'
kube::test::if_has_not_string "${output_message}" 'The first child'
# plugin env
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin env -h 2>&1)
kube::test::if_has_string "${output_message}" "This is a flag 1"
kube::test::if_has_string "${output_message}" "This is a flag 2"
kube::test::if_has_string "${output_message}" "This is a flag 3"
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin env --test1=value1 -t value2 2>&1)
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_CURRENT_NAMESPACE'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_CALLER'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_DESCRIPTOR_COMMAND=./env.sh'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_DESCRIPTOR_SHORT_DESC=The plugin envs plugin'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_GLOBAL_FLAG_REQUEST_TIMEOUT=0'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST1=value1'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST2=value2'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST3=default'
set +o nounset
set +o errexit
}
# Verifies kubectl user-impersonation flags: --as-group without --as must be
# rejected, and an impersonated identity (--as / --as-group) must be recorded
# in the spec of a created CSR object.
# Requires: kube_flags_with_token, csr, and the kube::test helpers.
run_impersonation_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing impersonation"
# --as-group alone is invalid; "!" inverts the expected failure for errexit
output_message=$(! kubectl get pods "${kube_flags_with_token[@]}" --as-group=foo 2>&1)
kube::test::if_has_string "${output_message}" 'without impersonating a user'
if kube::test::if_supports_resource "${csr}" ; then
# --as
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1
kube::test::get_object_assert 'csr/foo' '{{.spec.username}}' 'user1'
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}}{{end}}' 'system:authenticated'
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
# --as-group (repeatable; ",,,chameleon" checks odd group names pass through)
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon
kube::test::get_object_assert 'csr/foo' '{{len .spec.groups}}' '3'
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}} {{end}}' 'group2 group1 ,,,chameleon '
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
fi
set +o nounset
set +o errexit
}
# Runs all kubectl tests.
# Requires an env var SUPPORTED_RESOURCES which is a comma separated list of
# resources for which tests should be run.
# Top-level test driver: validates the environment, defines the shared
# kube_flags / field-path variables used by every sub-suite, ensures baseline
# objects exist, then dispatches each run_*_tests suite (via record_command)
# gated on kube::test::if_supports_resource. Exits non-zero if any suite
# recorded a failure in foundError.
runTests() {
foundError=""
if [ -z "${SUPPORTED_RESOURCES:-}" ]; then
echo "Need to set SUPPORTED_RESOURCES env var. It is a list of resources that are supported and hence should be tested. Set it to (*) to test all resources"
exit 1
fi
kube::log::status "Checking kubectl version"
kubectl version
# Generate a random namespace name, based on the current time (to make
# debugging slightly easier) and a random number. Don't use `date +%N`
# because that doesn't work on OSX.
create_and_use_new_namespace() {
local ns_name
ns_name="namespace-$(date +%s)-${RANDOM}"
kube::log::status "Creating namespace ${ns_name}"
kubectl create namespace "${ns_name}"
kubectl config set-context "${CONTEXT}" --namespace="${ns_name}"
}
# Flags shared by (almost) every kubectl invocation in the sub-suites
kube_flags=(
-s "http://127.0.0.1:${API_PORT}"
)
# token defined in hack/testdata/auth-tokens.csv
kube_flags_with_token=(
-s "https://127.0.0.1:${SECURE_API_PORT}" --token=admin-token --insecure-skip-tls-verify=true
)
if [[ -z "${ALLOW_SKEW:-}" ]]; then
kube_flags+=("--match-server-version")
kube_flags_with_token+=("--match-server-version")
fi
if kube::test::if_supports_resource "${nodes}" ; then
[ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
fi
# Go-template field paths shared by the assertion helpers in all sub-suites
id_field=".metadata.name"
labels_field=".metadata.labels"
annotations_field=".metadata.annotations"
service_selector_field=".spec.selector"
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
rs_replicas_field=".spec.replicas"
port_field="(index .spec.ports 0).port"
port_name="(index .spec.ports 0).name"
second_port_field="(index .spec.ports 1).port"
second_port_name="(index .spec.ports 1).name"
image_field="(index .spec.containers 0).image"
pod_container_name_field="(index .spec.containers 0).name"
container_name_field="(index .spec.template.spec.containers 0).name"
hpa_min_field=".spec.minReplicas"
hpa_max_field=".spec.maxReplicas"
hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
template_labels=".spec.template.metadata.labels.name"
statefulset_replicas_field=".spec.replicas"
statefulset_observed_generation=".status.observedGeneration"
job_parallelism_field=".spec.parallelism"
deployment_replicas=".spec.replicas"
secret_data=".data"
secret_type=".type"
change_cause_annotation='.*kubernetes.io/change-cause.*'
pdb_min_available=".spec.minAvailable"
pdb_max_unavailable=".spec.maxUnavailable"
generation_field=".metadata.generation"
template_generation_field=".spec.templateGeneration"
container_len="(len .spec.template.spec.containers)"
image_field0="(index .spec.template.spec.containers 0).image"
image_field1="(index .spec.template.spec.containers 1).image"
# Make sure "default" namespace exists.
if kube::test::if_supports_resource "${namespaces}" ; then
output_message=$(kubectl get "${kube_flags[@]}" namespaces)
if [[ ! $(echo "${output_message}" | grep "default") ]]; then
# Create default namespace
kubectl create "${kube_flags[@]}" ns default
fi
fi
# Make sure "kubernetes" service exists.
if kube::test::if_supports_resource "${services}" ; then
# Attempt to create the kubernetes service, tolerating failure (since it might already exist)
kubectl create "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml || true
# Require the service to exist (either we created it or the API server did)
kubectl get "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml
fi
#########################
# Kubectl version #
#########################
record_command run_kubectl_version_tests
#######################
# kubectl config set #
#######################
record_command run_kubectl_config_set_tests
#######################
# kubectl local proxy #
#######################
record_command run_kubectl_local_proxy_tests
#########################
# RESTMapper evaluation #
#########################
record_command run_RESTMapper_evaluation_tests
# find all resources
kubectl "${kube_flags[@]}" api-resources
# find all namespaced resources that support list by name and get them
kubectl "${kube_flags[@]}" api-resources --verbs=list --namespaced -o name | xargs -n 1 kubectl "${kube_flags[@]}" get -o name
################
# Cluster Role #
################
if kube::test::if_supports_resource "${clusterroles}" ; then
record_command run_clusterroles_tests
fi
########
# Role #
########
if kube::test::if_supports_resource "${roles}" ; then
record_command run_role_tests
fi
#########################
# Assert short name #
#########################
record_command run_assert_short_name_tests
#########################
# Assert categories #
#########################
## test if a category is exported during discovery
if kube::test::if_supports_resource "${pods}" ; then
record_command run_assert_categories_tests
fi
###########################
# POD creation / deletion #
###########################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_pod_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_save_config_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_create_error_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_apply_tests
record_command run_kubectl_run_tests
record_command run_kubectl_create_filter_tests
fi
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_kubectl_apply_deployments_tests
fi
###############
# Kubectl get #
###############
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_get_tests
record_command run_kubectl_old_print_tests
fi
######################
# Create #
######################
if kube::test::if_supports_resource "${secrets}" ; then
record_command run_create_secret_tests
fi
##################
# Global timeout #
##################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_request_timeout_tests
fi
#####################################
# CustomResourceDefinitions #
#####################################
# customresourcedefinitions cleanup after themselves.
if kube::test::if_supports_resource "${customresourcedefinitions}" ; then
record_command run_crd_tests
fi
#################
# Run cmd w img #
#################
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_cmd_with_img_tests
fi
#####################################
# Recursive Resources via directory #
#####################################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_recursive_resources_tests
fi
##############
# Namespaces #
##############
if kube::test::if_supports_resource "${namespaces}" ; then
record_command run_namespace_tests
fi
###########
# Secrets #
###########
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${secrets}" ; then
record_command run_secrets_test
fi
fi
######################
# ConfigMap #
######################
if kube::test::if_supports_resource "${namespaces}"; then
if kube::test::if_supports_resource "${configmaps}" ; then
record_command run_configmap_tests
fi
fi
####################
# Client Config #
####################
record_command run_client_config_tests
####################
# Service Accounts #
####################
if kube::test::if_supports_resource "${namespaces}" && kube::test::if_supports_resource "${serviceaccounts}" ; then
record_command run_service_accounts_tests
fi
####################
# Job #
####################
if kube::test::if_supports_resource "${job}" ; then
record_command run_job_tests
fi
#################
# Pod templates #
#################
if kube::test::if_supports_resource "${podtemplates}" ; then
record_command run_pod_templates_tests
fi
############
# Services #
############
if kube::test::if_supports_resource "${services}" ; then
record_command run_service_tests
fi
##################
# DaemonSets #
##################
if kube::test::if_supports_resource "${daemonsets}" ; then
record_command run_daemonset_tests
if kube::test::if_supports_resource "${controllerrevisions}"; then
record_command run_daemonset_history_tests
fi
fi
###########################
# Replication controllers #
###########################
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_rc_tests
fi
fi
######################
# Deployments #
######################
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_deployment_tests
fi
######################
# Replica Sets #
######################
if kube::test::if_supports_resource "${replicasets}" ; then
record_command run_rs_tests
fi
#################
# Stateful Sets #
#################
if kube::test::if_supports_resource "${statefulsets}" ; then
record_command run_stateful_set_tests
if kube::test::if_supports_resource "${controllerrevisions}"; then
record_command run_statefulset_history_tests
fi
fi
######################
# Lists #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_lists_tests
fi
fi
######################
# Multiple Resources #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_multi_resources_tests
fi
fi
######################
# Persistent Volumes #
######################
if kube::test::if_supports_resource "${persistentvolumes}" ; then
record_command run_persistent_volumes_tests
fi
############################
# Persistent Volume Claims #
############################
if kube::test::if_supports_resource "${persistentvolumeclaims}" ; then
record_command run_persistent_volume_claims_tests
fi
############################
# Storage Classes #
############################
if kube::test::if_supports_resource "${storageclass}" ; then
record_command run_storage_class_tests
fi
#########
# Nodes #
#########
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_nodes_tests
fi
########################
# authorization.k8s.io #
########################
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
record_command run_authorization_tests
fi
# kubectl auth can-i
# kube-apiserver is started with authorization mode AlwaysAllow, so kubectl can-i always returns yes
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
output_message=$(kubectl auth can-i '*' '*' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(kubectl auth can-i get pods --subresource=log 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(kubectl auth can-i get invalid_resource 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type"
output_message=$(kubectl auth can-i get /logs/ 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(! kubectl auth can-i get /logs/ --subresource=log 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "subresource can not be used with NonResourceURL"
# --quiet suppresses output entirely; only the exit code carries the answer
output_message=$(kubectl auth can-i list jobs.batch/bar -n foo --quiet 2>&1 "${kube_flags[@]}")
kube::test::if_empty_string "${output_message}"
fi
# kubectl auth reconcile
if kube::test::if_supports_resource "${clusterroles}" ; then
kubectl auth reconcile "${kube_flags[@]}" -f test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml
kube::test::get_object_assert 'rolebindings -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-RB:'
kube::test::get_object_assert 'roles -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-R:'
kube::test::get_object_assert 'clusterrolebindings -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CRB:'
kube::test::get_object_assert 'clusterroles -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CR:'
kubectl delete "${kube_flags[@]}" rolebindings,role,clusterroles,clusterrolebindings -n some-other-random -l test-cmd=auth
fi
#####################
# Retrieve multiple #
#####################
if kube::test::if_supports_resource "${nodes}" ; then
if kube::test::if_supports_resource "${services}" ; then
record_command run_retrieve_multiple_tests
fi
fi
#####################
# Resource aliasing #
#####################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_resource_aliasing_tests
fi
fi
###########
# Explain #
###########
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_explain_tests
fi
###########
# Swagger #
###########
record_command run_swagger_tests
#####################
# Kubectl --sort-by #
#####################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_sort_by_tests
fi
############################
# Kubectl --all-namespaces #
############################
if kube::test::if_supports_resource "${pods}" ; then
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_kubectl_all_namespace_tests
fi
fi
################
# Certificates #
################
if kube::test::if_supports_resource "${csr}" ; then
record_command run_certificates_tests
fi
######################
# Cluster Management #
######################
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_cluster_management_tests
fi
###########
# Plugins #
###########
record_command run_plugins_tests
#################
# Impersonation #
#################
record_command run_impersonation_tests
kube::test::clear_all
# foundError is appended to by record_command on each suite failure
if [[ -n "${foundError}" ]]; then
echo "FAILED TESTS: ""${foundError}"
exit 1
fi
}
run_initializer_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing --include-uninitialized"
### Create a deployment
kubectl create --request-timeout=1 -f hack/testdata/initializer-deployments.yaml 2>&1 "${kube_flags[@]}" || true
### Test kubectl get --include-uninitialized
# Command
output_message=$(kubectl get deployments 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: I assume "web" is the deployment name
kube::test::if_has_string "${output_message}" 'web'
# Command
output_message=$(kubectl get deployments web 2>&1 "${kube_flags[@]}")
# Post-condition: I assume "web" is the deployment name
kube::test::if_has_string "${output_message}" 'web'
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
### Test kubectl describe --include-uninitialized
# Command
output_message=$(kubectl describe deployments 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl describe deployments web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments web --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
### Test kubectl label --include-uninitialized
# Command
output_message=$(kubectl label deployments labelkey1=labelvalue1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey1}}" 'labelvalue1'
# Command
output_message=$(kubectl label deployments labelkey2=labelvalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey3=labelvalue3 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey4=labelvalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey4}}" 'labelvalue4'
# Command
output_message=$(kubectl label deployments labelkey5=labelvalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey6=labelvalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey6}}" 'labelvalue6'
# Command
output_message=$(kubectl label deployments web labelkey7=labelvalue7 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey7}}" 'labelvalue7'
# Found All Labels
kube::test::get_object_assert 'deployments web' "{{${labels_field}}}" 'map[labelkey1:labelvalue1 labelkey4:labelvalue4 labelkey6:labelvalue6 labelkey7:labelvalue7 run:web]'
### Test kubectl annotate --include-uninitialized
# Command
output_message=$(kubectl annotate deployments annotatekey1=annotatevalue1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey1}}" 'annotatevalue1'
# Command
output_message=$(kubectl annotate deployments annotatekey2=annotatevalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey3=annotatevalue3 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey4=annotatevalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey4}}" 'annotatevalue4'
# Command
output_message=$(kubectl annotate deployments annotatekey5=annotatevalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey6=annotatevalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey6}}" 'annotatevalue6'
# Command
output_message=$(kubectl annotate deployments web annotatekey7=annotatevalue7 2>&1 "${kube_flags[@]}")
# Post-condition: web DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey7}}" 'annotatevalue7'
### Test kubectl edit --include-uninitialized
[ "$(EDITOR=cat kubectl edit deployments 2>&1 "${kube_flags[@]}" | grep 'edit cancelled, no objects found')" ]
[ "$(EDITOR=cat kubectl edit deployments --include-uninitialized 2>&1 "${kube_flags[@]}" | grep 'Edit cancelled, no changes made.')" ]
### Test kubectl set image --include-uninitialized
# Command
output_message=$(kubectl set image deployments *=nginx:1.11 --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
# Command
output_message=$(kubectl set image deployments *=nginx:1.11 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set image deployments *=nginx:1.11 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set image deployments *=nginx:1.12 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
# Command
output_message=$(kubectl set image deployments *=nginx:1.13 -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
### Test kubectl set resources --include-uninitialized
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=200m,memory=256Mi -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=512Mi -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
### Test kubectl set selector --include-uninitialized
# Create a service with initializer
kubectl create --request-timeout=1 -f hack/testdata/initializer-redis-master-service.yaml 2>&1 "${kube_flags[@]}" || true
# Command
output_message=$(kubectl set selector services role=padawan --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "selector updated" should be part of the output
kube::test::if_has_string "${output_message}" 'selector updated'
# Command
output_message=$(kubectl set selector services role=padawan --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
### Test kubectl set subject --include-uninitialized
# Create a create clusterrolebinding with initializer
kubectl create --request-timeout=1 -f hack/testdata/initializer-clusterrolebinding.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
### Test kubectl set serviceaccount --include-uninitialized
# Command
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "serviceaccount updated" should be part of the output
kube::test::if_has_string "${output_message}" 'serviceaccount updated'
# Command
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
### Test kubectl delete --include-uninitialized
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# Command
output_message=$(kubectl delete clusterrolebinding --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl delete clusterrolebinding --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "deleted" should be part of the output
kube::test::if_has_string "${output_message}" 'deleted'
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.items}}{{$id_field}}:{{end}}" ''
### Test kubectl apply --include-uninitialized
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune --request-timeout=20 --include-uninitialized=false --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
# apply pod a and prune uninitialized deployments web
kubectl apply --prune --request-timeout=20 --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
# apply pod a and prune uninitialized deployments web
kubectl apply --prune --request-timeout=20 --include-uninitialized --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
kubectl delete --request-timeout=1 deploy web
kubectl delete --request-timeout=1 service redis-master
set +o nounset
set +o errexit
}
|
from msys.core import Module,Connectable, Type
class SQL(Module):
    """SQL module stub.

    A ``Module`` subclass constructed with empty ``inputs``/``outputs``
    lists — i.e. it declares no connectors (presumably to be filled in
    later; confirm against the msys framework's Module contract).
    """

    def __init__(self):
        # Forward empty connector lists to the framework base class.
        super().__init__(inputs=list(), outputs=list())
<filename>src/log4qt/binaryloggingevent.cpp
#include "binaryloggingevent.h"
#include "logger.h"
#include "helpers/datetime.h"
#ifndef QT_NO_DATASTREAM
#include <QDataStream>
#endif
namespace Log4Qt
{

// Sentinel text stored as the ordinary (string) message of every binary
// event. Formatting code can compare against binaryMarker() to detect that
// the real payload lives in mBinaryMessage instead of the message string.
static const char binMarker[] = "@@@ binary message @@@";

BinaryLoggingEvent::BinaryLoggingEvent() = default;

// Event carrying a binary payload; the textual message is set to the marker.
BinaryLoggingEvent::BinaryLoggingEvent(const Logger *logger, Level level, const QByteArray &message)
    : LoggingEvent(logger, level, QString(binMarker))
    , mBinaryMessage(message)
{
}

// Same as above, with an explicit timestamp.
BinaryLoggingEvent::BinaryLoggingEvent(const Logger *logger, Level level, const QByteArray &message, qint64 timeStamp)
    : LoggingEvent(logger, level, QString(binMarker), timeStamp)
    , mBinaryMessage(message)
{
}

// Fully-specified constructor: NDC, properties and thread name are forwarded
// to the base LoggingEvent unchanged; only the message is replaced by the marker.
BinaryLoggingEvent::BinaryLoggingEvent(const Logger *logger, Level level, const QByteArray &message, const QString &ndc, const QHash<QString, QString> &properties, const QString &threadName, qint64 timeStamp)
    : LoggingEvent(logger, level, QString(binMarker), ndc, properties, threadName, timeStamp)
    , mBinaryMessage(message)
{
}

// Raw binary payload of this event.
QByteArray BinaryLoggingEvent::binaryMessage() const
{
    return mBinaryMessage;
}

// Human-readable form: "<LEVEL>:<payload as lowercase hex>".
QString BinaryLoggingEvent::toString() const
{
    return level().toString() + QLatin1Char(':') + mBinaryMessage.toHex();
}

// The sentinel string used as the textual message of binary events.
QString BinaryLoggingEvent::binaryMarker()
{
    return binMarker;
}

#ifndef QT_NO_DATASTREAM
// Serialize the base event fields first, then the binary payload.
QDataStream &operator<<(QDataStream &out, const BinaryLoggingEvent &loggingEvent)
{
    out << static_cast<const LoggingEvent &>(loggingEvent);
    out << loggingEvent.mBinaryMessage;
    return out;
}

// Deserialize in exactly the order operator<< writes.
QDataStream &operator>>(QDataStream &in, BinaryLoggingEvent &loggingEvent)
{
    in >> static_cast<LoggingEvent &>(loggingEvent);
    in >> loggingEvent.mBinaryMessage;
    return in;
}
#endif // QT_NO_DATASTREAM

} // namespace Log4Qt
|
#!/bin/sh
#set -o xtrace

# SPDX-FileCopyrightText: Volker Mische <volker.mische@gmail.com>
# SPDX-License-Identifier: MIT

# Creates a directory with the given name (sub-directories are not supported)
# in the root of the repo and returns its name together with the upload link
# as JSON, where the directory name is the key and the upload link is the value.
#
# You can get your auth token via
# curl -X POST --data "username=<your-username>&password=<your-password>" '<your-server>/api2/auth-token/'

if [ "${#}" -lt 4 ]; then
    echo "Usage: $(basename "${0}") <base-url> <auth-token> <repo-id> <directory-name>"
    echo ""
    echo "Example: $(basename "${0}") https://example.org fe91e764226cc534811f0ba32c62a6ac41ad0d7b 280b593a-f868-0594-d97a-23d88822a35f directory_to_create"
    exit 1
fi

base_url=${1}
token=${2}
repo_id=${3}
dir_name=${4}

api_v20="${base_url}/api2"
api_v21="${base_url}/api/v2.1"

# Create the directory via the v2.0 API; the endpoint replies with the JSON
# string "success" (quotes included) when the directory was created.
mkdir_ret=$(curl --silent -X POST --header "Authorization: Token ${token}" "${api_v20}/repos/${repo_id}/dir/?p=/${dir_name}" --data 'operation=mkdir')
if [ "${mkdir_ret}" != '"success"' ]; then
    echo "Error: cannot create directory '${dir_name}'."
    exit 2
fi

# Output the directory name to stderr (so that you can still pipe the expected
# output into a file) as progress indicator
echo "Creating ${dir_name} on Seafile…" >&2

# Request a public upload link for the new directory (v2.1 API) and reshape
# the reply into {"<directory-name>": "<upload-link>"}.
upload_link_ret=$(curl --silent -X POST --header "Authorization: Token ${token}" "${api_v21}/upload-links/" --data "path=/${dir_name}/&repo_id=${repo_id}"|jq --compact-output '{(.obj_name): .link}')
echo "${upload_link_ret}"
|
package com.vc.easy
object L389 {
  /**
   * LeetCode 389 "Find the Difference": `t` is `s` shuffled plus exactly one
   * extra character; return that extra character.
   *
   * Seed the accumulator with the code of t's extra trailing slot, then
   * cancel out every position that s and t share. Keeps the original
   * `t(s.length)` access, so inputs where t is not longer than s still throw
   * the same IndexOutOfBoundsException.
   */
  def findTheDifference(s: String, t: String): Char = {
    var diff = t(s.length).toInt
    for (i <- s.indices) {
      diff += t(i) - s(i)
    }
    diff.toChar
  }
}
|
<reponame>gdem/openbank
package ch.raiffeisen.openbank.beneficiary.controller.api;
import org.springframework.hateoas.ResourceSupport;
import org.springframework.hateoas.core.Relation;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
/**
 * HAL REST resource describing a payment beneficiary attached to an account.
 * Plain mutable DTO: all state is populated through setters during
 * (de)serialization; link handling is inherited from {@code ResourceSupport}.
 */
@Relation(value = "beneficiary", collectionRelation = "beneficiaries")
@ApiModel(description = "Beneficiary")
public class BeneficiaryResource extends ResourceSupport {

    // Opaque technical id of the owning account (not the account number).
    @ApiModelProperty(notes = "A unique and immutable identifier used to identify the account resource. This identifier has no meaning to the account owner.")
    private String accountId;

    // Opaque technical id of this beneficiary.
    @ApiModelProperty(notes = "A unique and immutable identifier used to identify the beneficiary resource. This identifier has no meaning to the account owner.")
    private String beneficiaryId;

    @ApiModelProperty(
            notes = "Unique reference, as assigned by the creditor, to unambiguously refer to the payment transaction. Usage: If available, the initiating party should provide this reference in the structured remittance information, to enable reconciliation by the creditor upon receipt of the amount of money. If the business context requires the use of a creditor reference or a payment remit identification, and only one identifier can be passed through the end-to-end chain, the creditor's reference or payment remittance identification should be quoted in the end-to-end transaction identification.")
    private String reference;

    // Creditor-side bank and account details (nested resources).
    private CreditorAgent creditorAgent;

    private CreditorAccount creditorAccount;

    /** @return opaque identifier of the owning account resource */
    public String getAccountId() {
        return accountId;
    }

    public void setAccountId(String accountId) {
        this.accountId = accountId;
    }

    /** @return opaque identifier of this beneficiary resource */
    public String getBeneficiaryId() {
        return beneficiaryId;
    }

    public void setBeneficiaryId(String beneficiaryId) {
        this.beneficiaryId = beneficiaryId;
    }

    /** @return creditor-assigned payment reference (may be null) */
    public String getReference() {
        return reference;
    }

    public void setReference(String reference) {
        this.reference = reference;
    }

    /** @return the creditor's bank details (may be null) */
    public CreditorAgent getCreditorAgent() {
        return creditorAgent;
    }

    public void setCreditorAgent(CreditorAgent creditorAgent) {
        this.creditorAgent = creditorAgent;
    }

    /** @return the creditor's account details (may be null) */
    public CreditorAccount getCreditorAccount() {
        return creditorAccount;
    }

    public void setCreditorAccount(CreditorAccount creditorAccount) {
        this.creditorAccount = creditorAccount;
    }
}
|
# MariaDB SQL server.
# Copyright (C) 2010 Kristian Nielsen and Monty Program AB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA.
# Setting cpu options.
# Pick compiler CPU flags for the current OS/toolchain and export them via
# the CPUOPT variable. Only Linux (x86/x86_64) and Solaris are handled;
# other platforms leave CPUOPT untouched.
get_cpuopt () {
  case "$(uname -o)" in
    *Linux*)
      case "$(gcc -dumpmachine)" in
        x86_64-*)
          # gcc barfs on -march=... on x64
          CPUOPT="-m64 -mtune=generic" ;;
        *)
          # we'd use i586 to not trip up mobile/lowpower devices
          CPUOPT="-m32 -march=i586 -mtune=generic" ;;
      esac
      ;;
    *Solaris*)
      # ToDo: handle 32-bit build? For now default to 64-bit.
      CPUOPT="-D__sun -m64 -mtune=athlon64" ;;
  esac
  return 0
}
# Default to a parallel build, but only if AM_MAKEFLAGS is not set.
# (So buildbots can easily disable this behaviour if required.)
# Default to a parallel build (-j 6), but only when the caller has not
# already set AM_MAKEFLAGS — so buildbots can override or disable this.
get_make_parallel_flag () {
  if [ -z "$AM_MAKEFLAGS" ]; then
    AM_MAKEFLAGS="-j 6"
  fi
  return 0
}
|
<filename>src/main/java/team/thegoldenhoe/cameraobscura/item/camera/DigitalCameraItem.java
package team.thegoldenhoe.cameraobscura.item.camera;
import net.minecraft.entity.player.PlayerEntity;
import net.minecraft.item.ItemStack;
import net.minecraft.screen.NamedScreenHandlerFactory;
import net.minecraft.screen.SimpleNamedScreenHandlerFactory;
import net.minecraft.text.LiteralText;
import net.minecraft.text.TranslatableText;
import team.thegoldenhoe.cameraobscura.item.SdCardItem;
import team.thegoldenhoe.cameraobscura.screen.DigitalCameraScreenHandler;
import team.thegoldenhoe.cameraobscura.util.CameraType;
import java.util.UUID;
/**
 * Digital camera: stores photos on an SD card kept in storage slot 0.
 * Taking a picture requires a card with remaining capacity; the player is
 * told (via an action-bar message) why shooting is blocked otherwise.
 */
public class DigitalCameraItem extends CameraItem {

    public DigitalCameraItem(Settings settings) {
        super(CameraType.DIGITAL, settings);
    }

    /**
     * A photo can be taken only when slot 0 holds an SD card with free
     * capacity. Sends "missing_sd" / "full_sd" feedback otherwise.
     */
    @Override
    protected boolean canTakePhoto(ItemStack camera, PlayerEntity user) {
        final ItemStack sdCard = CameraStorage.getItems(camera).get(0);
        if (!(sdCard.getItem() instanceof SdCardItem)) {
            user.sendMessage(new TranslatableText("cameraobscura.chat.missing_sd"), true);
            return false;
        }
        if (SdCardItem.getRemainingUses(sdCard) > 0) {
            return true;
        }
        user.sendMessage(new TranslatableText("cameraobscura.chat.full_sd"), true);
        return false;
    }

    /** After the shot, persist the photo id onto the SD card in slot 0. */
    @Override
    protected void onTakePicture(ItemStack camera, PlayerEntity user, UUID photoName) {
        super.onTakePicture(camera, user, photoName);
        final ItemStack sdCard = CameraStorage.getItems(camera).get(0);
        if (sdCard.getItem() instanceof SdCardItem) {
            SdCardItem.savePhoto(sdCard, photoName);
            CameraStorage.setItem(camera, 0, sdCard);
        }
    }

    /** Screen handler for browsing this camera's contents (untitled screen). */
    @Override
    protected NamedScreenHandlerFactory createScreenHandlerFactory(ItemStack camera) {
        return new SimpleNamedScreenHandlerFactory(
                (syncId, inventory, player) -> new DigitalCameraScreenHandler(syncId, inventory, camera),
                new LiteralText(""));
    }
}
|
#!/bin/bash
# Provision a node with Docker and kubeadm (Ubuntu Xenial).
# FIXES: shebang was '#/bin/bash' (a plain comment, so the script ran under
# whatever shell invoked it); 'apt-key add' was missing the '-' argument, so
# the key piped from curl was never read; the Kubernetes repo was added but
# never indexed before installing kubeadm.
set -e

apt-get update
apt-get install -y curl
apt-get install -y docker.io

# 'apt-key add -' reads the signing key from stdin.
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"

# Refresh package lists so the newly added Kubernetes repo is visible.
apt-get update
apt-get install -y kubeadm

docker --version
kubeadm version

# kubelet refuses to start with swap enabled.
swapoff -a
|
/*
*
* * Copyright 2017 陈志鹏
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package xyz.zpayh.hdimage.datasource.interceptor;
import android.graphics.BitmapRegionDecoder;
import android.net.Uri;
import android.util.Log;
import com.facebook.binaryresource.BinaryResource;
import com.facebook.binaryresource.FileBinaryResource;
import com.facebook.cache.common.CacheKey;
import com.facebook.common.internal.Closeables;
import com.facebook.common.memory.PooledByteBuffer;
import com.facebook.common.memory.PooledByteBufferInputStream;
import com.facebook.common.references.CloseableReference;
import com.facebook.common.util.UriUtil;
import com.facebook.datasource.DataSource;
import com.facebook.datasource.DataSources;
import com.facebook.imagepipeline.cache.DefaultCacheKeyFactory;
import com.facebook.imagepipeline.core.ImagePipeline;
import com.facebook.imagepipeline.core.ImagePipelineFactory;
import com.facebook.imagepipeline.request.ImageRequest;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import xyz.zpayh.hdimage.datasource.Interceptor;
/**
 * File:    FrescoInterceptor
 * Author:  陈志鹏 (Chen Zhipeng)
 * Created: 2017/7/30 16:07
 * Email:   <EMAIL>
 *
 * Interceptor that only handles network images: fetches the encoded bytes
 * through Fresco's image pipeline and builds a {@link BitmapRegionDecoder}
 * from them. Non-network URIs fall through to the rest of the chain.
 */
public class FrescoInterceptor implements Interceptor {

    @Override
    public BitmapRegionDecoder intercept(Chain chain) throws IOException {
        final Uri uri = chain.uri();
        BitmapRegionDecoder decoder = chain.chain(uri);
        if (decoder != null) {
            // Another interceptor already produced a decoder for this URI.
            return decoder;
        }
        if (!UriUtil.isNetworkUri(uri)) {
            // This interceptor only loads network images.
            return null;
        }

        ImagePipeline imagePipeline = ImagePipelineFactory.getInstance().getImagePipeline();
        ImageRequest request = ImageRequest.fromUri(uri);
        DataSource<CloseableReference<PooledByteBuffer>> dataSource =
                imagePipeline.fetchEncodedImage(request, null);
        CloseableReference<PooledByteBuffer> ref = null;
        try {
            ref = DataSources.waitForFinalResult(dataSource);
            if (ref == null) {
                return null;
            }
            PooledByteBuffer result = ref.get();
            if (BuildConfig.DEBUG) {
                Log.d("FrescoInterceptor", "从我这加载");
            }
            InputStream inputStream = new PooledByteBufferInputStream(result);
            try {
                // BUG FIX: the original called Closeables.closeQuietly(inputStream)
                // *before* handing the stream to BitmapRegionDecoder.newInstance,
                // so the decoder always read from a closed stream. Close it after
                // decoding instead.
                return BitmapRegionDecoder.newInstance(inputStream, false);
            } catch (IOException e) {
                // Decoding from the stream failed (e.g. a truncated JPEG):
                // retry from Fresco's on-disk cache file with the fixup helper.
                ImageRequest imageRequest = ImageRequest.fromUri(uri);
                CacheKey cacheKey = DefaultCacheKeyFactory.getInstance().getEncodedCacheKey(imageRequest, null);
                BinaryResource resource = ImagePipelineFactory.getInstance().getMainFileCache().getResource(cacheKey);
                if (!(resource instanceof FileBinaryResource)) {
                    // Nothing usable in the file cache — surface the original
                    // failure (the original code NPE'd here instead).
                    throw e;
                }
                File file = ((FileBinaryResource) resource).getFile();
                if (BuildConfig.DEBUG) {
                    Log.d("FrescoInterceptor", file.getName());
                }
                return Interceptors.fixJPEGDecoder(file, e);
            } finally {
                Closeables.closeQuietly(inputStream);
            }
        } catch (Throwable throwable) {
            if (BuildConfig.DEBUG) {
                Log.d("FrescoInterceptor", "intercept: 加载失败了");
            }
            throwable.printStackTrace();
            return null;
        } finally {
            // BUG FIX: release pipeline resources the original leaked.
            CloseableReference.closeSafely(ref);
            dataSource.close();
        }
    }
}
|
'use strict';


const _ = require('lodash');
const ScrollableList = require('nodeca.core/lib/app/scrollable_list');


// Page state
//
// - user_hid:          user hid
// - album_id:          album id
// - selection_ids:     array of currently selected images
// - selection_started: true if user is currently in select mode
//                      (checkboxes are shown)
//
let pageState = {};
let scrollable_list;

let $window = $(window);

// an amount of media files we try to load when user scrolls
// to the end of the page (albums load bigger pages than the "all media" view)
const LOAD_MEDIA_ALL = 30;
const LOAD_MEDIA_ALBUM = 100;
// Fetch the next/previous page of media for the infinite-scroll list.
//
// `start`     - media id to continue from
// `direction` - 'top' (load older items above) or 'bottom' (below)
//
// Resolves with { $html, locals, reached_end } as expected by ScrollableList.
function load(start, direction) {
  let media = document.getElementById('users-media-list').getElementsByClassName('user-medialist__item');

  // Detect the number of grid columns by counting how many leading items
  // share the vertical offset of the first one.
  //
  // FIX: guard against an empty list — the original unconditionally read
  // media[0].getBoundingClientRect() and crashed when no items are rendered.
  let columns = 1;

  if (media.length > 0) {
    let first_offset = media[0].getBoundingClientRect().top;
    let i;

    for (i = 1; i < media.length; i++) {
      if (media[i].getBoundingClientRect().top !== first_offset) break;
    }
    columns = i;
  }

  let load_count = pageState.album_id ? LOAD_MEDIA_ALBUM : LOAD_MEDIA_ALL;

  // Make sure we will have filled lines after load (if possible)
  //
  load_count -= (load_count + media.length) % columns;

  return N.io.rpc('users.album.list', {
    user_hid: pageState.user_hid,
    album_id: pageState.album_id,
    media_id: start,
    before: direction === 'top' ? load_count : 0,
    after: direction === 'bottom' ? load_count : 0
  }).then(res => {
    return {
      $html: $(N.runtime.render('users.album.list', res)),
      locals: res,
      reached_end: !(direction === 'top' ? res.prev_media : res.next_media)
    };
  }).catch(err => {
    // Album deleted, refreshing the page so user can see the error
    if (err.code === N.io.NOT_FOUND) return N.wire.emit('navigate.reload');
    throw err;
  });
}
// Lazily-created debounced URL updater (see on_list_scroll below).
let update_url;

// Scroll callback from ScrollableList: mirror the topmost visible image into
// the address bar, so reloading/sharing the URL restores the position.
function on_list_scroll(item, index, item_offset) {
  // Use a separate debouncer that only fires when user stops scrolling,
  // so it's executed a lot less frequently.
  //
  // The reason is that `history.replaceState` is very slow in FF
  // on large pages: https://bugzilla.mozilla.org/show_bug.cgi?id=1250972
  //
  update_url = update_url || _.debounce((item, index, item_offset) => {
    let href, state;

    // `item` may be absent when the viewport is above the first image;
    // in that case no media/offset state is recorded.
    if (item) {
      state = {
        media: $(item).data('media-id'),
        offset: item_offset
      };
    }

    /* eslint-disable no-undefined */
    href = N.router.linkTo('users.album', {
      user_hid: pageState.user_hid,
      album_id: pageState.album_id,
      media_id: item ? $(item).data('media-id') : undefined
    });

    // Replace (not push), so scrolling doesn't pollute browser history.
    N.wire.emit('navigate.replace', { href, state })
      .catch(err => N.wire.emit('error', err));
  }, 500);

  update_url(item, index, item_offset);
}
/////////////////////////////////////////////////////////////////////
// init on page load
//
N.wire.on('navigate.done:' + module.apiPath, function page_setup(data) {
  pageState.user_hid = data.params.user_hid;
  pageState.album_id = data.params.album_id;
  pageState.selection_ids = null;
  pageState.selection_started = false;

  // Fixed navbar height: body offsets plus extra spacing below.
  let navbar_height = parseInt($('body').css('margin-top'), 10) + parseInt($('body').css('padding-top'), 10);

  // account for some spacing between posts
  navbar_height += 50;

  let scroll_done = false;

  // 1) Prefer the position saved in history state (back/forward navigation):
  //    exact image plus pixel offset inside the viewport.
  if (!scroll_done && data.state &&
      typeof data.state.media !== 'undefined' && typeof data.state.offset !== 'undefined') {
    let el = $('#media' + data.state.media);

    if (el.length) {
      $window.scrollTop(el.offset().top - navbar_height + data.state.offset);
      scroll_done = true;
    }
  }

  // 2) Otherwise jump to the image referenced in the URL, if it is rendered.
  if (!scroll_done && data.params.media_id) {
    let el = $('#media' + data.params.media_id);

    if (el.length) {
      $window.scrollTop(el.offset().top - navbar_height);
      scroll_done = true;
    }
  }

  // If we're on the first page, scroll to the top;
  // otherwise, scroll to the first topic on that page
  //
  if (!scroll_done) {
    if (N.runtime.page_data.prev_media && $('#users-media-list').length) {
      $window.scrollTop($('#users-media-list').offset().top - navbar_height);
    } else {
      $window.scrollTop(0);
    }
    scroll_done = true;
  }

  // disable automatic scroll to an anchor in the navigator
  data.no_scroll = true;

  // Bidirectional infinite scrolling; `load` fetches additional media pages.
  scrollable_list = new ScrollableList({
    N,
    list_selector: '.user-medialist',
    item_selector: '.user-medialist__item',
    placeholder_top_selector: '.user-album-root__loading-prev',
    placeholder_bottom_selector: '.user-album-root__loading-next',
    get_content_id: media => $(media).data('media-id'),
    load,
    reached_top: !N.runtime.page_data.prev_media,
    reached_bottom: !N.runtime.page_data.next_media,
    navbar_height,
    on_list_scroll
  });
});


// Tear down list tracking and cancel any pending URL update when the user
// navigates away from this page.
N.wire.on('navigate.exit:' + module.apiPath, function page_teardown() {
  scrollable_list.destroy();
  scrollable_list = null;

  if (update_url) update_url.cancel();
  pageState = {};
});
////////////////////////////////////////////////////////////////////////////////
// Uploader
//

let $dropZone;

N.wire.after('navigate.done:' + module.apiPath, function uploader_setup() {
  $dropZone = $('.user-album-upload');

  // Files picked through the regular file-input dialog.
  $('#user-album-upload__files').on('change', function () {
    var files = Array.prototype.slice.call($(this).get(0).files); // clone filelist

    // reset input, so uploading the same file again will trigger 'change' event
    $(this).val('');

    if (files.length > 0) {
      // `uploaded` is filled in by the uploader handler
      // (presumably with the created media records — confirm against
      // users.uploader implementation).
      let params = {
        files,
        rpc: [ 'users.media.upload', { album_id: pageState.album_id } ],
        config: 'users.uploader_config',
        uploaded: null
      };

      N.wire.emit('users.uploader:add', params)
        .then(() => {
          // Prepend freshly uploaded media and unhide the list.
          $('#users-media-list').prepend(
            $(N.runtime.render('users.album.list', { media: params.uploaded, user_hid: pageState.user_hid }))
          );
          $('.user-album-root').removeClass('no-files');
        })
        .catch(err => N.wire.emit('error', err));
    }
  });
});


N.wire.once('navigate.done:' + module.apiPath, function page_once() {

  // Handles the event when user drag file to drag drop zone
  //
  N.wire.on(module.apiPath + ':dd_area', function user_album_dd(data) {
    let x0, y0, x1, y1, ex, ey;

    switch (data.event.type) {
      case 'dragenter':
        $dropZone.addClass('active');
        break;
      case 'dragleave':
        // 'dragleave' occurs when user move cursor over child HTML element
        // track this situation and don't remove 'active' class
        // http://stackoverflow.com/questions/10867506/
        x0 = $dropZone.offset().left;
        y0 = $dropZone.offset().top;
        x1 = x0 + $dropZone.outerWidth();
        y1 = y0 + $dropZone.outerHeight();
        ex = data.event.originalEvent.pageX;
        ey = data.event.originalEvent.pageY;

        // Only deactivate when the pointer actually left the zone's bounds.
        if (ex > x1 || ex < x0 || ey > y1 || ey < y0) {
          $dropZone.removeClass('active');
        }
        break;
      case 'drop':
        $dropZone.removeClass('active');

        if (data.files?.length) {
          let params = {
            files: data.files,
            rpc: [ 'users.media.upload', { album_id: pageState.album_id } ],
            config: 'users.uploader_config',
            uploaded: null
          };

          // Same upload-then-prepend flow as the file-input path.
          return N.wire.emit('users.uploader:add', params)
            .then(() => {
              $('#users-media-list').prepend(
                $(N.runtime.render('users.album.list', { media: params.uploaded, user_hid: pageState.user_hid }))
              );
              $('.user-album-root').removeClass('no-files');
            });
        }
        break;
      default:
    }
  });
});
////////////////////////////////////////////////////////////////////////////////
// Dropdown menu buttons handlers
//
N.wire.once('navigate.done:' + module.apiPath, function page_once() {

  // Create medialink
  //
  N.wire.on(module.apiPath + ':add_medialink', function add_medialink(data) {
    // `media_url` is filled in by the add_medialink dialog handler
    // (NOTE(review): presumably — confirm against users.album.add_medialink).
    let params = {
      album_id: pageState.album_id,
      providers: data.$this.data('providers'),
      media_url: null
    };

    return Promise.resolve()
      .then(() => N.wire.emit('users.album.add_medialink', params))
      .then(() => N.io.rpc('users.media.add_medialink', { album_id: params.album_id, media_url: params.media_url }))
      .then(res => {
        // Prepend the created medialink to the list and unhide it.
        $('#users-media-list').prepend(
          $(N.runtime.render('users.album.list', { media: [ res.media ], user_hid: pageState.user_hid }))
        );
        $('.user-album-root').removeClass('no-files');
      });
  });


  // Delete
  //
  N.wire.before(module.apiPath + ':delete', function confirm_delete_album() {
    return N.wire.emit('common.blocks.confirm', t('delete_album_confirm'));
  });

  N.wire.on(module.apiPath + ':delete', function delete_album() {
    let params = { user_hid: N.runtime.user_hid };

    // Destroy the album, then return to the user's album index page.
    return N.io.rpc('users.album.destroy', { album_id: pageState.album_id })
      .then(() => N.wire.emit('navigate.to', { apiPath: 'users.albums_root', params }));
  });
});
///////////////////////////////////////////////////////////////////////////////
// Multiselect
//

// Re-render the toolbar controls so they reflect the current selection state
// (selection mode on/off, which images are selected).
function update_toolbar() {
  $('.user-album-root__toolbar-controls')
    .replaceWith(N.runtime.render(module.apiPath + '.blocks.toolbar_controls', {
      album: N.runtime.page_data.album,
      user_hid: N.runtime.page_data.user_hid,
      medialink_providers: N.runtime.page_data.medialink_providers,
      selection_ids: pageState.selection_ids,
      selection_started: pageState.selection_started
    }));
}
// Toggle the checkbox/highlight for a single image and keep the list of
// selected ids (pageState.selection_ids) in sync with the UI.
function check_media(media_id, checked) {
  let $item = $('#media' + media_id);

  $item.find('.user-medialist-item__select-cb').prop('checked', checked);
  $item.toggleClass('selected', checked);

  let already_selected = pageState.selection_ids.indexOf(media_id) !== -1;

  if (checked && !already_selected) {
    pageState.selection_ids.push(media_id);
  } else if (!checked && already_selected) {
    pageState.selection_ids = pageState.selection_ids.filter(x => x !== media_id);
  }
}
// Leave selection mode: uncheck every selected image, reset selection state
// and restore the normal toolbar/list appearance.
function stop_selection() {
  const selected = pageState.selection_ids;

  selected.forEach(media_id => check_media(media_id, false));

  pageState.selection_ids = null;
  pageState.selection_started = false;
  update_toolbar();
  $('.user-medialist').removeClass('user-medialist__m-selection');
}
// Init handlers
//
N.wire.once('navigate.done:' + module.apiPath, function album_selection_init() {
// User starts selection: show checkboxes, etc.
//
N.wire.on(module.apiPath + ':selection_start', function selection_start() {
pageState.selection_ids = [];
pageState.selection_started = true;
update_toolbar();
$('.user-medialist').addClass('user-medialist__m-selection');
});
// User stops selection: hide checkboxes, reset selection state
//
N.wire.on(module.apiPath + ':selection_stop', function selection_stop() {
stop_selection();
});
// User toggles checkbox near an image
//
N.wire.on(module.apiPath + ':media_check', function media_check(data) {
let media_id = data.$this.closest('.user-medialist__item').data('media-id');
let checkbox = data.$this;
check_media(media_id, checkbox.prop('checked'));
update_toolbar();
});
// Mass-move media
//
N.wire.on('users.album:move_many', function media_move() {
let media_ids = pageState.selection_ids.slice(0);
return N.wire.emit('users.album.media_move', {
src_album: pageState.album_id,
media_ids
}).then(() => {
stop_selection();
let media = $(media_ids.map(id => '#media' + id).join(','));
media.fadeOut(() => media.remove());
return N.wire.emit('notify.info', t('media_move_done', { count: media_ids.length }));
});
});
// Mass-delete media
//
N.wire.before(module.apiPath + ':delete_many', function confirm_media_delete() {
return N.wire.emit('common.blocks.confirm', t('delete_media_confirm', {
count: pageState.selection_ids.length
}));
});
N.wire.on(module.apiPath + ':delete_many', function media_delete() {
let media_ids = pageState.selection_ids.slice(0);
return N.io.rpc('users.album.media_destroy', {
src_album: pageState.album_id,
media_ids
}).then(() => {
stop_selection();
let media = $(media_ids.map(id => '#media' + id).join(','));
media.fadeOut(() => media.remove());
return N.wire.emit('notify.info', t('media_delete_done', { count: media_ids.length }));
});
});
});
|
# Run pix2pix inference on the facades dataset: U-Net 256 generator,
# direction B->A, with aligned (paired) data.
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --which_model_netG unet_256 --which_direction BtoA --align_data
|
/*
* GestureManager
*
* Copyright (c) 2019 <NAME>
*
* This software is released under the MIT License.
* https://opensource.org/licenses/MIT
*/
import GestureManager from './gesture-manager.js';
import GestureManagerIE from './gesture-manager-ie.js';
/**
 * Create a gesture manager bound to `element`.
 *
 * FIX: removed an unused `navigator.userAgent` lookup left over from an
 * earlier UA-sniffing approach; implementation choice relies solely on
 * `msPointerEnabled`.
 *
 * @param {Element|string} element - target element, or a CSS selector for it
 * @returns {GestureManager|GestureManagerIE} the MSPointer flavour when the
 *   browser exposes `msPointerEnabled`, the standard implementation otherwise
 */
export default function createGestureManager(element) {
  if (typeof element == 'string') {
    element = document.querySelector(element);
  }
  if (window.navigator.msPointerEnabled) {
    return new GestureManagerIE(element);
  } else {
    return new GestureManager(element);
  }
}
|
#!/usr/bin/env nix-shell
#!nix-shell -p grub2_efi -p dejavu_fonts -i bash
# This script can be used to (dirtily) build fonts for grub2.
# This script assumes only DejaVu, and only one size.
set -e
set -u
PS4=" $ "
set -x

# Pick the DejaVu package path out of $buildInputs (provided by nix-shell).
# NOTE(review): the loop keeps only the LAST entry — this relies on
# dejavu_fonts being listed after grub2_efi; confirm the input ordering.
DEJA=
for p in $buildInputs; do
  DEJA="$p"
done

# Render the TTF into grub's bitmap .pf2 font format at size 20.
grub-mkfont -s 20 "$DEJA/share/fonts/truetype/DejaVuSans.ttf" -o dejavu.pf2
|
import * as React from 'react';
import { RouteComponentProps } from 'react-router';
import Second from './components/Second';
// Thin page wrapper that renders the <Second /> view. Router props are
// accepted (to satisfy the route table's typing) but not used.
export class SecondPage extends React.Component<RouteComponentProps<any>, void> {
  render() {
    return (
      <Second />
    );
  }
}

// NOTE(review): the double cast re-types the class component as a stateless
// component for the router's consumption; React.StatelessComponent is a
// deprecated alias of React.FunctionComponent in newer typings — confirm the
// project's React version before modernizing.
export default (SecondPage as any as React.StatelessComponent<RouteComponentProps<any>>);
|
#!/bin/sh
# check-libelf.sh by Naomi Peori (naomi@peori.ca)
# Probe the usual install prefixes for libelf.h (Linux, MacPorts, Homebrew);
# succeed silently if any has it, otherwise print a hint and fail.
( ls /usr/include/libelf.h || ls /usr/local/include/libelf.h || ls /opt/local/include/libelf.h || ls /opt/local/include/libelf/libelf.h || ls /opt/homebrew/include/libelf/libelf.h || ls /usr/local/include/libelf/libelf.h ) 1>/dev/null 2>&1 || { echo "ERROR: Install libelf before continuing."; exit 1; }
|
<reponame>boczeratul/ntu-lifeguard-bot
import { DB, Constants } from '../lib';
// Resolve with the single returned row, or reject with NO_SUCH_TRAINEE when
// the query matched zero (or more than one) trainee.
const checkSingleData = (data) => {
  if (data.length === 1) {
    return Promise.resolve(data[0]);
  }
  return Promise.reject(new Error(Constants.NO_SUCH_TRAINEE));
};
// Look up a trainee by temporary order number; resolves with exactly one row
// or rejects via checkSingleData.
const queryByTempOrder = (tempOrder) => {
  const SQL = `
    SELECT * from ntu_lifeguard.Trainee
    WHERE order_temp = ?
  `;
  return DB.promiseQuery(SQL, [tempOrder])
    .then(checkSingleData);
};

// Look up a trainee by permanent order number; resolves with exactly one row
// or rejects via checkSingleData.
const queryByPermanentOrder = (permanentOrder) => {
  const SQL = `
    SELECT * from ntu_lifeguard.Trainee
    WHERE order_permanent = ?
  `;
  return DB.promiseQuery(SQL, [permanentOrder])
    .then(checkSingleData);
};
// Dispatch on the order format: "N-M" is a temporary order number, anything
// else is treated as a permanent one.
//
// Fix: dropped the /g flag. RegExp.prototype.test with /g is stateful
// (lastIndex persists between calls), a latent bug if the literal is ever
// hoisted to a shared constant; a single anchored full-string match is all
// that is intended here.
const queryByOrder = order => (
  /^\d+-\d+$/.test(order) ?
    queryByTempOrder(order) : queryByPermanentOrder(order)
);
export {
queryByOrder,
queryByTempOrder,
queryByPermanentOrder,
};
|
#!/bin/sh
# CocoaPods "Embed Frameworks" build phase: copies vendored frameworks into
# the app bundle, strips architectures the current build does not target,
# and re-signs when the build settings require it.
#
# NOTE(review): the shebang is /bin/sh but the script uses bash-only features
# (arrays, [[ ]], `function`, `trap ... ERR`, `set -o pipefail`). This works
# where sh is bash (e.g. macOS); confirm before running elsewhere.
set -e
set -u
set -o pipefail

function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
# $1: framework path; resolved against BUILT_PRODUCTS_DIR first, then its
#     basename there, then taken verbatim.
install_framework()
{
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  # Follow a symlinked source so rsync copies the real payload.
  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  # Locate the framework's main binary (or a bare dylib), following symlinks.
  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"

  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  elif [ -L "${binary}" ]; then
    echo "Destination binary is symlinked..."
    dirname="$(dirname "${binary}")"
    binary="${dirname}/$(readlink "${binary}")"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies and strips a vendored dSYM
# $1: path to a .framework.dSYM bundle.
install_dsym() {
  local source="$1"
  if [ -r "$source" ]; then
    # Copy the dSYM into a the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"

    # Locate the DWARF payload inside the copied bundle.
    local basename
    basename="$(basename -s .framework.dSYM "$source")"
    binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"

    # Strip invalid architectures so "fat" simulator / device frameworks work on device
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary"
    fi

    # STRIP_BINARY_RETVAL is set by strip_invalid_archs: 1 = stripped/ok.
    if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
      # Move the stripped file into its final destination.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
    fi
  fi
}
# Copies the bcsymbolmap files of a vendored framework
# $1: path to a .bcsymbolmap file; copied into BUILT_PRODUCTS_DIR.
install_bcsymbolmap() {
    local bcsymbolmap_path="$1"
    local destination="${BUILT_PRODUCTS_DIR}"
    # Fix: the inner double quotes of this logging echo were unescaped, so the
    # printed command line came out mangled (the copy itself was unaffected).
    # Escaped now, consistent with install_framework / install_dsym.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
# $1: path to the bundle/binary to sign. No-op unless the build settings
# provide an identity and code signing is both required and allowed.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"

    # Background the signing when parallel signing is enabled; the script
    # tail `wait`s for all of them.
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Strip invalid architectures
# $1: Mach-O binary. Removes slices not in $ARCHS via lipo; communicates the
# outcome through the global STRIP_BINARY_RETVAL (1 = processed, 0 = no
# matching arch found, binary left untouched).
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    STRIP_BINARY_RETVAL=0
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=1
}
# Embed the pod frameworks for each build configuration.
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/BDLibWorkingSwift/BDLibWorkingSwift.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/BDLibWorkingSwift/BDLibWorkingSwift.framework"
fi
# When signing was backgrounded, wait for all codesign jobs to finish.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
#ycsb-xope.sh(mocc)
# Sweep max operations per transaction (10..100, step 10) for the MOCC engine
# on a YCSB-B-like workload; for each point run $epoch times and record
# avg/min/max of throughput, abort rate and cache-miss rate.

# Workload parameters
tuple=1000000
maxope=10
rratio=95
rmw=off
skew=0.9
ycsb=on
cpumhz=2400
epochtime=40
extime=3
epoch=3   # measurement repetitions per data point

host=`hostname`
chris41="chris41.omni.hpcc.jp"
dbs11="dbs11"

#basically
thread=24
if test $host = $dbs11 ; then
  thread=224
fi

result=result_mocc_ycsbB_tuple1m_val1k_skew09_ope10-100.dat
# Fix: -f so a missing result file on a fresh run doesn't print an error.
rm -f $result
echo "#worker threads, avg-tps, min-tps, max-tps, avg-ar, min-ar, max-ar, avg-camiss, min-camiss, max-camiss" >> $result
# Fix: the recorded command template was missing the '$' on $tuple.
echo "#sudo perf stat -e cache-misses,cache-references -o ana.txt numactl --interleave=all ../mocc.exe $tuple $maxope $thread $rratio $rmw $skew $ycsb $cpumhz $epochtime $extime" >> $result

for ((maxope = 10; maxope <= 100; maxope+=10))
do
  echo "sudo perf stat -e cache-misses,cache-references -o ana.txt numactl --interleave=all ../mocc.exe $tuple $maxope $thread $rratio $rmw $skew $ycsb $cpumhz $epochtime $extime"

  # Accumulators / extrema for this data point
  sumTH=0
  sumAR=0
  sumCA=0
  maxTH=0
  maxAR=0
  maxCA=0
  minTH=0
  minAR=0
  minCA=0

  for ((i = 1; i <= epoch; ++i))
  do
    # dbs11 needs sudo for perf; chris41 does not.
    if test $host = $dbs11 ; then
      sudo perf stat -e cache-misses,cache-references -o ana.txt numactl --interleave=all ../mocc.exe $tuple $maxope $thread $rratio $rmw $skew $ycsb $cpumhz $epochtime $extime > exp.txt
    fi
    if test $host = $chris41 ; then
      perf stat -e cache-misses,cache-references -o ana.txt numactl --interleave=all ../mocc.exe $tuple $maxope $thread $rratio $rmw $skew $ycsb $cpumhz $epochtime $extime > exp.txt
    fi

    # Extract this run's metrics from the benchmark / perf outputs.
    tmpTH=`grep Throughput ./exp.txt | awk '{print $2}'`
    tmpAR=`grep abortRate ./exp.txt | awk '{print $2}'`
    tmpCA=`grep cache-misses ./ana.txt | awk '{print $4}'`
    sumTH=`echo "$sumTH + $tmpTH" | bc`
    sumAR=`echo "scale=4; $sumAR + $tmpAR" | bc | xargs printf %.4f`
    sumCA=`echo "$sumCA + $tmpCA" | bc`
    echo "tmpTH: $tmpTH, tmpAR: $tmpAR, tmpCA: $tmpCA"

    # Seed the extrema from the first run ...
    if test $i -eq 1 ; then
      maxTH=$tmpTH
      maxAR=$tmpAR
      maxCA=$tmpCA
      minTH=$tmpTH
      minAR=$tmpAR
      minCA=$tmpCA
    fi
    # ... then fold in later runs via bc (shell test can't compare decimals).
    flag=`echo "$tmpTH > $maxTH" | bc`
    if test $flag -eq 1 ; then
      maxTH=$tmpTH
    fi
    flag=`echo "$tmpAR > $maxAR" | bc`
    if test $flag -eq 1 ; then
      maxAR=$tmpAR
    fi
    flag=`echo "$tmpCA > $maxCA" | bc`
    if test $flag -eq 1 ; then
      maxCA=$tmpCA
    fi
    flag=`echo "$tmpTH < $minTH" | bc`
    if test $flag -eq 1 ; then
      minTH=$tmpTH
    fi
    flag=`echo "$tmpAR < $minAR" | bc`
    if test $flag -eq 1 ; then
      minAR=$tmpAR
    fi
    flag=`echo "$tmpCA < $minCA" | bc`
    if test $flag -eq 1 ; then
      minCA=$tmpCA
    fi
  done

  avgTH=`echo "$sumTH / $epoch" | bc`
  avgAR=`echo "scale=4; $sumAR / $epoch" | bc | xargs printf %.4f`
  avgCA=`echo "$sumCA / $epoch" | bc`
  echo "sumTH: $sumTH, sumAR: $sumAR, sumCA: $sumCA"
  echo "avgTH: $avgTH, avgAR: $avgAR, avgCA: $avgCA"
  echo "maxTH: $maxTH, maxAR: $maxAR, maxCA: $maxCA"
  echo "minTH: $minTH, minAR: $minAR, minCA: $minCA"
  echo ""
  # Fix: removed the stray comma after $maxAR so the data row is uniformly
  # space-separated, matching the 10 columns declared in the header line.
  echo "$maxope $avgTH $minTH $maxTH $avgAR $minAR $maxAR $avgCA $minCA $maxCA" >> $result
done
|
import React, { PureComponent } from 'react';
import { Row, Col, Card, Table, Select } from 'antd';
import { View } from '@antv/data-set';
import { Chart, Axis, Geom, Legend, Tooltip } from 'bizcharts';
import styles from './index.less';
// Inventory detail page for a single cargo item: a summary card, a monthly
// purchase/sales bar chart, and tabbed inbound/outbound stock tables.
// NOTE(review): tables currently use hard-coded demo data; the data key
// 'aount' (presumably a typo of 'amount') is used consistently and must not
// be renamed without updating every dataSource row.
export default class RContract extends PureComponent {
  state = {
    tabsKey: '1', // '1' = outbound records tab, '2' = inbound records tab
  }

  // Switch between the outbound / inbound record tabs.
  onTabChange = (key) => {
    this.setState({ tabsKey: key });
  }

  render() {
    const { tabsKey } = this.state;

    // Column titles flip between 出库 (outbound) and 入库 (inbound) per tab.
    const columns = [{
      title: '编号',
      dataIndex: 'id',
      key: 'id',
    }, {
      title: `${tabsKey === '1' ? '出库' : '入库'}日期`,
      dataIndex: 'time',
      key: 'time',
    }, {
      title: `${tabsKey === '1' ? '出库' : '入库'}数量`,
      dataIndex: 'number',
      key: 'number',
    }, {
      title: `${tabsKey === '1' ? '出库' : '入库'}单价`,
      dataIndex: 'price',
      key: 'price',
    }, {
      title: '库存',
      dataIndex: 'aount',
    }, {
      title: '仓库名称',
      dataIndex: 'wname',
      key: 'wname',
    }];

    // Demo fixture: inbound records.
    const dataSourceInput = [{
      id: 1,
      time: '2017-08-22',
      number: '28吨',
      aount: '28吨',
      price: '10万元',
      wname: '散货仓库',
    }];

    // Demo fixture: outbound records.
    const dataSourceOut = [{
      id: 1,
      time: '2017-08-23',
      number: '5吨',
      aount: '23吨',
      price: '11.5万元',
      wname: '散货仓库',
    }, {
      id: 2,
      time: '2017-08-26',
      number: '23吨',
      aount: '0吨',
      price: '11.5万元',
      wname: '散货仓库',
    }];

    // Responsive grid settings shared by the summary card columns.
    const topColResponsiveProps = {
      xs: 24,
      sm: 12,
      md: 12,
      lg: 12,
      xl: 6,
      style: { marginBottom: 24 },
    };

    // Monthly purchase ('采购') vs sales ('销售') figures for the bar chart.
    const salesData = [
      { name: '采购', '一月 ': 18, '二月 ': 28, '三月 ': 39, '四月 ': 81, '五月 ': 47, '六月 ': 20, '七月 ': 24, '八月 ': 35, '九月 ': 41, '十月 ': 70, '十一月 ': 55, '十二月 ': 68 },
      { name: '销售', '一月 ': 20, '二月 ': 33, '三月 ': 44, '四月 ': 99, '五月 ': 52, '六月 ': 35, '七月 ': 37, '八月 ': 42, '九月 ': 45, '十月 ': 80, '十一月 ': 65, '十二月 ': 78 },
    ];

    // Fold the wide per-month columns into (month, members) rows for bizcharts.
    const trendsData = new View().source(salesData);
    trendsData.transform({
      type: 'fold',
      fields: ['一月 ', '二月 ', '三月 ', '四月 ', '五月 ', '六月 ', '七月 ', '八月 ', '九月 ', '十月 ', '十一月 ', '十二月 '], // 展开字段集
      key: 'month', // key字段
      value: 'members', // value字段
    });

    return (
      <div>
        <Row>
          <Card>
            <Col {...topColResponsiveProps} xl={{ span: 6 }}>
              <h3>货物编号:<strong>SDGSWLKH232</strong></h3>
            </Col>
            <Col {...topColResponsiveProps} xl={{ span: 6 }}>
              <Row gutter={{ md: 8, lg: 24, xl: 48 }}>
                <h3>
                  货物名称:
                  <Select defaultValue="1" style={{ width: '60%' }}>
                    <Select.Option value="1">铝锭</Select.Option>
                  </Select>
                </h3>
              </Row>
            </Col>
            <Col {...topColResponsiveProps} xl={{ span: 4 }}>
              <h3>货物总重:<strong>20吨</strong></h3>
            </Col>
            <Col {...topColResponsiveProps} xl={{ span: 4 }}>
              <h3>货物均价:<strong>10万/吨</strong></h3>
            </Col>
            <Col {...topColResponsiveProps} xl={{ span: 4 }}>
              <h3>当前货物总库存:<strong>5吨</strong></h3>
            </Col>
          </Card>
        </Row>
        <br />
        <Row>
          <Col span={24}>
            <Card title="采购/销售(吨)">
              <Chart height={250} data={trendsData} forceFit>
                <Axis name="month" />
                <Axis name="members" />
                <Legend />
                <Tooltip crosshairs={{ type: 'y' }} />
                <Geom
                  type="interval"
                  position="month*members"
                  color="name"
                  adjust={[{ type: 'dodge', marginRatio: 1 / 32 }]}
                />
              </Chart>
            </Card>
          </Col>
        </Row>
        <br />
        <Row>
          <Col span={24}>
            <Card style={{ width: '100%' }} tabList={[{ key: '1', tab: '出库记录' }, { key: '2', tab: '入库记录' }]} onTabChange={(key) => { this.onTabChange(key); }}>
              {tabsKey === '1' ? (
                <Table className={styles.defaultCursor} dataSource={dataSourceOut} columns={columns} scroll={{ x: 1000 }} rowKey="id" />
              ) : (
                <Table dataSource={dataSourceInput} columns={columns} scroll={{ x: 1000 }} rowKey="id" />
              )}
            </Card>
          </Col>
        </Row>
      </div>
    );
  }
}
|
#!/bin/bash
# Demo bootstrap: ensure the target GCS bucket exists, then create and wait
# for two Confluent Cloud GCS sink connectors (raw and Avro).

# Source library
source ../utils/helper.sh
source ../utils/ccloud_library.sh

# Source demo-specific configurations
source config/demo.cfg

ccloud::validate_cloud_storage config/demo.cfg || exit 1

# Create the bucket only if `gsutil ls` doesn't already show it.
bucket_list=$(gsutil ls | grep $GCS_BUCKET)
if [[ ! "$bucket_list" =~ "$GCS_BUCKET" ]]; then
  echo "gsutil mb -l $STORAGE_REGION gs://$GCS_BUCKET"
  gsutil mb -l $STORAGE_REGION gs://$GCS_BUCKET
fi

# Register both connectors and block (up to 300s each) until they are up.
ccloud::create_connector connectors/gcs_no_avro.json || exit 1
ccloud::wait_for_connector_up connectors/gcs_no_avro.json 300 || exit 1
ccloud::create_connector connectors/gcs_avro.json || exit 1
ccloud::wait_for_connector_up connectors/gcs_avro.json 300 || exit 1

exit 0
|
// Function Objects (functors) 101
#include <iostream>
#include <string>
#include <cstring> // strcmp
#include <cstdlib> // qsort
using namespace std;
// the idea behind function objects is to use an overoaded () operator for a class as function pointer
// illustrate using the c-code qsort routine test code from 'man qsort'
// Plain C comparator for qsort over an array of char*: dereference the
// element pointers, then compare the strings (reference implementation from
// the qsort man page).
static int
cmpstringp(const void *p1, const void *p2) // man qsort sample code
{
    /* The actual arguments to this function are "pointers to
       pointers to char", but strcmp(3) arguments are "pointers
       to char", hence the following cast plus dereference */
    return strcmp(* (char * const *) p1, * (char * const *) p2);
}
enum Order_e { ASCEND, DESCEND };

// Function object comparing two strings via qsort-style element pointers,
// in ascending or descending order.
//
// Bug fix: qsort hands the comparator pointers to the ELEMENTS of the array
// (here char**), not the strings themselves. The original cast p1/p2 straight
// to const char*, so it compared raw pointer bytes instead of string contents
// (cmpstringp above performs the extra dereference correctly). Dereference
// the element pointers before calling strcmp.
class X {
    Order_e o;
public:
    X(Order_e O) : o(O) {}
    // p1/p2: pointers to char* array elements, as passed by qsort.
    int operator()(const void* p1, const void* p2) {
        const char* s1 = *(const char* const*) p1;
        const char* s2 = *(const char* const*) p2;
        return o == ASCEND ? strcmp(s1, s2) : -1 * strcmp(s1, s2);
    }
};
// Global functor instances wrapped in plain functions: qsort needs a function
// pointer, and a stateful function object cannot decay to one.
X a(ASCEND);
int ascend(const void*p1, const void*p2) {
    return a(p1,p2); // function object
}

X d(DESCEND);
int descend(const void*p1, const void*p2) {
    return d(p1,p2); // function object
}
// Demo driver: sort argv with each comparator flavor (plain C function,
// functor-backed wrapper functions, and stateless lambdas that construct a
// functor internally), printing the argument list after each pass.
int main(int argc, char**argv)
{
    cout << "input data\n";
    for(int arg=1; arg<argc; arg++) cout << argv[arg] << "\n";

    cout << "c-code function compare\n";
    qsort(argv+1, argc-1, sizeof(char*), cmpstringp); // c-code function
    for(int arg=1; arg<argc; arg++) cout << argv[arg] << "\n";

    cout << "function ascend\n";
    qsort(argv+1, argc-1, sizeof(char*), ascend); // function
    for(int arg=1; arg<argc; arg++) cout << argv[arg] << "\n";

    cout << "function descend\n";
    qsort(argv+1, argc-1, sizeof(char*), descend); // function
    for(int arg=1; arg<argc; arg++) cout << argv[arg] << "\n";

    // Capture-less lambdas convert to the function pointer qsort expects.
    cout << "function object lambda ascend\n";
    qsort(argv+1, argc-1, sizeof(char*), [](const void*p1,const void*p2) {X x(ASCEND); return x(p1,p2); } ); // lambda function object
    for(int arg=1; arg<argc; arg++) cout << argv[arg] << "\n";

    cout << "function object lambda descend\n";
    qsort(argv+1, argc-1, sizeof(char*), [](const void*p1,const void*p2) {X x(DESCEND); return x(p1,p2); } ); // lambda function object
    for(int arg=1; arg<argc; arg++) cout << argv[arg] << "\n";

    // Kept for illustration: a function object itself cannot be passed to
    // qsort (it is not a function pointer), hence the wrappers above.
    // cout << "function object a (ascend)\n";
    // X a(ASCEND);
    // qsort(argv+1, argc-1, sizeof(char*), a); // function object
    // for(int arg=1; arg<argc; arg++) cout << argv[arg] << "\n";
    // cout << "function object d (descend)\n";
    // X d(DESCEND);
    // qsort(argv+1, argc-1, sizeof(char*), d); // function object
    // for(int arg=1; arg<argc; arg++) cout << argv[arg] << "\n";
}
|
#!/bin/bash
# Forward selected Confluent Platform service logs into Kafka topics by
# registering one FileStreamSource connector per log file through the
# Kafka Connect REST API.
CONNECT_PORT=${CONNECT_PORT:-8083}
FORWARDLOGS="${FORWARDLOGS:-1}"
# LOGS=(broker schema-registry rest-proxy connect-distributed zookeeper)
LOGS=(broker)

# Opt-out switch for environments that don't want log topics.
if [[ "$FORWARDLOGS" == "0" ]]; then
  echo "Skipping sinking logs to kafka due to \$FORWARDLOGS = 0."
  exit 0
fi

for (( i=0; i<${#LOGS[@]}; i++)); do
  # Write this service's connector config to a temp file, then POST it.
  cat <<EOF >/tmp/connector
{
    "name": "logs-${LOGS[$i]}",
    "config": {
        "connector.class": "org.apache.kafka.connect.file.FileStreamSourceConnector",
        "tasks.max": "1",
        "topic": "logs_${LOGS[$i]}",
        "file":"/var/log/${LOGS[$i]}.log"
    }
}
EOF
  curl -vs --stderr - -X POST -H "Content-Type: application/json" \
       --data @/tmp/connector "http://localhost:$CONNECT_PORT/connectors"
done
rm /tmp/connector
|
#!/usr/bin/env bash
# Container healthcheck: succeed iff something is listening on port 8080.
set -eo pipefail

# Take the first address reported by `hostname -i` (fall back to loopback).
host="$( (hostname -i || echo '127.0.0.1') | while IFS=$' ' read -a _line ; do echo $_line; break; done )"

# just test that the port is open
if nc -w 1 -z "$host" 8080 ; then
  exit 0
fi
exit 1
|
<reponame>minuk8932/Algorithm_BaekJoon
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.StringTokenizer;
/**
 * BOJ 3682 solution. Reads T test cases, each a directed graph with n nodes
 * and m edges, and prints one number per case.
 *
 * Structure is a Kosaraju-style two-pass DFS: the first pass over {@code map}
 * pushes nodes onto {@code stack} in finish order; the second pass walks the
 * reversed graph {@code revMap} in pop order, incrementing {@code count}.
 *
 * NOTE(review): {@code count} is bumped both for each unvisited popped node
 * and for each node reached inside the reverse-pass DFS — the exact output
 * semantics depend on the problem statement; verify against BOJ 3682 before
 * changing anything here.
 */
public class Boj3682 {
    // map: forward adjacency lists; revMap: reversed edges (index 0 unused).
    private static ArrayList<Integer>[] map, revMap;
    // DFS finish-order stack shared between the two passes.
    private static ArrayDeque<Integer> stack = new ArrayDeque<>();
    private static boolean[] isVisited;
    private static int count;
    private static final String NEW_LINE = "\n";

    public static void main(String[] args) throws Exception{
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StringBuilder sb = new StringBuilder();

        int T = Integer.parseInt(br.readLine());

        while(T-- > 0) {
            StringTokenizer st = new StringTokenizer(br.readLine());

            int n = Integer.parseInt(st.nextToken());
            int m = Integer.parseInt(st.nextToken());

            map = new ArrayList[n + 1];
            revMap = new ArrayList[n + 1];
            isVisited = new boolean[n + 1];

            for(int i = 0; i < n + 1; i++) {
                map[i] = new ArrayList<>();
                revMap[i] = new ArrayList<>();
            }

            // Read edges a -> b; store both the edge and its reverse.
            while(m-- > 0) {
                st = new StringTokenizer(br.readLine());

                int a = Integer.parseInt(st.nextToken());
                int b = Integer.parseInt(st.nextToken());

                map[a].add(b);
                revMap[b].add(a);
            }

            // Pass 1: DFS on the forward graph, pushing nodes in finish order.
            for(int start = 1; start < n + 1; start++) {
                if(isVisited[start]) continue;
                backTracking(map, start, true);
                stack.push(start);
            }

            // Pass 2: DFS on the reversed graph in pop order, counting.
            count = 0;
            isVisited = new boolean[n + 1];

            while(!stack.isEmpty()) {
                int start = stack.pop();
                if(isVisited[start]) continue;

                count++;
                backTracking(revMap, start, false);
            }

            sb.append(count).append(NEW_LINE);
        }

        System.out.println(sb);
    }

    /**
     * DFS over {@code arr} from {@code current}.
     * save=true: push each child after its subtree finishes (pass 1).
     * save=false: increment {@code count} for each newly reached child (pass 2).
     */
    private static void backTracking(ArrayList<Integer>[] arr, int current, boolean save) {
        if(isVisited[current]) return;
        isVisited[current] = true;

        for(int next: arr[current]) {
            if(isVisited[next]) continue;
            if(!save) count++;

            backTracking(arr, next, save);
            if(save) stack.push(next);
        }
    }
}
|
#!/bin/bash
# Run the `black` model checker's solve command with the MathSAT backend on
# the file given as $1, making user-local libraries visible first.
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$HOME/.local/lib:$HOME/.local/lib64"
black solve -B mathsat --finite "$1"
|
package com.example.damio.imaginarycityguide;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentTransaction;
/** Static helpers for common Activity/Fragment operations. */
public class ActivityUtils {

    // Utility class: prevent instantiation (Effective Java Item 4).
    private ActivityUtils() {
        throw new AssertionError("No instances");
    }

    /**
     * Adds {@code fragment} to the container view {@code frameId} managed by
     * {@code fragmentManager}, tagging it with the fragment's simple class
     * name, and commits the transaction immediately.
     *
     * @param fragmentManager manager owning the transaction
     * @param fragment        fragment instance to add
     * @param frameId         resource id of the container view
     */
    public static void addFragmentToActivity(FragmentManager fragmentManager,
                                             Fragment fragment, int frameId) {
        FragmentTransaction transaction = fragmentManager.beginTransaction();
        transaction.add(frameId, fragment, fragment.getClass().getSimpleName());
        transaction.commit();
    }
}
|
import { Home, CreateEvent, CreateOrganization, NewPicture } from '../views';
// Route table consumed by the router: each entry maps one or more paths to a
// view component; `key` uniquely identifies the route and `exact` forces
// full-path matching.
const routes = [
  {
    path: ['/'],
    component: Home,
    key: 'HOME',
    exact: true,
  },
  {
    path: ['/pictures/new'],
    component: NewPicture,
    key: 'NEW_PICTURE',
    exact: true,
  },
  {
    path: ['/events/new'],
    component: CreateEvent,
    key: 'CREATE_EVENT',
    exact: true,
  },
  {
    path: ['/organizations/new'],
    component: CreateOrganization,
    key: 'CREATE_ORGANIZATION',
    exact: true,
  },
];

export default routes;
|
<reponame>VerdaPegasus/FarmersDelight
package vectorwing.farmersdelight.data;
import com.google.common.collect.Sets;
import com.google.gson.GsonBuilder;
import mezz.jei.api.MethodsReturnNonnullByDefault;
import net.minecraft.advancements.Advancement;
import net.minecraft.advancements.AdvancementRewards;
import net.minecraft.advancements.FrameType;
import net.minecraft.advancements.RequirementsStrategy;
import net.minecraft.advancements.critereon.*;
import net.minecraft.data.DataGenerator;
import net.minecraft.data.DataProvider;
import net.minecraft.data.HashCache;
import net.minecraft.data.advancements.AdvancementProvider;
import net.minecraft.resources.ResourceLocation;
import net.minecraft.world.level.ItemLike;
import net.minecraft.world.level.block.Blocks;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import vectorwing.farmersdelight.FarmersDelight;
import vectorwing.farmersdelight.common.advancement.CuttingBoardTrigger;
import vectorwing.farmersdelight.common.registry.ModBlocks;
import vectorwing.farmersdelight.common.registry.ModEffects;
import vectorwing.farmersdelight.common.registry.ModItems;
import vectorwing.farmersdelight.common.utility.TextUtils;
import javax.annotation.ParametersAreNonnullByDefault;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Set;
import java.util.function.Consumer;
@ParametersAreNonnullByDefault
@MethodsReturnNonnullByDefault
public class Advancements extends AdvancementProvider
{
private final Path PATH;
public static final Logger LOGGER = LogManager.getLogger();
    /** @param generatorIn data generator whose output folder receives the advancement JSON files. */
    public Advancements(DataGenerator generatorIn) {
        super(generatorIn);
        PATH = generatorIn.getOutputFolder();
    }
    /**
     * Builds every mod advancement and serializes each one to
     * {@code data/<namespace>/advancements/<path>.json} under the generator's
     * output folder, failing fast on duplicate ids.
     */
    @Override
    public void run(HashCache cache) {
        Set<ResourceLocation> set = Sets.newHashSet();
        Consumer<Advancement> consumer = (advancement) -> {
            // Guard against two builders registering the same id.
            if (!set.add(advancement.getId())) {
                throw new IllegalStateException("Duplicate advancement " + advancement.getId());
            } else {
                Path path1 = getPath(PATH, advancement);
                try {
                    DataProvider.save((new GsonBuilder()).setPrettyPrinting().create(), cache, advancement.deconstruct().serializeToJson(), path1);
                }
                catch (IOException ioexception) {
                    // Log-and-continue: one bad file shouldn't abort the whole run.
                    LOGGER.error("Couldn't save advancement {}", path1, ioexception);
                }
            }
        };
        new FarmersDelightAdvancements().accept(consumer);
    }
    /** Maps an advancement id to its JSON output path under {@code pathIn}. */
    private static Path getPath(Path pathIn, Advancement advancementIn) {
        return pathIn.resolve("data/" + advancementIn.getId().getNamespace() + "/advancements/" + advancementIn.getId().getPath() + ".json");
    }
public static class FarmersDelightAdvancements implements Consumer<Consumer<Advancement>>
{
@Override
@SuppressWarnings("unused")
public void accept(Consumer<Advancement> consumer) {
Advancement farmersDelight = Advancement.Builder.advancement()
.display(ModItems.COOKING_POT.get(),
TextUtils.getTranslation("advancement.root"),
TextUtils.getTranslation("advancement.root.desc"),
new ResourceLocation("minecraft:textures/block/bricks.png"),
FrameType.TASK, false, false, false)
.addCriterion("seeds", InventoryChangeTrigger.TriggerInstance.hasItems(new ItemLike[]{}))
.save(consumer, getNameId("main/root"));
// Farming Branch
Advancement huntAndGather = getAdvancement(farmersDelight, ModItems.FLINT_KNIFE.get(), "craft_knife", FrameType.TASK, true, true, false)
.addCriterion("flint_knife", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.FLINT_KNIFE.get()))
.addCriterion("iron_knife", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.IRON_KNIFE.get()))
.addCriterion("diamond_knife", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.DIAMOND_KNIFE.get()))
.addCriterion("golden_knife", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.GOLDEN_KNIFE.get()))
.addCriterion("netherite_knife", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.NETHERITE_KNIFE.get()))
.requirements(RequirementsStrategy.OR)
.save(consumer, getNameId("main/craft_knife"));
Advancement graspingAtStraws = getAdvancement(huntAndGather, ModItems.STRAW.get(), "harvest_straw", FrameType.TASK, true, true, false)
.addCriterion("harvest_straw", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.STRAW.get()))
.save(consumer, getNameId("main/harvest_straw"));
Advancement wildButcher = getAdvancement(huntAndGather, ModItems.HAM.get(), "get_ham", FrameType.TASK, true, true, false)
.addCriterion("ham", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.HAM.get()))
.save(consumer, getNameId("main/get_ham"));
Advancement dippingYourRoots = getAdvancement(graspingAtStraws, ModItems.RICE_PANICLE.get(), "plant_rice", FrameType.TASK, true, true, false)
.addCriterion("plant_rice", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.RICE_CROP.get()))
.save(consumer, getNameId("main/plant_rice"));
Advancement cropRotation = getAdvancement(dippingYourRoots, ModItems.CABBAGE.get(), "plant_all_crops", FrameType.CHALLENGE, true, true, false)
.addCriterion("wheat", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.WHEAT))
.addCriterion("beetroot", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.BEETROOTS))
.addCriterion("carrot", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.CARROTS))
.addCriterion("potato", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.POTATOES))
.addCriterion("brown_mushroom", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.BROWN_MUSHROOM))
.addCriterion("red_mushroom", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.RED_MUSHROOM))
.addCriterion("sugar_cane", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.SUGAR_CANE))
.addCriterion("melon", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.MELON_STEM))
.addCriterion("pumpkin", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.PUMPKIN_STEM))
.addCriterion("sweet_berries", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.SWEET_BERRY_BUSH))
.addCriterion("cocoa", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.COCOA))
.addCriterion("cabbage", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.CABBAGE_CROP.get()))
.addCriterion("tomato", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.TOMATO_CROP.get()))
.addCriterion("onion", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.ONION_CROP.get()))
.addCriterion("rice", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.RICE_CROP.get()))
.addCriterion("nether_wart", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.NETHER_WART))
.addCriterion("chorus_flower", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.CHORUS_FLOWER))
.rewards(AdvancementRewards.Builder.experience(100))
.save(consumer, getNameId("main/plant_all_crops"));
Advancement plantFood = getAdvancement(dippingYourRoots, ModItems.RICH_SOIL.get(), "get_rich_soil", FrameType.GOAL, true, true, false)
.addCriterion("get_rich_soil", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.RICH_SOIL.get()))
.save(consumer, getNameId("main/get_rich_soil"));
Advancement fungusAmongUs = getAdvancement(plantFood, ModItems.RED_MUSHROOM_COLONY.get(), "get_mushroom_colony", FrameType.TASK, true, true, false)
.addCriterion("brown_mushroom_colony", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.BROWN_MUSHROOM_COLONY.get()))
.addCriterion("red_mushroom_colony", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.RED_MUSHROOM_COLONY.get()))
.requirements(RequirementsStrategy.OR)
.save(consumer, getNameId("main/get_mushroom_colony"));
Advancement cantTakeTheHeat = getAdvancement(huntAndGather, ModItems.NETHERITE_KNIFE.get(), "obtain_netherite_knife", FrameType.CHALLENGE, true, true, false)
.addCriterion("obtain_netherite_knife", InventoryChangeTrigger.TriggerInstance.hasItems(ModItems.NETHERITE_KNIFE.get()))
.rewards(AdvancementRewards.Builder.experience(200))
.save(consumer, getNameId("main/obtain_netherite_knife"));
// Cooking Branch
Advancement bonfireLit = getAdvancement(farmersDelight, Blocks.CAMPFIRE, "place_campfire", FrameType.TASK, true, true, false)
.addCriterion("campfire", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.CAMPFIRE))
.addCriterion("soul_campfire", PlacedBlockTrigger.TriggerInstance.placedBlock(Blocks.SOUL_CAMPFIRE))
.requirements(RequirementsStrategy.OR)
.save(consumer, getNameId("main/place_campfire"));
Advancement fireUpTheGrill = getAdvancement(bonfireLit, ModItems.STOVE.get(), "place_stove", FrameType.TASK, true, true, false)
.addCriterion("stove", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.STOVE.get()))
.save(consumer, getNameId("main/place_stove"));
Advancement dinnerIsServed = getAdvancement(fireUpTheGrill, ModItems.COOKING_POT.get(), "place_cooking_pot", FrameType.GOAL, true, true, false)
.addCriterion("cooking_pot", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.COOKING_POT.get()))
.save(consumer, getNameId("main/place_cooking_pot"));
Advancement portableCooking = getAdvancement(fireUpTheGrill, ModItems.SKILLET.get(), "use_skillet", FrameType.TASK, true, true, false)
.addCriterion("skillet", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.SKILLET.get()))
.save(consumer, getNameId("main/use_skillet"));
Advancement sizzlingHot = getAdvancement(portableCooking, ModItems.SKILLET.get(), "place_skillet", FrameType.TASK, true, true, false)
.addCriterion("skillet", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.SKILLET.get()))
.save(consumer, getNameId("main/place_skillet"));
Advancement cupOfHappiness = getAdvancement(dinnerIsServed, ModItems.HOT_COCOA.get(), "drink_hot_cocoa", FrameType.TASK, true, true, false)
.addCriterion("hot_cocoa", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.HOT_COCOA.get()))
.save(consumer, getNameId("main/drink_hot_cocoa"));
Advancement warmAndCozy = getAdvancement(dinnerIsServed, ModItems.CHICKEN_SOUP.get(), "eat_comfort_food", FrameType.TASK, true, true, false)
.addCriterion("comfort", EffectsChangedTrigger.TriggerInstance.hasEffects(MobEffectsPredicate.effects().and(ModEffects.COMFORT.get())))
.save(consumer, getNameId("main/eat_comfort_food"));
Advancement wellServed = getAdvancement(warmAndCozy, ModItems.STEAK_AND_POTATOES.get(), "eat_nourishing_food", FrameType.TASK, true, true, false)
.addCriterion("nourished", EffectsChangedTrigger.TriggerInstance.hasEffects(MobEffectsPredicate.effects().and(ModEffects.NOURISHMENT.get())))
.save(consumer, getNameId("main/eat_nourishing_food"));
Advancement gloriousFeast = getAdvancement(wellServed, ModItems.ROAST_CHICKEN_BLOCK.get(), "place_feast", FrameType.TASK, true, true, false)
.addCriterion("roast_chicken", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.ROAST_CHICKEN_BLOCK.get()))
.addCriterion("stuffed_pumpkin", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.STUFFED_PUMPKIN_BLOCK.get()))
.addCriterion("honey_glazed_ham", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.HONEY_GLAZED_HAM_BLOCK.get()))
.addCriterion("shepherds_pie", PlacedBlockTrigger.TriggerInstance.placedBlock(ModBlocks.SHEPHERDS_PIE_BLOCK.get()))
.requirements(RequirementsStrategy.OR)
.save(consumer, getNameId("main/place_feast"));
Advancement watchYourFingers = getAdvancement(fireUpTheGrill, ModItems.CUTTING_BOARD.get(), "use_cutting_board", FrameType.TASK, true, true, false)
.addCriterion("cutting_board", CuttingBoardTrigger.Instance.simple())
.save(consumer, getNameId("main/use_cutting_board"));
Advancement masterChef = getAdvancement(gloriousFeast, ModItems.HONEY_GLAZED_HAM.get(), "master_chef", FrameType.CHALLENGE, true, true, false)
.addCriterion("mixed_salad", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.MIXED_SALAD.get()))
.addCriterion("beef_stew", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.BEEF_STEW.get()))
.addCriterion("chicken_soup", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.CHICKEN_SOUP.get()))
.addCriterion("vegetable_soup", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.VEGETABLE_SOUP.get()))
.addCriterion("fish_stew", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.FISH_STEW.get()))
.addCriterion("fried_rice", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.FRIED_RICE.get()))
.addCriterion("pumpkin_soup", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.PUMPKIN_SOUP.get()))
.addCriterion("baked_cod_stew", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.BAKED_COD_STEW.get()))
.addCriterion("noodle_soup", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.NOODLE_SOUP.get()))
.addCriterion("pasta_with_meatballs", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.PASTA_WITH_MEATBALLS.get()))
.addCriterion("pasta_with_mutton_chop", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.PASTA_WITH_MUTTON_CHOP.get()))
.addCriterion("roasted_mutton_chops", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.ROASTED_MUTTON_CHOPS.get()))
.addCriterion("vegetable_noodles", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.VEGETABLE_NOODLES.get()))
.addCriterion("steak_and_potatoes", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.STEAK_AND_POTATOES.get()))
.addCriterion("ratatouille", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.RATATOUILLE.get()))
.addCriterion("squid_ink_pasta", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.SQUID_INK_PASTA.get()))
.addCriterion("grilled_salmon", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.GRILLED_SALMON.get()))
.addCriterion("roast_chicken", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.ROAST_CHICKEN.get()))
.addCriterion("stuffed_pumpkin", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.STUFFED_PUMPKIN.get()))
.addCriterion("honey_glazed_ham", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.HONEY_GLAZED_HAM.get()))
.addCriterion("shepherds_pie", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.SHEPHERDS_PIE.get()))
.addCriterion("bacon_and_eggs", ConsumeItemTrigger.TriggerInstance.usedItem(ModItems.BACON_AND_EGGS.get()))
.rewards(AdvancementRewards.Builder.experience(200))
.save(consumer, getNameId("main/master_chef"));
}
/**
 * Starts a child advancement builder whose title and description come from the
 * lang file under {@code advancement.<name>} / {@code advancement.<name>.desc}.
 * The icon frame, toast, chat announcement and hidden flags are passed through.
 */
protected static Advancement.Builder getAdvancement(Advancement parent, ItemLike display, String name, FrameType frame, boolean showToast, boolean announceToChat, boolean hidden) {
    String titleKey = "advancement." + name;
    return Advancement.Builder.advancement()
            .parent(parent)
            .display(display,
                    TextUtils.getTranslation(titleKey),
                    TextUtils.getTranslation(titleKey + ".desc"),
                    null, frame, showToast, announceToChat, hidden);
}
/** Namespaces an advancement path with the mod id, e.g. {@code farmersdelight:main/foo}. */
private String getNameId(String id) {
    return String.format("%s:%s", FarmersDelight.MODID, id);
}
}
}
|
package uk.ac.cam.ahk44.chess;
import java.util.ArrayList;
import java.util.List;
/** The bishop: a sliding piece restricted to diagonal movement. */
public class Bishop extends Piece {

    public Bishop(char name, Position piecePosition, PieceColor pieceColor, Board board) {
        super(name, piecePosition, pieceColor, board);
        // NOTE(review): these assignments repeat the arguments already handed to
        // super(...); kept to preserve existing behaviour — confirm against
        // Piece's constructor before removing.
        this.name = name;
        this.position = piecePosition;
        this.pieceColor = pieceColor;
        this.board = board;
    }

    /** All reachable squares along the four diagonals (up to board range 8). */
    @Override
    List<Position> validNextPositions() {
        final List<Position> moves = new ArrayList<>();
        position.getAllDiagonalMoves(8, board(), moves);
        return moves;
    }

    /** Unicode glyph: filled for black, outline for white. */
    @Override
    char icon() {
        if (pieceColor == PieceColor.BLACK) {
            return '♝';
        }
        return '♗';
    }

    /** Standard relative material value of a bishop. */
    @Override
    int value() {
        return 3;
    }

    /** Algebraic-notation letter. */
    @Override
    char name() {
        return 'B';
    }
}
|
def maxProfit(prices: List[int]) -> int:
    """Return the best profit from a single buy-then-sell.

    Single pass: track the cheapest price seen so far and the best spread
    against it. Returns 0 for empty input or monotonically falling prices.
    """
    best = 0
    if prices:
        cheapest = prices[0]
        for p in prices:
            cheapest = min(cheapest, p)
            best = max(best, p - cheapest)
    return best
<filename>js/backgroundlayer.js<gh_stars>0
// A parallax background layer. Scroll offsets are scaled by `distance`,
// so layers with a smaller distance appear farther away.
// NOTE(review): built on enchant.js-style Class.create/Group — confirm framework.
var BackgroundLayer = Class.create(Group, {
    // level: owning level object; dist: parallax factor (0..1 typical).
    initialize: function(level, dist) {
        Group.call(this);
        this.level = level;
        this.distance = dist;
    },
    // Reposition the layer opposite the camera scroll, scaled by distance.
    setScroll: function(x, y) {
        this.x = -x * this.distance;
        this.y = -y * this.distance;
    }
});
|
<filename>flashlib.frc.nt/src/main/java/com/flash3388/frc/nt/beans/NtProperties.java
package com.flash3388.frc.nt.beans;
import com.beans.BooleanProperty;
import com.beans.DoubleProperty;
import com.beans.IntProperty;
import com.beans.Property;
import edu.wpi.first.networktables.NetworkTable;
import edu.wpi.first.networktables.NetworkTableEntry;
import edu.wpi.first.networktables.NetworkTableInstance;
/**
 * Static factories adapting NetworkTables entries to the {@code com.beans}
 * property interfaces. Each typed family offers four overloads resolving
 * progressively: (tableName, entryName) on the default NT instance ->
 * (instance, tableName, entryName) -> (table, entryName) -> (entry).
 */
public class NtProperties {

    // Utility class: static factories only, never instantiated.
    private NtProperties() {
    }

    /** Boolean property over {@code tableName/entryName} on the default NT instance. */
    public static BooleanProperty newBooleanProperty(String tableName, String entryName) {
        return newBooleanProperty(NetworkTableInstance.getDefault(), tableName, entryName);
    }

    /** Boolean property over {@code tableName/entryName} on the given NT instance. */
    public static BooleanProperty newBooleanProperty(NetworkTableInstance instance, String tableName, String entryName) {
        return newBooleanProperty(instance.getTable(tableName), entryName);
    }

    /** Boolean property over an entry of the given table. */
    public static BooleanProperty newBooleanProperty(NetworkTable table, String entryName) {
        return newBooleanProperty(table.getEntry(entryName));
    }

    /** Boolean property wrapping the given entry directly. */
    public static BooleanProperty newBooleanProperty(NetworkTableEntry entry) {
        return new NtBooleanProperty(entry);
    }

    /** Double property over {@code tableName/entryName} on the default NT instance. */
    public static DoubleProperty newDoubleProperty(String tableName, String entryName) {
        return newDoubleProperty(NetworkTableInstance.getDefault(), tableName, entryName);
    }

    /** Double property over {@code tableName/entryName} on the given NT instance. */
    public static DoubleProperty newDoubleProperty(NetworkTableInstance instance, String tableName, String entryName) {
        return newDoubleProperty(instance.getTable(tableName), entryName);
    }

    /** Double property over an entry of the given table. */
    public static DoubleProperty newDoubleProperty(NetworkTable table, String entryName) {
        return newDoubleProperty(table.getEntry(entryName));
    }

    /** Double property wrapping the given entry directly. */
    public static DoubleProperty newDoubleProperty(NetworkTableEntry entry) {
        return new NtDoubleProperty(entry);
    }

    /** Int property over {@code tableName/entryName} on the default NT instance. */
    public static IntProperty newIntProperty(String tableName, String entryName) {
        return newIntProperty(NetworkTableInstance.getDefault(), tableName, entryName);
    }

    /** Int property over {@code tableName/entryName} on the given NT instance. */
    public static IntProperty newIntProperty(NetworkTableInstance instance, String tableName, String entryName) {
        return newIntProperty(instance.getTable(tableName), entryName);
    }

    /** Int property over an entry of the given table. */
    public static IntProperty newIntProperty(NetworkTable table, String entryName) {
        return newIntProperty(table.getEntry(entryName));
    }

    /** Int property wrapping the given entry directly. */
    public static IntProperty newIntProperty(NetworkTableEntry entry) {
        return new NtIntProperty(entry);
    }

    /** String property over {@code tableName/entryName} on the default NT instance. */
    public static Property<String> newStringProperty(String tableName, String entryName) {
        return newStringProperty(NetworkTableInstance.getDefault(), tableName, entryName);
    }

    /** String property over {@code tableName/entryName} on the given NT instance. */
    public static Property<String> newStringProperty(NetworkTableInstance instance, String tableName, String entryName) {
        return newStringProperty(instance.getTable(tableName), entryName);
    }

    /** String property over an entry of the given table. */
    public static Property<String> newStringProperty(NetworkTable table, String entryName) {
        return newStringProperty(table.getEntry(entryName));
    }

    /** String property wrapping the given entry directly. */
    public static Property<String> newStringProperty(NetworkTableEntry entry) {
        return new NtStringProperty(entry);
    }
}
|
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Build all eggroll JVM modules and bundle them into eggroll.tar.gz.
# Quoted expansions and $(...) replace backticks/unquoted vars so paths
# containing spaces don't break word splitting.
pwd=$(pwd)
cwd=$(cd "$(dirname "$0")" && pwd)
cd "$cwd"

# Read the release version from BUILD_INFO (line like "version=X.Y.Z").
version=$(grep version ../BUILD_INFO | awk -F= '{print $2}')

cd ../jvm
mvn clean package -DskipTests
cd ..

# Collect all runtime jars into lib/.
if [[ ! -d "lib" ]]; then
    mkdir lib
fi
cp -r "jvm/core/target/eggroll-core-${version}.jar" lib
cp -r jvm/core/target/lib/* lib
cp -r "jvm/roll_pair/target/eggroll-roll-pair-${version}.jar" lib
cp -r jvm/roll_pair/target/lib/* ./lib
cp -r "jvm/roll_site/target/eggroll-roll-site-${version}.jar" lib
cp -r jvm/roll_site/target/lib/* lib

# Ship the metadata bootstrap SQL alongside the config.
cp jvm/core/main/resources/create-eggroll-meta-tables.sql conf

tar -czf eggroll.tar.gz lib bin conf data python deploy
cd "$pwd"
|
<reponame>StellarCrow/wfh-client<filename>src/app/core/services/local-storage.service.ts
import {Injectable} from '@angular/core';
@Injectable({
  providedIn: 'root'
})
export class LocalStorageService {

  /**
   * Read and JSON-parse a stored value.
   * Returns null when the key is absent or the stored text is not valid JSON.
   */
  public getItem(key: string): any {
    const itemString = localStorage.getItem(key);
    try {
      return JSON.parse(itemString);
    } catch (error) {
      return null;
    }
  }

  /**
   * JSON-serialize and store a value under the given key.
   * Failures (quota exceeded, circular value, storage unavailable) are
   * swallowed. Fixed: the original returned null from a void method.
   */
  public setItem(key: string, value: object): void {
    try {
      localStorage.setItem(key, JSON.stringify(value));
    } catch (error) {
      // Intentionally ignored — storage writes are best-effort here.
    }
  }
}
|
#!/bin/bash
#
# SPDX-License-Identifier: Apache-2.0
#
set -e

# Set the path to the crypto material to ensure it may be used
CRYPTOGEN=$1
export PATH="${CRYPTOGEN}:${PATH}"

# Get current location to ensure things go to the correct place
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Start generating
echo "Creating new crypto material and tx blocks from within directory ${BASEDIR}"
rm -rf "${BASEDIR}/../crypto-config"
rm -rf "${BASEDIR}/../channel-config"
mkdir "${BASEDIR}/../crypto-config"
mkdir "${BASEDIR}/../channel-config"

echo 'Generating base crypto-material and channel tx files....'
export FABRIC_CFG_PATH="${BASEDIR}"
cryptogen generate --config="${BASEDIR}/crypto-config.yaml" --output="${BASEDIR}/../crypto-config"

# Genesis block
configtxgen -profile TwoOrgsOrdererGenesis -outputBlock "${BASEDIR}/twoorgs.genesis.block"

# Channel tx
configtxgen -profile TwoOrgsChannel -outputCreateChannelTx "${BASEDIR}/../channel-config/baseapichannel.tx" -channelID baseapichannel # scenario test base api
configtxgen -profile TwoOrgsChannel -outputCreateChannelTx "${BASEDIR}/../channel-config/channelopschannel.tx" -channelID channelopschannel # scenario test channel query
configtxgen -profile TwoOrgsChannel -outputCreateChannelTx "${BASEDIR}/../channel-config/deprecatedchannel.tx" -channelID deprecatedchannel # scenario test deprecated sdk
configtxgen -profile TwoOrgsChannel -outputCreateChannelTx "${BASEDIR}/../channel-config/discoverychannel.tx" -channelID discoverychannel # sceanrio test discovery feature
configtxgen -profile TwoOrgsChannel -outputCreateChannelTx "${BASEDIR}/../channel-config/eventschannel.tx" -channelID eventschannel # sceanrio test discovery feature
configtxgen -profile TwoOrgsChannel -outputCreateChannelTx "${BASEDIR}/../channel-config/gatewaychannel.tx" -channelID gatewaychannel # sceanrio test gateway feature

echo 'Generating crypto-material complete, now renaming keys...'
# Rename the key files we use to be key.pem instead of a uuid.
# Stream find output through read -r instead of word-splitting $(find ...),
# and quote every expansion, so key paths containing spaces survive.
find "${BASEDIR}/../crypto-config" -type f -name "*_sk" | while read -r KEY; do
    KEY_DIR="$(dirname "${KEY}")"
    mv "${KEY}" "${KEY_DIR}/key.pem"
done
echo 'Renaming keys complete'
|
import os
from PIL import Image
from io import BytesIO
from typing import BinaryIO
class ImageModel:
    """Metadata record for an image stored on disk.

    Attributes:
        file_name: base name of the image file.
        width: image width in pixels.
        height: image height in pixels.
        path: full path where the image file is written.
    """

    def __init__(self, file_name: str, width: int, height: int, path: str):
        self.file_name = file_name
        self.width = width
        self.height = height
        self.path = path

    def save(self) -> None:
        """Persist the image metadata to the database.

        Implementation not provided. Note: the original method body was a
        bare comment, which is a SyntaxError in Python — a function body
        needs at least one statement, so this docstring also serves as one.
        """
def process_and_save_image(image: BinaryIO, filename: str, directory: str) -> None:
    """Decode an uploaded image, record its metadata, and write it to disk.

    Creates `directory` if needed, saves an ImageModel row describing the
    image dimensions and destination path, then writes the decoded image
    to that path.
    """
    os.makedirs(directory, exist_ok=True)
    destination = os.path.join(directory, filename)

    decoded = Image.open(BytesIO(image.read()))
    width, height = decoded.size

    ImageModel(
        file_name=filename,
        width=width,
        height=height,
        path=destination,
    ).save()
    decoded.save(destination)
# Installs Calico CNI using Tigera operators for networking between nodes
# https://projectcalico.docs.tigera.io/getting-started/kubernetes/quickstart
calico_version=3.22

# Apply a manifest (only if the namespace's pods aren't already Ready) and
# block until every pod in that namespace reaches the Ready condition.
# $1 = namespace, $2 = manifest URL
ensure_namespace_ready() {
    local namespace=$1
    local manifest=$2
    if ! kubectl wait pod --all --for=condition=Ready --namespace "$namespace" --timeout=0s
    then
        kubectl apply -f "$manifest"
        # The wait fails while the resources don't exist yet, so poll.
        until kubectl wait pod --all --for=condition=Ready --namespace "$namespace"
        do
            echo "warning: resources don't exist"
            sleep 1
        done
        echo "info: resources exist"
        kubectl wait pod --all --for=condition=Ready --namespace "$namespace" --timeout=120s
    fi
}

ensure_namespace_ready tigera-operator "https://projectcalico.docs.tigera.io/archive/v$calico_version/manifests/tigera-operator.yaml"
ensure_namespace_ready calico-system "https://projectcalico.docs.tigera.io/archive/v$calico_version/manifests/custom-resources.yaml"

# Install the calicoctl CLI matching the deployed Calico version.
calicoctl_version=${calico_version}.2
calicoctl_os=linux
calicoctl_platform=amd64
calicoctl_binary=calicoctl-$calicoctl_os-$calicoctl_platform
calicoctl_url=https://github.com/projectcalico/calico/releases/download/v$calicoctl_version/$calicoctl_binary
wget --no-verbose --timestamping "$calicoctl_url"
sudo install \
    --verbose \
    --mode=+x "$calicoctl_binary" /usr/local/bin/calicoctl
class StatesController < ApplicationController
  # GET /states
  # If a state name was submitted, look it up and redirect to its slug URL;
  # otherwise render the full list. The third condition is an intentional
  # assignment (state = ...) used both as a lookup and a nil guard.
  # NOTE(review): State.find is passed a *name*, not an id — presumably the
  # model overrides find (slug/name-aware); confirm against the State model.
  def index
    if params[:state] && params[:state][:name] && state = State.find(params[:state][:name])
      redirect_to state_path(id: state.slug) and return
    end
    @states = State.all
  end

  # GET /states/:id
  # Renders one state, bouncing to the root path when the lookup misses.
  # NOTE(review): relies on State.find returning nil (not raising) for a
  # missing id — confirm the model's finder semantics.
  def show
    @state = State.find(params[:id])
    if !@state
      redirect_to root_path and return
    end
  end

  # Placeholder action; renders findme view with no assigns.
  def findme
  end
end
module Migrations
  # Generator mixin that creates and fleshes out the migration backing
  # referable rewards.
  module RewardMigrator
    # Generate the create_referable_rewards migration, then inject the
    # reward table's columns into it.
    def create_reward_migration
      generate 'migration', "create_referable_rewards"
      add_reward_migration_details
    end

    private

    # Insert the reward columns just after the +create_table do |t|+ line of
    # the migration generated above. No-op when the migration is missing.
    # The squiggly heredoc (<<~) strips the common indentation, so the
    # injected lines land flush with the generator's output.
    def add_reward_migration_details
      return unless migration_created?('create_referable_rewards')
      inject_into_file(last_migration, after: 'do |t|') do
        <<~TEXT
          \n
          t.string "reward", null: false
          t.integer "referrals_required", null: false
          t.integer "acquireable_by", null: false
          t.timestamps
        TEXT
      end
    end
  end
end
import React, { Component } from 'react';
import {withGetScreen} from 'react-getscreen'
import Image1 from '../components/image-slide1'
import Image1m from '../components/image-slide1m'
import Image2 from '../components/image-slide2'
import Image2m from '../components/image-slide2m'
import Image3 from '../components/image-slide3'
import Image3m from '../components/image-slide3m'
class Test extends Component {
render() {
if(this.props.image === '1')
{
return this.props.isMobile() ? <Image1m/> : <Image1/>
}
if(this.props.image === '2')
{
return this.props.isMobile() ? <Image2m/> : <Image2/>
}
if(this.props.image === '3')
{
return this.props.isMobile() ? <Image3m/> : <Image3/>
}
if (this.props.isMobile()) return <div>Mobile</div>;
if (this.props.isTablet()) return <div>Tablet</div>;
return <div>Desktop</div>;
}
}
export default withGetScreen(Test); |
<reponame>gorisanson/spring-framework
/*
* Copyright 2002-2022 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.aot.nativex;
import java.util.LinkedHashMap;
import java.util.Map;
import org.springframework.aot.hint.JavaSerializationHints;
import org.springframework.aot.hint.TypeReference;
/**
* Write a {@link JavaSerializationHints} to the JSON output expected by the
* GraalVM {@code native-image} compiler, typically named
* {@code serialization-config.json}.
*
* @author <NAME>
* @author <NAME>
* @since 6.0
* @see <a href="https://www.graalvm.org/22.0/reference-manual/native-image/BuildConfiguration/">Native Image Build Configuration</a>
*/
class JavaSerializationHintsWriter {

    /** Shared stateless instance — the writer keeps no per-call state. */
    public static final JavaSerializationHintsWriter INSTANCE = new JavaSerializationHintsWriter();

    /**
     * Write the hinted types as a JSON array of {@code {"name": <type>}}
     * objects, the shape expected by {@code serialization-config.json}.
     */
    public void write(BasicJsonWriter writer, JavaSerializationHints hints) {
        writer.writeArray(hints.types().map(this::toAttributes).toList());
    }

    /**
     * Wrap one type reference in the single-key attribute map GraalVM expects.
     * LinkedHashMap keeps insertion order for deterministic JSON output.
     */
    private Map<String, Object> toAttributes(TypeReference typeReference) {
        LinkedHashMap<String, Object> attributes = new LinkedHashMap<>();
        attributes.put("name", typeReference);
        return attributes;
    }
}
|
<reponame>gems-uff/prov-viewer<gh_stars>10-100
/*
* The MIT License
*
* Copyright 2017 Kohwalter.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package br.uff.ic.utility;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
/**
* Class to define a vertex-graph attribute (collapsed vertices)
* @author Kohwalter
*/
/**
 * Class to define a vertex-graph attribute (collapsed vertices).
 * Aggregates one attribute across the vertices that were collapsed together:
 * numeric values are summed into {@code value} with min/max tracked, string
 * values are concatenated as a comma-separated set of distinct entries.
 * @author Kohwalter
 */
public class GraphAttribute {

    private String name;
    private String value; // Need to change to only use this variable for speedup purposes during queries.
    private double minValue;
    private double maxValue;
    // private int quantity;
    private Map<String, String> originalValues; // Need to change to a Map<String, String> to represent (OriginGraph, Value)

    /**
     * Default constructor
     * @param name is the attribute name
     * @param value is the attribute value
     * @param origin is the name of the graph that has this attribute
     */
    public GraphAttribute(String name, String value, String origin) {
        this.name = name;
        this.value = value;
        // this.quantity = 1;
        if (Utils.tryParseDouble(value)){
            this.minValue = Utils.convertDouble(value.trim());
            this.maxValue = Utils.convertDouble(value.trim());
            double v = Utils.convertDouble(value.trim());
            // double v = ((int) (Utils.convertDouble(value.trim()) * 10000)) * 0.0001f;
            // this.value = String.valueOf(v);
        }
        else {
            // Non-numeric attribute: min/max are meaningless and zeroed by convention.
            this.minValue = 0;
            this.maxValue = 0;
        }
        this.originalValues = new HashMap<>();
        this.originalValues.put(origin, value);
    }

    /**
     * Constructor from an existing (origin -> value) map; the aggregate
     * value/min/max are recomputed by updateAttribute.
     * @param name is the attribute name
     * @param values map of origin graph/vertex keys to their raw values
     */
    public GraphAttribute(String name, Map<String, String> values) {
        this.name = name;
        this.originalValues = new HashMap<>();
        this.originalValues.putAll(values);
        updateAttribute(values);
    }

    /**
     * Constructor with all variables (used when quantity >=3)
     * @param name attribute name
     * @param value pre-computed aggregate value
     * @param min pre-computed minimum (as string)
     * @param max pre-computed maximum (as string)
     * @param values map of origin keys to raw values
     */
    public GraphAttribute(String name, String value, String min, String max, Map<String, String> values) {
        this.name = name;
        this.value = value;
        // this.quantity = Integer.valueOf(quantity);
        this.minValue = Utils.convertStringToDouble(min);
        this.maxValue = Utils.convertStringToDouble(max);
        this.originalValues = new HashMap<>();
        this.originalValues.putAll(values);
    }

    /**
     * Method to update the attribute when computing the collapsed set.
     * Numeric: value becomes the SUM of all originals (averaging happens at
     * read time in getAverageValue). String: value becomes a comma-separated
     * list of distinct entries.
     * @param values is the Map that contains all original values
     */
    public void updateAttribute(Map<String, String> values) {
        this.minValue = Double.POSITIVE_INFINITY;
        this.maxValue = Double.NEGATIVE_INFINITY;
        String testFirstValue = (String) values.values().toArray()[0];
        // NOTE(review): both operands of this && are the same call — presumably
        // one was meant to test a different value (e.g. an existing original);
        // confirm the intended condition before changing it.
        if (Utils.tryParseDouble(testFirstValue) && Utils.tryParseDouble(testFirstValue)) {
            originalValues.putAll(values);
            double v = 0;
            for(String s : originalValues.values()) {
                v += Utils.convertStringToDouble(s);
                this.minValue = Math.min(this.minValue, Utils.convertStringToDouble(s));
                this.maxValue = Math.max(this.maxValue, Utils.convertStringToDouble(s));
            }
            this.value = Double.toString(v);
        } else { // This value is a String
            originalValues.putAll(values);
            this.value = "";
            // Build ", a, b, c" then strip the leading separator below.
            // NOTE(review): the contains() check suppresses values that are
            // substrings of already-added ones — confirm that is acceptable.
            for(String s : originalValues.values()) {
                if(!this.value.contains(s))
                    this.value += ", " + s;
            }
            this.value = this.value.replaceFirst(", ", "");
        }
        // this.quantity = originalValues.size();
    }

    /**
     * Method to return the attribute name
     * @return name
     */
    public String getName() {
        return this.name;
    }

    /**
     * method to return the attribute value.
     * For collapsed numeric attributes this divides the stored SUM by the
     * number of originals; otherwise the raw value is returned as-is.
     * @return value
     */
    public String getAverageValue() {
        // Return the average number
        if ((this.originalValues.size() > 1) && Utils.tryParseDouble(this.value))
            return Double.toString(Utils.convertDouble(this.value) / this.originalValues.size());
        else
            return this.value;
    }

    /** @return the raw aggregate value (a SUM for collapsed numeric attributes) */
    public String getValue() {
        return this.value;
    }

    /**
     * Method to return the minimum value for this attribute in the vertex-graph
     * @return
     */
    public String getMin() {
        return Double.toString(this.minValue);
    }

    /**
     * Method to return the maximum value for this attribute in the vertex-graph
     * @return max value
     */
    public String getMax() {
        return Double.toString(this.maxValue);
    }

    /**
     * Method to return the quantity of vertices that has this attribute in the vertex-graph
     * @return
     */
    public String getQuantity() {
        return Integer.toString(this.originalValues.size());
    }

    /** @return the live (origin -> value) map; callers can mutate it */
    public Map<String, String> getOriginalValues() {
        return this.originalValues;
    }

    /** @return the raw values across all origins */
    public Collection<String> getValues() {
        // Need to refactor to return the Map
        return this.originalValues.values();
    }

    /**
     * Method to set the attribute name
     * @param t is the new attribute name
     */
    public void setName(String t) {
        this.name = t;
    }

    /**
     * Method to set the attribute value
     * @param t is the new value
     */
    public void setValue(String t) {
        this.value = t;
    }

    /**
     * Function to print the attribute (HTML, ends with a line break)
     * @return string with the attribute
     */
    public String printAttribute() {
        if(this.originalValues.size() == 1)
            return this.getName() + ": " + this.getAverageValue() + " <br>";
        else
            return this.getName() + ": " + printValue();
    }

    /**
     * Function to print the attribute parameters.
     * With 3+ numeric originals: average plus (min ~ q1 ~ median ~ q3 ~ max);
     * with 2: average plus (min ~ max); otherwise the value itself.
     * NOTE(review): the quartile separators render as "X ~Y" (no space after
     * '~' for the inner terms) — confirm the formatting is intentional.
     * @return a string with the attribute characteristics
     */
    public String printValue() {
        if (Utils.tryParseDouble(this.value)) {
            if(this.originalValues.size() > 2) {
                return (Utils.convertStringToDouble(this.value) / this.originalValues.size())
                        + " (" + this.getMin() + " ~ "
                        + this.get1stQuartile() + " ~"
                        + this.getMedian() + " ~"
                        + this.get3rdQuartile() + " ~"
                        + this.getMax() + ")" + "<br>";
            } else if(this.originalValues.size() > 1){
                return (Utils.convertStringToDouble(this.value) / this.originalValues.size())
                        + " (" + this.getMin() + " ~ "
                        + this.getMax() + ")" + "<br>";
            }
            else
                return this.value + "<br>";
        }
        else {
            return this.value + "<br>";
        }
    }

    /** @return "name=averageValue" for notation export */
    public String toNotationString() {
        return this.getName() + "=" + this.getAverageValue();
    }

    // This method is only used for tests cases
    // public void incrementQuantity() {
    //     quantity++;
    // }

    // This method is only used for tests cases
    public void setMax(double t) {
        this.maxValue = t;
    }

    // This method is only used for tests cases
    public void setMin(double t) {
        this.minValue = t;
    }

    /**
     * Method to retrieve the median
     * @return
     */
    public String getMedian() {
        return Utils.median(originalValues.values().toArray(), 0, originalValues.size());
    }

    /**
     * Method to return the 1st quartile
     * @return
     */
    public String get1stQuartile() {
        return getQuartile(1);
    }

    /**
     * Method to return the 3rd quartile
     * @return
     */
    public String get3rdQuartile() {
        return getQuartile(3);
    }

    /**
     * Method to retrieve the quartile value from an array
     * @param quartile The quartile desires (25 for 1st and 75 for 3rd)
     * @return
     */
    public String getQuartile(int quartile) {
        return Utils.quartile(originalValues.values().toArray(), quartile);
    }

    /**
     * Method created to rename the "origin" key from the originalValues Map to
     * be "Graph Origin + vertex ID".
     * @param ID
     */
    public void updateOriginalValuesWithID(String ID){
        Map<String, String> temporaryValues;
        temporaryValues = new HashMap<>();
        for(String key : originalValues.keySet())
            temporaryValues.put(key + "_VERTEXID_" + ID, originalValues.get(key));
        originalValues.clear();
        originalValues.putAll(temporaryValues);
    }
}
|
#!/bin/bash
# Restore the cached lanyrd fixtures into the _tmp working directory.

LIGHT_GREEN='\033[1;32m'
GREEN='\033[0;32m'
CLEAR='\033[0m'

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CACHE_DIR="${SCRIPT_DIR}/../_tmp/lanyrd"

echo -e "${LIGHT_GREEN}-> Updating timestamp of all cached files ...${CLEAR}"
# NOTE(review): this touches everything under the *current* directory, not
# the cache; if only cached files are meant, use "find ${SCRIPT_DIR}/lanyrd".
find . -exec touch {} \;

echo -e "${LIGHT_GREEN}-> Restoring to _tmp ...${CLEAR}"
[ -d "$CACHE_DIR" ] || mkdir -p "$CACHE_DIR"
# ${VAR:?} aborts if CACHE_DIR is somehow empty, so this can never expand
# to a bare "rm -rf /*"; quoting keeps paths with spaces intact.
rm -rf "${CACHE_DIR:?}"/*
cp -R "${SCRIPT_DIR}/lanyrd/"* "$CACHE_DIR"

echo -e "${GREEN}Done${CLEAR}"
|
<reponame>ben5en/MSP430_sensorlessBLDC
#ifndef BLDC_PI_H_
#define BLDC_PI_H_
#ifdef __cplusplus
extern "C" {
#endif
// ----------------------------------------------------------------------
// info and license
// ----------------------------------------------------------------------
//
// filename: pi.h
//
// MIT License
//
// Copyright (c) 2019 <NAME>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
// target: Texas Instruments MSP430
//
// ----------------------------------------------------------------------
// history
// ----------------------------------------------------------------------
// 03.09.2019 - initial programming
// ----------------------------------------------------------------------
// header files
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// #defines
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// global variables
// ----------------------------------------------------------------------
typedef volatile struct
{
_q Kp; // the proportional gain for the PI controller
_q Ki; // the integral gain for the PI controller
_q Ref_pu; // the reference value [pu]
_q Fbk_pu; // the feedback value [pu]
_q OutMax_pu; // the saturation high limit for the controller output [pu]
_q OutMin_pu; // the saturation low limit for the controller output [pu]
_q Out_pu; // the controller output [pu]
_q Ui; // the integrator value for the PI controller
bool SatFlag; // flag to signal controller saturation
} PI_t;
// ----------------------------------------------------------------------
// functions
// ----------------------------------------------------------------------
// function to set default values for the object
inline void PI_objectInit(volatile PI_t *pPi_obj)
{
    // Cast away volatile for local accesses; assumes no concurrent writer
    // during init — TODO confirm against the ISR usage.
    PI_t *obj = (PI_t *)pPi_obj;
    // Function initializes the object with default values
    obj->Kp = _Q(1.0);   // unity proportional gain by default
    obj->Ki = 0;         // integral action disabled until configured
    obj->Fbk_pu = 0;
    obj->Ref_pu = 0;
    obj->Out_pu = 0;
    obj->OutMax_pu = 0;  // limits default to 0: output clamps to 0 until limits are set
    obj->OutMin_pu = 0;
    obj->Ui = 0;
    obj->SatFlag = false;
}
// ----------------------------------------------------------------------
//
// Run one step of the PI controller: error -> proportional term ->
// clamped integrator -> saturated output (all fixed-point per-unit values).
inline void PI_run(volatile PI_t *pPi_obj)
{
    PI_t *obj = (PI_t *)pPi_obj;
    _q up, error, preOut;

    // Compute the controller error
    error = obj->Ref_pu - obj->Fbk_pu;

    // Compute the proportional term
    up = _Qmpy(obj->Kp, error);

    // Accumulate Ki * up (i.e. Ki acts on the proportional term, a series
    // form — the original comment said "parallel"; confirm which is intended)
    // and clamp the integrator to the output limits to prevent windup.
    obj->Ui = _Qsat(obj->Ui + _Qmpy(obj->Ki, up), obj->OutMax_pu, obj->OutMin_pu);

    preOut = up + obj->Ui; // (removed stray second semicolon)

    // Saturate the output
    obj->Out_pu = _Qsat(preOut, obj->OutMax_pu, obj->OutMin_pu);

    // if saturation flag is needed, comment out:
    // obj->SatFlag = (out == preOut) ? false : true;
}
// ----------------------------------------------------------------------
//
// Compute the proportional gain from machine/device parameters.
// Based on the "Betragsoptimum" (magnitude optimum) tuning rule:
//   Kp = Ls / (2 * tau)
// The multiplications by device current and voltage convert the physical
// quantities to per-unit values.
inline _q PI_calcKp(float Ls_H, float deviceCurrent_A, float deviceVoltage_V,
                    float deviceCtrlPeriode_Sec)
{
    // Use float literals (2.0f) so the arithmetic stays in single precision;
    // the double-promoting 2.0 literal is costly on the FPU-less MSP430.
    float num = Ls_H * deviceCurrent_A;
    float den = 2.0f * deviceCtrlPeriode_Sec * deviceVoltage_V;
    return _Q(num / den);
}
// ----------------------------------------------------------------------
//
// Compute the discrete integral gain from motor parameters.
// Per TI MotorWare's documentation: Ki = (Rs / Ls) * Tctrl.
inline _q PI_calcKi(float Rs_Ohm, float Ls_H, float deviceCtrlPeriode_Sec)
{
    return _Q((Rs_Ohm / Ls_H) * deviceCtrlPeriode_Sec);
}
// ----------------------------------------------------------------------
// something...
// ----------------------------------------------------------------------
// ----------------------------------------------------------------------
// end of file
// ----------------------------------------------------------------------
#ifdef __cplusplus
}
#endif /* extern "C" */
#endif /* BLDC_PI_H_ */
|
#!/usr/bin/env bash
# Link the configs in ~/dotfiles-manjaro-i3 into place. Any pre-existing
# file/dir is first preserved as a *pre-dotfiles-bak backup.
sh ~/dotfiles-manjaro-i3/install/software.sh
echo "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"
echo "- STARTING CONFIG SETUP -"
echo "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"
echo "ssh config"
if [ -f ~/.ssh/config ]; then
  mv ~/.ssh/config ~/.ssh/config-pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/ssh/config ~/.ssh/config
echo "git config"
if [ -f ~/.gitconfig ]; then
  mv ~/.gitconfig ~/.gitconfig-pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/.gitconfig ~/.gitconfig
echo "global .gitignore"
# BUG FIX: "[ -f ~/.gitignore_global]" was missing the space before "]";
# the test always errored ("missing ]") and the backup was skipped.
if [ -f ~/.gitignore_global ]; then
  mv ~/.gitignore_global ~/.gitignore_global-pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/.gitignore_global ~/.gitignore_global
echo "zshrc"
if [ -f ~/.zshrc ]; then
  mv ~/.zshrc ~/.zshrc-pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/.zshrc ~/.zshrc
sudo ln -fs /home/jim/dotfiles-manjaro-i3/config/.zshrc /root/.zshrc
echo "dconf"
if [ -d ~/.config/dconf/user.d ]; then
  mv ~/.config/dconf/user.d ~/.config/dconf/user.d-pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/dconf/user.d ~/.config/dconf
echo "ssh-agent"
if [ ! -d ~/.config/systemd/user ]; then
  mkdir -p ~/.config/systemd/user
fi
if [ -f ~/.config/systemd/user/ssh-agent.service ]; then
  mv ~/.config/systemd/user/ssh-agent.service ~/.config/systemd/user/ssh-agent.service.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/systemd/user/ssh-agent.service ~/.config/systemd/user/ssh-agent.service
systemctl --user enable ssh-agent.service
echo "motd"
if [ -d ~/.motd.d ]; then
  if [ ! -L ~/.motd.d ]; then
    mv ~/.motd.d ~/.motd.d-pre-dotfiles-bak
  else
    rm ~/.motd.d
  fi
fi
ln -fs ~/dotfiles-manjaro-i3/config/.motd.d ~/
echo "file templates"
sh ~/dotfiles-manjaro-i3/install/templates.sh
echo "termite"
# NOTE(review): termite's old config is backed up but no new symlink is
# created (same for dunst, rofi and the i3 config below) -- confirm these
# omissions are intentional.
if [ ! -d ~/.config/termite ]; then
  mkdir ~/.config/termite
fi
if [ -f ~/.config/termite/config ]; then
  mv ~/.config/termite/config ~/.config/termite/config.pre-dotfiles-bak
fi
echo "terminator"
if [ ! -d ~/.config/terminator ]; then
  mkdir ~/.config/terminator
fi
if [ -f ~/.config/terminator/config ]; then
  mv ~/.config/terminator/config ~/.config/terminator/config.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/terminator/config ~/.config/terminator/config
echo "rofi themes"
if [ ! -d ~/.config/rofi ]; then
  mkdir ~/.config/rofi
fi
echo "nano"
if [ ! -d ~/.config/nano ]; then
  mkdir ~/.config/nano
fi
if [ -f ~/.config/nano/nanorc ]; then
  mv ~/.config/nano/nanorc ~/.config/nano/nanorc.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/nano/nanorc ~/.config/nano/nanorc
echo "Dunst (notifications)"
if [ ! -d ~/.config/dunst ]; then
  mkdir ~/.config/dunst
fi
if [ -f ~/.config/dunst/dunstrc ]; then
  mv ~/.config/dunst/dunstrc ~/.config/dunst/dunstrc.pre-dotfiles-bak
fi
echo "Clipit"
if [ ! -d ~/.config/clipit ]; then
  mkdir ~/.config/clipit
fi
if [ -f ~/.config/clipit/clipitrc ]; then
  mv ~/.config/clipit/clipitrc ~/.config/clipit/clipitrc.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/clipitrc ~/.config/clipit/clipitrc
echo "Ranger"
if [ ! -d ~/.config/ranger ]; then
  mkdir ~/.config/ranger
fi
if [ ! -d ~/.config/ranger/colorschemes ]; then
  mkdir ~/.config/ranger/colorschemes
fi
if [ -f ~/.config/ranger/rc.conf ]; then
  mv ~/.config/ranger/rc.conf ~/.config/ranger/rc.conf.pre-dotfiles-bak
fi
if [ -f ~/.config/ranger/rifle.conf ]; then
  mv ~/.config/ranger/rifle.conf ~/.config/ranger/rifle.conf.pre-dotfiles-bak
fi
if [ -f ~/.config/ranger/scope.sh ]; then
  mv ~/.config/ranger/scope.sh ~/.config/ranger/scope.sh.pre-dotfiles-bak
fi
if [ ! -f ~/.config/ranger/colorschemes/darkest_space.py ]; then
  ln -fs ~/dotfiles-manjaro-i3/config/ranger/colorschemes/darkest_space.py ~/.config/ranger/colorschemes/darkest_space.py
fi
ln -fs ~/dotfiles-manjaro-i3/config/ranger/rc.conf ~/.config/ranger/rc.conf
ln -fs ~/dotfiles-manjaro-i3/config/ranger/rifle.conf ~/.config/ranger/rifle.conf
ln -fs ~/dotfiles-manjaro-i3/config/ranger/scope.sh ~/.config/ranger/scope.sh
echo "i3"
if [ ! -d ~/.config/i3 ]; then
  mkdir ~/.config/i3
fi
if [ -f ~/.config/i3/config ]; then
  mv ~/.config/i3/config ~/.config/i3/config.pre-dotfiles-bak
fi
echo "i3 blocks"
if [ -f ~/.config/i3/i3blocks.conf ]; then
  mv ~/.config/i3/i3blocks.conf ~/.config/i3/i3blocks.conf.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/i3/i3blocks.conf ~/.config/i3
echo "i3 status"
if [ -f ~/.i3status.conf ]; then
  mv ~/.i3status.conf ~/.i3status.conf.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/i3/i3status.conf ~/.i3status.conf
echo "gtk"
if [ -f ~/.gtkrc-2.0.mine ]; then
  mv ~/.gtkrc-2.0.mine ~/.gtkrc-2.0.mine.pre-dotfiles-bak
fi
if [ -f ~/.config/gtk-3.0/settings.ini ]; then
  mv ~/.config/gtk-3.0/settings.ini ~/.config/gtk-3.0/settings.ini.pre-dotfiles-bak
fi
if [ -f ~/.config/gtk-3.0/gtk.css ]; then
  mv ~/.config/gtk-3.0/gtk.css ~/.config/gtk-3.0/gtk.css.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/gtk-3.0/gtk.css ~/.config/gtk-3.0/
echo "screen layouts"
if [ -d ~/.screenlayout ]; then
  mv ~/.screenlayout ~/.screenlayout.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/screenlayout ~/.screenlayout
echo "compton"
# NOTE(review): the backup checks ~/.compton.conf but the symlink is created
# at ~/.config/compton.conf, so the backed-up path is never replaced --
# confirm which location compton should read.
if [ -f ~/.compton.conf ]; then
  mv ~/.compton.conf ~/.compton.conf.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/.compton.conf ~/.config/compton.conf
echo "OneDrive"
if [ ! -d ~/.config/onedrive ]; then
  mkdir ~/.config/onedrive
fi
if [ -f ~/.config/onedrive/sync_list ]; then
  mv ~/.config/onedrive/sync_list ~/.config/onedrive/sync_list.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/onedrive/sync_list ~/.config/onedrive/sync_list
echo ".xprofile"
if [ -f ~/.xprofile ]; then
  mv ~/.xprofile ~/.xprofile.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/.xprofile ~/.xprofile
echo ".xinitrc"
if [ -f ~/.xinitrc ]; then
  mv ~/.xinitrc ~/.xinitrc.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/.xinitrc ~/.xinitrc
echo "X settings"
if [ -f ~/.Xresources ]; then
  mv ~/.Xresources ~/.Xresources.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/.Xresources ~/.Xresources
echo "redshift"
if [ -f ~/.config/redshift.conf ]; then
  mv ~/.config/redshift.conf ~/.config/redshift.conf.pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/redshift.conf ~/.config/redshift.conf
echo "MIME apps"
if [ -f ~/.config/mimeapps.list ]; then
  mv ~/.config/mimeapps.list ~/.config/mimeapps.list-pre-dotfiles-bak
fi
ln -fs ~/dotfiles-manjaro-i3/config/mimeapps.list ~/.config/mimeapps.list
echo "/etc/default/grub"
if [ -f /etc/default/grub ]; then
  sudo mv /etc/default/grub /etc/default/grub.pre-dotfiles-bak
fi
sudo ln -fs ~/dotfiles-manjaro-i3/config/etc/default/grub /etc/default/grub
echo "nitrogen (wallpaper)"
if [ -d ~/.config/nitrogen ]; then
  mv ~/.config/nitrogen ~/.config/nitrogen.pre-dotfiles-bak
fi
# BUG FIX: the directory was only created when it did not already exist;
# after the backup "mv" above it is gone, so the two links below used to
# fail. Always (re)create it.
mkdir -p ~/.config/nitrogen
ln -fs ~/dotfiles-manjaro-i3/config/nitrogen/bg-saved.cfg ~/.config/nitrogen/bg-saved.cfg
ln -fs ~/dotfiles-manjaro-i3/config/nitrogen/nitrogen.cfg ~/.config/nitrogen/nitrogen.cfg
echo "simlink DropBox images to /Pictures"
ln -fs ~/Dropbox/images/ ~/Pictures/Dropbox
echo "changing default shell to zsh"
sudo chsh -s /usr/bin/zsh
sudo chsh -s /usr/bin/zsh jim
echo "setting up trackpad"
sh ~/dotfiles-manjaro-i3/scripts/trackpad-setup.sh
echo "setting compose key to right-alt"
setxkbmap -option compose:ralt
echo "executable scripts"
sudo ln -fs ~/dotfiles-manjaro-i3/scripts/mx /usr/local/bin/mx
echo "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"
echo "- FINISHED CONFIG SETUP -"
echo "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"
|
package model
// Reference is a Value that can additionally be identified by a string ID.
type Reference interface {
	Value
	// StringID returns the string identifier of the referenced value.
	StringID() string
}
|
<gh_stars>1-10
/*
* (C) Copyright 2017-2018, by <NAME> and Contributors.
*
* JGraphT : a free Java graph-theory library
*
* This program and the accompanying materials are dual-licensed under
* either
*
* (a) the terms of the GNU Lesser General Public License version 2.1
* as published by the Free Software Foundation, or (at your option) any
* later version.
*
* or (per the licensee's choosing)
*
* (b) the terms of the Eclipse Public License v1.0 as published by
* the Eclipse Foundation.
*/
package org.jgrapht.alg.matching;
import java.util.*;
import org.jgrapht.*;
import org.jgrapht.alg.interfaces.*;
import org.jgrapht.alg.util.*;
import org.jgrapht.generate.*;
import org.jgrapht.graph.*;
import junit.framework.*;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
/**
 * Tests for GreedyMaximumCardinalityMatching.
 *
 * @author <NAME>
 */
public class GreedyMaximumCardinalityMatchingTest
{
    /**
     * Generate a number of random graphs, find a matching (unsorted variant)
     * and check whether the matching returned is valid.
     */
    @Test
    public void testRandomGraphs()
    {
        runRandomGraphTest(false, 100);
    }

    /**
     * Generate a random graph, find a matching (sorted variant) and check
     * whether the matching returned is valid.
     */
    @Test
    public void testRandomGraphs2()
    {
        // NOTE(review): only one iteration here while the unsorted variant
        // runs 100; looks like a leftover debug reduction -- confirm intent.
        runRandomGraphTest(true, 1);
    }

    /**
     * Shared driver: repeatedly generates G(n,m) random graphs into the same
     * graph instance (as the original tests did) and validates the matching
     * produced by GreedyMaximumCardinalityMatching.
     *
     * @param sorted whether to use the sorted variant of the algorithm
     * @param iterations how many random graphs to generate and check
     */
    private void runRandomGraphTest(boolean sorted, int iterations)
    {
        GraphGenerator<Integer, DefaultEdge, Integer> generator =
            new GnmRandomGraphGenerator<>(200, 120);
        IntegerVertexFactory vertexFactory = new IntegerVertexFactory();
        Graph<Integer, DefaultEdge> graph = new SimpleGraph<>(DefaultEdge.class);
        for (int i = 0; i < iterations; i++) {
            generator.generateGraph(graph, vertexFactory, null);
            MatchingAlgorithm<Integer, DefaultEdge> matcher =
                new GreedyMaximumCardinalityMatching<>(graph, sorted);
            MatchingAlgorithm.Matching<Integer, DefaultEdge> m = matcher.getMatching();
            assertMatchingIsValid(graph, m);
        }
    }

    /**
     * Asserts that no vertex is matched twice and that the reported matching
     * weight equals the sum of its edge weights.
     */
    private void assertMatchingIsValid(
        Graph<Integer, DefaultEdge> graph, MatchingAlgorithm.Matching<Integer, DefaultEdge> m)
    {
        Set<Integer> matched = new HashSet<>();
        double weight = 0;
        for (DefaultEdge e : m.getEdges()) {
            Integer source = graph.getEdgeSource(e);
            Integer target = graph.getEdgeTarget(e);
            if (matched.contains(source))
                fail("vertex is incident to multiple matches in the matching");
            matched.add(source);
            if (matched.contains(target))
                fail("vertex is incident to multiple matches in the matching");
            matched.add(target);
            weight += graph.getEdgeWeight(e);
        }
        assertEquals(m.getWeight(), weight, 0.0000001);
    }
}
|
<reponame>openstacker/splunk_app_catalyst_cloud
#!/usr/bin/python
# Copyright (c) 2017 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import splunk.admin as admin
class ConfigApp(admin.MConfigHandler):
    """Splunk custom REST handler exposing the ``myconf`` configuration."""

    def setup(self):
        """Declare the optional arguments accepted by edit requests."""
        if self.requestedAction == admin.ACTION_EDIT:
            for arg in ['baseurl', 'tenant']:
                self.supportedArgs.addOptArg(arg)

    def handleList(self, confInfo):
        """List every stanza/key of ``myconf``, mapping unset values to ''."""
        confDict = self.readConf("myconf")
        if confDict:
            for stanza, settings in confDict.items():
                for key, val in settings.items():
                    # Normalise unset values so clients always receive a string.
                    if key in ['baseurl', 'tenant'] and not val:
                        val = ''
                    confInfo[stanza].append(key, val)

    def handleEdit(self, confInfo):
        """Persist the submitted settings into the ``userinfo`` stanza."""
        # NOTE(review): the caller-supplied stanza id (self.callerArgs.id) is
        # ignored and everything is written to 'userinfo' -- confirm intended.
        # (Unused locals `name` and `args` from the original were removed.)
        self.writeConf('myconf', 'userinfo', self.callerArgs.data)


admin.init(ConfigApp, admin.CONTEXT_NONE)
|
# Prepend project-local bin and Homebrew paths to the executable search path.
export PATH="./bin:/usr/local/bin:/usr/local/sbin:$ZSH/bin:$PATH"
# Extend man-page lookup to the Homebrew, MySQL and Git installations.
export MANPATH="/usr/local/man:/usr/local/mysql/man:/usr/local/git/man:$MANPATH"
# Go workspace root.
export GOPATH=$HOME/Code/Go/
|
# replace DATASET_DIR with your ImageNet dataset folder
# Each configuration is run three times (three identical lines per block).
# NOTE(review): every command passes both "--epochs 90" and a trailing
# "--epoch 90" -- the second flag looks redundant or misspelled; confirm
# against main.py's argument parser.
# SGD with momentum and weight decay:
python main.py -a resnet50 --lr 0.1 DATASET_DIR --optimizer sgd --sch cos --workers 24 -b 400 --epochs 90 --momentum 0.9 --wd 0.0001 --epoch 90
python main.py -a resnet50 --lr 0.1 DATASET_DIR --optimizer sgd --sch cos --workers 24 -b 400 --epochs 90 --momentum 0.9 --wd 0.0001 --epoch 90
python main.py -a resnet50 --lr 0.1 DATASET_DIR --optimizer sgd --sch cos --workers 24 -b 400 --epochs 90 --momentum 0.9 --wd 0.0001 --epoch 90
# SGD with momentum, without weight decay:
python main.py -a resnet50 --lr 0.1 DATASET_DIR --optimizer sgd --sch cos --workers 24 -b 400 --epochs 90 --momentum 0.9 --wd 0.0 --epoch 90
python main.py -a resnet50 --lr 0.1 DATASET_DIR --optimizer sgd --sch cos --workers 24 -b 400 --epochs 90 --momentum 0.9 --wd 0.0 --epoch 90
python main.py -a resnet50 --lr 0.1 DATASET_DIR --optimizer sgd --sch cos --workers 24 -b 400 --epochs 90 --momentum 0.9 --wd 0.0 --epoch 90
# Nero out-of-the-box:
python main.py -a resnet50 --lr 0.1 DATASET_DIR --optimizer nero --sch cos --workers 24 -b 400 --epochs 90 --momentum 0.0 --wd 0.0 --epoch 90
python main.py -a resnet50 --lr 0.1 DATASET_DIR --optimizer nero --sch cos --workers 24 -b 400 --epochs 90 --momentum 0.0 --wd 0.0 --epoch 90
python main.py -a resnet50 --lr 0.1 DATASET_DIR --optimizer nero --sch cos --workers 24 -b 400 --epochs 90 --momentum 0.0 --wd 0.0 --epoch 90
# Alternative base image, kept for reference:
#docker run --rm -v "$(pwd)":/usr/src/multi-process-web-server -w /usr/src/multi-process-web-server -it gcc:4.9 bash
# Start an interactive gcc+cmake container with the project mounted and the
# server ports published.
# BUG FIX: "$(PWD)" is command substitution of a (non-existent) PWD command
# in shell -- the Make-style variable was likely intended; "$(pwd)" mounts
# the current directory correctly.
docker run --rm -v "$(pwd)":/usr/src/multi-process-web-server -w /usr/src/multi-process-web-server -p 12345:12345 -p 11777:11777 -it rikorose/gcc-cmake bash
|
#!/bin/sh
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
# Build the DeVIDE PyInstaller package.
# If you want to use your own PyInstaller Build.py, set the full path
# to this in the ENV variable PYINSTALLER_SCRIPT (the old comment said
# INSTALLER_SCRIPT, but that variable was never read).
# go to the directory that contains makePackage.sh (i.e. devide/installer)
cd `dirname $0`
if [ "$?" -ne "0" ]; then
    echo "ERROR: could not change to devide/installer."
    exit 1
fi
# nuke all .pyc files (to be sure)
find ../ -name "*.pyc" -exec rm {} \;
# nuke all backup files
find ../ -name "*~" -exec rm {} \;
find ../ -name "#*#" -exec rm {} \;
# run the McMillan Installer
if [ `uname` = Linux ]; then
    # this is so that you can stuff this in the environment
    if [ -z "$PYINSTALLER_SCRIPT" ]; then
        PYINSTALLER_SCRIPT='/home/cpbotha/build/Installer/Build.py'
    fi
    INSTALLER="python $PYINSTALLER_SCRIPT"
    $INSTALLER devide.spec
    if [ "$?" -ne "0" ]; then
        echo "ERROR: PyInstaller not successfully executed."
        exit 1
    fi
    # strip all the libraries
    # BUG FIX: the "*.so" patterns were unquoted, so the shell expanded them
    # against the current directory before find ever saw them.
    find distdevide/ -name "*.so" | xargs strip
    # remove rpath information (else the installation doesn't work everywhere)
    find distdevide -name "*.so" | xargs chrpath --delete
    # rename the binary and create an invoking script
    # we only have to set LD_LIBRARY_PATH, PYTHONPATH is correct
    mv distdevide/devide distdevide/devide.bin
    SCRIPTFILE='distdevide/devide'
    cp devideInvokingScript.sh $SCRIPTFILE
    chmod +x $SCRIPTFILE
else
    # this is so that you can stuff this in the environment
    if [ -z "$PYINSTALLER_SCRIPT" ]; then
        PYINSTALLER_SCRIPT='c:/build/Installer/Build.py'
    fi
    # run the installer
    INSTALLER="python $PYINSTALLER_SCRIPT"
    $INSTALLER devide.spec
    # also copy the manifest file to distdevide
    # (we are in the installer directory)
    cp devide.exe.manifest distdevide/
    # since MSVS 2005 (8.0) we also need to copy the whole assembly to
    # which some of the runtimes belong to. At the time of writing
    # (20070901), this is: MSVS 8\VC\redist\x86\Microsoft.VC80.CRT\
    cp msvcm80.dll Microsoft.VC80.CRT.manifest distdevide/
    # pyinstaller already grabs msvcp80.dll and msvcr80.dll, so we only
    # grab the rest of the assembly. Also see
    # http://channel9.msdn.com/ShowPost.aspx?PostID=23261 for more info.
fi
|
-- Return the single stored string most similar to 'hello', ranked by
-- Levenshtein similarity ratio (highest similarity first).
SELECT
[String]
FROM Strings
ORDER BY
levenshtein_ratio(String, 'hello')
DESC
LIMIT 1;
#!/bin/bash
# Install graphviz via Homebrew, then build pygraphviz against the Homebrew
# headers and libraries.
# NOTE(review): pip removed --install-option in pip 23.1; on a modern pip
# this command fails -- confirm the target pip version (newer pip uses
# --config-settings or environment variables instead).
brew install graphviz
pip install pygraphviz \
    --install-option="--include-path=/usr/local/include" \
    --install-option="--library-path=/usr/local/lib"
|
<gh_stars>0
function solve() {
    // Shopping-cart exercise: wire every product's "add" button plus the
    // global buy button, logging each action into the page's textarea.
    const cart = {};
    const output = document.querySelector('textarea');

    Array.from(document.querySelectorAll('.product')).forEach((productEl) => {
        const addButton = productEl.children[3];
        const productName = productEl.children[1].textContent;
        // Price text looks like "<label> <amount>"; take the numeric part.
        const productPrice = Number(
            productEl.children[2].textContent.split(' ')[1]
        );

        addButton.addEventListener('click', () => {
            // Repeated clicks accumulate the price under the same name.
            if (cart.hasOwnProperty(productName)) {
                cart[productName] += productPrice;
            } else {
                cart[productName] = productPrice;
            }
            output.value += `Added ${productName} for ${productPrice.toFixed(2)} to the cart.\n`;
        });
    });

    const buyButton = document.querySelector('#exercise > button');
    buyButton.addEventListener('click', () => {
        const totalPrice = Object.values(cart).reduce((sum, price) => sum + price, 0);
        const allProduct = Object.keys(cart);
        output.value += `You bought ${allProduct.join(', ')} for ${totalPrice.toFixed(2)}.\n`;
    });
}
import numpy as np
import tensorflow as tf

# Create neural network model: a small fully-connected binary classifier
# over 1000-dimensional input vectors.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(1000,)),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# BUG FIX: the original called model.fit(inputs, labels) with neither name
# defined anywhere (NameError at runtime). Synthetic placeholder data is
# generated below; replace with the real dataset.
inputs = np.random.rand(32, 1000).astype("float32")
labels = np.random.randint(0, 2, size=(32, 1)).astype("float32")

# Train the model
model.fit(inputs, labels, epochs=100)
import { reactive, toRefs, SetupContext } from '@vue/composition-api'
// import { apiState, globalState } from '~/types/State'
// Composition-function options: the component setup context (gives access
// to the Nuxt root instance, i.e. $axios and $i18n).
interface Options {
  ctx: SetupContext
}

// Parameters identifying a single article to fetch.
interface ArticleConfig {
  articleSlug?: string
  id?: number
  subcategory: string
}

// Lifecycle state of the most recent API request.
interface ApiState {
  response: object
  error: object | null
  fetching: boolean
}

// Fetched article data shared with consumers.
interface globalState {
  articles: object | null
  article: object[] | null
}
// Composition function wrapping the WordPress REST API: exposes reactive
// request state plus the fetched article list / single article.
export default function usePosts({ ctx }: Options) {
  // Setting up the endpoint
  const apiState: ApiState = reactive({
    response: [],
    error: null,
    fetching: false
  })
  const globalState: globalState = reactive({
    articles: {},
    article: [{}]
  })
  const Cookie = process.client ? require('js-cookie') : undefined
  // Language previously chosen by the user (client side only).
  const cookieLang = process.client ? (Cookie.get('i18n_redirected') ? Cookie.get('i18n_redirected') : null) : null

  // Fetch the ten newest articles of the given post type into globalState.articles.
  const fetchArticlesList = async (subtype: string = 'posts') => {
    apiState.fetching = true
    try {
      const { data } = await ctx.root.$axios.get(
        `${process.env.NUXT_ENV_WORDPRESS_API_URL}/wp-json/wp/v2/${subtype}?orderby=date&per_page=10&_embed`,
        {
          params: {
            _embed: true,
            lang: cookieLang
          }
        }
      )
      globalState.articles = data
    } finally {
      // BUG FIX: "fetching" was set to true but never reset, leaving
      // consumers in a permanent loading state.
      apiState.fetching = false
    }
  }

  // Fetch an article, then check whether it matches the user's locale.
  const fetchArticleForUserLang = async (config: ArticleConfig) => {
    await fetchArticleData(config)
    if (globalState.article) {
      // @ts-ignore
      if (globalState.article[0].lang !== ctx.root.$i18n.locale) {
        // @ts-ignore
        const articleIdLangMatch = globalState.article[0].translations[ctx.root.$i18n.locale]
        // NOTE(review): articleIdLangMatch is computed but unused because the
        // re-fetch below was left commented out -- confirm intended behaviour.
        // await fetchArticleData({undefined, articleIdLangMatch, config.subcategory})
      }
    }
  }

  // Fetch a single article (by slug or id) into globalState.article.
  const fetchArticleData = async (config: ArticleConfig) => {
    apiState.fetching = true
    try {
      const { data } = await ctx.root.$axios.get(
        `${process.env.NUXT_ENV_WORDPRESS_API_URL}/wp-json/wp/v2/${config.subcategory}`,
        {
          params: {
            slug: config.articleSlug,
            id: config.id,
            lang: cookieLang,
            _embed: true
          }
        }
      )
      globalState.article = data
      return data
    } finally {
      // BUG FIX: reset the loading flag whether the request succeeds or throws.
      apiState.fetching = false
    }
  }

  return {
    // @ts-ignore
    ...toRefs(apiState),
    // @ts-ignore
    ...toRefs(globalState),
    fetchArticlesList,
    fetchArticleForUserLang
  }
}
|
package dal.dao.interfaces;
import domaine.dto.ChoixMobiliteDto;
import domaine.dto.DepartementDto;
import domaine.dto.PartenaireDto;
import domaine.dto.UserDto;
import java.util.List;
/**
 * Manages the CRUD operations for partners.
 *
 * @author candy
 *
 */
public interface PartenaireDao extends Dao<PartenaireDto> {

  /**
   * Inserts a partner into the database.
   *
   * @param partenaire - the {@link PartenaireDto} to insert.
   * @return a {@link PartenaireDto} if the partner insertion succeeded.
   */
  PartenaireDto inserer(PartenaireDto partenaire);

  /**
   * Links a partner to a department.
   *
   * @param partenaire - the recently inserted partner.
   * @param departement - this partner's department.
   * @return the {@link PartenaireDto};
   */
  PartenaireDto lierADepartement(PartenaireDto partenaire, DepartementDto departement);

  /**
   * Lists all the partners matching the requested search.
   *
   * @param partenaire - the partner being searched for.
   * @return a list of {@link PartenaireDto} matching the search criteria.
   */
  List<PartenaireDto> rechercher(PartenaireDto partenaire);

  /**
   * Finds the partner linked to a mobility.
   *
   * @param mobi - the {@link ChoixMobiliteDto} whose partner is looked up.
   * @return the retrieved {@link PartenaireDto}.
   */
  PartenaireDto findByMobilite(ChoixMobiliteDto mobi);

  /**
   * Toggles a partner's visibility.
   *
   * @param part - the partner to make invisible or visible.
   * @return the {@link PartenaireDto} made invisible or visible.
   */
  PartenaireDto changerVisibilite(PartenaireDto part);

  /**
   * Lists the partners selectable by students, i.e. those that are not
   * hidden.
   *
   * @return the {@link PartenaireDto} list of selectable partners.
   */
  List<PartenaireDto> listerPartenairesSelectionnablesPourEtudiants();

  /**
   * Lists the partners selectable by one student, i.e. those that are not
   * hidden and belong to the same department as the student.
   *
   * @param etudiant - the student together with his/her department.
   * @return the {@link PartenaireDto} list of partners selectable by the student.
   */
  List<PartenaireDto> listerPartenairesSelectionnables(UserDto etudiant);
}
|
package com.qht.model;
import java.io.Serializable;
import java.math.BigDecimal;
/**
 * Course list entry: DTO describing one purchasable course package.
 */
public class ClassListModel implements Serializable {
    // Course package cover image
    private String cover;
    // Operator (tenant) id
    private String tenant_id;
    // Course package name
    private String pkg_name;
    // Course package type id
    private String pkt_type_id;
    // Total price
    private BigDecimal total_price;
    // Teacher display name
    private String nickname;
    // Difficulty level
    private Integer easy;

    public String getCover() {
        return cover;
    }

    public void setCover(String cover) {
        this.cover = cover;
    }

    public String getTenant_id() {
        return tenant_id;
    }

    public void setTenant_id(String tenant_id) {
        this.tenant_id = tenant_id;
    }

    public String getPkg_name() {
        return pkg_name;
    }

    public void setPkg_name(String pkg_name) {
        this.pkg_name = pkg_name;
    }

    public String getPkt_type_id() {
        return pkt_type_id;
    }

    public void setPkt_type_id(String pkt_type_id) {
        this.pkt_type_id = pkt_type_id;
    }

    public BigDecimal getTotal_price() {
        return total_price;
    }

    public void setTotal_price(BigDecimal total_price) {
        this.total_price = total_price;
    }

    public String getNickname() {
        return nickname;
    }

    public void setNickname(String nickname) {
        this.nickname = nickname;
    }

    public Integer getEasy() {
        return easy;
    }

    public void setEasy(Integer easy) {
        this.easy = easy;
    }

    @Override
    public String toString() {
        return "ClassListModel{" +
                "cover='" + cover + '\'' +
                ", tenant_id='" + tenant_id + '\'' +
                ", pkg_name='" + pkg_name + '\'' +
                ", pkt_type_id='" + pkt_type_id + '\'' +
                ", total_price=" + total_price +
                ", nickname='" + nickname + '\'' +
                ", easy=" + easy +
                '}';
    }
}
|
package org.firstinspires.ftc.team8201;
import com.qualcomm.robotcore.eventloop.opmode.Autonomous;
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
import com.qualcomm.robotcore.hardware.DcMotor;
import com.qualcomm.robotcore.util.ElapsedTime;
@Autonomous(name = "encoderTest", group = "Autonomous")
public class mecAutoFront extends LinearOpMode {
mechard robot = new mechard();
private ElapsedTime runtime = new ElapsedTime();
//Start declaring the variables
static final double COUNTS_PER_MOTOR_REV = 280; //The Motor we have Encoder
static final double COUNTS_PER_INCH = COUNTS_PER_MOTOR_REV/ Math.PI;
static final double DRIVE_SPEED = 0.5;
static final double TURN_SPEED = 0.7;
@Override
public void runOpMode() {
robot.init(hardwareMap);
// Reset encoders
stopAndResetEncoders();
// Wait for "PLAY"
waitForStart();
//Testing
encoderDrive(DRIVE_SPEED, 30.0, 30.0, 30.0, 30.0, 8);
sleep(2000);
encoderTurn(-90);
sleep(2000);
encoderDrive(DRIVE_SPEED, 30.0, 30.0, 30.0, 30.0, 8);
sleep(2000);
encoderTurn(-90);
sleep(2000);
encoderDrive(DRIVE_SPEED, 30.0, 30.0, 30.0, 30.0, 8);
}
// Reset encoders and kill motors
public void stopAndResetEncoders() {
robot.leftWheelFront.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
robot.rightWheelFront.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
robot.leftWheelBack.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
robot.rightWheelBack.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
sleep(50); // Wait 50ms to make sure it fully processes
robot.leftWheelFront.setMode(DcMotor.RunMode.RUN_USING_ENCODER);
robot.rightWheelFront.setMode(DcMotor.RunMode.RUN_USING_ENCODER);
robot.leftWheelBack.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
robot.rightWheelBack.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
}
public void encoderDrive(double speed, double leftFrontInches, double rightFrontInches, double leftBackInches, double rightBackInches , double timeoutS){
int newLeftFront;
int newRightFront;
int newLeftBack;
int newRightBack;
// Ensure that the opmode is still active
if (opModeIsActive()) {
// Determine new target position, and pass to motor controller
newLeftFront = robot.leftWheelFront.getCurrentPosition() + (int) (leftFrontInches * COUNTS_PER_INCH);
newRightFront = robot.rightWheelFront.getCurrentPosition() + (int) (rightFrontInches * COUNTS_PER_INCH);
newLeftBack = robot.leftWheelBack.getCurrentPosition() + (int) (leftBackInches * COUNTS_PER_INCH);
newRightBack = robot.rightWheelBack.getCurrentPosition() + (int) (rightBackInches * COUNTS_PER_INCH);
robot.leftWheelFront.setTargetPosition(newLeftFront);
robot.rightWheelFront.setTargetPosition(newRightFront);
robot.leftWheelBack.setTargetPosition(newLeftBack);
robot.rightWheelBack.setTargetPosition(newRightBack);
// Turn On RUN_TO_POSITION
robot.leftWheelFront.setMode(DcMotor.RunMode.RUN_TO_POSITION);
robot.rightWheelFront.setMode(DcMotor.RunMode.RUN_TO_POSITION);
robot.leftWheelBack.setMode(DcMotor.RunMode.RUN_TO_POSITION);
robot.rightWheelBack.setMode(DcMotor.RunMode.RUN_TO_POSITION);
// reset the timeout time and start motion.
runtime.reset();
robot.leftWheelFront.setPower(Math.abs(speed));
robot.rightWheelFront.setPower(Math.abs(speed));
robot.leftWheelBack.setPower(Math.abs(speed));
robot.rightWheelBack.setPower(Math.abs(speed));
while (opModeIsActive() &&
(runtime.seconds() < timeoutS) &&
(robot.leftWheelFront.isBusy() && robot.rightWheelFront.isBusy())) {
// Display it for the driver.
telemetry.addData("lf" , newLeftFront);
telemetry.addData("rb" , newRightBack);
telemetry.update();
}
}
}
public void encoderTurn(double degrees) {
double circumference = 92.5;
double arc = circumference * (degrees / 360);
encoderDrive(TURN_SPEED, arc, -arc, arc, -arc, 10.0);
}
public void encoderMoveLeft(double inches){ //Test it out
encoderDrive(DRIVE_SPEED, -inches, inches, inches, -inches, 5.0);
}
public void encoderMoveRight(double inches){ //Test it out
encoderDrive(DRIVE_SPEED, inches, -inches, -inches, inches, 5.0);
}
} |
import csv
import logging
from typing import List, Tuple, Dict
from cloud.clouds import CloudRegion, get_cloud_region, Cloud
from history.results import load_results_csv
from util.utils import dedup
# CSV file recording every (from, to) region pair that has been attempted,
# whether or not the test later succeeded.
__attempted_tests_csv = "./data/attempted_tests.csv"

logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(levelname)s %(message)s",
    datefmt="%H:%M:%S",
)
def remove_already_attempted(
    region_pairs: List[Tuple[CloudRegion, CloudRegion]]
) -> List[Tuple[CloudRegion, CloudRegion]]:
    """Filter out region pairs that have already been attempted.

    Both previously successful and previously failed pairs are excluded
    (a past failure is not retried).
    """
    already_attempted = __results_dict_to_cloudregion_pairs_with_dedup(__already_attempted())
    successful_results = __results_dict_to_cloudregion_pairs_with_dedup(load_results_csv())
    # Failures = attempted but missing from the successful results; computed
    # only for the summary message below.
    old_failures = [p for p in already_attempted if p not in successful_results]
    ret = list(filter(lambda r: r not in already_attempted, region_pairs))
    print(
        f"Of {len(region_pairs)} to be tested; "
        f"Will not do any of the {len(successful_results)} successful ; "
        f"Or {len(old_failures)} failures; "
        f"Testing {len(ret)} pairs"
    )
    return ret
def __results_dict_to_cloudregion_pairs_with_dedup(dicts):
    """Convert result dicts into deduplicated (from, to) CloudRegion pairs."""
    pairs = []
    for d in dicts:
        source = get_cloud_region(Cloud(d["from_cloud"]), d["from_region"])
        destination = get_cloud_region(Cloud(d["to_cloud"]), d["to_region"])
        pairs.append((source, destination))
    return dedup(pairs)
def write_attempted_tests(region_pairs):
    """Record the given region pairs as attempted.

    Previously recorded attempts are preserved; the CSV is rewritten in
    full with the combined list.
    """
    attempts = __already_attempted()
    for from_region, to_region in region_pairs:
        attempts.append(
            {
                "from_cloud": from_region.cloud,
                "from_region": from_region.region_id,
                "to_cloud": to_region.cloud,
                "to_region": to_region.region_id,
            }
        )
    # newline="" is required by the csv module so rows are not double-spaced
    # on Windows.
    with open(__attempted_tests_csv, "w", newline="") as f:
        dict_writer = csv.DictWriter(
            f, ["from_cloud", "from_region", "to_cloud", "to_region"]
        )
        dict_writer.writeheader()
        dict_writer.writerows(attempts)
def __already_attempted() -> List[Dict]:
    """Load previously attempted region pairs from the CSV.

    Returns an empty list when the file does not exist yet (first run).
    Unlike the original hand-rolled header/zip parsing, csv.DictReader also
    handles an empty file gracefully (the original raised StopIteration on
    ``next(reader)``).
    """
    try:
        with open(__attempted_tests_csv, newline="") as f:
            reader = csv.DictReader(f, skipinitialspace=True)
            return [dict(row) for row in reader]
    except FileNotFoundError:
        return []
|
const { describe, it } = intern.getInterface("bdd");
import { tsx } from "@dojo/framework/core/vdom";
import renderer, { assertion, wrap } from "@dojo/framework/testing/renderer";
import FontAwesomeIcon from "@blocklang/dojo-fontawesome/FontAwesomeIcon";
import MiniProgramNavigator from "../../MiniProgramNavigator";
import * as css from "../../../theme/default/mini-program-navigator.m.css";
// Wrap the <strong> title node so individual tests can target it in assertions.
const WrappedTitleStrong = wrap("strong");
// Base expected render: default "Mini Program" title in the center, with the
// WeChat-style capsule (more / close icons) on the right.
const baseAssertion = assertion(() => (
	<div classes={[null, css.root]}>
		<div classes={[css.left]}></div>
		<WrappedTitleStrong classes={[css.center]}>Mini Program</WrappedTitleStrong>
		<div classes={[css.right]}>
			<div classes={[css.capsule]}>
				<span classes={[css.iconMore]}>
					<FontAwesomeIcon icon="ellipsis-h" />
				</span>
				<span classes={[css.iconClose]}>
					<FontAwesomeIcon icon="dot-circle" />
				</span>
			</div>
		</div>
	</div>
));
describe("MiniProgramNavigator", () => {
	it("Should render using the default properties", () => {
		const r = renderer(() => <MiniProgramNavigator />);
		r.expect(baseAssertion);
	});
	// A custom title should replace the default "Mini Program" text node.
	it("title property", () => {
		const titlePropAssertion = baseAssertion.replaceChildren(WrappedTitleStrong, () => ["foo"]);
		const r = renderer(() => <MiniProgramNavigator title="foo" />);
		r.expect(titlePropAssertion);
	});
});
|
import locale
def find_valid_german_locale(possible_locales: list) -> str:
    """Return the first locale name in the list this system accepts.

    Each candidate is probed via ``locale.setlocale(locale.LC_ALL, ...)``;
    the first one that does not raise is returned. Returns None when no
    candidate is valid on the current system.
    """
    for candidate in possible_locales:
        try:
            locale.setlocale(locale.LC_ALL, candidate)
        except locale.Error:
            continue  # Not available on this system; try the next candidate.
        return candidate
    return None
// Column metadata: ordered field names plus a name -> primitive-type map.
// Consumers presumably use this to build tables/forms — TODO confirm callers.
export default {
  names: ["name", "income", "description"],
  types: {
    name: "string",
    income: "number",
    description: "string"
  }
};
|
<filename>src/components/Badge/Badge.tsx
// Dependencies
import React, { FC } from 'react'
import { cxGenerator } from '@contentpi/lib'
// Types
import { Color, Shape } from '../../types'
// Styles
import { Badge, BASE_CLASS_NAME } from './Badge.styled'
// Optional visual modifiers; defaults are applied in the component below.
interface IProps {
  color?: Color
  shape?: Shape
}

// Badge: renders its children inside the styled <Badge> element, combining
// the shape and color modifier class names via cxGenerator.
// NOTE(review): relies on FC<IProps> implicitly providing `children`
// (React <18 typings) — confirm if the React types are upgraded.
const BadgeComponent: FC<IProps> = ({ children, color = Color.primary, shape = Shape.regular }) => {
  const classes = [shape, color]
  const classNames = cxGenerator({
    ccn: BASE_CLASS_NAME,
    data: classes,
  })

  return (
    <Badge data-component="Badge" className={classNames}>
      {children}
    </Badge>
  )
}

export default BadgeComponent
|
#!/bin/bash
# Run the C++ wasm build inside the wasmsdk:v2 container.
# - Mounts the current directory at /work and the C++ wasm SDK at /external_sdk.
# - Passes the host uid/gid as env vars so generated files are not root-owned.
# Fix: quote $PWD and the realpath substitution so paths containing spaces
# do not undergo word splitting.
docker run -e uid="$(id -u)" -e gid="$(id -g)" \
    -v "$PWD":/work \
    -v "$(realpath ../../../../../../api/wasm/cpp)":/external_sdk \
    -w /work wasmsdk:v2 bash ./docker_cpp_builder.sh
|
<gh_stars>10-100
package io.quarkus.qson.test;
import io.quarkus.qson.GenericType;
import io.quarkus.qson.QsonValue;
import io.quarkus.qson.generator.ClassMapping;
import io.quarkus.qson.generator.Generator;
import io.quarkus.qson.generator.QsonGenerator;
import io.quarkus.qson.generator.QsonMapper;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.List;
/**
 * Tests Qson "value class" support: classes that (de)serialize to a single
 * JSON scalar instead of a JSON object. Three flavors are exercised, for both
 * String and int payloads:
 * <ul>
 *   <li>{@code @QsonValue} on a constructor (read) plus an annotated getter (write)</li>
 *   <li>{@code @QsonValue} on a setter/getter method pair</li>
 *   <li>no annotations; static reader/writer methods registered via {@link ClassMapping}</li>
 * </ul>
 */
public class ValueClassTest {

    // Read via annotated constructor, written via annotated value() method.
    public static class MyConstructorStringValue {
        private String string;

        @QsonValue
        public MyConstructorStringValue(String str) {
            this.string = str;
        }

        @QsonValue
        public String value() {
            return string;
        }
    }

    // Read via annotated setter, written via annotated getter.
    public static class MyMethodStringValue {
        private String string;

        @QsonValue
        public void setString(String string) {
            this.string = string;
        }

        @QsonValue
        public String value() {
            return string;
        }
    }

    // No annotations: mapped externally through myStringValueMapping().
    public static class MyStringValue {
        private String string;

        public String stringValue() {
            return string;
        }

        public void create(String string) {
            this.string = string;
        }
    }

    // int counterpart of MyConstructorStringValue.
    public static class MyConstructorIntValue {
        private int val;

        @QsonValue
        public MyConstructorIntValue(int val) {
            this.val = val;
        }

        @QsonValue
        public int getVal() {
            return val;
        }
    }

    // int counterpart of MyMethodStringValue; note the overloaded value() pair.
    public static class MyMethodIntValue {
        private int val;

        @QsonValue
        public void value(int val) {
            this.val = val;
        }

        @QsonValue
        public int value() {
            return val;
        }
    }

    // int counterpart of MyStringValue; mapped externally via mapMyIntValue().
    public static class MyIntValue {
        private int val;

        public int value() {
            return val;
        }

        public void create(int val) {
            this.val = val;
        }
    }

    // Static reader/writer pairs registered with ClassMapping.valueReader/valueWriter.
    public static MyStringValue createMyStringValue(String string) {
        MyStringValue val = new MyStringValue();
        val.create(string);
        return val;
    }

    public static String writeMyStringValue(MyStringValue val) {
        return val.stringValue();
    }

    public static MyIntValue createMyIntValue(int val) {
        MyIntValue obj = new MyIntValue();
        obj.create(val);
        return obj;
    }

    public static int writeMyIntValue(MyIntValue val) {
        return val.value();
    }

    // Aggregate bean exercising value classes as properties and list elements.
    public static class ContainsValue {
        MyMethodStringValue methodString;
        MyConstructorStringValue constructorString;
        MyStringValue valueString;
        List<MyMethodStringValue> listMethodString;
        MyMethodIntValue methodInt;
        MyConstructorIntValue constructorInt;
        MyIntValue valueInt;
        List<MyMethodIntValue> listMethodInt;

        public MyMethodStringValue getMethodString() {
            return methodString;
        }

        public void setMethodString(MyMethodStringValue methodString) {
            this.methodString = methodString;
        }

        public MyConstructorStringValue getConstructorString() {
            return constructorString;
        }

        public void setConstructorString(MyConstructorStringValue constructorString) {
            this.constructorString = constructorString;
        }

        public List<MyMethodStringValue> getListMethodString() {
            return listMethodString;
        }

        public void setListMethodString(List<MyMethodStringValue> listMethodString) {
            this.listMethodString = listMethodString;
        }

        public MyStringValue getValueString() {
            return valueString;
        }

        public void setValueString(MyStringValue valueString) {
            this.valueString = valueString;
        }

        public MyMethodIntValue getMethodInt() {
            return methodInt;
        }

        public void setMethodInt(MyMethodIntValue methodInt) {
            this.methodInt = methodInt;
        }

        public MyConstructorIntValue getConstructorInt() {
            return constructorInt;
        }

        public void setConstructorInt(MyConstructorIntValue constructorInt) {
            this.constructorInt = constructorInt;
        }

        public MyIntValue getValueInt() {
            return valueInt;
        }

        public void setValueInt(MyIntValue valueInt) {
            this.valueInt = valueInt;
        }

        public List<MyMethodIntValue> getListMethodInt() {
            return listMethodInt;
        }

        public void setListMethodInt(List<MyMethodIntValue> listMethodInt) {
            this.listMethodInt = listMethodInt;
        }
    }

    // Smoke test: bytecode generation must succeed for every flavor,
    // including a generic List of a value class.
    @Test
    public void generateClass() throws Exception {
        Generator generator = new Generator();
        myStringValueMapping(generator);
        generator.parser(MyConstructorStringValue.class).output(new TestClassOutput()).generate();
        generator.parser(MyMethodStringValue.class).output(new TestClassOutput()).generate();
        generator.parser(MyStringValue.class).output(new TestClassOutput()).generate();
        generator.writer(MyStringValue.class).output(new TestClassOutput()).generate();
        generator.parser(MyMethodIntValue.class).output(new TestClassOutput()).generate();
        generator.parser(ContainsValue.class).output(new TestClassOutput()).generate();
        generator.parser(new GenericType<List<MyMethodStringValue>>() {}).output(new TestClassOutput()).generate();
    }

    // Round-trip a statically-mapped String value class.
    @Test
    public void staticStringTest() throws Exception {
        QsonMapper mapper = new QsonMapper();
        myStringValueMapping(mapper);
        MyStringValue val = new MyStringValue();
        val.create("hello");
        String json = mapper.writeString(val);
        Assertions.assertEquals("\"hello\"", json);
        val = mapper.read(json, MyStringValue.class);
        Assertions.assertEquals("hello", val.stringValue());
    }

    // Registers the external static reader/writer for MyStringValue.
    private void myStringValueMapping(QsonGenerator mapper) throws NoSuchMethodException {
        ClassMapping mapping = mapper.overrideMappingFor(MyStringValue.class);
        mapping.valueReader(ValueClassTest.class.getMethod("createMyStringValue", String.class));
        mapping.valueWriter(ValueClassTest.class.getMethod("writeMyStringValue", MyStringValue.class));
    }

    // Round-trip via annotated constructor + getter.
    @Test
    public void constructorStringTest() {
        QsonMapper mapper = new QsonMapper();
        MyConstructorStringValue val = mapper.read("\"hello\"", MyConstructorStringValue.class);
        Assertions.assertEquals("hello", val.value());
        String json = mapper.writeString(val);
        Assertions.assertEquals("\"hello\"", json);
    }

    // Round-trip via annotated setter + getter.
    @Test
    public void methodStringTest() {
        QsonMapper mapper = new QsonMapper();
        MyMethodStringValue val = mapper.read("\"hello\"", MyMethodStringValue.class);
        Assertions.assertEquals("hello", val.value());
        String json = mapper.writeString(val);
        Assertions.assertEquals("\"hello\"", json);
    }

    // Round-trip a statically-mapped int value class.
    @Test
    public void staticIntTest() throws Exception {
        QsonMapper mapper = new QsonMapper();
        mapMyIntValue(mapper);
        MyIntValue val = mapper.read("42", MyIntValue.class);
        Assertions.assertEquals(42, val.value());
        String json = mapper.writeString(val);
        Assertions.assertEquals("42", json);
    }

    // Registers the external static reader/writer for MyIntValue.
    private void mapMyIntValue(QsonGenerator mapper) throws NoSuchMethodException {
        ClassMapping mapping = mapper.overrideMappingFor(MyIntValue.class);
        mapping.valueReader(ValueClassTest.class.getMethod("createMyIntValue", int.class));
        mapping.valueWriter(ValueClassTest.class.getMethod("writeMyIntValue", MyIntValue.class));
    }

    @Test
    public void constructorIntTest() {
        QsonMapper mapper = new QsonMapper();
        MyConstructorIntValue val = mapper.read("42", MyConstructorIntValue.class);
        Assertions.assertEquals(42, val.getVal());
    }

    @Test
    public void methodIntTest() {
        QsonMapper mapper = new QsonMapper();
        MyMethodIntValue val = mapper.read("42", MyMethodIntValue.class);
        Assertions.assertEquals(42, val.value());
    }

    // Value classes embedded in a regular bean, both as fields and lists:
    // parse, verify, re-serialize, re-parse, verify again.
    @Test
    public void containsValue() throws Exception {
        String json = "{\n" +
                "  \"methodString\": \"methodString\",\n" +
                "  \"constructorString\": \"constructorString\",\n" +
                "  \"valueString\": \"valueString\",\n" +
                "  \"listMethodString\" : [\n" +
                "    \"mOne\",\n" +
                "    \"mTwo\"\n" +
                "  ],\n" +
                "  \"methodInt\": 1,\n" +
                "  \"constructorInt\": 2,\n" +
                "  \"valueInt\": 3,\n" +
                "  \"listMethodInt\" : [\n" +
                "    4,\n" +
                "    5" +
                "  ]\n" +
                "}";
        QsonMapper mapper = new QsonMapper();
        myStringValueMapping(mapper);
        mapMyIntValue(mapper);
        ContainsValue val = mapper.read(json, ContainsValue.class);
        test(val);
        json = mapper.writeString(val);
        val = mapper.read(json, ContainsValue.class);
        test(val);
    }

    // Shared assertions for containsValue().
    private void test(ContainsValue val) {
        Assertions.assertEquals("methodString", val.getMethodString().value());
        Assertions.assertEquals("constructorString", val.getConstructorString().value());
        Assertions.assertEquals("valueString", val.getValueString().stringValue());
        Assertions.assertEquals("mOne", val.getListMethodString().get(0).value());
        Assertions.assertEquals("mTwo", val.getListMethodString().get(1).value());
        Assertions.assertEquals(1, val.getMethodInt().value());
        Assertions.assertEquals(2, val.getConstructorInt().getVal());
        Assertions.assertEquals(3, val.getValueInt().value());
        Assertions.assertEquals(4, val.getListMethodInt().get(0).value());
        Assertions.assertEquals(5, val.getListMethodInt().get(1).value());
    }
}
|
package com.tzutalin.dlib;
import android.graphics.Bitmap;
import android.graphics.Point;
import android.util.Log;
import java.util.ArrayList;
/**
 * A VisionDetRet contains all the information identifying the location and confidence value of the detected object in a bitmap.
 *
 * Extends EyePoint with per-eye bounding data (start/end coordinates plus
 * width/height for each eye) filled in by native code.
 * NOTE(review): "Hight" is a historical typo for "Height", kept because these
 * are public fields/getters other code may depend on.
 */
public final class VisionDetRet extends EyePoint{
    private String mLabel;
    private float mConfidence;
    private int mLeft;
    private int mTop;
    private int mRight;
    private int mBottom;

    // Right eye bounding data (public: presumably written from JNI — confirm).
    public int mStartRightX;
    public int mStartRightY;
    public int mEndRightX;
    public int mEndRightY;
    public int mHightRight;
    public int mWidthRight;

    // Left eye bounding data.
    public int mStartLeftX;
    public int mStartLeftY;
    public int mEndLeftX;
    public int mEndLeftY;
    public int mHightLeft;
    public int mWidthLeft;

    // Overall eye-region size.
    public int mHight;
    public int mWidth;

    private ArrayList<Point> mLandmarkPoints = new ArrayList<>();

    VisionDetRet() { }

    /**
     * @param label      detected object's label
     * @param confidence confidence factor in [0, 1]
     * @param l left, t top, r right, b bottom of the bounding box (pixels)
     */
    public VisionDetRet(String label, float confidence, int l, int t, int r, int b) {
        mLabel = label;
        mLeft = l;
        mTop = t;
        mRight = r;
        mBottom = b;
        mConfidence = confidence;
    }

    /**
     * @return The X coordinate of the left side of the result
     */
    public int getLeft() {
        return mLeft;
    }

    /**
     * @return The Y coordinate of the top of the result
     */
    public int getTop() {
        return mTop;
    }

    /**
     * @return The X coordinate of the right side of the result
     */
    public int getRight() {
        return mRight;
    }

    /**
     * @return The Y coordinate of the bottom of the result
     */
    public int getBottom() {
        return mBottom;
    }

    /**
     * @return A confidence factor between 0 and 1. This indicates how certain what has been found is actually the label.
     */
    public float getConfidence() {
        return mConfidence;
    }

    /**
     * @return The label of the result
     */
    public String getLabel() {
        return mLabel;
    }

    // Right eye accessors.
    public int getStartRightX() {
        return mStartRightX;
    }

    public int getStartRightY() {
        return mStartRightY;
    }

    public int getEndRightX() {
        return mEndRightX;
    }

    public int getEndRightY() {
        return mEndRightY;
    }

    public int getHightRight() {
        return mHightRight;
    }

    public int getWidthRight() {
        return mWidthRight;
    }

    // Left eye accessors.
    public int getStartLeftX() {
        return mStartLeftX;
    }

    public int getStartLeftY() { return mStartLeftY; }

    public int getEndLeftX() {
        return mEndLeftX;
    }

    public int getEndLeftY() {
        return mEndLeftY;
    }

    public int getHightLeft() {
        return mHightLeft;
    }

    public int getWidthLeft() {
        return mWidthLeft;
    }

    /**
     * Add landmark to the list. Usually, call by jni
     * @param x Point x
     * @param y Point y
     * @return true if adding landmark successfully
     */
    public boolean addLandmark(int x, int y) {
        return mLandmarkPoints.add(new Point(x, y));
    }

    /**
     * Return the list of landmark points
     * @return ArrayList of android.graphics.Point
     */
    public ArrayList<Point> getFaceLandmarks() {
        return mLandmarkPoints;
    }
}
|
#!/usr/bin/env bash
# Install Helm 3, remove any previously deployed releases, deploy the Splunk
# OTel Collector chart, then wait until exactly one pod reports Running.
set -e

curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash

# Make sure to check and clean previously failed deployment.
echo "Checking if previous deployment exist..."
# Use $(...) and quoting instead of backticks; -z tests for an empty release list.
if [ -z "$(helm ls --short)" ]; then
  echo "Nothing to clean, ready for deployment"
else
  # Intentionally unquoted: each release name becomes a separate argument.
  helm delete $(helm ls --short)
fi

echo "Deploying Splunk OTel Collector for Kubernetes"
helm install ci-sck --set splunkPlatform.index=$CI_INDEX_EVENTS \
--set splunkPlatform.token=$CI_SPLUNK_HEC_TOKEN \
--set splunkPlatform.endpoint=https://$CI_SPLUNK_HOST:8088/services/collector \
-f ci_scripts/sck_otel_values.yaml helm-charts/splunk-otel-collector/
#--set containerLogs.containerRuntime=$CONTAINER_RUNTIME \

# Wait for deployment to finish: exactly one pod in Running state.
# (Replaces the obscure `... | [[ $(wc -l) == 1 ]]` pipeline with grep -c.)
until [ "$(kubectl get pod | grep -c Running)" -eq 1 ]; do
  sleep 1
done
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.tdb.base.objectfile;
import static org.apache.jena.tdb.base.BufferTestLib.sameValue ;
import static org.apache.jena.tdb.base.objectfile.AbstractTestObjectFile.fill ;
import java.nio.ByteBuffer ;
import org.apache.jena.atlas.junit.BaseTest ;
import org.apache.jena.tdb.base.block.Block ;
import org.apache.jena.tdb.base.file.BufferChannel ;
import org.apache.jena.tdb.base.file.BufferChannelMem ;
import org.apache.jena.tdb.base.objectfile.ObjectFile ;
import org.apache.jena.tdb.base.objectfile.ObjectFileStorage ;
import org.junit.Test ;
/**
 * Tests ObjectFileStorage's internal write buffering: objects whose sizes
 * straddle or exceed the buffer size are written (directly or via
 * allocWrite/completeWrite), then read back and compared for equality.
 */
public class TestObjectFileBuffering extends BaseTest
{
    /** Create an in-memory object file using a write buffer of the given size. */
    protected ObjectFile make(int bufferSize)
    {
        BufferChannel chan = BufferChannelMem.create() ;
        return new ObjectFileStorage(chan, bufferSize) ;
    }

    /**
     * Write byte buffers of the given sizes through ObjectFile.write, then
     * read each one back by its returned location and verify the contents.
     */
    private void write(int sizeOfBuffer, int... sizes)
    {
        ObjectFile file = make(sizeOfBuffer) ;
        int N = sizes.length ;
        ByteBuffer bb[] = new ByteBuffer[N] ;
        long loc[] = new long[N] ;
        ByteBuffer read[] = new ByteBuffer[N] ;

        for ( int i = 0 ; i < N ; i++ )
        {
            bb[i] = ByteBuffer.allocate(sizes[i]) ;
            fill(bb[i]) ;
            loc[i] = file.write(bb[i]) ;
        }
        //file.sync() ;
        for ( int i = 0 ; i < N ; i++ )
        {
            read[i] = file.read(loc[i]) ;
            // Reads must not alias the buffer that was written.
            assertNotSame(bb[i], read[i]) ;
            sameValue(bb[i], read[i]) ;
        }
    }

    /**
     * Same round-trip as write(), but using the preallocation API
     * (allocWrite/completeWrite) and reading back by block id.
     */
    private void writePrealloc(int sizeOfBuffer, int... sizes)
    {
        ObjectFile file = make(sizeOfBuffer) ;
        int N = sizes.length ;
        Block blocks[] = new Block[N] ;
        ByteBuffer read[] = new ByteBuffer[N] ;

        for ( int i = 0 ; i < N ; i++ )
        {
            blocks[i] = file.allocWrite(sizes[i]) ;
            fill(blocks[i].getByteBuffer()) ;
            file.completeWrite(blocks[i]) ;
        }
        for ( int i = 0 ; i < N ; i++ )
        {
            read[i] = file.read(blocks[i].getId()) ;
            assertNotSame(blocks[i].getByteBuffer(), read[i]) ;
            sameValue(blocks[i].getByteBuffer(), read[i]) ;
        }
    }

    // Object larger than the buffer; then combinations around the boundary.
    @Test public void objectfile_50() { write(5, 10) ; }
    @Test public void objectfile_51() { writePrealloc(5, 10) ; }
    @Test public void objectfile_52() { write(12, 10) ; }
    @Test public void objectfile_53() { writePrealloc(12, 10) ; }
    @Test public void objectfile_54() { write(12, 10, 8) ; }           // 10 is too big
    @Test public void objectfile_55() { writePrealloc(12, 10, 8) ; }   // 10 is too big
    @Test public void objectfile_56() { write(12, 6, 10) ; }
    @Test public void objectfile_57() { writePrealloc(12, 6, 10) ; }
    @Test public void objectfile_58() { write(20, 6, 10, 5) ; }
    @Test public void objectfile_59() { writePrealloc(20, 6, 10, 5) ; }
    @Test public void objectfile_60() { write(20, 4, 4, 8) ; }
    @Test public void objectfile_61() { writePrealloc(20, 4, 4, 8) ; }
}
|
<filename>src/main/java/org/codingmatters/tests/reflect/matchers/type/TypeParameterInfo.java
package org.codingmatters.tests.reflect.matchers.type;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.lang.reflect.WildcardType;
import java.util.ArrayList;
import java.util.List;
/**
* Created by nelt on 10/21/16.
*/
/**
 * Immutable description of a single type parameter, normalized from the four
 * java.lang.reflect.Type flavors: type variables, plain classes, wildcards,
 * and parameterized types. Which of the optional parts (type, bounds, clazz)
 * is populated depends on the originating flavor.
 */
public class TypeParameterInfo {
    /**
     * Factory dispatching on the concrete reflect.Type subtype.
     * @throws RuntimeException for flavors not yet handled (e.g. GenericArrayType)
     */
    public static TypeParameterInfo from(Type type) {
        if(type instanceof TypeVariable) {
            return fromTypeVariable((TypeVariable) type);
        } else if(type instanceof Class) {
            return fromClass((Class) type);
        } else if(type instanceof WildcardType) {
            return fromWildcard((WildcardType) type);
        } else if(type instanceof ParameterizedType) {
            return fromParameterizedType((ParameterizedType) type);
        } else {
            throw new RuntimeException("NYIMPL type parameter info from : " + type + " (" + type.getClass().getName() + ")");
        }
    }

    // Type variable (e.g. T extends Number): name plus upper bounds only.
    private static TypeParameterInfo fromTypeVariable(TypeVariable type) {
        return new TypeParameterInfo(type.getName(), null, boundsFrom(type.getBounds()), null, null);
    }

    // Plain class: name, resolved TypeInfo, and the Class itself.
    private static TypeParameterInfo fromClass(Class type) {
        return new TypeParameterInfo(type.getName(), TypeInfo.from(type), null, null, type);
    }

    // Wildcard (? extends X / ? super Y): named "?", with both bound lists.
    private static TypeParameterInfo fromWildcard(WildcardType type) {
        return new TypeParameterInfo("?", null, boundsFrom(type.getUpperBounds()), boundsFrom(type.getLowerBounds()), null);
    }

    // Parameterized type (e.g. List<String>): full type name plus TypeInfo.
    private static TypeParameterInfo fromParameterizedType(ParameterizedType type) {
        return new TypeParameterInfo(type.getTypeName(), TypeInfo.from(type), null, null, null);
    }

    // Convert an array of reflect bounds into TypeInfo descriptors.
    private static ArrayList<TypeInfo> boundsFrom(Type[] typeBounds) {
        ArrayList<TypeInfo> bounds = new ArrayList<>(typeBounds.length);
        for (Type bound : typeBounds) {
            bounds.add(TypeInfo.from(bound));
        }
        return bounds;
    }

    private final String name;
    private final TypeInfo type;
    private final List<TypeInfo> upperBounds;
    private final List<TypeInfo> lowerBounds;
    private final Class clazz;

    // Null bound lists are normalized to empty lists; name is never null.
    private TypeParameterInfo(String name, TypeInfo type, List<TypeInfo> upperBounds, List<TypeInfo> lowerBounds, Class clazz) {
        this.name = name;
        this.type = type;
        this.upperBounds = upperBounds != null ? upperBounds : new ArrayList<>(0);
        this.lowerBounds = lowerBounds != null ? lowerBounds : new ArrayList<>(0);
        this.clazz = clazz;
    }

    public String name() {
        return this.name;
    }

    /** Resolved TypeInfo; null for type variables and wildcards. */
    public TypeInfo type() {
        return type;
    }

    public List<TypeInfo> upperBounds() {
        return this.upperBounds;
    }

    public List<TypeInfo> lowerBounds() {
        return lowerBounds;
    }

    /** True when this parameter came from a wildcard (name "?"). */
    public boolean isWildcard() {
        return this.name.equals("?");
    }

    /** The raw Class; non-null only when built from a plain Class. */
    public Class clazz() {
        return clazz;
    }

    @Override
    public String toString() {
        return "TypeParameterInfo{" +
                "name='" + name + '\'' +
                ", type=" + type +
                ", upperBounds=" + upperBounds +
                ", lowerBounds=" + lowerBounds +
                ", clazz=" + clazz +
                '}';
    }
}
|
<gh_stars>1-10
import config from 'corona/config/environment'
import { decorate as cached } from 'corona/utils/weak-cache'
const {
APP: { sortMethod }
} = config
const DAY = 86400000
const META_FIELDS = ['population']
/**************************************
* Base field classes & helpers
*/
/**
 * Apply `func` element-wise across several parallel arrays:
 * zipApply(f, [[a0, a1], [b0, b1]]) => [f(a0, b0), f(a1, b1)].
 * Output length follows the first array.
 */
function zipApply(func, argArrays) {
  const [first, ...rest] = argArrays
  return first.map((value, index) =>
    func(value, ...rest.map((arr) => arr[index]))
  )
}
/**
 * Pair each computed value with the date of the corresponding source point,
 * producing the canonical [{ date, value }] series shape.
 */
function zipDates(points, values) {
  const series = []
  for (let i = 0; i < points.length; i++) {
    series.push({ date: points[i].date, value: values[i] })
  }
  return series
}
/**
 * Base class for all computed data fields.
 *
 * A field maps a zone to a series of { date, value } points: `compute(zone)`
 * must return raw values aligned index-for-index with zone.points. Results
 * are memoized per zone by the @cached weak-cache decorator.
 */
class Field {
  constructor(compute, name) {
    this.compute = compute
    this.name = name || '<unknown>'
  }

  // Subclasses decide whether this field is computable for a given zone.
  canApply() {
    throw new Error('Not implemented')
  }

  @cached
  apply(zone) {
    let { compute } = this
    return zipDates(zone.points, compute(zone))
  }

  // Scalar used for ordering zones. With sortMethod 'most-recent' this is
  // the latest non-NaN value; otherwise the maximum over all non-NaN values.
  @cached
  sortValue(zone) {
    let values = this.apply(zone)
    if (sortMethod === 'most-recent') {
      let mostRecent = [...values].reverse().find(({ value }) => !isNaN(value))
      return mostRecent && mostRecent.value
    } else {
      return Math.max(
        ...values.filter(({ value }) => !isNaN(value)).map(({ value }) => value)
      )
    }
  }
}
/** Field whose value at each point depends on that point alone. */
class SinglePointField extends Field {
  constructor(get, name) {
    super(({ points }) => points.map(get), name)
  }
}
class MultiField extends Field {
constructor(fun, fields, name) {
super(
(zone) =>
zipApply(
fun,
fields.map((f) => f.apply(zone).map((p) => p.value))
),
name
)
}
}
/**************************************
* Concrete field implementations
*/
/** Field with the same constant value at every point; applies to any zone. */
class Constant extends SinglePointField {
  constructor(value) {
    super(() => value, `${value}`)
  }
  canApply() {
    return true
  }
}
/** Field reading a named numeric property from each raw data point. */
class Source extends SinglePointField {
  constructor(name) {
    super(
      // Non-numeric / missing entries become NaN rather than undefined.
      (point) => (typeof point[name] === 'number' ? point[name] : NaN),
      name
    )
    this.fieldName = name
  }
  canApply({ fields }) {
    return fields.has(this.fieldName)
  }
}
/** Field broadcasting a zone-level attribute (e.g. population) to every point. */
class Meta extends Field {
  constructor(name) {
    super((zone) => zone.points.map(() => zone[this.metaName]), name)
    this.metaName = name
  }
  canApply(zone) {
    // Falsy metadata (missing, 0) counts as not applicable.
    return Boolean(zone[this.metaName])
  }
}
/** Element-wise product of two fields. */
class Scale extends MultiField {
  constructor(field, scale) {
    super(
      (a, b) => a * b,
      [field, scale],
      `scale(${field.name} * ${scale.name})`
    )
    this.fields = [field, scale]
  }
  canApply(zone) {
    return this.fields.every((f) => f.canApply(zone))
  }
}
/** Element-wise quotient; NaN when the denominator is NaN or zero. */
class Ratio extends MultiField {
  constructor(num, denom) {
    super(
      (a, b) => (!isNaN(b) && b !== 0 ? a / b : NaN),
      [num, denom],
      `ratio(${num.name} / ${denom.name})`
    )
    this.fields = [num, denom]
  }
  canApply(zone) {
    return this.fields.every((f) => f.canApply(zone))
  }
}
/** Element-wise sum of a field and an offset field. */
class Offset extends MultiField {
  constructor(field, offset) {
    super(
      (a, b) => a + b,
      [field, offset],
      // Fixed label: this field adds — the previous "/" was copy-pasted
      // from Ratio and misrepresented the operation in derived names.
      `offset(${field.name} + ${offset.name})`
    )
    this.fields = [field, offset]
  }
  canApply(zone) {
    return this.fields.every((f) => f.canApply(zone))
  }
}
/** Per-point first non-NaN value among the given fields (left to right). */
class Coalesce extends MultiField {
  constructor(...fields) {
    super(
      (...values) => {
        let value = NaN
        while (isNaN(value) && values.length) {
          value = values.shift()
        }
        return value
      },
      fields,
      `coalesce(${fields.map((f) => f.name).join(', ')})`
    )
    this.fields = fields
  }
  canApply(zone) {
    // Applicable if at least one input is — others contribute NaN.
    return this.fields.some((f) => f.canApply(zone))
  }
}
/**
 * Value of `field` shifted back by a per-point number of days given by the
 * `days` field (rounded). Out-of-range or NaN offsets yield NaN.
 * NOTE(review): the offset is applied by array index, which assumes one
 * point per day — confirm the data is strictly daily.
 */
class Lag extends Field {
  constructor(field, days) {
    super((zone) => {
      let points = field.apply(zone)
      let offsets = days.apply(zone)
      return points.map((point, index) => {
        let offset = Math.round(offsets[index].value)
        if (isNaN(offset)) {
          return NaN
        }
        let sourceIndex = index - offset
        if (sourceIndex < 0 || sourceIndex >= points.length) {
          return NaN
        }
        return points[sourceIndex].value
      })
    }, `lag(${field.name},${days.name})`)
    this.fields = [field, days]
  }
  canApply(zone) {
    return this.fields.every((f) => f.canApply(zone))
  }
}
/**
 * Day-over-day difference of a field; NaN where the immediately preceding
 * calendar day is missing from the series.
 */
class Change extends Field {
  constructor(field) {
    super((zone) => {
      let points = field.apply(zone)
      return points.map((point, index) => {
        let prev = points[index - 1]
        // Only difference against an actual previous *day*, not just the
        // previous array entry.
        if (prev && prev.date === point.date - DAY) {
          return point.value - prev.value
        }
        return NaN
      })
    }, `change(${field.name})`)
    this.field = field
  }
  canApply(zone) {
    return this.field.canApply(zone)
  }
}
/**
 * Running sum of a field's values. Remains NaN until the first numeric
 * value; NaN gaps afterwards carry the last total forward.
 */
class Accumulate extends Field {
  constructor(field) {
    super((zone) => {
      let points = field.apply(zone)
      let acc = NaN
      let values = []
      for (let point of points) {
        if (!isNaN(point.value)) {
          if (isNaN(acc)) {
            acc = 0
          }
          acc += point.value
        }
        values.push(acc)
      }
      return values
    }, `accumulate(${field.name})`)
    this.field = field
  }
  canApply(zone) {
    return this.field.canApply(zone)
  }
}
/**
 * Centered 7-day moving average: for each point, average the non-NaN values
 * of neighbors within ±3 array slots AND ±3 calendar days. Points outside
 * the first/last numeric value of the series stay NaN.
 */
class Weekly extends Field {
  constructor(field) {
    super((zone) => {
      let points = field.apply(zone)
      let values = points.map(({ value }) => value)
      let firstNumber = values.findIndex((v) => !isNaN(v))
      // Note: .reverse() mutates `values`, but it is a throwaway copy of
      // the point values and is not read again afterwards.
      let lastNumber =
        values.length - values.reverse().findIndex((v) => !isNaN(v))
      return points.map(({ date }, index) => {
        if (firstNumber !== -1 && index >= firstNumber && index < lastNumber) {
          let windowValues = [-3, -2, -1, 0, 1, 2, 3]
            .map((offset) => {
              let point = points[index + offset]
              if (
                point &&
                !isNaN(point.value) &&
                Math.abs(point.date - date) <= 3 * DAY
              ) {
                return point.value
              }
            })
            .filter((v) => typeof v === 'number')
          if (windowValues.length) {
            return (
              windowValues.reduce((sum, value) => sum + value, 0) /
              windowValues.length
            )
          }
        }
        return NaN
      })
    }, `weekly(${field.name})`)
    this.field = field
  }
  canApply(zone) {
    return this.field.canApply(zone)
  }
}
/**
 * Pass a field's values through unchanged, but replace zeroes and
 * non-numeric entries with NaN (e.g. so log-scale charts skip them).
 */
class NonZero extends Field {
  constructor(field) {
    super(
      (zone) => {
        let points = field.apply(zone)
        return points.map(({ value }) =>
          typeof value === 'number' && value !== 0 ? value : NaN
        )
      },
      // Fix: the name argument was previously omitted, so any derived
      // field name rendered this one as '<unknown>' — inconsistent with
      // every other Field subclass in this module.
      `nonzero(${field.name})`
    )
    this.field = field
  }
  canApply(zone) {
    return this.field.canApply(zone)
  }
}
/**************************************
* Field creation helpers
*/
// Per-name / per-value caches so repeated lookups reuse Field instances.
const sourceCache = {}
const constCache = {}

/**
 * Coerce a plain value into a Field:
 * - arrays are fieldified element-wise,
 * - Field instances pass through untouched,
 * - numbers become (cached) Constant fields,
 * - strings become (cached) Meta or Source fields depending on META_FIELDS.
 * Anything else is an error.
 */
function fieldify(thing) {
  if (Array.isArray(thing)) {
    return thing.map((entry) => fieldify(entry))
  }
  if (thing instanceof Field) {
    return thing
  }
  if (typeof thing === 'number') {
    if (constCache[thing] === undefined) {
      constCache[thing] = new Constant(thing)
    }
    return constCache[thing]
  }
  if (typeof thing === 'string') {
    if (sourceCache[thing] === undefined) {
      sourceCache[thing] = META_FIELDS.includes(thing)
        ? new Meta(thing)
        : new Source(thing)
    }
    return sourceCache[thing]
  }
  throw new Error(`Cannot fieldify ${thing}`)
}
/** Wrap a factory so every argument is coerced to a Field before the call. */
function fieldifyArgs(func) {
  return function (...args) {
    return func(...fieldify(args))
  }
}
// Public factory helpers. Each accepts Field instances, bare numbers
// (constants) or strings (source/meta names) thanks to fieldifyArgs.
const scale = fieldifyArgs((f, s) => new Scale(f, s))
const ratio = fieldifyArgs((n, d) => new Ratio(n, d))
const offset = fieldifyArgs((f, o) => new Offset(f, o))
const change = fieldifyArgs((f) => new Change(f))
const weekly = fieldifyArgs((f) => new Weekly(f))
const accumulate = fieldifyArgs((f) => new Accumulate(f))
const lag = fieldifyArgs((f, o) => new Lag(f, o))
// Derived conveniences built on the primitives above.
const reverse = fieldifyArgs((f) => scale(f, -1))
const inverse = fieldifyArgs((f) => ratio(1, f))
const coalesce = fieldifyArgs((...fields) => new Coalesce(...fields))
const nonzero = fieldifyArgs((f) => new NonZero(f))
const field = (f) => fieldify(f)

export {
  accumulate,
  change,
  coalesce,
  field,
  inverse,
  lag,
  nonzero,
  offset,
  ratio,
  reverse,
  scale,
  weekly
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.