text stringlengths 1 1.05M |
|---|
#!/bin/bash
# Mux an existing MP4 video stream with an MP3 audio track into one MP4 file.
# The video stream is copied as-is (no re-encode); the audio is transcoded to AAC.
# Uses the ffmpeg binary bundled by Python's imageio package.
# Quoted to survive paths containing spaces (the original left both unquoted).
FFMPEG="$HOME/.imageio/ffmpeg/ffmpeg-linux64-v3.3.1"
# -map 0:v:0 selects the first video stream of input 0 (video.mp4),
# -map 1:a:0 selects the first audio stream of input 1 (audio.mp3).
"$FFMPEG" -i video.mp4 -i audio.mp3 -c:v copy -c:a aac -map 0:v:0 -map 1:a:0 video_audio.mp4
|
#!/usr/bin/env bash
# Sourced library bootstrap for the "help output" helpers.
# Include guard: if this library has already been sourced, stop here.
if [ "$LIB_HELP_OUTPUT_LOAD" ]; then
return
fi
LIB_HELP_OUTPUT_LOAD="FINISH"
# Load the environment helpers (defines `include`), then pull in the
# modules this library depends on.
. "$TOOLS_LIB_PATH/lib-env.sh"
include "output"
include "log"
|
<filename>scrapydd/migrates/versions/023_job_settings.py
from sqlalchemy import *
from migrate import *
# Shared MetaData instance; bound to the engine inside upgrade()/downgrade().
meta = MetaData()

def upgrade(migrate_engine):
    """Add a nullable TEXT 'settings' column to spider_execution_queue."""
    meta.bind = migrate_engine
    job_table = Table('spider_execution_queue', meta, autoload=True)
    job_settings = Column('settings', Text, nullable=True)
    job_settings.create(job_table)

def downgrade(migrate_engine):
    """Drop the 'settings' column added by upgrade()."""
    meta.bind = migrate_engine
    job_table = Table('spider_execution_queue', meta, autoload=True)
    job_table.c['settings'].drop()
|
# success: print the supplied message in green, then reset terminal colours.
success()
{
    msg="$1"
    tput setaf 2
    echo "$msg"
    tput sgr0
}
# Forward every script argument to the helper (only $1 is used).
success "$@"
|
import React from 'react'
import {Rating, Grid} from 'semantic-ui-react'
const UserReviews = props => {
console.log('rendering')
const review = props.review
return (
<Grid.Row style={{marginLeft: 15}}>
<h4>
<Rating defaultRating={review.rating} icon="star" maxRating={10} />
</h4>
{review.content}
<hr />
</Grid.Row>
)
}
export default UserReviews
|
<reponame>aliraza114/JamstackTODO
import React, { ReactNode, useContext } from "react"
import { Navbar, Nav, Container } from "react-bootstrap"
import "bootstrap/dist/css/bootstrap.min.css"
import { Link } from "gatsby"
import { identityContext } from "../context/authContext"
const styles = require("./pageLayout.module.css")
// Props for PageLayout: the page content rendered inside the shared chrome.
// NOTE(review): the interface name is lowercase `props`; TS convention would
// be `Props` — renaming is cosmetic and left to a coordinated change.
interface props {
  children: ReactNode
}

// Shared page shell: a dark, responsive navbar with Home/Dashboard links and,
// when an identity user is present, the signed-in user's full name on the
// right; page content renders below inside a fluid container.
export default function PageLayout({ children }: props) {
  // Falsy until the identity context reports a logged-in user.
  const { user } = useContext(identityContext)
  return (
    <div>
      <Navbar collapseOnSelect expand="sm" bg="dark" variant="dark">
        <Navbar.Brand>Todo-App</Navbar.Brand>
        <Navbar.Toggle aria-controls="responsive-navbar-nav" />
        <Navbar.Collapse id="responsive-navbar-nav">
          <Nav className="mr-auto">
            <Nav.Link as={Link} to={"/"}>
              Home
            </Nav.Link>
            <Nav.Link as={Link} to={"/dashboard"}>
              Dashboard{" "}
            </Nav.Link>
          </Nav>
          {/* Only show the identity block once a user is signed in. */}
          {!!user && (
            <Nav>
              <Navbar.Text>
                Signed in as:{" "}
                <span className={styles.userName}>
                  {user.user_metadata.full_name}
                </span>
              </Navbar.Text>
            </Nav>
          )}
        </Navbar.Collapse>
      </Navbar>
      <Container fluid>{children}</Container>
    </div>
  )
}
|
def string_to_integer(string):
    """Parse `string` as a base-10 integer, or return None if it is not one."""
    try:
        parsed = int(string)
    except ValueError:
        return None
    return parsed
package org.jruby.ir.operands;
import org.jruby.ir.IRVisitor;
import org.jruby.ir.persistence.IRWriterEncoder;
import org.jruby.ir.transformations.inlining.SimpleCloneInfo;
import org.jruby.parser.StaticScope;
import org.jruby.runtime.DynamicScope;
import org.jruby.runtime.ThreadContext;
import org.jruby.runtime.builtin.IRubyObject;
/**
 * Operand representing the implicit Ruby {@code %self} variable.
 * A stateless singleton: retrieval simply returns the interpreter-supplied
 * {@code self} object for the current call frame.
 */
public class Self extends Variable {
    /** The single shared instance; Self carries no per-instance state. */
    public static final Self SELF = new Self();
    private static final String NAME = "%self";

    // Private: everyone shares the SELF singleton.
    private Self() {
        super();
    }
    @Override
    public String getName() {
        return NAME;
    }
    @Override
    public OperandType getOperandType() {
        return OperandType.SELF;
    }
    // NOTE(review): not marked @Override — presumably shadows/overrides a
    // default in a parent type; confirm against Variable/Operand.
    public boolean isSelf() {
        return true;
    }
    // Identity comparison is sufficient because Self is a singleton.
    @Override
    public boolean equals(Object obj) {
        return obj == this;
    }
    // %self resolves to the live `self` passed in by the interpreter.
    @Override
    public Object retrieve(ThreadContext context, IRubyObject self, StaticScope currScope, DynamicScope currDynScope, Object[] temp) {
        return self;
    }
    // Cloning is a no-op: %self is immutable and shared.
    @Override
    public Variable clone(SimpleCloneInfo ii) {
        return this;
    }
    @Override
    public void encode(IRWriterEncoder e) {
        // No super because we don't want to dump %self and offset of 0
        e.encode(getOperandType().getCoded());
    }
    @Override
    public void visit(IRVisitor visitor) {
        visitor.Self(this);
    }
    // NOTE(review): orders every non-identical operand after this one (-1),
    // which is asymmetric; confirm callers only use this as an equality check.
    @Override
    public int compareTo(Object o) {
        return this == o ? 0 : -1;
    }
}
|
#!/usr/bin/env bash
# Run the aleph end-to-end client against the node given in $NODE_URL.
# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail
# Quoted so the URL cannot be word-split or glob-expanded (the original
# passed $NODE_URL unquoted).
aleph-e2e-client --node "$NODE_URL"
echo "Done!"
|
#!/bin/bash
# For every suggestion directory of a BEAM calibration experiment, print the
# directory name and dump the ModeChoice events from iteration 0 of that run.
# NOTE(review): the glob in this `cd` assumes exactly one experiment directory
# matches — with multiple matches only the first is used; confirm.
cd git/beam/production/application-sfbay/calibration/experiments/*/suggestions
for d in */; do
# ${d::-1} strips the trailing slash from the glob result.
echo ${d::-1}
gunzip -c ${d::-1}/ITERS/it.0/0.events.xml.gz | grep ModeChoice
#grep -Eo "mode=\"\w" sort uniq -c;
done
|
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Release CI: build and test TensorFlow (Python 2.7, TF2 behavior enabled)
# with CUDA 10 / cuDNN 7 / TensorRT on Ubuntu 16.04, excluding TF Lite.
set -e
set -x
source tensorflow/tools/ci_build/release/common.sh
install_ubuntu_16_pip_deps pip2.7
# Install bazelisk
install_bazelisk
# Run configure.
# Configure a GPU build: cloud-storage filesystems, CUDA 10 + cuDNN 7 +
# TensorRT, AVX optimizations, and the listed compute capabilities.
export TF_NEED_GCP=1
export TF_NEED_HDFS=1
export TF_NEED_S3=1
export TF_NEED_CUDA=1
export TF_CUDA_VERSION=10
export TF_CUDNN_VERSION=7
export TF_NEED_TENSORRT=1
export TENSORRT_INSTALL_PATH=/usr/local/tensorrt
export CC_OPT_FLAGS='-mavx'
export PYTHON_BIN_PATH=$(which python2.7)
export TF2_BEHAVIOR=1
export PROJECT_NAME="tensorflow_gpu"
export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib"
export TF_CUDA_COMPUTE_CAPABILITIES=sm_35,sm_37,sm_52,sm_60,sm_61,compute_70
# `yes ""` accepts every configure.py prompt with its default answer.
yes "" | "$PYTHON_BIN_PATH" configure.py
# Get the default test targets for bazel.
source tensorflow/tools/ci_build/build_scripts/DEFAULT_TEST_TARGETS.sh
# Restrict to GPU-tagged, OSS-compatible, Python-2-compatible tests.
tag_filters="gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py2"
# Let bazel test fail without aborting the script; the summary helper below
# reports the final exit status.
set +e
bazel test --config=cuda --config=opt \
--crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \
--linkopt=-lrt \
--action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \
--test_lang_filters=py \
--build_tag_filters="${tag_filters}" \
--test_tag_filters="${tag_filters}" \
--test_timeout="300,450,1200,3600" --local_test_jobs=4 \
--test_output=errors --verbose_failures=true --keep_going \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \
-- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/...
test_xml_summary_exit
|
import cv2
from keras.models import load_model
import numpy as np
def preprocess_image(image_path):
    """Load a grayscale image and prepare it as a (1, 28, 28, 1) float array.

    Args:
        image_path: path to an image file readable by OpenCV.

    Returns:
        numpy array of shape (1, 28, 28, 1) with values in [0.0, 1.0].

    Raises:
        ValueError: if the file cannot be read as an image.
    """
    # Load the image in grayscale
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    # cv2.imread returns None (rather than raising) for a missing or
    # unreadable file; fail loudly here instead of crashing inside resize.
    if image is None:
        raise ValueError("could not read image: {}".format(image_path))
    # Resize the image to 28x28 pixels
    image = cv2.resize(image, (28, 28))
    # Normalize the pixel values to be between 0 and 1
    image = image / 255.0
    # Reshape to the (batch, height, width, channels) layout the model expects
    image = np.reshape(image, (1, 28, 28, 1))
    return image
def classify_digits(image_paths, model_path):
    """Classify handwritten-digit images with a saved Keras model.

    Args:
        image_paths: iterable of file paths to digit images.
        model_path: path to a model file accepted by keras load_model().

    Returns:
        A list with the argmax class index predicted for each image, in the
        same order as image_paths.
    """
    # Load the pre-trained Keras model
    model = load_model(model_path)
    predicted_digits = []
    for path in image_paths:
        # Preprocess the input image
        preprocessed_image = preprocess_image(path)
        # Use the model to predict the digit
        prediction = model.predict(preprocessed_image)
        # Get the index of the predicted digit
        predicted_digit = np.argmax(prediction)
        predicted_digits.append(predicted_digit)
    return predicted_digits
using System;

namespace Pyramid
{
    // Prints a 9-row centered number pyramid: row r shows the digit r
    // repeated (2r - 1) times, indented so the pyramid is centered.
    class Program
    {
        static void Main(string[] args)
        {
            int rows, i, j;
            rows = 1; // current row number; also the digit printed on that row
            for(i = 1; i <= 9; i++)
            {
                // Print spaces
                for (j = i; j <= 9; j++)
                    Console.Write(" ");
                // Print numbers
                for (j = 1; j <= (rows * 2) - 1; j++)
                    Console.Write("{0} ", rows);
                Console.Write("\n");
                rows ++;
            }
            // Keep the console window open until a key is pressed.
            Console.ReadKey();
        }
    }
}
#!/bin/bash
# CI provisioning script: installs system packages (nginx, firefox), npm and
# pip tooling, an optional triggering cesium branch, project dependencies,
# the database, and geckodriver + selenium for browser-based tests.
# `section`/`section_end` are CI log-folding helpers provided by the runner.
set -ex
section "install.base.requirements"
# Install v1.7 or newer of nginx to support 'if' statement for logging
sudo apt-add-repository -y ppa:nginx/stable
sudo apt update
sudo apt install -y nginx firefox
nginx -v
firefox --version
pip install --upgrade pip
hash -d pip # find upgraded pip
section_end "install.base.requirements"
section "install.baselayer.requirements"
npm -g install npm@next
npm --version
node --version
# TODO replace w/ baselayer dependent build info
# If another repository triggered this build, shallow-clone and install that
# repo's branch so the two are tested together.
if [[ -n ${TRIGGERED_FROM_REPO} ]]; then
mkdir cesium-clone
cd cesium-clone
git init
git remote add origin git://github.com/${TRIGGERED_FROM_REPO}
git fetch --depth=1 origin ${TRIGGERED_FROM_BRANCH}
git checkout -b ${TRIGGERED_FROM_BRANCH} ${TRIGGERED_FROM_SHA}
pip install .
cd ..
fi
pip list --format=columns
section_end "install.baselayer.requirements"
section "init.db"
make db_init
section_end "init.db"
section "run.make.dependencies"
make dependencies
pip install -r requirements.docs.txt
pip list --format=columns
section_end "run.make.dependencies"
section "install.geckodriver.and.selenium"
GECKO_VER=0.24.0
wget https://github.com/mozilla/geckodriver/releases/download/v${GECKO_VER}/geckodriver-v${GECKO_VER}-linux64.tar.gz
sudo tar -xzf geckodriver-v${GECKO_VER}-linux64.tar.gz -C /usr/local/bin
rm geckodriver-v${GECKO_VER}-linux64.tar.gz
which geckodriver
geckodriver --version
pip install --upgrade selenium
python -c "import selenium; print(f'Selenium {selenium.__version__}')"
section_end "install.geckodriver.and.selenium"
section "install.deps"
make dependencies
pip list --format=columns
nginx -v
section_end "install.deps"
|
-- Count how many customers are located in New York.
SELECT COUNT(*)
FROM customers
WHERE city = 'New York'
from base_platform import BasePlatform

class UbuntuPlatform(BasePlatform):
    """Ubuntu-specific platform constants for managing the Apache service."""
    # Apache control binary on Debian/Ubuntu systems.
    APACHE_SERVICE = 'apache2ctl'
    # Shell command that restarts Apache via the SysV service wrapper.
    APACHE_RESTART_COMMAND = 'service apache2 restart'
// Replace parent[name] with a logging shim: each call forwards its argument
// array to `creator`, logs the call (under `label`) and the produced value,
// then returns that value.
function hax(parent, name, label, creator) {
    parent[name] = function (...rest) {
        const value = creator(rest);
        console.log("HAAAAX", label, "(", ...rest, ") -> ", value);
        return value;
    };
}
// Invoke `func` with the argument list `rest`, using the caller's `this`
// binding when one exists and falling back to `parent` otherwise.
function execFunction(parent, func, rest) {
    const receiver = this === undefined ? parent : this;
    return func.apply(receiver, rest);
}
// Wrap parent[name] so every call is logged — arguments, return value, and
// (for Promises) the eventual settled value — before delegating to the
// original function. Exceptions are logged and re-thrown unchanged.
export function haxFunction(parent, name, label) {
    const oldFunction = parent[name];
    hax(parent, name, "call " + (label || name),
        function (rest) {
            const params = ["HAAAAX", "call", label, "(", ...rest, ")"],
                success = (value) => console.log(...params, "->", value),
                failure = (exp) => console.error(...params, "->", exp);
            try {
                const value = execFunction(this, oldFunction, rest);
                if (value === undefined) {
                    // Void call: log the invocation only.
                    console.log(...params);
                }
                else {
                    success(value);
                    // A Promise is logged twice: once as the raw Promise,
                    // again when it resolves or rejects.
                    if (value instanceof Promise) {
                        value.then(success)
                            .catch(failure);
                    }
                }
                return value;
            }
            catch (exp) {
                failure(exp);
                throw exp;
            }
        });
}
// Wrap the prototype method `parent.prototype[name]` with the same logging
// behavior as haxFunction: log arguments, the return value, Promise
// settlements, and re-thrown exceptions under a "Class::method" label.
export function haxMethod(parent, name) {
    const oldMethod = parent.prototype[name];
    const label = `${parent.name}::${name}`;
    parent.prototype[name] = function (...rest) {
        const params = ["HAAAAX", "call", label, "(", ...rest, ")"],
            success = (value) => console.log(...params, "->", value),
            failure = (exp) => console.error(...params, "->", exp);
        try {
            // Preserve the instance `this` for the original method.
            const value = oldMethod.apply(this, rest);
            if (value === undefined) {
                console.log(...params);
            }
            else {
                success(value);
                // A Promise logs again when it settles.
                if (value instanceof Promise) {
                    value.then(success)
                        .catch(failure);
                }
            }
            return value;
        }
        catch (exp) {
            failure(exp);
            throw exp;
        }
    }
}
// Counter that keeps each generated subclass name unique.
let haxCounter = 0;
// Replace parent[OldClass.name] with a subclass of OldClass that logs every
// construction; optionally also wraps the listed prototype methods with
// logging via haxMethod.
// Fix: the original interpolated `extends ${OldClass.name}` into source run
// through `new Function(...)`. Function-created code sees only the global
// scope, so any non-global class raised a ReferenceError. The base class is
// now passed into the generated function as an explicit parameter.
export function haxClass(parent, OldClass, methods) {
    const className = `Hax${Date.now()}${haxCounter++}`;
    const script = `return class ${className} extends BaseClass {
        constructor(...rest) {
            super(...rest);
            console.log("HAAAAX", "create", "${OldClass.name}", "(", ...rest, ")");
        }
    }`;
    parent[OldClass.name] = new Function("BaseClass", script)(OldClass);
    if (methods !== undefined) {
        for (let method of methods) {
            haxMethod(OldClass, method);
        }
    }
}
package hu.unideb.inf.dejavu.objects;
import static org.junit.Assert.*;
import org.junit.Test;
// Unit tests for Position: value equality, mutation via setPos, and the
// first/second coordinate accessors.
public class PositionTest {
    // Positions with identical coordinates compare equal; a differing third
    // coordinate breaks equality.
    @Test
    public void testEquals() {
        Position pos1 = new Position(1, 1, 2);
        Position pos2 = new Position(1, 1, 2);
        Position pos3 = new Position(1, 1, 3);
        assertEquals(true, pos1.equals(pos2));
        assertEquals(false, pos2.equals(pos3));
    }
    // setPos overwrites all three coordinates in place.
    @Test
    public void testSetPos() {
        Position pos1 = new Position(1, 1, 2);
        Position pos2 = new Position(5, 5, 5);
        pos2.setPos(1, 1, 2);
        assertEquals(pos2, pos1);
    }
    // getFirst returns the first constructor argument.
    @Test
    public void testGetFirst() {
        Position pos1 = new Position(1, 1, 2);
        assertEquals(1, pos1.getFirst());
        Position pos2 = new Position(2, 1, 2);
        assertEquals(2, pos2.getFirst());
        Position pos3 = new Position(3, 1, 2);
        assertEquals(3, pos3.getFirst());
        Position pos4 = new Position(4, 1, 2);
        assertEquals(4, pos4.getFirst());
    }
    // getSecond returns the second constructor argument.
    @Test
    public void testGetSecond() {
        Position pos1 = new Position(1, 1, 2);
        assertEquals(1, pos1.getSecond());
        Position pos2 = new Position(1, 2, 2);
        assertEquals(2, pos2.getSecond());
        Position pos3 = new Position(1, 3, 2);
        assertEquals(3, pos3.getSecond());
        Position pos4 = new Position(1, 4, 2);
        assertEquals(4, pos4.getSecond());
    }
}
|
<reponame>igorhrcek/flame
const fs = require('fs');
// Simple file-backed logger, exported as a singleton.
class Logger {
    constructor() {
        // Ensure ./flame.log exists before anything tries to append to it.
        this.logFileHandler();
    }
    // Create an empty ./flame.log if missing; otherwise just report it exists.
    logFileHandler() {
        if (!fs.existsSync('./flame.log')) {
            fs.writeFileSync('./flame.log', '');
        } else {
            console.log('file exists');
        }
    }
    // NOTE(review): unimplemented stub — presumably meant to append
    // generateLog(logMsg, logType) output to ./flame.log; confirm intent.
    writeLog(logMsg, logType) {
    }
    // Format a log line as "[DD-MM-YYYY HH:MM:SS.mmm]: TYPE message".
    generateLog(logMsg, logType) {
        const now = new Date();
        const date = `${this.parseNumber(now.getDate())}-${this.parseNumber(now.getMonth() + 1)}-${now.getFullYear()}`;
        const time = `${this.parseNumber(now.getHours())}:${this.parseNumber(now.getMinutes())}:${this.parseNumber(now.getSeconds())}.${now.getMilliseconds()}`;
        const log = `[${date} ${time}]: ${logType} ${logMsg}`;
        return log;
        // const timestamp = new Date().toISOString();
    }
    // Zero-pad a number to two digits for date/time formatting.
    parseNumber(number) {
        if (number > 9) {
            return number;
        } else {
            return `0${number}`;
        }
    }
}
// console.log(logger.generateLog('testMsg', 'INFO'));
module.exports = new Logger();
<reponame>manuel-hegner/conquery
import { TFunction } from "react-i18next";
// See index.html for an inject marker, that we use to inject env vars
// at container runtime
// Read an env var injected into window.env at container runtime; the literal
// string "null" is treated as unset.
function runtimeVar(variable: string): string | null {
  return window.env && window.env[variable] !== "null"
    ? window.env[variable]
    : null;
}
// Needs to be explicit because webpack statically replaces process.env.XXX through DefinePlugin
// Each setting prefers the runtime-injected value and falls back to the
// build-time environment variable.
const isProductionEnv = runtimeVar("NODE_ENV") || process.env.NODE_ENV;
const languageEnv = runtimeVar("REACT_APP_LANG") || process.env.REACT_APP_LANG;
const apiUrlEnv =
  runtimeVar("REACT_APP_API_URL") || process.env.REACT_APP_API_URL;
const disableLoginEnv =
  runtimeVar("REACT_APP_DISABLE_LOGIN") || process.env.REACT_APP_DISABLE_LOGIN;
const enableIDPEnv =
  runtimeVar("REACT_APP_IDP_ENABLE") || process.env.REACT_APP_IDP_ENABLE;
const basenameEnv =
  runtimeVar("REACT_APP_BASENAME") || process.env.REACT_APP_BASENAME;
const idpUrlEnv =
  runtimeVar("REACT_APP_IDP_URL") || process.env.REACT_APP_IDP_URL;
const idpRealmEnv =
  runtimeVar("REACT_APP_IDP_REALM") || process.env.REACT_APP_IDP_REALM;
const idpClientIdEnv =
  runtimeVar("REACT_APP_IDP_CLIENT_ID") || process.env.REACT_APP_IDP_CLIENT_ID;
// NOTE(review): `|| true` makes isProduction unconditionally true regardless
// of NODE_ENV — confirm whether this debug override should be removed.
export const isProduction = isProductionEnv === "production" || true;
export const language = languageEnv === "de" ? "de" : "en";
export const apiUrl = apiUrlEnv || "";
export const isLoginDisabled = disableLoginEnv === "true";
export const isIDPEnabled = enableIDPEnv === "true";
export const basename = basenameEnv || "";
export const idpUrl = idpUrlEnv || "";
export const idpRealm = idpRealmEnv || "";
export const idpClientId = idpClientIdEnv || "";
// Hook for deployments to supply custom, translated error messages.
export interface CustomEnvironment {
  getExternalSupportedErrorMessage?: (
    t: TFunction,
    code: string,
    context?: Record<string, string>,
  ) => string | undefined;
}
let customEnvironment: CustomEnvironment | null = null;
// Called once at startup to register the deployment-specific environment.
export const initializeEnvironment = (env: CustomEnvironment) => {
  customEnvironment = env;
};
// Resolve an error code through the custom environment, if one is registered
// and provides a message resolver; otherwise undefined.
export const getExternalSupportedErrorMessage = (
  t: TFunction,
  code: string,
  context?: Record<string, string>,
) =>
  customEnvironment && customEnvironment.getExternalSupportedErrorMessage
    ? customEnvironment.getExternalSupportedErrorMessage(t, code, context)
    : undefined;
|
// Basic Imports
import React from 'react'
import styled from '@emotion/styled'
// Echarts Imports
import * as echarts from 'echarts/core'
import {
GridComponent,
LegendComponent,
TitleComponent,
ToolboxComponent,
TooltipComponent
} from 'echarts/components'
import { LineChart } from 'echarts/charts'
import { UniversalTransition } from 'echarts/features'
import { CanvasRenderer } from 'echarts/renderers'
// Types Imports
import type { Health } from '../../../../models'
// UI Imports
import { Col, Empty, Row, Spin, TabPane, Tabs } from '@sarair/desktop/shared/ui'
import { Chart } from './Chart'
import { HealthFieldForCharts } from '../../../../models'
// Props for the Charts grid: the health records to plot and a loading flag.
interface ChartProps {
    loading: boolean
    data: Health[]
}
// Register only the echarts modules actually used (keeps the bundle small).
echarts.use([
    TitleComponent,
    ToolboxComponent,
    TooltipComponent,
    GridComponent,
    LegendComponent,
    LineChart,
    CanvasRenderer,
    UniversalTransition
])
// Grid of line charts over the health history: weight on its own row, then
// rows of 2 and 3 charts for the remaining body metrics.
export const Charts: React.FC<ChartProps> = ({ loading, data }) => {
    // NOTE(review): this early return already covers the empty case, so the
    // `!data && <Empty/>` branch below can never render — confirm intent.
    if (!data.length) return null
    return (
        <ChartsWrapper spinning={loading}>
            {!data && <Empty image={Empty.PRESENTED_IMAGE_SIMPLE} />}
            <Chart data={data} field="weight" />
            <ChartRow>
                <Col span={12}>
                    <Chart data={data} field="bmi" />
                </Col>
                <Col span={12}>
                    <Chart data={data} field="bodyFatRate" />
                </Col>
            </ChartRow>
            <ChartRow>
                <Col span={8}>
                    <Chart data={data} field="muscle" />
                </Col>
                <Col span={8}>
                    <Chart data={data} field="water" />
                </Col>
                <Col span={8}>
                    <Chart data={data} field="protein" />
                </Col>
            </ChartRow>
            <ChartRow>
                <Col span={8}>
                    <Chart data={data} field="subcutaneousFat" />
                </Col>
                <Col span={8}>
                    <Chart data={data} field="weightWithoutFat" />
                </Col>
                <Col span={8}>
                    <Chart data={data} field="skeletalMuscleRate" />
                </Col>
            </ChartRow>
        </ChartsWrapper>
    )
}
// Spinner wrapper sized to the chart area.
const ChartsWrapper = styled(Spin)`
    height: 20rem;
`
// Vertical spacing between chart rows.
const ChartRow = styled(Row)`
    padding: 1.6rem 0;
`
|
<gh_stars>0
var dir_59425e443f801f1f2fd8bbe4959a3ccf =
[
[ "CaffeAlexNet-Armnn", "dir_d698a154d517a03a06d5fad0e3e733b6.xhtml", "dir_d698a154d517a03a06d5fad0e3e733b6" ],
[ "CaffeCifar10AcrossChannels-Armnn", "dir_65ac97f38e9d3ca0585c7b96ca70cecd.xhtml", "dir_65ac97f38e9d3ca0585c7b96ca70cecd" ],
[ "CaffeInception_BN-Armnn", "dir_48a2db3a7dfd617a8e238148a0ebbd10.xhtml", "dir_48a2db3a7dfd617a8e238148a0ebbd10" ],
[ "CaffeMnist-Armnn", "dir_8a5c7c84882017634ef061fdeb3b0441.xhtml", "dir_8a5c7c84882017634ef061fdeb3b0441" ],
[ "CaffeResNet-Armnn", "dir_7b52c58ad98dc32aaa058d594604c036.xhtml", "dir_7b52c58ad98dc32aaa058d594604c036" ],
[ "CaffeSqueezeNet1_0-Armnn", "dir_71148bc6b478166b35c2d3e840e4e410.xhtml", "dir_71148bc6b478166b35c2d3e840e4e410" ],
[ "CaffeVGG-Armnn", "dir_8594d24bed2e2f1c030c76290e88a484.xhtml", "dir_8594d24bed2e2f1c030c76290e88a484" ],
[ "CaffeYolo-Armnn", "dir_4648a32206b18698904bc2ccafd6affa.xhtml", "dir_4648a32206b18698904bc2ccafd6affa" ],
[ "ExecuteNetwork", "dir_3502d64799b714c597b8fa7662494b65.xhtml", "dir_3502d64799b714c597b8fa7662494b65" ],
[ "ImageCSVFileGenerator", "dir_ed4050ecbb48c03aa3407c24a3e522b1.xhtml", "dir_ed4050ecbb48c03aa3407c24a3e522b1" ],
[ "ImageTensorGenerator", "dir_db50b4cd131a46c9b297c117b868403e.xhtml", "dir_db50b4cd131a46c9b297c117b868403e" ],
[ "ModelAccuracyTool-Armnn", "dir_6ace9d3935bcb28cfb421db2ada147b2.xhtml", "dir_6ace9d3935bcb28cfb421db2ada147b2" ],
[ "MultipleNetworksCifar10", "dir_bea2aaa5ee1b83ed83ac6ec2539c5ff9.xhtml", "dir_bea2aaa5ee1b83ed83ac6ec2539c5ff9" ],
[ "NetworkExecutionUtils", "dir_bee5dd02b9a5e046b34f7fb0b8e9850a.xhtml", "dir_bee5dd02b9a5e046b34f7fb0b8e9850a" ],
[ "OnnxMnist-Armnn", "dir_ec7a325c83f8443032f9dde4fcfaccf8.xhtml", "dir_ec7a325c83f8443032f9dde4fcfaccf8" ],
[ "OnnxMobileNet-Armnn", "dir_b7b5c8fc192b21f861d175be19c847ed.xhtml", "dir_b7b5c8fc192b21f861d175be19c847ed" ],
[ "profiling", "dir_659c7018c274c4a27f289b6765351cd5.xhtml", "dir_659c7018c274c4a27f289b6765351cd5" ],
[ "TfCifar10-Armnn", "dir_34e7e67bbeb3dbc4028ce68deefb463b.xhtml", "dir_34e7e67bbeb3dbc4028ce68deefb463b" ],
[ "TfInceptionV3-Armnn", "dir_da6a0802f12c31973b4970c3066af295.xhtml", "dir_da6a0802f12c31973b4970c3066af295" ],
[ "TfLiteInceptionV3Quantized-Armnn", "dir_874206588432fd12d1ec9d6f5b95e9e3.xhtml", "dir_874206588432fd12d1ec9d6f5b95e9e3" ],
[ "TfLiteInceptionV4Quantized-Armnn", "dir_86d6d19239ab3d558024a4bb1d9884fd.xhtml", "dir_86d6d19239ab3d558024a4bb1d9884fd" ],
[ "TfLiteMnasNet-Armnn", "dir_9cbed784c02e6a8d4c1840c9f6687c89.xhtml", "dir_9cbed784c02e6a8d4c1840c9f6687c89" ],
[ "TfLiteMobilenetQuantized-Armnn", "dir_660eb08a01451f3d7d39c9d8f2c054f4.xhtml", "dir_660eb08a01451f3d7d39c9d8f2c054f4" ],
[ "TfLiteMobileNetQuantizedSoftmax-Armnn", "dir_5b37c868d8ac40f3f76299adcfed0b6d.xhtml", "dir_5b37c868d8ac40f3f76299adcfed0b6d" ],
[ "TfLiteMobileNetSsd-Armnn", "dir_fdc95ac0b83376e0461da52976cdade9.xhtml", "dir_fdc95ac0b83376e0461da52976cdade9" ],
[ "TfLiteMobilenetV2Quantized-Armnn", "dir_1075e102905d1a36e67f4e04b4e0e2a5.xhtml", "dir_1075e102905d1a36e67f4e04b4e0e2a5" ],
[ "TfLiteResNetV2-50-Quantized-Armnn", "dir_4f82c93223384f878c3c997040612747.xhtml", "dir_4f82c93223384f878c3c997040612747" ],
[ "TfLiteResNetV2-Armnn", "dir_a9bacd355dfe679d06247ec5361689c1.xhtml", "dir_a9bacd355dfe679d06247ec5361689c1" ],
[ "TfLiteVGG16Quantized-Armnn", "dir_62131064c86aabd7a3b4ada003cd03e4.xhtml", "dir_62131064c86aabd7a3b4ada003cd03e4" ],
[ "TfMnist-Armnn", "dir_841bcd8b9ede33180006c1bc01d7965d.xhtml", "dir_841bcd8b9ede33180006c1bc01d7965d" ],
[ "TfMobileNet-Armnn", "dir_b114117bbe88e96947e0d531db08a441.xhtml", "dir_b114117bbe88e96947e0d531db08a441" ],
[ "TfResNext_Quantized-Armnn", "dir_0e467354ced3595f0aa9578c498f5e57.xhtml", "dir_0e467354ced3595f0aa9578c498f5e57" ],
[ "CaffePreprocessor.cpp", "_caffe_preprocessor_8cpp.xhtml", "_caffe_preprocessor_8cpp" ],
[ "CaffePreprocessor.hpp", "_caffe_preprocessor_8hpp.xhtml", "_caffe_preprocessor_8hpp" ],
[ "Cifar10Database.cpp", "_cifar10_database_8cpp.xhtml", "_cifar10_database_8cpp" ],
[ "Cifar10Database.hpp", "_cifar10_database_8hpp.xhtml", [
[ "Cifar10Database", "class_cifar10_database.xhtml", "class_cifar10_database" ]
] ],
[ "ClassifierTestCaseData.hpp", "_classifier_test_case_data_8hpp.xhtml", [
[ "ClassifierTestCaseData", "class_classifier_test_case_data.xhtml", "class_classifier_test_case_data" ]
] ],
[ "DeepSpeechV1Database.hpp", "_deep_speech_v1_database_8hpp.xhtml", null ],
[ "DeepSpeechV1InferenceTest.hpp", "_deep_speech_v1_inference_test_8hpp.xhtml", null ],
[ "ImagePreprocessor.cpp", "_image_preprocessor_8cpp.xhtml", null ],
[ "ImagePreprocessor.hpp", "_image_preprocessor_8hpp.xhtml", "_image_preprocessor_8hpp" ],
[ "InferenceModel.hpp", "_inference_model_8hpp.xhtml", "_inference_model_8hpp" ],
[ "InferenceTest.cpp", "_inference_test_8cpp.xhtml", "_inference_test_8cpp" ],
[ "InferenceTest.hpp", "_inference_test_8hpp.xhtml", "_inference_test_8hpp" ],
[ "InferenceTest.inl", "_inference_test_8inl.xhtml", "_inference_test_8inl" ],
[ "InferenceTestImage.cpp", "_inference_test_image_8cpp.xhtml", "_inference_test_image_8cpp" ],
[ "InferenceTestImage.hpp", "_inference_test_image_8hpp.xhtml", "_inference_test_image_8hpp" ],
[ "LstmCommon.hpp", "_lstm_common_8hpp.xhtml", null ],
[ "MnistDatabase.cpp", "_mnist_database_8cpp.xhtml", "_mnist_database_8cpp" ],
[ "MnistDatabase.hpp", "_mnist_database_8hpp.xhtml", [
[ "MnistDatabase", "class_mnist_database.xhtml", "class_mnist_database" ]
] ],
[ "MobileNetSsdDatabase.hpp", "_mobile_net_ssd_database_8hpp.xhtml", null ],
[ "MobileNetSsdInferenceTest.hpp", "_mobile_net_ssd_inference_test_8hpp.xhtml", null ],
[ "ObjectDetectionCommon.hpp", "_object_detection_common_8hpp.xhtml", null ],
[ "YoloDatabase.cpp", "_yolo_database_8cpp.xhtml", "_yolo_database_8cpp" ],
[ "YoloDatabase.hpp", "_yolo_database_8hpp.xhtml", "_yolo_database_8hpp" ],
[ "YoloInferenceTest.hpp", "_yolo_inference_test_8hpp.xhtml", "_yolo_inference_test_8hpp" ]
]; |
#!/bin/bash
# Regenerate the Xcode project with xcake, then install CocoaPods dependencies.
xcake make && pod install
|
###############################################################################
# Copyright 2017 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
#!/bin/sh
# Build and install EZMQ into the local Maven repository, forcing snapshot
# updates (-U) and skipping tests for a faster packaging run.
mvn clean install -U -Dmaven.test.skip=true
echo "EZMQ build done"
|
#!/bin/bash
# Create a cat via the deployed IBM Cloud Functions REST API.
# Usage: $0 <name of cat> <color of cat>
if [ "$#" -ne 2 ]; then
echo -e "Usage: $0 [name of cat] [color of cat]"
# Exit non-zero on usage error (the original exited 0).
exit 1
fi
# Last row of `ibmcloud fn api list`, column 5, is the API base URL.
CAT_API_URL=$(ibmcloud fn api list | tail -1 | awk '{print $5}')
# $1/$2 are double-quoted inside the JSON payload so names containing spaces
# survive word splitting; the URL is quoted for the same reason.
curl -X POST \
-H "Content-Type: application/json" \
-d '{"name":"'"$1"'","color":"'"$2"'"}' \
"$CAT_API_URL"
|
def validateCreditCardNumber(number):
    """Return True if `number` passes the Luhn checksum.

    Spaces are ignored, so formatted inputs like '4539 7431 ...' validate;
    the original crashed with ValueError on int(' ') for such input.
    """
    # Drop grouping spaces, then process digits right-to-left.
    rev_number = number.replace(' ', '')[::-1]
    sum_of_digits = 0
    for i in range(len(rev_number)):
        digit = int(rev_number[i])
        # Double every second digit counted from the right.
        if i % 2 != 0:
            digit *= 2
        # A doubled two-digit result contributes the sum of its digits.
        if digit > 9:
            sum_of_digits += digit // 10 + digit % 10
        else:
            sum_of_digits += digit
    # Valid iff the total is a multiple of 10.
    return sum_of_digits % 10 == 0

# Print the Luhn check result for a sample (spaced) card number.
# (The original trailing '// Output: True' line was not valid Python syntax.)
print(validateCreditCardNumber('45397431 85747991'))
from random import shuffle

def shuffle_list(arr):
    """Return a new list with the elements of `arr` in random order.

    random.shuffle works in place, so a copy is shuffled to avoid mutating
    the caller's list (the original reordered the input as a side effect).
    """
    shuffled = list(arr)
    shuffle(shuffled)
    return shuffled

words = ["apple", "banana", "cucumber", "dates"]
print(shuffle_list(words))
#!/usr/bin/env bash
# Copyright 2018 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# Generate the Rust gRPC SDK sources from sdk.proto, then prepend the license
# boilerplate to each generated file.
googleapis=/go/src/agones.dev/agones/proto/googleapis
cd /go/src/agones.dev/agones
# Fix: the original left a trailing backslash on the --plugin line, which made
# the following `cat` command (and its arguments) part of the protoc call.
protoc \
-I ${googleapis} -I . sdk.proto \
--rust_out=sdks/rust/src/grpc --grpc_out=sdks/rust/src/grpc \
--plugin=protoc-gen-grpc=`which grpc_rust_plugin`
# Prepend boilerplate, then move the combined files back into place.
cat ./build/boilerplate.go.txt ./sdks/rust/src/grpc/sdk.rs >> ./sdk.rs
cat ./build/boilerplate.go.txt ./sdks/rust/src/grpc/sdk_grpc.rs >> ./sdk_grpc.rs
mv ./sdk.rs ./sdks/rust/src/grpc/
mv ./sdk_grpc.rs ./sdks/rust/src/grpc/
package kr.co.gardener.admin.service.object.impl;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import kr.co.gardener.admin.dao.object.ClassifyDao;
import kr.co.gardener.admin.model.object.Classify;
import kr.co.gardener.admin.model.object.list.ClassifyList;
import kr.co.gardener.admin.service.object.ClassifyService;
/**
 * Service layer for Classify records. getList() buckets rows into
 * top/mid/bot lists; every other method is a thin pass-through to
 * ClassifyDao.
 */
@Service
public class ClassifyServiceImpl implements ClassifyService {
    @Autowired
    ClassifyDao classifyDao;
    /**
     * Load all classification rows and distribute them into the top, mid,
     * and bot lists according to their table discriminator.
     */
    @Override
    public ClassifyList getList() {
        List<Classify> list = classifyDao.getList();
        ClassifyList classifyList = new ClassifyList();
        //1 = top, 2=mid, 3=bot
        for(Classify classify : list) {
            switch (classify.getTable()) {
            case 1:
                classifyList.getTopClass().add(classify);
                break;
            case 2:
                classifyList.getMidClass().add(classify);
                break;
            case 3:
                classifyList.getBotClass().add(classify);
                break;
            }
        }
        return classifyList;
    }
    // --- DAO pass-throughs: add/update/delete per classification level ---
    @Override
    public void topAdd(Classify item) {
        classifyDao.topAdd(item);
    }
    @Override
    public void midAdd(Classify item) {
        classifyDao.midAdd(item);
    }
    @Override
    public void botAdd(Classify item) {
        classifyDao.botAdd(item);
    }
    @Override
    public void topUpdate(Classify item) {
        classifyDao.topUpdate(item);
    }
    @Override
    public void midUpdate(Classify item) {
        classifyDao.midUpdate(item);
    }
    @Override
    public void botUpdate(Classify item) {
        classifyDao.botUpdate(item);
    }
    @Override
    public void topDelete(int primaryId) {
        classifyDao.topDelete(primaryId);
    }
    @Override
    public void midDelete(int primaryId) {
        classifyDao.midDelete(primaryId);
    }
    @Override
    public void botDelete(int primaryId) {
        classifyDao.botDelete(primaryId);
    }
    // Fetch the flat product-category listing from the DAO.
    @Override
    public List<kr.co.gardener.admin.model.object.productCategoryList> productCategoryList() {
        return classifyDao.productCategoryList();
    }
}
|
// Copyright 2021 Keyfactor
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package signer
import (
"context"
"fmt"
"strings"
"time"
"github.com/Keyfactor/k8s-proxy/pkg/keyfactor"
klogger "github.com/Keyfactor/k8s-proxy/pkg/logger"
capi "k8s.io/api/certificates/v1beta1"
certificates "k8s.io/api/certificates/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
	// hanlderLog is the scoped logger for CSR handling.
	// NOTE(review): "hanlder" is a typo for "handler"; the name is used
	// throughout this file, so renaming is left to a coordinated refactor.
	hanlderLog = klogger.Register("CertificateSigner-Handler")
)
// handleCSR signs an approved Kubernetes CertificateSigningRequest through
// the Keyfactor client and writes the issued certificate chain back to the
// CSR's status. CSRs that are not yet approved are skipped silently; CSRs
// whose signerName is outside the Keyfactor scope are rejected with an error.
func (c *CertificateController) handleCSR(csr *capi.CertificateSigningRequest) error {
	if !IsCertificateRequestApproved(csr) {
		return nil
	}
	hanlderLog.Infof("Request Certificate - signerName: %s", *csr.Spec.SignerName)
	// Only sign requests addressed to the Keyfactor signer scope.
	if !strings.Contains(*csr.Spec.SignerName, KeyfactorSignerNameScope) {
		hanlderLog.Errorf("Request Certificate - out of signer name scope: %s", *csr.Spec.SignerName)
		return fmt.Errorf("Invalid certificate SignerName: %s", *csr.Spec.SignerName)
	}
	// Collected for logging only; the usages are not forwarded to CSRSign.
	var usages []string
	for _, usage := range csr.Spec.Usages {
		usages = append(usages, string(usage))
	}
	hanlderLog.Infof("Request Certificate - usages: %v", usages)
	// Bound the signing round-trip to 15 seconds.
	timeoutContext, cancel := context.WithTimeout(context.TODO(), 15*time.Second)
	defer cancel()
	// CSR annotations carry extra metadata (cluster, pod, service identity).
	csrMetadata := extractMetadataFromK8SCSRAPI(csr.GetObjectMeta().GetAnnotations())
	hanlderLog.Infof("Request Certificate - extra metadata: %#v", csrMetadata)
	res, err := c.keyfactorClient.CSRSign(timeoutContext, string(csr.Spec.Request), csrMetadata, false)
	if err != nil {
		hanlderLog.Errorf("cannot signing certificate from K8S CSR API: %v", err)
		return fmt.Errorf("cannot signing certificate from K8S CSR API: %v", err)
	}
	// Concatenate the chain into a single PEM blob and publish it on the CSR.
	certChain := res.CertificateInformation.Certificates
	csr.Status.Certificate = []byte(strings.Join(certChain, ""))
	_, err = c.kubeClient.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(context.TODO(), csr, v1.UpdateOptions{})
	if err != nil {
		hanlderLog.Errorf("error updating signature for csr: %v", err)
		return fmt.Errorf("error updating signature for csr: %v", err)
	}
	return nil
}
// extractMetadataFromK8SCSRAPI maps well-known CSR annotation keys
// (ClusterID, ServiceName, PodIP, PodName, PodNamespace, TrustDomain) onto a
// keyfactor.CSRMetadata. Unrecognized keys are logged and otherwise ignored.
func extractMetadataFromK8SCSRAPI(extra map[string]string) *keyfactor.CSRMetadata {
	meta := &keyfactor.CSRMetadata{}
	for key, value := range extra {
		hanlderLog.Infof("Meta: %s - %v", key, value)
		switch key {
		case "ClusterID":
			meta.ClusterID = value
		case "ServiceName":
			meta.ServiceName = value
		case "PodIP":
			meta.PodIP = value
		case "PodName":
			meta.PodName = value
		case "PodNamespace":
			meta.PodNamespace = value
		case "TrustDomain":
			meta.TrustDomain = value
		}
	}
	return meta
}
// IsCertificateRequestApproved returns true if a certificate request has the
// "Approved" condition and no "Denied" conditions; false otherwise.
func IsCertificateRequestApproved(csr *certificates.CertificateSigningRequest) bool {
	approved, denied := getCertApprovalCondition(&csr.Status)
	if denied {
		return false
	}
	return approved
}
// getCertApprovalCondition reports whether the CSR status carries an
// "Approved" and/or a "Denied" condition.
func getCertApprovalCondition(status *certificates.CertificateSigningRequestStatus) (approved bool, denied bool) {
	for _, cond := range status.Conditions {
		switch cond.Type {
		case certificates.CertificateApproved:
			approved = true
		case certificates.CertificateDenied:
			denied = true
		}
	}
	return approved, denied
}
|
import {NgModule} from '@angular/core';
import {UserIdleConfig} from './models/user-idle-config.model';
// Angular module wrapper around the user-idle configuration.
@NgModule({
  imports: []
})
export class WilyUserIdleModule {
  // Provide the module together with the given idle configuration so the
  // UserIdleConfig token is injectable app-wide.
  // NOTE(review): the return should be typed ModuleWithProviders<WilyUserIdleModule>
  // (required by Ivy); adding it needs an extra @angular/core import — confirm.
  static forRoot(config: UserIdleConfig) {
    return {
      ngModule: WilyUserIdleModule,
      providers: [
        { provide: UserIdleConfig, useValue: config }
      ]
    };
  }
}
|
/*
* events_ProcessExecutionEvent.cpp
*/
#include <string>
#include <sstream>
#include <ostream>
#include "text/text_StringConversion.h"
#include "dbtypes/dbtype_Id.h"
#include "events/events_ProcessExecutionEvent.h"
namespace mutgos
{
namespace events
{
// ----------------------------------------------------------------------
std::string ProcessExecutionEvent::to_string() const
{
    std::ostringstream output;
    // Begin with the event type tag and the base Event fields, then
    // append each process-specific attribute on its own line, in the
    // same order as before.
    output << "ProcessExecutionEvent" << std::endl
           << Event::to_string();
    output << "PID: " << text::to_string(process_id) << std::endl;
    output << "EXE ID: " << executable_id.to_string(true) << std::endl;
    output << "Native: " << native_executable << std::endl;
    output << "Owner ID: " << owner_id.to_string(true) << std::endl;
    output << "Process name: " << process_name << std::endl;
    output << "Process state: " << text::to_string(process_state) << std::endl;
    return output.str();
}
}
} |
package com.zte.zakker.automation.provider;
import android.content.Context;
import android.support.v4.app.Fragment;
import com.alibaba.android.arouter.facade.annotation.Route;
import com.zte.zakker.common.provider.IAutomationProvider;
import com.zte.zakker.automation.fragment.MainAutomationFragment;
/**
 * Description: ARouter provider that exposes the automation module's main
 * fragment to other modules without a compile-time dependency.
 * (Original header said "FindProvider" — presumably copy-pasted from a
 * sibling module; this class serves the automation module.)<br>
 * Author: mxdl<br>
 * Date: 2019/5/23<br>
 * Version: V1.0.0<br>
 * Update: <br>
 */
@Route(path = "/automation/main",name = "自动化")
public class AutomationProvider implements IAutomationProvider {
    // Returns a fresh instance of the automation module's entry fragment.
    @Override
    public Fragment getMainFindFragment() {
        return MainAutomationFragment.newInstance();
    }
    // No initialization needed; required by the ARouter IProvider contract.
    @Override
    public void init(Context context) {
    }
}
|
def generate_random_numbers(n):
    """Generate n unique random numbers in the range of 1-1000 (inclusive).

    Args:
        n: Number of distinct values to draw. Must be between 0 and 1000;
           ``random.sample`` raises ValueError otherwise (the original
           rejection loop would spin forever instead).

    Returns:
        A list of n distinct integers, each in [1, 1000].
    """
    # random.sample draws without replacement, so values are unique by
    # construction. Note the original used randint(1, 1001), which is
    # inclusive on both ends and so could return 1001, contradicting the
    # documented 1-1000 range; range(1, 1001) is exactly 1..1000.
    return random.sample(range(1, 1001), n)
# Reformat C++ sources in place with clang-format.
# lib/ and tool/ get both .cpp and .h files; test/ gets only .cpp files,
# exactly as the original invocations did (test headers were not formatted —
# presumably intentional; confirm before extending).
for dir in lib tool; do
  find "$dir" \( -iname '*.cpp' -o -iname '*.h' \) -exec clang-format -i {} \;
done
find test -iname '*.cpp' -exec clang-format -i {} \;
|
// gcd computes the greatest common divisor of two integers using the
// Euclidean algorithm.
//
// Fixes over the original: uses strict !== comparison; takes absolute
// values so the result is always non-negative (the original returned a
// negative value when both inputs were negative, e.g. gcd(-4, -6) === -2);
// drops the unnecessary min/max bookkeeping — the first Euclid iteration
// orders the operands automatically.
const gcd = (num1, num2) => {
  let a = Math.abs(num1);
  let b = Math.abs(num2);
  while (b !== 0) {
    // Invariant: gcd(a, b) is preserved by (a, b) -> (b, a mod b).
    [a, b] = [b, a % b];
  }
  return a;
};
// Quick demonstration: the greatest common divisor of 10 and 25 is 5.
const result = gcd(10, 25);
console.log(result); // 5
#!/bin/bash
# Copyright 2018 Mirantis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Strict mode: abort on errors (including inside pipelines), on unset
# variables, and propagate ERR traps into functions/subshells.
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
# macOS lacks GNU readlink -f; emulate it with perl's Cwd::abs_path.
if [ $(uname) = Darwin ]; then
  readlinkf(){ perl -MCwd -e 'print Cwd::abs_path shift' "$1";}
else
  readlinkf(){ readlink -f "$1"; }
fi
# Absolute directory of this script, following symlinks.
DIND_ROOT="$(cd $(dirname "$(readlinkf "${BASH_SOURCE}")"); pwd)"
docker_info_output="$(docker info)"
RUN_ON_BTRFS_ANYWAY="${RUN_ON_BTRFS_ANYWAY:-}"
if [[ ! ${RUN_ON_BTRFS_ANYWAY} ]] && echo "$docker_info_output"| grep -q '^ *Storage Driver: btrfs'; then
  echo "ERROR: Docker is using btrfs storage driver which is unsupported by kubeadm-dind-cluster" >&2
  echo "Please refer to the documentation for more info." >&2
  echo "Set RUN_ON_BTRFS_ANYWAY to non-empty string to continue anyway." >&2
  exit 1
fi
# In case of linuxkit / moby linux, -v will not work so we can't
# mount /lib/modules and /boot. Also we'll be using localhost
# to access the apiserver.
using_linuxkit=
if ! echo "$docker_info_output"|grep -s '^ *Operating System: .*Docker for Windows' > /dev/null 2>&1 ; then
  if echo "$docker_info_output"|grep -s '^ *Kernel Version: .*-moby$' >/dev/null 2>&1 ||
     echo "$docker_info_output"|grep -s '^ *Kernel Version: .*-linuxkit' > /dev/null 2>&1 ; then
    using_linuxkit=1
  fi
fi
# Determine when using Linux and docker daemon running locally
using_local_linuxdocker=
if [[ $(uname) == Linux && -z ${DOCKER_HOST:-} ]]; then
  using_local_linuxdocker=1
fi
# Embedded build configuration: pin the k8s version, image digest and the
# kubeadm-dind-cluster commit this script was generated from.
EMBEDDED_CONFIG=y;DOWNLOAD_KUBECTL=y;DIND_K8S_VERSION=v1.13;DIND_IMAGE_DIGEST=sha256:546e1ea92177714fb62c99e1bfb9c9e03c87bb82a8976845dac139d9e6607612;DIND_COMMIT=0456fa51e822d4df1f3b4909f9d4ad797cfecb7b
# dind::localhost provides the local host IP based on the address family used for service subnet.
function dind::localhost() {
  if [[ ${SERVICE_NET_MODE} = "ipv6" ]]; then
    echo '[::1]'
  else
    echo '127.0.0.1'
  fi
}
# dind::family-for indicates whether the CIDR or IP is for an IPv6 or IPv4 family.
function dind::family-for {
  local addr=$1
  # Any colon implies IPv6; dotted-quad addresses never contain one.
  if [[ "$addr" = *":"* ]]; then
    echo "ipv6"
  else
    echo "ipv4"
  fi
}
# dind::cluster-suffix builds a suffix used for resources, based on the DIND_LABEL.
function dind::cluster-suffix {
  if [ "$DIND_LABEL" != "$DEFAULT_DIND_LABEL" ]; then
    echo "-${DIND_LABEL}"
  else
    echo ''
  fi
}
# dind::net-name is the docker network name for this cluster.
function dind::net-name {
  echo "kubeadm-dind-net$( dind::cluster-suffix )"
}
# dind::add-cluster will inject the cluster ID to the IP address. For IPv4, it is
# assumed that the IP is a /24 with the third part of the address available for cluster ID.
# For IPv6, it is assumed that there is enough space for the cluster to be added, and the
# cluster ID will be added to the 16 bits before the double colon. For example:
#
# 10.192.0.0/24 => 10.192.5.0/24
# fd00:77:20::/64 => fd00:77:20:5::/64
#
# This function is intended to be used for management networks.
#
# TODO: Validate that there is enough space for cluster ID.
# TODO: For IPv6 could get fancy and handle case where cluster ID is placed in upper 8 bits of hextet
# TODO: Consider if want to do /16 for IPv4 management subnet.
#
function dind::add-cluster {
  local cidr=$1
  local ip_mode=$2
  if [[ ${ip_mode} = "ipv4" ]]; then
    echo ${cidr} | sed "s/^\([0-9]*\.[0-9]*\.\).*\/24$/\1${CLUSTER_ID}.0\/24/"
  else # IPv6
    echo ${cidr} | sed "s/^\(.*\)\(\:\:\/[0-9]*\)$/\1:${CLUSTER_ID}\2/"
  fi
}
# dind::get-and-validate-cidrs takes a list of CIDRs and validates them based on the ip
# mode, returning them. For IPv4 only and IPv6 only modes, only one CIDR is expected. For
# dual stack, two CIDRS are expected. It verifies that the CIDRs are the right family and
# will use the provided defaults, when CIDRs are missing. For dual-stack, the IPv4 address
# will be first.
#
# For the management network, the cluster ID will be injected into the CIDR. Also, if no
# MGMT_CIDRS value is specified, but the legacy DIND_SUBNET/DIND_SUBNET_SIZE is provided,
# that will be used for the (first) CIDR.
#
# NOTE: It is expected that the CIDR size is /24 for IPv4 management networks.
#
# For pod CIDRs, the size will be increased by 8, to leave room for the node ID to be
# injected into the address.
#
# NOTE: For IPv4, the pod size is expected to be /16 -> /24 in usage.
#
function dind::get-and-validate-cidrs {
  # Split comma/space separated CIDR lists into arrays.
  IFS=', ' read -r -a cidrs <<< "$1"
  IFS=', ' read -r -a defaults <<< "$2"
  local is_mgmt=$3
  case ${IP_MODE} in
    ipv4)
      case ${#cidrs[@]} in
        0)
          cidrs[0]="${defaults[0]}"
          ;;
        1)
          ;;
        *)
          echo "ERROR! More than one CIDR provided '$1'"
          exit 1
          ;;
      esac
      if [[ $( dind::family-for "${cidrs[0]}" ) != "ipv4" ]]; then
        echo "ERROR! CIDR must be IPv4 value"
        exit 1
      fi
      if [[ ${is_mgmt} = true ]]; then
        cidrs[0]="$( dind::add-cluster "${cidrs[0]}" "${IP_MODE}" )"
      fi
      ;;
    ipv6)
      case ${#cidrs[@]} in
        0)
          cidrs[0]="${defaults[0]}"
          ;;
        1)
          ;;
        *)
          echo "ERROR! More than one CIDR provided '$1'"
          exit 1
          ;;
      esac
      if [[ $( dind::family-for "${cidrs[0]}" ) != "ipv6" ]]; then
        echo "ERROR! CIDR must be IPv6 value"
        exit 1
      fi
      if [[ ${is_mgmt} = true ]]; then
        cidrs[0]="$( dind::add-cluster "${cidrs[0]}" "${IP_MODE}" )"
      fi
      ;;
    dual-stack)
      case ${#cidrs[@]} in
        0)
          cidrs[0]="${defaults[0]}"
          cidrs[1]="${defaults[1]}"
          ;;
        1)
          if [[ $( dind::family-for "${cidrs[0]}" ) = "ipv6" ]]; then
            cidrs[1]=${cidrs[0]}
            cidrs[0]="${defaults[0]}"  # Assuming first default is a V4 address
          else
            cidrs[1]="${defaults[1]}"
          fi
          ;;
        2)
          # Force ordering to have V4 address first
          if [[ $( dind::family-for "${cidrs[0]}" ) = "ipv6" ]]; then
            local temp=${cidrs[0]}
            cidrs[0]=${cidrs[1]}
            cidrs[1]=${temp}
          fi
          ;;
        *)
          echo "ERROR! More than two CIDRs provided '$1'"
          exit 1
          ;;
      esac
      # Require exactly one CIDR of each family.
      local have_v4=""
      local have_v6=""
      for cidr in ${cidrs[@]}; do
        if [[ $( dind::family-for "${cidr}" ) = "ipv6" ]]; then
          have_v6=1
        else
          have_v4=1
        fi
      done
      if [[ -z ${have_v4} ]]; then
        echo "ERROR! Missing IPv4 CIDR in '$1'"
        exit 1
      fi
      if [[ -z ${have_v6} ]]; then
        echo "ERROR! Missing IPv6 CIDR in '$1'"
        exit 1
      fi
      if [[ ${is_mgmt} = true ]]; then
        cidrs[0]="$( dind::add-cluster "${cidrs[0]}" "${IP_MODE}" )"
        cidrs[1]="$( dind::add-cluster "${cidrs[1]}" "${IP_MODE}" )"
      fi
      ;;
  esac
  echo "${cidrs[@]}"
}
# dind::make-ip-from-cidr strips off the slash and size, and appends the
# interface part to the prefix to form an IP. For IPv4, it strips off the
# fourth part of the prefix, so that it can be replaced. It assumes that the
# resulting prefix will be of sufficient size. It also will use hex for the
# appended part for IPv6, and decimal for IPv4.
#
# fd00:20::/64 -> fd00:20::a
# 10.96.0.0/12 -> 10.96.0.10
#
function dind::make-ip-from-cidr {
  prefix="$(echo $1 | sed 's,/.*,,')"
  if [[ $( dind::family-for ${prefix} ) == "ipv4" ]]; then
    printf "%s%d" $( echo ${prefix} | sed 's/0$//' ) $2
  else
    printf "%s%x" ${prefix} $2
  fi
}
# dind::add-cluster-id-and-validate-nat64-prefix will modify the IPv4 mapping
# subnet prefix, by adding the cluster ID (default 0) to the second octet.
# It will produce an error, if the prefix is not in the 10.0.0.0/8 or
# 172.16.0.0/12 private networks.
function dind::add-cluster-id-and-validate-nat64-prefix {
  local parts
  IFS="." read -a parts <<<${NAT64_V4_SUBNET_PREFIX}
  if [[ ${#parts[@]} -ne 2 ]]; then
    echo "ERROR! NAT64_V4_SUBNET_PREFIX must be two octets (have '${NAT64_V4_SUBNET_PREFIX}')"
    exit 1
  fi
  (( parts[1]+=${CLUSTER_ID} ))
  NAT64_V4_SUBNET_PREFIX="${parts[0]}.${parts[1]}"
  echo "Added cluster ID offset (${CLUSTER_ID}) to NAT64_V4_SUBNET_PREFIX giving prefix '${NAT64_V4_SUBNET_PREFIX}'"
  if [[ ${parts[0]} -eq 10 ]]; then
    if [[ ${parts[1]} > 253 ]]; then
      echo "ERROR! NAT64_V4_SUBNET_PREFIX is too large for 10.0.0.0/8 private net"
      exit 1
    fi
  elif [[ ${parts[0]} -eq 172 ]]; then
    if [[ ${parts[1]} -lt 16 || ${parts[1]} -gt 31 ]]; then
      echo "ERROR! NAT64_V4_SUBNET_PREFIX is outside of range for 172.16.0.0/12 private net"
      exit 1
    fi
  else
    echo "ERROR! NAT64_V4_SUBNET_PREFIX is not in 10.0.0.0/8 or 172.16.0.0/12 private networks"
    exit 1
  fi
  echo "Using NAT64 V4 mapping network prefix: ${NAT64_V4_SUBNET_PREFIX}"
}
# START OF PROCESSING...
IP_MODE="${IP_MODE:-ipv4}"  # ipv4, ipv6, dual-stack
# FUTURE: Once dual-stack support is released, check K8s version, and reject for older versions.
if [[ ! ${EMBEDDED_CONFIG:-} ]]; then
  source "${DIND_ROOT}/config.sh"
fi
# Multicluster support
# Users can specify a cluster ID number from 1..254, represented as a string.
# This will be used to form resource names "cluster-#", and will be used in the
# management subnet to give unique networks for each cluster. If the cluster ID
# is not specified, or zero, it will be considered a single cluster or the first
# in the multi-cluster. This is the recommended usage.
#
# For legacy support, the user can specify DIND_LABEL, which will be used in the
# resource names. If a cluster ID is specified (a hybrid case, where people are
# using the new method, but want custom names), the resourse name will have the
# suffix "-#" with the cluster ID. If no cluster ID is specified (for backward
# compatibility), then the resource name will be just the DIND_LABEL, and a pseudo-
# random number from 1..13 will be generated for the cluster ID to be used in
# management network. The range is limited, because, in IPv6 mode, the cluster ID
# is used in the NAT64 V4 subnet prefix, which must be in a private network.
# The default is 172.18, so the cluster ID cannot be larger than 13 to guarantee
# a valid value.
#
# To get around that limitation, you can set the cluster ID, in addition to the
# DIND_LABEL, and optionally, change the NAT64_V4_SUBNET_PREFIX value.
#
DEFAULT_DIND_LABEL='wk88.kubeadm_dind_cluster_runtime'
if [[ -z ${DIND_LABEL+x} ]]; then  # No legacy DIND_LABEL set
  if [[ -z ${CLUSTER_ID+x} ]]; then  # No cluster ID set
    DIND_LABEL=${DEFAULT_DIND_LABEL}  # Single cluster mode
    CLUSTER_ID="0"
  else  # Have cluster ID
    if [[ ${CLUSTER_ID} = "0" ]]; then
      DIND_LABEL=${DEFAULT_DIND_LABEL}  # Single cluster mode or first cluster of multi-cluster
    else
      DIND_LABEL="cluster-${CLUSTER_ID}"  # Multi-cluster
    fi
  fi
else  # Legacy DIND_LABEL set for multi-cluster
  if [[ -z ${CLUSTER_ID+x} ]]; then  # No cluster ID set, make one from 1..13, but don't use in resource names
    CLUSTER_ID="$(( ($RANDOM % 12) + 1 ))"
  else
    if [[ ${CLUSTER_ID} = "0" ]]; then
      CLUSTER_ID="$(( ($RANDOM % 12) + 1 ))"  # Force a pseudo-random cluster for additional legacy cluster
    else
      DIND_LABEL="${DIND_LABEL}-${CLUSTER_ID}"
    fi
  fi
fi
CNI_PLUGIN="${CNI_PLUGIN:-bridge}"
GCE_HOSTED="${GCE_HOSTED:-}"
DIND_ALLOW_AAAA_USE="${DIND_ALLOW_AAAA_USE:-}"  # Default is to use DNS64 always for IPv6 mode
KUBE_ROUTER_VERSION="${KUBE_ROUTER_VERSION:-v0.2.0}"
# Use legacy DIND_SUBNET/DIND_SUBNET_SIZE, only if MGMT_CIDRS is not set.
legacy_mgmt_cidr=""
if [[ ${DIND_SUBNET:-} && ${DIND_SUBNET_SIZE:-} ]]; then
  legacy_mgmt_cidr="${DIND_SUBNET}/${DIND_SUBNET_SIZE}"
fi
# Per-mode defaults for the management, service, and pod networks.
if [[ ${IP_MODE} = "dual-stack" ]]; then
  mgmt_net_defaults="10.192.0.0/24, fd00:20::/64"
  KUBE_RSYNC_ADDR="${KUBE_RSYNC_ADDR:-::1}"
  SERVICE_CIDR="${SERVICE_CIDR:-fd00:30::/110}"  # Will default to IPv6 service net family
  pod_net_defaults="10.244.0.0/16, fd00:40::/72"
  USE_HAIRPIN="${USE_HAIRPIN:-true}"  # Default is to use hairpin for dual-stack
  DIND_ALLOW_AAAA_USE=true  # Forced, so can access external hosts via IPv6
  if [[ ${DIND_ALLOW_AAAA_USE} && ${GCE_HOSTED} ]]; then
    echo "ERROR! GCE does not support use of IPv6 for external addresses - aborting."
    exit 1
  fi
elif [[ ${IP_MODE} = "ipv6" ]]; then
  mgmt_net_defaults="fd00:20::/64"
  KUBE_RSYNC_ADDR="${KUBE_RSYNC_ADDR:-::1}"
  SERVICE_CIDR="${SERVICE_CIDR:-fd00:30::/110}"
  pod_net_defaults="fd00:40::/72"
  USE_HAIRPIN="${USE_HAIRPIN:-true}"  # Default is to use hairpin for IPv6
  if [[ ${DIND_ALLOW_AAAA_USE} && ${GCE_HOSTED} ]]; then
    echo "ERROR! GCE does not support use of IPv6 for external addresses - aborting."
    exit 1
  fi
else  # IPv4 mode
  mgmt_net_defaults="10.192.0.0/24"
  KUBE_RSYNC_ADDR="${KUBE_RSYNC_ADDR:-127.0.0.1}"
  SERVICE_CIDR="${SERVICE_CIDR:-10.96.0.0/12}"
  pod_net_defaults="10.244.0.0/16"
  USE_HAIRPIN="${USE_HAIRPIN:-false}"  # Disabled for IPv4, as issue with Virtlet networking
  if [[ ${DIND_ALLOW_AAAA_USE} ]]; then
    echo "WARNING! The DIND_ALLOW_AAAA_USE option is for IPv6 mode - ignoring setting."
    DIND_ALLOW_AAAA_USE=
  fi
  if [[ ${CNI_PLUGIN} = "calico" || ${CNI_PLUGIN} = "calico-kdd" ]]; then
    pod_net_defaults="192.168.0.0/16"
  fi
fi
IFS=' ' read -r -a mgmt_net_cidrs <<<$( dind::get-and-validate-cidrs "${MGMT_CIDRS:-${legacy_mgmt_cidr}}" "${mgmt_net_defaults[@]}" true )
REMOTE_DNS64_V4SERVER="${REMOTE_DNS64_V4SERVER:-8.8.8.8}"
if [[ ${IP_MODE} == "ipv6" ]]; then
  # Uses local DNS64 container
  dns_server="$( dind::make-ip-from-cidr ${mgmt_net_cidrs[0]} 0x100 )"
  DNS64_PREFIX="${DNS64_PREFIX:-fd00:10:64:ff9b::}"
  DNS64_PREFIX_SIZE="${DNS64_PREFIX_SIZE:-96}"
  DNS64_PREFIX_CIDR="${DNS64_PREFIX}/${DNS64_PREFIX_SIZE}"
  LOCAL_NAT64_SERVER="$( dind::make-ip-from-cidr ${mgmt_net_cidrs[0]} 0x200 )"
  NAT64_V4_SUBNET_PREFIX="${NAT64_V4_SUBNET_PREFIX:-172.18}"
  dind::add-cluster-id-and-validate-nat64-prefix
else
  dns_server="${REMOTE_DNS64_V4SERVER}"
fi
SERVICE_NET_MODE="$( dind::family-for ${SERVICE_CIDR} )"
DNS_SVC_IP="$( dind::make-ip-from-cidr ${SERVICE_CIDR} 10 )"
ETCD_HOST="${ETCD_HOST:-$( dind::localhost )}"
IFS=' ' read -r -a pod_net_cidrs <<<$( dind::get-and-validate-cidrs "${POD_NETWORK_CIDR:-}" "${pod_net_defaults[@]}" false )
declare -a pod_prefixes
declare -a pod_sizes
# Extract the prefix and size from the provided pod CIDR(s), based on the IP mode of each. The
# size will be increased by 8, to make room for the node ID to be added to the prefix later.
# Bridge and PTP plugins can process IPv4 and IPv6 pod CIDRs, other plugins must be IPv4 only.
for pod_cidr in "${pod_net_cidrs[@]}"; do
  if [[ $( dind::family-for "${pod_cidr}" ) = "ipv4" ]]; then
    actual_size=$( echo ${pod_cidr} | sed 's,.*/,,' )
    if [[ ${actual_size} -ne 16 ]]; then
      echo "ERROR! For IPv4 CIDRs, the size must be /16. Have '${pod_cidr}'"
      exit 1
    fi
    pod_sizes+=( 24 )
    pod_prefixes+=( "$(echo ${pod_cidr} | sed 's/^\([0-9]*\.[0-9]*\.\).*/\1/')" )
  else # IPv6
    if [[ ${CNI_PLUGIN} != "bridge" && ${CNI_PLUGIN} != "ptp" ]]; then
      echo "ERROR! IPv6 pod networks are only supported by bridge and PTP CNI plugins"
      exit 1
    fi
    # There are several cases to address. First, is normal split of prefix and size:
    #    fd00:10:20:30::/64  --->  fd00:10:20:30:  /72
    #
    # Second, is when the prefix needs to be padded, so that node ID can be added later:
    #    fd00:10::/64  --->  fd00:10:0:0:  /72
    #
    # Third, is when the low order part of the address, must be removed for the prefix,
    # as the node ID will be placed in the lower byte:
    #    fd00:10:20:30:4000::/72  --->  fd00:10:20:30:40  /80
    #
    # We will attempt to check for three error cases. One is when the address part is
    # way too big for the size specified:
    #    fd00:10:20:30:40::/48  ---> fd00:10:20:  /56 desired, but conflict with 30:40:
    #
    # Another is when the address part, once trimmed for the size, would loose info:
    #    fd00:10:20:1234::/56  ---> fd00:10:20:12  /64, but lost 34:, which conflicts
    #
    # Lastly, again, trimming would leave high byte in hextet, conflicting with
    # the node ID:
    #    fd00:10:20:30:1200::/64  ---> fd00:10:20:30:12  /72, but 12 conflicts
    #
    # Note: later, the node ID will be appended to the prefix generated.
    #
    cluster_size="$(echo ${pod_cidr} | sed 's,.*::/,,')"
    pod_sizes+=( $((${cluster_size}+8)) )
    pod_prefix="$(echo ${pod_cidr} | sed 's,::/.*,:,')"
    num_colons="$(grep -o ":" <<< "${pod_prefix}" | wc -l)"
    need_zero_pads=$((${cluster_size}/16))
    if [[ ${num_colons} -gt $((need_zero_pads + 1)) ]]; then
      echo "ERROR! Address part of CIDR (${pod_prefix}) is too large for /${cluster_size}"
      exit 1
    fi
    if [[ ${num_colons} -gt ${need_zero_pads} ]]; then
      # Will be replacing lowest byte with node ID, so pull off lower byte and colon
      if [[ ${pod_prefix: -3} != "00:" ]]; then # last byte is not zero
        echo "ERROR! Cannot trim address part of CIDR (${pod_prefix}) to fit in /${cluster_size}"
        exit 1
      fi
      pod_prefix=${pod_prefix::-3}
      if [[ $(( ${cluster_size} % 16 )) -eq 0 && $( ${pod_prefix: -1} ) != ":" ]]; then # should not be upper byte for this size CIDR
        echo "ERROR! Trimmed address part of CIDR (${pod_prefix}) is still too large for /${cluster_size}"
        exit 1
      fi
    fi
    # Add in zeros to pad 16 bits at a time, up to the padding needed, which is
    # need_zero_pads - num_colons.
    while [ ${num_colons} -lt ${need_zero_pads} ]; do
      pod_prefix+="0:"
      ((num_colons++))
    done
    pod_prefixes+=( "${pod_prefix}" )
  fi
done
# Image selection: a pinned DIND_COMMIT selects a published image tag (plus
# optional digest); otherwise a locally built image is used.
DIND_IMAGE_BASE="${DIND_IMAGE_BASE:-wk88/kubeadm-dind-cluster}"
if [[ ${DIND_COMMIT:-} ]]; then
  if [[ ${DIND_COMMIT} = current ]]; then
    DIND_COMMIT="$(cd "${DIND_ROOT}"; git rev-parse HEAD)"
  fi
  DIND_K8S_VERSION="${DIND_K8S_VERSION:-v1.13}"
  DIND_IMAGE="${DIND_IMAGE_BASE}:${DIND_COMMIT}-${DIND_K8S_VERSION}"
else
  DIND_IMAGE="${DIND_IMAGE:-${DIND_IMAGE_BASE}:local}"
fi
if [[ ${DIND_IMAGE_DIGEST:-} ]]; then
  DIND_IMAGE="${DIND_IMAGE}@${DIND_IMAGE_DIGEST}"
fi
BUILD_KUBEADM="${BUILD_KUBEADM:-}"
BUILD_HYPERKUBE="${BUILD_HYPERKUBE:-}"
# Pre-built binaries take precedence over building from source.
if [[ ! -z ${DIND_K8S_BIN_DIR:-} ]]; then
  BUILD_KUBEADM=""
  BUILD_HYPERKUBE=""
fi
KUBEADM_SOURCE="${KUBEADM_SOURCE-}"
HYPERKUBE_SOURCE="${HYPERKUBE_SOURCE-}"
NUM_NODES=${NUM_NODES:-2}
EXTRA_PORTS="${EXTRA_PORTS:-}"
KUBECTL_DIR="${KUBECTL_DIR:-${HOME}/.kubeadm-dind-cluster}"
SKIP_SNAPSHOT="${SKIP_SNAPSHOT:-}"
E2E_REPORT_DIR="${E2E_REPORT_DIR:-}"
DIND_NO_PARALLEL_E2E="${DIND_NO_PARALLEL_E2E:-}"
DNS_SERVICE="${DNS_SERVICE:-coredns}"
DIND_STORAGE_DRIVER="${DIND_STORAGE_DRIVER:-overlay2}"
DIND_CA_CERT_URL="${DIND_CA_CERT_URL:-}"
DIND_PROPAGATE_HTTP_PROXY="${DIND_PROPAGATE_HTTP_PROXY:-}"
DIND_HTTP_PROXY="${DIND_HTTP_PROXY:-}"
DIND_HTTPS_PROXY="${DIND_HTTPS_PROXY:-}"
DIND_NO_PROXY="${DIND_NO_PROXY:-}"
DIND_DAEMON_JSON_FILE="${DIND_DAEMON_JSON_FILE:-/etc/docker/daemon.json}"  # can be set to /dev/null
DIND_REGISTRY_MIRROR="${DIND_REGISTRY_MIRROR:-}"  # plain string format
DIND_INSECURE_REGISTRIES="${DIND_INSECURE_REGISTRIES:-}"  # json list format
# comma-separated custom network(s) for cluster nodes to join
DIND_CUSTOM_NETWORKS="${DIND_CUSTOM_NETWORKS:-}"
SKIP_DASHBOARD="${SKIP_DASHBOARD:-}"
# you can set special value 'none' not to set any FEATURE_GATES / KUBELET_FEATURE_GATES.
FEATURE_GATES="${FEATURE_GATES:-none}"
KUBELET_FEATURE_GATES="${KUBELET_FEATURE_GATES:-DynamicKubeletConfig=true}"
ENABLE_CEPH="${ENABLE_CEPH:-}"
# Select the CRI socket path matching the requested container runtime.
DIND_CRI="${DIND_CRI:-docker}"
case "${DIND_CRI}" in
  docker)
    CRI_SOCKET=/var/run/dockershim.sock
    ;;
  containerd)
    CRI_SOCKET=/var/run/containerd/containerd.sock
    ;;
  *)
    echo >&2 "Bad DIND_CRI. Please specify 'docker' or 'containerd'"
    # BUG FIX: previously this arm fell through without exiting, leaving
    # CRI_SOCKET unset; with 'set -o nounset' active the script would then
    # die later with a confusing unbound-variable error instead of this
    # clear message.
    exit 1
    ;;
esac
# TODO: Test multi-cluster for IPv6, before enabling
if [[ "${DIND_LABEL}" != "${DEFAULT_DIND_LABEL}" && "${IP_MODE}" == 'dual-stack' ]]; then
  echo "Multiple parallel clusters currently not supported for dual-stack mode" >&2
  exit 1
fi
# not configurable for now, would need to setup context for kubectl _inside_ the cluster
readonly INTERNAL_APISERVER_PORT=8080
# dind::need-source aborts unless run from a Kubernetes source checkout.
function dind::need-source {
  if [[ ! -f cluster/kubectl.sh ]]; then
    echo "$0 must be called from the Kubernetes repository root directory" 1>&2
    exit 1
  fi
}
build_tools_dir="build"
use_k8s_source=y
if [[ ! ${BUILD_KUBEADM} && ! ${BUILD_HYPERKUBE} ]]; then
  use_k8s_source=
fi
if [[ ${use_k8s_source} ]]; then
  dind::need-source
  kubectl=cluster/kubectl.sh
  if [[ ! -f ${build_tools_dir}/common.sh ]]; then
    build_tools_dir="build-tools"
  fi
else
  if [[ ! ${DOWNLOAD_KUBECTL:-} ]] && ! hash kubectl 2>/dev/null; then
    echo "You need kubectl binary in your PATH to use prebuilt DIND image" 1>&2
    exit 1
  fi
  kubectl=kubectl
fi
# dind::retry runs a command up to 11 times with a growing sleep between
# attempts; the final attempt's status is returned.
function dind::retry {
  # based on retry function in hack/jenkins/ scripts in k8s source
  for i in {1..10}; do
    "$@" && return 0 || sleep ${i}
  done
  "$@"
}
busybox_image="busybox:1.30.1"
e2e_base_image="golang:1.12.4"
sys_volume_args=()
build_volume_args=()
# dind::set-build-volume-args determines how the k8s source tree is exposed
# to containers: a bind mount for local builds, or the data container
# produced by the k8s build system otherwise. Idempotent.
function dind::set-build-volume-args {
  if [ ${#build_volume_args[@]} -gt 0 ]; then
    return 0
  fi
  build_container_name=
  if [ -n "${KUBEADM_DIND_LOCAL:-}" ]; then
    build_volume_args=(-v "$PWD:/go/src/k8s.io/kubernetes")
  else
    build_container_name="$(KUBE_ROOT=${PWD} ETCD_HOST=${ETCD_HOST} &&
                            . ${build_tools_dir}/common.sh &&
                            kube::build::verify_prereqs >&2 &&
                            echo "${KUBE_DATA_CONTAINER_NAME:-${KUBE_BUILD_DATA_CONTAINER_NAME}}")"
    build_volume_args=(--volumes-from "${build_container_name}")
  fi
}
# dind::volume-exists returns success when the named docker volume exists.
function dind::volume-exists {
  local name="$1"
  if docker volume inspect "${name}" >& /dev/null; then
    return 0
  fi
  return 1
}
# dind::create-volume creates a docker volume labeled for this cluster.
function dind::create-volume {
  local name="$1"
  docker volume create --label "${DIND_LABEL}" --name "${name}" >/dev/null
}
# We mount /boot and /lib/modules into the container
# below to in case some of the workloads need them.
# This includes virtlet, for instance. Also this may be
# useful in future if we want DIND nodes to pass
# preflight checks.
# Unfortunately we can't do this when using Mac Docker
# (unless a remote docker daemon on Linux is used)
# NB: there's no /boot on recent Mac dockers
function dind::prepare-sys-mounts {
  if [[ ! ${using_linuxkit} ]]; then
    sys_volume_args=()
    if [[ -d /boot ]]; then
      sys_volume_args+=(-v /boot:/boot)
    fi
    if [[ -d /lib/modules ]]; then
      sys_volume_args+=(-v /lib/modules:/lib/modules)
    fi
    return 0
  fi
  local dind_sys_vol_name
  dind_sys_vol_name="kubeadm-dind-sys$( dind::cluster-suffix )"
  if ! dind::volume-exists "$dind_sys_vol_name"; then
    dind::step "Saving a copy of docker host's /lib/modules"
    dind::create-volume "$dind_sys_vol_name"
    # Use a dirty nsenter trick to fool Docker on Mac and grab system
    # /lib/modules into sys.tar file on kubeadm-dind-sys volume.
    local nsenter="nsenter --mount=/proc/1/ns/mnt --"
    docker run \
           --rm \
           --privileged \
           -v "$dind_sys_vol_name":/dest \
           --pid=host \
           "${busybox_image}" \
           /bin/sh -c \
           "if ${nsenter} test -d /lib/modules; then ${nsenter} tar -C / -c lib/modules >/dest/sys.tar; fi"
  fi
  sys_volume_args=(-v "$dind_sys_vol_name":/dind-sys)
}
tmp_containers=()
# dind::cleanup force-removes any temporary containers registered in
# tmp_containers; wired to run on script exit via the trap below.
function dind::cleanup {
  if [ ${#tmp_containers[@]} -gt 0 ]; then
    for name in "${tmp_containers[@]}"; do
      docker rm -vf "${name}" 2>/dev/null
    done
  fi
}
trap dind::cleanup EXIT
# dind::check-image returns success when the named image is present locally.
function dind::check-image {
  local name="$1"
  if docker inspect --format 'x' "${name}" >&/dev/null; then
    return 0
  else
    return 1
  fi
}
function dind::filter-make-output {
  # these messages make output too long and make Travis CI choke
  egrep -v --line-buffered 'I[0-9][0-9][0-9][0-9] .*(parse|conversion|defaulter|deepcopy)\.go:[0-9]+\]'
}
function dind::run-build-command {
  # this is like build/run.sh, but it doesn't rsync back the binaries,
  # only the generated files.
  local cmd=("$@")
  (
    # The following is taken from build/run.sh and build/common.sh
    # of Kubernetes source tree. It differs in
    # --filter='+ /_output/dockerized/bin/**'
    # being removed from rsync
    . ${build_tools_dir}/common.sh
    kube::build::verify_prereqs
    kube::build::build_image
    kube::build::run_build_command "$@"
    kube::log::status "Syncing out of container"
    kube::build::start_rsyncd_container
    local rsync_extra=""
    if (( ${KUBE_VERBOSE} >= 6 )); then
      rsync_extra="-iv"
    fi
    # The filter syntax for rsync is a little obscure. It filters on files and
    # directories.  If you don't go in to a directory you won't find any files
    # there.  Rules are evaluated in order.  The last two rules are a little
    # magic. '+ */' says to go in to every directory and '- /**' says to ignore
    # any file or directory that isn't already specifically allowed.
    #
    # We are looking to copy out all of the built binaries along with various
    # generated files.
    kube::build::rsync \
      --filter='- /vendor/' \
      --filter='- /_temp/' \
      --filter='+ zz_generated.*' \
      --filter='+ generated.proto' \
      --filter='+ *.pb.go' \
      --filter='+ types.go' \
      --filter='+ */' \
      --filter='- /**' \
      "rsync://k8s@${KUBE_RSYNC_ADDR}/k8s/" "${KUBE_ROOT}"
    kube::build::stop_rsyncd_container
  )
}
# dind::make-for-linux builds the given make targets, either locally, via the
# containerized build with binary copy-out ("y"), or in the build container.
function dind::make-for-linux {
  local copy="$1"
  shift
  dind::step "Building binaries:" "$*"
  if [ -n "${KUBEADM_DIND_LOCAL:-}" ]; then
    dind::step "+ make WHAT=\"$*\""
    make WHAT="$*" 2>&1 | dind::filter-make-output
  elif [ "${copy}" = "y" ]; then
    dind::step "+ ${build_tools_dir}/run.sh make WHAT=\"$*\""
    "${build_tools_dir}/run.sh" make WHAT="$*" 2>&1 | dind::filter-make-output
  else
    dind::step "+ [using the build container] make WHAT=\"$*\""
    dind::run-build-command make WHAT="$*" 2>&1 | dind::filter-make-output
  fi
}
# dind::check-binary returns success when the named binary has already been
# built, either in the local output trees or inside the build data container.
function dind::check-binary {
  local filename="$1"
  # BUG FIX: these paths previously interpolated $(unknown), which attempted
  # to execute a nonexistent "unknown" command instead of using the binary
  # name passed as $1; the check could therefore never match a real binary.
  local dockerized="_output/dockerized/bin/linux/amd64/${filename}"
  local plain="_output/local/bin/linux/amd64/${filename}"
  dind::set-build-volume-args
  # FIXME: don't hardcode amd64 arch
  if [ -n "${KUBEADM_DIND_LOCAL:-${force_local:-}}" ]; then
    if [ -f "${dockerized}" -o -f "${plain}" ]; then
      return 0
    fi
  elif docker run --rm "${build_volume_args[@]}" \
              "${busybox_image}" \
              test -f "/go/src/k8s.io/kubernetes/${dockerized}" >&/dev/null; then
    return 0
  fi
  return 1
}
function dind::ensure-downloaded-kubectl {
local kubectl_url
local kubectl_sha1
local kubectl_sha1_linux
local kubectl_sha1_darwin
local kubectl_link
local kubectl_os
if [[ ! ${DOWNLOAD_KUBECTL:-} ]]; then
return 0
fi
export PATH="${KUBECTL_DIR}:$PATH"
eval "$(docker run --entrypoint /bin/bash --rm "${DIND_IMAGE}" -c "cat /dind-env")"
if [ $(uname) = Darwin ]; then
kubectl_sha1="${KUBECTL_DARWIN_SHA1}"
kubectl_url="${KUBECTL_DARWIN_URL}"
else
kubectl_sha1="${KUBECTL_LINUX_SHA1}"
kubectl_url="${KUBECTL_LINUX_URL}"
fi
local link_target="kubectl-${KUBECTL_VERSION}"
local link_name="${KUBECTL_DIR}"/kubectl
if [[ -h "${link_name}" && "$(readlink "${link_name}")" = "${link_target}" ]]; then
return 0
fi
local path="${KUBECTL_DIR}/${link_target}"
if [[ ! -f "${path}" ]]; then
mkdir -p "${KUBECTL_DIR}"
curl -sSLo "${path}" "${kubectl_url}"
echo "${kubectl_sha1} ${path}" | sha1sum -c
chmod +x "${path}"
fi
ln -fs "${link_target}" "${KUBECTL_DIR}/kubectl"
}
function dind::ensure-kubectl {
if [[ ! ${use_k8s_source} ]]; then
# already checked on startup
dind::ensure-downloaded-kubectl
return 0
fi
if [ $(uname) = Darwin ]; then
dind::step "Building kubectl"
dind::step "+ make WHAT=cmd/kubectl"
make WHAT=cmd/kubectl 2>&1 | dind::filter-make-output
else
dind::make-for-linux y cmd/kubectl
fi
}
function dind::ensure-binaries {
local -a to_build=()
for name in "$@"; do
if ! dind::check-binary "$(basename "${name}")"; then
to_build+=("${name}")
fi
done
if [ "${#to_build[@]}" -gt 0 ]; then
dind::make-for-linux n "${to_build[@]}"
fi
return 0
}
# dind::ensure-network creates the management network for the cluster. For IPv4
# only it will have the management network CIDR. For IPv6 only, it will have
# the IPv6 management network CIDR and the NAT64 V4 mapping network CIDR. For
# dual stack, it will have the IPv4 and IPv6 management CIDRs. Each of the
# management networks (not the NAT64 network) will have a gateway specified.
#
function dind::ensure-network {
if ! docker network inspect $(dind::net-name) >&/dev/null; then
local -a args
for cidr in "${mgmt_net_cidrs[@]}"; do
if [[ $( dind::family-for ${cidr} ) = "ipv6" ]]; then
args+=(--ipv6)
fi
args+=(--subnet="${cidr}")
local gw=$( dind::make-ip-from-cidr ${cidr} 1 )
args+=(--gateway="${gw}")
done
if [[ ${IP_MODE} = "ipv6" ]]; then
# Need second network for NAT64 V4 mapping network
args+=(--subnet=${NAT64_V4_SUBNET_PREFIX}.0.0/16)
fi
docker network create ${args[@]} $(dind::net-name) >/dev/null
fi
}
function dind::ensure-volume {
local reuse_volume=
if [[ $1 = -r ]]; then
reuse_volume=1
shift
fi
local name="$1"
if dind::volume-exists "${name}"; then
if [[ ! ${reuse_volume} ]]; then
docker volume rm "${name}" >/dev/null
fi
fi
dind::create-volume "${name}"
}
function dind::ensure-dns {
if [[ ${IP_MODE} = "ipv6" ]]; then
local dns64_name="bind9$( dind::cluster-suffix )"
if ! docker inspect ${dns64_name} >&/dev/null; then
local force_dns64_for=""
if [[ ! ${DIND_ALLOW_AAAA_USE} ]]; then
# Normally, if have an AAAA record, it is used. This clause tells
# bind9 to do ignore AAAA records for the specified networks
# and/or addresses and lookup A records and synthesize new AAAA
# records. In this case, we select "any" networks that have AAAA
# records meaning we ALWAYS use A records and do NAT64.
force_dns64_for="exclude { any; };"
fi
read -r -d '' bind9_conf <<BIND9_EOF
options {
directory "/var/bind";
allow-query { any; };
forwarders {
${DNS64_PREFIX}${REMOTE_DNS64_V4SERVER};
};
auth-nxdomain no; # conform to RFC1035
listen-on-v6 { any; };
dns64 ${DNS64_PREFIX_CIDR} {
${force_dns64_for}
};
};
BIND9_EOF
docker run -d --name ${dns64_name} --hostname ${dns64_name} --net "$(dind::net-name)" --label "dind-support$( dind::cluster-suffix )" \
--sysctl net.ipv6.conf.all.disable_ipv6=0 --sysctl net.ipv6.conf.all.forwarding=1 \
--privileged=true --ip6 ${dns_server} --dns ${dns_server} \
-e bind9_conf="${bind9_conf}" \
diverdane/bind9:latest /bin/sh -c 'echo "${bind9_conf}" >/named.conf && named -c /named.conf -g -u named' >/dev/null
ipv4_addr="$(docker exec ${dns64_name} ip addr list eth0 | grep "inet" | awk '$1 == "inet" {print $2}')"
docker exec ${dns64_name} ip addr del ${ipv4_addr} dev eth0
docker exec ${dns64_name} ip -6 route add ${DNS64_PREFIX_CIDR} via ${LOCAL_NAT64_SERVER}
fi
fi
}
# dind::ensure-nat -- in ipv6 mode, start a tayga NAT64 container so pods can
# reach IPv4-only destinations, and ensure the host routes the NAT64 dynamic
# v4 pool through it. No-op in other IP modes.
function dind::ensure-nat {
  if [[ ${IP_MODE} = "ipv6" ]]; then
    local nat64_name="tayga$( dind::cluster-suffix )"
    if ! docker ps | grep ${nat64_name} >&/dev/null; then
      # tayga gets a fixed v4 address (x.0.200) plus the NAT64 server's v6
      # address; the DNS64 prefix maps onto the dynamic v4 pool (x.0.128/25).
      docker run -d --name ${nat64_name} --hostname ${nat64_name} --net "$(dind::net-name)" --label "dind-support$( dind::cluster-suffix )" \
             --sysctl net.ipv6.conf.all.disable_ipv6=0 --sysctl net.ipv6.conf.all.forwarding=1 \
             --privileged=true --ip ${NAT64_V4_SUBNET_PREFIX}.0.200 --ip6 ${LOCAL_NAT64_SERVER} --dns ${REMOTE_DNS64_V4SERVER} --dns ${dns_server} \
             -e TAYGA_CONF_PREFIX=${DNS64_PREFIX_CIDR} -e TAYGA_CONF_IPV4_ADDR=${NAT64_V4_SUBNET_PREFIX}.0.200 \
             -e TAYGA_CONF_DYNAMIC_POOL=${NAT64_V4_SUBNET_PREFIX}.0.128/25 danehans/tayga:latest >/dev/null
      # Need to check/create, as "clean" may remove route
      local route="$(ip route | egrep "^${NAT64_V4_SUBNET_PREFIX}.0.128/25")"
      if [[ -z "${route}" ]]; then
        docker run --net=host --rm --privileged ${busybox_image} ip route add ${NAT64_V4_SUBNET_PREFIX}.0.128/25 via ${NAT64_V4_SUBNET_PREFIX}.0.200
      fi
    fi
  fi
}
# dind::run [-r] CONTAINER_NAME [NODE_ID [PORTFORWARD [DOCKER_OPTS...]]]
# Create (or recreate) one DinD node container on the cluster network.
#   -r           reuse the node's /dind volume instead of recreating it
#   NODE_ID      index used to derive the management IP(s) and pod subnet(s)
#   PORTFORWARD  ';'-separated list of docker '-p' port mappings
# Fix: the extra docker options ("$@") were appended to 'opts' twice (once at
# initialization and once after the IP loop), which duplicated -e/-v flags
# and could make 'docker run' fail on duplicate mounts; added only once now.
function dind::run {
  local reuse_volume=
  if [[ $1 = -r ]]; then
    reuse_volume="-r"
    shift
  fi
  local container_name="${1:-}"
  local node_id=${2:-0}
  local portforward="${3:-}"
  if [[ $# -gt 3 ]]; then
    shift 3
  else
    shift $#
  fi
  # Remaining arguments are extra docker options.
  local -a opts=("$@")
  # Assign this node's static address on each management network.
  local ip_mode="--ip"
  for cidr in "${mgmt_net_cidrs[@]}"; do
    if [[ $( dind::family-for ${cidr} ) = "ipv6" ]]; then
      ip_mode="--ip6"
    fi
    opts+=("${ip_mode}" "$( dind::make-ip-from-cidr ${cidr} $((${node_id}+1)) )")
  done
  # Kernel-command-line style settings consumed by the image's init.
  local -a args=("systemd.setenv=CNI_PLUGIN=${CNI_PLUGIN}")
  args+=("systemd.setenv=IP_MODE=${IP_MODE}")
  args+=("systemd.setenv=DIND_STORAGE_DRIVER=${DIND_STORAGE_DRIVER}")
  args+=("systemd.setenv=DIND_CRI=${DIND_CRI}")
  if [[ ${IP_MODE} != "ipv4" ]]; then
    opts+=(--sysctl net.ipv6.conf.all.disable_ipv6=0)
    opts+=(--sysctl net.ipv6.conf.all.forwarding=1)
  fi
  if [[ ${IP_MODE} = "ipv6" ]]; then
    opts+=(--dns ${dns_server})
    args+=("systemd.setenv=DNS64_PREFIX_CIDR=${DNS64_PREFIX_CIDR}")
    args+=("systemd.setenv=LOCAL_NAT64_SERVER=${LOCAL_NAT64_SERVER}")
  fi
  # Compute this node's pod network prefix(es) from its ID.
  declare -a pod_nets
  local i=0
  if [[ ${IP_MODE} = "ipv4" || ${IP_MODE} = "dual-stack" ]]; then
    pod_nets+=("${pod_prefixes[$i]}${node_id}")
    i=$((i+1))
  fi
  if [[ ${IP_MODE} = "ipv6" || ${IP_MODE} = "dual-stack" ]]; then
    # For prefix, if node ID will be in the upper byte, push it over
    if [[ $((${pod_sizes[$i]} % 16)) -ne 0 ]]; then
      n_id=$(printf "%02x00\n" "${node_id}")
    else
      if [[ "${pod_prefixes[$i]: -1}" = ":" ]]; then
        n_id=$(printf "%x\n" "${node_id}")
      else
        n_id=$(printf "%02x\n" "${node_id}") # In lower byte, so ensure two chars
      fi
    fi
    pod_nets+=("${pod_prefixes[$i]}${n_id}")
  fi
  args+=("systemd.setenv=POD_NET_PREFIX=\"${pod_nets[0]}\"")
  args+=("systemd.setenv=POD_NET_SIZE=\"${pod_sizes[0]}\"")
  args+=("systemd.setenv=POD_NET2_PREFIX=\"${pod_nets[1]:-}\"")
  args+=("systemd.setenv=POD_NET2_SIZE=\"${pod_sizes[1]:-}\"")
  args+=("systemd.setenv=SERVICE_NET_MODE=${SERVICE_NET_MODE}")
  args+=("systemd.setenv=USE_HAIRPIN=${USE_HAIRPIN}")
  args+=("systemd.setenv=DNS_SVC_IP=${DNS_SVC_IP}")
  args+=("systemd.setenv=DNS_SERVICE=${DNS_SERVICE}")
  if [[ ! "${container_name}" ]]; then
    echo >&2 "Must specify container name"
    exit 1
  fi
  # remove any previously created containers with the same name
  docker rm -vf "${container_name}" >&/dev/null || true
  if [[ "${portforward}" ]]; then
    IFS=';' read -ra array <<< "${portforward}"
    for element in "${array[@]}"; do
      opts+=(-p "${element}")
    done
  fi
  opts+=(${sys_volume_args[@]+"${sys_volume_args[@]}"})
  dind::step "Starting DIND container:" "${container_name}"
  if [[ ! -z ${DIND_K8S_BIN_DIR:-} ]]; then
    opts+=(-v ${DIND_K8S_BIN_DIR}:/k8s)
  fi
  if [[ ! ${using_linuxkit} ]]; then
    opts+=(-v /boot:/boot -v /lib/modules:/lib/modules)
  fi
  if [[ ${ENABLE_CEPH} ]]; then
    opts+=(-v /dev:/dev
           -v /sys/bus:/sys/bus
           -v /var/run/docker.sock:/opt/outer-docker.sock)
  fi
  local volume_name="kubeadm-dind-${container_name}"
  dind::ensure-network
  dind::ensure-volume ${reuse_volume} "${volume_name}"
  dind::ensure-nat
  dind::ensure-dns
  # TODO: create named volume for binaries and mount it to /k8s
  # in case of the source build
  # Start the new container.
  docker run \
         -e IP_MODE="${IP_MODE}" \
         -e KUBEADM_SOURCE="${KUBEADM_SOURCE}" \
         -e HYPERKUBE_SOURCE="${HYPERKUBE_SOURCE}" \
         -d --privileged \
         --net "$(dind::net-name)" \
         --name "${container_name}" \
         --hostname "${container_name}" \
         -l "${DIND_LABEL}" \
         -v "${volume_name}:/dind" \
         ${opts[@]+"${opts[@]}"} \
         "${DIND_IMAGE}" \
         ${args[@]+"${args[@]}"}
  # Guard with :- so an unset DIND_CUSTOM_NETWORKS is safe under 'set -u'.
  if [[ -n ${DIND_CUSTOM_NETWORKS:-} ]]; then
    local cust_nets
    local IFS=','; read -ra cust_nets <<< "${DIND_CUSTOM_NETWORKS}"
    for cust_net in "${cust_nets[@]}"; do
      docker network connect ${cust_net} ${container_name} >/dev/null
    done
  fi
}
# dind::kubeadm CONTAINER_ID ARGS... -- run wrapkubeadm inside a node
# container. Output is both captured and echoed (tee to fd 2) because
# callers scrape the 'kubeadm join' flags out of 'kubeadm init' output.
# Fix: removed the dead 'status' variable that was set to 0 once and
# returned unchanged; the function now returns 0 explicitly on success.
function dind::kubeadm {
  local container_id="$1"
  shift
  dind::step "Running kubeadm:" "$*"
  # See image/bare/wrapkubeadm.
  # KUBELET_FEATURE_GATES / DIND_CRI must be visible to wrapkubeadm.
  local -a env=(-e KUBELET_FEATURE_GATES="${KUBELET_FEATURE_GATES}"
                -e DIND_CRI="${DIND_CRI}")
  # NOTE(review): catching a kubeadm failure through this pipe relies on
  # 'pipefail' being enabled elsewhere in the script -- confirm at file top.
  if ! docker exec "${env[@]}" "${container_id}" /usr/local/bin/wrapkubeadm "$@" 2>&1 | tee /dev/fd/2; then
    echo "*** kubeadm failed" >&2
    return 1
  fi
  return 0
}
# function dind::bare {
# local container_name="${1:-}"
# if [[ ! "${container_name}" ]]; then
# echo >&2 "Must specify container name"
# exit 1
# fi
# shift
# run_opts=(${@+"$@"})
# dind::run "${container_name}"
# }
# Register the new cluster with kubectl: create a cluster entry pointing at
# the forwarded apiserver port, create a matching context, and select that
# context when running in single-cluster mode.
function dind::configure-kubectl {
  dind::step "Setting cluster config"
  local host
  host="$(dind::localhost)"
  # Unless docker itself runs on a remote Linux host, talk to the
  # port-forward on the local loopback.
  if [[ -z "$using_local_linuxdocker" ]]; then
    host="127.0.0.1"
  fi
  local context_name
  local cluster_name
  context_name="$(dind::context-name)"
  cluster_name="$(dind::context-name)"
  "${kubectl}" config set-cluster "$cluster_name" \
               --server="http://${host}:$(dind::apiserver-port)" \
               --insecure-skip-tls-verify=true
  "${kubectl}" config set-context "$context_name" --cluster="$cluster_name"
  if [[ ${DIND_LABEL} = "${DEFAULT_DIND_LABEL}" ]]; then
    # Only one cluster is being managed: make its context the default.
    "${kubectl}" config use-context "$context_name"
  fi
}
# When non-empty, dind::set-master-opts rebuilds the kubeadm/hyperkube
# binaries instead of reusing previously built ones.
force_make_binaries=
# Populate the global master_opts array with extra docker options for the
# master container: build-volume mounts, KUBEADM/HYPERKUBE source selection,
# and any user-supplied MASTER_EXTRA_OPTS. Also makes sure the required
# binaries exist (or are rebuilt when force_make_binaries is set).
function dind::set-master-opts {
  master_opts=()
  if [[ ${BUILD_KUBEADM} || ${BUILD_HYPERKUBE} ]]; then
    # share binaries pulled from the build container between nodes
    local dind_k8s_bin_vol_name
    dind_k8s_bin_vol_name="dind-k8s-binaries$(dind::cluster-suffix)"
    dind::ensure-volume -r "${dind_k8s_bin_vol_name}"
    dind::set-build-volume-args
    master_opts+=("${build_volume_args[@]}" -v "${dind_k8s_bin_vol_name}:/k8s")
    local -a bins
    if [[ ${BUILD_KUBEADM} ]]; then
      master_opts+=(-e KUBEADM_SOURCE=build://)
      bins+=(cmd/kubeadm)
    else
      # Fix: propagate the variable itself ('-e NAME' inherits the value from
      # the current environment); expanding the value here ('-e ${...}')
      # produced an invalid environment assignment when it was a URL or empty.
      master_opts+=(-e KUBEADM_SOURCE)
    fi
    if [[ ${BUILD_HYPERKUBE} ]]; then
      master_opts+=(-e HYPERKUBE_SOURCE=build://)
      bins+=(cmd/hyperkube)
    fi
    if [[ ${force_make_binaries} ]]; then
      dind::make-for-linux n "${bins[@]}"
    else
      dind::ensure-binaries "${bins[@]}"
    fi
  fi
  if [[ ${MASTER_EXTRA_OPTS:-} ]]; then
    # Intentionally unquoted: MASTER_EXTRA_OPTS is a space-separated list.
    master_opts+=( ${MASTER_EXTRA_OPTS} )
  fi
}
# Grant cluster-admin to the kube-system default service account (needed by
# the dashboard add-on). The binding JSON is generated with --dry-run and
# then applied, with jq (run inside the master container so the host does
# not need jq installed) forcing the RBAC apiVersion/kind.
function dind::ensure-dashboard-clusterrolebinding {
  local ctx
  ctx="$(dind::context-name)"
  # 'create' may cause etcd timeout, yet create the clusterrolebinding.
  # So use 'apply' to actually create it
  "${kubectl}" --context "$ctx" create clusterrolebinding add-on-cluster-admin \
               --clusterrole=cluster-admin \
               --serviceaccount=kube-system:default \
               -o json --dry-run |
    docker exec -i "$(dind::master-name)" jq '.apiVersion="rbac.authorization.k8s.io/v1beta1"|.kind|="ClusterRoleBinding"' |
    "${kubectl}" --context "$ctx" apply -f -
}
# Install the kubernetes dashboard, choosing a manifest URL appropriate for
# the API server version unless DASHBOARD_URL overrides it, then grant the
# add-on the RBAC permissions it needs.
function dind::deploy-dashboard {
  local url="${DASHBOARD_URL:-}"
  if [ ! "$url" ]; then
    local cmp_api_to_1_15=0
    # compare-versions returns 2 when the compared-to version (1.15) is newer.
    dind::compare-versions 'kubeapi' "$(dind::kubeapi-version)" 1 15 || cmp_api_to_1_15=$?
    if [[ $cmp_api_to_1_15 == 2 ]]; then
      # API version < 1.15
      url='https://rawgit.com/kubernetes/dashboard/bfab10151f012d1acc5dfb1979f3172e2400aa3c/src/deploy/kubernetes-dashboard.yaml'
    else
      # API version >= 1.15
      url='https://rawgit.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml'
    fi
  fi
  dind::step "Deploying k8s dashboard from $url"
  dind::retry "${kubectl}" --context "$(dind::context-name)" apply -f "$url"
  # https://kubernetes-io-vnext-staging.netlify.com/docs/admin/authorization/rbac/#service-account-permissions
  # Thanks @liggitt for the hint
  dind::retry dind::ensure-dashboard-clusterrolebinding
}
# Print the numeric client version (e.g. "1.14") from a local kubernetes
# source checkout via cluster/kubectl.sh; prints nothing if that fails.
function dind::version-from-source {
  (cluster/kubectl.sh version --short 2>/dev/null || true) |
    grep Client |
    sed 's/^.*: v\([0-9.]*\).*/\1/'
}
# Print the API server's numeric version: from the source checkout when
# building from source, otherwise by querying the running master container.
function dind::kubeapi-version {
  if [[ ${use_k8s_source} ]]; then
    dind::version-from-source
  else
    # jq + sed strip the leading 'v' and any suffix from the gitVersion.
    docker exec "$(dind::master-name)" \
           /bin/bash -c 'kubectl version -o json | jq -r .serverVersion.gitVersion | sed "s/^v\([0-9.]*\).*/\1/"'
  fi
}
# Print kubeadm's numeric version: from the source checkout when building
# from source, otherwise from the kubeadm binary in the master container.
function dind::kubeadm-version {
  if [[ ${use_k8s_source} ]]; then
    dind::version-from-source
  else
    docker exec "$(dind::master-name)" \
           /bin/bash -c 'kubeadm version -o json | jq -r .clientVersion.gitVersion' |
      sed 's/^v\([0-9.]*\).*/\1/'
  fi
}
# Print the kubelet's numeric version: from the source checkout when building
# from source, otherwise from 'kubelet --version' in the master container.
function dind::kubelet-version {
  if [[ ${use_k8s_source} ]]; then
    dind::version-from-source
  else
    # The sed expression strips the optional "kubernetes " prefix and 'v'.
    docker exec "$(dind::master-name)" \
           /bin/bash -c 'kubelet --version | sed -E "s/^(kubernetes )?v?([0-9]+(\.[0-9]+){1,2})/\2/I"'
  fi
}
# dind::compare-versions NAME VERSION_STR MAJOR MINOR
# Compare a dotted version string against a major/minor pair.
#   NAME         software name, used only in the warning message
#   VERSION_STR  version string, e.g. '1.14.5'
# Returns 0 when the versions are equal, 1 when VERSION_STR is newer,
# 2 when MAJOR.MINOR is newer, and 3 when VERSION_STR cannot be parsed.
function dind::compare-versions {
  local name="$1"
  local version_str="$2"
  local want_major="$3"
  local want_minor="$4"
  if [[ ! "$version_str" =~ ^([0-9]+)\.([0-9]+) ]]; then
    echo >&2 "WARNING: can't parse $name version: $version_str"
    return 3
  fi
  local have_major="${BASH_REMATCH[1]}"
  local have_minor="${BASH_REMATCH[2]}"
  # Majors decide first; only equal majors fall through to the minors.
  if (( have_major > want_major )); then
    return 1
  elif (( have_major < want_major )); then
    return 2
  elif (( have_minor > want_minor )); then
    return 1
  elif (( have_minor < want_minor )); then
    return 2
  fi
  return 0
}
# Succeed when the kubeadm in the master container reports a version that is
# at least MAJOR.MINOR (comparison result "equal" or "string is newer").
function dind::kubeadm-version-at-least {
  local cmp=0
  dind::compare-versions 'kubeadm' "$(dind::kubeadm-version)" "$1" "$2" || cmp=$?
  [[ ${cmp} -lt 2 ]]
}
# Abort (and clean up images) when dual-stack mode is requested but the DinD
# image that just started does not advertise dual-stack support in /node-info.
function dind::verify-image-compatibility {
  # We can't tell in advance, if the image selected supports dual-stack,
  # but will do the next best thing, and check as soon as start up kube-master
  local master_name=$1
  if [[ ${IP_MODE} != "dual-stack" ]]; then
    return
  fi
  local dual_stack_support
  dual_stack_support="$(docker exec ${master_name} cat /node-info 2>/dev/null | grep "dual-stack-support" | wc -l)"
  if [[ ${dual_stack_support} -eq 0 ]]; then
    echo "ERROR! DinD image (${DIND_IMAGE}) does not support dual-stack mode - aborting!"
    dind::remove-images "${DIND_LABEL}"
    exit 1
  fi
}
# kubeadm 1.13+ only supports coredns; if kube-dns was requested with such a
# version, warn and flip the DNS_SERVICE global over to coredns.
function dind::check-dns-service-type {
  if [[ ${DNS_SERVICE} != "kube-dns" ]]; then
    return
  fi
  if dind::kubeadm-version-at-least 1 13; then
    echo >&2 "WARNING: for 1.13+, only coredns can be used as the DNS service"
    DNS_SERVICE="coredns"
  fi
}
# Rewrite the KUBELET_VERSION_SPECIFIC_FLAGS value inside the kubelet systemd
# unit on the master to the given (possibly empty) flag string. Uses '@' as
# the sed delimiter because the flags may contain '/' and '='.
function dind::set-version-specific-flags {
  local kubelet_version_specific_flags="$1"
  docker exec "$(dind::master-name)" sed -i "s@KUBELET_VERSION_SPECIFIC_FLAGS=[^\"]*\"@KUBELET_VERSION_SPECIFIC_FLAGS=$kubelet_version_specific_flags\"@" /lib/systemd/system/kubelet.service
}
# Bring up the master: start its container, render /etc/kubeadm.conf from the
# version-specific in-image template, run 'kubeadm init', and capture the
# worker join flags into the global kubeadm_join_flags.
function dind::init {
  local -a opts
  dind::set-master-opts
  local local_host master_name container_id
  master_name="$(dind::master-name)"
  local_host="$( dind::localhost )"
  # Forward the local apiserver port into the master container.
  container_id=$(dind::run "${master_name}" 1 "${local_host}:$(dind::apiserver-port):${INTERNAL_APISERVER_PORT}" ${master_opts[@]+"${master_opts[@]}"})
  dind::verify-image-compatibility ${master_name}
  # FIXME: I tried using custom tokens with 'kubeadm ex token create' but join failed with:
  # 'failed to parse response as JWS object [square/go-jose: compact JWS format must have three parts]'
  # So we just pick the line from 'kubeadm init' output
  # Using a template file in the image (based on version) to build a kubeadm.conf file and to customize
  # it based on CNI plugin, IP mode, and environment settings. User can add additional
  # customizations to template and then rebuild the image used (build/build-local.sh).
  local pod_subnet_disable="# "
  # TODO: May want to specify each of the plugins that require --pod-network-cidr
  if [[ ${CNI_PLUGIN} != "bridge" && ${CNI_PLUGIN} != "ptp" ]]; then
    pod_subnet_disable=""
  fi
  local bind_address="0.0.0.0"
  if [[ ${SERVICE_NET_MODE} = "ipv6" ]]; then
    bind_address="::"
  fi
  dind::proxy "$master_name"
  dind::custom-docker-opts "$master_name"
  # HACK: Indicating mode, so that wrapkubeadm will not set a cluster CIDR for kube-proxy
  # in IPv6 (only) mode.
  if [[ ${SERVICE_NET_MODE} = "ipv6" ]]; then
    docker exec --privileged -i "$master_name" touch /v6-mode
  fi
  feature_gates="{CoreDNS: false}"
  if [[ ${DNS_SERVICE} == "coredns" ]]; then
    feature_gates="{CoreDNS: true}"
  fi
  # Pick the kubeadm config template matching the kubeadm version.
  kubeadm_version="$(dind::kubeadm-version)"
  case "${kubeadm_version}" in
    1\.12\.*)
      template="1.12"
      ;;
    *) # Includes 1.13 master branch
      # Will make a separate template if/when it becomes incompatible
      template="1.13"
      # CoreDNS can no longer be switched off
      feature_gates="{}"
      ;;
  esac
  dind::check-dns-service-type
  local kubelet_version_specific_flags=()
  local cmp_kubelet_to_1_15=0
  dind::compare-versions 'kubelet' "$(dind::kubelet-version)" 1 15 || cmp_kubelet_to_1_15=$?
  if [[ "$cmp_kubelet_to_1_15" == 2 ]]; then
    # this option got deprecated in v 1.15
    kubelet_version_specific_flags+=('--allow-privileged=true')
  fi
  # explicit conversion to a string is needed, as calling ${arr[@]} or ${arr[*]}
  # on an empty array will trigger an error on bash < 4.4 (and Travis is 4.3...)
  local kubelet_version_specific_flags_as_str=''
  if [[ ${#kubelet_version_specific_flags[@]} -gt 0 ]]; then
    kubelet_version_specific_flags_as_str="${kubelet_version_specific_flags[*]}"
  fi
  dind::set-version-specific-flags "$kubelet_version_specific_flags_as_str"
  component_feature_gates=""
  if [ "${FEATURE_GATES}" != "none" ]; then
    component_feature_gates="feature-gates: \\\"${FEATURE_GATES}\\\""
  fi
  # Collect APISERVER_* / CONTROLLER_MANAGER_* / SCHEDULER_* environment
  # variables into extra-args YAML snippets for the kubeadm config template.
  apiserver_extra_args=""
  for e in $(set -o posix ; set | grep -E "^APISERVER_[a-z_]+=" | cut -d'=' -f 1); do
    opt_name=$(echo ${e#APISERVER_} | sed 's/_/-/g')
    apiserver_extra_args+="    ${opt_name}: \\\"$(eval echo \$$e)\\\"\\n"
  done
  controller_manager_extra_args=""
  for e in $(set -o posix ; set | grep -E "^CONTROLLER_MANAGER_[a-z_]+=" | cut -d'=' -f 1); do
    opt_name=$(echo ${e#CONTROLLER_MANAGER_} | sed 's/_/-/g')
    controller_manager_extra_args+="    ${opt_name}: \\\"$(eval echo \$$e)\\\"\\n"
  done
  scheduler_extra_args=""
  for e in $(set -o posix ; set | grep -E "^SCHEDULER_[a-z_]+=" | cut -d'=' -f 1); do
    opt_name=$(echo ${e#SCHEDULER_} | sed 's/_/-/g')
    scheduler_extra_args+="    ${opt_name}: \\\"$(eval echo \$$e)\\\"\\n"
  done
  # The advertised address is node 2 on the management network (the second
  # network in dual-stack/ipv6-service mode).
  local mgmt_cidr=${mgmt_net_cidrs[0]}
  if [[ ${IP_MODE} = "dual-stack" && ${SERVICE_NET_MODE} = "ipv6" ]]; then
    mgmt_cidr=${mgmt_net_cidrs[1]}
  fi
  local master_ip=$( dind::make-ip-from-cidr ${mgmt_cidr} 2 )
  # Render /etc/kubeadm.conf from the template inside the master container.
  docker exec -i "$master_name" bash <<EOF
sed -e "s|{{ADV_ADDR}}|${master_ip}|" \
    -e "s|{{POD_SUBNET_DISABLE}}|${pod_subnet_disable}|" \
    -e "s|{{POD_NETWORK_CIDR}}|${pod_net_cidrs[0]}|" \
    -e "s|{{SVC_SUBNET}}|${SERVICE_CIDR}|" \
    -e "s|{{BIND_ADDR}}|${bind_address}|" \
    -e "s|{{BIND_PORT}}|${INTERNAL_APISERVER_PORT}|" \
    -e "s|{{FEATURE_GATES}}|${feature_gates}|" \
    -e "s|{{KUBEADM_VERSION}}|${kubeadm_version}|" \
    -e "s|{{COMPONENT_FEATURE_GATES}}|${component_feature_gates}|" \
    -e "s|{{APISERVER_EXTRA_ARGS}}|${apiserver_extra_args}|" \
    -e "s|{{CONTROLLER_MANAGER_EXTRA_ARGS}}|${controller_manager_extra_args}|" \
    -e "s|{{SCHEDULER_EXTRA_ARGS}}|${scheduler_extra_args}|" \
    -e "s|{{KUBE_MASTER_NAME}}|${master_name}|" \
    -e "s|{{DNS_SVC_IP}}|${DNS_SVC_IP}|" \
    -e "s|{{CRI_SOCKET}}|${CRI_SOCKET}|" \
    /etc/kubeadm.conf.${template}.tmpl > /etc/kubeadm.conf
EOF
  init_args=(--config /etc/kubeadm.conf)
  # required when building from source
  if [[ ${BUILD_KUBEADM} || ${BUILD_HYPERKUBE} ]]; then
    docker exec "$master_name" mount --make-shared /k8s
  fi
  dind::kubeadm "${container_id}" init "${init_args[@]}" --ignore-preflight-errors=all "$@"
  # Workers later join using these flags (see FIXME above).
  kubeadm_join_flags="$(docker exec "${container_id}" kubeadm token create --print-join-command | sed 's/^kubeadm join //')"
  dind::configure-kubectl
  dind::start-port-forwarder
}
# dind::create-node-container [-r] [NODE_INDEX]
# Start one worker node container; when NODE_INDEX is omitted it is derived
# from the number of currently running cluster containers. Prints whatever
# dind::run prints (the container id).
function dind::create-node-container {
  local reuse_volume next_node_index node_name
  reuse_volume=''
  if [[ ${1:-} = -r ]]; then
    reuse_volume="-r"
    shift
  fi
  # if there's just one node currently, it's master, thus we need to use
  # kube-node-1 hostname, if there are two nodes, we should pick
  # kube-node-2 and so on
  next_node_index=${1:-$(docker ps -q --filter=label="${DIND_LABEL}" | wc -l | sed 's/^ *//g')}
  local -a opts
  if [[ ${BUILD_KUBEADM} || ${BUILD_HYPERKUBE} ]]; then
    # reuse the shared binaries volume created for the master
    opts+=(-v "dind-k8s-binaries$(dind::cluster-suffix)":/k8s)
    if [[ ${BUILD_KUBEADM} ]]; then
      opts+=(-e KUBEADM_SOURCE=build://)
    fi
    if [[ ${BUILD_HYPERKUBE} ]]; then
      opts+=(-e HYPERKUBE_SOURCE=build://)
    fi
  fi
  node_name="$(dind::node-name ${next_node_index})"
  dind::run ${reuse_volume} "$node_name" $((next_node_index + 1)) "${EXTRA_PORTS}" ${opts[@]+"${opts[@]}"}
}
# dind::join CONTAINER_ID JOIN_FLAGS... -- join a node container to the
# cluster: configure its proxy and docker options, then run 'kubeadm join'
# (output discarded; failures surface through dind::kubeadm's status).
function dind::join {
  local node="$1"
  shift
  dind::proxy "${node}"
  dind::custom-docker-opts "${node}"
  dind::kubeadm "${node}" join \
                --ignore-preflight-errors=all \
                --cri-socket="${CRI_SOCKET}" \
                "$@" >/dev/null
}
# Escape an e2e test name for use as an extended regex (e.g. focus/skip
# patterns): regex metacharacters are backslash-escaped, runs of whitespace
# become '\s+', and the trailing newline is stripped.
function dind::escape-e2e-name {
  sed 's/[]\$*.^()[]/\\&/g; s/\s\+/\\s+/g' <<< "$1" | tr -d '\n'
}
# Lower kube-dns' readiness-probe delays so the cluster reports Ready sooner.
# Only applies when kube-dns (not coredns) is the DNS service.
function dind::accelerate-kube-dns {
  if [[ ${DNS_SERVICE} == "kube-dns" ]]; then
    dind::step "Patching kube-dns deployment to make it start faster"
    # Could do this on the host, too, but we don't want to require jq here
    # TODO: do this in wrapkubeadm
    docker exec "$(dind::master-name)" /bin/bash -c \
           "kubectl get deployment kube-dns -n kube-system -o json | jq '.spec.template.spec.containers[0].readinessProbe.initialDelaySeconds = 3|.spec.template.spec.containers[0].readinessProbe.periodSeconds = 3' | kubectl apply --force -f -"
  fi
}
# Succeed when the kube-system pods selected by the given label report Ready:
# the jsonpath yields their Ready-condition statuses, and we require at least
# one "True" with no "False" among them.
function dind::component-ready {
  local label="$1"
  local out
  if ! out="$("${kubectl}" --context "$(dind::context-name)" get pod -l "${label}" -n kube-system \
                           -o jsonpath='{ .items[*].status.conditions[?(@.type == "Ready")].status }' 2>/dev/null)"; then
    return 1
  fi
  # Exit status of this pipeline is the function's result.
  grep -v False <<<"${out}" | grep -q True
}
# Delete kube-system pods stuck in phase Failed
# (workaround for https://github.com/kubernetes/kubernetes/issues/36482).
# Fix: use the "${kubectl}" variable like the rest of the file, instead of
# relying on a bare 'kubectl' being present on PATH.
function dind::kill-failed-pods {
  local pods ctx
  ctx="$(dind::context-name)"
  if ! pods="$("${kubectl}" --context "$ctx" get pod -n kube-system -o jsonpath='{ .items[?(@.status.phase == "Failed")].metadata.name }' 2>/dev/null)"; then
    return
  fi
  local name
  # Word-splitting of ${pods} is intentional: jsonpath emits a
  # space-separated list of pod names.
  for name in ${pods}; do
    "${kubectl}" --context "$ctx" delete pod --now -n kube-system "${name}" >&/dev/null || true
  done
}
# For the bridge/PTP CNI plugins there is no overlay network; program static
# routes on every node so each node can reach every other node's pod subnet
# via that node's eth0 address.
function dind::create-static-routes {
  echo "Creating static routes for bridge/PTP plugin"
  for ((i=0; i <= NUM_NODES; i++)); do
    if [[ ${i} -eq 0 ]]; then
      node="$(dind::master-name)"
    else
      node="$(dind::node-name $i)"
    fi
    for ((j=0; j <= NUM_NODES; j++)); do
      if [[ ${i} -eq ${j} ]]; then
        continue
      fi
      if [[ ${j} -eq 0 ]]; then
        dest_node="$(dind::master-name)"
      else
        dest_node="$(dind::node-name $j)"
      fi
      # Pod subnets are numbered from 1 (node id + 1), matching dind::run.
      id=$((${j}+1))
      if [[ ${IP_MODE} = "ipv4" || ${IP_MODE} = "dual-stack" ]]; then
        # Assuming pod subnets will all be /24
        dest="${pod_prefixes[0]}${id}.0/24"
        gw=`docker exec ${dest_node} ip addr show eth0 | grep -w inet | awk '{ print $2 }' | sed 's,/.*,,'`
        docker exec "${node}" ip route add "${dest}" via "${gw}"
      fi
      if [[ ${IP_MODE} = "ipv6" || ${IP_MODE} = "dual-stack" ]]; then
        # In dual-stack mode the v6 pod prefix/size is the second entry.
        local position=0
        if [[ ${IP_MODE} = "dual-stack" ]]; then
          position=1
        fi
        instance=$(printf "%02x" ${id})
        # Mirror dind::run: a non-nibble-aligned pod size puts the node ID
        # in the upper byte of the last group.
        if [[ $((${pod_sizes[$position]} % 16)) -ne 0 ]]; then
          instance+="00" # Move node ID to upper byte
        fi
        dest="${pod_prefixes[$position]}${instance}::/${pod_sizes[$position]}"
        gw=`docker exec ${dest_node} ip addr show eth0 | grep -w inet6 | grep -i global | head -1 | awk '{ print $2 }' | sed 's,/.*,,'`
        docker exec "${node}" ip route add "${dest}" via "${gw}"
      fi
    done
  done
}
# If we are allowing AAAA record use, then provide SNAT for IPv6 packets from
# node containers, and forward packets to bridge used for $(dind::net-name).
# This gives pods access to external IPv6 sites, when using IPv6 addresses.
function dind::setup_external_access_on_host {
  if [[ ! ${DIND_ALLOW_AAAA_USE} ]]; then
    return
  fi
  # SNAT everything leaving via the host's default-route interface.
  local main_if=`ip route | grep default | awk '{print $5}'`
  dind::ip6tables-on-hostnet -t nat -A POSTROUTING -o $main_if -j MASQUERADE
  if [[ ${IP_MODE} = "dual-stack" ]]; then
    # dual-stack has no NAT64 bridge; SNAT alone is enough
    return
  fi
  # Find the bridge carrying the NAT64 v4 mapping network and allow
  # forwarding from it.
  local bridge_if=`ip route | grep ${NAT64_V4_SUBNET_PREFIX}.0.0 | awk '{print $3}'`
  if [[ -n "$bridge_if" ]]; then
    dind::ip6tables-on-hostnet -A FORWARD -i $bridge_if -j ACCEPT
  else
    echo "WARNING! No $(dind::net-name) bridge with NAT64 - unable to setup forwarding/SNAT"
  fi
}
# Remove ip6tables rules for SNAT and forwarding, if they exist.
# Mirror image of dind::setup_external_access_on_host: each rule is checked
# with -S before deletion so missing rules are reported, not errors.
function dind::remove_external_access_on_host {
  if [[ ! ${DIND_ALLOW_AAAA_USE} ]]; then
    return
  fi
  local have_rule
  local main_if="$(ip route | grep default | awk '{print $5}')"
  have_rule="$(dind::ip6tables-on-hostnet -S -t nat | grep "\-o $main_if" || true)"
  if [[ -n "$have_rule" ]]; then
    dind::ip6tables-on-hostnet -t nat -D POSTROUTING -o $main_if -j MASQUERADE
  else
    echo "Skipping delete of ip6tables rule for SNAT, as rule non-existent"
  fi
  if [[ ${IP_MODE} = "dual-stack" ]]; then
    # dual-stack never created a forwarding rule (no NAT64 bridge)
    return
  fi
  local bridge_if="$(ip route | grep ${NAT64_V4_SUBNET_PREFIX}.0.0 | awk '{print $3}')"
  if [[ -n "$bridge_if" ]]; then
    have_rule="$(dind::ip6tables-on-hostnet -S | grep "\-i $bridge_if" || true)"
    if [[ -n "$have_rule" ]]; then
      dind::ip6tables-on-hostnet -D FORWARD -i $bridge_if -j ACCEPT
    else
      echo "Skipping delete of ip6tables rule for forwarding, as rule non-existent"
    fi
  else
    echo "Skipping delete of ip6tables rule for forwarding, as no bridge interface using NAT64"
  fi
}
# Run ip6tables with the given arguments in the host's network namespace,
# using the DinD image so the host itself does not need ip6tables installed
# (/lib/modules is mounted for the netfilter kernel modules).
function dind::ip6tables-on-hostnet {
  docker run --rm --privileged --net=host \
         -v /lib/modules:/lib/modules \
         --entrypoint /sbin/ip6tables \
         "${DIND_IMAGE}" "$@"
}
# Succeed when any one of the given label selectors matches a Ready
# component (see dind::component-ready).
# Fix: quote "${labels[@]}" so selectors are passed through intact instead
# of being re-split on whitespace.
function dind::component-ready-by-labels {
  local labels=("$@")
  local label
  for label in "${labels[@]}"; do
    dind::component-ready "${label}" && return 0
  done
  return 1
}
# dind::wait-for-service-ready SERVICE LABEL...
# Scale the given kube-system deployment to 1 replica, then poll (up to 200
# tries, killing failed pods between polls) until a pod matching any of the
# given label selectors becomes Ready; exits the script on timeout.
# Fix: quote "${labels[@]}" so label selectors survive word splitting.
function dind::wait-for-service-ready {
  local service=$1
  local labels=("${@:2}")
  local ctx="$(dind::context-name)"
  dind::step "Bringing up ${service}"
  # on Travis 'scale' sometimes fails with 'error: Scaling the resource failed
  # with: etcdserver: request timed out; Current resource version 442' here
  dind::retry "${kubectl}" --context "$ctx" scale deployment --replicas=1 -n kube-system "${service}"
  local ntries=200
  while ! dind::component-ready-by-labels "${labels[@]}"; do
    if ((--ntries == 0)); then
      echo "Error bringing up ${service}" >&2
      exit 1
    fi
    echo -n "." >&2
    dind::kill-failed-pods
    sleep 1
  done
  echo "[done]" >&2
}
# Block until kube-proxy (or kube-router, depending on CNI plugin) and all
# nodes report Ready for several consecutive polls, then wait for the DNS
# service and (optionally) the dashboard, and print the dashboard URLs.
function dind::wait-for-ready {
  local app="kube-proxy"
  if [[ ${CNI_PLUGIN} = "kube-router" ]]; then
    app=kube-router
  fi
  dind::step "Waiting for ${app} and the nodes"
  local app_ready
  local nodes_ready
  local n=3    # require 3 consecutive good polls before declaring success
  local ntries=200
  local ctx
  ctx="$(dind::context-name)"
  while true; do
    dind::kill-failed-pods
    if "${kubectl}" --context "$ctx" get nodes 2>/dev/null | grep -q NotReady; then
      nodes_ready=
    else
      nodes_ready=y
    fi
    if dind::component-ready k8s-app=${app}; then
      app_ready=y
    else
      app_ready=
    fi
    if [[ ${nodes_ready} && ${app_ready} ]]; then
      if ((--n == 0)); then
        echo "[done]" >&2
        break
      fi
    else
      # any bad poll resets the consecutive-success counter
      n=3
    fi
    if ((--ntries == 0)); then
      echo "Error waiting for ${app} and the nodes" >&2
      exit 1
    fi
    echo -n "." >&2
    sleep 1
  done
  dind::wait-for-service-ready ${DNS_SERVICE} "k8s-app=kube-dns"
  if [[ ! ${SKIP_DASHBOARD} ]]; then
    local service="kubernetes-dashboard"
    # the dashboard has used both 'app=' and 'k8s-app=' labels across versions
    dind::wait-for-service-ready ${service} "app=${service}" "k8s-app=${service}"
  fi
  dind::retry "${kubectl}" --context "$ctx" get nodes >&2
  if [[ ! ${SKIP_DASHBOARD} ]]; then
    local local_host
    local_host="$( dind::localhost )"
    local base_url="http://${local_host}:$(dind::apiserver-port)/api/v1/namespaces/kube-system/services"
    dind::step "Access dashboard at:" "${base_url}/kubernetes-dashboard:/proxy"
    dind::step "Access dashboard at:" "${base_url}/https:kubernetes-dashboard:/proxy (if version>1.6 and HTTPS enabled)"
  fi
}
# dind::make-kube-router-yaml creates a temp file with contents of the configuration needed for the kube-router CNI
# plugin at a specific version, instead of using the publically available file, which uses the latest version. This
# allows us to control the version used. If/when updating, be sure to update the KUBE_ROUTER_VERSION env variable
# ensure the YAML contents below, reflect the configuration in:
#
# https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter-all-feature.yaml
#
# FUTURE: May be able to remove this, if/when kube-router "latest" is stable, and use the public YAML file instead.
# Prints the temp file's path; the caller is responsible for removing it.
# The heredoc delimiter is unquoted, so ${KUBE_ROUTER_VERSION} expands while
# \${TMP} stays literal for the in-container install script.
function dind::make-kube-router-yaml {
  tmp_yaml=$(mktemp /tmp/kube-router-yaml.XXXXXX)
  cat >${tmp_yaml} <<KUBE_ROUTER_YAML
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-router-cfg
  namespace: kube-system
  labels:
    tier: node
    k8s-app: kube-router
data:
  cni-conf.json: |
    {
      "name":"kubernetes",
      "type":"bridge",
      "bridge":"kube-bridge",
      "isDefaultGateway":true,
      "ipam": {
        "type":"host-local"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-router
    tier: node
  name: kube-router
  namespace: kube-system
spec:
  template:
    metadata:
      labels:
        k8s-app: kube-router
        tier: node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kube-router
      serviceAccount: kube-router
      containers:
      - name: kube-router
        image: cloudnativelabs/kube-router:${KUBE_ROUTER_VERSION}
        imagePullPolicy: Always
        args:
        - --run-router=true
        - --run-firewall=true
        - --run-service-proxy=true
        - --kubeconfig=/var/lib/kube-router/kubeconfig
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        livenessProbe:
          httpGet:
            path: /healthz
            port: 20244
          initialDelaySeconds: 10
          periodSeconds: 3
        resources:
          requests:
            cpu: 250m
            memory: 250Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
        - name: kubeconfig
          mountPath: /var/lib/kube-router
          readOnly: true
      initContainers:
      - name: install-cni
        image: busybox
        imagePullPolicy: Always
        command:
        - /bin/sh
        - -c
        - set -e -x;
          if [ ! -f /etc/cni/net.d/10-kuberouter.conf ]; then
            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
            cp /etc/kube-router/cni-conf.json \${TMP};
            mv \${TMP} /etc/cni/net.d/10-kuberouter.conf;
          fi
        volumeMounts:
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
        - name: kube-router-cfg
          mountPath: /etc/kube-router
      hostNetwork: true
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d
      - name: kube-router-cfg
        configMap:
          name: kube-router-cfg
      - name: kubeconfig
        configMap:
          name: kube-proxy
          items:
          - key: kubeconfig.conf
            path: kubeconfig
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-router
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-router
  namespace: kube-system
rules:
  - apiGroups:
    - ""
    resources:
      - namespaces
      - pods
      - services
      - nodes
      - endpoints
    verbs:
      - list
      - get
      - watch
  - apiGroups:
    - "networking.k8s.io"
    resources:
      - networkpolicies
    verbs:
      - list
      - get
      - watch
  - apiGroups:
    - extensions
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-router
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-router
subjects:
- kind: ServiceAccount
  name: kube-router
  namespace: kube-system
KUBE_ROUTER_YAML
  echo $tmp_yaml
}
# Bring up a full cluster from scratch: tear down any previous one, init the
# master, start NUM_NODES workers sequentially (for predictable IPs), join
# them in parallel, install the selected CNI plugin, the dashboard and DNS
# tweaks, then print a cluster summary.
function dind::up {
  dind::down
  dind::init
  local ctx
  ctx="$(dind::context-name)"
  # pre-create node containers sequentially so they get predictable IPs
  local -a node_containers
  for ((n=1; n <= NUM_NODES; n++)); do
    dind::step "Starting node container:" ${n}
    if ! container_id="$(dind::create-node-container ${n})"; then
      echo >&2 "*** Failed to start node container ${n}"
      exit 1
    else
      node_containers+=(${container_id})
      dind::step "Node container started:" ${n}
    fi
  done
  dind::fix-mounts
  status=0
  local -a pids
  # Join the nodes in parallel; each subshell's failure surfaces via 'wait'.
  for ((n=1; n <= NUM_NODES; n++)); do
    (
      dind::step "Joining node:" ${n}
      container_id="${node_containers[${n}-1]}"
      if ! dind::join ${container_id} ${kubeadm_join_flags}; then
        echo >&2 "*** Failed to start node container ${n}"
        exit 1
      else
        dind::step "Node joined:" ${n}
      fi
    )&
    pids[${n}]=$!
  done
  if ((NUM_NODES > 0)); then
    for pid in ${pids[*]}; do
      wait ${pid}
    done
  else
    # No workers: allow scheduling on the master by removing its taint.
    # FIXME: this may fail depending on k8s/kubeadm version
    # FIXME: check for taint & retry if it's there
    "${kubectl}" --context "$ctx" taint nodes $(dind::master-name) node-role.kubernetes.io/master- || true
  fi
  # Install the requested CNI plugin.
  case "${CNI_PLUGIN}" in
    bridge | ptp)
      dind::create-static-routes
      dind::setup_external_access_on_host
      ;;
    flannel)
      # without --validate=false this will fail on older k8s versions
      dind::retry "${kubectl}" --context "$ctx" apply --validate=false -f "https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml?raw=true"
      ;;
    calico)
      manifest_base=https://docs.projectcalico.org/${CALICO_VERSION:-v3.3}/getting-started/kubernetes/installation
      dind::retry "${kubectl}" --context "$ctx" apply -f ${manifest_base}/hosted/etcd.yaml
      if [ "${CALICO_VERSION:-v3.3}" != master ]; then
        dind::retry "${kubectl}" --context "$ctx" apply -f ${manifest_base}/rbac.yaml
      fi
      dind::retry "${kubectl}" --context "$ctx" apply -f ${manifest_base}/hosted/calico.yaml
      dind::retry "${kubectl}" --context "$ctx" apply -f ${manifest_base}/hosted/calicoctl.yaml
      ;;
    calico-kdd)
      manifest_base=https://docs.projectcalico.org/${CALICO_VERSION:-v3.3}/getting-started/kubernetes/installation
      dind::retry "${kubectl}" --context "$ctx" apply -f ${manifest_base}/hosted/rbac-kdd.yaml
      dind::retry "${kubectl}" --context "$ctx" apply -f ${manifest_base}/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
      ;;
    weave)
      dind::retry "${kubectl}" --context "$ctx" apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(${kubectl} --context "$ctx" version | base64 | tr -d '\n')"
      ;;
    kube-router)
      kube_router_config="$( dind::make-kube-router-yaml )"
      dind::retry "${kubectl}" --context "$ctx" apply -f ${kube_router_config}
      rm "${kube_router_config}"
      # kube-router replaces kube-proxy: remove the daemonset and clean up
      # the iptables rules kube-proxy left behind.
      dind::retry "${kubectl}" --context "$ctx" -n kube-system delete ds kube-proxy
      docker run --privileged --net=host k8s.gcr.io/kube-proxy-amd64:v1.10.2 kube-proxy --cleanup
      ;;
    *)
      echo "Unsupported CNI plugin '${CNI_PLUGIN}'" >&2
      ;;
  esac
  if [[ ! ${SKIP_DASHBOARD} ]]; then
    dind::deploy-dashboard
  fi
  dind::accelerate-kube-dns
  if [[ (${CNI_PLUGIN} != "bridge" && ${CNI_PLUGIN} != "ptp") || ${SKIP_SNAPSHOT} ]]; then
    # This is especially important in case of Calico -
    # the cluster will not recover after snapshotting
    # (at least not after restarting from the snapshot)
    # if Calico installation is interrupted
    dind::wait-for-ready
  fi
  dind::step "Cluster Info"
  echo "Network Mode: ${IP_MODE}"
  echo "Cluster context: $( dind::context-name )"
  echo "Cluster ID: ${CLUSTER_ID}"
  echo "Management CIDR(s): ${mgmt_net_cidrs[@]}"
  echo "Service CIDR/mode: ${SERVICE_CIDR}/${SERVICE_NET_MODE}"
  echo "Pod CIDR(s): ${pod_net_cidrs[@]}"
}
# Mark the mounts that must propagate inside each DinD container (/run,
# /lib/modules and, for source builds, /k8s) as shared, on the master and
# every worker node.
function dind::fix-mounts {
  local node_name
  for ((n=0; n <= NUM_NODES; n++)); do
    if ((n == 0)); then
      node_name="$(dind::master-name)"
    else
      node_name="$(dind::node-name $n)"
    fi
    docker exec "${node_name}" mount --make-shared /run
    if [[ ! ${using_linuxkit} ]]; then
      docker exec "${node_name}" mount --make-shared /lib/modules/
    fi
    # required when building from source
    if [[ ${BUILD_KUBEADM} || ${BUILD_HYPERKUBE} ]]; then
      docker exec "${node_name}" mount --make-shared /k8s
    fi
    # docker exec "${node_name}" mount --make-shared /sys/kernel/debug
  done
}
function dind::snapshot_container {
  # Snapshot the state of one cluster container using the in-container
  # /usr/local/bin/snapshot helper: "prepare" first, then "save" the
  # filesystem delta reported by `docker diff`.
  local container_name="$1"
  # we must pass DIND_CRI here because in case of containerd
  # a special care must be taken to stop the containers during
  # the snapshot
  docker exec -e DIND_CRI="${DIND_CRI}" -i ${container_name} \
         /usr/local/bin/snapshot prepare
  # remove the hidden *plnk directories
  docker diff ${container_name} | grep -v plnk | docker exec -i ${container_name} /usr/local/bin/snapshot save
}
function dind::snapshot {
  # Snapshot the whole cluster: the master container first, then every
  # worker node, then wait until the cluster reports Ready again.
  dind::step "Taking snapshot of the cluster"
  dind::snapshot_container "$(dind::master-name)"
  for ((n=1; n <= NUM_NODES; n++)); do
    dind::snapshot_container "$(dind::node-name $n)"
  done
  dind::wait-for-ready
}
restore_cmd=restore
function dind::restore_container {
  # Run the snapshot restore verb (see ${restore_cmd} above) inside the
  # given container.
  local target="$1"
  docker exec "${target}" /usr/local/bin/snapshot "${restore_cmd}"
}
function dind::restore {
  # Bring the cluster back from previously saved snapshot volumes.
  # Each container is restored in its own background subshell, in parallel.
  local apiserver_port local_host pid pids
  dind::down
  dind::check-dns-service-type
  dind::step "Restoring containers"
  dind::set-master-opts
  local_host="$( dind::localhost )"
  apiserver_port="$( dind::apiserver-port )"
  for ((n=0; n <= NUM_NODES; n++)); do
    (
      if [[ n -eq 0 ]]; then
        # container 0 is the master: publish the apiserver port on the host
        dind::step "Restoring master container"
        dind::restore_container "$(dind::run -r "$(dind::master-name)" 1 "${local_host}:${apiserver_port}:${INTERNAL_APISERVER_PORT}" ${master_opts[@]+"${master_opts[@]}"})"
        dind::verify-image-compatibility "$(dind::master-name)"
        dind::step "Master container restored"
      else
        dind::step "Restoring node container:" ${n}
        if ! container_id="$(dind::create-node-container -r ${n})"; then
          echo >&2 "*** Failed to start node container ${n}"
          exit 1
        else
          dind::restore_container "${container_id}"
          dind::step "Node container restored:" ${n}
        fi
      fi
    )&
    pids[${n}]=$!
  done
  # wait for all parallel restores to complete before wiring up the network
  for pid in ${pids[*]}; do
    wait ${pid}
  done
  if [[ ${CNI_PLUGIN} = "bridge" || ${CNI_PLUGIN} = "ptp" ]]; then
    dind::create-static-routes
    dind::setup_external_access_on_host
  fi
  dind::fix-mounts
  # Recheck kubectl config. It's possible that the cluster was started
  # on this docker from different host
  dind::configure-kubectl
  dind::start-port-forwarder
  dind::wait-for-ready
}
function dind::down {
  # Tear down all cluster containers; undo host-side network configuration
  # for the CNI plugins that installed any.
  dind::remove-images "${DIND_LABEL}"
  if [[ ${CNI_PLUGIN} = "bridge" || ${CNI_PLUGIN} = "ptp" ]]; then
    dind::remove_external_access_on_host
  elif [[ "${CNI_PLUGIN}" = "kube-router" ]]; then
    # only clean up kube-router's host config on a real down/clean, not
    # during the intermediate down performed by restore
    if [[ ${COMMAND} = "down" || ${COMMAND} = "clean" ]]; then
      # FUTURE: Updated pinned version, after verifying operation
      docker run --privileged --net=host cloudnativelabs/kube-router:${KUBE_ROUTER_VERSION} --cleanup-config
    fi
  fi
}
function dind::apiserver-port {
  # Determine (and cache in APISERVER_PORT) the host port the apiserver is,
  # or should be, published on. Falls back to 0 = "let docker pick".
  # APISERVER_PORT is explicitly set
  if [ -n "${APISERVER_PORT:-}" ]
  then
    echo "$APISERVER_PORT"
    return
  fi
  # Get the port from the master
  local master port
  master="$(dind::master-name)"
  # 8080/tcp -> 127.0.0.1:8082 => 8082
  port="$( docker port "$master" 2>/dev/null | awk -F: "/^${INTERNAL_APISERVER_PORT}/{ print \$NF }" )"
  if [ -n "$port" ]
  then
    APISERVER_PORT="$port"
    echo "$APISERVER_PORT"
    return
  fi
  # get a random free port
  APISERVER_PORT=0
  echo "$APISERVER_PORT"
}
function dind::master-name {
  # Container name of the master node (suffixed for multi-cluster setups).
  local suffix
  suffix="$( dind::cluster-suffix )"
  echo "kube-master${suffix}"
}
function dind::node-name {
  # Container name of worker node number $1.
  local node_number="$1"
  local suffix
  suffix="$( dind::cluster-suffix )"
  echo "kube-node-${node_number}${suffix}"
}
function dind::context-name {
  # kubectl context name for this cluster.
  local suffix
  suffix="$( dind::cluster-suffix )"
  echo "dind${suffix}"
}
function dind::remove-volumes {
  # Remove the sys/master/node snapshot volumes belonging to this cluster.
  # docker 1.13+ could filter by label instead:
  #   docker volume ls -q -f label="${DIND_LABEL}"
  local name_pattern volume_id
  name_pattern="^kubeadm-dind-(sys|kube-master|kube-node-[0-9]+)$(dind::cluster-suffix)$"
  docker volume ls -q | (grep -E "$name_pattern" || true) | while read -r volume_id; do
    dind::step "Removing volume:" "${volume_id}"
    docker volume rm "${volume_id}"
  done
}
function dind::remove-images {
  # Force-remove (together with anonymous volumes) every container that
  # carries the given label.
  local label_filter="$1"
  local container_id
  docker ps -a -q --filter=label="${label_filter}" | while read -r container_id; do
    dind::step "Removing container:" "${container_id}"
    docker rm -fv "${container_id}"
  done
}
function dind::remove-cluster {
  # Drop this cluster's entry from the kubectl config, if present.
  # (cluster_name is intentionally left global, matching the original.)
  cluster_name="dind$(dind::cluster-suffix)"
  if ! ${kubectl} config get-clusters | grep -qE "^${cluster_name}$"; then
    return 0
  fi
  dind::step "Removing cluster from config:" "${cluster_name}"
  ${kubectl} config delete-cluster ${cluster_name} 2>/dev/null || true
}
function dind::remove-context {
  # Drop this cluster's context from the kubectl config, if present.
  # (context_name is intentionally left global, matching the original.)
  context_name="$(dind::context-name)"
  if ! ${kubectl} config get-contexts | grep -qE "${context_name}\\s"; then
    return 0
  fi
  dind::step "Removing context from config:" "${context_name}"
  ${kubectl} config delete-context ${context_name} 2>/dev/null || true
}
function dind::start-port-forwarder {
  # Run the user-supplied DIND_PORT_FORWARDER hook (if any) with the
  # apiserver host port as its only argument.
  local forwarder port
  forwarder="${DIND_PORT_FORWARDER:-}"
  if [ -z "$forwarder" ]; then
    return 0
  fi
  if [ ! -x "$forwarder" ]; then
    echo "'${forwarder}' is not executable." >&2
    return 1
  fi
  port="$( dind::apiserver-port )"
  dind::step "+ Setting up port-forwarding for :${port}"
  "$forwarder" "$port"
}
function dind::check-for-snapshot {
  # Succeed only if snapshot volumes exist for the master and every node.
  local names=("$(dind::master-name)")
  local n name
  for ((n=1; n <= NUM_NODES; n++)); do
    names+=("$(dind::node-name ${n})")
  done
  for name in "${names[@]}"; do
    if ! dind::volume-exists "kubeadm-dind-${name}"; then
      return 1
    fi
  done
}
function dind::do-run-e2e {
  # Run the kubernetes e2e test binary inside the build container against
  # this cluster.
  #   $1: non-empty -> run ginkgo in parallel mode
  #   $2: ginkgo focus regexp (already escaped)
  #   $3: ginkgo skip regexp (already escaped)
  local parallel="${1:-}"
  local focus="${2:-}"
  local skip="${3:-}"
  local host="$(dind::localhost)"
  if [[ -z "$using_local_linuxdocker" ]]; then
    host="127.0.0.1"
  fi
  dind::need-source
  local kubeapi test_args term=
  local -a e2e_volume_opts=()
  kubeapi="http://${host}:$(dind::apiserver-port)"
  test_args="--host=${kubeapi}"
  if [[ ${focus} ]]; then
    test_args="--ginkgo.focus=${focus} ${test_args}"
  fi
  if [[ ${skip} ]]; then
    test_args="--ginkgo.skip=${skip} ${test_args}"
  fi
  if [[ ${E2E_REPORT_DIR} ]]; then
    # expose the report directory to the container as /report
    test_args="--report-dir=/report ${test_args}"
    e2e_volume_opts=(-v "${E2E_REPORT_DIR}:/report")
  fi
  dind::make-for-linux n "cmd/kubectl test/e2e/e2e.test vendor/github.com/onsi/ginkgo/ginkgo"
  dind::step "Running e2e tests with args:" "${test_args}"
  dind::set-build-volume-args
  if [ -t 1 ] ; then
    # stdout is a terminal: keep a tty, drop colors, and cap node count
    term="-it"
    test_args="--ginkgo.noColor --num-nodes=2 ${test_args}"
  fi
  docker run \
         --rm ${term} \
         --net=host \
         "${build_volume_args[@]}" \
         -e KUBERNETES_PROVIDER=dind \
         -e KUBE_MASTER_IP="${kubeapi}" \
         -e KUBE_MASTER=local \
         -e KUBERNETES_CONFORMANCE_TEST=y \
         -e GINKGO_PARALLEL=${parallel} \
         ${e2e_volume_opts[@]+"${e2e_volume_opts[@]}"} \
         -w /go/src/k8s.io/kubernetes \
         "${e2e_base_image}" \
         bash -c "cluster/kubectl.sh config set-cluster dind --server='${kubeapi}' --insecure-skip-tls-verify=true &&
cluster/kubectl.sh config set-context dind --cluster=dind &&
cluster/kubectl.sh config use-context dind &&
go run hack/e2e.go -- --v 6 --test --check-version-skew=false --test_args='${test_args}'"
}
function dind::clean {
  # Full cleanup: cluster containers, support containers, snapshot volumes,
  # the cluster network, and kubectl cluster/context config entries.
  dind::ensure-downloaded-kubectl
  dind::down
  dind::remove-images "dind-support$( dind::cluster-suffix )"
  dind::remove-volumes
  local net_name
  net_name="$(dind::net-name)"
  if docker network inspect "$net_name" >&/dev/null; then
    docker network rm "$net_name"
  fi
  dind::remove-cluster
  dind::remove-context
}
function dind::copy-image {
  # Copy a docker image from the host into every container of this cluster.
  # Invoked through the command dispatcher as `dind::copy-image "$@"` where
  # $1 is still the literal "copy-image" -- hence the image name is in $2.
  local image="${2:-}"
  if [[ -z "${image}" ]]; then
    # previously an empty name silently produced a bogus /tmp/save_ file
    echo "copy-image: no image name given" >&2
    return 1
  fi
  local image_path="/tmp/save_${image//\//_}"
  # image_path is a plain file; -f is sufficient (was: rm -fr)
  rm -f "${image_path}"
  # Export the image once on the host ...
  docker save "${image}" -o "${image_path}"
  # ... then load it into the inner docker daemon of each cluster container.
  docker ps -a -q --filter=label="${DIND_LABEL}" | while read -r container_id; do
    docker exec -i "${container_id}" docker load < "${image_path}"
  done
  rm -f "${image_path}"
}
function dind::run-e2e {
  # Run (by default parallel) e2e tests; defaults to the Conformance suite
  # with [Serial] tests skipped.
  local focus="${1:-}"
  local skip="${2:-[Serial]}"
  skip="$(dind::escape-e2e-name "${skip}")"
  if [[ -z "$focus" ]]; then
    focus="\[Conformance\]"
  else
    focus="$(dind::escape-e2e-name "${focus}")"
  fi
  local parallel=y
  if [[ ${DIND_NO_PARALLEL_E2E} ]]; then
    parallel=""
  fi
  dind::do-run-e2e "${parallel}" "${focus}" "${skip}"
}
function dind::run-e2e-serial {
  # Run the serial ([Serial] Conformance) e2e tests, never in parallel.
  local focus="${1:-}"
  local skip="${2:-}"
  skip="$(dind::escape-e2e-name "${skip}")"
  dind::need-source
  if [[ -z "$focus" ]]; then
    focus="\[Serial\].*\[Conformance\]"
  else
    focus="$(dind::escape-e2e-name "${focus}")"
  fi
  dind::do-run-e2e n "${focus}" "${skip}"
  # TBD: specify filter
}
function dind::step {
  # Print a highlighted progress message to stderr.
  # An optional leading "-n" suppresses the trailing newline (as for echo).
  local echo_opts=""
  case "$1" in
    -n) echo_opts="-n"; shift ;;
  esac
  GREEN="$1"   # intentionally global, matching the original
  shift
  if [ -t 2 ]; then
    # stderr is a terminal: bright-white "*", green title, default-color rest
    echo -e ${echo_opts} "\x1B[97m* \x1B[92m${GREEN}\x1B[39m $*" 1>&2
  else
    echo ${echo_opts} "* ${GREEN} $*" 1>&2
  fi
}
function dind::dump {
  # Dump extensive diagnostics for every cluster container to stdout.
  # Sections are delimited with "@@@ name @@@" markers so dind::split-dump
  # can later split the stream into files.
  set +e  # best-effort: keep dumping even when individual commands fail
  echo "*** Dumping cluster state ***"
  for node in $(docker ps --format '{{.Names}}' --filter label="${DIND_LABEL}"); do
    # systemd status + full journal of the k8s-related services on each node
    for service in kubelet.service dindnet.service criproxy.service dockershim.service; do
      if docker exec "${node}" systemctl is-enabled "${service}" >&/dev/null; then
        echo "@@@ service-${node}-${service}.log @@@"
        docker exec "${node}" systemctl status "${service}"
        docker exec "${node}" journalctl -xe -n all -u "${service}"
      fi
    done
    echo "@@@ psaux-${node}.txt @@@"
    docker exec "${node}" ps auxww
    echo "@@@ dockerps-a-${node}.txt @@@"
    docker exec "${node}" docker ps -a
    echo "@@@ ip-a-${node}.txt @@@"
    docker exec "${node}" ip a
    echo "@@@ ip-r-${node}.txt @@@"
    docker exec "${node}" ip r
  done
  local ctx master_name
  master_name="$(dind::master-name)"
  ctx="$(dind::context-name)"
  # per-container logs for every pod in the cluster (node/ns/pod/container
  # tuples produced by the go-template)
  docker exec "$master_name" kubectl get pods --all-namespaces \
         -o go-template='{{range $x := .items}}{{range $x.spec.containers}}{{$x.spec.nodeName}}{{" "}}{{$x.metadata.namespace}}{{" "}}{{$x.metadata.name}}{{" "}}{{.name}}{{"\n"}}{{end}}{{end}}' |
    while read node ns pod container; do
      echo "@@@ pod-${node}-${ns}-${pod}--${container}.log @@@"
      docker exec "$master_name" kubectl logs -n "${ns}" -c "${container}" "${pod}"
    done
  echo "@@@ kubectl-all.txt @@@"
  docker exec "$master_name" kubectl get all --all-namespaces -o wide
  echo "@@@ describe-all.txt @@@"
  docker exec "$master_name" kubectl describe all --all-namespaces
  echo "@@@ nodes.txt @@@"
  docker exec "$master_name" kubectl get nodes -o wide
}
function dind::dump64 {
  # Same as dind::dump, but lzma-compressed and base64-encoded between
  # markers so the dump survives CI log pipelines; decode with split-dump64.
  echo "%%% start-base64 %%%"
  dind::dump | docker exec -i "$(dind::master-name)" /bin/sh -c "lzma | base64 -w 100"
  echo "%%% end-base64 %%%"
}
function dind::split-dump {
  # Read a dind::dump stream on stdin and split it into one file per
  # "@@@ name @@@" section under ./cluster-dump.
  mkdir -p cluster-dump
  cd cluster-dump
  # Marker lines switch awk's current output file; all other lines are
  # appended to it. Output before the first marker goes to /dev/null.
  awk '!/^@@@ .* @@@$/{print >out}; /^@@@ .* @@@$/{out=$2}' out=/dev/null
  ls -l
}
function dind::split-dump64 {
  # Decode a dind::dump64 stream from stdin (between the base64 markers)
  # and split it into files via dind::split-dump.
  decode_opt=-d
  if base64 --help | grep -q '^ *-D'; then
    # Mac OS X
    decode_opt=-D
  fi
  # keep only the marker-delimited region, drop the marker lines themselves
  sed -n '/^%%% start-base64 %%%$/,/^%%% end-base64 %%%$/p' |
    sed '1d;$d' |
    base64 "${decode_opt}" |
    lzma -dc |
    dind::split-dump
}
function dind::proxy {
  # Install an optional extra CA certificate and HTTP(S) proxy settings
  # inside a cluster container, then restart its inner docker daemon so
  # the settings take effect.
  local container_id="$1"
  if [[ ${DIND_CA_CERT_URL} ]] ; then
    dind::step "+ Adding certificate on ${container_id}"
    docker exec ${container_id} /bin/sh -c "cd /usr/local/share/ca-certificates; curl -sSO ${DIND_CA_CERT_URL}"
    docker exec ${container_id} update-ca-certificates
  fi
  if [[ "${DIND_PROPAGATE_HTTP_PROXY}" || "${DIND_HTTP_PROXY}" || "${DIND_HTTPS_PROXY}" || "${DIND_NO_PROXY}" ]]; then
    dind::step "+ Setting *_PROXY for docker service on ${container_id}"
    local proxy_env="[Service]"$'\n'"Environment="
    if [[ "${DIND_PROPAGATE_HTTP_PROXY}" ]]; then
      # take *_PROXY values from container environment
      proxy_env+=$(docker exec ${container_id} env | grep -i _proxy | awk '{ print "\""$0"\""}' | xargs -d'\n')
    else
      if [[ "${DIND_HTTP_PROXY}" ]] ; then proxy_env+="\"HTTP_PROXY=${DIND_HTTP_PROXY}\" "; fi
      if [[ "${DIND_HTTPS_PROXY}" ]] ; then proxy_env+="\"HTTPS_PROXY=${DIND_HTTPS_PROXY}\" "; fi
      if [[ "${DIND_NO_PROXY}" ]] ; then proxy_env+="\"NO_PROXY=${DIND_NO_PROXY}\" "; fi
    fi
    # write a systemd drop-in so dockerd picks the proxy environment up
    docker exec -i ${container_id} /bin/sh -c "cat > /etc/systemd/system/docker.service.d/30-proxy.conf" <<< "${proxy_env}"
    docker exec ${container_id} systemctl daemon-reload
    docker exec ${container_id} systemctl restart docker
  fi
}
function dind::custom-docker-opts {
  # Merge the user-supplied daemon.json file, registry mirror, and
  # insecure-registry settings into /etc/docker/daemon.json inside the
  # container; restart the inner docker daemon only if anything changed.
  local container_id="$1"
  local -a jq=()   # JSON fragments to be merged with jq's "+" operator
  local got_changes=""
  if [[ ! -f ${DIND_DAEMON_JSON_FILE} ]] ; then
    jq[0]="{}"
  else
    jq+=("$(cat ${DIND_DAEMON_JSON_FILE})")
    if [[ ${DIND_DAEMON_JSON_FILE} != "/etc/docker/daemon.json" ]]; then
      got_changes=1
    fi
  fi
  if [[ ${DIND_REGISTRY_MIRROR} ]] ; then
    dind::step "+ Setting up registry mirror on ${container_id}"
    jq+=("{\"registry-mirrors\": [\"${DIND_REGISTRY_MIRROR}\"]}")
    got_changes=1
  fi
  if [[ ${DIND_INSECURE_REGISTRIES} ]] ; then
    dind::step "+ Setting up insecure-registries on ${container_id}"
    jq+=("{\"insecure-registries\": ${DIND_INSECURE_REGISTRIES}}")
    got_changes=1
  fi
  if [[ ${got_changes} ]] ; then
    # join fragments with "+" and let jq evaluate the object merge
    local json=$(IFS="+"; echo "${jq[*]}")
    docker exec -i ${container_id} /bin/sh -c "mkdir -p /etc/docker && jq -n '${json}' > /etc/docker/daemon.json"
    docker exec ${container_id} systemctl daemon-reload
    docker exec ${container_id} systemctl restart docker
  fi
}
# Top-level command dispatcher. Note that "$@" still contains the command
# name when forwarded (dind::copy-image relies on this and reads $2).
COMMAND="${1:-}"
case ${COMMAND} in
  up)
    if [[ ! ( ${DIND_IMAGE} =~ local ) && ! ${DIND_SKIP_PULL:-} ]]; then
      dind::step "Making sure DIND image is up to date"
      docker pull "${DIND_IMAGE}" >&2
    fi
    dind::prepare-sys-mounts
    dind::ensure-kubectl
    if [[ ${SKIP_SNAPSHOT} ]]; then
      force_make_binaries=y dind::up
    elif ! dind::check-for-snapshot; then
      # no snapshot yet: bring the cluster up, then snapshot it for next time
      force_make_binaries=y dind::up
      dind::snapshot
    else
      dind::restore
    fi
    ;;
  reup)
    dind::prepare-sys-mounts
    dind::ensure-kubectl
    if [[ ${SKIP_SNAPSHOT} ]]; then
      force_make_binaries=y dind::up
    elif ! dind::check-for-snapshot; then
      force_make_binaries=y dind::up
      dind::snapshot
    else
      # rebuild binaries and refresh them while restoring from the snapshot
      force_make_binaries=y
      restore_cmd=update_and_restore
      dind::restore
    fi
    ;;
  down)
    dind::down
    ;;
  init)
    shift
    dind::prepare-sys-mounts
    dind::ensure-kubectl
    dind::init "$@"
    ;;
  join)
    shift
    dind::prepare-sys-mounts
    dind::ensure-kubectl
    dind::join "$(dind::create-node-container)" "$@"
    ;;
  # bare)
  #   shift
  #   dind::bare "$@"
  #   ;;
  snapshot)
    shift
    dind::snapshot
    ;;
  restore)
    shift
    dind::restore
    ;;
  clean)
    dind::clean
    ;;
  copy-image)
    dind::copy-image "$@"
    ;;
  e2e)
    shift
    dind::run-e2e "$@"
    ;;
  e2e-serial)
    shift
    dind::run-e2e-serial "$@"
    ;;
  dump)
    dind::dump
    ;;
  dump64)
    dind::dump64
    ;;
  split-dump)
    dind::split-dump
    ;;
  split-dump64)
    dind::split-dump64
    ;;
  apiserver-port)
    dind::apiserver-port
    ;;
  *)
    # All usage output goes to stderr ("clean" and the commented "bare"
    # lines were previously missing the >&2 redirection).
    echo "usage:" >&2
    echo " $0 up" >&2
    echo " $0 reup" >&2
    echo " $0 down" >&2
    echo " $0 init kubeadm-args..." >&2
    echo " $0 join kubeadm-args..." >&2
    # echo " $0 bare container_name [docker_options...]" >&2
    echo " $0 clean" >&2
    echo " $0 copy-image [image_name]" >&2
    echo " $0 e2e [test-name-substring]" >&2
    echo " $0 e2e-serial [test-name-substring]" >&2
    echo " $0 dump" >&2
    echo " $0 dump64" >&2
    echo " $0 split-dump" >&2
    echo " $0 split-dump64" >&2
    exit 1
    ;;
esac
|
<filename>python/plot_mag_residuals.py
#!/usr/bin/env python
"""Plot photometric magnitude residuals between SDSS imaging magnitudes and
BOSS synthetic magnitudes (raw, corrected, and an optional ancillary
reduction).
"""
import argparse
import numpy as np
import matplotlib as mpl
# Non-interactive backend: the script only writes PDF files, no display.
mpl.use('Agg')
mpl.rcParams.update({'font.size': 18})
mpl.rcParams.update({'savefig.dpi': 200})
mpl.rcParams.update({'savefig.bbox': 'tight'})
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from sklearn import linear_model
import scipy.optimize
def find_confidence_interval(x, pdf, confidence_level):
    # Root function for scipy's brentq: total probability mass of the pdf
    # cells above threshold ``x``, minus the requested confidence level.
    return pdf[pdf > x].sum() - confidence_level
def density_contour(xdata, ydata, nbins_x, nbins_y, xlim, ylim, ax=None, **contour_kwargs):
    """Create a density contour plot showing the 68% (1-sigma) contour.

    Parameters
    ----------
    xdata : numpy.ndarray
    ydata : numpy.ndarray
    nbins_x : int
        Number of bins along x dimension
    nbins_y : int
        Number of bins along y dimension
    xlim, ylim : sequence of two floats
        Histogram range along each dimension.
    ax : matplotlib.Axes (optional)
        If supplied, plot the contour to this axis. Otherwise, open a new figure
    contour_kwargs : dict
        kwargs to be passed to pyplot.contour()
    """
    def _excess_mass(threshold, pdf, confidence_level):
        # Root function for brentq: mass above ``threshold`` minus the target.
        return pdf[pdf > threshold].sum() - confidence_level

    # ``density=True`` replaces ``normed=True``, which was removed from
    # modern numpy; the normalization is identical.
    H, xedges, yedges = np.histogram2d(xdata, ydata, bins=(nbins_x, nbins_y),
                                       range=[xlim, ylim], density=True)
    x_bin_sizes = (xedges[1:] - xedges[:-1]).reshape((nbins_x, 1))
    y_bin_sizes = (yedges[1:] - yedges[:-1]).reshape((1, nbins_y))
    # convert the density back to per-cell probability mass (sums to ~1)
    pdf = (H * (x_bin_sizes * y_bin_sizes))

    # Density threshold enclosing 68% of the probability mass. The unused
    # 95%/99% thresholds computed by the original have been dropped.
    one_sigma = scipy.optimize.brentq(_excess_mass, 0., 1., args=(pdf, 0.68))
    levels = [one_sigma]

    X, Y = 0.5*(xedges[1:]+xedges[:-1]), 0.5*(yedges[1:]+yedges[:-1])
    Z = pdf.T
    if ax is None:  # ``is None`` instead of ``== None``
        contour = plt.contour(X, Y, Z, levels=levels, origin="lower", **contour_kwargs)
    else:
        contour = ax.contour(X, Y, Z, levels=levels, origin="lower", **contour_kwargs)
    return contour
def is_outlier(points, thresh=3.5):
    """Return a boolean mask flagging outliers via the modified z-score.

    Parameters
    ----------
    points : numpy.ndarray
        A (numobservations,) or (numobservations, numdimensions) array.
    thresh : float
        Modified z-score threshold; observations whose score (based on the
        median absolute deviation) exceeds it are flagged as outliers.

    Returns
    -------
    numpy.ndarray
        Boolean array of length numobservations.

    References
    ----------
    Iglewicz and Hoaglin (1993), "Volume 16: How to Detect and Handle
    Outliers", The ASQC Basic References in Quality Control:
    Statistical Techniques.
    """
    if points.ndim == 1:
        points = points[:, None]
    center = np.median(points, axis=0)
    distance = np.sqrt(np.sum((points - center) ** 2, axis=-1))
    scale = np.median(distance)
    modified_z = 0.6745 * distance / scale
    return modified_z > thresh
def mad(arr):
    """
    Median Absolute Deviation: a "Robust" version of standard deviation.
    Indices variabililty of the sample.
    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    # drop masked entries if given a masked array (no-op for plain arrays;
    # should be faster to not use masked arrays)
    values = np.ma.array(arr).compressed()
    center = np.median(values)
    return np.median(np.abs(values - center))
def nmad(arr):
    # Normalized MAD: scaled by 1.4826 so it is a consistent estimator of
    # the standard deviation for normally distributed data.
    return 1.4826*mad(arr)
def add_stat_legend(x, prec=2):
    # Draw a stats box (N, mean, median, std, NMAD) in the top-right corner
    # of the current axes; ``prec`` controls the displayed precision.
    textstr = ''
    textstr += '$\mathrm{N}=%d$\n' % len(x)
    textstr += ('$\mathrm{mean}=%.'+str(prec)+'g$\n') % np.nanmean(x)
    textstr += ('$\mathrm{median}=%.'+str(prec)+'g$\n') % np.nanmedian(x)
    textstr += ('$\mathrm{std}=%.'+str(prec)+'g$\n') % np.nanstd(x)
    textstr += ('$\mathrm{NMAD}=%.'+str(prec)+'g$') % nmad(x)
    props = dict(boxstyle='round', facecolor='white')
    # axes-fraction coordinates so the box stays put regardless of data range
    plt.text(0.95, 0.95, textstr, transform=plt.gca().transAxes,
             va='top', ha='right', bbox=props)
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="print verbose output")
parser.add_argument("-o", "--output", type=str, default='mag-residuals-hist',
help="output file base name")
parser.add_argument("--sdss-mags", type=str, default=None,
help="sdss imaging magnitudes")
parser.add_argument("--boss-mags", type=str, default=None,
help="boss synthetic magnitudes")
parser.add_argument("--corrected-mags", type=str, default=None,
help="corrected synthetic magnitudes")
parser.add_argument("--anc-mags", type=str, default=None,
help="ancillary reduction synthetic magnitudes")
parser.add_argument("--correct-ab", action="store_true",
help="correct SDSS mag to AB mag")
args = parser.parse_args()
# these are the bands we're using
bands = 'gri'
# import data and filter missing entries
sdss_mags = np.loadtxt(args.sdss_mags)
boss_mags = np.loadtxt(args.boss_mags)
corrected_mags = np.loadtxt(args.corrected_mags)
if args.anc_mags:
ancillary_mags = np.loadtxt(args.anc_mags)
def get_residuals(x, y):
# filter missing data
mask = np.logical_not(np.any(x == 0, axis=1)) & np.logical_not(np.any(y == 0, axis=1)) #& (ydata_raw[:,0] < 19)
if args.verbose:
print 'Number entries kept: %d' % np.sum(mask)
x = x[mask]
y = y[mask]
# convert sdss mags to ab mags on xdata
#if args.correct_ab:
# sdss_data += [0.012, 0.010, 0.028]
return x-y, x, y
res1, x1, y1 = get_residuals(sdss_mags, boss_mags)
res2, x2, y2 = get_residuals(sdss_mags, corrected_mags)
if args.anc_mags:
res3, x3, y3 = get_residuals(sdss_mags, ancillary_mags)
if args.verbose:
print 'delta mag (band: mean & rms & nmad):'
for i in [0, 1, 2]:
if args.verbose:
print ' %s:' % bands[i]
print '%.3f & %.3f & %.3f' % (np.mean(res1[i]), np.sqrt(np.var(res1[i])), nmad(res1[i]))
print '%.3f & %.3f & %.3f' % (np.mean(res2[i]), np.sqrt(np.var(res2[i])), nmad(res2[i]))
if args.anc_mags:
print '%.3f & %.3f & %.3f' % (np.mean(res3[i]), np.sqrt(np.var(res3[i])), nmad(res3[i]))
# delta mag histograms
fig = plt.figure(figsize=(8,6))
bins = np.linspace(-1,1,50,endpoint=False)
bins += .5*(bins[1]-bins[0])
plt.hist(res1[:, i], bins=bins, histtype='stepfilled', color='red', alpha=0.3, label='BOSS')
plt.hist(res2[:, i], bins=bins, histtype='stepfilled', color='blue', alpha=0.3, label='Corrected BOSS')
if args.anc_mags:
plt.hist(res3[:, i], bins=bins, histtype='step', color='black', linestyle='dashed', label='Ancillary Reduction')
plt.grid(True)
plt.xlim([-1,1])
plt.xlabel(r'$\Delta{%s}$'%bands[i])
plt.ylabel('Counts')
plt.legend(prop={'size':12})
#add_stat_legend(res1, prec=3)
fig.savefig(args.output+'-residuals-%s.pdf' % (bands[i]), bbox_inches='tight')
color_res1 = res1[:, :-1] - res1[:, 1:]
color_res2 = res2[:, :-1] - res2[:, 1:]
if args.anc_mags:
color_res3 = res3[:, :-1] - res3[:, 1:]
colors = ['g-r', 'r-i']
for i in [0, 1]:
if args.verbose:
print ' %s:' % colors[i]
print '%.3f & %.3f & %.3f' % (np.mean(color_res1[i]), np.sqrt(np.var(color_res1[i])), nmad(color_res1[i]))
print '%.3f & %.3f & %.3f' % (np.mean(color_res2[i]), np.sqrt(np.var(color_res2[i])), nmad(color_res2[i]))
if args.anc_mags:
print '%.3f & %.3f & %.3f' % (np.mean(color_res3[i]), np.sqrt(np.var(color_res3[i])), nmad(color_res3[i]))
# delta mag histograms
fig = plt.figure(figsize=(8,6))
xlimit = .5
bins = np.linspace(-xlimit, xlimit, 40, endpoint=False)
bins += .5*(bins[1]-bins[0])
plt.hist(color_res1[:, i], bins=bins, histtype='stepfilled', color='red', alpha=0.3, label='BOSS')
plt.hist(color_res2[:, i], bins=bins, histtype='stepfilled', color='blue', alpha=0.3, label='Corrected BOSS')
if args.anc_mags:
plt.hist(color_res3[:, i], bins=bins, histtype='step', color='black', linestyle='dashed', label='Ancillary Reduction')
plt.grid(True)
plt.xlim([-xlimit, xlimit])
plt.xlabel(r'$\Delta{(%s)}$' % colors[i])
plt.ylabel('Counts')
plt.legend(prop={'size':12})
#add_stat_legend(res1, prec=3)
fig.savefig(args.output+'-residuals-%s.pdf' % (colors[i]), bbox_inches='tight')
fig = plt.figure(figsize=(8,6))
plt.scatter(x1[:, 0], color_res1[:, i], facecolor='red', alpha=0.5, label='BOSS', edgecolor='none')
plt.scatter(x2[:, 0], color_res2[:, i], facecolor='blue', alpha=0.5, label='Corrected BOSS', edgecolor='none')
# if args.anc_mags:
# plt.scatter(x3[:, 0], color_res3[:, i], facecolor='white', alpha=0.5, label='Ancillary Reduction')
contour = density_contour(x1[:, 0], color_res1[:, i], 15, 15, [15.5, 19.5], [-.25, +.4], colors='red', label='BOSS')
# two_sigma = contour.collections[1]
# plt.setp(two_sigma, linestyle='dashed')
contour = density_contour(x2[:, 0], color_res2[:, i], 15, 15, [15.5, 19.5], [-.25, +.4], colors='blue', label='Corrected BOSS')
# two_sigma = contour.collections[1]
# plt.setp(two_sigma, linestyle='dashed')
#contour = density_contour(x3[:, 0], color_res3[:, i], 21, 31, colors='black', label='Ancillary Reduction')
# two_sigma = contour.collections[1]
# plt.setp(two_sigma, linestyle='dashed')
plt.ylim([-xlimit,+xlimit])
plt.ylabel(r'$\Delta{(%s)}$' % colors[i])
plt.xlabel(r'$g$')
plt.legend(prop={'size':12})
plt.grid()
fig.savefig(args.output+'-scatter-%s.pdf' % (colors[i]), bbox_inches='tight')
# Script entry point.
if __name__ == '__main__':
    main()
|
#!/bin/bash -xe
# EC2 user-data bootstrap for an OpenShift (OCP 3.x) node. Expects the
# quickstart helper functions (qs_*) in the script sourced via ${P}, and
# LOG_GROUP, QS_S3URI, OCP_VERSION, RH_CREDS_ARN, CLUSTER_NAME,
# LAUNCH_CONFIG, AWS_STACKNAME and AWS_REGION in the environment.
source ${P}
export INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
# Ship /var/log/messages to CloudWatch Logs via the awslogs agent.
qs_cloudwatch_install
systemctl stop awslogs || true
cat << EOF > /var/awslogs/etc/awslogs.conf
[general]
state_file = /var/awslogs/state/agent-state
[/var/log/messages]
buffer_duration = 5000
log_group_name = ${LOG_GROUP}
file = /var/log/messages
log_stream_name = ${INSTANCE_ID}/var/log/messages
initial_position = start_of_file
datetime_format = %b %d %H:%M:%S
EOF
systemctl start awslogs || true
# Install and enable the SSM agent.
cd /tmp
qs_retry_command 10 yum install -y wget
qs_retry_command 10 wget https://s3-us-west-1.amazonaws.com/amazon-ssm-us-west-1/latest/linux_amd64/amazon-ssm-agent.rpm
qs_retry_command 10 yum install -y ./amazon-ssm-agent.rpm
systemctl start amazon-ssm-agent
systemctl enable amazon-ssm-agent
rm ./amazon-ssm-agent.rpm
# Optional site-specific hook baked into the AMI.
if [ -f /quickstart/pre-install.sh ]
then
    /quickstart/pre-install.sh
fi
qs_enable_epel &> /var/log/userdata.qs_enable_epel.log || true
qs_retry_command 10 yum -y install jq
# Register the instance with Red Hat subscription management.
qs_retry_command 25 aws s3 cp ${QS_S3URI}scripts/redhat_ose-register-${OCP_VERSION}.sh ~/redhat_ose-register.sh
chmod 755 ~/redhat_ose-register.sh
qs_retry_command 25 ~/redhat_ose-register.sh ${RH_CREDS_ARN}
# Cloud-provider config consumed by OpenShift's AWS integration.
mkdir -p /etc/aws/
printf "[Global]\nZone = $(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)\n" > /etc/aws/aws.conf
printf "KubernetesClusterTag='kubernetes.io/cluster/${CLUSTER_NAME}'\n" >> /etc/aws/aws.conf
printf "KubernetesClusterID=owned\n" >> /etc/aws/aws.conf
# m5/c5/t3 (Nitro) instance families expose EBS volumes as NVMe devices.
DOCKER_DEV=/dev/xvdb
INSTANCE_TYPE=$(curl http://169.254.169.254/latest/meta-data/instance-type)
if [[ $(echo ${INSTANCE_TYPE} | grep -c '^m5\|^c5\|^t3') -gt 0 ]] ; then
    DOCKER_DEV=/dev/nvme1n1
fi
# log disk layout
fdisk -l
# Docker + devicemapper thin-pool storage setup is skipped for the etcd
# launch configuration.
if [ "${LAUNCH_CONFIG}" != "OpenShiftEtcdLaunchConfig" ]; then
    qs_retry_command 10 yum install docker-client-1.13.1 docker-common-1.13.1 docker-rhel-push-plugin-1.13.1 docker-1.13.1 -y
    systemctl enable docker.service
    qs_retry_command 20 'systemctl start docker.service'
    echo "CONTAINER_THINPOOL=docker-pool" >> /etc/sysconfig/docker-storage-setup
    echo "DEVS=${DOCKER_DEV}" >> /etc/sysconfig/docker-storage-setup
    echo "VG=docker-vg" >>/etc/sysconfig/docker-storage-setup
    echo "STORAGE_DRIVER=devicemapper" >> /etc/sysconfig/docker-storage-setup
    systemctl stop docker
    rm -rf /var/lib/docker
    docker-storage-setup
    qs_retry_command 10 systemctl start docker
fi
# Run the CloudFormation init metadata, then install the OpenShift node
# package set.
qs_retry_command 10 cfn-init -v --stack ${AWS_STACKNAME} --resource ${LAUNCH_CONFIG} --configsets quickstart --region ${AWS_REGION}
qs_retry_command 10 yum install -y atomic-openshift-docker-excluder atomic-openshift-node \
    atomic-openshift-sdn-ovs ceph-common conntrack-tools dnsmasq glusterfs \
    glusterfs-client-xlators glusterfs-fuse glusterfs-libs iptables-services \
    iscsi-initiator-utils iscsi-initiator-utils-iscsiuio tuned-profiles-atomic-openshift-node
systemctl restart dbus
systemctl restart dnsmasq
# dbus can take a moment to recreate its socket after restart
qs_retry_command 25 ls /var/run/dbus/system_bus_socket
systemctl restart NetworkManager
systemctl restart systemd-logind
# Optional site-specific post-install hook.
if [ -f /quickstart/post-install.sh ]
then
    /quickstart/post-install.sh &> /var/log/post-install.log
fi
|
class Waypoint {
  /**
   * A point on a path together with the direction of travel through it.
   *
   * @param {number} x     - x coordinate of the waypoint
   * @param {number} y     - y coordinate of the waypoint
   * @param {number} dir_x - x component of the travel direction
   * @param {number} dir_y - y component of the travel direction
   */
  constructor(x, y, dir_x, dir_y) {
    this.point = new Point(x, y);
    this.dir_x = dir_x;
    this.dir_y = dir_y;
  }
}
|
-- Look up a single product row by its ID.
--   i_product_id : ID of the product row to fetch
--   o_product    : OUT, the row's PRODUCT column (NULL when no row matches)
--   o_price      : OUT, the row's PRICE column (NULL when no row matches)
CREATE OR REPLACE PROCEDURE get_product (
    i_product_id IN NUMBER,
    o_product OUT PRODUCT,
    o_price OUT NUMBER)
IS
BEGIN
    SELECT P.PRODUCT, P.PRICE
    INTO o_product, o_price
    FROM product P
    WHERE P.PRODUCT_ID = i_product_id;
EXCEPTION
    -- No matching row: return NULLs instead of raising to the caller.
    WHEN NO_DATA_FOUND THEN
        o_product := NULL;
        o_price := NULL;
END;
#!/bin/bash
set -e
print_usage() {
  # Print the help text, then exit with the status code given in $1.
  echo "Usage: $(basename $0) [options]"
  echo -e "Creates a Ubuntu root file system image.\n"
  echo -e "  --help\t\t\tDisplay this information."
  echo -e "  --arch {armhf|arm64}\t\tSelects architecture of rootfs image."
  echo -e "  --distro {bionic|focal}\tSelects Ubuntu distribution of rootfs image."
  echo -e "  --size n{K|M|G}\t\tSets size of rootfs image to n Kilo, Mega or Giga bytes."
  exit "$1"
}
invalid_arg() {
  # Report an unrecognized option on stderr and exit non-zero via usage.
  echo "ERROR: Unrecognized argument: $1" >&2
  print_usage 1
}
update_repositories() {
  # Ask for confirmation (single keypress), then refresh the apt indexes;
  # any key other than y/Y aborts the whole script.
  echo -e "\nUpdating apt repositories. "
  echo -e "\nPress 'y' to continue or any other key to exit..."
  read -s -n 1 user_input
  case "$user_input" in
    y|Y) sudo apt update ;;
    *)   exit ;;
  esac
}
# Parse options
while [[ $# -gt 0 ]]; do
    case "${END_OF_OPT}${1}" in
        --help) print_usage 0 ;;
        --arch) rfs_arch=$2; shift;;
        --distro) rfs_distro=$2; shift;;
        --size) rfs_size=$2; shift;;
        *) invalid_arg "$1" ;;
    esac
    shift
done
# Validate that all mandatory options were supplied and look sane.
if [ -z "$rfs_arch" ]; then
    echo "Missing architecture"
    print_usage 1
fi
if [ -z "$rfs_distro" ]; then
    echo "Missing distribution"
    print_usage 1
fi
if [ -z "$rfs_size" ]; then
    echo "Missing size"
    print_usage 1
fi
if [[ "$rfs_arch" != "arm64" && "$rfs_arch" != "armhf" ]]; then
    echo "Invalid architecture: $rfs_arch"
    print_usage 1
fi
# e.g. 512M or 4G. NOTE(review): the '|' inside the bracket expression is a
# literal character, so e.g. "4|" would also match -- confirm intended.
pat='^[0-9]+[K|M|G]$'
if [[ ! $rfs_size =~ $pat ]]; then
    echo "Invalid size: $rfs_size"
    print_usage 1
fi
update_repositories
echo "Installing build dependencies ..."
# NOTE(review): no -y flag here, so apt-get may prompt interactively.
sudo apt-get install debootstrap qemu-user-static schroot qemu-utils
image_name=$rfs_distro-$rfs_arch-"rootfs"
echo "Creating $rfs_distro ($rfs_arch) root file system ..."
echo "Image name: $image_name.img"
echo "Image size: $rfs_size"
# Create and format the raw image, then debootstrap Ubuntu into it via QEMU.
qemu-img create $image_name.img $rfs_size
mkfs.ext4 $image_name.img
mkdir $image_name.dir
sudo mount -o loop $image_name.img $image_name.dir
sudo qemu-debootstrap --arch $rfs_arch $rfs_distro $image_name.dir
sudo chroot $image_name.dir locale-gen en_US.UTF-8
# Enable the restricted/multiverse/universe components inside the image.
sudo chroot $image_name.dir sed -i \
    's/main/main restricted multiverse universe/g' /etc/apt/sources.list
sudo chroot $image_name.dir sed -i '$ a\nameserver 8.8.8.8' /etc/resolv.conf
sudo chroot $image_name.dir apt update
sudo chroot $image_name.dir apt -y install ssh bash-completion
# Create a user matching the invoking host user, with sudo rights
# (adduser prompts for the password interactively).
sudo chroot $image_name.dir adduser --gecos "" $USER
sudo chroot $image_name.dir adduser $USER sudo
sudo umount $image_name.dir
rmdir $image_name.dir
|
#!/usr/bin/env node
/*!
* Copyright 2020 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
/* Dependencies */
import { appendFileSync } from 'fs';
import { join } from 'path';
import FSConfig from 'fs-config';
import { QBTable } from 'qb-table';
import { QuickBase } from 'quickbase';
import { hash, compare } from 'bcrypt';
import { createInterface } from 'readline';
/* Config */
// Load every file under ../config and type the result: QuickBase connection
// credentials, the users-table schema (dbid + field ids), and the bcrypt
// cost factor.
const fsConfig = new FSConfig();
const config = fsConfig.loadDirSync<{
	quickbase: {
		connection: {
			realm: string;
			userToken: string;
		},
		users: {
			dbid: string;
			fids: {
				recordid: number;
				primaryKey: number;
				username: number;
				domain: number;
				password: number;
				active: number;
			}
		}
	},
	encryption: {
		saltRounds: number;
	}
}>(join(__dirname, '..', 'config'));
/* Main */
// Shared QuickBase client and a typed handle on the users table.
const qb = new QuickBase(config.quickbase.connection);
const usersTable = new QBTable({
	quickbase: qb,
	dbid: config.quickbase.users.dbid,
	fids: config.quickbase.users.fids
});
const main = async (line: string) => {
	// Handle one line of the prosody external-auth protocol: run the
	// requested action, report the outcome on stdout ("1" success / "0"
	// failure), and exit with a matching status code.
	log(`Received line: ${line}`);
	let ok = false;
	try {
		ok = await parseLine(line);
		log(`Line Results: ${ok}`);
	}catch(err){
		log(`Line Error: ${err.message}`);
	}
	console.log(ok ? 1 : 0);
	process.exit(ok ? 0 : 1);
};
const parseLine = async (line: string): Promise<boolean> => {
	// Protocol format: action:username:domain:password. The password may
	// itself contain ":", so everything after the third colon belongs to it.
	const [ action, username, domain, ...passwordParts ] = line.split(':');
	const password = passwordParts.join(':');
	switch(action){
		case 'auth': return auth(username, domain, password);
		case 'isuser': return isUser(username, domain);
		case 'setpass': return setPass(username, domain, password);
		case 'register': return register(username, domain, password);
		default: throw new Error(`Unknown prosody protocol send: ${line}`);
	}
};
const auth = async (username: string, domain: string, password: string) => {
	// Authenticate: the user must exist, be active, and the supplied
	// password must match the stored bcrypt hash.
	const user = await getUser(username, domain);
	if(!user.get('active')){
		throw new Error(`User ${username} is not active`);
	}
	const matches = await compare(password, user.get('password'));
	if(!matches){
		throw new Error('Invalid password');
	}
	return true;
};
const isUser = async (username: string, domain: string) => {
	// Report whether the user exists. getUser() throws when there is no
	// matching record, so the original `!!(await getUser(...))` could never
	// evaluate to false -- translate the "not found" error into a plain
	// `false` so callers (the "isuser" action, register()) get a boolean.
	try {
		await getUser(username, domain);
		return true;
	}catch(notFound){
		return false;
	}
};
const setPass = async (username: string, domain: string, password: string) => {
	// Hash and store a new password for an existing user; true on success.
	const user = await getUser(username, domain);
	const hashed = await hash(password, config.encryption.saltRounds);
	user.set('password', hashed);
	const saved = await user.save([ 'password' ]);
	return saved.password === hashed;
};
const getUser = async (username: string, domain: string) => {
	// Fetch the single user record matching username AND domain; throws
	// when no record exists.
	// NOTE(review): username/domain are interpolated directly into the
	// query string -- this looks injectable if they can contain quotes or
	// braces; confirm upstream sanitization or escape here.
	const results = await usersTable.runQuery({
		where: [
			`{'${usersTable.getFid('username')}'.EX.'${username}'}`,
			`{'${usersTable.getFid('domain')}'.EX.'${domain}'}`
		].join('AND')
	});
	const record = results.records[0];
	if(!record){
		throw new Error(`User ${username} does not exist in ${domain}`);
	}
	return record;
};
/**
 * Create a new active user record with a hashed password.
 * Throws when the username already exists in the domain.
 */
const register = async (username: string, domain: string, password: string) => {
	let exists = false;

	try {
		exists = await isUser(username, domain);
	}catch(ignore){}

	if(exists){
		throw new Error(`User ${username} already exists in ${domain}`);
	}

	const hashed = await hash(password, config.encryption.saltRounds);

	await usersTable.upsertRecord({
		username,
		domain,
		password: hashed,
		active: true
	}, true);

	return true;
};
/**
 * Append a UTC-timestamped line to the external-auth log file.
 * Logging failures are reported to stderr but never interrupt the caller.
 */
const log = (text: string) => {
	const stamp = new Date().toUTCString();

	try {
		appendFileSync('/var/log/prosody/external-auth.log', `${stamp}: ${text}\n`);
	}catch(err){
		console.error(err);
	}
};
/* Bang */
// Invoked with CLI arguments: treat them as a single protocol line,
// re-joining on ':' in case the caller's shell split the line apart.
// Otherwise run as prosody's long-lived external-auth child process,
// handling one protocol line per stdin line.
if(process.argv.length > 2){
	if(process.argv.length === 3){
		main(process.argv[2]);
	}else{
		main(process.argv.slice(2).join(':'));
	}
}else{
	const readline = createInterface({
		input: process.stdin,
		output: process.stdout,
		terminal: false
	});

	readline.on('line', main);
}
|
/*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.zuul.netty.connectionpool;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import com.google.common.net.InetAddresses;
import com.google.common.truth.Truth;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.Builder;
import com.netflix.client.config.DefaultClientConfigImpl;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.zuul.discovery.DiscoveryResult;
import com.netflix.zuul.discovery.DynamicServerResolver;
import com.netflix.zuul.discovery.NonDiscoveryServer;
import com.netflix.zuul.netty.server.Server;
import com.netflix.zuul.origins.OriginName;
import com.netflix.zuul.passport.CurrentPassport;
import io.netty.channel.DefaultEventLoop;
import io.netty.channel.EventLoop;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.Promise;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.SocketAddress;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/**
* Tests for {@link DefaultClientChannelManager}. These tests don't use IPv6 addresses because {@link InstanceInfo} is
* not capable of expressing them.
*/
/**
 * Tests for {@link DefaultClientChannelManager}. These tests don't use IPv6 addresses because {@link InstanceInfo} is
 * not capable of expressing them.
 */
@RunWith(JUnit4.class)
public class DefaultClientChannelManagerTest {

    /** A resolved discovery server maps to its concrete IP address and port. */
    @Test
    public void pickAddressInternal_discovery() {
        InstanceInfo instanceInfo =
                Builder.newBuilder().setAppName("app").setHostName("192.168.0.1").setPort(443).build();
        DiscoveryResult s = DiscoveryResult.from(instanceInfo, true);

        SocketAddress addr = DefaultClientChannelManager.pickAddressInternal(s, OriginName.fromVip("vip"));

        Truth.assertThat(addr).isInstanceOf(InetSocketAddress.class);
        InetSocketAddress socketAddress = (InetSocketAddress) addr;

        assertEquals(InetAddresses.forString("192.168.0.1"), socketAddress.getAddress());
        assertEquals(443, socketAddress.getPort());
    }

    /** A hostname ("localhost") from discovery is resolved to an actual address. */
    @Test
    public void pickAddressInternal_discovery_unresolved() {
        InstanceInfo instanceInfo =
                Builder.newBuilder().setAppName("app").setHostName("localhost").setPort(443).build();
        DiscoveryResult s = DiscoveryResult.from(instanceInfo, true);

        SocketAddress addr = DefaultClientChannelManager.pickAddressInternal(s, OriginName.fromVip("vip"));

        Truth.assertThat(addr).isInstanceOf(InetSocketAddress.class);
        InetSocketAddress socketAddress = (InetSocketAddress) addr;

        assertTrue(socketAddress.toString(), socketAddress.getAddress().isLoopbackAddress());
        assertEquals(443, socketAddress.getPort());
    }

    /** A non-discovery server with a literal IP maps to that IP and port. */
    @Test
    public void pickAddressInternal_nonDiscovery() {
        NonDiscoveryServer s = new NonDiscoveryServer("192.168.0.1", 443);

        SocketAddress addr = DefaultClientChannelManager.pickAddressInternal(s, OriginName.fromVip("vip"));

        Truth.assertThat(addr).isInstanceOf(InetSocketAddress.class);
        InetSocketAddress socketAddress = (InetSocketAddress) addr;

        assertEquals(InetAddresses.forString("192.168.0.1"), socketAddress.getAddress());
        assertEquals(443, socketAddress.getPort());
    }

    /** A non-discovery server with a hostname is resolved to an actual address. */
    @Test
    public void pickAddressInternal_nonDiscovery_unresolved() {
        NonDiscoveryServer s = new NonDiscoveryServer("localhost", 443);

        SocketAddress addr = DefaultClientChannelManager.pickAddressInternal(s, OriginName.fromVip("vip"));

        Truth.assertThat(addr).isInstanceOf(InetSocketAddress.class);
        InetSocketAddress socketAddress = (InetSocketAddress) addr;

        assertTrue(socketAddress.toString(), socketAddress.getAddress().isLoopbackAddress());
        assertEquals(443, socketAddress.getPort());
    }

    /** When resolution yields no server, the serverRef is still populated with EMPTY. */
    @Test
    public void updateServerRefOnEmptyDiscoveryResult() {
        OriginName originName = OriginName.fromVip("vip", "test");
        final DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        final DynamicServerResolver resolver = mock(DynamicServerResolver.class);

        when(resolver.resolve(any())).thenReturn(DiscoveryResult.EMPTY);

        final DefaultClientChannelManager clientChannelManager = new DefaultClientChannelManager(originName,
                clientConfig, resolver, new DefaultRegistry());

        final AtomicReference<DiscoveryResult> serverRef = new AtomicReference<>();

        // FIX: keep a handle on the event loop so its thread can be stopped after the test
        // (the original leaked the DefaultEventLoop thread).
        final DefaultEventLoop eventLoop = new DefaultEventLoop();
        try {
            final Promise<PooledConnection> promise = clientChannelManager
                    .acquire(eventLoop, null, CurrentPassport.create(), serverRef, new AtomicReference<>());

            Truth.assertThat(promise.isSuccess()).isFalse();
            Truth.assertThat(serverRef.get()).isSameInstanceAs(DiscoveryResult.EMPTY);
        } finally {
            eventLoop.shutdownGracefully();
        }
    }

    /** When resolution yields a server, the serverRef is populated with exactly that result. */
    @Test
    public void updateServerRefOnValidDiscoveryResult() {
        OriginName originName = OriginName.fromVip("vip", "test");
        final DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();
        final DynamicServerResolver resolver = mock(DynamicServerResolver.class);
        final InstanceInfo instanceInfo = Builder.newBuilder()
                .setAppName("server-equality")
                .setHostName("server-equality")
                .setPort(7777).build();
        final DiscoveryResult discoveryResult = DiscoveryResult.from(instanceInfo, false);

        when(resolver.resolve(any())).thenReturn(discoveryResult);

        final DefaultClientChannelManager clientChannelManager = new DefaultClientChannelManager(originName,
                clientConfig, resolver, new DefaultRegistry());

        final AtomicReference<DiscoveryResult> serverRef = new AtomicReference<>();

        // FIX: shut the event loop down afterwards instead of leaking its thread.
        final DefaultEventLoop eventLoop = new DefaultEventLoop();
        try {
            //TODO(argha-c) capture and assert on the promise once we have a dummy with ServerStats initialized
            clientChannelManager
                    .acquire(eventLoop, null, CurrentPassport.create(), serverRef, new AtomicReference<>());

            Truth.assertThat(serverRef.get()).isSameInstanceAs(discoveryResult);
        } finally {
            eventLoop.shutdownGracefully();
        }
    }

    /**
     * End-to-end pool lifecycle against a real local ServerSocket: init, acquire a
     * connection, release it back to the pool, and shut everything down.
     */
    @Test
    public void initializeAndShutdown() throws Exception {
        final String appName = "app-" + UUID.randomUUID();
        final ServerSocket serverSocket = new ServerSocket(0);
        final InetSocketAddress serverSocketAddress = (InetSocketAddress) serverSocket.getLocalSocketAddress();
        final String serverHostname = serverSocketAddress.getHostName();
        final int serverPort = serverSocketAddress.getPort();
        final OriginName originName = OriginName.fromVipAndApp("vip", appName);
        final DefaultClientConfigImpl clientConfig = new DefaultClientConfigImpl();

        Server.defaultOutboundChannelType.set(NioSocketChannel.class);

        final InstanceInfo instanceInfo = Builder.newBuilder()
                .setAppName(appName)
                .setHostName(serverHostname)
                .setPort(serverPort)
                .build();
        DiscoveryResult discoveryResult = DiscoveryResult.from(instanceInfo, true);

        final DynamicServerResolver resolver = mock(DynamicServerResolver.class);
        when(resolver.resolve(any())).thenReturn(discoveryResult);
        when(resolver.hasServers()).thenReturn(true);

        final Registry registry = new DefaultRegistry();
        final DefaultClientChannelManager clientChannelManager = new DefaultClientChannelManager(originName,
                clientConfig, resolver, registry);

        final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(10);
        final EventLoop eventLoop = eventLoopGroup.next();

        clientChannelManager.init();

        Truth.assertThat(clientChannelManager.getConnsInUse()).isEqualTo(0);

        final Promise<PooledConnection> promiseConn = clientChannelManager.acquire(eventLoop);
        promiseConn.await(200, TimeUnit.MILLISECONDS);
        assertTrue(promiseConn.isDone());
        assertTrue(promiseConn.isSuccess());

        final PooledConnection connection = promiseConn.get();
        assertTrue(connection.isActive());
        assertFalse(connection.isInPool());

        Truth.assertThat(clientChannelManager.getConnsInUse()).isEqualTo(1);

        final boolean releaseResult = clientChannelManager.release(connection);
        assertTrue(releaseResult);
        assertTrue(connection.isInPool());

        Truth.assertThat(clientChannelManager.getConnsInUse()).isEqualTo(0);

        clientChannelManager.shutdown();
        serverSocket.close();
        // FIX: the 10-thread event loop group was previously leaked.
        eventLoopGroup.shutdownGracefully();
    }
}
|
#!/bin/sh
# conda build script: compile HDF-EOS5 against the conda-provided HDF5.
#
# FIX: abort on the first failing command. Without `set -e`, a failing
# ./configure or make was silently ignored and `make install` ran anyway,
# producing a broken package.
set -e

export CC=${PREFIX}/bin/h5cc
export DYLD_FALLBACK_LIBRARY_PATH=${PREFIX}/lib
# Position-independent code so the static archives can be linked into shared libs.
export CFLAGS="-fPIC $CFLAGS"
export HDF5_LDFLAGS="-L ${PREFIX}/lib"
./configure --prefix=${PREFIX} \
            --with-hdf5=${PREFIX} \
            --with-zlib=${PREFIX}
make
# skip "make check" because sample program he5_pt_readattrs is failing:
# make[2]: *** [pt_write_test] Segmentation fault (core dumped)
#make check
make install
# The top-level install does not ship the headers; install them explicitly.
pushd include
make install-includeHEADERS
popd
# We can remove this when we start using the new conda-build.
find $PREFIX -name '*.la' -delete
|
#!/bin/bash
# Launch the ContigFilter KBase service server with the bundled lib/ on the
# Python module search path. $1 $2 $3 are forwarded to the server verbatim.
script_dir=$(dirname "$(readlink -f "$0")")
# FIX: the original exported PYTHONPATH=$script_dir/../lib:$PATH:$PYTHONPATH,
# splicing the executable search path ($PATH) into the Python module path.
# Only the lib directory (plus any pre-existing PYTHONPATH) belongs here.
export PYTHONPATH="$script_dir/../lib:$PYTHONPATH"
exec python -u "$script_dir/../lib/pranjan77_ContigFilter/pranjan77_ContigFilterServer.py" "$1" "$2" "$3"
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Requirements
# - Ruby >= 2.3
# - Maven >= 3.3.9
# - JDK >=7
# - gcc >= 4.8
# - Node.js >= 11.12 (best way is to use nvm)
# - Go >= 1.11
#
# If using a non-system Boost, set BOOST_ROOT and add Boost libraries to
# LD_LIBRARY_PATH.
#
# To reuse build artifacts between runs set ARROW_TMPDIR environment variable to
# a directory where the temporary files should be placed to, note that this
# directory is not cleaned up automatically.
# Parse the three required positional arguments:
#   ARTIFACT  - what kind of RC artifact to verify (source|binaries|wheels)
#   VERSION   - release version (X.Y.Z)
#   RC_NUMBER - release candidate number
case $# in
  3) ARTIFACT="$1"
     VERSION="$2"
     RC_NUMBER="$3"
     case $ARTIFACT in
       source|binaries|wheels) ;;
       *) echo "Invalid argument: '${ARTIFACT}', valid options are \
'source', 'binaries', or 'wheels'"
          exit 1
          ;;
     esac
     ;;
  *) echo "Usage: $0 source|binaries X.Y.Z RC_NUMBER"
     exit 1
     ;;
esac

# Fail fast: abort on errors, echo every command, and propagate failures
# that happen inside pipelines (several checks below rely on this).
set -e
set -x
set -o pipefail

# Directory containing this script, and the arrow repository root two levels up.
SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" && pwd)"
ARROW_DIR="$(dirname $(dirname ${SOURCE_DIR}))"
# Succeed (exit status 0) only when the CUDA toolchain is installed and
# nvidia-smi reports at least one GPU.
detect_cuda() {
  if ! (which nvcc && which nvidia-smi) > /dev/null; then
    return 1
  fi

  local n_gpus
  n_gpus=$(nvidia-smi --list-gpus | wc -l)
  # Arithmetic yields 1 (failure) when no GPU is listed, 0 otherwise.
  return $((n_gpus < 1))
}
# Build options for the C++ library
# Enable CUDA automatically when a GPU is present, unless the caller
# already chose via the ARROW_CUDA environment variable.
if [ -z "${ARROW_CUDA:-}" ] && detect_cuda; then
  ARROW_CUDA=ON
fi
: ${ARROW_CUDA:=OFF}
: ${ARROW_FLIGHT:=ON}

# Apache dist server directory that hosts the release candidate artifacts.
ARROW_DIST_URL='https://dist.apache.org/repos/dist/dev/arrow'
# Download one file ($1, a path relative to ARROW_DIST_URL) into the
# current directory, keeping its remote name. Fails on HTTP errors.
download_dist_file() {
  curl --silent --show-error --fail --location --remote-name $ARROW_DIST_URL/$1
}
# Download one file ($1) from this release candidate's subdirectory.
download_rc_file() {
  download_dist_file apache-arrow-${VERSION}-rc${RC_NUMBER}/$1
}
# Import the Arrow release managers' public keys so artifact signatures
# can be verified with gpg.
import_gpg_keys() {
  download_dist_file KEYS
  gpg --import KEYS
}
# Download the source tarball plus its detached signature and checksum
# files for this RC, then verify all three.
fetch_archive() {
  local dist_name=$1
  download_rc_file ${dist_name}.tar.gz
  download_rc_file ${dist_name}.tar.gz.asc
  download_rc_file ${dist_name}.tar.gz.sha256
  download_rc_file ${dist_name}.tar.gz.sha512
  gpg --verify ${dist_name}.tar.gz.asc ${dist_name}.tar.gz
  shasum -a 256 -c ${dist_name}.tar.gz.sha256
  shasum -a 512 -c ${dist_name}.tar.gz.sha512
}
# Verify the GPG signature and checksum files accompanying every artifact
# found under directory $1.
# NOTE: the `while` body runs in a pipeline subshell, so `exit 1` aborts
# only the subshell; with `set -e -o pipefail` active, that still fails
# the whole script.
verify_dir_artifact_signatures() {
  # verify the signature and the checksums of each artifact
  find $1 -name '*.asc' | while read sigfile; do
    artifact=${sigfile/.asc/}
    gpg --verify $sigfile $artifact || exit 1

    # go into the directory because the checksum files contain only the
    # basename of the artifact
    pushd $(dirname $artifact)
    base_artifact=$(basename $artifact)
    # A .sha256 file is optional for some artifacts; .sha512 is mandatory.
    if [ -f $base_artifact.sha256 ]; then
      shasum -a 256 -c $base_artifact.sha256 || exit 1
    fi
    shasum -a 512 -c $base_artifact.sha512 || exit 1
    popd
  done
}
# Download every binary artifact of this RC and verify the signatures and
# checksums; no artifact is installed or executed here.
test_binary() {
  local download_dir=binaries
  mkdir -p ${download_dir}

  python $SOURCE_DIR/download_rc_binaries.py $VERSION $RC_NUMBER --dest=${download_dir}

  verify_dir_artifact_signatures ${download_dir}
}
# Verify the APT (Debian/Ubuntu) packages by installing them inside docker
# containers for every supported distribution/architecture combination.
test_apt() {
  for target in "debian:stretch" \
                "arm64v8/debian:stretch" \
                "debian:buster" \
                "arm64v8/debian:buster" \
                "ubuntu:xenial" \
                "arm64v8/ubuntu:xenial" \
                "ubuntu:bionic" \
                "arm64v8/ubuntu:bionic" \
                "ubuntu:eoan" \
                "arm64v8/ubuntu:eoan" \
                "ubuntu:focal" \
                "arm64v8/ubuntu:focal"; do \
    # We can't build some arm64 binaries by Crossbow for now.
    if [ "${target}" = "arm64v8/debian:stretch" ]; then continue; fi
    if [ "${target}" = "arm64v8/debian:buster" ]; then continue; fi
    if [ "${target}" = "arm64v8/ubuntu:eoan" ]; then continue; fi
    if [ "${target}" = "arm64v8/ubuntu:focal" ]; then continue; fi
    # arm64 images need a native aarch64 host or qemu user-mode emulation.
    case "${target}" in
      arm64v8/*)
        if [ "$(arch)" = "aarch64" -o -e /usr/bin/qemu-aarch64-static ]; then
          : # OK
        else
          continue
        fi
        ;;
    esac
    if ! docker run -v "${SOURCE_DIR}"/../..:/arrow:delegated \
           "${target}" \
           /arrow/dev/release/verify-apt.sh \
           "${VERSION}" \
           "yes" \
           "${BINTRAY_REPOSITORY}"; then
      echo "Failed to verify the APT repository for ${target}"
      exit 1
    fi
  done
}
# Verify the Yum (CentOS) packages by installing them inside docker
# containers for every supported distribution/architecture combination.
test_yum() {
  for target in "centos:6" \
                "centos:7" \
                "arm64v8/centos:7" \
                "centos:8" \
                "arm64v8/centos:8"; do
    # We can't build some arm64 binaries by Crossbow for now.
    if [ "${target}" = "arm64v8/centos:8" ]; then continue; fi
    # arm64 images need a native aarch64 host or qemu user-mode emulation.
    case "${target}" in
      arm64v8/*)
        if [ "$(arch)" = "aarch64" -o -e /usr/bin/qemu-aarch64-static ]; then
          : # OK
        else
          continue
        fi
        ;;
    esac
    if ! docker run -v "${SOURCE_DIR}"/../..:/arrow:delegated \
           "${target}" \
           /arrow/dev/release/verify-yum.sh \
           "${VERSION}" \
           "yes" \
           "${BINTRAY_REPOSITORY}"; then
      echo "Failed to verify the Yum repository for ${target}"
      exit 1
    fi
  done
}
# Create (or reuse) the sandbox directory for all verification work.
# When ARROW_TMPDIR is unset, a fresh temp dir is created and removed on
# exit if verification succeeded (TEST_SUCCESS=yes); on failure it is kept
# so the user can inspect the leftovers.
setup_tempdir() {
  cleanup() {
    if [ "${TEST_SUCCESS}" = "yes" ]; then
      rm -fr "${ARROW_TMPDIR}"
    else
      echo "Failed to verify release candidate. See ${ARROW_TMPDIR} for details."
    fi
  }

  if [ -z "${ARROW_TMPDIR}" ]; then
    # clean up automatically if ARROW_TMPDIR is not defined
    ARROW_TMPDIR=$(mktemp -d -t "$1.XXXXX")
    trap cleanup EXIT
  else
    # don't clean up automatically
    mkdir -p "${ARROW_TMPDIR}"
  fi
}
# Setup short-lived miniconda for Python and integration tests
# Installs miniconda under ./test-miniconda (reused across runs when
# ARROW_TMPDIR is preserved) and activates an `arrow-test` environment
# with the Python build/test prerequisites.
setup_miniconda() {
  if [ "$(uname)" == "Darwin" ]; then
    MINICONDA_URL=https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
  else
    MINICONDA_URL=https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
  fi

  MINICONDA=$PWD/test-miniconda

  if [ ! -d "${MINICONDA}" ]; then
    # Setup miniconda only if the directory doesn't exist yet
    wget -O miniconda.sh $MINICONDA_URL
    bash miniconda.sh -b -p $MINICONDA
    rm -f miniconda.sh
  fi
  . $MINICONDA/etc/profile.d/conda.sh

  conda create -n arrow-test -y -q -c conda-forge \
    python=3.6 \
    nomkl \
    numpy \
    pandas \
    cython
  conda activate arrow-test
}
# Build and test Java (Requires newer Maven -- I used 3.3.9)
test_package_java() {
  pushd java

  mvn test
  mvn package

  popd
}
# Build and test C++
# Configures with (nearly) all optional components enabled, installs into
# $ARROW_HOME so the language bindings below can link against it, and runs
# the unit test suite.
test_and_install_cpp() {
  mkdir -p cpp/build
  pushd cpp/build

  ARROW_CMAKE_OPTIONS="
${ARROW_CMAKE_OPTIONS:-}
-DCMAKE_INSTALL_PREFIX=$ARROW_HOME
-DCMAKE_INSTALL_LIBDIR=lib
-DARROW_FLIGHT=${ARROW_FLIGHT}
-DARROW_PLASMA=ON
-DARROW_ORC=ON
-DARROW_PYTHON=ON
-DARROW_GANDIVA=ON
-DARROW_PARQUET=ON
-DARROW_DATASET=ON
-DPARQUET_REQUIRE_ENCRYPTION=ON
-DARROW_WITH_BZ2=ON
-DARROW_WITH_ZLIB=ON
-DARROW_WITH_ZSTD=ON
-DARROW_WITH_LZ4=ON
-DARROW_WITH_SNAPPY=ON
-DARROW_WITH_BROTLI=ON
-DARROW_BOOST_USE_SHARED=ON
-DCMAKE_BUILD_TYPE=release
-DARROW_BUILD_TESTS=ON
-DARROW_BUILD_INTEGRATION=ON
-DARROW_CUDA=${ARROW_CUDA}
-DARROW_DEPENDENCY_SOURCE=AUTO
"
  cmake $ARROW_CMAKE_OPTIONS ..
  make -j$NPROC install

  # TODO: ARROW-5036: plasma-serialization_tests broken
  # TODO: ARROW-5054: libgtest.so link failure in flight-server-test
  LD_LIBRARY_PATH=$PWD/release:$LD_LIBRARY_PATH ctest \
    --exclude-regex "plasma-serialization_tests" \
    -j$NPROC \
    --output-on-failure \
    -L unittest
  popd
}
# Build and test the C# bindings. Downloads a local .NET SDK when none is
# installed, and uses the `sourcelink` tool to validate the packed PDBs.
test_csharp() {
  pushd csharp

  local csharp_bin=${PWD}/bin
  mkdir -p ${csharp_bin}

  if which dotnet > /dev/null 2>&1; then
    # dotnet is installed; make sure any user-installed tools (sourcelink)
    # are reachable.
    if ! which sourcelink > /dev/null 2>&1; then
      local dotnet_tools_dir=$HOME/.dotnet/tools
      if [ -d "${dotnet_tools_dir}" ]; then
        PATH="${dotnet_tools_dir}:$PATH"
      fi
    fi
  else
    # No dotnet on this machine: download the SDK binaries into ./bin.
    local dotnet_version=2.2.300
    local dotnet_platform=
    case "$(uname)" in
      Linux)
        dotnet_platform=linux
        ;;
      Darwin)
        dotnet_platform=macos
        ;;
    esac
    # Scrape the actual download URL out of Microsoft's "thank you" page.
    local dotnet_download_thank_you_url=https://dotnet.microsoft.com/download/thank-you/dotnet-sdk-${dotnet_version}-${dotnet_platform}-x64-binaries
    local dotnet_download_url=$( \
      curl --location ${dotnet_download_thank_you_url} | \
        grep 'window\.open' | \
        grep -E -o '[^"]+' | \
        sed -n 2p)
    curl ${dotnet_download_url} | \
      tar xzf - -C ${csharp_bin}
    PATH=${csharp_bin}:${PATH}
  fi

  dotnet test
  # `dotnet pack` needs a .git directory at the repo root; the release
  # tarball ships it as dummy.git, so swap it in temporarily.
  mv dummy.git ../.git
  dotnet pack -c Release
  mv ../.git dummy.git

  if ! which sourcelink > /dev/null 2>&1; then
    dotnet tool install --tool-path ${csharp_bin} sourcelink
    PATH=${csharp_bin}:${PATH}
    if ! sourcelink --help > /dev/null 2>&1; then
      export DOTNET_ROOT=${csharp_bin}
    fi
  fi

  sourcelink test artifacts/Apache.Arrow/Release/netstandard1.3/Apache.Arrow.pdb
  sourcelink test artifacts/Apache.Arrow/Release/netcoreapp2.1/Apache.Arrow.pdb

  popd
}
# Build and test Python
# Builds pyarrow in place against the C++ library installed by
# test_and_install_cpp, with the optional components toggled to match.
test_python() {
  pushd python

  pip install -r requirements-build.txt -r requirements-test.txt

  export PYARROW_WITH_DATASET=1
  export PYARROW_WITH_GANDIVA=1
  export PYARROW_WITH_PARQUET=1
  export PYARROW_WITH_PLASMA=1
  if [ "${ARROW_CUDA}" = "ON" ]; then
    export PYARROW_WITH_CUDA=1
  fi
  if [ "${ARROW_FLIGHT}" = "ON" ]; then
    export PYARROW_WITH_FLIGHT=1
  fi

  python setup.py build_ext --inplace
  py.test pyarrow -v --pdb

  popd
}
# Build and test the GLib bindings, supporting both the autotools and the
# meson build systems depending on what the tarball ships.
test_glib() {
  pushd c_glib

  # Homebrew's libffi is keg-only; expose its pkg-config file if present.
  if brew --prefix libffi > /dev/null 2>&1; then
    PKG_CONFIG_PATH=$(brew --prefix libffi)/lib/pkgconfig:$PKG_CONFIG_PATH
  fi

  if [ -f configure ]; then
    ./configure --prefix=$ARROW_HOME
    make -j$NPROC
    make install
  else
    meson build --prefix=$ARROW_HOME --libdir=lib
    ninja -C build
    ninja -C build install
  fi

  export GI_TYPELIB_PATH=$ARROW_HOME/lib/girepository-1.0:$GI_TYPELIB_PATH

  if ! bundle --version; then
    gem install bundler
  fi

  bundle install --path vendor/bundle
  bundle exec ruby test/run-test.rb

  popd
}
# Build and test the JavaScript implementation using a node installed via
# a local, throwaway nvm.
test_js() {
  pushd js

  export NVM_DIR="`pwd`/.nvm"
  mkdir -p $NVM_DIR
  curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash
  [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"

  nvm install node

  npm install
  # clean, lint, and build JS source
  npx run-s clean:all lint build
  npm run test

  # create initial integration test data
  # npm run create:testdata

  # run once to write the snapshots
  # npm test -- -t ts -u --integration

  # run again to test all builds against the snapshots
  # npm test -- --integration
  popd
}
# Run the test suite of every Ruby gem (the CUDA gem only when CUDA is on).
test_ruby() {
  pushd ruby

  local modules="red-arrow red-plasma red-gandiva red-parquet"
  if [ "${ARROW_CUDA}" = "ON" ]; then
    modules="${modules} red-arrow-cuda"
  fi

  for module in ${modules}; do
    pushd ${module}
    bundle install --path vendor/bundle
    bundle exec ruby test/run-test.rb
    popd
  done

  popd
}
# Download a pinned Go toolchain into ./local-go and run the Go module's
# tests with it. Note: the `local` VERSION here shadows the release
# VERSION only inside this function.
test_go() {
  local VERSION=1.14.1
  local ARCH=amd64

  if [ "$(uname)" == "Darwin" ]; then
    local OS=darwin
  else
    local OS=linux
  fi

  local GO_ARCHIVE=go$VERSION.$OS-$ARCH.tar.gz
  wget https://dl.google.com/go/$GO_ARCHIVE

  mkdir -p local-go
  tar -xzf $GO_ARCHIVE -C local-go
  rm -f $GO_ARCHIVE

  export GOROOT=`pwd`/local-go/go
  export GOPATH=`pwd`/local-go/gopath
  export PATH=$GOROOT/bin:$GOPATH/bin:$PATH

  pushd go/arrow

  go get -v ./...
  go test ./...
  go clean -modcache

  popd
}
# Install a throwaway rustup toolchain, check formatting with stable,
# then build and test with nightly (the release target).
test_rust() {
  # install rust toolchain in a similar fashion like test-miniconda
  export RUSTUP_HOME=$PWD/test-rustup
  export CARGO_HOME=$PWD/test-rustup

  curl https://sh.rustup.rs -sSf | sh -s -- -y --no-modify-path

  export PATH=$RUSTUP_HOME/bin:$PATH
  source $RUSTUP_HOME/env

  # build and test rust
  pushd rust

  # raises on any formatting errors
  rustup component add rustfmt --toolchain stable
  cargo +stable fmt --all -- --check

  # we are targeting Rust nightly for releases
  rustup default nightly

  # use local modules because we don't publish modules to crates.io yet
  sed \
    -i.bak \
    -E \
    -e 's/^arrow = "([^"]*)"/arrow = { version = "\1", path = "..\/arrow" }/g' \
    -e 's/^parquet = "([^"]*)"/parquet = { version = "\1", path = "..\/parquet" }/g' \
    */Cargo.toml

  # raises on any warnings
  RUSTFLAGS="-D warnings" cargo build
  cargo test

  popd
}
# Run integration tests
# Cross-implementation tests driven by archery, exercising whichever of
# C++/Java/JS/Go were built (per the TEST_INTEGRATION_* flags).
test_integration() {
  JAVA_DIR=$PWD/java
  CPP_BUILD_DIR=$PWD/cpp/build

  export ARROW_JAVA_INTEGRATION_JAR=$JAVA_DIR/tools/target/arrow-tools-$VERSION-jar-with-dependencies.jar
  export ARROW_CPP_EXE_PATH=$CPP_BUILD_DIR/release

  pip install -e dev/archery

  INTEGRATION_TEST_ARGS=""

  if [ "${ARROW_FLIGHT}" = "ON" ]; then
    INTEGRATION_TEST_ARGS="${INTEGRATION_TEST_ARGS} --run-flight"
  fi

  # Flight integration test executable have runtime dependency on
  # release/libgtest.so
  LD_LIBRARY_PATH=$ARROW_CPP_EXE_PATH:$LD_LIBRARY_PATH \
    archery integration \
      --with-cpp=${TEST_INTEGRATION_CPP} \
      --with-java=${TEST_INTEGRATION_JAVA} \
      --with-js=${TEST_INTEGRATION_JS} \
      --with-go=${TEST_INTEGRATION_GO} \
      $INTEGRATION_TEST_ARGS
}
# Ensure the arrow-testing and parquet-testing data repositories are
# present and export the data-path variables the test suites expect.
clone_testing_repositories() {
  local repo
  for repo in arrow-testing parquet-testing; do
    if [ ! -d "${repo}" ]; then
      git clone https://github.com/apache/${repo}.git
    fi
  done

  export ARROW_TEST_DATA=$PWD/arrow-testing/data
  export PARQUET_TEST_DATA=$PWD/parquet-testing/data
}
# Build and test every language implementation from the extracted source
# tree, honoring the TEST_* selection flags computed at the bottom of this
# script. Must be called with the source tree as the working directory.
test_source_distribution() {
  export ARROW_HOME=$ARROW_TMPDIR/install
  export PARQUET_HOME=$ARROW_TMPDIR/install
  export LD_LIBRARY_PATH=$ARROW_HOME/lib:${LD_LIBRARY_PATH:-}
  export PKG_CONFIG_PATH=$ARROW_HOME/lib/pkgconfig:${PKG_CONFIG_PATH:-}

  # Parallelism for the `make -j`/`ctest -j` invocations in the helpers.
  if [ "$(uname)" == "Darwin" ]; then
    NPROC=$(sysctl -n hw.ncpu)
  else
    NPROC=$(nproc)
  fi

  clone_testing_repositories

  if [ ${TEST_JAVA} -gt 0 ]; then
    test_package_java
  fi
  if [ ${TEST_CPP} -gt 0 ]; then
    test_and_install_cpp
  fi
  if [ ${TEST_CSHARP} -gt 0 ]; then
    test_csharp
  fi
  if [ ${TEST_PYTHON} -gt 0 ]; then
    test_python
  fi
  if [ ${TEST_GLIB} -gt 0 ]; then
    test_glib
  fi
  if [ ${TEST_RUBY} -gt 0 ]; then
    test_ruby
  fi
  if [ ${TEST_JS} -gt 0 ]; then
    test_js
  fi
  if [ ${TEST_GO} -gt 0 ]; then
    test_go
  fi
  if [ ${TEST_RUST} -gt 0 ]; then
    test_rust
  fi
  if [ ${TEST_INTEGRATION} -gt 0 ]; then
    test_integration
  fi
}
# Run the binary-artifact verifications selected via the TEST_* flags.
test_binary_distribution() {
  : ${BINTRAY_REPOSITORY:=apache/arrow}

  if [ ${TEST_BINARY} -gt 0 ]; then
    test_binary
  fi
  if [ ${TEST_APT} -gt 0 ]; then
    test_apt
  fi
  if [ ${TEST_YUM} -gt 0 ]; then
    test_yum
  fi
}
# Smoke-test that the optional pyarrow submodules import cleanly.
# $1 is the python version string (e.g. "3.7m"); the python-3-only
# modules are checked only when it starts with "3".
check_python_imports() {
  local py_arch=$1

  python -c "import pyarrow.parquet"
  python -c "import pyarrow.plasma"
  python -c "import pyarrow.fs"

  if [[ "$py_arch" =~ ^3 ]]; then
    # Flight, Gandiva and Dataset are only available for py3
    python -c "import pyarrow.dataset"
    python -c "import pyarrow.flight"
    python -c "import pyarrow.gandiva"
  fi
}
# Verify the Linux (manylinux) wheels: for each supported Python version,
# create a fresh conda env, then install and smoke-test every manylinux
# flavor of the wheel in it.
test_linux_wheels() {
  local py_arches="3.5m 3.6m 3.7m 3.8"
  local manylinuxes="1 2010 2014"

  for py_arch in ${py_arches}; do
    local env=_verify_wheel-${py_arch}
    conda create -yq -n ${env} python=${py_arch//[mu]/}
    conda activate ${env}

    for ml_spec in ${manylinuxes}; do
      # check the mandatory and optional imports
      pip install python-rc/${VERSION}-rc${RC_NUMBER}/pyarrow-${VERSION}-cp${py_arch//[mu.]/}-cp${py_arch//./}-manylinux${ml_spec}_x86_64.whl
      # FIX: pass the *value* of py_arch; the literal string "py_arch"
      # never matches ^3, silently skipping the python-3-only import checks.
      check_python_imports ${py_arch}

      # install test requirements
      pip install -r ${ARROW_DIR}/python/requirements-test.txt

      # execute the python unit tests
      pytest --pyargs pyarrow
    done

    conda deactivate
  done
}
# Verify the macOS wheels: one conda env per supported Python version;
# the wheel filename suffix differs between the "m"-ABI builds and 3.8+.
test_macos_wheels() {
  local py_arches="3.5m 3.6m 3.7m 3.8"

  for py_arch in ${py_arches}; do
    local env=_verify_wheel-${py_arch}
    conda create -yq -n ${env} python=${py_arch//m/}
    conda activate ${env}

    macos_suffix=macosx
    case "${py_arch}" in
      *m)
        macos_suffix="${macos_suffix}_10_9_intel"
        ;;
      *)
        macos_suffix="${macos_suffix}_10_9_x86_64"
        ;;
    esac

    # check the mandatory and optional imports
    pip install python-rc/${VERSION}-rc${RC_NUMBER}/pyarrow-${VERSION}-cp${py_arch//[m.]/}-cp${py_arch//./}-${macos_suffix}.whl
    # FIX: pass the *value* of py_arch; the literal string "py_arch"
    # never matches ^3, silently skipping the python-3-only import checks.
    check_python_imports ${py_arch}

    # install test requirements
    pip install -r ${ARROW_DIR}/python/requirements-test.txt

    # execute the python unit tests
    pytest --pyargs pyarrow

    conda deactivate
  done
}
# Download the platform-appropriate wheels for this RC, verify their
# signatures, then dispatch to the Linux or macOS wheel verification.
test_wheels() {
  clone_testing_repositories

  local download_dir=binaries
  mkdir -p ${download_dir}

  if [ "$(uname)" == "Darwin" ]; then
    local filter_regex=.*macosx.*
  else
    local filter_regex=.*manylinux.*
  fi

  # A python 3 env is needed just to run the download helper script.
  conda create -yq -n py3-base python=3.7
  conda activate py3-base

  python $SOURCE_DIR/download_rc_binaries.py $VERSION $RC_NUMBER \
         --regex=${filter_regex} \
         --dest=${download_dir}

  verify_dir_artifact_signatures ${download_dir}

  pushd ${download_dir}

  if [ "$(uname)" == "Darwin" ]; then
    test_macos_wheels
  else
    test_linux_wheels
  fi

  popd
}
# By default test all functionalities.
# To deactivate one test, deactivate the test and all of its dependents
# To explicitly select one test, set TEST_DEFAULT=0 TEST_X=1
if [ "${ARTIFACT}" == "source" ]; then
  TEST_SOURCE=1
fi
: ${TEST_DEFAULT:=1}
: ${TEST_JAVA:=${TEST_DEFAULT}}
: ${TEST_CPP:=${TEST_DEFAULT}}
: ${TEST_CSHARP:=${TEST_DEFAULT}}
: ${TEST_GLIB:=${TEST_DEFAULT}}
: ${TEST_RUBY:=${TEST_DEFAULT}}
: ${TEST_PYTHON:=${TEST_DEFAULT}}
: ${TEST_JS:=${TEST_DEFAULT}}
: ${TEST_GO:=${TEST_DEFAULT}}
: ${TEST_RUST:=${TEST_DEFAULT}}
: ${TEST_INTEGRATION:=${TEST_DEFAULT}}
: ${TEST_BINARY:=${TEST_DEFAULT}}
: ${TEST_APT:=${TEST_DEFAULT}}
: ${TEST_YUM:=${TEST_DEFAULT}}

# For selective Integration testing, set TEST_DEFAULT=0 TEST_INTEGRATION_X=1 TEST_INTEGRATION_Y=1
: ${TEST_INTEGRATION_CPP:=${TEST_INTEGRATION}}
: ${TEST_INTEGRATION_JAVA:=${TEST_INTEGRATION}}
: ${TEST_INTEGRATION_JS:=${TEST_INTEGRATION}}
: ${TEST_INTEGRATION_GO:=${TEST_INTEGRATION}}

# Automatically test if its activated by a dependent
# (the arithmetic sums act as a logical OR of the 0/1 flags).
TEST_GLIB=$((${TEST_GLIB} + ${TEST_RUBY}))
TEST_CPP=$((${TEST_CPP} + ${TEST_GLIB} + ${TEST_PYTHON} + ${TEST_INTEGRATION_CPP}))
TEST_JAVA=$((${TEST_JAVA} + ${TEST_INTEGRATION_JAVA}))
TEST_JS=$((${TEST_JS} + ${TEST_INTEGRATION_JS}))
TEST_GO=$((${TEST_GO} + ${TEST_INTEGRATION_GO}))
TEST_INTEGRATION=$((${TEST_INTEGRATION} + ${TEST_INTEGRATION_CPP} + ${TEST_INTEGRATION_JAVA} + ${TEST_INTEGRATION_JS} + ${TEST_INTEGRATION_GO}))

# Wheel verification only applies to the "wheels" artifact kind.
if [ "${ARTIFACT}" == "wheels" ]; then
  TEST_WHEELS=1
else
  TEST_WHEELS=0
fi

NEED_MINICONDA=$((${TEST_CPP} + ${TEST_WHEELS} + ${TEST_INTEGRATION}))

# Make TEST_ARCHIVE an absolute path before we cd into the sandbox.
: ${TEST_ARCHIVE:=apache-arrow-${VERSION}.tar.gz}
case "${TEST_ARCHIVE}" in
  /*)
    ;;
  *)
    TEST_ARCHIVE=${PWD}/${TEST_ARCHIVE}
    ;;
esac

TEST_SUCCESS=no

setup_tempdir "arrow-${VERSION}"
echo "Working in sandbox ${ARROW_TMPDIR}"
cd ${ARROW_TMPDIR}

if [ ${NEED_MINICONDA} -gt 0 ]; then
  setup_miniconda
  echo "Using miniconda environment ${MINICONDA}"
fi

if [ "${ARTIFACT}" == "source" ]; then
  dist_name="apache-arrow-${VERSION}"
  if [ ${TEST_SOURCE} -gt 0 ]; then
    # Verify the official RC tarball from the dist server.
    import_gpg_keys
    fetch_archive ${dist_name}
    tar xf ${dist_name}.tar.gz
  else
    # Verify a locally supplied archive (TEST_ARCHIVE), skipping signatures.
    mkdir -p ${dist_name}
    if [ ! -f ${TEST_ARCHIVE} ]; then
      echo "${TEST_ARCHIVE} not found"
      exit 1
    fi
    tar xf ${TEST_ARCHIVE} -C ${dist_name} --strip-components=1
  fi
  pushd ${dist_name}
  test_source_distribution
  popd
elif [ "${ARTIFACT}" == "wheels" ]; then
  import_gpg_keys
  test_wheels
else
  import_gpg_keys
  test_binary_distribution
fi

# Reaching this point means nothing failed (set -e), so the EXIT trap's
# cleanup may remove the sandbox.
TEST_SUCCESS=yes
echo 'Release candidate looks good!'
exit 0
|
<filename>Lib/fontbakery/commands/check_profile.py
#!/usr/bin/env python
# usage:
# $ fontbakery check-profile fontbakery.profiles.googlefonts -h
import argparse
from importlib import import_module
import os
import sys
from collections import OrderedDict
from fontbakery.checkrunner import (
distribute_generator
, CheckRunner
, ValueValidationError
, Profile
, get_module_profile
, get_module_from_file
, DEBUG
, INFO
, WARN
, ERROR
, SKIP
, PASS
, FAIL
, SECTIONSUMMARY
, START
, END
, ENDCHECK
)
from fontbakery.multiproc import multiprocessing_runner
# Map of status name -> status object, ordered from least to most severe;
# used to validate and resolve the -l/--loglevel command line values.
log_levels = OrderedDict((s.name, s) \
                         for s in sorted((
                           DEBUG
                         , INFO
                         , WARN
                         , ERROR
                         , SKIP
                         , PASS
                         , FAIL
                         )))

# Statuses below this level are not reported unless requested explicitly.
DEFAULT_LOG_LEVEL = INFO
from fontbakery.reporters.terminal import TerminalReporter
from fontbakery.reporters.serialize import SerializeReporter
from fontbakery.reporters.ghmarkdown import GHMarkdownReporter
from fontbakery.reporters.html import HTMLReporter
def ArgumentParser(profile, profile_arg=True):
argument_parser = \
argparse.ArgumentParser(description="Check TTF files against a profile.",
formatter_class=argparse.RawTextHelpFormatter)
if profile_arg:
argument_parser.add_argument('profile',
help='File/Module name,'
' must define a fontbakery "profile".')
values_keys = profile.setup_argparse(argument_parser)
argument_parser.add_argument(
"-c",
"--checkid",
action="append",
help=(
"Explicit check-ids (or parts of their name) to be executed. "
"Use this option multiple times to select multiple checks."
),
)
argument_parser.add_argument(
"-x",
"--exclude-checkid",
action="append",
help=(
"Exclude check-ids (or parts of their name) from execution. "
"Use this option multiple times to exclude multiple checks."
),
)
valid_keys = ', '.join(log_levels.keys())
def log_levels_get(key):
if key in log_levels:
return log_levels[key]
raise argparse.ArgumentTypeError(f'Key "{key}" must be one of: {valid_keys}.')
argument_parser.add_argument('-v', '--verbose',
dest='loglevels',
const=PASS,
action='append_const',
help='Shortcut for `-l PASS`.\n')
argument_parser.add_argument('-l', '--loglevel',
dest='loglevels',
type=log_levels_get,
action='append',
metavar= 'LOGLEVEL',
help=f'Report checks with a result of this status or higher.\n'
f'One of: {valid_keys}.\n'
f'(default: {DEFAULT_LOG_LEVEL.name})')
argument_parser.add_argument('-m', '--loglevel-messages',
default=None,
type=log_levels_get,
help=f'Report log messages of this status or higher.\n'
f'Messages are all status lines within a check.\n'
f'One of: {valid_keys}.\n'
f'(default: LOGLEVEL)')
argument_parser.add_argument('--succinct',
action='store_true',
help='This is a slightly more compact and succint'
' output layout for the text terminal.')
if sys.platform != "win32":
argument_parser.add_argument('-n', '--no-progress',
action='store_true',
help='In a tty as stdout, don\'t'
' render the progress indicators.')
argument_parser.add_argument('-C', '--no-colors',
action='store_true',
help='No colors for tty output.')
argument_parser.add_argument('-S', '--show-sections', default=False, action='store_true',
help='Show section summaries.')
argument_parser.add_argument('-L', '--list-checks', default=False, action='store_true',
help='List the checks available in the selected profile.')
argument_parser.add_argument('--dark-theme', default=False, action='store_true',
help='Use a color theme with dark colors.')
argument_parser.add_argument('--light-theme', default=False, action='store_true',
help='Use a color theme with light colors.')
argument_parser.add_argument('--json', default=False, type=argparse.FileType('w'),
metavar= 'JSON_FILE',
help='Write a json formatted report to JSON_FILE.')
argument_parser.add_argument('--ghmarkdown', default=False, type=argparse.FileType('w'),
metavar= 'MD_FILE',
help='Write a GitHub-Markdown formatted report to MD_FILE.')
argument_parser.add_argument('--html', default=False,
type=argparse.FileType('w', encoding="utf-8"),
metavar= 'HTML_FILE',
help='Write a HTML report to HTML_FILE.')
iterargs = sorted(profile.iterargs.keys())
gather_by_choices = iterargs + ['*check']
comma_separated = ', '.join(gather_by_choices)
argument_parser.add_argument('-g','--gather-by', default=None,
metavar= 'ITERATED_ARG',
choices=gather_by_choices,
type=str,
help='Optional: collect results by ITERATED_ARG\n'
'In terminal output: create a summary counter for each ITERATED_ARG.\n'
'In json output: structure the document by ITERATED_ARG.\n'
'One of: {comma_separated}')
def parse_order(arg):
    """argparse type for --order: parse a comma separated list of names.

    Returns the list of non-empty, stripped entries, or None when the
    argument contains no usable entries (so argparse stores an explicit
    "no custom order" value).
    """
    # list(...) is essential here: a lazy `filter` object is always truthy,
    # so the original `order or None` could never yield None, and the
    # iterator would also be exhausted after its first traversal.
    order = [name.strip() for name in arg.split(',') if name.strip()]
    return order or None
comma_separated = ', '.join(iterargs)
argument_parser.add_argument('-o','--order', default=None, type=parse_order,
help=f'Comma separated list of order arguments.\n'
f'The execution order is determined by the order of the check\n'
f'definitions and by the order of the iterable arguments.\n'
f'A section defines its own order. `--order` can be used to\n'
f'override the order of *all* sections.\n'
f'Despite the ITERATED_ARGS there are two special\n'
f'values available:\n'
f'"*iterargs" -- all remainig ITERATED_ARGS\n'
f'"*check" -- order by check\n'
f'ITERATED_ARGS: {comma_separated}\n'
f'A sections default is equivalent to: "*iterargs, *check".\n'
f'A common use case is `-o "*check"` when checking the whole \n'
f'collection against a selection of checks picked with `--checkid`.')
def positive_int(value):
    """argparse type: accept only integers >= 0, else raise ArgumentTypeError."""
    parsed = int(value)
    if parsed >= 0:
        return parsed
    raise argparse.ArgumentTypeError(
        f'Invalid value "{value}" must be zero or a positive integer value.')
argument_parser.add_argument('-J','--jobs', default=0, type=positive_int,
metavar='JOBS', dest='multiprocessing',
help=f'Use multi-processing to run the checks. The argument is the number\n'
f'of worker processes. A sensible number is the cpu count of your\n'
f'system, detected: {os.cpu_count()}.'
f' As an automated shortcut see -j/--auto-jobs.\n'
f'Use 0 to run in single-processing mode (default %(default)s).')
argument_parser.add_argument('-j','--auto-jobs', const=os.cpu_count(),
action='store_const', dest='multiprocessing',
help='Use the auto detected cpu count (= %(const)s)'
' as number of worker processes\n'
'in multi-processing. This is equivalent to : `--jobs %(const)s`')
return argument_parser, values_keys
class ArgumentParserError(Exception):
    """Raised (instead of exiting) when the pre-parse argparse reports an error."""
def get_module(name):
    """Load a profile module either from a file path or by module name."""
    # A readable file wins over an importable module of the same name.
    # Otherwise, it's still possible to change the directory to force
    # module lookup.
    if os.path.isfile(name):
        return get_module_from_file(name)
    # Fails with an appropriate ImportError for unknown module names.
    return import_module(name, package=None)
def get_profile():
    """ Prefetch the profile module, to fill some holes in the help text. """
    argument_parser, _ = ArgumentParser(Profile(), profile_arg=True)
    # monkey patching will do here: replace argparse's sys.exit-ing error
    # handler with one that raises, so a bad/missing --profile does not
    # terminate the process during this early pre-parse.
    def error(message): raise ArgumentParserError(message)
    argument_parser.error = error
    try:
        args, _ = argument_parser.parse_known_args()
    except ArgumentParserError:
        # silently fails, the main parser will show usage string.
        return Profile()
    imported = get_module(args.profile)
    profile = get_module_profile(imported)
    if not profile:
        raise Exception(f"Can't get a profile from {imported}.")
    return profile
# This stub or alias is kept for compatibility (e.g. check-commands, FontBakery
# Dashboard). The function of the same name previously only passed on all parameters to
# CheckRunner. External callers may still import `runner_factory`, so keep it.
runner_factory = CheckRunner
def main(profile=None, values=None):
    """CLI entry point: parse the command line, run all checks, emit reports.

    profile: optional pre-built profile; when None it is resolved from the
        --profile argument via get_profile().
    values: optional dict of pre-set check values, merged with values taken
        from the profile-specific command line arguments.
    Returns 1 when the worst check status is ERROR or FAIL, else 0.
    """
    # profile can be injected by e.g. check-googlefonts injects it's own profile
    add_profile_arg = False
    if profile is None:
        profile = get_profile()
        add_profile_arg = True

    argument_parser, values_keys = ArgumentParser(profile, profile_arg=add_profile_arg)
    args = argument_parser.parse_args()

    # The default Windows Terminal just displays the escape codes. The argument
    # parser above therefore has these options disabled.
    if sys.platform == "win32":
        args.no_progress = True
        args.no_colors = True

    from fontbakery.constants import NO_COLORS_THEME, DARK_THEME, LIGHT_THEME
    if args.no_colors:
        theme = NO_COLORS_THEME
    else:
        if args.light_theme:
            theme = LIGHT_THEME
        elif args.dark_theme:
            theme = DARK_THEME
        elif sys.platform == "darwin":
            # The vast majority of MacOS users seem to use a light-background on the text terminal
            theme = LIGHT_THEME
        else:
            # For other systems like GNU+Linux and Windows, a dark terminal seems to be more common.
            theme = DARK_THEME

    # --list-checks: print the checks of the selected profile and exit.
    if args.list_checks:
        if args.loglevels == [PASS]: # if verbose: also show section and description
            for section in profile._sections.values():
                print(theme["list-checks: section"]("\nSection:") + " " + section.name)
                for check in section._checks:
                    print(theme["list-checks: check-id"](check.id) + "\n" +
                          theme["list-checks: description"](f'"{check.description}"') + "\n")
        else:
            # terse mode: check ids only
            for section_name, section in profile._sections.items():
                for check in section._checks:
                    print(check.id)
        sys.exit()

    values_ = {}
    if values is not None:
        values_.update(values)

    # values_keys are returned by profile.setup_argparse
    # these are keys for custom arguments required by the profile.
    if values_keys:
        for key in values_keys:
            if hasattr(args, key):
                values_[key] = getattr(args, key)

    runner_kwds = dict( values=values_
                      , custom_order=args.order
                      , explicit_checks=args.checkid
                      , exclude_checks=args.exclude_checkid
                      )
    try:
        runner = CheckRunner(profile, **runner_kwds)
    except ValueValidationError as e:
        # e.g. a required value (font file) missing or invalid
        print(e)
        argument_parser.print_usage()
        sys.exit(1)

    is_async = args.multiprocessing != 0
    # the most verbose loglevel wins
    loglevel = min(args.loglevels) if args.loglevels else DEFAULT_LOG_LEVEL
    tr = TerminalReporter(runner=runner, is_async=is_async
                         , print_progress=not args.no_progress
                         , succinct=args.succinct
                         , check_threshold=loglevel
                         , log_threshold=args.loglevel_messages or loglevel
                         , theme=theme
                         , collect_results_by=args.gather_by
                         , skip_status_report=None if args.show_sections \
                                                   else (SECTIONSUMMARY, )
                         )
    reporters = [tr.receive]

    # Optional file reporters; each subscribes to the same status stream.
    if args.json:
        sr = SerializeReporter(runner=runner, collect_results_by=args.gather_by)
        reporters.append(sr.receive)
    if args.ghmarkdown:
        mdr = GHMarkdownReporter(loglevels=args.loglevels,
                                 runner=runner,
                                 collect_results_by=args.gather_by)
        reporters.append(mdr.receive)
    if args.html:
        hr = HTMLReporter(loglevels=args.loglevels,
                          runner=runner,
                          collect_results_by=args.gather_by)
        reporters.append(hr.receive)

    # Single- or multi-processing execution, selected via -J/-j.
    if args.multiprocessing == 0:
        status_generator = runner.run()
    else:
        status_generator = multiprocessing_runner(args.multiprocessing, runner, runner_kwds)

    # Fan the status events out to all registered reporters.
    distribute_generator(status_generator, reporters)

    # Write out the file-based reports after the run completed.
    if args.json:
        import json
        json.dump(sr.getdoc(), args.json, sort_keys=True, indent=4)
        print(f'A report in JSON format has been'
              f' saved to "{args.json.name}"')
    if args.ghmarkdown:
        args.ghmarkdown.write(mdr.get_markdown())
        print(f'A report in GitHub Markdown format which can be useful\n'
              f' for posting issues on a GitHub issue tracker has been\n'
              f' saved to "{args.ghmarkdown.name}"')
    if args.html:
        args.html.write(hr.get_html())
        print(f'A report in HTML format has been saved to "{args.html.name}"')

    # Fail and error let the command fail
    return 1 if tr.worst_check_status in (ERROR, FAIL) else 0
if __name__ == '__main__':
    # Propagate main()'s 0/1 result as the process exit code.
    sys.exit(main())
|
#!/bin/bash
# Deploy the express42/reddit sample app (monolith branch) and serve it
# with the Puma application server.
#
# Abort on the first failing command instead of continuing with a
# half-deployed application (the original script ignored all failures).
set -euo pipefail

sudo apt update
git clone -b monolith https://github.com/express42/reddit.git /usr/puma
cd /usr/puma
bundle install
# -d: run Puma daemonized in the background
puma -d
|
# loading the necessary packages
library(tidyverse)

# loading the iris dataset from the working directory
# (note: this masks the built-in `iris` from the {datasets} package)
iris <- read_csv("iris.csv")

# generating an exhaustive summary of every column;
# funs() is deprecated since dplyr 0.8.0 -- pass a named list of
# functions instead (column naming stays <col>_<fn>, as before).
# Renamed the result to avoid shadowing base::summary().
iris_summary <- iris %>%
  summarize_all(list(mean = mean, median = median, min = min, max = max, sd = sd))

# printing the summary
print(iris_summary)
package com.sparklicorn.sudoku.util;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.HttpURLConnection;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
import java.util.function.Function;
/**
 * Provides utility methods for working with files, HTTP connections.
 */
public class FileUtil {

    /**
     * Applies {@code func} to every line of the file at {@code fromPath} and
     * writes the transformed lines to {@code toPath}.
     *
     * @param fromPath       path of the file to read.
     * @param toPath         path of the file to write (overwritten).
     * @param func           transformation applied to each input line.
     * @param skipEmptyLines when true, blank lines are dropped instead of transformed.
     */
    public static void transformLinesInFile(String fromPath, String toPath, Function<String, String> func, boolean skipEmptyLines) {
        try (
            Scanner scanner = new Scanner(new File(fromPath));
            PrintWriter pw = new PrintWriter(toPath);
        ) {
            while (scanner.hasNextLine()) {
                String line = scanner.nextLine();
                if (skipEmptyLines && line.trim().isEmpty()) {
                    continue;
                }
                pw.println(func.apply(line));
            }
        } catch (IOException ex) {
            // Best effort: log and return, matching the other helpers' behavior.
            ex.printStackTrace();
        }
    }

    /**
     * Returns a List containing the absolute paths of files found matching
     * the given criteria in the given directory.
     * <br/>File names are compared against filterRegex as lowercase.
     * @param dir - Directory path to search in.
     * @param filterRegex - Regular expression to match file names with.
     * @param recurse - Whether to search directories recursively.
     * @return A List of file paths, or <code>null</code> if the directory name
     * provided does not exist or is not a real directory.
     */
    public static List<String> findFiles(String dir, String filterRegex, boolean recurse) {
        ArrayList<String> result = null;
        File fDir = new File(dir);
        if (fDir.exists() && fDir.isDirectory()) {
            result = new ArrayList<>();
            for (File f : fDir.listFiles()) {
                if (f.isDirectory() && recurse) {
                    result.addAll(findFiles(f.getAbsolutePath(), filterRegex, recurse));
                } else if (f.getName().toLowerCase().matches(filterRegex)) {
                    result.add(f.getAbsolutePath());
                }
            }
        }
        return result;
    }

    /**
     * Returns the content of a file, with each line followed by the platform
     * line separator (including a trailing separator after the last line).
     */
    public static String getFileContent(String filepath) {
        StringBuilder s = new StringBuilder();
        // try-with-resources closes the Scanner; the former explicit
        // scanner.close() inside the block was redundant.
        try (Scanner scanner = new Scanner(new File(filepath))) {
            while (scanner.hasNextLine()) {
                s.append(scanner.nextLine());
                s.append(System.lineSeparator());
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        }
        return s.toString();
    }

    /**
     * Returns the content of a file.
     * NOTE: unlike {@link #getFileContent(String)}, this overload trims
     * leading/trailing whitespace of the whole result -- callers depend on
     * this difference, so it is preserved.
     */
    public static String getFileContent(File file) {
        StringBuilder s = new StringBuilder();
        try (Scanner scanner = new Scanner(file)) {
            while (scanner.hasNextLine()) {
                s.append(scanner.nextLine());
                s.append(System.lineSeparator());
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        }
        return s.toString().trim();
    }

    /**
     * Attempts to read data from the given HttpURLConnection and save it with
     * the specified filename.
     * @param con - Connections that should provide some data.
     * @param filename - Name of file to save the data to.
     * @throws NullPointerException if {@code con} is null.
     */
    public static void saveContentToFile(HttpURLConnection con, String filename) {
        if (con != null) {
            try (
                PrintWriter pw = new PrintWriter(new BufferedWriter(new FileWriter(filename)));
                BufferedReader br = new BufferedReader(new InputStreamReader(con.getInputStream()))) {
                String input;
                while ((input = br.readLine()) != null) {
                    pw.println(input);
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        } else {
            // Fixed typo in the original message ("HttspURLConnection").
            throw new NullPointerException("Provided HttpURLConnection was null.");
        }
    }

    /**
     * Attempts to read and return data from the given HttpURLConnection.
     * @throws NullPointerException if {@code con} is null.
     */
    public static String getContent(HttpURLConnection con) {
        StringBuilder strb = new StringBuilder();
        if (con != null) {
            // try-with-resources closes br; the former explicit br.close()
            // was redundant.
            try (BufferedReader br = new BufferedReader(new InputStreamReader(con.getInputStream()))) {
                String input;
                String line_sep = System.lineSeparator();
                while ((input = br.readLine()) != null) {
                    strb.append(input);
                    strb.append(line_sep);
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        } else {
            throw new NullPointerException("Provided HttpURLConnection was null.");
        }
        return strb.toString();
    }
}
|
package Contollers.Magazine;
import ApiMessages.MagazineMessage;
import ApiMessages.SignupMessage;
import DB.Domain.Magazine.Magazine;
import Models.MagazineModel;
import BackendUtilities.Parser;
import java.io.IOException;
import java.util.ArrayList;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
 * Servlet endpoint for Magazine actions.
 *
 * @author jefemayoneso
 */
@WebServlet(name = "MagazineContoller", urlPatterns = {"/MagazineContoller"})
public class MagazineContoller extends HttpServlet {

    /**
     * Manage the actions of Magazine: delegates the request body to
     * MagazineModel and writes the resulting message as JSON.
     *
     * @param request servlet request
     * @param response servlet response
     * @throws ServletException if a servlet-specific error occurs
     * @throws IOException if an I/O error occurs
     */
    @Override
    protected void doPost(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        response.setContentType("text/plain;charset=UTF-8");
        request.setCharacterEncoding("UTF-8");
        Parser parser = new Parser();
        try {
            MagazineMessage message = new MagazineModel().executeModel(request.getReader());
            response.getWriter().append(parser.toJSON(message, MagazineMessage.class));
        } catch (Exception e) {
            // Set the error status BEFORE writing the body: once the response
            // is committed, the status line can no longer be changed (the
            // original set it after appending, which can be silently ignored).
            response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
            response.getWriter().append(parser.toJSON(new SignupMessage("Error trying to make a Magazine action at [MagazineController]" + e.getMessage(), null), SignupMessage.class));
        }
    }

    /**
     * Returns the magazines selected from the request parameters as a JSON array.
     *
     * @param request servlet request
     * @param response servlet response
     * @throws ServletException if a servlet-specific error occurs
     * @throws IOException if an I/O error occurs
     */
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        response.setContentType("text/plain;charset=UTF-8");
        request.setCharacterEncoding("UTF-8");
        Parser parser = new Parser();
        try {
            ArrayList<Magazine> magazines = new MagazineModel().selectMagazines(request);
            response.getWriter().append(parser.toJSON(magazines, new ArrayList<Magazine>().getClass()));
        } catch (Exception e) {
            // Status first (see doPost): avoid committing a 200 with an error body.
            response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
            response.getWriter().append(parser.toJSON(new SignupMessage("Error trying to make a Magazine action at [MagazineController]" + e.getMessage(), null), SignupMessage.class));
        }
    }
}
|
#!/usr/bin/env bats

# Sanity check: `ls -l` starts its output with a "total <blocks>" line.
@test "Check that total is listed" {
# `run` captures exit status and output; ${lines[0]} is the first output
# line, and =~ performs a (substring) regex match against it.
run ls -l
[[ ${lines[0]} =~ "total" ]]
}
|
<filename>RTSProject/RTSProject/DirectionalLight.h
#pragma once
// Represents a directional light source (parallel rays, e.g. sunlight).
// NOTE(review): currently an empty shell -- the constructor/destructor are
// declared here and presumably defined in the matching .cpp (so they must
// not be defaulted here); no light parameters are declared yet.
class DirectionalLight
{
public:
	DirectionalLight();
	~DirectionalLight();
};
|
#!/bin/bash
# Generate the kube-scheduler client certificate ("Kubernetes The Hard Way").
{

# Write the CSR; CN/O "system:kube-scheduler" is the well-known identity
# Kubernetes RBAC expects for the scheduler component.
cat > configs/kube-scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "RU",
      "L": "Moscow",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes The Hard Way",
      "ST": "Moscow"
    }
  ]
}
EOF

# Sign the CSR with the cluster CA; writes certs/kube-scheduler.pem and
# certs/kube-scheduler-key.pem (cfssljson -bare).
cfssl gencert \
  -ca=certs/ca.pem \
  -ca-key=certs/ca-key.pem \
  -config=configs/ca-config.json \
  -profile=kubernetes \
  configs/kube-scheduler-csr.json | cfssljson -bare certs/kube-scheduler

}
|
<reponame>zhaosiwen1949/malagu
// Barrel file: re-export the common utility modules of this package.
export * from './disposable';
export * from './prioritizeable';
export * from './promise-util';
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.compile;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
import org.apache.phoenix.compile.OrderPreservingTracker.Ordering;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.OrderByExpression;
import org.apache.phoenix.iterate.OrderedResultIterator;
import org.apache.phoenix.parse.HintNode.Hint;
import org.apache.phoenix.parse.LiteralParseNode;
import org.apache.phoenix.parse.OrderByNode;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SelectStatement;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.types.PInteger;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/**
 * Validates ORDER BY clause and builds up a list of referenced columns.
 *
 *
 * @since 0.1
 */
public class OrderByCompiler {
    public static class OrderBy {
        // Sentinel: the statement has no effective ORDER BY at all.
        public static final OrderBy EMPTY_ORDER_BY = new OrderBy(Collections.<OrderByExpression>emptyList());
        /**
         * Used to indicate that there was an ORDER BY, but it was optimized out because
         * rows are already returned in this order.
         */
        public static final OrderBy FWD_ROW_KEY_ORDER_BY = new OrderBy(Collections.<OrderByExpression>emptyList());
        // Same, but the scan must be executed in reverse row key order.
        public static final OrderBy REV_ROW_KEY_ORDER_BY = new OrderBy(Collections.<OrderByExpression>emptyList());

        private final List<OrderByExpression> orderByExpressions;

        public OrderBy(List<OrderByExpression> orderByExpressions) {
            this.orderByExpressions = ImmutableList.copyOf(orderByExpressions);
        }

        public List<OrderByExpression> getOrderByExpressions() {
            return orderByExpressions;
        }

        // True for EMPTY_ORDER_BY and both row-key sentinels (all hold empty lists).
        public boolean isEmpty() {
            return this.orderByExpressions == null || this.orderByExpressions.isEmpty();
        }

        // Wraps a single OrderBy in the list shape QueryPlan.getOutputOrderBys expects;
        // must not be called with the row-key sentinels (see assert).
        public static List<OrderBy> wrapForOutputOrderBys(OrderBy orderBy) {
            assert orderBy != OrderBy.FWD_ROW_KEY_ORDER_BY && orderBy != OrderBy.REV_ROW_KEY_ORDER_BY;
            if(orderBy == null || orderBy == OrderBy.EMPTY_ORDER_BY) {
                return Collections.<OrderBy> emptyList();
            }
            return Collections.<OrderBy> singletonList(orderBy);
        }

        /**
         * When we compile {@link OrderByNode} in {@link OrderByCompiler#compile}, we invoke {@link OrderByExpression#createByCheckIfExpressionSortOrderDesc}
         * to get the compiled {@link OrderByExpression} for using it in {@link OrderedResultIterator}, but for {@link QueryPlan#getOutputOrderBys()},
         * the returned {@link OrderByExpression} is used for {@link OrderPreservingTracker}, so we should invoke {@link OrderByExpression#createByCheckIfExpressionSortOrderDesc}
         * again to the actual {@link OrderByExpression}.
         * @return
         */
        public static OrderBy convertCompiledOrderByToOutputOrderBy(OrderBy orderBy) {
            if(orderBy.isEmpty()) {
                return orderBy;
            }
            List<OrderByExpression> orderByExpressions = orderBy.getOrderByExpressions();
            List<OrderByExpression> newOrderByExpressions = new ArrayList<OrderByExpression>(orderByExpressions.size());
            for(OrderByExpression orderByExpression : orderByExpressions) {
                OrderByExpression newOrderByExpression =
                        OrderByExpression.convertIfExpressionSortOrderDesc(orderByExpression);
                newOrderByExpressions.add(newOrderByExpression);
            }
            return new OrderBy(newOrderByExpressions);
        }
    }

    /**
     * Gets a list of columns in the ORDER BY clause
     * @param context the query context for tracking various states
     * associated with the given select statement
     * @param statement TODO
     * @param groupBy the list of columns in the GROUP BY clause
     * @param limit the row limit or null if no limit
     * @return the compiled ORDER BY clause
     * @throws SQLException
     */
    public static OrderBy compile(StatementContext context,
            SelectStatement statement,
            GroupBy groupBy,
            Integer limit,
            Integer offset,
            RowProjector rowProjector,
            QueryPlan innerQueryPlan,
            Expression whereExpression) throws SQLException {
        List<OrderByNode> orderByNodes = statement.getOrderBy();
        if (orderByNodes.isEmpty()) {
            return OrderBy.EMPTY_ORDER_BY;
        }
        // for ungroupedAggregates as GROUP BY expression, check against an empty group by
        ExpressionCompiler compiler;
        if (groupBy.isUngroupedAggregate()) {
            compiler = new StatelessExpressionCompiler(context, GroupBy.EMPTY_GROUP_BY);
        } else {
            compiler = new ExpressionCompiler(context, groupBy);
        }
        // Tracks whether the requested ordering matches (or reverses) the scan order.
        OrderPreservingTracker tracker = null;
        if(isTrackOrderByPreserving(statement)) {
            // accumulate columns in ORDER BY
            tracker = new OrderPreservingTracker(
                    context,
                    groupBy,
                    Ordering.ORDERED,
                    orderByNodes.size(),
                    null,
                    innerQueryPlan,
                    whereExpression);
        }
        // LinkedHashSet: keep ORDER BY entry order while dropping duplicates.
        LinkedHashSet<OrderByExpression> orderByExpressions = Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
        for (OrderByNode node : orderByNodes) {
            ParseNode parseNode = node.getNode();
            Expression expression = null;
            // ORDER BY <integer literal> refers to the select list by 1-based ordinal.
            if (parseNode instanceof LiteralParseNode && ((LiteralParseNode)parseNode).getType() == PInteger.INSTANCE){
                Integer index = (Integer)((LiteralParseNode)parseNode).getValue();
                int size = rowProjector.getColumnProjectors().size();
                if (index > size || index <= 0 ) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PARAM_INDEX_OUT_OF_BOUND)
                    .build().buildException();
                }
                expression = rowProjector.getColumnProjector(index-1).getExpression();
            } else {
                expression = node.getNode().accept(compiler);
                // Detect mix of aggregate and non aggregates (i.e. ORDER BY txns, SUM(txns)
                if (!expression.isStateless() && !compiler.isAggregate()) {
                    if (statement.isAggregate() || statement.isDistinct()) {
                        // Detect ORDER BY not in SELECT DISTINCT: SELECT DISTINCT count(*) FROM t ORDER BY x
                        if (statement.isDistinct()) {
                            throw new SQLExceptionInfo.Builder(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT)
                            .setMessage(expression.toString()).build().buildException();
                        }
                        ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
                    }
                }
            }
            // Constant (stateless) expressions cannot influence ordering and are skipped.
            if (!expression.isStateless()) {
                boolean isAscending = node.isAscending();
                boolean isNullsLast = node.isNullsLast();
                if(tracker != null) {
                    tracker.track(expression, isAscending, isNullsLast);
                }
                /**
                 * If we have a schema where column A is DESC, reverse the sort order
                 * since this is the order they actually are in.
                 * Reverse is required because the compiled OrderByExpression is used in {@link OrderedResultIterator},
                 * {@link OrderedResultIterator} implements the compare based on binary representation, not the decoded value of corresponding dataType.
                 */
                OrderByExpression orderByExpression = OrderByExpression.createByCheckIfExpressionSortOrderDesc(
                        expression,
                        isNullsLast,
                        isAscending);
                orderByExpressions.add(orderByExpression);
            }
            compiler.reset();
        }

        // we can remove ORDER BY clauses in case of only COUNT(DISTINCT...) clauses
        if (orderByExpressions.isEmpty() || groupBy.isUngroupedAggregate()) {
            return OrderBy.EMPTY_ORDER_BY;
        }
        // If we're ordering by the order returned by the scan, we don't need an order by
        if (tracker != null && tracker.isOrderPreserving()) {
            if (tracker.isReverse()) {
                // Don't use reverse scan if:
                // 1) we're using a skip scan, as our skip scan doesn't support this yet.
                // 2) we have the FORWARD_SCAN hint set to choose to keep loading of column
                //    families on demand versus doing a reverse scan
                // REV_ROW_KEY_ORDER_BY scan would not take effect for a projected table, so don't return it for such table types.
                if (context.getConnection().getQueryServices().getProps().getBoolean(QueryServices.USE_REVERSE_SCAN_ATTRIB, QueryServicesOptions.DEFAULT_USE_REVERSE_SCAN)
                        && !context.getScanRanges().useSkipScanFilter()
                        && context.getCurrentTable().getTable().getType() != PTableType.PROJECTED
                        && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY
                        && !statement.getHint().hasHint(Hint.FORWARD_SCAN)) {
                    return OrderBy.REV_ROW_KEY_ORDER_BY;
                }
            } else {
                return OrderBy.FWD_ROW_KEY_ORDER_BY;
            }
        }
        return new OrderBy(Lists.newArrayList(orderByExpressions.iterator()));
    }

    // UNION results are concatenated, so scan-order preservation cannot be tracked.
    public static boolean isTrackOrderByPreserving(SelectStatement selectStatement) {
        return !selectStatement.isUnion();
    }

    // Static utility class; not instantiable.
    private OrderByCompiler() {
    }
}
|
package com.netflix.dyno.connectionpool.impl;
import java.util.concurrent.TimeUnit;
import org.junit.Assert;
import org.junit.Test;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.OperationMonitor;
public class OperationResultImplTest {

    /** Verifies that the builder-style mutators record attempts, metadata, latency and node. */
    @Test
    public void testProcess() throws Exception {
        OperationMonitor monitor = new LastOperationMonitor();
        OperationResultImpl<Integer> result = new OperationResultImpl<Integer>("test", 11, monitor);
        Host node = new Host("testHost", 1234);

        result.attempts(2)
              .addMetadata("foo", "f1")
              .addMetadata("bar", "b1")
              .setLatency(10, TimeUnit.MILLISECONDS)
              .setNode(node);

        Assert.assertEquals(2, result.getAttemptsCount());
        Assert.assertEquals(10, result.getLatency());
        Assert.assertEquals(10, result.getLatency(TimeUnit.MILLISECONDS));
        Assert.assertEquals(node, result.getNode());
        Assert.assertEquals("f1", result.getMetadata().get("foo"));
        Assert.assertEquals("b1", result.getMetadata().get("bar"));
    }
}
|
#!/bin/sh
# Install test-only dependencies: assertion/mocking libraries plus typings.
# NOTE: the original used `@types/{mocha,chai}` -- brace expansion is a
# bash-ism and is NOT performed by a POSIX /bin/sh, which would pass the
# literal string to npm. List the packages explicitly instead.
npm install chai typemoq --save-dev
npm install @types/mocha @types/chai --save-dev
|
<reponame>vasinov/cathouse-frontend<filename>lib/generators/comfy/scaffold/templates/tests/model.rb
require_relative '../test_helper'

class <%= class_name %>Test < ActiveSupport::TestCase

  # Every fixture record must satisfy the model's validations.
  def test_fixtures_validity
    <%= class_name %>.all.each do |<%= file_name %>|
      assert <%= file_name %>.valid?, <%= file_name %>.errors.inspect
    end
  end

  # A blank record must be invalid and report an error on each attribute.
  def test_validation
    <%= file_name %> = <%= class_name %>.new
    assert <%= file_name %>.invalid?
    assert_errors_on <%= file_name %>, <%= model_attrs.collect{|attr| ":#{attr.name}"}.join(', ') %>
  end

  # Creating a record with all attributes populated must grow the row count.
  def test_creation
    assert_difference '<%= class_name %>.count' do
      <%= class_name %>.create(
        <%- model_attrs.each do |attr| -%>
        :<%= attr.name %> => 'test <%= attr.name %>',
        <%- end -%>
      )
    end
  end
end
// seeker-compliance-report
// ////////////////////////
//
// Downloads the Seeker Compliance report for the specified project and uploads
// it to the workflow results as a build artefact.
import * as core from '@actions/core'
import {generateSeekerComplianceReportPDF, getInputOrEnvironmentVariable, uploadSeekerComplianceReport} from './utils'
async function run(): Promise<void> {
try {
core.info('⬇️ Downloading Seeker compliance report from the Seeker Server')
// Get the action inputs (or environment variables)
const seekerServerURL = getInputOrEnvironmentVariable(
'seekerServerUrl',
'SEEKER_SERVER_URL',
true // required
)
const seekerProjectKey = getInputOrEnvironmentVariable(
'seekerProjectKey',
'SEEKER_PROJECT_KEY',
true // required
)
const seekerAPIToken = getInputOrEnvironmentVariable(
'seekerAPIToken',
'SEEKER_API_TOKEN',
true // required
)
// Generate and upload the Seeker Compliance report
await generateSeekerComplianceReportPDF({
seekerServerURL,
seekerProjectKey,
seekerAPIToken
})
await uploadSeekerComplianceReport()
} catch (error) {
core.setFailed(error.message)
}
}
run()
|
Moderate, consisting of one clause with nine words. |
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
from deepthermal.FFNN_model import fit_FFNN, init_xavier, FFNN
class MultilevelFFNN(FFNN):
    """A multilevel ensemble of FFNN models whose predictions are summed.

    Level 0 is trained on the base data; each higher level models a residual
    correction (see get_level_dataset / fit_multilevel_FFNN below).
    """

    def __init__(self, levels=1, **model_params):
        # levels: number of stacked correction models.
        self.levels = levels
        # NOTE(review): the sub-models live in a plain Python list; if FFNN
        # is a torch.nn.Module, their parameters are NOT registered on this
        # container. Training works because fit_multilevel_FFNN optimizes
        # each self.models[level] directly -- confirm no caller relies on
        # self.parameters() covering the sub-models.
        self.models = []
        for level in range(levels):
            self.models.append(FFNN(**model_params))
        super().__init__(**model_params)

    def __getitem__(self, item):
        # Allow indexing the container like a sequence: model[level].
        return self.models[item]

    def __call__(self, x_data):
        # The ensemble prediction is the sum over all level models.
        y_pred = self.models[0](x_data)
        for level in range(1, len(self.models)):
            y_pred += self.models[level](x_data)
        return y_pred

    def __len__(self):
        return self.levels
# this is not the most effective way, but it is easy
class MultilevelDataset(Dataset):
    """Dataset pairing one x tensor with per-level y tensors of shrinking size.

    Each level beyond the ground level may have fewer samples; its y tensor
    is padded with NaN rows (and truncated) to the ground-level length, so
    every index yields a value for every level. NaN marks "no data at this
    level" and is filtered out again by get_level_dataset().
    """

    def __init__(self, x_tensor, y_tensors):
        # y_tensors[0] is the ground level; it must match x_tensor in length,
        # and each subsequent level may only have fewer (or equal) samples.
        self.levels = len(y_tensors)
        rows = x_tensor.size(0)
        new_y_tensor = [y_tensors[0]]
        y_nan = torch.full_like(y_tensors[0], float("nan"))
        assert x_tensor.size(0) == y_tensors[0].size(
            0
        ), "Tensor length of y ground level does not match x"
        # Remember the true (unpadded) number of samples per level.
        self.level_len = torch.zeros(self.levels, dtype=torch.int)
        self.level_len[0] = y_tensors[0].size(0)
        for i in range(self.levels - 1):
            assert y_tensors[i].size(0) >= y_tensors[i + 1].size(
                0
            ), "First tensor should be the one with the most data"
            self.level_len[i + 1] = y_tensors[i + 1].size(0)
            # Pad with NaN rows, then truncate to the ground-level length.
            new_y_tensor.append(torch.cat((y_tensors[i + 1], y_nan), dim=0)[:rows])
        self.y_tensors = new_y_tensor
        self.x_tensor = x_tensor

    def __getitem__(self, index):
        # Returns (x[index], (y_level0[index], y_level1[index], ...)).
        return self.x_tensor[index], tuple(tensor[index] for tensor in self.y_tensors)

    def __len__(self):
        return self.x_tensor.size(0)
def get_level_dataset(x_tensor, y_tensors, level):
    """Return the (x, target) pair used to train the model at `level`.

    Level 0 trains on the raw ground-level targets; a higher level trains on
    the residual between consecutive levels, restricted to the rows where
    that level actually has data (non-NaN entries).
    """
    if level == 0:
        return x_tensor, y_tensors[0]
    if level > 0:
        # Rows where this level has real samples (NaN marks padding).
        has_data = ~torch.isnan(y_tensors[level])
        rows = has_data.nonzero()[:, 0]
        residual = y_tensors[level][rows] - y_tensors[level - 1][rows]
        return x_tensor[rows], residual
def fit_multilevel_FFNN(Models, data, data_val=None, num_epochs=100, **training_param):
    """Train each level of a multilevel model on its residual dataset.

    Models: indexable collection of per-level models (e.g. MultilevelFFNN).
    data / data_val: MultilevelDataset-style datasets; `data[:]` relies on
        slice support in their __getitem__.
    Returns (train_loss_history, val_loss_history), each summed over levels.
    """
    levels = len(Models)
    loss_history_train_levels = torch.zeros((levels, num_epochs))
    loss_history_val_levels = torch.zeros((levels, num_epochs))
    for level in range(len(Models)):
        # Build the per-level (x, target/residual) dataset.
        level_data = TensorDataset(*get_level_dataset(*data[:], level))
        if data_val is not None:
            level_data_val = TensorDataset(*get_level_dataset(*data_val[:], level))
        else:
            level_data_val = None
        loss_history_train_levels[level], loss_history_val_levels[level] = fit_FFNN(
            model=Models[level],
            data=level_data,
            data_val=level_data_val,
            num_epochs=num_epochs,
            **training_param,
        )
    # return the sum of the losses since it is not relative loss
    loss_history_train = torch.sum(loss_history_train_levels, dim=0)
    loss_history_val = torch.sum(loss_history_val_levels, dim=0)
    return loss_history_train, loss_history_val
def get_init_multilevel(init=init_xavier):
    """Lift a single-model initializer into one that initializes every level."""
    def init_multilevel(Models, **kwargs):
        # Apply the wrapped initializer to each level's model in turn.
        for idx in range(len(Models)):
            init(Models[idx], **kwargs)
    return init_multilevel
# Root Relative Squared Error
def get_multilevel_RRSE(model, data, type_str="", verbose=False, level=0):
    """Root Relative Squared Error of `model` on one level of `data`.

    Loads the whole dataset as a single batch, compares the model's
    prediction against the targets at `level`, and normalizes by the
    deviation of the targets from their mean (so predicting the mean
    yields an error of 1.0).
    """
    loader = DataLoader(data, batch_size=len(data), shuffle=False)
    inputs, targets_per_level = next(iter(loader))
    targets = targets_per_level[level]
    predictions = model(inputs).detach()
    target_mean = torch.mean(targets, dim=0)
    squared_ratio = torch.sum((predictions - targets) ** 2) / torch.sum(
        (target_mean - targets) ** 2
    )
    rrse = squared_ratio ** 0.5
    if verbose:
        print(
            f"Root Relative Squared {type_str} Error: ",
            rrse.item() * 100,
            "%",
        )
    return rrse.item()
def predict_multilevel(models, x_data):
    """Sum of the per-level model predictions on `x_data`."""
    prediction = models[0](x_data)
    level = 1
    while level < len(models):
        prediction += models[level](x_data)
        level += 1
    return prediction
|
package domain
import (
"cf/api"
"cf/configuration"
"cf/requirements"
"cf/terminal"
"errors"
"github.com/codegangsta/cli"
)
// CreateDomain is the CLI command that creates a domain owned by an
// organization ("create-domain ORG DOMAIN").
type CreateDomain struct {
	ui         terminal.UI
	config     configuration.Reader
	domainRepo api.DomainRepository
	orgReq     requirements.OrganizationRequirement // populated in GetRequirements
}
// NewCreateDomain wires up a CreateDomain command with its UI, configuration
// reader and domain repository dependencies. The organization requirement is
// filled in later by GetRequirements.
func NewCreateDomain(ui terminal.UI, config configuration.Reader, domainRepo api.DomainRepository) (cmd *CreateDomain) {
	cmd = new(CreateDomain)
	cmd.ui = ui
	cmd.config = config
	cmd.domainRepo = domainRepo
	return
}
// GetRequirements validates the argument count and declares what must hold
// before Run executes: the user is logged in and the organization named by
// the first positional argument exists.
func (cmd *CreateDomain) GetRequirements(reqFactory requirements.Factory, c *cli.Context) (reqs []requirements.Requirement, err error) {
	if len(c.Args()) != 2 {
		// Wrong arity: show usage; the non-nil err signals failure upstream.
		err = errors.New("Incorrect Usage")
		cmd.ui.FailWithUsage(c, "create-domain")
		return
	}

	cmd.orgReq = reqFactory.NewOrganizationRequirement(c.Args()[0])
	reqs = []requirements.Requirement{
		reqFactory.NewLoginRequirement(),
		cmd.orgReq,
	}
	return
}
// Run creates the domain (second positional argument) inside the organization
// resolved by GetRequirements, reporting progress and failures on the UI.
func (cmd *CreateDomain) Run(c *cli.Context) {
	domainName := c.Args()[1]
	owningOrg := cmd.orgReq.GetOrganization()

	cmd.ui.Say("Creating domain %s for org %s as %s...",
		terminal.EntityNameColor(domainName),
		terminal.EntityNameColor(owningOrg.Name),
		terminal.EntityNameColor(cmd.config.Username()),
	)

	_, apiResponse := cmd.domainRepo.Create(domainName, owningOrg.Guid)
	if apiResponse.IsNotSuccessful() {
		cmd.ui.Failed(apiResponse.Message)
		return
	}

	cmd.ui.Ok()
}
|
#!/bin/bash
# Run a specified unit test
# Usage: <script> <path/to/test-source>
# When invoked with exactly one argument, the support libraries are rebuilt
# first; the test binary is then rebuilt, symlinked into bin/tests and run.
export PYTHONPATH=$(pwd)/python:$PYTHONPATH
cd $(dirname $0)/..
if [ $# -eq 1 ]; then
make setuputs || exit 1
make libfemtools || exit 1
fi
cd $(dirname $1)
# NOTE(review): inside single quotes, $1 is perl's (empty) capture group,
# NOT the shell argument -- this substitution is effectively a no-op.
# Confirm whether "$1" (the test name) was meant to be stripped from the
# Makefile; if so, the quotes need to be double quotes.
perl -pi -e 's/$1//' Makefile
rm $(basename $1)
make $(basename $1) || exit 1
cd ../../bin/tests
ln -s ../../$1
exec ./$(basename $1)
<filename>app/src/main/java/com/example/veterineruygulamas/Adapters/UserListAdapter.java
package com.example.veterineruygulamas.Adapters;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import androidx.annotation.NonNull;
import androidx.recyclerview.widget.RecyclerView;
import com.example.veterineruygulamas.Pojos.User;
import com.example.veterineruygulamas.R;
import java.util.List;
/**
 * RecyclerView adapter that renders one row per {@link User} using the
 * adapter_kullaniciyonetimi layout. Rows are not yet bound to any
 * per-item data (onBindViewHolder is intentionally empty).
 */
public class UserListAdapter extends RecyclerView.Adapter<UserListAdapter.ViewHolder> {
    Context context;
    List<User> liste; // backing list of users; its size drives getItemCount()

    public UserListAdapter(Context context, List<User> liste) {
        this.context = context;
        this.liste = liste;
    }

    @NonNull
    @Override
    public ViewHolder onCreateViewHolder(@NonNull ViewGroup parent, int viewType) {
        // Fix: inflate against the RecyclerView parent with attachToRoot=false
        // so the row root's layout_* attributes are honored; inflating with a
        // null parent silently drops them.
        View itemView = LayoutInflater.from(parent.getContext())
                .inflate(R.layout.adapter_kullaniciyonetimi, parent, false);
        return new ViewHolder(itemView);
    }

    @Override
    public void onBindViewHolder(@NonNull ViewHolder holder, int position) {
        // No per-row binding yet; the row views are static.
    }

    @Override
    public int getItemCount() {
        return liste.size();
    }

    public class ViewHolder extends RecyclerView.ViewHolder {
        public ViewHolder(@NonNull View itemView) {
            super(itemView);
        }
    }
}
|
import AtomPreview from '../app/atoms/AtomPreview.react';
import React, {PropTypes as RPT} from 'react';
import ReactDOM from 'react-dom/server';
// Renders an atom variant to static markup and reduces it to a checksum
// string used to compare variants for regressions.
const variantChecksum = ({atom, variantProps, context}) => {
  const Context = contextCreator(context);
  const html = ReactDOM.renderToStaticMarkup(
    <Context context={context}>
      <AtomPreview atom={atom} variantProps={variantProps} />
    </Context>
  )
  // Strip attributes from every opening tag, keeping only the tag name.
  // NOTE(review): the `+` sits outside the group (`([^>])+`); since the
  // replacement uses only $1 the result is the same as `([^>]+)`, but
  // confirm before touching the regex.
  const pureHtml = html.replace(/<([a-zA-Z]+)\s?([^>])+>/ig, '<$1>');
  // Inline styles and class lists are collected separately so attribute
  // content still participates in the checksum.
  const style = (html.match(/style="[^"]+"/ig) || []).toString();
  const classes = (html.match(/class="[^"]+"/ig) || []).toString();
  return JSON.stringify([pureHtml, style, classes]);
}
// Builds a throwaway provider class exposing every key of `context` through
// React's legacy context API, so AtomPreview can be rendered outside the
// normal application tree.
const contextCreator = (context) => class ContextProvider extends React.Component {
  // Declare each context key as `any` so React forwards all of them.
  static childContextTypes = Object.keys(context).reduce((out, key)=> ({...out, [key]: RPT.any}), {});
  static propTypes = {
    children: RPT.node
  }
  getChildContext() {
    return context;
  }
  render() {
    return this.props.children;
  }
}

export default variantChecksum;
|
#!/bin/bash
export TG_PROJECT=$(jq -r '.project_id' ./resources/common_vars.json)
export TG_REGION=$(jq -r '.region' ./resources/common_vars.json)
gcloud config set project $TG_PROJECT
gcloud config list
export TG_BUCKET=terraform-state-$(gcloud projects describe $TG_PROJECT --format="value(projectNumber)")
echo "TG environment variables:"
env | grep "TG_"
alias kp='HTTPS_PROXY=localhost:8080 kubectl $*'
alias np='HTTPS_PROXY=localhost:8080 nomos $*' |
<gh_stars>0
import * as tslib_1 from "tslib";
import { Tone } from "../Tone";
import { isUndef } from "./TypeCheck";
/**
 * Emitter gives classes which extend it
 * the ability to listen for and emit events.
 * Inspiration and reference from Jerome Etienne's [MicroEvent](https://github.com/jeromeetienne/microevent.js).
 * MIT (c) 2011 Jerome Etienne.
 */
var Emitter = /** @class */ (function (_super) {
    tslib_1.__extends(Emitter, _super);
    function Emitter() {
        var _this = _super !== null && _super.apply(this, arguments) || this;
        _this.name = "Emitter";
        return _this;
    }
    /**
     * Bind a callback to a specific event.
     * @param event The name of the event to listen for.
     * @param callback The callback to invoke when the event is emitted
     */
    Emitter.prototype.on = function (event, callback) {
        var _this = this;
        // split the event — "a b" registers the callback for both "a" and "b"
        var events = event.split(/\W+/);
        events.forEach(function (eventName) {
            if (isUndef(_this._events)) {
                _this._events = {};
            }
            if (!_this._events.hasOwnProperty(eventName)) {
                _this._events[eventName] = [];
            }
            _this._events[eventName].push(callback);
        });
        return this;
    };
    /**
     * Bind a callback which is only invoked once
     * @param event The name of the event to listen for.
     * @param callback The callback to invoke when the event is emitted
     */
    Emitter.prototype.once = function (event, callback) {
        var _this = this;
        var boundCallback = function () {
            var args = [];
            for (var _i = 0; _i < arguments.length; _i++) {
                args[_i] = arguments[_i];
            }
            // invoke the callback
            callback.apply(void 0, tslib_1.__spread(args));
            // remove the event
            _this.off(event, boundCallback);
        };
        this.on(event, boundCallback);
        return this;
    };
    /**
     * Remove the event listener.
     * @param event The event to stop listening to.
     * @param callback The callback which was bound to the event with Emitter.on.
     * If no callback is given, all callbacks events are removed.
     */
    Emitter.prototype.off = function (event, callback) {
        var _this = this;
        var events = event.split(/\W+/);
        events.forEach(function (eventName) {
            if (isUndef(_this._events)) {
                _this._events = {};
            }
            // Fix: operate on each split eventName, not the whole `event`
            // string — previously off("a b") matched neither "a" nor "b"
            // even though on("a b") registered both.
            if (_this._events.hasOwnProperty(eventName)) {
                if (isUndef(callback)) {
                    _this._events[eventName] = [];
                }
                else {
                    var eventList = _this._events[eventName];
                    // Fix: iterate backwards so splicing does not skip the
                    // element after a removed duplicate.
                    for (var i = eventList.length - 1; i >= 0; i--) {
                        if (eventList[i] === callback) {
                            eventList.splice(i, 1);
                        }
                    }
                }
            }
        });
        return this;
    };
    /**
     * Invoke all of the callbacks bound to the event
     * with any arguments passed in.
     * @param event The name of the event.
     * @param args The arguments to pass to the functions listening.
     */
    Emitter.prototype.emit = function (event) {
        var args = [];
        for (var _i = 1; _i < arguments.length; _i++) {
            args[_i - 1] = arguments[_i];
        }
        if (this._events) {
            if (this._events.hasOwnProperty(event)) {
                // copy the list so callbacks that add/remove listeners do
                // not disturb this emission
                var eventList = this._events[event].slice(0);
                for (var i = 0, len = eventList.length; i < len; i++) {
                    eventList[i].apply(this, args);
                }
            }
        }
        return this;
    };
    /**
     * Add Emitter functions (on/off/emit) to the object
     */
    Emitter.mixin = function (constr) {
        ["on", "once", "off", "emit"].forEach(function (name) {
            var property = Object.getOwnPropertyDescriptor(Emitter.prototype, name);
            Object.defineProperty(constr.prototype, name, property);
        });
    };
    /**
     * Clean up
     */
    Emitter.prototype.dispose = function () {
        _super.prototype.dispose.call(this);
        this._events = undefined;
        return this;
    };
    return Emitter;
}(Tone));
export { Emitter };
"""
Program to calculate the total amount of energy generated per hour by a wind turbine
"""
# Function to calculate the total energy generated
def calculate_energy(size, speed):
    """Return the energy (kWh) generated by a turbine of ``size`` at wind ``speed``.

    Mirrors the original model: output scales with the cube of the wind
    speed, with a 0.5 coefficient applied to the size * speed**3 rating.
    """
    rated_power = size * speed ** 3
    return 0.5 * rated_power
if __name__ == "__main__":
size = 100
speed = 10
result = calculate_energy(size, speed)
print("Total energy generated by the wind turbine in kWh: " + str(result)) |
class AxisError(Exception):
    """Raised when an AxisSeries is constructed with inconsistent axes."""
class AxisSeries:
def __init__(self, N, a_ids, axes_series):
if len(a_ids) != N or len(axes_series) != N:
raise AxisError(
f"AxisSeries is {N}-D but initiated with {len(a_ids)} axes"
)
for n, (a_id, axis_series) in enumerate(zip(a_ids, axes_series)):
if a_id is not None and axis_series is not None and a_id != axis_series.id:
raise AxisError(
f"AxisSeries initiated with contradicting id's for {n}'th axis"
)
elif a_id is None and axis_series is None:
raise AxisError(
f"AxisSeries has no axis id for series or id for its {n}'th axis"
)
self._a_ids = a_ids
self._axes_series = axes_series
# Additional methods and properties can be added as per the requirements of the AxisSeries class |
# Main task: collect .java sources from a path, zip them and upload the
# archive to the hidoc openapi endpoint.
# Arguments are key=value pairs: name=<project> path=<source dir> token=<api token>
if [ $# -gt 0 ];then
    echo "main: ----------开始----------"
    openApiUrl="http://192.168.14.155:24001/openapi/javadoc/uploadZip/"
    paramName=""
    paramValue=""
    path=""
    name=""
    token=""
    echo "main: ----------获取入参----------"
    for param in "$@"
    do
        # Split each argument at the first '=' into key and value.
        paramName=${param%%=*}
        paramValue=${param#*=}
        # echo "main: paramName: $paramName, paramValue: $paramValue"
        if [ "$paramName" = "name" ];then
            name=$paramValue
        elif [ "$paramName" = "path" ];then
            path=$paramValue
        elif [ "$paramName" = "token" ];then
            token=$paramValue
        fi
    done
    echo "main: name: $name"
    echo "main: path: $path"
    echo "main: token: $token"
    echo "main: ----------创建工作区----------"
    mkdir workspace
    cd workspace
    echo "main: ----------删除工程目录,重新生成空的目录----------"
    rm -rf ./$name/
    mkdir $name
    echo "main: ----------删除zip包----------"
    rm -rf ./$name.zip
    echo "main: ----------从指定目录复制java文件到项目目录----------"
    # Fix: quote the pattern so the shell cannot expand *.java against the
    # current directory before find ever sees it; also quote "$path".
    for file in `find "$path" -name '*.java'`
    do
        # Randomized file names avoid collisions between same-named classes
        # from different packages.
        uuid=$(uuidgen |sed 's/-//g')
        # echo "file: $file, uuid: $uuid"
        cp "$file" ./$name/$uuid.java
    done
    echo "main: ----------打包zip文件----------"
    zip -q -r $name.zip ./$name
    echo "main: ----------提交到hidoc接口中----------"
    curl -F "file=@$name.zip" -X POST $openApiUrl$token
    echo "main: ----------任务执行完成----------"
else
    echo "main: ----------异常:参数为空,终止任务----------"
fi
#!/bin/bash
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is invoked by run_interop_tests.py to build the docker image
# for interop testing. You should never need to call this script on your own.

set -x

# Params:
#  INTEROP_IMAGE - Name of tag of the final interop image
#  INTEROP_IMAGE_REPOSITORY_TAG - Optional. If set, the created image will be tagged using
#   the command: 'docker tag $INTEROP_IMAGE $INTEROP_IMAGE_REPOSITORY_TAG'
#  BASE_NAME - Base name used to locate the base Dockerfile and build script
#  BUILD_TYPE - The 'CONFIG' variable passed to the 'make' command (example:
#   asan, tsan. Default value: opt).
#  TTY_FLAG - optional -t flag to make docker allocate tty
#  BUILD_INTEROP_DOCKER_EXTRA_ARGS - optional args to be passed to the
#   docker run command

# The script lives three levels under the grpc checkout root.
cd `dirname $0`/../../..
GRPC_ROOT=`pwd`
MOUNT_ARGS="-v $GRPC_ROOT:/var/local/jenkins/grpc:ro"

# Sibling grpc-java / grpc-go checkouts are mounted read-only when present.
# If the directory is missing, the `cd` fails and the substitution yields an
# empty string, which the check below turns into a warning.
GRPC_JAVA_ROOT=`cd ../grpc-java && pwd`
if [ "$GRPC_JAVA_ROOT" != "" ]
then
  MOUNT_ARGS+=" -v $GRPC_JAVA_ROOT:/var/local/jenkins/grpc-java:ro"
else
  echo "WARNING: grpc-java not found, it won't be mounted to the docker container."
fi

GRPC_GO_ROOT=`cd ../grpc-go && pwd`
if [ "$GRPC_GO_ROOT" != "" ]
then
  MOUNT_ARGS+=" -v $GRPC_GO_ROOT:/var/local/jenkins/grpc-go:ro"
else
  echo "WARNING: grpc-go not found, it won't be mounted to the docker container."
fi

# Shared ccache directory speeds up repeated native builds across runs.
mkdir -p /tmp/ccache

# Mount service account dir if available.
# If service_directory does not contain the service account JSON file,
# some of the tests will fail.
if [ -e $HOME/service_account ]
then
  MOUNT_ARGS+=" -v $HOME/service_account:/var/local/jenkins/service_account:ro"
fi

# Use image name based on Dockerfile checksum
BASE_IMAGE=${BASE_NAME}_base:`sha1sum tools/dockerfile/stress_test/$BASE_NAME/Dockerfile | cut -f1 -d\ `

# Make sure base docker image has been built. Should be instantaneous if so.
docker build -t $BASE_IMAGE --force-rm=true tools/dockerfile/stress_test/$BASE_NAME || exit $?

# Create a local branch so the child Docker script won't complain
git branch -f jenkins-docker

CONTAINER_NAME="build_${BASE_NAME}_$(uuidgen)"

# Prepare image for interop tests, commit it on success.
# The whole pipeline is one (...) so a failure at any stage skips the
# commit/tag steps but still reaches the cleanup below.
(docker run \
  -e CCACHE_DIR=/tmp/ccache \
  -e THIS_IS_REALLY_NEEDED='see https://github.com/docker/docker/issues/14203 for why docker is awful' \
  -e BUILD_TYPE=${BUILD_TYPE:=opt} \
  -i $TTY_FLAG \
  $MOUNT_ARGS \
  $BUILD_INTEROP_DOCKER_EXTRA_ARGS \
  -v /tmp/ccache:/tmp/ccache \
  --name=$CONTAINER_NAME \
  $BASE_IMAGE \
  bash -l /var/local/jenkins/grpc/tools/dockerfile/stress_test/$BASE_NAME/build_interop_stress.sh \
  && docker commit $CONTAINER_NAME $INTEROP_IMAGE \
  && ( if [ -n "$INTEROP_IMAGE_REPOSITORY_TAG" ]; then docker tag $INTEROP_IMAGE $INTEROP_IMAGE_REPOSITORY_TAG ; fi ) \
  && echo "Successfully built image $INTEROP_IMAGE")
EXITCODE=$?

# remove intermediate container, possibly killing it first
docker rm -f $CONTAINER_NAME

exit $EXITCODE
<filename>src/js/modules/zoo.js
// A collection of animals with uniform random selection.
export default class Zoo {
  constructor (animals) {
    // Fall back to an empty collection when no animals are supplied.
    this.animals = animals || []
  }

  // Returns a uniformly random animal, or undefined for an empty zoo.
  getRandomAnimal () {
    const {animals} = this
    const index = Math.floor(Math.random() * animals.length)
    return animals[index]
  }
}
|
package main
import (
"fmt"
"sync"
)
// main fans processing of a shared data slice out over a fixed number of
// goroutines and reports the first error encountered, if any.
func main() {
	wg := &sync.WaitGroup{}
	data := make([]int, 1000)      // Data
	errChan := make(chan error, 1) // Holds only the first error (see send below)
	numGoroutines := 20            // Number of goroutines

	// Divide work into separate Goroutines; each handles a contiguous slice
	// [start, end) of the data.
	for i := 0; i < numGoroutines; i++ {
		start := i * len(data) / numGoroutines
		end := (i + 1) * len(data) / numGoroutines
		wg.Add(1)
		go func(start, end int) {
			defer wg.Done()
			for i := start; i < end; i++ {
				// Do something with data[i]
				if err := processData(data[i]); err != nil {
					// Fix: non-blocking send that keeps only the first
					// error. The original blocked here once the 1-slot
					// buffer filled, which deadlocked wg.Wait below.
					select {
					case errChan <- err:
					default:
					}
				}
			}
		}(start, end)
	}

	// Wait for Goroutines to finish their work
	wg.Wait()

	// Receive errors if any
	select {
	case err := <-errChan:
		fmt.Printf("Error occurred: %v\n", err)
	default:
		// No errors
		fmt.Println("No errors")
	}
}
// processData handles one work item. This example implementation is a no-op
// that always succeeds.
func processData(data int) error {
	_ = data // placeholder: real processing would go here
	return nil
}
#!/bin/sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, Version 1.0 only
# (the "License"). You may not use this file except in compliance
# with the License.
#
# You can obtain a copy of the license at
# trunk/opends/resource/legal-notices/OpenDS.LICENSE
# or https://OpenDS.dev.java.net/OpenDS.LICENSE.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at
# trunk/opends/resource/legal-notices/OpenDS.LICENSE. If applicable,
# add the following below this CDDL HEADER, with the fields enclosed
# by brackets "[]" replaced with your own identifying information:
# Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Portions Copyright 2006-2007 Sun Microsystems, Inc.
# This script is used to invoke various client-side processes. It should not
# be invoked directly by end users.
if test -z "${OPENDS_INVOKE_CLASS}"
then
echo "ERROR: OPENDS_INVOKE_CLASS environment variable is not set."
exit 1
fi
# Capture the current working directory so that we can change to it later.
# Then capture the location of this script and the Directory Server instance
# root so that we can use them to create appropriate paths.
WORKING_DIR=`pwd`
cd `dirname "${0}"`
SCRIPT_DIR=`pwd`
cd ..
INSTANCE_ROOT=`pwd`
export INSTANCE_ROOT
cd "${WORKING_DIR}"
# See if the environment variables are set. If not, then see if there is a java
# executable in the path and try to figure it out.
if test -z "${OPENDS_JAVA_BIN}"
then
if test -z "${OPENDS_JAVA_HOME}"
then
if test -f "${INSTANCE_ROOT}/lib/set-java-home"
then
. "${INSTANCE_ROOT}/lib/set-java-home"
OPENDS_JAVA_BIN="${OPENDS_JAVA_HOME}/bin/java"
export OPENDS_JAVA_BIN
else
if test -z "${JAVA_BIN}"
then
if test -z "${JAVA_HOME}"
then
OPENDS_JAVA_BIN=`which java 2> /dev/null`
if test ${?} -eq 0
then
export OPENDS_JAVA_BIN
else
echo "Please set OPENDS_JAVA_HOME to the root of a Java 5 (or later) installation."
exit 1
fi
else
OPENDS_JAVA_BIN="${JAVA_HOME}/bin/java"
export OPENDS_JAVA_BIN
fi
else
OPENDS_JAVA_BIN="${JAVA_BIN}"
export OPENDS_JAVA_BIN
fi
fi
else
OPENDS_JAVA_BIN="${OPENDS_JAVA_HOME}/bin/java"
export OPENDS_JAVA_BIN
fi
fi
# Explicitly set the PATH, LD_LIBRARY_PATH, LD_PRELOAD, and other important
# system environment variables for security and compatibility reasons.
PATH=/bin:/usr/bin
LD_LIBRARY_PATH=
LD_LIBRARY_PATH_32=
LD_LIBRARY_PATH_64=
LD_PRELOAD=
LD_PRELOAD_32=
LD_PRELOAD_64=
export PATH LD_LIBRARY_PATH LD_LIBRARY_PATH_32 LD_LIBRARY_PATH_64 \
LD_PRELOAD LD_PRELOAD_32 LD_PRELOAD_34
# Configure the appropriate CLASSPATH.
CLASSPATH=${INSTANCE_ROOT}/classes
for JAR in ${INSTANCE_ROOT}/lib/*.jar
do
CLASSPATH=${CLASSPATH}:${JAR}
done
export CLASSPATH
# Launch the appropriate server utility.
"${OPENDS_JAVA_BIN}" ${JAVA_ARGS} ${SCRIPT_NAME_ARG} "${OPENDS_INVOKE_CLASS}" "${@}"
|
# Report whether we are running on macOS ("yes"/"no").
_get_is_mac() {
    local mac="no"
    if [ "`uname`" == "Darwin" ]; then
        mac="yes"
    fi
    echo "$mac"
}
export is_mac=`_get_is_mac`

# Resolve the directory containing this script.
_get_this_dir() {
    # When sourced from an interactive shell $0 is the shell name; bail out
    # (the caller treats an empty result as a fatal error).
    if [ "$0" == "bash" ] || [ "$0" == "sh" ]; then
        return
    fi
    # Fix: the original tested the literal string `is_mac` (missing `$`),
    # which is never equal to "no", so the readlink branch was dead code.
    # Also make the non-mac branch return the script's *directory* (the
    # macOS branch already did), not the script file path itself.
    if [ "$is_mac" == "no" ]; then
        dirname "$(readlink -f "$0")"
    else
        local dir=$(echo "${0%/*}")
        if [ -d "$dir" ]; then
            (cd "$dir" && pwd -P)
        fi
    fi
}
export this_dir=`_get_this_dir`
if [ -z "$this_dir" ]; then
    echo "helper: get base dir failed" >&2
    exit 1
fi
export repo_dir=`dirname $this_dir`

# Hook exported for child scripts to signal a test failure.
exit_test()
{
    return 1
}
export -f exit_test

# Wait for all background child processes to finish.
wait_sub_procs()
{
    wait
}
export -f wait_sub_procs
|
<reponame>bogdanbebic/InverseSquareRoot
// Doxygen-generated search index fragment: each entry maps a lowercased,
// escaped search key to [display name, [anchor url, flag, namespace]].
// Generated output — do not edit by hand.
var searchData=
[
  ['binary_5fsearch',['binary_search',['../namespaceinv__sqrt.html#ac8f471c39b23911ec1174b17319337a1',1,'inv_sqrt']]],
  ['binary_5fsearch_5fversion_5f2',['binary_search_version_2',['../namespaceinv__sqrt.html#a387a1a5588160e3855862d238ec678a0',1,'inv_sqrt']]]
];
|
#!/bin/bash
# Launch the play binary against the wwf_challenge board.
# Usage: $0 PROJECT_DIR [command prefix...]
# Any arguments after PROJECT_DIR are placed in front of the play binary
# (e.g. a debugger or `time` wrapper).
set -eo pipefail

# Absolute directory of this script, independent of the caller's cwd.
SCRIPT_DIR=$(cd "$(dirname "$0")"; pwd)

PROJECT_DIR=$1
shift

"$@" ./src/play/play \
    KYOETRO \
    "${SCRIPT_DIR}/tiles.txt" \
    "${PROJECT_DIR}/boards/wwf_challenge.txt"
import { TreasureMap } from './treasure-map';
import { Player } from './player';
import { Mountain } from './mountain';
import { Treasure } from './treasure';
describe('TreasureMap', () => {
  let treasureMap: TreasureMap;

  // Fresh 5x6 map for every spec.
  beforeEach(() => {
    treasureMap = new TreasureMap(5, 6);
  })

  describe('Map composition', () => {
    // Two players on the same cell must be rejected.
    it('Impossible adding player', async () => {
      const player1: Player = new Player('Alphonse', 3, 3, 'South', [], treasureMap);
      const player2: Player = new Player('Germaine', 3, 3, 'South', [], treasureMap);
      treasureMap.addPlayer(player1);
      expect(() => treasureMap.addPlayer(player2)).toThrowError('This place is already taken');
    });

    // Two mountains on the same cell must be rejected.
    it('Impossible adding mountain', async () => {
      const mountain1: Mountain = new Mountain(2, 3);
      const mountain2: Mountain = new Mountain(2, 3);
      treasureMap.addMountain(mountain1);
      expect(() => treasureMap.addMountain(mountain2)).toThrowError('This place is already taken');
    });

    // Treasures on the same cell merge their quantities into one entry.
    it('Merge adding treasure', async () => {
      const treasure1: Treasure = new Treasure(2, 3, 1);
      const treasure2: Treasure = new Treasure(2, 3, 2);
      treasureMap.addTreasure(treasure1);
      treasureMap.addTreasure(treasure2);
      expect(treasureMap.treasures.length).toBe(1);
      expect(treasure1.quantity).toBe(3);
    });
  });

  describe('Map game processing', () => {
    // One turn consumes one action per player and moves them accordingly.
    // Note: doSingleTurn is private; the tests call it via index access.
    it('Map does simple turn', async () => {
      const player1: Player = new Player('Alphonse', 1, 3, 'South', ['A'], treasureMap);
      const player2: Player = new Player('Germaine', 3, 3, 'North', ['A', 'D'], treasureMap);
      treasureMap.addPlayer(player1);
      treasureMap.addPlayer(player2);
      treasureMap['doSingleTurn']();
      expect(player1.row).toBe(4);
      expect(player1.actions.length).toBe(0);
      expect(player2.row).toBe(2);
      expect(player2.actions.length).toBe(1);
    });

    // When two players would swap/occupy the same cell horizontally, only
    // the first mover advances.
    it('Player horizontal concurrency', async () => {
      const player1: Player = new Player('Alphonse', 1, 3, 'East', ['A'], treasureMap);
      const player2: Player = new Player('Germaine', 3, 3, 'West', ['A'], treasureMap);
      treasureMap.addPlayer(player1);
      treasureMap.addPlayer(player2);
      treasureMap['doSingleTurn']();
      expect(player1.column).toBe(2);
      expect(player2.column).toBe(3);
    });

    // Same conflict rule applies vertically.
    it('Player vertical concurrency', async () => {
      const player1: Player = new Player('Alphonse', 2, 2, 'South', ['A'], treasureMap);
      const player2: Player = new Player('Germaine', 2, 4, 'North', ['A'], treasureMap);
      treasureMap.addPlayer(player1);
      treasureMap.addPlayer(player2);
      treasureMap['doSingleTurn']();
      expect(player1.row).toBe(3);
      expect(player2.row).toBe(4);
    });
  });
})
// package-info: this file exists solely to carry the package-level Javadoc.
/**
 * Classes used to model duration.
 */
package io.opensphere.core.units.duration;
|
<gh_stars>0
#include <iostream.h>
#include <dos.h>
#include "SCHEDULE.H"
#include "thread.h"
#include "pcb.h"
#include "idle.h"
#include "list.h"
#include "system.h"
#include <stdarg.h>
// User program entry point, defined in a separately compiled unit.
extern int userMain(int argc, char* argv[]);

// Entry point: boots the cooperative kernel (System), runs the user program
// under it, prints its result, then tears the kernel down.
// NOTE(review): legacy pre-standard headers (iostream.h, dos.h) — this
// targets an old DOS-era compiler, not a modern toolchain.
int main(int argc, char* argv[]){
	System* s = new System();
	int ret = userMain(argc, argv);
	cout << ret << endl;
	delete s;
	return ret;
}
|
<gh_stars>0
package mechconstruct.util.slotconfig;
/**
 * Per-slot I/O configuration for a machine inventory slot.
 *
 * <p>NOTE(review): "extact" (field and getter) looks like a typo for
 * "extract", but it is part of the public API — renaming would break
 * callers.
 */
public enum SlotConfig {
	NONE(false, false),
	INPUT(false, true),   // items may be inserted, not extracted
	OUTPUT(true, false);  // items may be extracted, not inserted

	boolean extact; // whether automation may pull items out of this slot
	boolean insert; // whether automation may push items into this slot

	SlotConfig(boolean extact, boolean insert) {
		this.extact = extact;
		this.insert = insert;
	}

	public boolean isExtact() {
		return extact;
	}

	public boolean isInsert() {
		return insert;
	}

	/**
	 * Returns the next configuration in declaration order, wrapping from the
	 * last constant back to the first (NONE → INPUT → OUTPUT → NONE).
	 */
	public SlotConfig getNext() {
		int i = this.ordinal() + 1;
		if (i >= SlotConfig.values().length) {
			i = 0;
		}
		return SlotConfig.values()[i];
	}
}
|
package com.github.peacetrue.beans.properties.deleted;
/**
 * Implemented by entities that carry a soft-delete flag.
 *
 * @author peace
 * @since 1.0
 **/
public interface DeletedAware {
	/**
	 * Sets the soft-delete marker.
	 *
	 * @param deleted {@code true} when the entity is logically deleted
	 */
	void setDeleted(Boolean deleted);
}
|
package command_line_calculator_v2;
/*
* The FinancialFunctions class will contain methods that allow the user to utilize basic financial algorithms.
* This class must be instantiated.
*/
import java.util.*;
/*
* @author <NAME>
*/
public class FinancialFunctions {

	// Shared stdin scanner used by the interactive methods.
	static Scanner userInput = new Scanner(System.in);

	// Result of the most recent calculation (0.0 until futureValue runs).
	private double value;

	// Calculates the future value of 'amount'.
	// Reads amount, interest rate and number of years from stdin, then
	// applies FV = amount * (1 + rate)^years.
	// Needs additional functionality, like allowing the user to add payments
	// and changing how often interest accrues.
	double futureValue(){
		System.out.println("Amount:");
		double amount = userInput.nextDouble();
		System.out.println("Interest Rate:");
		double interestRate = userInput.nextDouble();
		System.out.println("Years:");
		double numOfYears = userInput.nextDouble();
		value = amount * Math.pow((1+interestRate), numOfYears);
		return value;
	}

	// Calculate the present value of some amount
	// based upon a stated discount rate.
	// NOTE(review): stub — currently returns the last computed value.
	double presentValue(){
		return value;
	}

	// Calculate the Present Value of some cash inflows
	// minus the present value of some cash outflows.
	// NOTE(review): stub.
	double netPresentValue(){
		return value;
	}

	// Calculate loan payments based on some APR and length of time.
	// Possible amortization table.
	// NOTE(review): stub; name breaks lowerCamelCase but is public API.
	double Amortization(){
		return value;
	}

	// Returns the result of the most recent calculation.
	double getValue(){
		return value;
	}
}
|
package com.example.android.activities;
import android.app.Activity;
import android.bluetooth.BluetoothAdapter;
import android.bluetooth.BluetoothDevice;
import android.bluetooth.BluetoothManager;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.os.Bundle;
import android.os.Handler;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;
import android.widget.Toast;
import com.example.android.bluetoothlegatt.R;
import com.example.android.misc.Globals;
/**
 * Entry activity: scans for a specific BLE peripheral (BTLE_G4) and, once
 * found, lets the user connect to it via ConnectedDeviceActivity.
 */
public class LauncherActivity extends Activity {

    //UI handles
    TextView device_status_tv;
    Button scan_btn;
    Button connect_btn;

    //Bluetooth members
    private BluetoothAdapter mBluetoothAdapter;
    private BluetoothDevice target_device = null;
    private Handler scanHandler;
    private boolean scanning = false;
    private boolean target_found_in_current_scan = false;

    private static final int REQUEST_ENABLE_BT = 1;
    // Scans are time-boxed to 5 seconds.
    private static final long SCAN_PERIOD = 5000;
    private static final String TARGET_DEVICE_NAME = "BTLE_G4";

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        scanHandler = new Handler();
        setContentView(R.layout.activity_launcher);

        //UI bindings
        device_status_tv = (TextView) findViewById(R.id.ble_status_value);
        scan_btn = (Button) findViewById(R.id.scan_btn);
        connect_btn = (Button) findViewById(R.id.connect_btn);

        //Bluetooth checks
        // Use this check to determine whether BLE is supported on the device. Then you can
        // selectively disable BLE-related features.
        if (!getPackageManager().hasSystemFeature(PackageManager.FEATURE_BLUETOOTH_LE)) {
            Toast.makeText(this, R.string.ble_not_supported, Toast.LENGTH_SHORT).show();
            finish();
            // Fix: finish() does not stop execution; without this return the
            // rest of onCreate still ran on an unsupported device.
            return;
        }

        // Initializes a Bluetooth adapter. For API level 18 and above, get a reference to
        // BluetoothAdapter through BluetoothManager.
        final BluetoothManager bluetoothManager =
                (BluetoothManager) getSystemService(Context.BLUETOOTH_SERVICE);
        mBluetoothAdapter = bluetoothManager.getAdapter();

        // Checks if Bluetooth is supported on the device.
        if (mBluetoothAdapter == null) {
            Toast.makeText(this, R.string.error_bluetooth_not_supported, Toast.LENGTH_SHORT).show();
            finish();
            return;
        }
    }

    @Override
    protected void onResume() {
        super.onResume();
        // Ensures Bluetooth is enabled on the device. If Bluetooth is not currently enabled,
        // fire an intent to display a dialog asking the user to grant permission to enable it.
        // (The original nested the identical isEnabled() check twice.)
        if (!mBluetoothAdapter.isEnabled()) {
            Intent enableBtIntent = new Intent(BluetoothAdapter.ACTION_REQUEST_ENABLE);
            startActivityForResult(enableBtIntent, REQUEST_ENABLE_BT);
        }
    }

    /** Opens the generic all-device scanner screen. */
    public void allDeviceScannerClicked(View v){
        final Intent intent = new Intent(this, DeviceScanActivity.class);
        startActivity(intent);
    }

    /** Starts a time-boxed BLE scan for the target device. */
    public void scanClicked(View v){
        Log.w("clicked", "scan");
        scan_btn.setText(R.string.btn_scanning);
        scan_btn.setEnabled(false);
        target_found_in_current_scan = false;
        scanLeDevice(true);
    }

    /**
     * Launches ConnectedDeviceActivity for the found target (or canned data
     * when Globals.testing is set). The original duplicated the whole body
     * in both branches; only the extras differ, so they are folded together.
     */
    public void connectClicked(View v){
        final String deviceName;
        final String deviceAddress;
        if (Globals.testing){
            deviceName = "<NAME>";
            deviceAddress = "12:43:55:78:AA:09";
        } else {
            if (target_device == null) return;
            deviceName = target_device.getName();
            deviceAddress = target_device.getAddress();
        }

        final Intent intent = new Intent(this, ConnectedDeviceActivity.class);
        intent.putExtra("device_name", deviceName);
        intent.putExtra("device_address", deviceAddress);
        stopScanAndResetButton();
        startActivity(intent);
    }

    /** Stops an in-flight scan and restores the scan button, if scanning. */
    private void stopScanAndResetButton() {
        if (scanning) {
            mBluetoothAdapter.stopLeScan(mLeScanCallback);
            scan_btn.setText(R.string.btn_scan);
            scan_btn.setEnabled(true);
            scanning = false;
        }
    }

    private void scanLeDevice(final boolean enable) {
        if (enable) {
            // Stops scanning after a pre-defined scan period.
            scanHandler.postDelayed(new Runnable() {
                @Override
                public void run() {
                    scanning = false;
                    mBluetoothAdapter.stopLeScan(mLeScanCallback);
                    scan_btn.setText(R.string.btn_scan);
                    scan_btn.setEnabled(true);
                    invalidateOptionsMenu();
                    // A completed scan with no sighting marks the target gone.
                    if (!target_found_in_current_scan) setTargetUnavailable();
                }
            }, SCAN_PERIOD);
            scanning = true;
            mBluetoothAdapter.startLeScan(mLeScanCallback);
        } else {
            scanning = false;
            mBluetoothAdapter.stopLeScan(mLeScanCallback);
        }
        invalidateOptionsMenu();
    }

    // Scan results arrive on a binder thread; hop to the UI thread before
    // touching any views.
    private BluetoothAdapter.LeScanCallback mLeScanCallback =
            new BluetoothAdapter.LeScanCallback() {
                @Override
                public void onLeScan(final BluetoothDevice device, int rssi, byte[] scanRecord) {
                    runOnUiThread(new Runnable() {
                        @Override
                        public void run() {
                            deviceFound(device);
                        }
                    });
                }
            };

    /** Filters scan results down to the device we are looking for. */
    private void deviceFound(BluetoothDevice device){
        String device_name = device.getName();
        if (device_name != null){
            if (device_name.equals(TARGET_DEVICE_NAME)){
                targetDeviceFound(device);
            }
        }
    }

    /** Marks the target available and enables the connect button. */
    private void targetDeviceFound(BluetoothDevice device){
        target_found_in_current_scan = true;
        target_device = device;
        device_status_tv.setText(R.string.ble_available);
        device_status_tv.setTextColor(getResources().getColor(R.color.available_green));
        connect_btn.setEnabled(true);
    }

    /** Marks the target unavailable after a scan finds nothing. */
    private void setTargetUnavailable(){
        target_device = null;
        device_status_tv.setText(R.string.ble_unavailable);
        device_status_tv.setTextColor(getResources().getColor(R.color.unavailable_red));
        connect_btn.setEnabled(false);
    }
}
|
def find_mode(nums):
    """Return the most frequent value in ``nums``.

    Ties are broken in favor of the value whose count is encountered first
    in iteration (insertion) order, matching the original strict ``>`` scan.
    Returns 0 for an empty input, as the original did.
    """
    if not nums:
        return 0
    # Tally occurrences of each value.
    counts = {}
    for num in nums:
        counts[num] = counts.get(num, 0) + 1
    # max() returns the first key with the maximal count in insertion
    # order, which reproduces the original loop's tie-breaking.
    return max(counts, key=counts.get)
# call the function on sample data
# Fix: the original referenced an undefined `nums` and raised NameError at
# import time; define example data and guard with __main__.
if __name__ == "__main__":
    nums = [1, 2, 3, 2, 4, 2]
    mode = find_mode(nums)
    print("The mode is " + str(mode))
<gh_stars>1-10
# Create and install an epoll()-based reactor (Linux-only) as Twisted's
# global reactor. This must run before any other module imports
# `twisted.internet.reactor`, otherwise a default reactor is already
# installed and installReactor will refuse to replace it.
from twisted.internet.epollreactor import EPollReactor
from twisted.internet.main import installReactor

reactor: EPollReactor = EPollReactor()
installReactor(reactor)
|
<gh_stars>1-10
package vehicle
import (
	"fmt"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"strconv"
	"time"

	"github.com/evcc-io/evcc/api"
	"github.com/evcc-io/evcc/provider"
	"github.com/evcc-io/evcc/util"
	"github.com/evcc-io/evcc/util/request"
	"golang.org/x/net/publicsuffix"
)
// ovmsStatusResponse models the subset of /api/status/<vehicle> we consume.
type ovmsStatusResponse struct {
	Odometer string `json:"odometer"` // reported in tenths of the unit (see Odometer, which divides by 10)
}

// ovmsChargeResponse models the subset of /api/charge/<vehicle> we consume.
type ovmsChargeResponse struct {
	ChargeEtrFull    string `json:"charge_etr_full"` // estimated time to full, in minutes (see FinishTime)
	ChargeState      string `json:"chargestate"`     // e.g. "charging"
	ChargePortOpen   int    `json:"cp_dooropen"`     // >0 while the charge port is open
	EstimatedRange   string `json:"estimatedrange"`
	MessageAgeServer int    `json:"m_msgage_s"` // age of the server-side message, in seconds
	Soc              string `json:"soc"`        // state of charge (presumably percent — parsed as float)
}

// ovmsConnectResponse models the subset of /api/vehicle/<vehicle> we consume.
type ovmsConnectResponse struct {
	NetConnected int `json:"v_net_connected"` // 1 while the car module is online
}
// OVMS is an api.Vehicle implementation for dexters-web server requests
type Ovms struct {
	*embed
	*request.Helper

	user, password, vehicleId, server string
	cache                             time.Duration
	isOnline                          bool // last observed module connectivity (set by authFlow)

	chargeG func() (interface{}, error) // cached /api/charge getter
	statusG func() (interface{}, error) // cached /api/status getter
}
// init registers the "ovms" vehicle type with the global vehicle registry.
func init() {
	registry.Add("ovms", NewOvmsFromConfig)
}
// NewOVMSFromConfig creates a new vehicle
func NewOvmsFromConfig(other map[string]interface{}) (api.Vehicle, error) {
cc := struct {
embed `mapstructure:",squash"`
User, Password, VehicleID, Server string
Cache time.Duration
}{
Cache: interval,
}
if err := util.DecodeOther(other, &cc); err != nil {
return nil, err
}
log := util.NewLogger("ovms").Redact(cc.User, cc.Password, cc.VehicleID)
v := &Ovms{
embed: &cc.embed,
Helper: request.NewHelper(log),
user: cc.User,
password: <PASSWORD>,
vehicleId: cc.VehicleID,
server: cc.Server,
cache: cc.Cache,
}
v.chargeG = provider.NewCached(v.batteryAPI, cc.Cache).InterfaceGetter()
v.statusG = provider.NewCached(v.statusAPI, cc.Cache).InterfaceGetter()
var err error
v.Jar, err = cookiejar.New(&cookiejar.Options{
PublicSuffixList: publicsuffix.List,
})
return v, err
}
// loginToServer establishes a session cookie with the OVMS server. The
// credentials travel as query parameters, so they must be URL-escaped —
// the original interpolated them verbatim and broke on reserved characters
// (&, =, #, spaces) in user names or passwords.
func (v *Ovms) loginToServer() (err error) {
	uri := fmt.Sprintf("https://%s:6869/api/cookie?username=%s&password=%s",
		v.server, url.QueryEscape(v.user), url.QueryEscape(v.password))

	var resp *http.Response
	if resp, err = v.Get(uri); err == nil {
		resp.Body.Close() // body content is irrelevant; only the cookie matters
	}

	return err
}
// connectRequest fetches the vehicle connectivity state from /api/vehicle.
func (v *Ovms) connectRequest() (ovmsConnectResponse, error) {
	uri := fmt.Sprintf("https://%s:6869/api/vehicle/%s", v.server, v.vehicleId)

	var res ovmsConnectResponse
	err := v.GetJSON(uri, &res)

	return res, err
}

// chargeRequest fetches charge/battery data from /api/charge.
func (v *Ovms) chargeRequest() (ovmsChargeResponse, error) {
	uri := fmt.Sprintf("https://%s:6869/api/charge/%s", v.server, v.vehicleId)

	var res ovmsChargeResponse
	err := v.GetJSON(uri, &res)

	return res, err
}

// statusRequest fetches odometer/status data from /api/status.
func (v *Ovms) statusRequest() (ovmsStatusResponse, error) {
	uri := fmt.Sprintf("https://%s:6869/api/status/%s", v.server, v.vehicleId)

	var res ovmsStatusResponse
	err := v.GetJSON(uri, &res)

	return res, err
}
// authFlow (re-)establishes the server session and refreshes the vehicle's
// online state. Invoked when an API request fails, on the assumption that
// the session cookie has expired.
func (v *Ovms) authFlow() error {
	var resp ovmsConnectResponse

	err := v.loginToServer()
	if err == nil {
		resp, err = v.connectRequest()
		if err == nil {
			// v_net_connected == 1 means the car-side module is online
			v.isOnline = resp.NetConnected == 1
		}
	}

	return err
}
// batteryAPI provides battery-status api response.
// On failure it retries once after re-authenticating. When the request
// succeeds, the vehicle is online, and the server-side data is older than
// the cache interval, it returns api.ErrMustRetry so the cached getter
// polls again for fresher data.
func (v *Ovms) batteryAPI() (interface{}, error) {
	var resp ovmsChargeResponse

	resp, err := v.chargeRequest()
	if err != nil {
		// assume the session expired: re-login, then retry once
		err = v.authFlow()
		if err == nil {
			resp, err = v.chargeRequest()
		}
	}

	messageAge := time.Duration(resp.MessageAgeServer) * time.Second
	if err == nil && v.isOnline && messageAge > v.cache {
		err = api.ErrMustRetry
	}

	return resp, err
}
// statusAPI provides vehicle status api response.
// On failure it retries once after re-authenticating (no staleness check,
// unlike batteryAPI).
func (v *Ovms) statusAPI() (interface{}, error) {
	var resp ovmsStatusResponse

	resp, err := v.statusRequest()
	if err != nil {
		// assume the session expired: re-login, then retry once
		err = v.authFlow()
		if err == nil {
			resp, err = v.statusRequest()
		}
	}

	return resp, err
}
// SoC implements the api.Vehicle interface
// NOTE(review): if the cached getter succeeds but the type assertion fails,
// this returns (0, nil); the same pattern recurs in the accessors below —
// confirm whether a sentinel error would be more appropriate.
func (v *Ovms) SoC() (float64, error) {
	res, err := v.chargeG()
	if res, ok := res.(ovmsChargeResponse); err == nil && ok {
		return strconv.ParseFloat(res.Soc, 64)
	}

	return 0, err
}
// compile-time interface satisfaction check
var _ api.ChargeState = (*Ovms)(nil)

// Status implements the api.ChargeState interface
func (v *Ovms) Status() (api.ChargeStatus, error) {
	status := api.StatusA // disconnected

	res, err := v.chargeG()
	if res, ok := res.(ovmsChargeResponse); err == nil && ok {
		if res.ChargePortOpen > 0 {
			status = api.StatusB // connected, not charging
		}
		if res.ChargeState == "charging" {
			status = api.StatusC // actively charging
		}
	}

	return status, err
}
// compile-time interface satisfaction check
var _ api.VehicleRange = (*Ovms)(nil)

// Range implements the api.VehicleRange interface
func (v *Ovms) Range() (int64, error) {
	res, err := v.chargeG()
	if res, ok := res.(ovmsChargeResponse); err == nil && ok {
		// base 0: accepts decimal, hex (0x), and octal (0) representations
		return strconv.ParseInt(res.EstimatedRange, 0, 64)
	}

	return 0, err
}
var _ api.VehicleOdometer = (*Ovms)(nil)

// Odometer implements the api.VehicleOdometer interface.
// The status endpoint reports the odometer in tenths of the configured
// distance unit, hence the division by 10.
func (v *Ovms) Odometer() (float64, error) {
	res, err := v.statusG()
	if err != nil {
		return 0, err
	}
	status, ok := res.(ovmsStatusResponse)
	if !ok {
		return 0, nil
	}
	odometer, err := strconv.ParseFloat(status.Odometer, 64)
	if err != nil {
		// the original shadowed err inside the if-block, so a parse
		// failure was silently reported as (0, nil); propagate it instead
		return 0, err
	}
	return odometer / 10, nil
}
var _ api.VehicleFinishTimer = (*Ovms)(nil)

// FinishTime implements the api.VehicleFinishTimer interface.
// ChargeEtrFull is the estimated time remaining until full, in minutes.
func (v *Ovms) FinishTime() (time.Time, error) {
	res, err := v.chargeG()
	if err != nil {
		return time.Time{}, err
	}
	charge, ok := res.(ovmsChargeResponse)
	if !ok {
		return time.Time{}, nil
	}
	cef, err := strconv.ParseInt(charge.ChargeEtrFull, 0, 64)
	if err != nil {
		// the original shadowed err here and silently returned a zero
		// time with a nil error on parse failure; propagate the error
		return time.Time{}, err
	}
	return time.Now().Add(time.Duration(cef) * time.Minute), nil
}
|
// Create a new paragraph and append it to the #content container.
const container = document.getElementById('content');
const paragraph = document.createElement('p');
paragraph.innerHTML = 'I just added this paragraph!';
container.appendChild(paragraph);
<gh_stars>1-10
package com.wpisen.trace.server.service.entity;
/**
 * Description: query parameters for searching request addresses.<br/>
 *
 * @author <EMAIL>
 * @version 1.0
 * @date: 2016-08-04 16:52:51
 * @since JDK 1.7
 */
public class SearchRequestParam {
    private String clientIp;   // client IP
    private String addressIp;  // target IP
    private String queryWord;  // search keyword
    private Long timeBegin;    // start of the time window (epoch value assumed -- TODO confirm unit)
    private Long timeEnd;      // end of the time window
    private Integer pageSize;  // number of entries per page
    private Integer pageIndex; // page number
    private String nodeType;   // node type
    private String servicePath;// service path

    public String getClientIp() {
        return clientIp;
    }
    public void setClientIp(String clientIp) {
        this.clientIp = clientIp;
    }
    public String getQueryWord() {
        return queryWord;
    }
    public void setQueryWord(String queryWord) {
        this.queryWord = queryWord;
    }
    public Long getTimeBegin() {
        return timeBegin;
    }
    public void setTimeBegin(Long timeBegin) {
        this.timeBegin = timeBegin;
    }
    public Long getTimeEnd() {
        return timeEnd;
    }
    public void setTimeEnd(Long timeEnd) {
        this.timeEnd = timeEnd;
    }
    public Integer getPageSize() {
        return pageSize;
    }
    public void setPageSize(Integer pageSize) {
        this.pageSize = pageSize;
    }
    public Integer getPageIndex() {
        return pageIndex;
    }
    public void setPageIndex(Integer pageIndex) {
        this.pageIndex = pageIndex;
    }
    public String getNodeType() {
        return nodeType;
    }
    public void setNodeType(String nodeType) {
        this.nodeType = nodeType;
    }
    public String getAddressIp() {
        return addressIp;
    }
    public void setAddressIp(String addressIp) {
        this.addressIp = addressIp;
    }
    public String getServicePath() {
        return servicePath;
    }
    public void setServicePath(String servicePath) {
        this.servicePath = servicePath;
    }
}
|
<filename>db/schema.sql
-- Recreate the application database from scratch.
-- WARNING: dropping the database destroys all existing data.
DROP DATABASE IF EXISTS dev_hub;
CREATE DATABASE dev_hub;
|
# -*- coding: utf-8 -*-
# @Author : kevin_w
import math
import numpy as np
import torch
from modify.util import collect_action, GetRawObservations
# Cross-frame state used by reward_func to compare consecutive observations.
ball_pos = [0, 0, 0]     # ball position seen in the previous frame
player_pos = [0, 0, 0]   # active player position seen in the previous frame
last_team = 0            # team that controlled the ball in the previous frame
last_dist_to_ball = 0    # previous player-to-ball distance
last_dist_to_goal = 0    # previous player-to-goal distance
e = np.finfo(np.float32).eps.item()  # float32 machine epsilon
def action_modify(obs, action):
    """Override the policy's chosen action in hand-crafted situations.

    Returns either the original ``action`` tensor unchanged or a
    replacement ``torch.IntTensor`` of shape (1, 1) with the new action id.
    """
    get_obs = GetRawObservations(obs)
    # team == 0 appears to mean our (left) team controls the ball -- TODO confirm
    team, pos, ball_direction, player = get_obs.get_ball_info()
    left_team, right_team = get_obs.get_team_position()
    active_player = left_team[get_obs.get_player()]
    # modify it if the agent repeat one single action to prevent local minimum
    # dribble the ball for a long time
    # pass the ball to each other for a long time
    # take a shoot in a long distance to the goal
    a, is_in_local_min = collect_action(action.item())
    # stuck repeating a movement action (ids 0-8) with possession -> pass (9)
    if (team == 0) and (0 <= a <= 8) and \
            is_in_local_min:
        modified_action = torch.IntTensor([[9]])
        return modified_action
    # stuck repeating pass/shot actions (ids 9-11) -> move right (5)
    if (team == 0) and (9 <= a <= 11) and \
            is_in_local_min:
        modified_action = torch.IntTensor([[5]])
        print('modified action2')
        return modified_action
    # shot attempted too far from the goal (x <= 0.4) -> move right instead
    if (active_player[0] <= 0.4) and a == 12:
        modified_action = torch.IntTensor([[5]])
        print('modified shoot1')
        return modified_action
    # close to goal, centered, ball moving forward -> force a shot (12)
    if (team == 0) and (active_player[0] > 0.6) and \
            (abs(active_player[1]) < 0.25) and \
            (ball_direction[0] > 0):
        modified_action = torch.IntTensor([[12]])
        print('modified shoot2')
        return modified_action
    else:
        return action
# design a reward function based on raw info
def reward_func(obs, score, action):
    """Shaped reward combining possession, progress towards the goal,
    shooting opportunities and the raw score difference.

    Compares the current frame against the module-level globals
    (ball_pos, last_team, last_dist_to_ball, ...) which are updated at
    the end of each call.
    """
    global ball_pos
    global last_team
    global last_dist_to_ball
    global last_dist_to_goal
    global player_pos
    global e
    get_obs = GetRawObservations(obs)
    team, pos, ball_direction, player = get_obs.get_ball_info()
    left_team, right_team = get_obs.get_team_position()
    team_direction = get_obs.get_team_direction()
    active_player_direction = team_direction[get_obs.get_player()]
    active_player = left_team[get_obs.get_player()]
    reward = 0
    # x of the most advanced teammate (loop var renamed to avoid shadowing
    # the ``player`` value unpacked above)
    first_offense_player = max([p[0] for p in left_team])
    # player-to-ball distance; reused for the possession and
    # moving-away-from-ball checks below
    ball_player_distance = math.sqrt((active_player[0] - pos[0]) ** 2 +
                                     (active_player[1] - pos[1]) ** 2)
    distance_to_ball = ball_player_distance
    # if opponents control, reward -
    if team == 1:
        reward -= 0.1
    if team == 1 and last_team != 1:
        reward -= 0.1
        last_team = 1
    if team == 0 and last_team != 0:
        # reward += 0.5
        last_team = 0
    # if ball outside the playground
    if team == 0 and \
            ((pos[0] <= -1.02) or (pos[0] >= 1.02) or (pos[1] >= 0.42) or (pos[1] <= -0.42)):
        reward -= 0.5
        print('outside punishment')
    # run to the ball and get control: punish drifting away from the ball
    # while we do not have possession.
    # BUG FIX: the original compared ``distance`` (still 0 at this point)
    # against last_dist_to_ball, so this penalty could never fire.
    if (last_team != 0) and (distance_to_ball - last_dist_to_ball > 0.01):
        reward -= 0.1
    # action limit: punish pass/shot actions without possession
    if (team != 0) and (9 <= action <= 11):
        reward -= 0.1
        print('uncontrolled punishment')
    # punish passing backwards when we are the furthest player forward
    if (team == 0) and (active_player[0] < 0.7) and \
            active_player[0] >= first_offense_player:
        if (ball_direction[0] < 0) or \
                (active_player_direction[0] < 0) or \
                ((pos[0] - ball_pos[0]) < 0.0):
            reward -= 0.1
            print('pass behind punishment')
    # reward forward dribbling (actions 13/17) while advancing
    if (team == 0) and (active_player_direction[0] > 0.01) and \
            (ball_direction[0] > 0.01) and \
            ((action == 13) or (action == 17)):
        reward += 0.5
        print('dribble reward')
    # punish long-distance shots
    if (active_player[0] < 0.6) and (action == 12):
        reward -= 0.1
        print('shoot punishment')
    # reward shooting from a good, central position
    if (team == 0) and active_player[0] > 0.6 and \
            (abs(active_player[1]) < 0.3) and \
            (action == 12):
        reward += 1
        print('shoot opportunity')
    # offense: distance from the active player to the opponent goal at (1, 0)
    distance = math.sqrt((active_player[0] - 1) ** 2 + (active_player[1] - 0) ** 2)
    if pos[1] > 0:
        if (team == 0) and ((pos[0] - ball_pos[0]) > 0) and \
                ((ball_pos[1] - pos[1]) > 0) and (active_player[0] > -0.7):
            if last_dist_to_goal - distance > 0.01:
                reward += (2 - distance)
                print('move reward')
            if (active_player_direction[0] > 0.001) and \
                    ball_player_distance < 0.05:
                reward += (2 - distance)
                print('controlled reward')
    elif pos[1] < 0:
        if (team == 0) and ((pos[0] - ball_pos[0]) > 0) and \
                ((ball_pos[1] - pos[1]) < 0) and (active_player[0] > -0.7):
            if last_dist_to_goal - distance > 0.01:
                reward += (2 - distance)
                print('move reward')
            if (active_player_direction[0] > 0.001) and \
                    ball_player_distance < 0.05:
                reward += (2 - distance)
                print('controlled reward')
    # score the goal: +-50 per goal difference
    reward += score * 50
    # update cross-frame state for the next call
    ball_pos = pos
    last_dist_to_ball = distance_to_ball
    last_dist_to_goal = distance
    player_pos = active_player
    return reward
|
<filename>src/types.ts<gh_stars>0
import { IncomingMessage } from 'http'
import { Redirect, GetServerSidePropsContext, GetServerSideProps } from 'next'
import { NextApiRequestCookies } from 'next/dist/server/api-utils'
/***********************************************/
/******************** CASE *********************/
/***********************************************/
/** SSR request: Next.js IncomingMessage augmented with parsed cookies. */
export type SsrRequest = IncomingMessage & {
  cookies: NextApiRequestCookies
}

/** Case outcome: render the page with these props. */
export type SsrCasePropsResult = {
  props: Record<string, unknown>
}

/** Case outcome: redirect the client. */
export type SsrCaseRedirectResult = {
  redirect: Redirect
}

/** Case outcome: render the 404 page. */
export type SsrCaseNotFoundResult = {
  notFound: true
}

/** Loose shape covering any of the three case outcomes at once. */
export type SsrCaseAmbiguousResult = {
  props?: Record<string, unknown>
  redirect?: Redirect
  notFound?: true
}

/** Exactly one of the three case outcomes. */
export type SsrCaseResult =
  | SsrCasePropsResult
  | SsrCaseRedirectResult
  | SsrCaseNotFoundResult

/** A single SSR case: receives the Next context plus the app context. */
export type SsrCaseHandler<ContextType> = (
  nextContext: GetServerSidePropsContext,
  appContext: ContextType
) => Promise<SsrCaseResult>

/** A case handler with the app context already bound. */
export type WrappedSsrCaseHandler = (
  nextContext: GetServerSidePropsContext
) => Promise<SsrCaseAmbiguousResult>

/** Builds the per-request app context from the Next context. */
export type SsrContextGenerator<ContextType> = (
  nextContext: GetServerSidePropsContext
) => Promise<ContextType>

/** Resolves the URL of the error page for a failed request. */
export type SsrErrorPageUrlGetter<ContextType> = (
  err: unknown,
  nextContext: GetServerSidePropsContext,
  appContext?: ContextType
) => string

/** Side-effect hook invoked when a case handler throws. */
export type SsrCaseErrorHandler<ContextType> = (
  err: unknown,
  nextContext: GetServerSidePropsContext,
  appContext: ContextType
) => void

/** Factory producing a case error handler for a given context type. */
export type SsrCaseErrorHandlerGetter = <
  ContextType
>() => SsrCaseErrorHandler<ContextType>

/** Wraps a case handler with error handling and context binding. */
export type SsrCaseHandlerWrapper = <ContextType>(
  caseHandler: SsrCaseHandler<ContextType>,
  nextContext: ContextType,
  getErrorPageUrl: SsrErrorPageUrlGetter<ContextType>,
  onCaseHandlingError?: SsrCaseErrorHandler<ContextType>
) => WrappedSsrCaseHandler

/***********************************************/
/****************** CASE LIST ******************/
/***********************************************/

/** Runs a list of wrapped case handlers, merging accumulated props. */
export type WrappedSsrCaseListHandler = (
  nextContext: GetServerSidePropsContext,
  wrappedCaseHandlers: Array<WrappedSsrCaseHandler>,
  previousResult?: SsrCasePropsResult
) => Promise<SsrCaseResult>

/***********************************************/
/******************** SSR **********************/
/***********************************************/

/** Side-effect hook invoked when context generation throws. */
export type SsrContextGenerationErrorHandler = (
  err: unknown,
  nextContext: GetServerSidePropsContext
) => void

/** Configuration for building an SSR handler. */
export type SsrHandlerConfig<ContextType> = {
  contextGenerator: SsrContextGenerator<ContextType>
  globalCaseHandlers: Array<SsrCaseHandler<ContextType>>
  getErrorPageUrl: SsrErrorPageUrlGetter<ContextType>
  onContextGenerationError?: SsrContextGenerationErrorHandler
  onCaseHandlingError?: SsrCaseErrorHandler<ContextType>
}

/** Combines page-specific case handlers into a getServerSideProps. */
export type SsrHandler<ContextType> = (
  pageCaseHandlers: Array<SsrCaseHandler<ContextType>>
) => GetServerSideProps

/** Factory producing an SsrHandler from its configuration. */
export type SrrHandlerGenerator = <ContextType>(
  config: SsrHandlerConfig<ContextType>
) => SsrHandler<ContextType>
|
#!/bin/bash
#SBATCH --mem-per-cpu 4G
#SBATCH -t 1:00:00
#SBATCH -p coin,batch-ivb,batch-wsm,batch-hsw,short-ivb,short-wsm,short-hsw
#SBATCH -N1
#SBATCH -c 20

# Build and install Kaldi on a Slurm cluster. Relies on helper functions
# from ../common/common.sh (init_vars, checkout_git, write_module).
source ../common/common.sh

# Toolchain profile name; first positional argument, with a default.
PROFILE=${1:-triton-gcc-openblas}
module purge
source profiles/${PROFILE}
module list

NAME=kaldi
GIT_REPO=https://github.com/kaldi-asr/kaldi.git
GIT_DIR=src
init_vars
checkout_git
pushd ${BUILD_DIR}/src
# Assemble kaldi.mk from the profile's make variables plus a shared fragment.
echo "BUILD_DIR = ${BUILD_DIR}" > kaldi.mk
echo "${MAKELINES}" >> kaldi.mk
cat ${FILE_DIR}/common.mk >> kaldi.mk
#patch -p2 < ${FILE_DIR}/matrix.diff
echo "${PWD}"
make clean
make -j $SLURM_CPUS_PER_TASK all
#make -j $SLURM_CPUS_PER_TASK test_compile
rm -Rf "${INSTALL_DIR}"
mkdir -p ${INSTALL_DIR}/{bin,testbin}
# Install executables: regular binaries under bin/, test binaries under testbin/.
find . -type f -executable -print | grep "bin/" | grep -v "\.cc$" | grep -v "so$" | grep -v test | xargs cp -t "${INSTALL_DIR}/bin"
find . -type f -executable -print | grep -v "\.cc$" | grep -v "so$" | grep test | xargs cp -t "${INSTALL_DIR}/testbin"
popd
# Variables consumed by write_module when generating the modulefile.
BIN_PATH=${INSTALL_DIR}/bin
EXTRA_LINES="setenv KALDI_INSTALL_DIR ${INSTALL_DIR}"
DESC="Kaldi Speech Recognition Toolkit"
HELP="Kaldi ${VERSION} ${TOOLCHAIN}"
write_module
rm -Rf ${BUILD_DIR}
|
<reponame>yegobox/kraken
import {Component, Input, OnInit} from '@angular/core';
import {RuntimeHostService} from 'projects/runtime/src/lib/runtime-host/runtime-host.service';
import {AbstractControl, FormControl, FormGroup, Validators} from '@angular/forms';
import {Host} from 'projects/runtime/src/lib/entities/host';
import * as _ from 'lodash';
import {LocalStorageService} from 'projects/tools/src/lib/local-storage.service';
@Component({
  selector: 'lib-hosts-selector',
  templateUrl: './hosts-selector.component.html',
  styleUrls: ['./hosts-selector.component.scss']
})
export class HostsSelectorComponent implements OnInit {
  // localStorage key prefix; combined with storageId to persist selections
  private static readonly ID_PREFIX = 'host-selector-';

  @Input() storageId: string;    // distinguishes persisted selections between usages
  @Input() formGroup: FormGroup; // parent form; a required 'hosts' control is added on init
  @Input() multiple: boolean;    // multi-select vs single-select mode

  public hostsList: Host[] = [];
  public loading = true;

  constructor(private hostService: RuntimeHostService,
              private localStorage: LocalStorageService) {
  }

  // Loads the available hosts, restores the persisted selection and
  // registers the 'hosts' control on the parent form group.
  ngOnInit() {
    this.hostService.hosts().subscribe(hosts => {
      this.loading = false;
      this.hostsList = hosts;
      const hostIds = _.map(this.hostsList, 'id');
      const savedIds = this.localStorage.getItem<string[]>(HostsSelectorComponent.ID_PREFIX + this.storageId, []);
      // keep only saved ids that still exist; fall back to selecting all
      const intersect = _.intersection(hostIds, savedIds);
      const selectedHostIds = intersect.length ? intersect : hostIds;
      // NOTE(review): single-select mode picks the first of ALL hosts, not
      // the first of the restored selection — confirm this is intentional.
      const selectedHostId = _.first(hostIds);
      this.formGroup.addControl('hosts', new FormControl(this.multiple ? selectedHostIds : selectedHostId, [Validators.required]));
    });
  }

  get hosts(): AbstractControl {
    return this.formGroup.get('hosts');
  }

  // Selected host ids (always an array); persists the value as a side effect.
  get hostIds(): string[] {
    const hostIds = this.hosts ? this.multiple ? this.hosts.value : [this.hosts.value] : [];
    this.localStorage.setItem(HostsSelectorComponent.ID_PREFIX + this.storageId, hostIds);
    return hostIds;
  }

  // Selected host id (single-select mode); persists the value as a side effect.
  get hostId(): string {
    const hostId = this.hosts ? this.hosts.value : null;
    this.localStorage.setItem(HostsSelectorComponent.ID_PREFIX + this.storageId, [hostId]);
    return hostId;
  }
}
|
<filename>src/components/progress/progress.test.tsx
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
import React from 'react';
import { render } from 'enzyme';
import { requiredProps } from '../../test/required_props';
import { EuiProgress, COLORS, SIZES } from './progress';
// Snapshot coverage for EuiProgress: base rendering, determinate vs
// indeterminate modes, value/label text, and every color and size variant.
describe('EuiProgress', () => {
  test('is rendered', () => {
    const component = render(<EuiProgress {...requiredProps} />);
    expect(component).toMatchSnapshot();
  });

  test('has max', () => {
    const component = render(<EuiProgress max={100} {...requiredProps} />);
    expect(component).toMatchSnapshot();
  });

  test('has value', () => {
    const component = render(<EuiProgress value={100} {...requiredProps} />);
    expect(component).toMatchSnapshot();
  });

  test('is determinate', () => {
    const val = 50;
    const component = render(
      <EuiProgress max={val ? 100 : undefined} value={val} {...requiredProps} />
    );
    expect(component).toMatchSnapshot();
  });

  test('is indeterminate', () => {
    const val = undefined;
    const component = render(
      <EuiProgress max={val ? 100 : undefined} value={val} {...requiredProps} />
    );
    expect(component).toMatchSnapshot();
  });

  test('has valueText and label', () => {
    const component = render(
      <EuiProgress
        valueText="150"
        label="Label"
        value={50}
        max={100}
        {...requiredProps}
      />
    );
    expect(component).toMatchSnapshot();
  });

  test('valueText is true', () => {
    const component = render(
      <EuiProgress valueText={true} value={50} max={100} {...requiredProps} />
    );
    expect(component).toMatchSnapshot();
  });

  test('has labelProps', () => {
    const component = render(
      <EuiProgress
        max={100}
        value={50}
        labelProps={{ title: 'Custom title' }}
        valueText="150"
        {...requiredProps}
      />
    );
    expect(component).toMatchSnapshot();
  });

  // Exercise every named color plus a custom hex value.
  describe('color', () => {
    [...COLORS, '#885522'].forEach((color) => {
      test(`${color} is rendered`, () => {
        const component = render(<EuiProgress color={color} />);
        expect(component).toMatchSnapshot();
      });
    });
  });

  describe('size', () => {
    SIZES.forEach((size) => {
      test(`${size} is rendered`, () => {
        const component = render(<EuiProgress size={size} />);
        expect(component).toMatchSnapshot();
      });
    });
  });
});
|
// Appears to be a Doxygen-generated search index (do not edit by hand):
// each entry maps a lowercase search key to a display name and link target.
var searchData=
[
  ['testenv_0',['TestEnv',['../classproxen_1_1gui_1_1_test_env.html',1,'proxen::gui']]],
  ['testenveditor_1',['TestEnvEditor',['../classproxen_1_1gui_1_1_test_env_editor.html',1,'proxen::gui']]],
  ['testenveditorask_2',['TestEnvEditorAsk',['../classproxen_1_1gui_1_1_test_env_editor_ask.html',1,'proxen::gui']]]
];
|
#!/bin/sh
# CocoaPods-style "embed frameworks" build phase: copies built frameworks
# into the app bundle, strips invalid architectures and re-signs them.
set -e

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

# Copies one framework into the app's Frameworks folder, strips invalid
# architectures and code-signs the result.
install_framework()
{
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  # use filter instead of exclude so missing patterns dont' throw errors
  echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}

# Signs a framework with the provided identity
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identitiy
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"

    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      # run signing in the background when parallel signing is enabled
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}

# Strip invalid architectures
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current file
  archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
  stripped=""
  for arch in $archs; do
    if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary" || exit 1
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
}

if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "$BUILT_PRODUCTS_DIR/EnemyFramework/EnemyFramework.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "$BUILT_PRODUCTS_DIR/EnemyFramework/EnemyFramework.framework"
fi
# Wait for any backgrounded signing jobs started above.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
package hector
import (
"math"
"strconv"
"math/rand"
"fmt"
"os"
"bufio"
"strings"
"runtime"
)
/*
This algorithm implements the L1 linear SVM described in "A Dual Coordinate Descent Method for Large-scale Linear SVM".
The paper is available at https://www.csie.ntu.edu.tw/~cjlin/papers/cddual.pdf
*/
// LinearSVM is an L1 linear SVM trained by dual coordinate descent
// (see the reference above the type).
type LinearSVM struct {
	sv []*Vector // training feature vectors (released after Train)
	y  []float64 // labels mapped to {-1, +1}
	a  []float64 // dual coefficients alpha_i
	b  float64   // bias term; not updated in the visible code
	C  float64   // regularization parameter ("c" in Init params)
	e  float64   // convergence tolerance ("e" in Init params)
	w  *Vector   // learned weight vector
	xx []float64 // cached squared norms x_i . x_i
}
// SaveModel writes the learned weight vector to path, one
// tab-separated "feature-id<TAB>weight" pair per line.
func (self *LinearSVM) SaveModel(path string) {
	sb := StringBuilder{}
	for f, g := range self.w.data {
		sb.Int64(f)
		sb.Write("\t")
		sb.Float(g)
		sb.Write("\n")
	}
	sb.WriteToFile(path)
}
// LoadModel restores the weight vector from the tab-separated
// "feature-id<TAB>weight" file written by SaveModel. Unreadable files
// and malformed lines are skipped, leaving existing weights untouched.
func (c *LinearSVM) LoadModel(path string) {
	file, err := os.Open(path)
	if err != nil {
		// the original discarded this error and scanned a nil file,
		// silently loading nothing; bail out explicitly instead
		return
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		tks := strings.Split(scanner.Text(), "\t")
		if len(tks) != 2 {
			continue // guard against malformed lines (original indexed blindly)
		}
		fid, _ := strconv.ParseInt(tks[0], 10, 64)
		fw, _ := strconv.ParseFloat(tks[1], 64)
		c.w.SetValue(fid, fw)
	}
}
// Init configures the trainer from string params: "c" (regularization
// strength) and "e" (convergence tolerance). Parse errors are ignored,
// leaving the zero value in place.
func (c *LinearSVM) Init(params map[string]string) {
	c.C, _ = strconv.ParseFloat(params["c"], 64)
	c.e, _ = strconv.ParseFloat(params["e"], 64)
	c.w = NewVector()
}
// Predict returns the decision value w . x for a sample's feature vector.
func (c *LinearSVM) Predict(sample *Sample) float64 {
	x := sample.GetFeatureVector()
	return c.PredictVector(x)
}
// PredictVector returns the decision value w . x for a raw feature vector.
func (c *LinearSVM) PredictVector(x *Vector) float64 {
	ret := c.w.Dot(x)
	return ret
}
func (c *LinearSVM) Train(dataset *DataSet) {
c.sv = []*Vector{}
c.y = []float64{}
c.a = []float64{}
for k, sample := range dataset.Samples {
x := sample.GetFeatureVector()
c.sv = append(c.sv, x)
c.xx = append(c.xx, x.Dot(x))
if sample.Label > 0.0 {
c.y = append(c.y, 1.0)
} else {
c.y = append(c.y, -1.0)
}
c.a = append(c.a, c.C * rand.Float64() * 0.0)
c.w.AddVector(x, c.y[k] * c.a[k])
}
da0 := 0.0
for {
da := 0.0
for i, ai := range c.a {
g := c.y[i] * c.w.Dot(c.sv[i]) - 1.0
pg := g
if ai < 1e-9 {
pg = math.Min(0.0, g)
} else if ai > c.C - 1e-9 {
pg = math.Max(0.0, g)
}
if math.Abs(pg) > 1e-9 {
ai0 := ai
ai = math.Min(math.Max(0, ai - g / c.xx[i]), c.C)
c.w.AddVector(c.sv[i], (ai - ai0) * c.y[i])
da += math.Abs(ai - ai0)
}
}
da /= float64(len(c.a))
fmt.Println(da)
if da < c.e || math.Abs(da - da0) < 1e-3 {
break
}
da0 = da
}
c.sv = nil
runtime.GC()
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.