text stringlengths 1 1.05M |
|---|
#include "include/basket.hpp"
int main(int argc, char* argv[])
{
/* Make some fruit! */
Fruit banana;
banana.name = "banana";
banana.color = "Yellow";
banana.rating = 5;
Fruit apple;
apple.name = "apple";
apple.color = "red";
apple.rating = 7;
Fruit mango;
mango.name = "mango";
mango.color = "orange";
mango.rating = 8;
Fruit pineapple;
pineapple.name = "pineapple";
pineapple.color = "yellow";
pineapple.rating = 9;
/* Fill our basket with delicious fruit */
Fruit_Basket* fruit_basket = new Fruit_Basket;
fruit_basket -> push_fruit(banana);
fruit_basket -> push_fruit(apple);
fruit_basket -> push_fruit(mango);
fruit_basket -> push_fruit(pineapple);
/* gimme an apple */
fruit_basket -> gimme("apple");
/* What's left? */
fruit_basket -> print_fruit();
/* What's the tastiest one? */
fruit_basket -> find_tastiest();
/* There will be no fruit basket memory leaks today */
return 0;
}
|
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

# Load the dataset.
# NOTE(review): the "Objects" feature column contains strings; GaussianNB
# expects numeric features, so a real run needs these encoded (e.g. one-hot)
# first — TODO confirm the intended preprocessing.
data = {"Objects": ["Cat", "Car", "Bike"],
        "Category": ["Animal", "Vehicle", "Transportation"]}
df = pd.DataFrame.from_dict(data)

# Divide dataset into labels and features
X = df.iloc[:, :-1].values
y = df.iloc[:, 1].values

# Split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Train the model
classifier = GaussianNB()
classifier.fit(X_train, y_train)

# Test the model
y_pred = classifier.predict(X_test)

# Print accuracy.  The original computed np.mean(np.abs(y_pred - y_test)),
# which (a) referenced `np` without ever importing numpy and (b) subtracts
# string labels, which raises.  Classification accuracy is simply the
# fraction of exact label matches.
accuracy = round(100 * float((y_pred == y_test).mean()), 2)
print("Accuracy: {}%".format(accuracy))
<gh_stars>1-10
package com.example.bookingapp;
/**
 * Model object describing a single booking: when it takes place and the
 * client's contact details.  The public no-argument constructor is kept,
 * presumably for frameworks that instantiate and populate the object
 * reflectively — confirm with callers before removing it.
 */
public class BookingClass {

    private String clientName;
    private String clientAddress;
    private String clientEmail;
    private long dateTime;

    /** Required empty constructor (reflective instantiation). */
    public BookingClass() {
    }

    /**
     * Creates a fully-populated booking.
     *
     * @param dateTime      booking time as a numeric timestamp
     * @param clientName    full name of the client
     * @param clientAddress postal address of the client
     * @param clientEmail   contact e-mail of the client
     */
    public BookingClass(long dateTime, String clientName, String clientAddress, String clientEmail) {
        this.dateTime = dateTime;
        this.clientName = clientName;
        this.clientAddress = clientAddress;
        this.clientEmail = clientEmail;
    }

    /** @return the booking's numeric timestamp */
    public long getDateTime() {
        return dateTime;
    }

    /** @return the client's full name (null when default-constructed) */
    public String getClientName() {
        return clientName;
    }

    /** @return the client's postal address (null when default-constructed) */
    public String getClientAddress() {
        return clientAddress;
    }

    /** @return the client's e-mail address (null when default-constructed) */
    public String getClientEmail() {
        return clientEmail;
    }
}
|
#!/bin/bash
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Print a success message (all arguments) and terminate the build, exit 0.
function build_success() {
  echo "Build succeeded: $@"
  exit 0
}

# Print a failure message (all arguments) and terminate the build, exit 1.
function build_fail() {
  echo "Build failed: $@"
  exit 1
}

# ERR-trap handler: reports the script name, the failing line number (passed
# in as $1 via $LINENO), the command that failed ($BASH_COMMAND) and its exit
# status, then aborts the build through build_fail.
function exit_error() {
  build_fail "$0:$1 \"$BASH_COMMAND\" returned $?"
}

# Abort the whole build as soon as any (non-conditional) command fails.
trap 'exit_error $LINENO' ERR
function install_go() {
  # Installs a specific version of go for compilation, since availability varies
  # across linux distributions. Needs curl and tar to be installed.

  # Pick the tarball matching the machine architecture (x86_64 vs aarch64).
  local arch="amd64"
  if [[ `uname -m` == "aarch64" ]]; then
    arch="arm64"
  fi
  local GOLANG="go1.13.9.linux-${arch}.tar.gz"
  export GOPATH=/usr/share/gocode
  export GOCACHE=/tmp/.cache

  # Golang setup: always start from a clean /tmp/go so reruns are reproducible.
  [[ -d /tmp/go ]] && rm -rf /tmp/go
  mkdir -p /tmp/go/
  # Download and unpack the toolchain, stripping the leading "go/" directory.
  curl -s "https://dl.google.com/go/${GOLANG}" -o /tmp/go/go.tar.gz
  tar -C /tmp/go/ --strip-components=1 -xf /tmp/go/go.tar.gz
  export PATH="/tmp/go/bin:${GOPATH}/bin:${PATH}" # set path for whoever invokes this function.
  export GO=/tmp/go/bin/go # reference this go explicitly.
}
function git_checkout() {
  # Checks out a repo at a specified commit or ref into a specified directory.
  #   $1 - GitHub org/user (BASE_REPO)
  #   $2 - repository name; also used as the checkout directory (REPO)
  #   $3 - ref to fetch; defaults to "master" (PULL_REF)
  BASE_REPO="$1"
  REPO="$2"
  PULL_REF="$3"
  # pull the repository from github - start
  mkdir -p "$REPO"
  cd "$REPO"
  git init
  # fetch only the branch that we want to build.  The command is built as an
  # array so the URL and ref are passed as exactly two words even if they
  # ever contain whitespace or glob characters (the previous string +
  # unquoted-expansion form was subject to word splitting, ShellCheck SC2086).
  git_command=(git fetch "https://github.com/${BASE_REPO}/${REPO}.git" "${PULL_REF:-master}")
  echo "Running ${git_command[*]}"
  "${git_command[@]}"
  git checkout FETCH_HEAD
}
function try_command() {
  # Retry "$@" up to 4 times, 5 seconds apart, before giving up (returns 1).
  n=0
  while ! "$@"; do
    echo "try $n to run $@"
    if (( n > 3 )); then
      return 1
    fi
    # NOTE: the original used ((n++)), whose exit status is non-zero when the
    # pre-increment value is 0 — with the ERR trap installed above, that
    # aborted the whole build on the very first retry.  A plain assignment
    # always has exit status 0.
    n=$((n + 1))
    sleep 5
  done
}
|
#!/bin/bash
# Configure the pthreads build flavour with clang++, aggressive optimisation
# (-Ofast) plus debug info, and the X11/OpenGL link libraries.
env version=pthreads \
CXX=clang++ \
CXXFLAGS="-DNDEBUG -D_MM_NO_ALIGN_CHECK -g -Ofast" \
LIBS="-lGL -lGLU -lXmu -lXext -lXau -lX11 -ldl -lpthread -g -Ofast" \
./configure
# Build with 10 parallel jobs, then keep a copy of the resulting viewer binary.
make -j10
cp bin/rtview ./rtview.orig.exe
|
#include <iostream>
#include <string>
#include "DetectorData.h" // Assume the necessary header file for detector data types
/**
 * @brief Sums the energy readings of every digi in the HBHE, HF and HO
 *        collections and formats the total as a human-readable string.
 * @return "Total energy from all detectors: <total> GeV"
 */
std::string performAnalysis(const HBHEDigiCollection& hbheData, const HFDigiCollection& hfData, const HODigiCollection& hoData) {
    double totalEnergy = 0.0;

    // Fold one detector collection into the running total.
    const auto addCollection = [&totalEnergy](const auto& collection) {
        for (const auto& digi : collection) {
            totalEnergy += digi.energy();
        }
    };

    addCollection(hbheData);
    addCollection(hfData);
    addCollection(hoData);

    // Generate a result string based on the analysis
    return "Total energy from all detectors: " + std::to_string(totalEnergy) + " GeV";
}
int main() {
    // Default-construct one collection per detector subsystem.
    HBHEDigiCollection hbheData; // Assume initialization of HBHE detector data
    HFDigiCollection hfData;     // Assume initialization of HF detector data
    HODigiCollection hoData;     // Assume initialization of HO detector data

    // Run the analysis and print the report.
    const std::string report = performAnalysis(hbheData, hfData, hoData);
    std::cout << "Analysis Result: " << report << std::endl;
    return 0;
}
def factorial(n):
    """Return n! for a non-negative integer n (1 when n <= 1)."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product


print(factorial(3))
/*
* Hedgewars, a free turn based strategy game
* Copyright (c) 2004-2015 <NAME> <<EMAIL>>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* @brief GameStyleModel class implementation
*/
#include <QTextStream>
#include "physfs.h"
#include "GameStyleModel.h"
#include "hwconsts.h"
/**
 * @brief (Re)populates the model with the available game styles: the
 *        built-in "Normal" entry, a separator, then one entry per
 *        multiplayer Lua script found by the DataManager.
 */
void GameStyleModel::loadGameStyles()
{
    beginResetModel();

    // empty list, so that we can (re)fill it
    QStandardItemModel::clear();

    QList<QStandardItem *> items;
    items.append(new QStandardItem("Normal"));

    // define a separator item (disabled + unselectable so it cannot be chosen)
    QStandardItem * separator = new QStandardItem("---");
    separator->setData(QLatin1String("separator"), Qt::AccessibleDescriptionRole);
    separator->setFlags(separator->flags() & ~( Qt::ItemIsEnabled | Qt::ItemIsSelectable ) );
    items.append(separator);

    // one style per multiplayer Lua script
    QStringList scripts = DataManager::instance().entryList(
                              QString("Scripts/Multiplayer"),
                              QDir::Files,
                              QStringList("*.lua")
                          );

    foreach(QString script, scripts)
    {
        script = script.remove(".lua", Qt::CaseInsensitive);
        // fix: the format string previously used the place marker %2 with a
        // single .arg(); it only worked because QString::arg fills the
        // lowest-numbered marker.  %1 is the conventional (and clearer)
        // first marker — behaviour is unchanged.
        QFile scriptCfgFile(QString("physfs://Scripts/Multiplayer/%1.cfg").arg(script));

        // display name: underscores become spaces
        QString name = script;
        name = name.replace("_", " ");

        // scheme/weapons default to "locked" unless the .cfg overrides them
        QString scheme = "locked";
        QString weapons = "locked";
        if (scriptCfgFile.exists() && scriptCfgFile.open(QFile::ReadOnly))
        {
            QTextStream input(&scriptCfgFile);
            input >> scheme;
            input >> weapons;
            scriptCfgFile.close();

            if (!scheme.isEmpty())
                scheme.replace("_", " ");
            if (!weapons.isEmpty())
                weapons.replace("_", " ");
        }

        // detect if script is dlc: anything that resolves outside the main
        // data directory is DLC and gets a "*" name prefix
        QString scriptPath = PHYSFS_getRealDir(QString("Scripts/Multiplayer/%1.lua").arg(script).toLocal8Bit().data());
        bool isDLC = !scriptPath.startsWith(datadir->absolutePath());

        QStandardItem * item = new QStandardItem((isDLC ? "*" : "") + name);
        item->setData(script, ScriptRole);
        item->setData(scheme, SchemeRole);
        item->setData(weapons, WeaponsRole);
        item->setData(isDLC, IsDlcRole);
        items.append(item);
    }

    QStandardItemModel::appendColumn(items);
    endResetModel();
}
|
#
# Copyright (C) 2021 Vaticle
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from concurrent.futures.thread import ThreadPoolExecutor
from functools import partial
from typing import List
from behave import *
from hamcrest import *
from typedb.client import *
from tests.behaviour.config.parameters import parse_bool, parse_list
from tests.behaviour.context import Context
SCHEMA = SessionType.SCHEMA
DATA = SessionType.DATA
def open_sessions_for_databases(context: Context, names: list, session_type):
    """Open one session of `session_type` per database name, tracking each on the context."""
    context.sessions.extend(
        context.client.session(name, session_type, context.session_options)
        for name in names
    )
@step("connection open schema session for database: {database_name}")
def step_impl(context: Context, database_name):
open_sessions_for_databases(context, [database_name], SCHEMA)
@step("connection open data session for database: {database_name}")
@step("connection open session for database: {database_name}")
def step_impl(context: Context, database_name: str):
open_sessions_for_databases(context, [database_name], DATA)
@step("connection open schema session for database")
@step("connection open schema session for databases")
@step("connection open schema sessions for database")
@step("connection open schema sessions for databases")
def step_impl(context: Context):
names = parse_list(context.table)
open_sessions_for_databases(context, names, SCHEMA)
@step("connection open data session for database")
@step("connection open data session for databases")
@step("connection open data sessions for database")
@step("connection open data sessions for databases")
@step("connection open session for database")
@step("connection open session for databases")
@step("connection open sessions for database")
@step("connection open sessions for databases")
def step_impl(context: Context):
names = parse_list(context.table)
open_sessions_for_databases(context, names, DATA)
@step("connection open data sessions in parallel for databases")
@step("connection open sessions in parallel for databases")
def step_impl(context: Context):
names = parse_list(context.table)
assert_that(len(names), is_(less_than_or_equal_to(context.THREAD_POOL_SIZE)))
with ThreadPoolExecutor(max_workers=context.THREAD_POOL_SIZE) as executor:
for name in names:
context.sessions_parallel.append(executor.submit(partial(context.client.session, name, DATA)))
@step("connection close all sessions")
def step_impl(context: Context):
for session in context.sessions:
session.close()
context.sessions = []
@step("session is null: {is_null}")
@step("sessions are null: {is_null}")
def step_impl(context: Context, is_null):
is_null = parse_bool(is_null)
for session in context.sessions:
assert_that(session is None, is_(is_null))
@step("session is open: {is_open}")
@step("sessions are open: {is_open}")
def step_impl(context: Context, is_open):
is_open = parse_bool(is_open)
for session in context.sessions:
assert_that(session.is_open(), is_(is_open))
@step("sessions in parallel are null: {is_null}")
def step_impl(context: Context, is_null):
is_null = parse_bool(is_null)
for future_session in context.sessions_parallel:
assert_that(future_session.result() is None, is_(is_null))
@step("sessions in parallel are open: {is_open}")
def step_impl(context: Context, is_open):
is_open = parse_bool(is_open)
for future_session in context.sessions_parallel:
assert_that(future_session.result().is_open(), is_(is_open))
def sessions_have_databases(context: Context, names: List[str]):
    """Assert that the tracked sessions correspond one-to-one, in order, to `names`."""
    assert_that(context.sessions, has_length(equal_to(len(names))))
    for session, name in zip(context.sessions, names):
        assert_that(session.database().name(), is_(name))
@step("session has database: {database_name}")
@step("sessions have database: {database_name}")
def step_impl(context: Context, database_name: str):
sessions_have_databases(context, [database_name])
# TODO: session(s) has/have databases in other implementations, simplify
@step("sessions have databases")
def step_impl(context: Context):
database_names = parse_list(context.table)
sessions_have_databases(context, database_names)
@step("sessions in parallel have databases")
def step_impl(context: Context):
database_names = parse_list(context.table)
assert_that(context.sessions_parallel, has_length(equal_to(len(database_names))))
future_session_iter = iter(context.sessions_parallel)
for name in database_names:
assert_that(next(future_session_iter).result().database().name(), is_(name))
######################################
# session configuration #
######################################
@step("set session option {option} to: {value}")
def step_impl(context: Context, option: str, value: str):
if option not in context.option_setters:
raise Exception("Unrecognised option: " + option)
context.option_setters[option](context.session_options, value)
|
#ifndef H_LINGO_TEST_CASE
#define H_LINGO_TEST_CASE

// Catch2 helpers for lingo's unit tests.  The LINGO_UNIT_*_TEST_CASE macros
// instantiate a TEMPLATE_LIST_TEST_CASE over a family of code-unit types,
// and LINGO_UNIT_TEST_TYPEDEFS expands to the per-TestType typedef
// boilerplate the test bodies rely on.

#include <catch/catch.hpp>

#include <lingo/string.hpp>
#include <lingo/string_view.hpp>
#include <lingo/encoding/execution.hpp>
#include <lingo/encoding/point_iterator.hpp>
#include <lingo/page/execution.hpp>

#include "test_types.hpp"

#include <cstddef>
#include <memory>
#include <tuple>
#include <type_traits>
#include <vector>

// Test-case generators: the type list selects unit types of at least the
// given bit width (type lists come from test_types.hpp).
#define LINGO_UNIT_LEAST_64_TEST_CASE(...) TEMPLATE_LIST_TEST_CASE(__VA_ARGS__, "", lingo::test::unit_least_64_types)
#define LINGO_UNIT_LEAST_32_TEST_CASE(...) TEMPLATE_LIST_TEST_CASE(__VA_ARGS__, "", lingo::test::unit_least_32_types)
#define LINGO_UNIT_LEAST_16_TEST_CASE(...) TEMPLATE_LIST_TEST_CASE(__VA_ARGS__, "", lingo::test::unit_least_16_types)
#define LINGO_UNIT_LEAST_8_TEST_CASE(...) TEMPLATE_LIST_TEST_CASE(__VA_ARGS__, "", lingo::test::unit_least_8_types)
#define LINGO_UNIT_TEST_CASE(...) TEMPLATE_LIST_TEST_CASE(__VA_ARGS__, "", lingo::test::unit_types)

// Common typedefs for a test body instantiated with TestType as unit_type.
#define LINGO_UNIT_TEST_TYPEDEFS \
using unit_type = TestType; \
using encoding_type = lingo::encoding::execution_encoding_t<unit_type>; \
using page_type = lingo::page::execution_page_t<unit_type>; \
using point_type = typename page_type::point_type; \
using size_type = std::size_t; \
using difference_type = std::ptrdiff_t; \
using allocator_type = lingo::internal::default_allocator<encoding_type>; \
using string_type = lingo::basic_string<encoding_type, page_type>; \
using string_view_type = lingo::basic_string_view<encoding_type, page_type>; \
using point_iterator_type = lingo::encoding::point_iterator<encoding_type>

#endif // H_LINGO_TEST_CASE
/***************************************************************************
*
* Project _____ __ ____ _ _
* ( _ ) /__\ (_ _)_| |_ _| |_
* )(_)( /(__)\ )( (_ _)(_ _)
* (_____)(__)(__)(__) |_| |_|
*
*
* Copyright 2018-present, <NAME> <<EMAIL>>
* <NAME> <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
***************************************************************************/
// Accessor macros for a packed enum-entry tuple (MACRO, NAME, PARAM_LIST):
// expand to the callback invocation, the entry name (plain or stringized),
// or the entry's ordinal value (plain or stringized).
#define OATPP_MACRO_DTO_ENUM_PARAM_MACRO(MACRO, NAME, PARAM_LIST) MACRO(NAME, PARAM_LIST)
#define OATPP_MACRO_DTO_ENUM_PARAM_NAME(MACRO, NAME, PARAM_LIST) NAME
#define OATPP_MACRO_DTO_ENUM_PARAM_NAME_STR(MACRO, NAME, PARAM_LIST) #NAME
#define OATPP_MACRO_DTO_ENUM_PARAM_VALUE(MACRO, NAME, PARAM_LIST) OATPP_MACRO_FIRSTARG PARAM_LIST
#define OATPP_MACRO_DTO_ENUM_PARAM_VALUE_STR(MACRO, NAME, PARAM_LIST) OATPP_MACRO_FIRSTARG_STR PARAM_LIST

// Repack the triple into a parenthesised tuple for later unpacking.
#define OATPP_MACRO_DTO_ENUM_PARAM(MACRO, NAME, PARAM_LIST) (MACRO, NAME, PARAM_LIST)

/**
 * Enum entry value.
 * @param NAME - name of the enum. **required**.
 * @param ORDINAL_VALUE - corresponding ordinal value. **required**.
 * @param QUALIFIER - name qualifier to be used instead of the `NAME`. **optional**.
 * @param DESCRIPTION - description of the enum value. **optional**.
 */
#define VALUE(NAME, ...) \
OATPP_MACRO_DTO_ENUM_PARAM(OATPP_MACRO_DTO_ENUM_VALUE, NAME, (__VA_ARGS__))

//////////////////////////////////////////////////////////////////////////

// Dispatch to MACRO_<N>, where N is the number of variadic arguments.
#define OATPP_MACRO_DTO_ENUM_MACRO_SELECTOR(MACRO, NAME, ...) \
OATPP_MACRO_EXPAND(OATPP_MACRO_MACRO_SELECTOR(MACRO, (__VA_ARGS__)) (NAME, __VA_ARGS__))

//////////////////////////////////////////////////////////////////////////
// VALUE MACRO
// The three arities below register an enum entry in the EnumMeta info
// tables, keyed by name (or by QUALIFIER when given) and by ordinal value,
// optionally carrying a DESCRIPTION.

#define OATPP_MACRO_DTO_ENUM_VALUE_1(NAME, VAL) \
{ \
oatpp::data::mapping::type::EnumValueInfo<EnumType> entry = {EnumType::NAME, index ++, #NAME, nullptr}; \
info.byName.insert({#NAME, entry}); \
info.byValue.insert({static_cast<v_uint64>(EnumType::NAME), entry}); \
info.byIndex.push_back(entry); \
}

#define OATPP_MACRO_DTO_ENUM_VALUE_2(NAME, VAL, QUALIFIER) \
{ \
oatpp::data::mapping::type::EnumValueInfo<EnumType> entry = {EnumType::NAME, index ++, QUALIFIER, nullptr}; \
info.byName.insert({QUALIFIER, entry}); \
info.byValue.insert({static_cast<v_uint64>(EnumType::NAME), entry}); \
info.byIndex.push_back(entry); \
}

#define OATPP_MACRO_DTO_ENUM_VALUE_3(NAME, VAL, QUALIFIER, DESCRIPTION) \
{ \
oatpp::data::mapping::type::EnumValueInfo<EnumType> entry = {EnumType::NAME, index ++, QUALIFIER, DESCRIPTION}; \
info.byName.insert({QUALIFIER, entry}); \
info.byValue.insert({static_cast<v_uint64>(EnumType::NAME), entry}); \
info.byIndex.push_back(entry); \
}

#define OATPP_MACRO_DTO_ENUM_VALUE(NAME, PARAM_LIST) \
OATPP_MACRO_DTO_ENUM_MACRO_SELECTOR(OATPP_MACRO_DTO_ENUM_VALUE_, NAME, OATPP_MACRO_UNFOLD_VA_ARGS PARAM_LIST)

// FOR EACH
// Enum-body declarators (the first entry has no leading comma) and the
// per-entry registration call used inside EnumMeta::init().
#define OATPP_MACRO_DTO_ENUM_PARAM_DECL_FIRST(INDEX, COUNT, X) \
OATPP_MACRO_DTO_ENUM_PARAM_NAME X = OATPP_MACRO_DTO_ENUM_PARAM_VALUE X

#define OATPP_MACRO_DTO_ENUM_PARAM_DECL_REST(INDEX, COUNT, X) \
, OATPP_MACRO_DTO_ENUM_PARAM_NAME X = OATPP_MACRO_DTO_ENUM_PARAM_VALUE X

#define OATPP_MACRO_DTO_ENUM_PARAM_PUT(INDEX, COUNT, X) \
OATPP_MACRO_DTO_ENUM_PARAM_MACRO X
// ENUM MACRO

// Arity-0 form: declares an empty `enum class` plus an anonymous-namespace
// EnumMeta initializer that only records the enum's name qualifier.
#define OATPP_ENUM_0(NAME, ORDINAL_TYPE) \
enum class NAME : ORDINAL_TYPE {}; \
\
namespace { \
\
class Z__OATPP_ENUM_META_##NAME : public oatpp::data::mapping::type::EnumMeta<NAME> { \
private: \
\
static bool init() { \
auto& info = *EnumMeta<NAME>::getInfo(); \
v_int32 index = 0; \
(void)index; \
info.nameQualifier = #NAME; \
return true; \
} \
\
public: \
\
static bool initializer() { \
static bool initialized = init(); \
return initialized; \
} \
\
}; \
\
bool Z__OATPP_ENUM_META_INITIALIZER_##NAME = Z__OATPP_ENUM_META_##NAME::initializer(); \
\
}

// Arity-N form: declares the enum entries from the packed VALUE(...) tuples
// and registers each of them with the EnumMeta info at static-init time.
#define OATPP_ENUM_1(NAME, ORDINAL_TYPE, ...) \
enum class NAME : ORDINAL_TYPE { \
OATPP_MACRO_FOREACH_FIRST_AND_REST( \
OATPP_MACRO_DTO_ENUM_PARAM_DECL_FIRST, \
OATPP_MACRO_DTO_ENUM_PARAM_DECL_REST, \
__VA_ARGS__ \
) \
}; \
\
class Z__OATPP_ENUM_META_##NAME : public oatpp::data::mapping::type::EnumMeta<NAME> { \
private: \
\
static bool init() { \
auto& info = *EnumMeta<NAME>::getInfo(); \
v_int32 index = 0; \
info.nameQualifier = #NAME; \
OATPP_MACRO_FOREACH(OATPP_MACRO_DTO_ENUM_PARAM_PUT, __VA_ARGS__) \
return true; \
} \
\
public: \
\
static bool initializer() { \
static bool initialized = init(); \
return initialized; \
} \
\
}; \
\
static bool Z__OATPP_ENUM_META_INITIALIZER_##NAME = Z__OATPP_ENUM_META_##NAME::initializer();
// Chooser
// Forward to the 0-argument or N-argument implementation depending on
// whether any VALUE(...) entries were supplied.
#define OATPP_ENUM_MACRO_0(NAME, ORDINAL_TYPE) \
OATPP_ENUM_0(NAME, ORDINAL_TYPE)

#define OATPP_ENUM_MACRO_1(NAME, ORDINAL_TYPE, ...) \
OATPP_ENUM_1(NAME, ORDINAL_TYPE, __VA_ARGS__)

/**
 * Codegen macro to generate oatpp mapping-enabled enum.
 * @param NAME - name of the enum. **required**.
 * @param UNDERLYING_TYPE - underlying ordinal type. **required**.
 * @param ... - enum values defined with &l:VALUE (...);. macro.
 */
#define ENUM(NAME, ...) \
OATPP_MACRO_EXPAND(OATPP_MACRO_MACRO_BINARY_SELECTOR(OATPP_ENUM_MACRO_, (__VA_ARGS__)) (NAME, __VA_ARGS__))
|
use failure::Fail;
/// Errors describing an invalid parent/child relationship.
/// The `display` attributes double as the user-facing error messages.
#[derive(Debug, Fail)]
pub enum ParentsError {
    /// A child references a parent that is not present.
    #[fail(display = "Missing parent for child")]
    MissingParent,
    /// A child has more than one parent recorded.
    #[fail(display = "Duplicate parent for child")]
    DuplicateParent,
    /// The recorded parent-child link is malformed.
    #[fail(display = "Invalid parent-child relationship")]
    InvalidRelationship,
}
<filename>backend/src/domain/tezos/tezos.types.ts
// Credentials bundle for a Tezos faucet account.
export type FaucetAccount = {
    mnemonic: string | string[]; // seed phrase: pre-joined string or word list
    secret: string; // faucet-provided secret (presumably for activation — confirm)
    amount: string; // mutez
    pkh: string; // public key hash (the account address)
    password?: string; // optional password
    email: string;
};
<reponame>pageobject-io/pageobject-generator
"use strict";
const _ = require('lodash');
const parse5 = require('parse5');
const fs = require('fs');
const ParseResult = require('./parse-result');
/**
 * Thin wrapper around parse5 that accepts either raw HTML markup or a path
 * to an HTML file on disk.
 */
class HtmlParser {
  /**
   * @param source raw HTML, or a file path when `isPath` is true
   * @param isPath whether `source` should be read from disk
   */
  constructor(source, isPath) {
    this._source = source;
    this._isPath = isPath;
  }

  /**
   * Parse the source into an htmlparser2-style tree (with location info)
   * and wrap it in a ParseResult.
   */
  parse() {
    const markup = this._isPath ? fs.readFileSync(this._source, 'utf8') : this._source;
    const options = {treeAdapter: parse5.treeAdapters.htmlparser2, locationInfo: true};
    const tree = parse5.parse(markup, options);
    return new ParseResult(tree, null);
  }
}

module.exports = HtmlParser;
#!/bin/sh
#
# PBS array job: prepares per-scenario result folders for the current array
# task, then runs the simulation R script once for every scenario.
#
#PBS -N SimulSD
#PBS -o output/output.file
#PBS -e error/error.file
#PBS -m a
#PBS -l walltime=11:30:00
#PBS -l vmem=30GB
#
#----------------------------------------------------#
# MODULES TO LOAD IN
module load R/3.2.1-intel-2015a
#----------------------------------------------------#
#----------------------------------------------------#
# CHANGE YOUR VSC NUMBER HERE AND GOD WILL DO THE REST
vsc=40728
#----------------------------------------------------#
#----------------------------------------------------#
# LOCATION OF SCRIPT TO RUN
srcdir=/user/scratch/gent/gvo000/gvo00022/vsc"$vsc"/Simulation
cd $srcdir
#----------------------------------------------------#
#----------------------------------------------------#
# NUMBER OF SCENARIOS
NSCEN=30
#----------------------------------------------------#
#----------------------------------------------------#
# CREATE THE FOLDERS IN RESULTS
# One folder per array task (PBS_ARRAYID), each holding SCEN_1..SCEN_$NSCEN.
cd Results
mkdir ${PBS_ARRAYID}
cd ${PBS_ARRAYID}
mkdir $(printf "SCEN_%1i " $(seq 1 $NSCEN))
cd $srcdir
#----------------------------------------------------#
#----------------------------------------------------#
for i in $(eval echo "{1..$NSCEN}"); do
# GO TIME: FOR LOOP OVER ALL SCENARIOS:
Rscript lowLevelToMetaSimpDes.R ${PBS_ARRAYID} "$i" "HPC"
done
#----------------------------------------------------#
|
'use strict';

// Minimal sqlite3 demo: create an `employees` table, insert the rows found
// in employees.json, print every row, then close the database.
//
// NOTE(review): every db.* call below is asynchronous.  The script relies on
// the sqlite3 driver queueing statements on a single connection and running
// them in submission order — confirm, or wrap the calls in db.serialize().
const { Database } = require('sqlite3').verbose();
const db = new Database('example.sqlite', () => console.log('Connected!'));

db.run("CREATE TABLE IF NOT EXISTS employees (id INT, first_name TEXT, last_name TEXT)");

// Shared log-and-continue error callback for the INSERTs and close().
const errorHandler = (err) => {
  if (err) {
    console.log(`Msg: ${err}`);
  };
};

// Queue one INSERT per record in employees.json.
const populateEmployees = () => {
  const list = require('./employees.json');
  list.forEach(each => {
    db.run(`INSERT INTO employees VALUES (
${each.id},
"${each.first_name}",
"${each.last_name}"
)`, errorHandler);
  })
};

populateEmployees()

// Dump the table once the queued INSERTs have executed.
db.all("SELECT * FROM employees", (err, allRows) => {
  allRows.forEach(each => {
    console.log(each.id, each.first_name, each.last_name);
  });
});

db.close(err => {
  errorHandler(err);
  console.log('Database closed')
});
|
import { useMutation, useQueryClient } from 'react-query';
import { Routes } from 'lib-client/constants';
import axiosInstance from 'lib-client/react-query/axios';
import { AxiosError } from 'axios';
import { signOut } from 'next-auth/react';
// Shape of the JSON body returned by the seed API endpoint.
export type SeedResponseType = {
  success: boolean;
};
/** POST to the seed endpoint and unwrap the response payload. */
const createSeed = async () => {
  const response = await axiosInstance.post<SeedResponseType>(Routes.API.SEED);
  return response.data;
};
/**
 * React Query mutation hook that triggers the seed endpoint.
 *
 * On success it signs the user out and clears the entire query cache, since
 * reseeding invalidates all previously fetched data.
 */
export const useCreateSeed = () => {
  const queryClient = useQueryClient();

  const mutation = useMutation<SeedResponseType, AxiosError, void, unknown>(
    () => createSeed(),
    {
      onSuccess: async () => {
        // clear everything
        signOut();
        queryClient.clear();
      },
    }
  );

  return mutation;
};
|
<gh_stars>0
// Minimal augmentation of the DOM Document type declaring the legacy
// (IE-only) `selection` property, so old code touching document.selection
// can type-check.  The object shape is intentionally left empty.
interface Document {
    selection: {
    }
}
// Credits: https://stackoverflow.com/questions/2897155/get-cursor-position-in-characters-within-a-text-input-field
/**
 * Returns the caret position (in characters) within a text input, honouring
 * the direction of the current selection.
 */
export function getCursorPos(element : HTMLInputElement) : number {
    // (The legacy IE document.selection fallback from the credited answer
    // was already commented out and has been removed.)
    // selectionStart/selectionEnd are null on input types that do not
    // support text selection; report 0 in that case instead of leaking null
    // through the declared `number` return type.
    const pos = element.selectionDirection == 'backward' ? element.selectionStart : element.selectionEnd;
    return pos === null ? 0 : pos;
}
// Credits: http://blog.vishalon.net/index.php/javascript-getting-and-setting-caret-position-in-textarea/
// Place the caret at `pos` (a collapsed selection) inside a text input.
export function setCursorPos(element : HTMLInputElement, pos : number) : void {
    // Modern browsers
    if (element.setSelectionRange) {
        element.focus();
        element.setSelectionRange(pos, pos);
    // IE8 and below
    } else if ((element as any).createTextRange) {
        // Legacy TextRange API: collapse to the start, then move both ends
        // to the target position and apply the selection.
        var range = (element as any).createTextRange();
        range.collapse(true);
        range.moveEnd('character', pos);
        range.moveStart('character', pos);
        range.select();
    }
}
#ifndef INTERFACEMODEL_H
#define INTERFACEMODEL_H

#include "dependency.h"
#include "class.h"
#include "struct.h"
#include "enum.h"
#include "simpletypeelement.h"
#include "printer.h"

// Standard headers for the container members declared below; the original
// header used std::vector, std::unordered_map and std::string while only
// including <set>, relying on transitive includes.
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

/**
 * Aggregated model of a parsed interface: its dependencies, classes,
 * structs, enums, free functions and known simple types.
 */
class InterfaceModel
{
public:
    InterfaceModel();

    /// Pretty-print the whole model through the given printer.
    void print(Printer& printer);

    /// Resolve a type name to its model element (presumably returns nullptr
    /// for unknown names — confirm against the implementation).
    const CodeElement* get_type(const std::string& type) const;

    /// The add_* members store the element and return a pointer to the
    /// stored instance.
    Dependency* add_dependency(const Dependency&);
    Class* add_class(const Class& new_class);
    Class* add_class(Class&& new_class);
    Struct* add_struct(const Struct& new_struct);
    Struct* add_struct(Struct&& new_struct);
    Enum* add_enumeration(const Enum& new_enum);
    Enum* add_enumeration(Enum&& new_enum);
    Function* add_function(const Function& new_enum);
    Function* add_function(Function&& new_enum);

    std::vector<Class> get_classes() const;
    /// NOTE: "sturctures" is a typo, kept as-is for source compatibility
    /// with existing callers.
    std::vector<Struct> get_sturctures() const;
    std::vector<Enum> get_enumerations() const;
    std::vector<Function> get_functions() const;

private:
    std::vector<Dependency> dependencies;
    std::vector<Class> classes;
    std::vector<Struct> structs;
    std::vector<Enum> enums;
    std::vector<Function> functions;
    std::unordered_map<std::string, SimpleType> types;
};

#endif // INTERFACEMODEL_H
|
#!/bin/bash
# SageMaker notebook lifecycle script: installs graph-notebook into the
# JupyterSystemEnv conda env and generates its Neptune connection config.
# Everything runs as ec2-user; the heredoc delimiter is quoted so nothing
# expands in the outer shell.
sudo -u ec2-user -i <<'EOF'

echo "export GRAPH_NOTEBOOK_AUTH_MODE=DEFAULT" >> ~/.bashrc # set to IAM instead of DEFAULT if cluster is IAM enabled
echo "export GRAPH_NOTEBOOK_HOST=CHANGE-ME" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_PORT=8182" >> ~/.bashrc
echo "export NEPTUNE_LOAD_FROM_S3_ROLE_ARN=" >> ~/.bashrc
echo "export AWS_REGION=us-west-2" >> ~/.bashrc

source activate JupyterSystemEnv

echo "installing Python 3 kernel"
python3 -m ipykernel install --sys-prefix --name python3 --display-name "Python 3"

echo "installing python dependencies..."
pip uninstall NeptuneGraphNotebook -y # legacy uninstall when we used to install from source in s3
pip install --upgrade graph-notebook

echo "installing nbextensions..."
python -m graph_notebook.nbextensions.install

echo "installing static resources..."
python -m graph_notebook.static_resources.install

echo "enabling visualization..."
jupyter nbextension install --py --sys-prefix graph_notebook.widgets
jupyter nbextension enable --py --sys-prefix graph_notebook.widgets

mkdir -p ~/SageMaker/Neptune
cd ~/SageMaker/Neptune || exit
python -m graph_notebook.notebooks.install
chmod -R a+rw ~/SageMaker/Neptune/*

source ~/.bashrc || exit
HOST=${GRAPH_NOTEBOOK_HOST}
PORT=${GRAPH_NOTEBOOK_PORT}
AUTH_MODE=${GRAPH_NOTEBOOK_AUTH_MODE}
SSL=${GRAPH_NOTEBOOK_SSL}
LOAD_FROM_S3_ARN=${NEPTUNE_LOAD_FROM_S3_ROLE_ARN}

# Default SSL to "True" when GRAPH_NOTEBOOK_SSL is unset or empty.  The
# original test was `[[ ${SSL} -eq "" ]]`: -eq evaluates both sides as
# arithmetic, so any non-numeric SSL value compared equal to "" and was
# silently reset.  -z is the correct emptiness test.
if [[ -z "${SSL}" ]]; then
SSL="True"
fi

echo "Creating config with
HOST: ${HOST}
PORT: ${PORT}
AUTH_MODE: ${AUTH_MODE}
SSL: ${SSL}
AWS_REGION: ${AWS_REGION}"

/home/ec2-user/anaconda3/envs/JupyterSystemEnv/bin/python -m graph_notebook.configuration.generate_config \
--host "${HOST}" \
--port "${PORT}" \
--auth_mode "${AUTH_MODE}" \
--ssl "${SSL}" \
--load_from_s3_arn "${LOAD_FROM_S3_ARN}" \
--aws_region "${AWS_REGION}"

source /home/ec2-user/anaconda3/bin/deactivate
echo "done."
EOF
|
#!/bin/bash
# Build the Sphinx doc sources: regenerate apidoc stubs for pydarkstar and
# convert the markdown pages to reStructuredText with pandoc.
set -v
set -e

# Work from the parent of the directory containing this script.  Expansions
# are quoted so paths containing spaces survive word splitting (SC2086).
SDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
cd "${SDIR}"
cd ..

mkdir -p _static
mkdir -p _templates
rm -rf ./generated

# Regenerate the .rst stubs for the package (tests and apps excluded).
export SPHINX_APIDOC_OPTIONS=members,undoc-members
sphinx-apidoc -o ./generated -f -e -T -M ../../pydarkstar ../../pydarkstar/tests ../../pydarkstar/apps
rm -f ./generated/modules.rst

# Markdown pages to convert with pandoc.
declare -a stubs=(
"requirements"
"setup"
"usage"
"advanced"
)

# README becomes an :orphan: page so Sphinx does not warn that it is absent
# from every toctree.
echo ":orphan:" > ./generated/README.rst
echo "" >> ./generated/README.rst
pandoc --from=gfm --to=rst ../../README.md >> ./generated/README.rst

for stub in "${stubs[@]}"; do
pandoc --from=gfm --to=rst ./markdown/"${stub}".md > ./generated/"${stub}".rst
echo "${stub}"
done

# Post-process titles and app pages inside the generated tree.
cd ./generated
python ../scripts/titles.py
python ../scripts/apps.py
cd ..
|
<filename>kratos/test/client/client.go
//
// Copyright 2022 SkyAPM org
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"context"
"fmt"
"os"
"github.com/SkyAPM/go2sky"
kratosplugin "github.com/SkyAPM/go2sky-plugins/kratos"
"github.com/SkyAPM/go2sky/reporter"
"github.com/go-kratos/kratos/examples/helloworld/helloworld"
"github.com/go-kratos/kratos/v2"
"github.com/go-kratos/kratos/v2/log"
"github.com/go-kratos/kratos/v2/middleware/logging"
"github.com/go-kratos/kratos/v2/middleware/recovery"
"github.com/go-kratos/kratos/v2/transport/grpc"
"github.com/go-kratos/kratos/v2/transport/http"
)
// Demo endpoints: the mock OAP collector that receives traces, this client's
// service name, and the host where the kratos server runs.
const (
	oap         = "mockoap:19876"
	serviceName = "kratos-client"
	host        = "kratosserver"
)

func main() {
	// Report traces to the (mock) SkyWalking OAP backend over gRPC.
	r, err := reporter.NewGRPCReporter(oap)
	//r, err := reporter.NewLogReporter()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	// Stdout logger enriched with the current trace context on every line.
	logger := log.NewStdLogger(os.Stdout)
	logger = log.With(logger, "segment_id", kratosplugin.SegmentID())
	logger = log.With(logger, "trace_id", kratosplugin.TraceID())
	logger = log.With(logger, "span_id", kratosplugin.SpanID())

	tracer, err := go2sky.NewTracer(serviceName, go2sky.WithReporter(r))
	if err != nil {
		panic(err)
	}

	// Outbound HTTP client with tracing + logging middleware.
	httpCli, err := http.NewClient(
		context.Background(),
		http.WithMiddleware(
			kratosplugin.Client(tracer),
			logging.Client(logger),
		),
		http.WithEndpoint(host+":8000"),
	)
	if err != nil {
		panic(err)
	}
	defer httpCli.Close()

	// Outbound gRPC client with the same middleware stack.
	grpcCli, err := grpc.DialInsecure(
		context.Background(),
		grpc.WithMiddleware(
			kratosplugin.Client(tracer),
			logging.Client(logger),
		),
		grpc.WithEndpoint(host+":9000"),
	)
	if err != nil {
		panic(err)
	}
	defer grpcCli.Close()

	httpClient := helloworld.NewGreeterHTTPClient(httpCli)
	grpcClient := helloworld.NewGreeterClient(grpcCli)

	// Inbound HTTP server: GET /hello fans out to the greeter service over
	// both HTTP and gRPC and returns the combined result.
	server := http.NewServer(
		http.Address(":8080"),
		http.Middleware(
			recovery.Recovery(),
			kratosplugin.Server(tracer),
			logging.Server(logger),
		),
	)
	route := server.Route("/")
	route.GET("/hello", func(ctx http.Context) error {
		var in interface{}
		http.SetOperation(ctx, "/hello")
		h := ctx.Middleware(func(ctx context.Context, req interface{}) (interface{}, error) {
			hreply, err := httpClient.SayHello(ctx, &helloworld.HelloRequest{Name: "http-kratos"})
			if err != nil {
				return fmt.Sprintf("[http] error: %v", err), err
			}
			greply, err := grpcClient.SayHello(ctx, &helloworld.HelloRequest{Name: "grpc-kratos"})
			if err != nil {
				return fmt.Sprintf("[grpc] error: %v", err), err
			}
			return fmt.Sprintf("[http] Say hello: %s, [grpc] Say hello: %s", hreply, greply), nil
		})
		return ctx.Returns(h(ctx, &in))
	})

	app := kratos.New(
		kratos.Name(serviceName),
		kratos.Server(server),
	)
	if err := app.Run(); err != nil {
		panic(err)
	}
}
|
<filename>SCRIPTS/mts_wait.sql<gh_stars>0
REM FILE NAME: mts_wait.sql
REM LOCATION: System Monitoring\Reports
REM FUNCTION: Generate wait time report for dispatchers
REM TESTED ON: 7.3.3.5, 8.0.4.1, 8.1.5, 8.1.7, 9.0.1
REM PLATFORM: non-specific
REM REQUIRES: v$queue, v$dispatcher
REM
REM This is a part of the Knowledge Xpert for Oracle Administration library.
REM Copyright (C) 2001 Quest Software
REM All rights reserved.
REM
REM******************** Knowledge Xpert for Oracle Administration ********************
COLUMN network FORMAT A50 HEADING 'Protocol'
COLUMN aw FORMAT A30 HEADING 'Average Wait Time %'
SET FEEDBACK OFF VERIFY OFF LINES 78 PAGES 58
START title132 'Dispatcher Wait Times'
SPOOL rep_out\mts_wait
REM Average wait per response for each dispatcher network protocol.
REM Fix: the concatenated label was missing its leading space, producing
REM output such as "0.5Seconds Wait Per response".
SELECT network,
DECODE (
SUM (totalq),
0, 'No responses',
SUM (wait) / SUM (totalq) * 100
|| ' Seconds Wait Per response'
) aw
FROM v$queue q, v$dispatcher d
WHERE q.TYPE = 'DISPATCHER' AND q.paddr = d.paddr
GROUP BY network;
SPOOL OFF
SET FEEDBACK ON VERIFY ON
TTITLE OFF
|
#!/bin/sh
# Generate a throw-away CA plus server and client certificates for tests.
# Produces test_server.pem, test_client_ca.pem (CA-signed) and
# test_client.pem (self-signed, NOT backed by the test CA).
# OpenSSL configuration
CERTDIR=./priv/test_certs
CADIR=$CERTDIR/ca
[ -d $CERTDIR ] || mkdir -p $CERTDIR
[ -d $CADIR ] || mkdir $CADIR
[ -d $CADIR/ca.db.certs ] || mkdir $CADIR/ca.db.certs
touch $CADIR/ca.db.index
echo 01 > $CADIR/ca.db.serial
cat>$CADIR/ca.conf<<'EOF'
[ ca ]
default_ca = ca_default
[ ca_default ]
dir = REPLACE_LATER
certs = $dir
new_certs_dir = $dir/ca.db.certs
database = $dir/ca.db.index
serial = $dir/ca.db.serial
RANDFILE = $dir/ca.db.rand
certificate = $dir/ca.pem
private_key = $dir/ca.key
default_days = 365
default_crl_days = 30
default_md = md5
preserve = no
policy = generic_policy
[ generic_policy ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
EOF
# The heredoc is quoted so $dir survives; patch in the real CA directory now.
sed -i "s|REPLACE_LATER|${CADIR}|" ${CADIR}/ca.conf
# CA
openssl genrsa -out $CERTDIR/test_ca.key 2048
openssl req -x509 -new -key $CERTDIR/test_ca.key -days 36500 -out $CERTDIR/test_ca.pem -subj "/C=RU/ST=Default-City/L=DC/O=Periscope/CN=Periscope Test CA"
SERVERHOST=${1:-localhost}
# Server certificate
openssl req -new -newkey rsa:2048 -keyout $CERTDIR/test_server.key -nodes -out $CERTDIR/test_server.csr -subj "/C=RU/ST=Default-City/L=DC/O=Periscope/CN=$SERVERHOST"
openssl ca -batch -config $CADIR/ca.conf -days 36500 -in $CERTDIR/test_server.csr -out $CERTDIR/test_server.crt -cert $CERTDIR/test_ca.pem -keyfile $CERTDIR/test_ca.key
cat $CERTDIR/test_server.crt $CERTDIR/test_server.key > $CERTDIR/test_server.pem
# Client certificate (CA signed)
openssl req -new -newkey rsa:2048 -keyout $CERTDIR/test_client_ca.key -nodes -out $CERTDIR/test_client_ca.csr -subj "/C=RU/ST=Default-City/L=DC/O=Periscope/CN=Periscope Test Client (CA)"
openssl ca -batch -config $CADIR/ca.conf -days 36500 -in $CERTDIR/test_client_ca.csr -out $CERTDIR/test_client_ca.crt -cert $CERTDIR/test_ca.pem -keyfile $CERTDIR/test_ca.key
cat $CERTDIR/test_client_ca.crt $CERTDIR/test_client_ca.key > $CERTDIR/test_client_ca.pem
# Client certificate (no CA)
openssl req -new -newkey rsa:2048 -keyout $CERTDIR/test_client.key -nodes -out $CERTDIR/test_client.csr -subj "/C=RU/ST=Default-City/L=DC/O=Periscope/CN=Periscope Test Client"
# Fix: test_client.crt was never generated, so the cat below always failed.
# Self-sign the CSR to obtain a certificate that is not issued by the test CA.
openssl x509 -req -in $CERTDIR/test_client.csr -signkey $CERTDIR/test_client.key -days 36500 -out $CERTDIR/test_client.crt
cat $CERTDIR/test_client.crt $CERTDIR/test_client.key > $CERTDIR/test_client.pem
rm -rf $CADIR
|
# Blackjack scoring fragment.
# NOTE(review): this chunk relies on names defined earlier in the file
# (your_cards, dealer_cards, score, dealers_score, ace_set, ace_value,
# done) -- confirm they are initialised before this point.
# Calculate the best possible score for the player's hand
for card in your_cards:
    if card != 'A':
        # assumes non-ace cards are numeric values -- TODO confirm; face
        # cards represented as 'J'/'Q'/'K' strings would raise TypeError here
        score += card
    else:
        # Count the ace high only when that does not bust the hand.
        if ace_set and score + ace_value <= 21:
            score += ace_value
        else:
            score += 1
# Calculate the best possible score for the dealer's hand
for card in dealer_cards:
    if card != 'A':
        dealers_score += card
    else:
        if ace_set and dealers_score + ace_value <= 21:
            dealers_score += ace_value
        else:
            dealers_score += 1
# Game loop to continue until the game is done
# NOTE(review): nothing inside this loop ever updates `done`, so as written
# the loop never terminates once entered -- the pass is a placeholder only.
while not done:
    # Implement game logic here
    # Update the 'done' variable based on game conditions
    pass  # Placeholder for game logic
# Compare final scores to determine the winner; a busted hand (> 21)
# is treated as a score of 0 so the other side wins.
if score > 21:
    player_score = 0
else:
    player_score = score
if dealers_score > 21:
    dealer_score = 0
else:
    dealer_score = dealers_score
if player_score > dealer_score:
    print("Player wins!")
elif player_score < dealer_score:
    print("Dealer wins!")
else:
    print("It's a tie!")
// Train and evaluate a multiclass logistic-regression model on a CSV dataset.
// NOTE(review): RandomForestClassificationModel is imported but never used.
import org.apache.spark.ml.classification.{LogisticRegression, RandomForestClassificationModel}
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.{VectorAssembler, StringIndexer}
// Read the dataset
// NOTE(review): header is read as "false", yet the columns below are
// addressed by name ("genre", "rating", ...) -- without a header Spark
// names them _c0.._cN, so this likely needs option("header","true").
val df = spark.read.option("header","false").csv("data.csv")
// Create feature vector
// NOTE(review): "etc" looks like a placeholder column name -- confirm.
val assembler = new VectorAssembler().
setInputCols(Array("genre","director","cast","etc")).
setOutputCol("features")
val df2 = assembler.transform(df)
// Label encode the target
val labelIndexer = new StringIndexer().
setInputCol("rating").
setOutputCol("label")
val df3 = labelIndexer.fit(df2).transform(df2)
// Create training and test splits
val Array(train, test) = df3.randomSplit(Array(0.7, 0.3))
// Create the machine learning model
val model = new LogisticRegression().fit(train)
// Evaluation
val predictions = model.transform(test)
val evaluator = new MulticlassClassificationEvaluator().
setLabelCol("label").
setPredictionCol("prediction").
setMetricName("accuracy")
val accuracy = evaluator.evaluate(predictions)
println("Test Accuracy = "+ accuracy)
void Manager::setAttribute(AttributeName attribute, AttributeValue value)
{
    // Read the base class's pending-attribute collection, record the new
    // value under the given attribute key, and write the updated collection
    // back so it is applied together with any other pending changes.
    auto attrs = Base::pendingAttributes();
    attrs[attribute] = value;
    Base::setPendingAttributes(attrs);
}
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rasa.workshop.rasa;
import com.rasa.workshop.common.DocumentException;
import com.rasa.workshop.common.DocumentExistsException;
import com.rasa.workshop.common.DocumentNotFoundException;
import com.rasa.workshop.rasa.event.Event;
import java.util.List;
import java.util.Map;
/**
 * An {@link Action} that gathers a set of slots from the user before
 * completing (a form-style action in the Rasa sense -- confirm against
 * the surrounding framework).
 */
public interface FormAction
extends Action {
/** Slot names that must be filled, given the current conversation tracker. */
List<String> requiredSlots(Tracker pTracker);
/** Maps each slot name to the extractors used to fill it from user input. */
Map<String, List<SlotExtractor>> slotExtractorsMap();
/**
 * Invoked once all required slots are filled; performs the form's side
 * effects and returns the events to apply to the conversation.
 */
List<Event> submit(Tracker pTracker, ActionResult pResult)
throws DocumentException, DocumentNotFoundException, DocumentExistsException;
}
|
#!/bin/bash
# Generate Go sources from the project's protobuf definitions.
# Fail fast so a broken protoc invocation (or a failed cd) is not ignored.
set -e
cd "$GOPATH/src"
PROJECT=github.com/mesg-foundation/core
# Service definition (messages only, no gRPC stubs)
protoc --go_out=./ $PROJECT/service/service.proto
# build Proto API (with gRPC service stubs)
protoc --go_out=plugins=grpc:./ --proto_path=./ $PROJECT/api/core/api.proto
protoc --go_out=plugins=grpc:./ --proto_path=./ $PROJECT/api/service/api.proto
|
#!/usr/bin/env bash
# Render the full fosdem17 diagram: generate mermaid markup with lua,
# then rasterize it with the dockerized mermaid CLI.
set -e
# Progress message to stderr so stdout stays clean for pipelines.
# (The original `echo ... 2>&1` redirected echo's empty stderr -- a no-op.)
echo "Generating all" >&2
lua generate.lua fosdem17/list all >out/all.mermaid
# Quote $(pwd) so the volume mount survives paths containing spaces.
docker run -v "$(pwd)/out":/src mermaid mermaid -w 8192 -o /src /src/all.mermaid
|
-- Average salary of employees in the department with the most employees.
-- A scalar "=" subquery is used instead of IN because MySQL rejects
-- LIMIT inside IN/ALL/ANY/SOME subqueries; ties for the largest
-- department are resolved arbitrarily (as in the original).
SELECT AVG(salary)
FROM Employees
WHERE department =
      (SELECT department
       FROM Employees
       GROUP BY department
       ORDER BY COUNT(*) DESC
       LIMIT 1);
#!/usr/bin/env bash
# Restart polybar with the Dracula config.

# Terminate already running bar instances. -q keeps killall quiet and
# returns success even when no polybar is running yet (the bare killall
# printed an error and returned non-zero on a fresh session).
killall -q polybar

# Wait until the processes have been shut down
while pgrep -u "$UID" -x polybar >/dev/null; do sleep 1; done

# Launch polybar in the background
polybar main -c ~/.config/polybar/dracula/config.ini &
|
// list of settings, events, and methods: http://kenwheeler.github.io/slick/
// there may be some additional ones specific to the HubSpot version
// note: I'm using the slick-initialized class. if you have multiple sliders on your page you'll need to be more specific in your selectors.
// get the value of an option:
$('.slick-initialized').slick('slickGetOption', '');
// example:
$('.slick-initialized').slick('slickGetOption', 'autoplay');
// get values of all options:
$('.slick-initialized').get(0).slick.options;
// set the value of an option:
$('.slick-initialized').slick('slickSetOption', '', '', true);
// the true at the end is for a refresh to the UI. just do it.
// example:
$('.slick-initialized').slick('slickSetOption', 'autoplay', false, true);
// effectively pause transitions:
$('.slick-initialized').slick('slickSetOption', 'autoplaySpeed', 10000000, true);
// alternatively:
$('.slick-initialized').slick('slickSetOption', 'autoplay', false, true);
// ^this sometimes takes a bit to trigger
// there is another, more consistent way to turn off the transitions. see: turnOffAutoplay.js
// restart transitions:
// change 3000 to whatever your transitions should be. it's in milliseconds,
// so 3000 equals 3 seconds.
// it takes about 30-60 seconds for this to kick in after having paused it.
$('.slick-initialized').slick('slickSetOption', 'autoplaySpeed', 3000, true);
// few more examples:
$('.slick-initialized').slick('slickSetOption', 'speed', 2000, true);
$('.slick-initialized').slick('slickSetOption', 'autoplaySpeed', 8000, true);
$('.slick-initialized').slick('slickSetOption', 'arrows', false, true);
// example usage of an event
// after slide changes:
$('.slick-initialized').on('afterChange', function(event, slick, currentSlide, nextSlide){
// do stuff
});
// log all of a slider's options to the console:
// (this will do it after every slide change, so if that's going to bother you, make sure to turn off autoplay first)
// it will log the slider data to the console. your options will be in an object for the key "options"
$('.slick-initialized').on('afterChange', function(event, slick, currentSlide, nextSlide){
console.log(slick);
});
|
<filename>src/drawer/Drawer.tsx<gh_stars>1-10
import React from 'react';
import ReactDOM from 'react-dom';
import { CSSTransition } from 'react-transition-group';
import cx from 'classnames';
import bem from '../utils/bem';
import { isBrowser } from '../utils/vars';
const b = bem('rdf-drawer');
const drawerMap: Record<string, Drawer> = {};
type Position = 'top' | 'right' | 'bottom' | 'left';
export interface DrawerProps {
id?: string;
visible?: boolean;
defaultVisible?: boolean;
position?: Position;
mask?: boolean;
maskClosable?: boolean;
disabled?: boolean;
root?: Element | null;
onVisibleChange?: (visible: boolean) => void;
className?: string;
style?: React.CSSProperties;
}
export interface DrawerState {
visible: boolean;
}
/**
 * Sliding drawer panel rendered into a portal. Supports controlled
 * (`visible` prop) and uncontrolled (`defaultVisible`) usage, plus
 * imperative toggling via `Drawer.toggle(id)` for registered instances.
 */
export class Drawer extends React.Component<DrawerProps, DrawerState> {
  static defaultProps: Partial<DrawerProps> = {
    defaultVisible: false,
    position: 'top',
    mask: true,
    maskClosable: true,
    disabled: false,
  };

  // Controlled mode: an explicit `visible` prop always wins over state.
  static getDerivedStateFromProps(props: DrawerProps) {
    if ('visible' in props) {
      return { visible: props.visible };
    }
    return null;
  }

  /**
   * Imperatively show/hide the drawer registered under `id`.
   * Omitting `visible` toggles the current state; unknown ids are a no-op.
   */
  static toggle(id: string, visible?: boolean) {
    const drawer = drawerMap[id];
    if (!drawer) {
      return;
    }
    visible = typeof visible === 'undefined' ? !drawer.state.visible : visible;
    drawer.changeVisible(visible);
  }

  state: DrawerState = {
    visible: ('visible' in this.props
      ? this.props.visible
      : this.props.defaultVisible) as boolean,
  };

  constructor(props: DrawerProps) {
    super(props);
    if (props.id) {
      drawerMap[props.id] = this;
    }
  }

  // Fix: instances were registered in drawerMap but never removed, leaking
  // unmounted components and letting Drawer.toggle() act on dead instances
  // (or on an older instance that re-used the same id). Unregister here.
  componentWillUnmount() {
    const { id } = this.props;
    if (id && drawerMap[id] === this) {
      delete drawerMap[id];
    }
  }

  // Central visibility switch; respects `disabled` and controlled mode.
  changeVisible = (visible: boolean) => {
    const { disabled, onVisibleChange } = this.props;
    if (disabled) {
      return;
    }
    if (!('visible' in this.props)) {
      this.setState({ visible });
    }
    if (onVisibleChange) {
      onVisibleChange(visible);
    }
  };

  handleMaskClick = () => {
    const { maskClosable } = this.props;
    if (maskClosable) {
      this.changeVisible(false);
    }
  };

  render() {
    if (!isBrowser) {
      return null;
    }
    const { position, mask, root, className, style, children } = this.props;
    const { visible } = this.state;
    const cls = cx(b(), className, b('', position as Position));
    // Render into the caller-supplied root, or a lazily created shared one.
    let drawerRoot = root || document.getElementById('rdf-drawer-root');
    if (!drawerRoot) {
      drawerRoot = document.createElement('div');
      drawerRoot.id = 'rdf-drawer-root';
      document.body.appendChild(drawerRoot);
    }
    return ReactDOM.createPortal(
      <CSSTransition
        classNames="rdf-drawer-anim-"
        in={visible}
        timeout={{ exit: 300 }}
        appear
        mountOnEnter
        unmountOnExit
      >
        <div className={cls} style={style}>
          {mask && <div className={b('mask')} onClick={this.handleMaskClick} />}
          <div className={b('content')}>{children}</div>
        </div>
      </CSSTransition>,
      drawerRoot
    );
  }
}
export default Drawer;
|
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
BASE_DIR=$(pwd)
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
# Fix: the canonical snippet uses $DIR, but this script's variable is
# SCRIPTDIR -- $DIR was never set, so relative symlinks resolved against
# the empty string instead of the directory containing the link.
[[ $SOURCE != /* ]] && SOURCE="$SCRIPTDIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
source ${SCRIPTDIR}/shared_utilities.sh
is_source_from_pr_testable "geode" "$(get_geode_pr_exclusion_dirs)" || exit 0
# Main CI body: derive the product version, assemble gradle arguments, and
# run the build remotely over SSH on the provisioned geode instance.
if [[ -z "${GRADLE_TASK}" ]]; then
echo "GRADLE_TASK must be set. exiting..."
exit 1
fi
ROOT_DIR=$(pwd)
BUILD_DATE=$(date +%s)
# Precheckin does not get a geode-build-version
if [ -e "${ROOT_DIR}/geode-build-version" ] ; then
GEODE_BUILD_VERSION_FILE=${ROOT_DIR}/geode-build-version/number
GEODE_BUILD_DIR=/tmp/geode-build
GEODE_PULL_REQUEST_ID_FILE=${ROOT_DIR}/geode/.git/resource/version.json
# PR builds are versioned by PR number; release pipelines use the
# zero-padded concourse build number.
if [ -e "${GEODE_PULL_REQUEST_ID_FILE}" ]; then
GEODE_PULL_REQUEST_ID=$(cat ${GEODE_PULL_REQUEST_ID_FILE} | jq --raw-output '.["pr"]')
FULL_PRODUCT_VERSION="geode-pr-${GEODE_PULL_REQUEST_ID}"
else
CONCOURSE_VERSION=$(cat ${GEODE_BUILD_VERSION_FILE})
echo "Concourse VERSION is ${CONCOURSE_VERSION}"
# Rebuild version, zero-padded
FULL_PRODUCT_VERSION=$(get-full-version ${CONCOURSE_VERSION})
BUILD_ID=$(get-geode-build-id-padded ${CONCOURSE_VERSION} 2> /dev/null)
fi
fi
# Parallel gradle is on unless explicitly disabled via PARALLEL_GRADLE=false.
if [[ ${PARALLEL_GRADLE:-"true"} == "true" ]]; then
PARALLEL_GRADLE="--parallel"
else
PARALLEL_GRADLE=""
fi
DEFAULT_GRADLE_TASK_OPTIONS="${PARALLEL_GRADLE} --console=plain --no-daemon"
GRADLE_SKIP_TASK_OPTIONS=""
SSHKEY_FILE="instance-data/sshkey"
SSH_OPTIONS="-i ${SSHKEY_FILE} -o ConnectionAttempts=60 -o StrictHostKeyChecking=no -o ServerAliveInterval=60 -o ServerAliveCountMax=5"
INSTANCE_IP_ADDRESS="$(cat instance-data/instance-ip-address)"
scp ${SSH_OPTIONS} ${SCRIPTDIR}/capture-call-stacks.sh geode@${INSTANCE_IP_ADDRESS}:.
# Note: PARALLEL_DUNIT is repurposed from a "true"/"false" flag into the
# actual gradle properties passed through to the remote build.
if [[ -n "${PARALLEL_DUNIT}" && "${PARALLEL_DUNIT}" == "true" ]]; then
PARALLEL_DUNIT="-PparallelDunit -PdunitDockerUser=geode"
if [ -n "${DUNIT_PARALLEL_FORKS}" ]; then
DUNIT_PARALLEL_FORKS="--max-workers=${DUNIT_PARALLEL_FORKS} -PtestMaxParallelForks=${DUNIT_PARALLEL_FORKS} -PdunitParallelForks=${DUNIT_PARALLEL_FORKS}"
fi
else
PARALLEL_DUNIT=""
DUNIT_PARALLEL_FORKS=""
fi
SET_JAVA_HOME="export JAVA_HOME=/usr/lib/jvm/bellsoft-java${JAVA_BUILD_VERSION}-amd64"
# Optionally start a tmux session on the instance that periodically captures
# JVM call stacks (used to diagnose hung tests).
if [ -v CALL_STACK_TIMEOUT ]; then
ssh ${SSH_OPTIONS} geode@${INSTANCE_IP_ADDRESS} "${SET_JAVA_HOME} && tmux new-session -d -s callstacks; tmux send-keys ~/capture-call-stacks.sh\ ${PARALLEL_DUNIT}\ ${CALL_STACK_TIMEOUT} C-m"
fi
if [ -z "${FULL_PRODUCT_VERSION}" ] ; then
FULL_PRODUCT_VERSION="0.0.0-UndefinedVersion"
fi
GRADLE_ARGS="\
${DEFAULT_GRADLE_TASK_OPTIONS} \
${GRADLE_SKIP_TASK_OPTIONS} \
${GRADLE_GLOBAL_ARGS} \
-Pversion=${FULL_PRODUCT_VERSION} \
-PbuildId=${BUILD_ID} \
build install javadoc spotlessCheck rat checkPom resolveDependencies pmdMain -x test"
# Echo the full remote command before running it so CI logs show exactly
# what was executed on the instance.
EXEC_COMMAND="mkdir -p tmp \
&& cp geode/ci/scripts/attach_sha_to_branch.sh /tmp/ \
&& /tmp/attach_sha_to_branch.sh geode ${BUILD_BRANCH} \
&& cd geode \
&& ${SET_JAVA_HOME} \
&& ./gradlew ${GRADLE_ARGS}"
echo "${EXEC_COMMAND}"
ssh ${SSH_OPTIONS} geode@${INSTANCE_IP_ADDRESS} "${EXEC_COMMAND}"
|
<reponame>jeffrey-xiao/acm-notebook<gh_stars>1-10
/* Time: O(V^3)
* Memory: O(V^2)
*/
#include <bits/stdc++.h>
using namespace std;
/* Stoer-Wagner global minimum cut of an undirected weighted graph.
 * Time: O(V^3), Memory: O(V^2).
 * Build the graph with addEdge(), then call getMinCut() exactly once:
 * the phases contract vertices and destroy the adjacency matrix.
 */
struct MinCut {
  int N;
  vector<vector<int>> adj;           // dense adjacency / merged edge weights
  vector<int> weight;                // connectivity of each vertex to the grown set
  vector<bool> inContraction, used;  // contracted away / added to the set this phase
  MinCut(int N) : N(N), adj(N, vector<int>(N)), weight(N, 0), inContraction(N, 0), used(N, 0) {}
  void addEdge(int u, int v, int c) {
    adj[u][v] = c;
    adj[v][u] = c;
  }
  int getMinCut() {
    int best = 1 << 30;
    // One maximum-adjacency phase per vertex still present in the graph.
    for (int phase = N - 1; phase >= 0; phase--) {
      // Every phase grows the set from vertex 0; seed with its edges.
      for (int i = 1; i < N; i++) {
        used[i] = inContraction[i];
        weight[i] = adj[0][i];
      }
      int last = 0, pick = 0;
      for (int step = 1; step <= phase; step++) {
        last = pick;
        pick = -1;
        // Greedily take the most tightly connected remaining vertex.
        for (int i = 1; i < N; i++) {
          if (used[i]) continue;
          if (pick == -1 || weight[i] > weight[pick]) pick = i;
        }
        if (step == phase) {
          // Cut-of-the-phase: weight[pick] is a candidate minimum cut;
          // afterwards contract `pick` into the previously added vertex.
          for (int i = 0; i < N; i++)
            adj[last][i] = adj[i][last] += adj[i][pick];
          inContraction[pick] = true;
          best = min(best, weight[pick]);
        } else {
          for (int i = 0; i < N; i++)
            weight[i] += adj[pick][i];
          used[pick] = true;
        }
      }
    }
    return best;
  }
};
|
import React from 'react';
import { connect } from 'react-redux';
import UserSettings from '../components/main/UserSettings';
import {
setTheme,
} from "../actions/SettingsActions";
const UserSettingsContainer = props => <UserSettings {...props} />;
const mapStateToProps = (state) => {
// const { } = state;
return {
}
};
const mapDispatchToProps = {
setTheme,
};
export default connect(mapStateToProps, mapDispatchToProps)(UserSettingsContainer); |
<reponame>lostmsu/RoboZZle-Droid<filename>src/com/team242/robozzle/achievements/PuzzleListAchievement.java
package com.team242.robozzle.achievements;
import com.team242.robozzle.model.Puzzle;
import java.util.HashSet;
import java.util.Set;
/**
* Created by IntelliJ IDEA.
* User: lost
* Date: 11.11.11
* Time: 19:49
*/
/**
 * Achievement earned once every puzzle in a fixed ID list is present in
 * the solved set. Completion semantics beyond the ID check come from
 * {@link PuzzleCountAchievement} (the count threshold passed up is 0).
 */
public class PuzzleListAchievement extends PuzzleCountAchievement{
// IDs of the puzzles that must all be covered.
final Set<Integer> puzzles;
public PuzzleListAchievement(int titleID, int descriptionID, int iconID, int[] puzzles){
super(titleID, descriptionID, iconID, 0);
if (puzzles == null) throw new IllegalArgumentException();
this.puzzles = new HashSet<Integer>();
for(int puzzleID: puzzles) this.puzzles.add(puzzleID);
}
/** True when the given puzzle (null-safe) is one of the tracked IDs. */
public boolean puzzleMatches(Puzzle puzzle){
return puzzle != null && puzzles.contains(puzzle.id);
}
@Override
public boolean isDone(Puzzle[] puzzles){
// NOTE(review): duplicate IDs in the input array are counted twice, which
// could satisfy the size check without covering every tracked puzzle --
// confirm callers never pass duplicates. Elements are also dereferenced
// without a null check, unlike puzzleMatches().
int found = 0;
for(Puzzle puzzle: puzzles){
if (this.puzzles.contains(puzzle.id)) found++;
}
if (found < this.puzzles.size()) return false;
return super.isDone(puzzles);
}
}
|
#!/usr/bin/env bash
# Vagrant provisioning: add the vitexsoftware apt repo, build the
# FlexiPeeHP Debian package from /vagrant, install it with gdebi, and run
# a smoke test against the installed library.
export DEBIAN_FRONTEND="noninteractive"
# NOTE(review): apt-key is deprecated on current Debian/Ubuntu; consider
# installing the key under /etc/apt/trusted.gpg.d instead.
wget -O - http://v.s.cz/info@vitexsoftware.cz.gpg.key|sudo apt-key add -
echo deb http://v.s.cz/ stable main | tee /etc/apt/sources.list.d/vitexsoftware.list
apt-get update
apt-get install -y php-cli php-curl php-pear php-intl php-zip composer dpkg-dev devscripts php-apigen-theme-default debhelper gdebi-core
# NOTE(review): this second update is redundant -- no sources changed since
# the previous one.
apt-get update
cd /vagrant
# Build the unsigned binary package; the .deb lands in the parent directory.
debuild -i -us -uc -b
#mkdir -p /vagrant/deb
#mv /*.deb /vagrant/deb
#cd /vagrant/deb
#dpkg-scanpackages . /dev/null | gzip -9c > Packages.gz
#echo "deb file:/vagrant/deb ./" > /etc/apt/sources.list.d/local.list
#apt-get update
export DEBCONF_DEBUG="developer"
#apt-get -y --allow-unauthenticated install flexipeehp
# gdebi installs the freshly built package and resolves its dependencies.
gdebi -n ../flexipeehp_*_all.deb
cd /usr/share/doc/FlexiPeeHP/
composer update
# Smoke test: ping the FlexiBee API via the installed library.
php -f /usr/share/doc/FlexiPeeHP/flexibeeping.php
|
<gh_stars>0
/* **** Notes
Copy
//*/
# define CALEND
# define CAR
# include "../../../incl/config.h"
signed(__cdecl cals_copy_events(cals_event_t(*di),cals_event_t(*si))) {
auto cals_event_t *ev;
auto signed i,r;
if(!di) return(0x00);
if(!si) return(0x00);
R(w,*di) = (R(w,*si));
R(b,*di) = (R(b,*si));
R(t,*di) = (R(t,*si));
R(colors,*di) = (R(colors,*si));
R(flag,*di) = (R(flag,*si));
R(periodic,*di) = (R(periodic,*si));
i = (CALS_DATE);
while(i) *(i+(R(date,*di))) = (*(--i+(R(date,*si))));
i = (CALS_TIME);
while(i) *(i+(R(time,*di))) = (*(--i+(R(time,*si))));
return(0x01);
}
|
package com.lewisallen.rtdptiCache.repositories;
import com.lewisallen.rtdptiCache.models.Station;
import org.springframework.data.jpa.repository.JpaRepository;
import java.util.List;
/**
 * Spring Data JPA repository for {@link Station} entities keyed by a
 * String identifier (presumably a station code -- confirm against Station).
 */
public interface TrainRepository extends JpaRepository<Station, String> {
/** Returns all stations whose {@code retrieve} column matches the given value. */
List<Station> findByRetrieve(int retrieve);
}
|
#!/bin/bash
# Create data file needed for Positve City Case Counts
# Fetch the daily cases page. Save in csv format in a file named with today's date.
# Aggregate the daily data into a single cumulative csv file.
# Customized for RaspberryPi
# Fetch the daily data page and parse it.
# The sed pipeline strips the infogram JS wrapper and reshapes the embedded
# JSON rows into "City","count" CSV lines. Note the unusual date format
# %Y-%m%d (e.g. 2020-0425) -- intentional, it matches the existing files.
wget -q -O - https://e.infogram.com/f6d9f731-5772-4da5-b149-5e42cc1c3b89 | sed -n '/Cases by City/p' | sed 's/<script>window.infographicData=//g' | sed 's#</script>##' | sed 's/\["City/\n\"City/g' | sed 's/,"custom/\n/'| sed '1,2d' | sed '$d' | sed 's/\],\[/\n/g' | sed 's/\]\]\]//' | sed 's/"$//g' | sed 's/","/",/g' > "$HOME/Projects/CityCaseCounts/dailydata/`date +%Y-%m%d`.csv"
echo "wget of today's data completed to ~/Projects/CityCaseCounts/dailydata/`date +%Y-%m%d`.csv"
# Create a work directory
kWork="/tmp/Citydata"
if [ -d $kWork ]; then
rm -r $kWork/*
else
mkdir $kWork
fi
# For each daily data file
# Extract the data lines and sort them and save just the numbers
cd ~/Projects/CityCaseCounts/dailydata
for f in *.csv ; do tail -n+2 $f | sort | cut -d"," -f2 > $kWork/$f ; done
# Create a file of just city names sorted alphabetically
# NOTE(review): the city list is taken from the hardcoded 2020-0425.csv
# snapshot -- cities added after that date will be missing from it.
tail -n+2 2020-0425.csv | sort | cut -d"," -f1 > $kWork/cities.txt
# Create a file of city names ordered by count (descending) from today's data
sort -k2 -t, -nr "$HOME/Projects/CityCaseCounts/dailydata/`date +%Y-%m%d`.csv" | cut -d, -f1 > ~/Projects/CityCaseCounts/CityCaseCounts.git/trunk/sortedCityNames.txt
# Get all the filenames (dates) into a single file
ls -1 *.csv | cut -d "." -f 1 | cut -d "-" -f 2 | paste -d, -s > ~/Projects/CityCaseCounts/CityCaseCounts.git/trunk/days.txt
# Merge the city names and the aggregate daily data
cd ~/Projects/CityCaseCounts/CityCaseCounts.git/trunk
ls -1 ~/Projects/CityCaseCounts/dailydata/*.csv | python aggregate.py > cumulative.csv
echo "Cumulative file created at `date`."
# Commit the regenerated files to version control.
../../commitdaily.sh
|
<gh_stars>1-10
package com.dieselpoint.norm;
import static org.junit.Assert.fail;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import javax.persistence.*;
import org.junit.Ignore;
import org.junit.Test;
/**
 * Scratch test for the fluent select/join builder.
 * NOTE(review): this "test" asserts nothing and uses a developer-specific
 * absolute sqlite path, so it only runs on one machine; it documents the
 * intended query-builder usage rather than verifying behaviour. Several
 * imports (fail, LinkedHashMap, Map, Ignore) are unused.
 */
public class TestSelect {
@Test
public void selectTest() {
Database db = new Database();
db.setJdbcUrl("jdbc:sqlite:/home/ghost/IdeaProjects/norm/norm/test.sqlite3");
// Builds: SELECT person.id, name FROM person INNER JOIN name ON ... WHERE ...
List<Person> rows =
db.select("person.id, name")
.table("person")
.innerJoin("name")
.on("person.name_id = name.id")
.where("name.name = ? or name.name = ?", "nick", "Nick")
.results(Person.class);
System.out.println();
}
// Row mapping for the person table.
@Table(name="person")
public static class Person {
@Id
public long id;
@OneToOne()
@JoinColumn
public String name;
}
// Row mapping for the name table.
@Table(name="name")
public static class Name {
@Id
public long id;
public String firstName;
}
}
|
#!/usr/bin/env sh
# generated from catkin/cmake/template/setup.sh.in
# NOTE: auto-generated file -- change the catkin template, not this copy.
# Sets various environment variables and sources additional environment hooks.
# It tries it's best to undo changes from a previously sourced setup file before.
# Supported command line options:
# --extend: skips the undoing of changes from a previously sourced setup file
# --local: only considers this workspace but not the chained ones
# In plain sh shell which doesn't support arguments for sourced scripts you can
# set the environment variable `CATKIN_SETUP_UTIL_ARGS=--extend/--local` instead.
# since this file is sourced either use the provided _CATKIN_SETUP_DIR
# or fall back to the destination set at configure time
: ${_CATKIN_SETUP_DIR:=/home/venom/ros/hrwros_ws/devel/.private/ur_msgs}
_SETUP_UTIL="$_CATKIN_SETUP_DIR/_setup_util.py"
unset _CATKIN_SETUP_DIR
if [ ! -f "$_SETUP_UTIL" ]; then
echo "Missing Python script: $_SETUP_UTIL"
return 22
fi
# detect if running on Darwin platform
_UNAME=`uname -s`
_IS_DARWIN=0
if [ "$_UNAME" = "Darwin" ]; then
_IS_DARWIN=1
fi
unset _UNAME
# make sure to export all environment variables
export CMAKE_PREFIX_PATH
# Darwin uses DYLD_LIBRARY_PATH in place of LD_LIBRARY_PATH.
if [ $_IS_DARWIN -eq 0 ]; then
export LD_LIBRARY_PATH
else
export DYLD_LIBRARY_PATH
fi
unset _IS_DARWIN
export PATH
export PKG_CONFIG_PATH
export PYTHONPATH
# remember type of shell if not already set
if [ -z "$CATKIN_SHELL" ]; then
CATKIN_SHELL=sh
fi
# invoke Python script to generate necessary exports of environment variables
# use TMPDIR if it exists, otherwise fall back to /tmp
if [ -d "${TMPDIR:-}" ]; then
_TMPDIR="${TMPDIR}"
else
_TMPDIR=/tmp
fi
_SETUP_TMP=`mktemp "${_TMPDIR}/setup.sh.XXXXXXXXXX"`
unset _TMPDIR
if [ $? -ne 0 -o ! -f "$_SETUP_TMP" ]; then
echo "Could not create temporary file: $_SETUP_TMP"
return 1
fi
CATKIN_SHELL=$CATKIN_SHELL "$_SETUP_UTIL" $@ ${CATKIN_SETUP_UTIL_ARGS:-} >> "$_SETUP_TMP"
_RC=$?
if [ $_RC -ne 0 ]; then
if [ $_RC -eq 2 ]; then
echo "Could not write the output of '$_SETUP_UTIL' to temporary file '$_SETUP_TMP': may be the disk if full?"
else
echo "Failed to run '\"$_SETUP_UTIL\" $@': return code $_RC"
fi
unset _RC
unset _SETUP_UTIL
rm -f "$_SETUP_TMP"
unset _SETUP_TMP
return 1
fi
unset _RC
unset _SETUP_UTIL
# Apply the generated exports in the current (sourced) shell.
. "$_SETUP_TMP"
rm -f "$_SETUP_TMP"
unset _SETUP_TMP
# source all environment hooks
_i=0
while [ $_i -lt $_CATKIN_ENVIRONMENT_HOOKS_COUNT ]; do
eval _envfile=\$_CATKIN_ENVIRONMENT_HOOKS_$_i
unset _CATKIN_ENVIRONMENT_HOOKS_$_i
eval _envfile_workspace=\$_CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
unset _CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
# set workspace for environment hook
CATKIN_ENV_HOOK_WORKSPACE=$_envfile_workspace
. "$_envfile"
unset CATKIN_ENV_HOOK_WORKSPACE
_i=$((_i + 1))
done
unset _i
unset _CATKIN_ENVIRONMENT_HOOKS_COUNT
|
<reponame>intel/intel-iot-services-orchestration-layer-dev<gh_stars>0
/******************************************************************************
Copyright (c) 2015, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
import class_name from "classnames";
// Resolve the React implementation class for a widget's spec.
// Falls back to UnknownWidget when the spec is missing or is not a UI spec.
function get_widget_impl(widget) {
  const spec = $hope.app.stores.spec.get_spec(widget.spec);
  return (spec && spec.is_ui) ? spec.$get_impl() : UnknownWidget;
}
// Dashboard grid backed by the gridstack jQuery plugin. Child Widget
// components register themselves via add_widget/remove_widget; the grid
// places their DOM nodes after mount/update.
export default class Grid extends ReactComponent {
static propTypes = {
float: React.PropTypes.bool,
animate: React.PropTypes.bool,
cellHeight: React.PropTypes.number,
verticalMargin: React.PropTypes.number,
maxHeight: React.PropTypes.number,
width: React.PropTypes.number
};
constructor() {
super();
// Mounted child Widget components awaiting / holding grid placement.
this.widgets = [];
}
// Called by a child widget on unmount: detach its DOM node from gridstack.
remove_widget(w) {
_.pull(this.widgets, w);
var dom = ReactDOM.findDOMNode(w);
this.grid.remove_widget(dom, false);
}
// Called by a child widget on mount; actual placement happens in
// update_widgets() once the gridstack instance exists.
add_widget(w) {
this.widgets.push(w);
}
// Place any newly mounted widgets into gridstack in one batch.
// $hope_is_added marks components already handed to gridstack.
update_widgets() {
this.grid.batch_update();
this.widgets.forEach(w => {
if (!w.$hope_is_added) {
let widget = w.props.widget;
let auto = widget.auto_position || false;
// Fall back to auto-placement when no explicit coordinates are stored.
if (!_.isNumber(widget.x) || !_.isNumber(widget.y)) {
auto = true;
}
$hope.log("widget", "add", widget, "is_auto:", auto);
var dom = ReactDOM.findDOMNode(w);
this.grid.add_widget(dom,
widget.x, widget.y, widget.width, widget.height, auto);
}
w.$hope_is_added = true;
});
this.grid.commit();
}
componentDidMount() {
var options = {
always_show_resize_handle: false,
float: this.props.float || false,
animate: this.props.animate || false,
cell_height: $hope.config.widget_cell_height,
vertical_margin: $hope.config.widget_vertical_margin,
height: this.props.maxHeight || 0,
auto: false // we will add_widget by ourselves
};
var gridstack = $(ReactDOM.findDOMNode(this)).gridstack(options);
this.grid = gridstack.data("gridstack");
this.update_widgets();
}
componentWillUnmount() {
// Deferred so children can still call remove_widget() during their
// own unmount before the gridstack instance is destroyed.
setTimeout(()=> {
this.grid.destroy();
}, 0);
}
componentDidUpdate() {
setTimeout(()=> {
this.update_widgets();
}, 0);
}
render() {
var children = _.map(this.props.widgets, w => {
return React.createElement(get_widget_impl(w), {
key: w.id,
view: this.props.view,
gw: this.props.width,
hopeGrid: this,
widget: w
});
});
return (
<div className={class_name(this.props.className, "grid-stack")}>
{children}
</div>
);
}
}
// Base class for all dashboard widgets: registers with the parent Grid,
// binds to the UI data store, and renders the common grid-stack wrapper.
// Subclasses call super.render(children) with their own content.
class Widget extends ReactComponent {
static propTypes = {
view: React.PropTypes.object.isRequired,
hopeGrid: React.PropTypes.object.isRequired,
widget: React.PropTypes.object.isRequired,
gw: React.PropTypes.number.isRequired
};
// Last data-store state seen; used by shouldComponentUpdate to skip
// re-renders when neither data state nor grid width changed.
$state = -1;
constructor(props) {
super(props);
// ensure data is created
$hope.app.stores.ui.data.create_widget(props.view.get_app_id(), props.widget.id);
}
// Latest data for this widget from the UI data store.
get_data() {
var widget = this.props.widget;
var data_manager = $hope.app.stores.ui.data;
return data_manager.get_data(widget.id);
}
// Pixel height derived from grid rows (cells plus inter-cell margins).
get_height() {
var h = this.props.widget.height;
return h * $hope.config.widget_cell_height + (h - 1) * $hope.config.widget_vertical_margin;
}
// Pixel width: widget columns as a fraction of the 12-column grid.
get_width() {
var w = this.props.widget.width;
return w * this.props.gw / 12;
}
// Inject a one-off <style> element (id-deduplicated) into the page head.
set_css(id, css) {
var style = $("style#" + id);
if (style.length === 0) {
style = $("<style id='" + id + "' type='text/css'>" + css + "</style>");
$("head").append(style);
}
}
//
// {
// "PORT_1": data,
// "PORT_2": data
// }
//
// Dispatch data keyed by output port to the backend; empty payloads are
// dropped.
send_data(json) {
if (_.isEmpty(json)) {
return;
}
$hope.trigger_action("ui/send_data", {
ui_id: this.props.view.id,
id: this.props.widget.id,
data: json
});
}
// Switch UI
switch_ui(id) {
window.location.replace("/#/ui/" + id); //TODO: hack hack
}
componentDidMount() {
// NOTE that the cdm of Grid is invoked later than its children's cdm
// so this would be added to parent and got invoked by gridstack's add_widget
this.props.hopeGrid.add_widget(this);
this.get_data();
}
componentWillUnmount() {
this.props.hopeGrid.remove_widget(this);
}
shouldComponentUpdate(nextProps) {
var widget = nextProps.widget;
var data_manager = $hope.app.stores.ui.data;
var latest_state = data_manager.get_state(widget.id);
var gw = nextProps.gw || this.props.gw;
if (latest_state === this.$state && gw === this.$gw) {
return false;
}
this.$state = latest_state;
this.$gw = gw;
return true;
}
// Non-standard render signature: subclasses pass their content in as
// `children` and get the locked grid-stack wrapper around it.
render(children) {
var widget = this.props.widget;
return (
<div className={"grid-stack-item"}
data-hope-widget-id={widget.id}
key={widget.id}
data-gs-no-resize={true}
data-gs-no-move={true}
data-gs-locked={true} >
<div className={class_name("grid-stack-item-content",
"hope-ui-widget", widget.className)}>
{children}
</div>
</div>
);
}
}
class UnknownWidget extends Widget {
shouldComponentUpdate() {
return false;
}
render() {
var widget = this.props.widget;
return super.render(
<div style={{
width: "100%",
height: "100%",
background: "red",
color: "yellow",
textAlign: "center"
}}>
<strong>{__("Unknown UI Widget")}</strong>
<div style={{
color: "blue"
}}>{widget.spec}</div>
</div>
);
}
}
// Expose Widget and react-bootstrap as globals — presumably so widget
// implementations loaded outside this bundle can subclass/use them
// (NOTE(review): confirm against the widget-loading code).
window.Widget = Widget;
window.ReactBootstrap = require("react-bootstrap");
|
#!/bin/bash
# Compile SINGE_GLG_Test.m and SINGE_Aggregate.m for macOS
# Run from the repository base directory
# Compile SINGE creating binaries for SINGE_GLG_Test.m and SINGE_Aggregate.m
# Rename both files to include the _mac suffix so that there are not filename collisions

# Abort on the first failed command: without this, a failed mcc compile would
# still rename readme.txt, tar up stale .app bundles and then delete them.
set -e

mv code/SINGE_GLG_Test.m code/SINGE_GLG_Test_mac.m
mcc -N -m -R -singleCompThread -R -nodisplay -R -nojvm -a ./glmnet_matlab/ -a ./code/ SINGE_GLG_Test_mac.m
mv code/SINGE_GLG_Test_mac.m code/SINGE_GLG_Test.m
mv readme.txt readme_SINGE_GLG_Test_mac.txt
mv code/SINGE_Aggregate.m code/SINGE_Aggregate_mac.m
mcc -N -m -R -singleCompThread -R -nodisplay -R -nojvm -a ./code/ SINGE_Aggregate_mac.m
mv code/SINGE_Aggregate_mac.m code/SINGE_Aggregate.m
mv readme.txt readme_SINGE_Aggregate_mac.txt
tar cfz SINGE_mac.tgz SINGE_GLG_Test_mac.app SINGE_Aggregate_mac.app
rm -rf SINGE_GLG_Test_mac.app
rm -rf SINGE_Aggregate_mac.app
|
# Package Crypter for macOS (darwin x64) with electron-packager.
echo "pwd: $PWD"
export NODE_ENV=production

# Abort on the first failure so we never zip/ship a half-built app.
set -e

# remove any existing distribution
rm -rf dest
npm install --production
# Quote $npm_package_productName and the command substitution so names/output
# containing spaces do not word-split.
electron-packager . "$npm_package_productName" --out=dest --ignore='(test|backups|github)' --asar=false --platform=darwin --arch=x64 --version="$(npm run electronVersion)" --icon=res/app-icons/Crypter.icns --app-copyright=Habib_Rehman --overwrite
cp ./github/RELEASE ./dest/Crypter-darwin-x64/RELEASE
cp ./license ./dest/Crypter-darwin-x64
zip -9r ./dest/Crypter-darwin-x64 ./dest/Crypter-darwin-x64
|
// Use server-friendly technology
const fs = require('fs');
const http = require('http');
const https = require('https');
const path = require('path');
const url = require('url');
const zlib = require('zlib');
// Use async methods
// Fetches a URL and resolves with its full response body as a string.
// Bug fix: the original always used https.get(), which throws
// "Protocol 'http:' not supported" for the http:// URLs that getData()
// passes in. Pick the client that matches the URL scheme instead.
const get = async (targetUrl) => {
  return new Promise((resolve, reject) => {
    const client = targetUrl.startsWith('https:') ? https : http;
    client.get(targetUrl, (res) => {
      let data = '';
      res.on('data', (chunk) => {
        data += chunk;
      });
      res.on('end', () => {
        resolve(data);
      });
    }).on('error', (err) => {
      reject(err);
    });
  });
};
// Cache requests
let cache = {};
// Fetch-through cache: the first (truthy) response for a URL is stored and
// reused by every later call.
const getData = async (url) => {
  const cached = cache[url];
  if (cached) {
    return cached;
  }
  const fresh = await get(url);
  cache[url] = fresh;
  return fresh;
};
// Map of file extensions to Content-Type values; anything not listed here is
// served as application/octet-stream by the request handler below.
const mimeTypes = {
  '.html': 'text/html',
  '.js': 'text/javascript',
  '.css': 'text/css',
  '.json': 'application/json'
};
const compressHandler = (ext, data) => {
if (ext === '.html' || ext === '.css' || ext === '.js') {
const acceptEncoding = req.headers['accept-encoding'];
if (acceptEncoding.match(/\bgzip\b/)) {
res.setHeader('Content-Encoding', 'gzip');
data = zlib.gzipSync(data);
} else if (acceptEncoding.match(/\bdeflate\b/)) {
res.setHeader('Content-Encoding', 'deflate');
data = zlib.deflateSync(data);
}
}
return data;
};
// Static file server with compression and a caching proxy fallback.
// Fixes: pass req/res through to compressHandler (it needs them to negotiate
// the encoding) and reject paths that escape the working directory.
http.createServer(async (req, res) => {
  const parsedUrl = url.parse(req.url, true);
  const { pathname } = parsedUrl;
  const ext = path.parse(pathname).ext;
  const filePath = path.join(process.cwd(), pathname);
  // NOTE(security): pathname is untrusted — refuse traversal out of cwd.
  if (!filePath.startsWith(process.cwd())) {
    res.statusCode = 403;
    res.end('Forbidden');
    return;
  }
  let data;
  try {
    // Serve the local file, compressed when the client supports it.
    data = compressHandler(ext, await fs.promises.readFile(filePath), req, res);
  } catch (e) {
    // Local file missing: fall back to the (cached) upstream proxy.
    data = await getData(`http://example.com${req.url}`);
  }
  res.setHeader('Content-Type', mimeTypes[ext] || 'application/octet-stream');
  res.end(data);
}).listen(8080);
<gh_stars>1-10
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.base.Verify.verify;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.base.Preconditions;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import org.checkerframework.checker.nullness.qual.Nullable;
/**
* Utilities for treating interruptible operations as uninterruptible. In all cases, if a thread is
* interrupted during such a call, the call continues to block until the result is available or the
* timeout elapses, and only then re-interrupts the thread.
*
* @author <NAME>
* @since 10.0
*/
@GwtCompatible(emulated = true)
@ElementTypesAreNonnullByDefault
public final class Uninterruptibles {
  // Implementation Note: As of 3-7-11, the logic for each blocking/timeout
  // methods is identical, save for method being invoked.
  /** Invokes {@code latch.}{@link CountDownLatch#await() await()} uninterruptibly. */
  @GwtIncompatible // concurrency
  public static void awaitUninterruptibly(CountDownLatch latch) {
    // Pattern used throughout this class: swallow InterruptedException and
    // retry, remembering the interrupt so it can be re-asserted in finally.
    boolean interrupted = false;
    try {
      while (true) {
        try {
          latch.await();
          return;
        } catch (InterruptedException e) {
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /**
   * Invokes {@code latch.}{@link CountDownLatch#await(long, TimeUnit) await(timeout, unit)}
   * uninterruptibly.
   */
  @CanIgnoreReturnValue // TODO(cpovirk): Consider being more strict.
  @GwtIncompatible // concurrency
  @SuppressWarnings("GoodTime") // should accept a java.time.Duration
  public static boolean awaitUninterruptibly(CountDownLatch latch, long timeout, TimeUnit unit) {
    boolean interrupted = false;
    try {
      long remainingNanos = unit.toNanos(timeout);
      // Fixed deadline: interrupts must not extend the total wait.
      long end = System.nanoTime() + remainingNanos;
      while (true) {
        try {
          // CountDownLatch treats negative timeouts just like zero.
          return latch.await(remainingNanos, NANOSECONDS);
        } catch (InterruptedException e) {
          interrupted = true;
          remainingNanos = end - System.nanoTime();
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /**
   * Invokes {@code condition.}{@link Condition#await(long, TimeUnit) await(timeout, unit)}
   * uninterruptibly.
   *
   * @since 23.6
   */
  @GwtIncompatible // concurrency
  @SuppressWarnings("GoodTime") // should accept a java.time.Duration
  public static boolean awaitUninterruptibly(Condition condition, long timeout, TimeUnit unit) {
    boolean interrupted = false;
    try {
      long remainingNanos = unit.toNanos(timeout);
      long end = System.nanoTime() + remainingNanos;
      while (true) {
        try {
          return condition.await(remainingNanos, NANOSECONDS);
        } catch (InterruptedException e) {
          interrupted = true;
          remainingNanos = end - System.nanoTime();
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /** Invokes {@code toJoin.}{@link Thread#join() join()} uninterruptibly. */
  @GwtIncompatible // concurrency
  public static void joinUninterruptibly(Thread toJoin) {
    boolean interrupted = false;
    try {
      while (true) {
        try {
          toJoin.join();
          return;
        } catch (InterruptedException e) {
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /**
   * Invokes {@code unit.}{@link TimeUnit#timedJoin(Thread, long) timedJoin(toJoin, timeout)}
   * uninterruptibly.
   */
  @GwtIncompatible // concurrency
  @SuppressWarnings("GoodTime") // should accept a java.time.Duration
  public static void joinUninterruptibly(Thread toJoin, long timeout, TimeUnit unit) {
    Preconditions.checkNotNull(toJoin);
    boolean interrupted = false;
    try {
      long remainingNanos = unit.toNanos(timeout);
      long end = System.nanoTime() + remainingNanos;
      while (true) {
        try {
          // TimeUnit.timedJoin() treats negative timeouts just like zero.
          NANOSECONDS.timedJoin(toJoin, remainingNanos);
          return;
        } catch (InterruptedException e) {
          interrupted = true;
          remainingNanos = end - System.nanoTime();
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /**
   * Invokes {@code future.}{@link Future#get() get()} uninterruptibly.
   *
   * <p>Similar methods:
   *
   * <ul>
   *   <li>To retrieve a result from a {@code Future} that is already done, use {@link
   *       Futures#getDone Futures.getDone}.
   *   <li>To treat {@link InterruptedException} uniformly with other exceptions, use {@link
   *       Futures#getChecked(Future, Class) Futures.getChecked}.
   *   <li>To get uninterruptibility and remove checked exceptions, use {@link
   *       Futures#getUnchecked}.
   * </ul>
   *
   * @throws ExecutionException if the computation threw an exception
   * @throws CancellationException if the computation was cancelled
   */
  @CanIgnoreReturnValue
  @ParametricNullness
  public static <V extends @Nullable Object> V getUninterruptibly(Future<V> future)
      throws ExecutionException {
    boolean interrupted = false;
    try {
      while (true) {
        try {
          return future.get();
        } catch (InterruptedException e) {
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /**
   * Invokes {@code future.}{@link Future#get(long, TimeUnit) get(timeout, unit)} uninterruptibly.
   *
   * <p>Similar methods:
   *
   * <ul>
   *   <li>To retrieve a result from a {@code Future} that is already done, use {@link
   *       Futures#getDone Futures.getDone}.
   *   <li>To treat {@link InterruptedException} uniformly with other exceptions, use {@link
   *       Futures#getChecked(Future, Class, long, TimeUnit) Futures.getChecked}.
   *   <li>To get uninterruptibility and remove checked exceptions, use {@link
   *       Futures#getUnchecked}.
   * </ul>
   *
   * @throws ExecutionException if the computation threw an exception
   * @throws CancellationException if the computation was cancelled
   * @throws TimeoutException if the wait timed out
   */
  @CanIgnoreReturnValue
  @GwtIncompatible // TODO
  @SuppressWarnings("GoodTime") // should accept a java.time.Duration
  @ParametricNullness
  public static <V extends @Nullable Object> V getUninterruptibly(
      Future<V> future, long timeout, TimeUnit unit) throws ExecutionException, TimeoutException {
    boolean interrupted = false;
    try {
      long remainingNanos = unit.toNanos(timeout);
      long end = System.nanoTime() + remainingNanos;
      while (true) {
        try {
          // Future treats negative timeouts just like zero.
          return future.get(remainingNanos, NANOSECONDS);
        } catch (InterruptedException e) {
          interrupted = true;
          remainingNanos = end - System.nanoTime();
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /** Invokes {@code queue.}{@link BlockingQueue#take() take()} uninterruptibly. */
  @GwtIncompatible // concurrency
  public static <E> E takeUninterruptibly(BlockingQueue<E> queue) {
    boolean interrupted = false;
    try {
      while (true) {
        try {
          return queue.take();
        } catch (InterruptedException e) {
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /**
   * Invokes {@code queue.}{@link BlockingQueue#put(Object) put(element)} uninterruptibly.
   *
   * @throws ClassCastException if the class of the specified element prevents it from being added
   *     to the given queue
   * @throws IllegalArgumentException if some property of the specified element prevents it from
   *     being added to the given queue
   */
  @GwtIncompatible // concurrency
  public static <E> void putUninterruptibly(BlockingQueue<E> queue, E element) {
    boolean interrupted = false;
    try {
      while (true) {
        try {
          queue.put(element);
          return;
        } catch (InterruptedException e) {
          interrupted = true;
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  // TODO(user): Support Sleeper somehow (wrapper or interface method)?
  /** Invokes {@code unit.}{@link TimeUnit#sleep(long) sleep(sleepFor)} uninterruptibly. */
  @GwtIncompatible // concurrency
  @SuppressWarnings("GoodTime") // should accept a java.time.Duration
  public static void sleepUninterruptibly(long sleepFor, TimeUnit unit) {
    boolean interrupted = false;
    try {
      long remainingNanos = unit.toNanos(sleepFor);
      long end = System.nanoTime() + remainingNanos;
      while (true) {
        try {
          // TimeUnit.sleep() treats negative timeouts just like zero.
          NANOSECONDS.sleep(remainingNanos);
          return;
        } catch (InterruptedException e) {
          interrupted = true;
          remainingNanos = end - System.nanoTime();
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /**
   * Invokes {@code semaphore.}{@link Semaphore#tryAcquire(int, long, TimeUnit) tryAcquire(1,
   * timeout, unit)} uninterruptibly.
   *
   * @since 18.0
   */
  @GwtIncompatible // concurrency
  @SuppressWarnings("GoodTime") // should accept a java.time.Duration
  public static boolean tryAcquireUninterruptibly(
      Semaphore semaphore, long timeout, TimeUnit unit) {
    return tryAcquireUninterruptibly(semaphore, 1, timeout, unit);
  }
  /**
   * Invokes {@code semaphore.}{@link Semaphore#tryAcquire(int, long, TimeUnit) tryAcquire(permits,
   * timeout, unit)} uninterruptibly.
   *
   * @since 18.0
   */
  @GwtIncompatible // concurrency
  @SuppressWarnings("GoodTime") // should accept a java.time.Duration
  public static boolean tryAcquireUninterruptibly(
      Semaphore semaphore, int permits, long timeout, TimeUnit unit) {
    boolean interrupted = false;
    try {
      long remainingNanos = unit.toNanos(timeout);
      long end = System.nanoTime() + remainingNanos;
      while (true) {
        try {
          // Semaphore treats negative timeouts just like zero.
          return semaphore.tryAcquire(permits, remainingNanos, NANOSECONDS);
        } catch (InterruptedException e) {
          interrupted = true;
          remainingNanos = end - System.nanoTime();
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /**
   * Invokes {@code lock.}{@link Lock#tryLock(long, TimeUnit) tryLock(timeout, unit)}
   * uninterruptibly.
   *
   * @since 30.0
   */
  @GwtIncompatible // concurrency
  @SuppressWarnings("GoodTime") // should accept a java.time.Duration
  public static boolean tryLockUninterruptibly(Lock lock, long timeout, TimeUnit unit) {
    boolean interrupted = false;
    try {
      long remainingNanos = unit.toNanos(timeout);
      long end = System.nanoTime() + remainingNanos;
      while (true) {
        try {
          return lock.tryLock(remainingNanos, NANOSECONDS);
        } catch (InterruptedException e) {
          interrupted = true;
          remainingNanos = end - System.nanoTime();
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  /**
   * Invokes {@code executor.}{@link ExecutorService#awaitTermination(long, TimeUnit)
   * awaitTermination(long, TimeUnit)} uninterruptibly with no timeout.
   *
   * @since 30.0
   */
  @GwtIncompatible // concurrency
  public static void awaitTerminationUninterruptibly(ExecutorService executor) {
    // TODO(cpovirk): We could optimize this to avoid calling nanoTime() at all.
    verify(awaitTerminationUninterruptibly(executor, Long.MAX_VALUE, NANOSECONDS));
  }
  /**
   * Invokes {@code executor.}{@link ExecutorService#awaitTermination(long, TimeUnit)
   * awaitTermination(long, TimeUnit)} uninterruptibly.
   *
   * @since 30.0
   */
  @GwtIncompatible // concurrency
  @SuppressWarnings("GoodTime")
  public static boolean awaitTerminationUninterruptibly(
      ExecutorService executor, long timeout, TimeUnit unit) {
    boolean interrupted = false;
    try {
      long remainingNanos = unit.toNanos(timeout);
      long end = System.nanoTime() + remainingNanos;
      while (true) {
        try {
          return executor.awaitTermination(remainingNanos, NANOSECONDS);
        } catch (InterruptedException e) {
          interrupted = true;
          remainingNanos = end - System.nanoTime();
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  // TODO(user): Add support for waitUninterruptibly.
  private Uninterruptibles() {}
}
|
package com.codernauti.sweetie.firebase;
import android.net.Uri;
import android.support.annotation.NonNull;
import android.util.Log;
import com.google.android.gms.tasks.OnFailureListener;
import com.google.android.gms.tasks.OnSuccessListener;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.ValueEventListener;
import com.google.firebase.storage.FirebaseStorage;
import com.google.firebase.storage.OnProgressListener;
import com.google.firebase.storage.StorageReference;
import com.google.firebase.storage.UploadTask;
import com.codernauti.sweetie.model.CoupleFB;
import com.codernauti.sweetie.utils.DataMaker;
import java.util.HashMap;
import java.util.Map;
public class FirebaseCoupleDetailsController {
private static final String TAG = "CoupleDetailsController";
private static final String ACTIVE_COUPLE_PARTIAL_URL = Constraints.COUPLE_INFO + "/" + Constraints.ACTIVE_COUPLE;
private static final String ARCHIVED_COUPLES_PARTIAL_URL = Constraints.COUPLE_INFO + "/" + Constraints.ARCHIVED_COUPLES;
private final DatabaseReference mDatabaseRef;
private final StorageReference mCoupleStorage;
private final DatabaseReference mCoupleRef;
private ValueEventListener mCoupleListener;
private final String mCoupleUid;
private final String mUserFuturePartnerUrl; // users/<userUid>/futurePartner
private final String mPartnerFuturePartnerUrl; // users/<partnerUid>/futurePartner
private final String mUserArchivedCouplesUrl; // users/<userUid>/coupleInfo/activeCouple/<coupleUid>
private final String mUserActiveCoupleUrl; // users/<userUid>/coupleInfo/archivedCouples
private final String mPartnerArchivedCouplesUrl; // users/<partnerUid>/coupleInfo/activeCouple/<coupleUid>
private final String mPartnerActiveCoupleUrl; // users/<partnerUid>/coupleInfo/archivedCouples
private final String mCoupleUidUrl; // couples/<coupleUid>
private CoupleDetailsControllerListener mListener;
public interface CoupleDetailsControllerListener {
void onCoupleDetailsChanged(CoupleFB couple);
}
public FirebaseCoupleDetailsController(String userUid, String partnerUid, String coupleUid) {
mCoupleUid = coupleUid;
mDatabaseRef = FirebaseDatabase.getInstance().getReference();
mCoupleStorage = FirebaseStorage.getInstance().getReference(Constraints.COUPLES_DETAILS + "/" + coupleUid);
mUserFuturePartnerUrl = Constraints.USERS + "/" + userUid + "/" + Constraints.FUTURE_PARTNER;
mPartnerFuturePartnerUrl = Constraints.USERS + "/" + partnerUid + "/" + Constraints.FUTURE_PARTNER;
mUserArchivedCouplesUrl = buildArchivedCouplesUrl(userUid, coupleUid);
mUserActiveCoupleUrl = buildActiveCoupleUrl(userUid);
mPartnerArchivedCouplesUrl = buildArchivedCouplesUrl(partnerUid, coupleUid);
mPartnerActiveCoupleUrl = buildActiveCoupleUrl(partnerUid);
mCoupleUidUrl = Constraints.COUPLES + "/" + coupleUid;
mCoupleRef = mDatabaseRef.child(mCoupleUidUrl);
}
private String buildArchivedCouplesUrl(String genericUserUid, String coupleUid) {
return Constraints.USERS + "/" + genericUserUid + "/" + ARCHIVED_COUPLES_PARTIAL_URL + "/" + coupleUid;
}
private String buildActiveCoupleUrl(String genericUserUid) {
return Constraints.USERS + "/" + genericUserUid + "/" + ACTIVE_COUPLE_PARTIAL_URL;
}
public void setListener(CoupleDetailsControllerListener listener) {
mListener = listener;
}
public void attachCoupleListener() {
if (mCoupleListener == null) {
mCoupleListener = new ValueEventListener() {
@Override
public void onDataChange(DataSnapshot dataSnapshot) {
Log.d(TAG, "onCoupleChange trigger!");
CoupleFB couple = dataSnapshot.getValue(CoupleFB.class);
if (mListener != null && couple != null) {
mListener.onCoupleDetailsChanged(couple);
}
}
@Override
public void onCancelled(DatabaseError databaseError) {
}
};
mCoupleRef.addValueEventListener(mCoupleListener);
}
}
public void detachCoupleListener() {
if (mCoupleListener != null) {
mCoupleRef.removeEventListener(mCoupleListener);
}
mCoupleListener = null;
}
public void archiveCouple() {
String now = DataMaker.get_UTC_DateTime();
Map<String, Object> updates = new HashMap<>();
// remove futurePartner field for each user
updates.put(mUserFuturePartnerUrl, null);
updates.put(mPartnerFuturePartnerUrl, null);
// added the coupleUid into archived of each partner
updates.put(mPartnerArchivedCouplesUrl, true);
updates.put(mUserArchivedCouplesUrl, true);
// set to null the activeCouple of each partner
updates.put(mPartnerActiveCoupleUrl, null);
updates.put(mUserActiveCoupleUrl, null);
// set to false the active couple of coupleUid and push break time
updates.put(mCoupleUidUrl + "/" + Constraints.Couples.ACTIVE, false);
updates.put(mCoupleUidUrl + "/" + Constraints.Couples.BREAK_TIME, now);
mDatabaseRef.updateChildren(updates);
}
public void changeCoupleImage(Uri imageLocalUri) {
mCoupleRef.child(Constraints.Couples.UPLOADING_IMG).setValue(true);
String imageName = DataMaker.get_UTC_DateTime() + mCoupleUid;
UploadTask uploadTask = mCoupleStorage.child(imageName).putFile(imageLocalUri);
uploadTask.addOnFailureListener(new OnFailureListener() {
@Override
public void onFailure(@NonNull Exception exception) {
Log.e(TAG, "onFailure sendFileFirebase " + exception.getMessage());
Map<String, Object> updates = new HashMap<>();
updates.put(Constraints.Couples.PROGRESS, null);
updates.put(Constraints.Couples.UPLOADING_IMG, false);
}
}).addOnSuccessListener(new OnSuccessListener<UploadTask.TaskSnapshot>() {
@Override
public void onSuccess(UploadTask.TaskSnapshot taskSnapshot) {
Log.d(TAG, "onSuccess sendImage(): " + taskSnapshot.getDownloadUrl() +"\n" + "update into uri: " + mCoupleRef);
Uri downloadUrl = taskSnapshot.getDownloadUrl();
String imageStorageUriString = downloadUrl.toString();
Map<String, Object> updates = new HashMap<>();
updates.put(Constraints.Couples.IMAGE_STORAGE_URI, imageStorageUriString);
updates.put(Constraints.Couples.PROGRESS, null);
updates.put(Constraints.Couples.UPLOADING_IMG, false);
mCoupleRef.updateChildren(updates);
}
}).addOnProgressListener(new OnProgressListener<UploadTask.TaskSnapshot>() {
@Override
public void onProgress(UploadTask.TaskSnapshot taskSnapshot) {
double progress = (100.0 * taskSnapshot.getBytesTransferred()) / taskSnapshot.getTotalByteCount();
Log.d(TAG, "Upload image progress: " + progress);
mCoupleRef.child(Constraints.Couples.PROGRESS).setValue(progress);
}
});
}
public void changeAnniversaryDate(String anniversary) {
mCoupleRef.child(Constraints.Couples.ANNIVERSARY).setValue(anniversary);
}
}
|
//#####################################################################
// Copyright 2004-2005, <NAME>, <NAME>, <NAME>, <NAME>.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
#include <PhysBAM_Tools/Log/LOG.h>
#include <PhysBAM_Tools/Read_Write/Utilities/FILE_UTILITIES.h>
#include <PhysBAM_Geometry/Read_Write/Grids_Uniform_Level_Sets/READ_WRITE_LEVELSET_2D.h>
#include <PhysBAM_Rendering/PhysBAM_OpenGL/OpenGL/OPENGL_GRID_2D.h>
#include <PhysBAM_Rendering/PhysBAM_OpenGL/OpenGL/OPENGL_INDEXED_COLOR_MAP.h>
#include <PhysBAM_Rendering/PhysBAM_OpenGL/OpenGL_Components/OPENGL_COMPONENT_CHIMERA_LEVELSET_2D.h>
#include <PhysBAM_Rendering/PhysBAM_OpenGL/OpenGL_Components/OPENGL_COMPONENT_PARTICLES_2D.h>
using namespace PhysBAM;
//#####################################################################
// Builds one OPENGL_LEVELSET_2D per grid found on disk for the filename
// pattern (filled as (frame, grid_index)).
// NOTE(review): 'frame' is an inherited OPENGL_COMPONENT member used in the
// probe loop below; this assumes the base class initializes it before the
// body runs -- confirm in OPENGL_COMPONENT.
template<class T,class RW> OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
OPENGL_COMPONENT_CHIMERA_LEVELSET_2D(const std::string filename_set_input,bool is_moving_grid_input,const std::string rigid_grid_frame_filename_set_input)
    :OPENGL_COMPONENT("Chimera Levelset 2D"),opengl_levelset(0),filename_set(filename_set_input),frame_loaded(-1),current_grid(1),valid(false),draw_all_grids(true),is_moving_grid(is_moving_grid_input),rigid_grid_frame_filename_set(rigid_grid_frame_filename_set_input)
{
    // Count consecutive grids by probing files until the first missing one.
    int number_of_grids=0;
    while(filename_set!=""){
        std::string filename=STRING_UTILITIES::string_sprintf(filename_set.c_str(),frame,number_of_grids+1);
        if(FILE_UTILITIES::File_Exists(filename)) number_of_grids++;else break;}
    LOG::cout<<"Found "<<number_of_grids<<" grids."<<std::endl;
    opengl_levelsets.Resize(number_of_grids);
    // Each slot owns a freshly allocated grid and phi array; the destructor
    // deletes them again.
    for(int j=1;j<=opengl_levelsets.m;j++)
        opengl_levelsets(j)=new OPENGL_LEVELSET_2D<T>(*(new LEVELSET_2D<GRID<TV> >(*(new GRID<TV>),*(new ARRAY<T,VECTOR<int,2> >))),OPENGL_COLOR::Blue(),OPENGL_COLOR::Red((T).5),0,true);
    opengl_levelset=opengl_levelsets(1);
    is_animation=true;
    Reinitialize();
}
// Frees the grid, phi array and levelset allocated per slot in the
// constructor (they are held by reference inside OPENGL_LEVELSET_2D, hence
// the explicit deletes here).
template<class T,class RW> OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
~OPENGL_COMPONENT_CHIMERA_LEVELSET_2D()
{
    for(int j=1;j<=opengl_levelsets.m;j++){
        delete &opengl_levelsets(j)->levelset.grid;
        delete &opengl_levelsets(j)->levelset.phi;
        delete &opengl_levelsets(j)->levelset;}
}
// Reports whether levelset data exists on disk for the requested frame of the
// currently selected grid.
// Bug fix: the format arguments were passed as (current_grid, frame_input),
// but everywhere else in this file (constructor, Reinitialize) the filename
// pattern is filled as (frame, grid_index) -- so the wrong file was probed.
template<class T,class RW> bool OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Valid_Frame(int frame_input) const
{
    return FILE_UTILITIES::File_Exists(STRING_UTILITIES::string_sprintf(filename_set.c_str(),frame_input,current_grid));
}
// Records the new frame in the base component, then reloads data for it.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Set_Frame(int frame_input)
{
    OPENGL_COMPONENT::Set_Frame(frame_input);
    Reinitialize();
}
// Toggles drawing in the base component and (re)loads data if now needed.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Set_Draw(bool draw_input)
{
    OPENGL_COMPONENT::Set_Draw(draw_input);
    Reinitialize();
}
// Draws every levelset, or only the currently selected one, depending on
// the draw_all_grids flag. Nothing is drawn without valid loaded data.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Display(const int in_color) const
{
    if(!valid || !draw) return;
    if(!draw_all_grids){opengl_levelset->Display(in_color);return;}
    for(int i=1;i<=opengl_levelsets.m;++i) opengl_levelsets(i)->Display(in_color);
}
// Bounding box of the selected levelset; a centered default box when there
// is no valid data to bound.
template<class T,class RW> RANGE<VECTOR<float,3> > OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Bounding_Box() const
{
    if(!valid || !draw) return RANGE<VECTOR<float,3> >::Centered_Box();
    return opengl_levelset->Bounding_Box();
}
// Prints phi (and, for cell selections, curvature) of the selected levelset
// at the current viewer selection.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Print_Selection_Info(std::ostream& output_stream,OPENGL_SELECTION* current_selection) const
{
    if(Is_Up_To_Date(frame)){
        // Cell selections are only meaningful on MAC grids.
        if(current_selection && current_selection->type==OPENGL_SELECTION::GRID_CELL_2D && opengl_levelset->levelset.grid.Is_MAC_Grid()){
            VECTOR<int,2> index=((OPENGL_SELECTION_GRID_CELL_2D<T>*)current_selection)->index;
            output_stream<<component_name<<": phi="<<opengl_levelset->levelset.phi(index)
                <<" curvature="<<opengl_levelset->levelset.Compute_Curvature(opengl_levelset->levelset.grid.Center(index))<<std::endl;}
        // Node selections are only meaningful on non-MAC (node-based) grids.
        if(current_selection && current_selection->type==OPENGL_SELECTION::GRID_NODE_2D && !opengl_levelset->levelset.grid.Is_MAC_Grid()){
            VECTOR<int,2> index=((OPENGL_SELECTION_GRID_NODE_2D<T>*)current_selection)->index;
            output_stream<<component_name<<": phi="<<opengl_levelset->levelset.phi(index)<<std::endl;}
        // Particle selections: evaluate phi at the particle's world position.
        if(current_selection && current_selection->type==OPENGL_SELECTION::COMPONENT_PARTICLES_2D){
            OPENGL_SELECTION_COMPONENT_PARTICLES_2D<T> *selection=(OPENGL_SELECTION_COMPONENT_PARTICLES_2D<T>*)current_selection;
            VECTOR<T,2> location=selection->location;
            output_stream<<component_name<<": phi @ particle="<<opengl_levelset->levelset.Phi(location)<<std::endl;}}
}
// Re-reads levelset data (and, for moving grids, the per-grid rigid frames)
// from disk whenever the displayed frame changed or nothing is loaded yet.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Reinitialize(const bool force_even_if_not_drawn)
{
    if (draw||force_even_if_not_drawn){
        if ((is_animation && (frame_loaded!=frame)) || (!is_animation && frame_loaded<0)){
            if(is_moving_grid){
                // Rigid grid frames are optional; missing files are skipped.
                for(int i=1;i<=opengl_levelsets.Size();i++){
                    std::string grid_filename=STRING_UTILITIES::string_sprintf(rigid_grid_frame_filename_set.c_str(),frame,i);
                    if(FILE_UTILITIES::File_Exists(grid_filename)){
                        FILE_UTILITIES::Read_From_File<T>(grid_filename,opengl_levelsets(i)->rigid_grid_frame);}}}
            valid=false;std::string filename;
            for(int i=1;i<=opengl_levelsets.m;i++){
                filename=STRING_UTILITIES::string_sprintf(filename_set.c_str(),frame,i);
                if(FILE_UTILITIES::File_Exists(filename)) FILE_UTILITIES::Read_From_File<RW>(filename.c_str(),opengl_levelsets(i)->levelset);
                // Any missing file aborts the reload: valid stays false and
                // frame_loaded is left unchanged so a later call retries.
                else return;
                opengl_levelsets(i)->Update();}
            frame_loaded=frame;valid=true;}}
}
// Cycles the color map of every levelset in the set.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Toggle_Color_Mode()
{
    const int n=opengl_levelsets.m;
    for(int i=1;i<=n;++i) opengl_levelsets(i)->Toggle_Color_Map();
}
// Flips smooth-texture rendering on every levelset in the set.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Toggle_Smooth()
{
    for(int i=1;i<=opengl_levelsets.m;++i){
        OPENGL_LEVELSET_2D<T>* levelset=opengl_levelsets(i);
        levelset->Toggle_Smooth_Texture();}
}
// Flips normal display on every levelset in the set.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Toggle_Normals()
{
    for(int i=1;i<=opengl_levelsets.m;++i){
        OPENGL_LEVELSET_2D<T>* levelset=opengl_levelsets(i);
        levelset->Toggle_Normals();}
}
// Cycles each levelset through the 8 combinations of the (area, curve,
// cells) drawing flags.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Toggle_Draw_Mode()
{
    for(int j=1;j<=opengl_levelsets.m;j++){
        // Pack the three flags into bits 2..0 of a mask.
        int mask=((int)opengl_levelsets(j)->draw_area<<2) | ((int)opengl_levelsets(j)->draw_curve<<1) | ((int)opengl_levelsets(j)->draw_cells);
        // Advance to the next state: mask 7 maps to 8, whose low three bits
        // are all clear, so the cycle wraps back to "nothing drawn".
        int newmask=(mask%8)+1;
        opengl_levelsets(j)->draw_area=(newmask&4)!=0;
        opengl_levelsets(j)->draw_curve=(newmask&2)!=0;
        opengl_levelsets(j)->draw_cells=(newmask&1)!=0;
        opengl_levelsets(j)->Update();}
}
// Flips which sign of phi dominates the rendering, then rebuilds each
// levelset's display data.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Toggle_Draw_Sign()
{
    for(int i=1;i<=opengl_levelsets.m;++i){
        OPENGL_LEVELSET_2D<T>* levelset=opengl_levelsets(i);
        levelset->dominant_sign=(levelset->dominant_sign==1)?-1:1;
        levelset->Update();}
}
// Selects the next grid, clamping at the last one.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Next_Grid()
{
    if(current_grid<opengl_levelsets.m) ++current_grid;
    LOG::cout<<"viewing levelset "<<current_grid<<std::endl;
    opengl_levelset=opengl_levelsets(current_grid);
}
// Selects the previous grid, clamping at the first one.
// Fix: the log message said "levelsetset"; corrected to match Next_Grid.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Previous_Grid()
{
    current_grid=max(current_grid-1,1);
    LOG::cout<<"viewing levelset "<<current_grid<<std::endl;
    opengl_levelset=opengl_levelsets(current_grid);
}
// Switches between drawing every grid and drawing only the selected one.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Toggle_Draw_All_Grids()
{
    draw_all_grids=draw_all_grids?false:true;
}
// Flips ghost-value drawing on every levelset in the set.
template<class T,class RW> void OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<T,RW>::
Toggle_Draw_Ghost_Values()
{
    for(int i=1;i<=opengl_levelsets.m;++i){
        OPENGL_LEVELSET_2D<T>* levelset=opengl_levelsets(i);
        levelset->Toggle_Draw_Ghost_Values();}
}
// Explicit instantiations for the supported scalar types.
template class OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<float,float>;
#ifndef COMPILE_WITHOUT_DOUBLE_SUPPORT
template class OPENGL_COMPONENT_CHIMERA_LEVELSET_2D<double,double>;
#endif
|
package vcs.citydb.wfs.operation.getpropertyvalue;
import net.opengis.ows._1.ExceptionReport;
import net.opengis.wfs._2.GetPropertyValueType;
import net.opengis.wfs._2.ResultTypeType;
import net.opengis.wfs._2.TruncatedResponse;
import org.citydb.core.database.connection.DatabaseConnectionPool;
import org.citydb.core.database.schema.mapping.AbstractObjectType;
import org.citydb.core.database.schema.mapping.MappingConstants;
import org.citydb.core.database.schema.mapping.SchemaMapping;
import org.citydb.core.operation.exporter.database.content.DBSplittingResult;
import org.citydb.core.operation.exporter.util.InternalConfig;
import org.citydb.core.operation.exporter.writer.FeatureWriteException;
import org.citydb.core.query.Query;
import org.citydb.core.registry.ObjectRegistry;
import org.citydb.sqlbuilder.select.Select;
import org.citydb.util.concurrent.WorkerPool;
import org.citydb.util.event.Event;
import org.citydb.util.event.EventDispatcher;
import org.citydb.util.event.EventHandler;
import org.citydb.util.event.global.EventType;
import org.citydb.util.event.global.InterruptEvent;
import org.citygml4j.builder.jaxb.CityGMLBuilder;
import vcs.citydb.wfs.config.Constants;
import vcs.citydb.wfs.config.WFSConfig;
import vcs.citydb.wfs.exception.WFSException;
import vcs.citydb.wfs.exception.WFSExceptionCode;
import vcs.citydb.wfs.exception.WFSExceptionReportHandler;
import vcs.citydb.wfs.kvp.KVPConstants;
import vcs.citydb.wfs.paging.PageRequest;
import vcs.citydb.wfs.paging.PagingCacheManager;
import vcs.citydb.wfs.util.xml.NamespaceFilter;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
/**
 * Executes a WFS 2.0 GetPropertyValue request against the 3DCityDB.
 * <p>
 * The executer determines the number of matched and returned property values,
 * builds and runs the SQL query, feeds the matching features to the database
 * worker pool and drives the {@link PropertyValueWriter}. It also maintains
 * the optional result-paging cache and converts low-level failures into
 * {@link WFSException}s. Worker-pool failures are received asynchronously via
 * the interrupt event channel and reported as a truncated response.
 */
public class QueryExecuter implements EventHandler {
    private final PropertyValueWriter writer;
    private final WorkerPool<DBSplittingResult> databaseWorkerPool;
    private final Object eventChannel;
    private final DatabaseConnectionPool connectionPool;
    private final CityGMLBuilder cityGMLBuilder;
    private final InternalConfig internalConfig;
    private final long countDefault;
    private final boolean computeNumberMatched;
    private final boolean usePaging;
    private final PagingCacheManager pagingCacheManager;
    private final SchemaMapping schemaMapping;
    private final QueryBuilder queryBuilder;
    private final EventDispatcher eventDispatcher;

    // set asynchronously from handleEvent() when a worker signals a failure
    private WFSException wfsException;
    private volatile boolean shouldRun = true;

    public QueryExecuter(
            PropertyValueWriter writer,
            WorkerPool<DBSplittingResult> databaseWorkerPool,
            Object eventChannel,
            DatabaseConnectionPool connectionPool,
            CityGMLBuilder cityGMLBuilder,
            InternalConfig internalConfig,
            WFSConfig wfsConfig) {
        this.writer = writer;
        this.databaseWorkerPool = databaseWorkerPool;
        this.eventChannel = eventChannel;
        this.connectionPool = connectionPool;
        this.cityGMLBuilder = cityGMLBuilder;
        this.internalConfig = internalConfig;

        countDefault = wfsConfig.getConstraints().getCountDefault();
        computeNumberMatched = wfsConfig.getConstraints().isComputeNumberMatched();
        usePaging = wfsConfig.getConstraints().isUseResultPaging();
        pagingCacheManager = ObjectRegistry.getInstance().lookup(PagingCacheManager.class);
        schemaMapping = ObjectRegistry.getInstance().getSchemaMapping();
        queryBuilder = new QueryBuilder(connectionPool.getActiveDatabaseAdapter(), schemaMapping);
        eventDispatcher = ObjectRegistry.getInstance().getEventDispatcher();
        eventDispatcher.addEventHandler(EventType.INTERRUPT, this);
    }

    /**
     * Runs the GetPropertyValue query and streams the result into the writer.
     *
     * @param wfsRequest      the parsed GetPropertyValue request
     * @param queryExpression the compiled query expression
     * @param namespaceFilter namespace filter used when caching the page request
     * @param pageRequest     existing page request, or {@code null} to create one
     * @param dummy           exporter query that receives the filter configuration
     * @param request         servlet request used to build paging links
     * @throws WFSException if querying, writing or paging-cache maintenance fails
     */
    public void executeQuery(GetPropertyValueType wfsRequest, QueryExpression queryExpression, NamespaceFilter namespaceFilter, PageRequest pageRequest, Query dummy, HttpServletRequest request) throws WFSException {
        boolean purgeConnectionPool = false;
        boolean invalidatePageRequest = false;

        // get standard request parameters (count is capped by the server-side default)
        long count = wfsRequest.isSetCount() && wfsRequest.getCount().longValue() < countDefault ? wfsRequest.getCount().longValue() : countDefault;
        long startIndex = wfsRequest.isSetStartIndex() ? wfsRequest.getStartIndex().longValue() : 0;
        ResultTypeType resultType = wfsRequest.getResultType();

        // create paging request
        if (pageRequest == null) {
            pageRequest = usePaging && count != Constants.COUNT_DEFAULT ?
                    pagingCacheManager.create(wfsRequest, queryExpression, namespaceFilter) :
                    PageRequest.dummy();
        }

        try (Connection connection = initConnection()) {
            long numberMatched = getNumberMatched(queryExpression, resultType, connection);
            long numberReturned = numberMatched != Constants.UNKNOWN_NUMBER_MATCHED ?
                    Math.min(Math.max(numberMatched - startIndex, 0), count) :
                    getNumberReturned(queryExpression, count, startIndex, pageRequest.getPageNumber(), connection);

            // get property values
            if (numberReturned > 0 && resultType == ResultTypeType.RESULTS) {
                Select query = queryBuilder.buildQuery(queryExpression, startIndex, count, numberReturned, pageRequest.getPageNumber());
                setExporterContext(queryExpression, dummy);

                try (PreparedStatement stmt = connectionPool.getActiveDatabaseAdapter().getSQLAdapter().prepareStatement(query, connection);
                     ResultSet rs = stmt.executeQuery()) {
                    writer.startValueCollection(numberMatched, numberReturned,
                            pageRequest.getPageNumber() > 0 ? pageRequest.previous(request) : null,
                            numberReturned == count && numberMatched - startIndex - numberReturned > 0 ? pageRequest.next(request) : null);

                    if (rs.next()) {
                        long initialId, currentId;
                        long nextId = initialId = rs.getLong(MappingConstants.ID);
                        long propertyOffset = queryExpression.getPropertyOffset();
                        writer.setInitialPropertyOffset((int) propertyOffset);
                        int propertyCount;
                        long sequenceId = 0;

                        // outer loop: one iteration per feature; inner loop: one per property row
                        do {
                            currentId = nextId;
                            int objectClassId = rs.getInt(MappingConstants.OBJECTCLASS_ID);
                            propertyCount = 0;

                            do {
                                // check property offset for first feature
                                if (sequenceId == 0 && propertyOffset-- > 0)
                                    continue;

                                propertyCount++;
                            } while (rs.next() && (nextId = rs.getLong(MappingConstants.ID)) == currentId);

                            // skip first feature if no properties shall be exported
                            if (sequenceId == 0 && propertyCount == 0)
                                continue;

                            AbstractObjectType<?> type = schemaMapping.getAbstractObjectType(objectClassId);
                            if (type == null) {
                                throw new WFSException(WFSExceptionCode.OPERATION_PROCESSING_FAILED,
                                        "Failed to map the object class id '" + objectClassId + "' to an object type (ID: " + currentId + ").");
                            }

                            // put feature on worker queue
                            writer.setPropertyCount(sequenceId, propertyCount);
                            DBSplittingResult splitter = new DBSplittingResult(currentId, type, sequenceId++);
                            databaseWorkerPool.addWork(splitter);
                        } while (shouldRun && currentId != nextId);

                        // remember where the next page has to resume
                        if (usePaging) {
                            if (queryExpression.supportsPagingByStartId()) {
                                queryExpression.setStartId(currentId);
                            }

                            propertyOffset = propertyCount;
                            if (currentId == initialId) {
                                propertyOffset += queryExpression.getPropertyOffset();
                            }

                            queryExpression.setPropertyOffset(propertyOffset);
                        }
                    }
                }

                // shutdown database worker pool
                databaseWorkerPool.shutdownAndWait();

                // write truncated response if a worker pool has thrown an error
                if (wfsException != null) {
                    writer.writeTruncatedResponse(getTruncatedResponse(wfsException, request));
                    invalidatePageRequest = true;
                }
            } else {
                // no results returned
                // fixed: use logical '&&' instead of the non-short-circuit bitwise '&'
                writer.startValueCollection(numberMatched, 0, null,
                        resultType == ResultTypeType.HITS && numberMatched > 0 ? pageRequest.first(request) : null);
                invalidatePageRequest = numberMatched == 0 && pageRequest.getPageNumber() == 0;
            }

            writer.endValueCollection();

            // update paging cache
            if (usePaging && !invalidatePageRequest) {
                try {
                    pagingCacheManager.update(pageRequest);
                } catch (IOException e) {
                    invalidatePageRequest = true;
                    throw new WFSException(WFSExceptionCode.OPERATION_PROCESSING_FAILED, "Failed to update the paging cache.", e);
                }
            }
        } catch (SQLException e) {
            purgeConnectionPool = true;
            invalidatePageRequest = true;
            throw new WFSException(WFSExceptionCode.OPERATION_PROCESSING_FAILED, "A fatal SQL error occurred whilst querying the database.", e);
        } catch (FeatureWriteException | InterruptedException e) {
            invalidatePageRequest = true;
            throw new WFSException(WFSExceptionCode.OPERATION_PROCESSING_FAILED, "A fatal error occurred whilst marshalling the response document.", e);
        } finally {
            // purge connection pool to remove possibly defect connections
            if (purgeConnectionPool)
                connectionPool.purge();

            // invalidate paging cache
            if (usePaging && invalidatePageRequest)
                pagingCacheManager.remove(pageRequest);

            eventDispatcher.removeEventHandler(this);
        }
    }

    /** Configures the exporter for this query (coordinate transformation flag and filters). */
    private void setExporterContext(QueryExpression queryExpression, Query dummy) {
        // set flag for coordinate transformation
        internalConfig.setTransformCoordinates(queryExpression.getTargetSrs().getSrid() != connectionPool.getActiveDatabaseAdapter().getConnectionMetaData().getReferenceSystem().getSrid());

        // update filter configuration
        dummy.copyFrom(queryExpression);
    }

    /** Wraps the given exception into a WFS 2.0 truncated response element. */
    private TruncatedResponse getTruncatedResponse(WFSException wfsException, HttpServletRequest request) {
        WFSExceptionReportHandler reportHandler = new WFSExceptionReportHandler(cityGMLBuilder);
        ExceptionReport exceptionReport = reportHandler.getExceptionReport(wfsException, KVPConstants.GET_PROPERTY_VALUE, request, true);
        TruncatedResponse truncatedResponse = new TruncatedResponse();
        truncatedResponse.setExceptionReport(exceptionReport);
        return truncatedResponse;
    }

    /**
     * Returns (and caches on the query expression) the total number of matches.
     * The count query is only executed for resultType=hits or when the server
     * is configured to compute numberMatched; otherwise the sentinel
     * {@code Constants.UNKNOWN_NUMBER_MATCHED} is used.
     */
    private long getNumberMatched(QueryExpression queryExpression, ResultTypeType resultType, Connection connection) throws WFSException, SQLException {
        if (!queryExpression.isSetNumberMatched()) {
            if (computeNumberMatched || resultType == ResultTypeType.HITS) {
                Select query = queryBuilder.buildNumberMatchedQuery(queryExpression);
                try (PreparedStatement stmt = connectionPool.getActiveDatabaseAdapter().getSQLAdapter().prepareStatement(query, connection);
                     ResultSet rs = stmt.executeQuery()) {
                    queryExpression.setNumberMatched(rs.next() ? rs.getLong(1) : 0);
                }
            } else
                queryExpression.setNumberMatched(Constants.UNKNOWN_NUMBER_MATCHED);
        }

        return queryExpression.getNumberMatched();
    }

    /** Computes the number of values returned for the current page, adjusted by the property offset. */
    private long getNumberReturned(QueryExpression queryExpression, long count, long startIndex, long pageNumber, Connection connection) throws WFSException, SQLException {
        Select query = queryBuilder.buildNumberReturnedQuery(queryExpression, count, startIndex, pageNumber);
        try (PreparedStatement stmt = connectionPool.getActiveDatabaseAdapter().getSQLAdapter().prepareStatement(query, connection);
             ResultSet rs = stmt.executeQuery()) {
            return rs.next() ? Math.max(rs.getLong(1) - queryExpression.getPropertyOffset(), 0) : 0;
        }
    }

    /** Obtains a pooled connection with auto-commit disabled. */
    private Connection initConnection() throws SQLException {
        Connection connection = connectionPool.getConnection();
        connection.setAutoCommit(false);
        return connection;
    }

    /**
     * Receives interrupt events from the worker pool sharing our event channel
     * and records the failure so the main loop can stop and report it.
     */
    @Override
    public void handleEvent(Event event) throws Exception {
        if (event.getChannel() == eventChannel) {
            wfsException = new WFSException(WFSExceptionCode.OPERATION_PROCESSING_FAILED, ((InterruptEvent)event).getLogMessage(), ((InterruptEvent)event).getCause());
            shouldRun = false;
        }
    }
}
|
<filename>_src/six_seven/eclipse/SpringFreshFruitsStore/src/it/freshfruits/domain/entity/BaseEntity.java
package it.freshfruits.domain.entity;
/**
 * Common contract for persistent domain entities that expose a database identifier.
 */
public interface BaseEntity {
    /** @return the identifier of this entity */
    Integer getId();
}
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

# Picks the appropriate docker-compose arguments to use when bringing up and
# down integration test clusters for a given test group. The choice depends on
# DRUID_INTEGRATION_TEST_INDEXER, DRUID_INTEGRATION_TEST_GROUP and DOCKERDIR.
getComposeArgs()
{
  # Sanity check: DRUID_INTEGRATION_TEST_INDEXER must be "indexer" or "middleManager"
  case "$DRUID_INTEGRATION_TEST_INDEXER" in
    indexer|middleManager)
      ;;
    *)
      echo "DRUID_INTEGRATION_TEST_INDEXER must be 'indexer' or 'middleManager' (is '$DRUID_INTEGRATION_TEST_INDEXER')"
      exit 1
      ;;
  esac

  if [ "$DRUID_INTEGRATION_TEST_INDEXER" = "indexer" ]
  then
    case "$DRUID_INTEGRATION_TEST_GROUP" in
      security|ldap-security|query-retry|query-error|high-availability)
        # Sanity check: cannot combine CliIndexer tests with security, query-retry tests
        echo "Cannot run test group '$DRUID_INTEGRATION_TEST_GROUP' with CliIndexer"
        exit 1
        ;;
      kafka-data-format)
        # Replace MiddleManager with Indexer + schema registry container
        echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml -f ${DOCKERDIR}/docker-compose.schema-registry-indexer.yml"
        ;;
      *)
        # Replace MiddleManager with Indexer
        echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml"
        ;;
    esac
    return
  fi

  case "$DRUID_INTEGRATION_TEST_GROUP" in
    security)
      # default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls)
      echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.security.yml"
      ;;
    ldap-security)
      # default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls)
      echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.ldap-security.yml"
      ;;
    query-retry)
      # default + additional historical modified for query retry test
      # See CliHistoricalForQueryRetryTest.
      echo "-f ${DOCKERDIR}/docker-compose.query-retry-test.yml"
      ;;
    query-error)
      # default + additional historical modified for query error test
      echo "-f ${DOCKERDIR}/docker-compose.query-error-test.yml"
      ;;
    high-availability)
      # the 'high availability' test cluster with multiple coordinators and overlords
      echo "-f ${DOCKERDIR}/docker-compose.high-availability.yml"
      ;;
    kafka-data-format|kinesis-data-format)
      # default + schema registry container
      echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml"
      ;;
    *)
      # default
      echo "-f ${DOCKERDIR}/docker-compose.yml"
      ;;
  esac
}
|
#!/usr/bin/env bash
set -e

# Resolve the repository root relative to this script (net/..)
here=$(dirname "$0")
SOLANA_ROOT="$(cd "$here"/..; pwd)"

# shellcheck source=net/common.sh
source "$here"/common.sh
# Print command-line help and exit. With no argument the exit code is 0; when
# called with an error message, the message is echoed first and the exit code is 1.
usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi

  CLIENT_OPTIONS=$(cat << EOM
-c clientType=numClients=extraArgs - Number of clientTypes to start. This options can be specified
                                     more than once. Defaults to bench-tps for all clients if not
                                     specified.
                                     Valid client types are:
                                         idle
                                         bench-tps
                                     User can optionally provide extraArgs that are transparently
                                     supplied to the client program as command line parameters.
                                     For example,
                                         -c bench-tps=2="--tx_count 25000"
                                     This will start 2 bench-tps clients, and supply "--tx_count 25000"
                                     to the bench-tps client.
EOM
)
  cat <<EOF
usage: $0 [start|stop|restart|sanity] [command-specific options]

Operate a configured testnet

 start        - Start the network
 sanity       - Sanity check the network
 stop         - Stop the network
 restart      - Shortcut for stop then start
 logs         - Fetch remote logs from each network node
 startnode    - Start an individual node (previously stopped with stopNode)
 stopnode     - Stop an individual node
 startclients - Start client nodes only
 prepare      - Prepare software deployment. (Build/download the software release)
 update       - Deploy a new software update to the cluster
 upgrade      - Upgrade software on bootstrap validator. (Restart bootstrap validator manually to run it)

 start-specific options:
   -T [tarFilename]                   - Deploy the specified release tarball
   -t edge|beta|stable|vX.Y.Z         - Deploy the latest tarball release for the
                                        specified release channel (edge|beta|stable) or release tag
                                        (vX.Y.Z)
   -r / --skip-setup                  - Reuse existing node/ledger configuration from a
                                        previous |start| (ie, don't run ./multinode-demo/setup.sh).
   -d / --debug                       - Build/deploy the testnet with debug binaries
$CLIENT_OPTIONS
   --client-delay-start
                                      - Number of seconds to wait after validators have finished starting before starting client programs
                                        (default: $clientDelayStart)
   -n NUM_VALIDATORS                  - Number of validators to apply command to.
   --gpu-mode GPU_MODE                - Specify GPU mode to launch validators with (default: $gpuMode).
                                        MODE must be one of
                                          on - GPU *required*, any vendor *
                                          off - No GPU, CPU-only
                                          auto - Use GPU if available, any vendor *
                                          cuda - GPU *required*, Nvidia CUDA only
                                          * Currently, Nvidia CUDA is the only supported GPU vendor
   --hashes-per-tick NUM_HASHES|sleep|auto
                                      - Override the default --hashes-per-tick for the cluster
   --no-airdrop
                                      - If set, disables the faucet keypair. Nodes must be funded in genesis config
   --faucet-lamports NUM_LAMPORTS_TO_MINT
                                      - Override the default 500000000000000000 lamports minted in genesis
   --extra-primordial-stakes NUM_EXTRA_PRIMORDIAL_STAKES
                                      - Number of extra nodes to be initially staked in genesis.
                                        Implies --wait-for-supermajority 1 --async-node-init and the supermajority
                                        wait slot may be overridden with the corresponding flag
   --internal-nodes-stake-lamports NUM_LAMPORTS_PER_NODE
                                      - Amount to stake internal nodes.
   --internal-nodes-lamports NUM_LAMPORTS_PER_NODE
                                      - Amount to fund internal nodes in genesis config.
   --external-accounts-file FILE_PATH
                                      - A YML file with a list of account pubkeys and corresponding lamport balances
                                        in genesis config for external nodes
   --no-snapshot-fetch
                                      - If set, disables booting validators from a snapshot
   --skip-poh-verify
                                      - If set, validators will skip verifying
                                        the ledger they already have saved to disk at
                                        boot (results in a much faster boot)
   --no-deploy
                                      - Don't deploy new software, use the
                                        existing deployment
   --no-build
                                      - Don't build new software, deploy the
                                        existing binaries
   --deploy-if-newer                  - Only deploy if newer software is
                                        available (requires -t or -T)
   --cluster-type development|devnet|testnet|mainnet-beta
                                      - Specify whether or not to launch the cluster in "development" mode with all features enabled at epoch 0,
                                        or various other live clusters' feature set (default: development)
   --slots-per-epoch SLOTS
                                      - Override the number of slots in an epoch
   --warp-slot WARP_SLOT
                                      - Boot from a snapshot that has warped ahead to WARP_SLOT rather than a slot 0 genesis.
 sanity/start-specific options:
   -F                   - Discard validator nodes that didn't bootup successfully
   -o noInstallCheck    - Skip solana-install sanity
   -o rejectExtraNodes  - Require the exact number of nodes

 stop-specific options:
   none

 logs-specific options:
   none

 netem-specific options:
   --config            - Netem configuration (as a double quoted string)
   --partition         - Percentage of network that should be configured with netem
   --config-file       - Configuration file for partition and netem configuration
   --netem-cmd         - Optional command argument to netem. Default is "add". Use "cleanup" to remove rules.

 update-specific options:
   --platform linux|osx|windows       - Deploy the tarball using 'solana-install deploy ...' for the
                                        given platform (multiple platforms may be specified)
                                        (-t option must be supplied as well)

 startnode/stopnode-specific options:
   -i [ip address]                    - IP Address of the node to start or stop

 startclients-specific options:
$CLIENT_OPTIONS

Note: if RUST_LOG is set in the environment it will be propagated into the
network nodes.
EOF
  exit $exitcode
}
initLogDir() { # Initializes the netLogDir global variable. Idempotent
  [[ -z $netLogDir ]] || return 0

  netLogDir="$netDir"/log
  declare netLogDateDir
  # Each run logs into a timestamped directory; net/log is a symlink to the latest one
  netLogDateDir="$netDir"/log-$(date +"%Y-%m-%d_%H_%M_%S")
  if [[ -d $netLogDir && ! -L $netLogDir ]]; then
    # Legacy layout: net/log was a real directory, not a symlink
    echo "Warning: moving $netLogDir to make way for symlink."
    mv "$netLogDir" "$netDir"/log.old
  elif [[ -L $netLogDir ]]; then
    rm "$netLogDir"
  fi
  mkdir -p "$netConfigDir" "$netLogDateDir"
  ln -sf "$netLogDateDir" "$netLogDir"
  echo "Log directory: $netLogDateDir"
}
# Forward the given arguments to buildkite-agent annotate, but only when
# running inside a Buildkite job (no-op otherwise).
annotate() {
  if [[ -n $BUILDKITE ]]; then
    buildkite-agent annotate "$@"
  fi
}
# Publish the block explorer URL as a build annotation when a blockstreamer
# node is configured; does nothing when the list is empty.
annotateBlockexplorerUrl() {
  declare blockstreamer=${blockstreamerIpList[0]}

  [[ -z $blockstreamer ]] || annotate --style info --context blockexplorer-url "Block explorer: http://$blockstreamer/"
}
# Build the validator binaries into farf/ (used for deployMethod=local).
# Builds inside a Docker toolchain container unless the host is a supported
# Ubuntu release, then records the build provenance in farf/version.yml.
build() {
  supported=("20.04")
  declare MAYBE_DOCKER=
  if [[ $(uname) != Linux || ! " ${supported[*]} " =~ $(lsb_release -sr) ]]; then
    # shellcheck source=ci/rust-version.sh
    source "$SOLANA_ROOT"/ci/rust-version.sh
    MAYBE_DOCKER="ci/docker-run.sh $rust_stable_docker_image"
  fi
  SECONDS=0
  (
    cd "$SOLANA_ROOT"
    echo "--- Build started at $(date)"

    set -x
    rm -rf farf
    buildVariant=
    if $debugBuild; then
      buildVariant=--debug
    fi
    $MAYBE_DOCKER bash -c "
      set -ex
      scripts/cargo-install-all.sh farf $buildVariant --validator-only
    "
  )

  # Record channel/commit metadata; best-effort (set +e) since the tree may
  # not be a git checkout or have a tag
  (
    set +e
    COMMIT="$(git rev-parse HEAD)"
    BRANCH="$(git rev-parse --abbrev-ref HEAD)"
    TAG="$(git describe --exact-match --tags HEAD 2>/dev/null)"
    if [[ $TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+ ]]; then
      NOTE=$TAG
    else
      NOTE=$BRANCH
    fi
    (
      echo "channel: devbuild $NOTE"
      echo "commit: $COMMIT"
    ) > "$SOLANA_ROOT"/farf/version.yml
  )
  echo "Build took $SECONDS seconds"
}
# Remote-side install locations; the \$ is escaped so the variables expand on
# the remote host inside ssh command strings, not locally.
SOLANA_HOME="\$HOME/solana"
CARGO_BIN="\$HOME/.cargo/bin"
# Prepare a remote host for deployment: reset its solana directory (preserving
# the node's config/ when --skip-setup is in effect), install the external ssh
# key if configured, and rsync the helper scripts over.
startCommon() {
  declare ipAddress=$1
  test -d "$SOLANA_ROOT"

  if $skipSetup; then
    # Stash config/ aside, wipe the install directory, then move config/ back
    # shellcheck disable=SC2029
    ssh "${sshOptions[@]}" "$ipAddress" "
      set -x;
      mkdir -p $SOLANA_HOME/config;
      rm -rf ~/config;
      mv $SOLANA_HOME/config ~;
      rm -rf $SOLANA_HOME;
      mkdir -p $SOLANA_HOME $CARGO_BIN;
      mv ~/config $SOLANA_HOME/
    "
  else
    # shellcheck disable=SC2029
    ssh "${sshOptions[@]}" "$ipAddress" "
      set -x;
      rm -rf $SOLANA_HOME;
      mkdir -p $CARGO_BIN
    "
  fi

  [[ -z "$externalNodeSshKey" ]] || ssh-copy-id -f -i "$externalNodeSshKey" "${sshOptions[@]}" "solana@$ipAddress"
  syncScripts "$ipAddress"
}
# rsync the repo's helper scripts (fetch-*, scripts/, net/, multinode-demo/)
# to the given host's $SOLANA_HOME directory.
syncScripts() {
  declare ipAddress=$1
  # fixed: the declaration must precede the echo, otherwise the log line
  # printed a stale/empty $ipAddress from the caller's scope
  echo "rsyncing scripts... to $ipAddress"
  rsync -vPrc -e "ssh ${sshOptions[*]}" \
    --exclude 'net/log*' \
    "$SOLANA_ROOT"/{fetch-perf-libs.sh,fetch-spl.sh,scripts,net,multinode-demo} \
    "$ipAddress":"$SOLANA_HOME"/ > /dev/null
}
# Deploy local binaries to bootstrap validator. Other validators and clients later fetch the
# binaries from it
deployBootstrapValidator() {
  declare ipAddress=$1
  echo "Deploying software to bootstrap validator ($ipAddress)"

  case $deployMethod in
  tar)
    # Release tarball previously unpacked into solana-release/ by prepareDeploy
    rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:$CARGO_BIN/"
    rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/version.yml "$ipAddress:~/"
    ;;
  local)
    # Locally built binaries produced by build()
    rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:$CARGO_BIN/"
    rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/version.yml "$ipAddress:~/"
    ;;
  skip)
    ;;
  *)
    usage "Internal error: invalid deployMethod: $deployMethod"
    ;;
  esac
}
# Deploy software to and start the bootstrap validator. Runs synchronously;
# on failure the log is dumped and the script exits.
#   $1 - ip address, $2 - node index, $3 - log file path
startBootstrapLeader() {
  declare ipAddress=$1
  declare nodeIndex="$2"
  declare logFile="$3"
  echo "--- Starting bootstrap validator: $ipAddress"
  echo "start log: $logFile"

  (
    set -x
    startCommon "$ipAddress" || exit 1
    [[ -z "$externalPrimordialAccountsFile" ]] || rsync -vPrc -e "ssh ${sshOptions[*]}" "$externalPrimordialAccountsFile" \
      "$ipAddress:$remoteExternalPrimordialAccountsFile"
    deployBootstrapValidator "$ipAddress"

    # Positional argument list consumed by net/remote/remote-node.sh — keep
    # order in sync with startNode() and the remote script.
    ssh "${sshOptions[@]}" -n "$ipAddress" \
      "./solana/net/remote/remote-node.sh \
         $deployMethod \
         bootstrap-validator \
         $entrypointIp \
         $((${#validatorIpList[@]} + ${#blockstreamerIpList[@]})) \
         \"$RUST_LOG\" \
         $skipSetup \
         $failOnValidatorBootupFailure \
         \"$remoteExternalPrimordialAccountsFile\" \
         \"$maybeDisableAirdrops\" \
         \"$internalNodesStakeLamports\" \
         \"$internalNodesLamports\" \
         $nodeIndex \
         ${#clientIpList[@]} \"$benchTpsExtraArgs\" \
         \"$genesisOptions\" \
         \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize $maybeWaitForSupermajority $maybeAllowPrivateAddr $maybeAccountsDbSkipShrink $maybeSkipRequireTower\" \
         \"$gpuMode\" \
         \"$maybeWarpSlot\" \
         \"$waitForNodeInit\" \
         \"$extraPrimordialStakes\" \
         \"$TMPFS_ACCOUNTS\" \
      "
  ) >> "$logFile" 2>&1 || {
    cat "$logFile"
    echo "^^^ +++"
    exit 1
  }
}
# Start an additional (non-bootstrap) validator or blockstreamer node in the
# background; the background pid is appended to the global 'pids' array.
#   $1 - ip address, $2 - node type (validator|blockstreamer), $3 - node index
startNode() {
  declare ipAddress=$1
  declare nodeType=$2
  declare nodeIndex="$3"

  initLogDir
  declare logFile="$netLogDir/validator-$ipAddress.log"

  if [[ -z $nodeType ]]; then
    echo nodeType not specified
    exit 1
  fi

  if [[ -z $nodeIndex ]]; then
    echo nodeIndex not specified
    exit 1
  fi

  echo "--- Starting $nodeType: $ipAddress"
  echo "start log: $logFile"
  (
    set -x
    startCommon "$ipAddress"

    if [[ $nodeType = blockstreamer ]] && [[ -n $letsEncryptDomainName ]]; then
      #
      # Create/renew TLS certificate
      #
      declare localArchive=~/letsencrypt-"$letsEncryptDomainName".tgz
      if [[ -r "$localArchive" ]]; then
        timeout 30s scp "${sshOptions[@]}" "$localArchive" "$ipAddress:letsencrypt.tgz"
      fi
      ssh "${sshOptions[@]}" -n "$ipAddress" \
        "sudo -H /certbot-restore.sh $letsEncryptDomainName maintainers@solana.foundation"
      rm -f letsencrypt.tgz
      timeout 30s scp "${sshOptions[@]}" "$ipAddress:/letsencrypt.tgz" letsencrypt.tgz
      test -s letsencrypt.tgz # Ensure non-empty before overwriting $localArchive
      cp letsencrypt.tgz "$localArchive"
    fi

    # Positional argument list consumed by net/remote/remote-node.sh — keep
    # order in sync with startBootstrapLeader() and the remote script.
    ssh "${sshOptions[@]}" -n "$ipAddress" \
      "./solana/net/remote/remote-node.sh \
         $deployMethod \
         $nodeType \
         $entrypointIp \
         $((${#validatorIpList[@]} + ${#blockstreamerIpList[@]})) \
         \"$RUST_LOG\" \
         $skipSetup \
         $failOnValidatorBootupFailure \
         \"$remoteExternalPrimordialAccountsFile\" \
         \"$maybeDisableAirdrops\" \
         \"$internalNodesStakeLamports\" \
         \"$internalNodesLamports\" \
         $nodeIndex \
         ${#clientIpList[@]} \"$benchTpsExtraArgs\" \
         \"$genesisOptions\" \
         \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize $maybeWaitForSupermajority $maybeAllowPrivateAddr $maybeAccountsDbSkipShrink $maybeSkipRequireTower\" \
         \"$gpuMode\" \
         \"$maybeWarpSlot\" \
         \"$waitForNodeInit\" \
         \"$extraPrimordialStakes\" \
         \"$TMPFS_ACCOUNTS\" \
      "
  ) >> "$logFile" 2>&1 &
  declare pid=$!
  ln -sf "validator-$ipAddress.log" "$netLogDir/validator-$pid.log"
  pids+=("$pid")
}
# Start a single client program on the given host; runs synchronously and
# exits the script on failure.
#   $1 - ip address, $2 - client type to run, $3 - client index
startClient() {
  declare ipAddress=$1
  declare clientToRun="$2"
  declare clientIndex="$3"

  initLogDir
  declare logFile="$netLogDir/client-$clientToRun-$ipAddress.log"

  echo "--- Starting client: $ipAddress - $clientToRun"
  echo "start log: $logFile"
  (
    set -x
    startCommon "$ipAddress"
    ssh "${sshOptions[@]}" -f "$ipAddress" \
      "./solana/net/remote/remote-client.sh $deployMethod $entrypointIp \
      $clientToRun \"$RUST_LOG\" \"$benchTpsExtraArgs\" $clientIndex"
  ) >> "$logFile" 2>&1 || {
    cat "$logFile"
    echo "^^^ +++"
    exit 1
  }
}
# Launch the configured client nodes: the first numBenchTpsClients hosts run
# solana-bench-tps, the remainder run the idle client.
startClients() {
  declare i
  for ((i = 0; i < numClients && i < numClientsRequested; i++)); do
    if ((i < numBenchTpsClients)); then
      startClient "${clientIpList[$i]}" "solana-bench-tps" "$i"
    else
      startClient "${clientIpList[$i]}" "idle"
    fi
  done
}
# Run remote-sanity.sh on the bootstrap validator and (unless $1 is set) a
# reduced check on the blockstreamer node. Exits on the first failure.
sanity() {
  declare skipBlockstreamerSanity=$1

  $metricsWriteDatapoint "testnet-deploy net-sanity-begin=1"

  declare ok=true
  declare bootstrapLeader=${validatorIpList[0]}
  declare blockstreamer=${blockstreamerIpList[0]}

  annotateBlockexplorerUrl

  echo "--- Sanity: $bootstrapLeader"
  (
    set -x
    # shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
    ssh "${sshOptions[@]}" "$bootstrapLeader" \
      "./solana/net/remote/remote-sanity.sh $bootstrapLeader $sanityExtraArgs \"$RUST_LOG\""
  ) || ok=false
  $ok || exit 1

  if [[ -z $skipBlockstreamerSanity && -n $blockstreamer ]]; then
    # If there's a blockstreamer node run a reduced sanity check on it as well
    echo "--- Sanity: $blockstreamer"
    (
      set -x
      # shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
      ssh "${sshOptions[@]}" "$blockstreamer" \
        "./solana/net/remote/remote-sanity.sh $blockstreamer $sanityExtraArgs \"$RUST_LOG\""
    ) || ok=false
    $ok || exit 1
  fi

  $metricsWriteDatapoint "testnet-deploy net-sanity-complete=1"
}
# Deploy a solana-install update manifest for each requested platform via the
# bootstrap validator. Requires $updatePlatforms and a release channel (-t).
deployUpdate() {
  if [[ -z $updatePlatforms ]]; then
    echo "No update platforms"
    return
  fi
  if [[ -z $releaseChannel ]]; then
    echo "Release channel not specified (use -t option)"
    exit 1
  fi

  declare ok=true
  declare bootstrapLeader=${validatorIpList[0]}

  for updatePlatform in $updatePlatforms; do
    echo "--- Deploying solana-install update: $updatePlatform"
    (
      set -x

      scripts/solana-install-update-manifest-keypair.sh "$updatePlatform"

      timeout 30s scp "${sshOptions[@]}" \
        update_manifest_keypair.json "$bootstrapLeader:solana/update_manifest_keypair.json"

      # shellcheck disable=SC2029 # remote-deploy-update.sh args are expanded on client side intentionally
      ssh "${sshOptions[@]}" "$bootstrapLeader" \
        "./solana/net/remote/remote-deploy-update.sh $releaseChannel $updatePlatform"
    ) || ok=false
    $ok || exit 1
  done
}
# Classify the host in the global $nodeAddress, setting the GLOBALS nodeIndex
# and nodeType (validator|blockstreamer). The literal "b" element in the loop
# is a sentinel separating the validator list from the blockstreamer list.
# NOTE(review): on success the loop variable $ipAddress is left equal to
# $nodeAddress — deploy() appears to rely on this side effect; confirm before refactoring.
getNodeType() {
  echo "getNodeType: $nodeAddress"
  [[ -n $nodeAddress ]] || {
    echo "Error: nodeAddress not set"
    exit 1
  }
  nodeIndex=0 # <-- global
  nodeType=validator # <-- global

  for ipAddress in "${validatorIpList[@]}" b "${blockstreamerIpList[@]}"; do
    if [[ $ipAddress = b ]]; then
      # Sentinel: every address after this point is a blockstreamer
      nodeType=blockstreamer
      continue
    fi

    if [[ $ipAddress = "$nodeAddress" ]]; then
      echo "getNodeType: $nodeType ($nodeIndex)"
      return
    fi
    ((nodeIndex = nodeIndex + 1))
  done

  echo "Error: Unknown node: $nodeAddress"
  exit 1
}
# Prepare the software to deploy: download/unpack a release tarball
# (deployMethod=tar) or build local binaries (deployMethod=local). With
# --deploy-if-newer, compares versions against the cluster and exits 0 early
# when no update is required.
prepareDeploy() {
  case $deployMethod in
  tar)
    if [[ -n $releaseChannel ]]; then
      echo "Downloading release from channel: $releaseChannel"
      rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
      declare updateDownloadUrl=https://release.solana.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2
      (
        set -x
        curl -L -I "$updateDownloadUrl"
        curl -L --retry 5 --retry-delay 2 --retry-connrefused \
          -o "$SOLANA_ROOT"/solana-release.tar.bz2 "$updateDownloadUrl"
      )
      tarballFilename="$SOLANA_ROOT"/solana-release.tar.bz2
    fi
    (
      set -x
      rm -rf "$SOLANA_ROOT"/solana-release
      cd "$SOLANA_ROOT"; tar jfxv "$tarballFilename"
      cat "$SOLANA_ROOT"/solana-release/version.yml
    )
    ;;
  local)
    if $doBuild; then
      build
    else
      echo "Build skipped due to --no-build"
    fi
    ;;
  skip)
    ;;
  *)
    usage "Internal error: invalid deployMethod: $deployMethod"
    ;;
  esac

  if [[ -n $deployIfNewer ]]; then
    if [[ $deployMethod != tar ]]; then
      echo "Error: --deploy-if-newer only supported for tar deployments"
      exit 1
    fi

    # Fetch the running cluster's version from the bootstrap validator and
    # compare it to the freshly unpacked release
    echo "Fetching current software version"
    (
      set -x
      rsync -vPrc -e "ssh ${sshOptions[*]}" "${validatorIpList[0]}":~/version.yml current-version.yml
    )
    cat current-version.yml
    if ! diff -q current-version.yml "$SOLANA_ROOT"/solana-release/version.yml; then
      echo "Cluster software version is old.  Update required"
    else
      echo "Cluster software version is current.  No update required"
      exit 0
    fi
  fi
}
# Full cluster start: bring up the bootstrap validator synchronously, the
# remaining validators/blockstreamers in the background, wait for them, run
# sanity, then start the clients and emit deploy metrics.
deploy() {
  initLogDir

  echo "Deployment started at $(date)"
  $metricsWriteDatapoint "testnet-deploy net-start-begin=1"

  declare bootstrapLeader=true
  for nodeAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}"; do
    nodeType=
    nodeIndex=
    getNodeType
    if $bootstrapLeader; then
      SECONDS=0
      declare bootstrapNodeDeployTime=
      # NOTE(review): $ipAddress below is not set in this function — it appears
      # to rely on getNodeType's loop leaving $ipAddress == $nodeAddress on a
      # successful match; confirm before refactoring.
      startBootstrapLeader "$nodeAddress" $nodeIndex "$netLogDir/bootstrap-validator-$ipAddress.log"
      bootstrapNodeDeployTime=$SECONDS
      $metricsWriteDatapoint "testnet-deploy net-bootnode-leader-started=1"

      bootstrapLeader=false
      SECONDS=0
      pids=()
    else
      startNode "$ipAddress" $nodeType $nodeIndex

      # Stagger additional node start time. If too many nodes start simultaneously
      # the bootstrap node gets more rsync requests from the additional nodes than
      # it can handle.
      sleep 2
    fi
  done

  # Reap the background startNode jobs and surface any failures
  for pid in "${pids[@]}"; do
    declare ok=true
    wait "$pid" || ok=false
    if ! $ok; then
      echo "+++ validator failed to start"
      cat "$netLogDir/validator-$pid.log"
      if $failOnValidatorBootupFailure; then
        exit 1
      else
        echo "Failure is non-fatal"
      fi
    fi
  done

  if ! $waitForNodeInit; then
    # Handle async init: give the whole fleet at most 600 seconds to finish
    declare startTime=$SECONDS
    for ipAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}"; do
      declare timeWaited=$((SECONDS - startTime))
      if [[ $timeWaited -gt 600 ]]; then
        break
      fi
      ssh "${sshOptions[@]}" -n "$ipAddress" \
        "./solana/net/remote/remote-node-wait-init.sh $((600 - timeWaited))"
    done
  fi

  $metricsWriteDatapoint "testnet-deploy net-validators-started=1"
  additionalNodeDeployTime=$SECONDS

  annotateBlockexplorerUrl

  sanity skipBlockstreamerSanity # skip sanity on blockstreamer node, it may not
                                 # have caught up to the bootstrap validator yet

  echo "--- Sleeping $clientDelayStart seconds after validators are started before starting clients"
  sleep "$clientDelayStart"

  SECONDS=0
  startClients
  clientDeployTime=$SECONDS

  $metricsWriteDatapoint "testnet-deploy net-start-complete=1"

  declare networkVersion=unknown
  case $deployMethod in
  tar)
    networkVersion="$(
      (
        set -o pipefail
        grep "^commit: " "$SOLANA_ROOT"/solana-release/version.yml | head -n1 | cut -d\  -f2
      ) || echo "tar-unknown"
    )"
    ;;
  local)
    networkVersion="$(git rev-parse HEAD || echo local-unknown)"
    ;;
  skip)
    ;;
  *)
    usage "Internal error: invalid deployMethod: $deployMethod"
    ;;
  esac
  $metricsWriteDatapoint "testnet-deploy version=\"${networkVersion:0:9}\""

  echo
  echo "--- Deployment Successful"
  echo "Bootstrap validator deployment took $bootstrapNodeDeployTime seconds"
  echo "Additional validator deployment (${#validatorIpList[@]} validators, ${#blockstreamerIpList[@]} blockstreamer nodes) took $additionalNodeDeployTime seconds"
  echo "Client deployment (${#clientIpList[@]} instances) took $clientDeployTime seconds"
  echo "Network start logs in $netLogDir"
}
# Stop the Solana software on a single node by running its remote cleanup
# script.
#   $1: node IP address
#   $2: "true" to block until the node has stopped, "false" to stop it in the
#       background (the background pid is appended to the ${pids[@]} global)
stopNode() {
  local ipAddress=$1
  local block=$2
  initLogDir
  declare logFile="$netLogDir/stop-validator-$ipAddress.log"
  echo "--- Stopping node: $ipAddress"
  echo "stop log: $logFile"
  syncScripts "$ipAddress"
  (
    # Since cleanup.sh does a pkill, we cannot pass the command directly,
    # otherwise the process which is doing the killing will be killed because
    # the script itself will match the pkill pattern
    set -x
    # shellcheck disable=SC2029 # It's desired that PS4 be expanded on the client side
    ssh "${sshOptions[@]}" "$ipAddress" "PS4=\"$PS4\" ./solana/net/remote/cleanup.sh"
  ) >> "$logFile" 2>&1 &
  declare pid=$!
  # Also expose the log under the background pid so callers that only know the
  # pid (see stop()) can find it.
  ln -sf "stop-validator-$ipAddress.log" "$netLogDir/stop-validator-$pid.log"
  if $block; then
    wait $pid || true
  else
    pids+=("$pid")
  fi
}
# Stop every validator, blockstreamer, and client node in the cluster.
# Nodes are stopped asynchronously (via stopNode) and then reaped; the stops
# are lightly staggered to limit the number of concurrent ssh sessions.
stop() {
  SECONDS=0
  $metricsWriteDatapoint "testnet-deploy net-stop-begin=1"

  declare nodeCount=0
  pids=()
  for ipAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}" "${clientIpList[@]}"; do
    stopNode "$ipAddress" false
    # Pause briefly after every fourth node to avoid an ssh session storm
    if ((nodeCount % 4 == 0)); then
      sleep 2
    fi
    nodeCount=$((nodeCount + 1))
  done

  echo --- Waiting for nodes to finish stopping
  for pid in "${pids[@]}"; do
    echo -n "$pid "
    wait "$pid" || true
  done
  echo

  $metricsWriteDatapoint "testnet-deploy net-stop-complete=1"
  echo "Stopping nodes took $SECONDS seconds"
}
# Ping each validator once to detect instances a cloud provider may have
# preempted, and bail out with advice if one does not answer.  This is only
# best effort: an instance can still disappear immediately after it answers
# the ping.
checkPremptibleInstances() {
  for ipAddress in "${validatorIpList[@]}"; do
    if ! timeout 5s ping -c 1 "$ipAddress" | tr - _ &>/dev/null; then
      cat <<EOF
Warning: $ipAddress may have been preempted.
Run |./gce.sh config| to restart it
EOF
      exit 1
    fi
  done
}
# --- Defaults for all command-line configurable settings ---
releaseChannel=
deployMethod=local
deployIfNewer=
sanityExtraArgs=
skipSetup=false
updatePlatforms=
nodeAddress=
numIdleClients=0
numBenchTpsClients=0
benchTpsExtraArgs=
failOnValidatorBootupFailure=true
genesisOptions=
numValidatorsRequested=
externalPrimordialAccountsFile=
remoteExternalPrimordialAccountsFile=
internalNodesStakeLamports=
internalNodesLamports=
# Optional flag strings forwarded verbatim to the remote nodes
maybeNoSnapshot=""
maybeLimitLedgerSize=""
maybeSkipLedgerVerify=""
maybeDisableAirdrops=""
maybeWaitForSupermajority=""
maybeAllowPrivateAddr=""
maybeAccountsDbSkipShrink=""
maybeSkipRequireTower=""
debugBuild=false
doBuild=true
gpuMode=auto
# netem (network emulation / partition testing) settings
netemPartition=""
netemConfig=""
netemConfigFile=""
netemCommand="add"
clientDelayStart=0
netLogDir=
maybeWarpSlot=
waitForNodeInit=true
extraPrimordialStakes=0

# The first positional argument is the subcommand and is required.
command=$1
[[ -n $command ]] || usage
shift
shortArgs=()
# Peel off the long (--foo) options, which getopts cannot parse; every other
# argument is collected into shortArgs for the getopts loop that follows.
while [[ -n $1 ]]; do
  if [[ ${1:0:2} = -- ]]; then
    if [[ $1 = --hashes-per-tick ]]; then
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    elif [[ $1 = --slots-per-epoch ]]; then
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    elif [[ $1 = --target-lamports-per-signature ]]; then
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    elif [[ $1 = --faucet-lamports ]]; then
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    elif [[ $1 = --cluster-type ]]; then
      case "$2" in
        development|devnet|testnet|mainnet-beta)
          ;;
        *)
          echo "Unexpected cluster type: \"$2\""
          exit 1
          ;;
      esac
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    # (A second, unreachable --slots-per-epoch branch was removed here; the
    # identical branch near the top of this chain always matched first.)
    elif [[ $1 = --no-snapshot-fetch ]]; then
      maybeNoSnapshot="$1"
      shift 1
    elif [[ $1 = --deploy-if-newer ]]; then
      deployIfNewer=1
      shift 1
    elif [[ $1 = --no-deploy ]]; then
      deployMethod=skip
      shift 1
    elif [[ $1 = --no-build ]]; then
      doBuild=false
      shift 1
    elif [[ $1 = --limit-ledger-size ]]; then
      maybeLimitLedgerSize="$1 $2"
      shift 2
    elif [[ $1 = --skip-poh-verify ]]; then
      maybeSkipLedgerVerify="$1"
      shift 1
    elif [[ $1 = --skip-setup ]]; then
      skipSetup=true
      shift 1
    elif [[ $1 = --platform ]]; then
      updatePlatforms="$updatePlatforms $2"
      shift 2
    elif [[ $1 = --internal-nodes-stake-lamports ]]; then
      internalNodesStakeLamports="$2"
      shift 2
    elif [[ $1 = --internal-nodes-lamports ]]; then
      internalNodesLamports="$2"
      shift 2
    elif [[ $1 = --external-accounts-file ]]; then
      externalPrimordialAccountsFile="$2"
      remoteExternalPrimordialAccountsFile=/tmp/external-primordial-accounts.yml
      shift 2
    elif [[ $1 = --no-airdrop ]]; then
      maybeDisableAirdrops="$1"
      shift 1
    elif [[ $1 = --debug ]]; then
      debugBuild=true
      shift 1
    elif [[ $1 = --partition ]]; then
      netemPartition=$2
      shift 2
    elif [[ $1 = --config ]]; then
      netemConfig=$2
      shift 2
    elif [[ $1 == --config-file ]]; then
      netemConfigFile=$2
      shift 2
    elif [[ $1 == --netem-cmd ]]; then
      netemCommand=$2
      shift 2
    elif [[ $1 = --gpu-mode ]]; then
      gpuMode=$2
      case "$gpuMode" in
        on|off|auto|cuda)
          ;;
        *)
          echo "Unexpected GPU mode: \"$gpuMode\""
          exit 1
          ;;
      esac
      shift 2
    elif [[ $1 == --client-delay-start ]]; then
      clientDelayStart=$2
      shift 2
    elif [[ $1 == --wait-for-supermajority ]]; then
      maybeWaitForSupermajority="$1 $2"
      shift 2
    elif [[ $1 == --warp-slot ]]; then
      maybeWarpSlot="$1 $2"
      shift 2
    elif [[ $1 == --async-node-init ]]; then
      waitForNodeInit=false
      shift 1
    elif [[ $1 == --extra-primordial-stakes ]]; then
      extraPrimordialStakes=$2
      shift 2
    elif [[ $1 = --allow-private-addr ]]; then
      # May also be added by loadConfigFile if 'gce.sh create' was invoked
      # without -P.
      maybeAllowPrivateAddr="$1"
      shift 1
    elif [[ $1 = --accounts-db-skip-shrink ]]; then
      maybeAccountsDbSkipShrink="$1"
      shift 1
    elif [[ $1 = --skip-require-tower ]]; then
      maybeSkipRequireTower="$1"
      shift 1
    else
      usage "Unknown long option: $1"
    fi
  else
    shortArgs+=("$1")
    shift
  fi
done
# Parse the remaining short options collected into shortArgs above.
while getopts "h?T:t:o:f:rc:Fn:i:d" opt "${shortArgs[@]}"; do
  case $opt in
  h | \?)
    usage
    ;;
  T)
    # -T: deploy a prebuilt release tarball
    tarballFilename=$OPTARG
    [[ -r $tarballFilename ]] || usage "File not readable: $tarballFilename"
    deployMethod=tar
    ;;
  t)
    # -t: deploy a published release channel (edge/beta/stable) or a vX tag
    case $OPTARG in
    edge|beta|stable|v*)
      releaseChannel=$OPTARG
      deployMethod=tar
      ;;
    *)
      usage "Invalid release channel: $OPTARG"
      ;;
    esac
    ;;
  n)
    # -n: limit the cluster to this many validators
    numValidatorsRequested=$OPTARG
    ;;
  r)
    skipSetup=true
    ;;
  o)
    # -o: extra sanity-check options (forwarded to sanity)
    case $OPTARG in
    rejectExtraNodes|noInstallCheck)
      sanityExtraArgs="$sanityExtraArgs -o $OPTARG"
      ;;
    *)
      usage "Unknown option: $OPTARG"
      ;;
    esac
    ;;
  c)
    # -c clientType=numClients=extraArgs: configure client machine roles
    getClientTypeAndNum() {
      if ! [[ $OPTARG == *'='* ]]; then
        echo "Error: Expecting tuple \"clientType=numClientType=extraArgs\" but got \"$OPTARG\""
        exit 1
      fi
      local keyValue
      IFS='=' read -ra keyValue <<< "$OPTARG"
      local clientType=${keyValue[0]}
      local numClients=${keyValue[1]}
      local extraArgs=${keyValue[2]}
      re='^[0-9]+$'
      if ! [[ $numClients =~ $re ]] ; then
        echo "error: numClientType must be a number but got \"$numClients\""
        exit 1
      fi
      case $clientType in
      idle)
        numIdleClients=$numClients
        # $extraArgs ignored for 'idle'
        ;;
      bench-tps)
        numBenchTpsClients=$numClients
        benchTpsExtraArgs=$extraArgs
        ;;
      *)
        echo "Unknown client type: $clientType"
        exit 1
        ;;
      esac
    }
    getClientTypeAndNum
    ;;
  F)
    failOnValidatorBootupFailure=false
    ;;
  i)
    # -i: node address for the stopnode/startnode subcommands
    nodeAddress=$OPTARG
    ;;
  d)
    debugBuild=true
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done
loadConfigFile

# Optionally truncate the validator list to the first -n entries.
if [[ -n $numValidatorsRequested ]]; then
  truncatedNodeList=( "${validatorIpList[@]:0:$numValidatorsRequested}" )
  unset validatorIpList
  validatorIpList=( "${truncatedNodeList[@]}" )
fi

numClients=${#clientIpList[@]}
numClientsRequested=$((numBenchTpsClients + numIdleClients))
if [[ "$numClientsRequested" -eq 0 ]]; then
  # No -c options were supplied; default to bench-tps on every client machine.
  numBenchTpsClients=$numClients
  numClientsRequested=$numClients
else
  if [[ "$numClientsRequested" -gt "$numClients" ]]; then
    # Fix: "than", not "then", in the user-facing error message.
    echo "Error: More clients requested ($numClientsRequested) than available ($numClients)"
    exit 1
  fi
fi
# --wait-for-supermajority and --warp-slot must agree on the slot number when
# both are supplied (each holds "--flag slot"; read drops the flag word).
if [[ -n "$maybeWaitForSupermajority" && -n "$maybeWarpSlot" ]]; then
  read -r _ waitSlot <<<"$maybeWaitForSupermajority"
  read -r _ warpSlot <<<"$maybeWarpSlot"
  if [[ $waitSlot -ne $warpSlot ]]; then
    echo "Error: When specifying both --wait-for-supermajority and --warp-slot,"
    echo "they must use the same slot. ($waitSlot != $warpSlot)"
    exit 1
  fi
fi

echo "net.sh: Primordial stakes: $extraPrimordialStakes"
if [[ $extraPrimordialStakes -gt 0 ]]; then
  # Extra primoridial stakes require that all of the validators start at
  # the same time. Force async init and wait for supermajority here.
  waitForNodeInit=false
  if [[ -z "$maybeWaitForSupermajority" ]]; then
    # Default the supermajority slot to the warp slot if given, else slot 1.
    waitSlot=
    if [[ -n "$maybeWarpSlot" ]]; then
      read -r _ waitSlot <<<"$maybeWarpSlot"
    else
      waitSlot=1
    fi
    maybeWaitForSupermajority="--wait-for-supermajority $waitSlot"
  fi
fi
checkPremptibleInstances

# Dispatch the requested subcommand.
case $command in
restart)
  prepareDeploy
  stop
  deploy
  ;;
start)
  prepareDeploy
  deploy
  ;;
prepare)
  prepareDeploy
  ;;
sanity)
  sanity
  ;;
stop)
  stop
  ;;
update)
  deployUpdate
  ;;
upgrade)
  bootstrapValidatorIp="${validatorIpList[0]}"
  prepareDeploy
  deployBootstrapValidator "$bootstrapValidatorIp"
  # (start|stop)Node need refactored to support restarting the bootstrap validator
  ;;
stopnode)
  if [[ -z $nodeAddress ]]; then
    usage "node address (-i) not specified"
    exit 1
  fi
  stopNode "$nodeAddress" true
  ;;
startnode)
  if [[ -z $nodeAddress ]]; then
    usage "node address (-i) not specified"
    exit 1
  fi
  nodeType=
  nodeIndex=
  getNodeType
  startNode "$nodeAddress" $nodeType $nodeIndex
  ;;
startclients)
  startClients
  ;;
logs)
  initLogDir
  # Copy ~/solana/<log>.log from a remote host into netLogDir.
  fetchRemoteLog() {
    declare ipAddress=$1
    declare log=$2
    echo "--- fetching $log from $ipAddress"
    (
      set -x
      timeout 30s scp "${sshOptions[@]}" \
        "$ipAddress":solana/"$log".log "$netLogDir"/remote-"$log"-"$ipAddress".log
    ) || echo "failed to fetch log"
  }
  fetchRemoteLog "${validatorIpList[0]}" faucet
  for ipAddress in "${validatorIpList[@]}"; do
    fetchRemoteLog "$ipAddress" validator
  done
  for ipAddress in "${clientIpList[@]}"; do
    fetchRemoteLog "$ipAddress" client
  done
  for ipAddress in "${blockstreamerIpList[@]}"; do
    fetchRemoteLog "$ipAddress" validator
  done
  ;;
netem)
  # Two modes: apply a netem config file to every validator via net-shaper.sh,
  # or derive a node count from the --partition percentage and toggle netem on
  # just that subset.
  if [[ -n $netemConfigFile ]]; then
    remoteNetemConfigFile="$(basename "$netemConfigFile")"
    if [[ $netemCommand = "add" ]]; then
      for ipAddress in "${validatorIpList[@]}"; do
        "$here"/scp.sh "$netemConfigFile" solana@"$ipAddress":"$SOLANA_HOME"
      done
    fi
    for i in "${!validatorIpList[@]}"; do
      "$here"/ssh.sh solana@"${validatorIpList[$i]}" 'solana/scripts/net-shaper.sh' \
        "$netemCommand" ~solana/solana/"$remoteNetemConfigFile" "${#validatorIpList[@]}" "$i"
    done
  else
    # Round the percentage up to a whole node count, capped at the list size.
    num_nodes=$((${#validatorIpList[@]}*netemPartition/100))
    if [[ $((${#validatorIpList[@]}*netemPartition%100)) -gt 0 ]]; then
      num_nodes=$((num_nodes+1))
    fi
    if [[ "$num_nodes" -gt "${#validatorIpList[@]}" ]]; then
      num_nodes=${#validatorIpList[@]}
    fi
    # Stop netem on all nodes
    for ipAddress in "${validatorIpList[@]}"; do
      "$here"/ssh.sh solana@"$ipAddress" 'solana/scripts/netem.sh delete < solana/netem.cfg || true'
    done
    # Start netem on required nodes
    for ((i=0; i<num_nodes; i++ )); do :
      "$here"/ssh.sh solana@"${validatorIpList[$i]}" "echo $netemConfig > solana/netem.cfg; solana/scripts/netem.sh add \"$netemConfig\""
    done
  fi
  ;;
*)
  echo "Internal error: Unknown command: $command"
  usage
  exit 1
esac
|
# Evaluate the 13-model (1024+0+512-N-IP) checkpoint on the WikiText-103
# validation split, applying the shuffle-within-sentences high-PMI-first
# augmentation and scoring with the penultimate-sixth eval function.
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-N-IP/13-model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-N-IP/13-1024+0+512-HPMI-first-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_within_sentences_high_pmi_first_two_thirds_sixth --eval_function penultimate_sixth_eval
def foo(a, b=345, c=22):
    """Return ``b * c`` when ``a`` equals 1, otherwise ``a + b + c``."""
    if a != 1:
        return a + b + c
    return b * c
import React from "react";
import { mount } from "enzyme";
import App from "../../src/App";
describe("App", () => {
it("renders Hello, world.", () => {
const wrapper = mount(<App />);
expect(wrapper.find(".hello").text()).toContain("Hello, viewers.");
});
});
|
package lgbt.princess.v
/**
 * This package contains the [[SemVer]] class for representing
 * [[https://semver.org/ SemVer versions]], as well as symbolic methods
 * for conveniently constructing and extracting SemVer versions.
 */
package object semver
|
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for configuring kubernetes master and node instances. It is
# uploaded in the manifests tar ball.
# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files.
# Fail fast: abort on any error, on use of an unset variable, and on a
# failure anywhere in a pipeline.
set -o errexit
set -o nounset
set -o pipefail

### Hardcoded constants
# GCE metadata server address (overridable for testing).
METADATA_SERVER_IP="${METADATA_SERVER_IP:-169.254.169.254}"

# Standard curl flags.
CURL_FLAGS='--fail --silent --show-error --retry 5 --retry-delay 3 --connect-timeout 10 --retry-connrefused'
function convert-manifest-params {
  # Turn a space-separated flag string ("--a=1 --b=2") into a comma-separated
  # list of quoted args, each preceded by a literal "\n" escape (interpreted
  # later during manifest templating):
  #   \n"--a=1",\n"--b=2"
  # Emits nothing for an empty input.
  #
  # Old manifest format:
  #   command=["/bin/sh", "-c", "exec KUBE_EXEC_BINARY --param1=val1 --param2-val2"]
  # New format (no shell dependencies):
  #   command=["KUBE_EXEC_BINARY"]
  #   args=["--param1=val1", "--param2-val2"]
  IFS=' ' read -ra FLAGS <<< "$1"
  params=""
  for f in "${FLAGS[@]}"; do
    params+="\n\"$f\","
  done
  if [[ -n "$params" ]]; then
    echo "${params::-1}" # drop trailing comma
  fi
}
function append-param-if-not-present {
  # Echo $1 with " --$2=$3" appended, unless a "--$2=" or "--$2 " flag is
  # already present anywhere in the argument string.
  local result="$1"
  local -r flag_name="$2"
  local -r flag_value="$3"
  if [[ "${result}" =~ "--${flag_name}"[=\ ] ]]; then
    echo "${result}"
  else
    echo "${result} --${flag_name}=${flag_value}"
  fi
}
function setup-os-params {
  # Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
  # /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
  # now, set a generic core_pattern that users can work with.
  # Cores are written as /core.<executable>.<pid>.<unix-timestamp>.
  echo "/core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}
# secure_random generates a secure random string of bytes. This function accepts
# a number of secure bytes desired and returns a base64 encoded string with at
# least the requested entropy. Rather than directly reading from /dev/urandom,
# we use uuidgen which calls getrandom(2). getrandom(2) verifies that the
# entropy pool has been initialized sufficiently for the desired operation
# before reading from /dev/urandom.
#
# ARGS:
# #1: number of secure bytes to generate. We round up to the nearest factor of 32.
function secure_random {
  # $1: number of secure bytes to generate (rounded up to a multiple of 32).
  local infobytes="${1}"
  if ((infobytes <= 0)); then
    echo "Invalid argument to secure_random: infobytes='${infobytes}'" 1>&2
    return 1
  fi
  local out=""
  # Each iteration contributes 32 bytes (64 hex chars) of entropy.
  for (( i = 0; i < "${infobytes}"; i += 32 )); do
    # uuids have 122 random bits, sha256 sums have 256 bits, so concatenate
    # three uuids and take their sum. The sum is encoded in ASCII hex, hence the
    # 64 character cut.
    out+="$(
      (
        uuidgen --random;
        uuidgen --random;
        uuidgen --random;
      ) | sha256sum \
        | head -c 64
    )";
  done
  # Finally, convert the ASCII hex to base64 to increase the density.
  echo -n "${out}" | xxd -r -p | base64 -w 0
}
# Helper for configuring iptables rules for metadata server.
#
# $1 is the command flag (-I or -D).
# $2 is the firewall action (LOG or REJECT).
# $3 is the prefix for log output.
# $4 is "!" to optionally invert the uid range.
# Helper for configuring iptables rules for metadata server.
#
# $1 is the command flag (-I or -D).
# $2 is the firewall action (LOG or REJECT).
# $3 is the prefix for log output.
# $4 is "!" to optionally invert the uid range.
function gce-metadata-fw-helper {
  local -r command="$1"
  local action="$2"
  local -r prefix="$3"
  local -r invert="${4:-}"

  # Expand rule action to include relevant option flags.
  case "${action}" in
    LOG)
      # NOTE: the embedded quotes end and reopen the string; the pieces
      # concatenate into one word only because ${prefix} contains no spaces.
      # Fix: use the canonical plural spelling --log-ip-options; the singular
      # form only worked through getopt_long prefix matching.
      action="LOG --log-prefix "${prefix}:" --log-uid --log-tcp-options --log-ip-options"
      ;;
  esac

  # Deliberately allow word split here
  # shellcheck disable=SC2086
  iptables -w ${command} OUTPUT -p tcp --dport 80 -d ${METADATA_SERVER_IP} -m owner ${invert:-} --uid-owner=${METADATA_SERVER_ALLOWED_UID_RANGE:-0-2999} -j ${action}
}
# WARNING: DO NOT USE THE FILTER TABLE! Some implementations of network policy
# think they own it and will stomp all over your changes. At this time, the
# mangle table is less contentious so use that if possible.
# WARNING: DO NOT USE THE FILTER TABLE! Some implementations of network policy
# think they own it and will stomp all over your changes. At this time, the
# mangle table is less contentious so use that if possible.
function config-ip-firewall {
  echo "Configuring IP firewall rules"

  # Do not consider loopback addresses as martian source or destination while
  # routing. This enables the use of 127/8 for local routing purposes.
  sysctl -w net.ipv4.conf.all.route_localnet=1

  # The GCI image has host firewall which drop most inbound/forwarded packets.
  # We need to add rules to accept all TCP/UDP/ICMP/SCTP packets.
  if iptables -w -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
    echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
    iptables -w -A INPUT -w -p TCP -j ACCEPT
    iptables -w -A INPUT -w -p UDP -j ACCEPT
    iptables -w -A INPUT -w -p ICMP -j ACCEPT
    iptables -w -A INPUT -w -p SCTP -j ACCEPT
  fi
  if iptables -w -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
    echo "Add rules to accept all forwarded TCP/UDP/ICMP/SCTP packets"
    iptables -w -A FORWARD -w -p TCP -j ACCEPT
    iptables -w -A FORWARD -w -p UDP -j ACCEPT
    iptables -w -A FORWARD -w -p ICMP -j ACCEPT
    iptables -w -A FORWARD -w -p SCTP -j ACCEPT
  fi

  # Flush iptables nat table
  iptables -w -t nat -F || true

  # MASQUERADE all outbound traffic except RFC-reserved/link-local ranges.
  if [[ "${NON_MASQUERADE_CIDR:-}" == "0.0.0.0/0" ]]; then
    echo "Add rules for ip masquerade"
    iptables -w -t nat -N IP-MASQ
    iptables -w -t nat -A POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! --dst-type LOCAL -j IP-MASQ
    iptables -w -t nat -A IP-MASQ -d 169.254.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 10.0.0.0/8 -m comment --comment "ip-masq: RFC 1918 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 172.16.0.0/12 -m comment --comment "ip-masq: RFC 1918 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.168.0.0/16 -m comment --comment "ip-masq: RFC 1918 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 240.0.0.0/4 -m comment --comment "ip-masq: RFC 5735 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.0.2.0/24 -m comment --comment "ip-masq: RFC 5737 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 198.51.100.0/24 -m comment --comment "ip-masq: RFC 5737 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 203.0.113.0/24 -m comment --comment "ip-masq: RFC 5737 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 100.64.0.0/10 -m comment --comment "ip-masq: RFC 6598 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 198.18.0.0/15 -m comment --comment "ip-masq: RFC 6815 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.0.0.0/24 -m comment --comment "ip-masq: RFC 6890 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -d 192.88.99.0/24 -m comment --comment "ip-masq: RFC 7526 reserved range is not subject to MASQUERADE" -j RETURN
    iptables -w -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE
  fi

  # If METADATA_CONCEALMENT_NO_FIREWALL is set, don't create a firewall on this
  # node because we don't expect the daemonset to run on this node.
  if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]] && [[ ! "${METADATA_CONCEALMENT_NO_FIREWALL:-}" == "true" ]]; then
    echo "Add rule for metadata concealment"
    ip addr add dev lo 169.254.169.252/32
    iptables -w -t nat -I PREROUTING -p tcp ! -i eth0 -d "${METADATA_SERVER_IP}" --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 169.254.169.252:988
    iptables -w -t nat -I PREROUTING -p tcp ! -i eth0 -d "${METADATA_SERVER_IP}" --dport 8080 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 169.254.169.252:987
  fi
  # NOTE(review): drops OUTPUT packets whose *source* is the metadata server
  # address — presumably anti-spoofing; confirm intent before changing.
  iptables -w -t mangle -I OUTPUT -s 169.254.169.254 -j DROP

  # Log all metadata access not from approved processes.
  case "${METADATA_SERVER_FIREWALL_MODE:-off}" in
    log)
      echo "Installing metadata firewall logging rules"
      gce-metadata-fw-helper -I LOG "MetadataServerFirewallReject" !
      gce-metadata-fw-helper -I LOG "MetadataServerFirewallAccept"
      ;;
  esac
}
function create-dirs {
  # Pre-create the directories the kubelet (and, on worker nodes, kube-proxy)
  # expect to exist before their manifests are laid down.
  echo "Creating required directories"
  mkdir -p /var/lib/kubelet /etc/kubernetes/manifests
  if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
    mkdir -p /var/lib/kube-proxy
  fi
}
# Gets the total number of $(1) and $(2) type disks specified
# by the user in ${NODE_LOCAL_SSDS_EXT}
# Computes into the ${localdisknum} global the total count of local SSDs with
# the requested interface ($1: scsi|nvme) and format ($2: fs|block), as
# declared in ${NODE_LOCAL_SSDS_EXT} (semicolon-separated
# "count,interface,format" groups).  Matching is case-insensitive.
function get-local-disk-num() {
  local -r want_interface="${1}"
  local -r want_format="${2}"

  localdisknum=0
  if [[ -z "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
    return
  fi

  IFS=";" read -r -a ssdgroups <<< "${NODE_LOCAL_SSDS_EXT:-}"
  for ssdgroup in "${ssdgroups[@]}"; do
    IFS="," read -r -a ssdopts <<< "${ssdgroup}"
    if [[ "${ssdopts[2],,}" == "${want_format,,}" && "${ssdopts[1],,}" == "${want_interface,,}" ]]; then
      localdisknum=$((localdisknum + ssdopts[0]))
    fi
  done
}
# Creates a symlink for a ($1) so that it may be used as block storage
function safe-block-symlink(){
local device="${1}"
local symdir="${2}"
mkdir -p "${symdir}"
get-or-generate-uuid "${device}"
local myuuid="${retuuid}"
local sym="${symdir}/local-ssd-${myuuid}"
# Do not "mkdir -p ${sym}" as that will cause unintended symlink behavior
ln -s "${device}" "${sym}"
echo "Created a symlink for SSD $ssd at ${sym}"
chmod a+w "${sym}"
}
# Gets a pregenerated UUID from ${ssdmap} if it exists, otherwise generates a new
# UUID and places it inside ${ssdmap}
# Gets a pregenerated UUID from ${ssdmap} if it exists, otherwise generates a
# new UUID and places it inside ${ssdmap}.  The result is returned via the
# ${retuuid} global.
#   $1: device path used as the map key
function get-or-generate-uuid(){
  local device="${1}"
  local ssdmap="/home/kubernetes/localssdmap.txt"
  echo "Generating or getting UUID from ${ssdmap}"

  if [[ ! -e "${ssdmap}" ]]; then
    touch "${ssdmap}"
    chmod +w "${ssdmap}"
  fi

  # each line of the ssdmap looks like "${device} persistent-uuid"
  local myuuid
  # Fix: anchor the match to the start of the line and require the trailing
  # space so e.g. /dev/nvme0n1 cannot match the /dev/nvme0n10 entry.
  if grep -q "^${device} " "${ssdmap}"; then
    #create symlink based on saved uuid
    myuuid=$(grep "^${device} " "${ssdmap}" | cut -d ' ' -f 2)
  else
    # generate new uuid and add it to the map
    if ! myuuid=$(uuidgen); then
      echo "Failed to generate valid UUID with uuidgen" >&2
      exit 2
    fi
    echo "${device} ${myuuid}" >> "${ssdmap}"
  fi

  if [[ -z "${myuuid}" ]]; then
    echo "Failed to get a uuid for device ${device} when symlinking." >&2
    exit 2
  fi
  retuuid="${myuuid}"
}
#Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
function safe-format-and-mount() {
local device
local mountpoint
device="$1"
mountpoint="$2"
# Format only if the disk is not already formatted.
if ! tune2fs -l "${device}" ; then
echo "Formatting '${device}'"
mkfs.ext4 -F "${device}"
fi
mkdir -p "${mountpoint}"
echo "Mounting '${device}' at '${mountpoint}'"
mount -o discard,defaults "${device}" "${mountpoint}"
chmod a+w "${mountpoint}"
}
# Gets a devices UUID and bind mounts the device to mount location in
# /mnt/disks/by-id/
function unique-uuid-bind-mount(){
local mountpoint
local actual_device
mountpoint="$1"
actual_device="$2"
# Trigger udev refresh so that newly formatted devices are propagated in by-uuid
udevadm control --reload-rules
udevadm trigger
udevadm settle
# find uuid for actual_device
local myuuid
myuuid=$(find -L /dev/disk/by-uuid -maxdepth 1 -samefile /dev/"${actual_device}" -printf '%P')
# myuuid should be the uuid of the device as found in /dev/disk/by-uuid/
if [[ -z "${myuuid}" ]]; then
echo "Failed to get a uuid for device ${actual_device} when mounting." >&2
exit 2
fi
# bindpoint should be the full path of the to-be-bound device
local bindpoint="${UUID_MNT_PREFIX}-${interface}-fs/local-ssd-${myuuid}"
safe-bind-mount "${mountpoint}" "${bindpoint}"
}
# Bind mounts device at mountpoint to bindpoint
function safe-bind-mount(){
local mountpoint="${1}"
local bindpoint="${2}"
# Mount device to the mountpoint
mkdir -p "${bindpoint}"
echo "Binding '${mountpoint}' at '${bindpoint}'"
mount --bind "${mountpoint}" "${bindpoint}"
chmod a+w "${bindpoint}"
}
# Mounts, bindmounts, or symlinks depending on the interface and format
# of the incoming device
function mount-ext(){
local ssd="${1}"
local devicenum="${2}"
local interface="${3}"
local format="${4}"
if [[ -z "${devicenum}" ]]; then
echo "Failed to get the local disk number for device ${ssd}" >&2
exit 2
fi
# TODO: Handle partitioned disks. Right now this code just ignores partitions
if [[ "${format}" == "fs" ]]; then
if [[ "${interface}" == "scsi" ]]; then
local actual_device
actual_device=$(readlink -f "${ssd}" | cut -d '/' -f 3)
# Error checking
if [[ "${actual_device}" != sd* ]]; then
echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
exit 1
fi
local mountpoint="/mnt/disks/ssd${devicenum}"
else
# This path is required because the existing Google images do not
# expose NVMe devices in /dev/disk/by-id so we are using the /dev/nvme instead
local actual_device
actual_device=$(echo "${ssd}" | cut -d '/' -f 3)
# Error checking
if [[ "${actual_device}" != nvme* ]]; then
echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
exit 1
fi
local mountpoint="/mnt/disks/ssd-nvme${devicenum}"
fi
safe-format-and-mount "${ssd}" "${mountpoint}"
# We only do the bindmount if users are using the new local ssd request method
# see https://github.com/kubernetes/kubernetes/pull/53466#discussion_r146431894
if [[ -n "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
unique-uuid-bind-mount "${mountpoint}" "${actual_device}"
fi
elif [[ "${format}" == "block" ]]; then
local symdir="${UUID_BLOCK_PREFIX}-${interface}-block"
safe-block-symlink "${ssd}" "${symdir}"
else
echo "Disk format must be either fs or block, got ${format}"
fi
}
# Local ssds, if present, are mounted or symlinked to their appropriate
# locations
# Local ssds, if present, are mounted or symlinked to their appropriate
# locations: the first N (per NODE_LOCAL_SSDS_EXT) as raw block devices,
# the rest as filesystems.  SCSI and NVMe devices are handled separately.
function ensure-local-ssds() {
  if [ "${NODE_LOCAL_SSDS_EPHEMERAL:-false}" == "true" ]; then
    ensure-local-ssds-ephemeral-storage
    return
  fi

  # --- SCSI devices (enumerable via /dev/disk/by-id) ---
  get-local-disk-num "scsi" "block"
  local scsiblocknum="${localdisknum}"
  local i=0
  for ssd in /dev/disk/by-id/google-local-ssd-*; do
    if [ -e "${ssd}" ]; then
      local devicenum
      devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
      if [[ "${i}" -lt "${scsiblocknum}" ]]; then
        mount-ext "${ssd}" "${devicenum}" "scsi" "block"
      else
        # GKE does not set NODE_LOCAL_SSDS so all non-block devices
        # are assumed to be filesystem devices
        mount-ext "${ssd}" "${devicenum}" "scsi" "fs"
      fi
      i=$((i+1))
    else
      echo "No local SCSI SSD disks found."
    fi
  done

  # The following mounts or symlinks NVMe devices
  get-local-disk-num "nvme" "block"
  local nvmeblocknum="${localdisknum}"
  get-local-disk-num "nvme" "fs"
  local nvmefsnum="${localdisknum}"
  # Check if NVMe SSD specified.
  if [ "${nvmeblocknum}" -eq "0" ] && [ "${nvmefsnum}" -eq "0" ]; then
    echo "No local NVMe SSD specified."
    return
  fi
  local i=0
  for ssd in /dev/nvme*; do
    if [ -e "${ssd}" ]; then
      # This workaround to find if the NVMe device is a disk is required because
      # the existing Google images does not expose NVMe devices in /dev/disk/by-id
      if [[ $(udevadm info --query=property --name="${ssd}" | grep DEVTYPE | sed "s/DEVTYPE=//") == "disk" ]]; then
        # shellcheck disable=SC2155
        local devicenum=$(echo "${ssd}" | sed -e 's/\/dev\/nvme0n\([0-9]*\)/\1/')
        if [[ "${i}" -lt "${nvmeblocknum}" ]]; then
          mount-ext "${ssd}" "${devicenum}" "nvme" "block"
        else
          mount-ext "${ssd}" "${devicenum}" "nvme" "fs"
        fi
        i=$((i+1))
      fi
    else
      echo "No local NVMe SSD disks found."
    fi
  done
}
# Local SSDs, if present, are used in a single RAID 0 array and directories that
# back ephemeral storage are mounted on them (kubelet root, container runtime
# root and pod logs).
# Local SSDs, if present, are used in a single RAID 0 array and directories that
# back ephemeral storage are mounted on them (kubelet root, container runtime
# root and pod logs).
function ensure-local-ssds-ephemeral-storage() {
  local devices=()
  # Get nvme devices
  for ssd in /dev/nvme*n*; do
    if [ -e "${ssd}" ]; then
      # This workaround to find if the NVMe device is a local SSD is required
      # because the existing Google images does not them in /dev/disk/by-id
      if [[ "$(lsblk -o MODEL -dn "${ssd}")" == "nvme_card" ]]; then
        devices+=("${ssd}")
      fi
    fi
  done
  if [ "${#devices[@]}" -eq 0 ]; then
    echo "No local NVMe SSD disks found."
    return
  fi

  local device="${devices[0]}"
  if [ "${#devices[@]}" -ne 1 ]; then
    # Multiple SSDs: reuse an existing md array if one is present, otherwise
    # assemble a new RAID 0 array from all of the devices.
    seen_arrays=(/dev/md/*)
    device=${seen_arrays[0]}
    echo "Setting RAID array with local SSDs on device ${device}"
    if [ ! -e "$device" ]; then
      device="/dev/md/0"
      echo "y" | mdadm --create "${device}" --level=0 --raid-devices=${#devices[@]} "${devices[@]}"
    fi
  fi

  local ephemeral_mountpoint="/mnt/stateful_partition/kube-ephemeral-ssd"
  safe-format-and-mount "${device}" "${ephemeral_mountpoint}"

  # mount container runtime root dir on SSD
  local container_runtime="${CONTAINER_RUNTIME:-docker}"
  systemctl stop "$container_runtime"
  # Some images remount the container runtime root dir.
  umount "/var/lib/${container_runtime}" || true
  # Move the container runtime's directory to the new location to preserve
  # preloaded images.
  if [ ! -d "${ephemeral_mountpoint}/${container_runtime}" ]; then
    mv "/var/lib/${container_runtime}" "${ephemeral_mountpoint}/${container_runtime}"
  fi
  safe-bind-mount "${ephemeral_mountpoint}/${container_runtime}" "/var/lib/${container_runtime}"
  systemctl start "$container_runtime"

  # mount kubelet root dir on SSD
  mkdir -p "${ephemeral_mountpoint}/kubelet"
  safe-bind-mount "${ephemeral_mountpoint}/kubelet" "/var/lib/kubelet"

  # mount pod logs root dir on SSD
  mkdir -p "${ephemeral_mountpoint}/log_pods"
  safe-bind-mount "${ephemeral_mountpoint}/log_pods" "/var/log/pods"
}
# Installs logrotate configuration files
function setup-logrotate() {
mkdir -p /etc/logrotate.d/
if [[ "${ENABLE_LOGROTATE_FILES:-true}" = "true" ]]; then
# Configure log rotation for all logs in /var/log, which is where k8s services
# are configured to write their log files. Whenever logrotate is ran, this
# config will:
# * rotate the log file if its size is > 100Mb OR if one day has elapsed
# * save rotated logs into a gzipped timestamped backup
# * log file timestamp (controlled by 'dateformat') includes seconds too. This
# ensures that logrotate can generate unique logfiles during each rotation
# (otherwise it skips rotation if 'maxsize' is reached multiple times in a
# day).
# * keep only 5 old (rotated) logs, and will discard older logs.
cat > /etc/logrotate.d/allvarlogs <<EOF
/var/log/*.log {
rotate ${LOGROTATE_FILES_MAX_COUNT:-5}
copytruncate
missingok
notifempty
compress
maxsize ${LOGROTATE_MAX_SIZE:-100M}
daily
dateext
dateformat -%Y%m%d-%s
create 0644 root root
}
EOF
fi
if [[ "${ENABLE_POD_LOG:-false}" = "true" ]]; then
# Configure log rotation for pod logs in /var/log/pods/NAMESPACE_NAME_UID.
cat > /etc/logrotate.d/allpodlogs <<EOF
/var/log/pods/*/*.log {
rotate ${POD_LOG_MAX_FILE:-5}
copytruncate
missingok
notifempty
compress
maxsize ${POD_LOG_MAX_SIZE:-5M}
daily
dateext
dateformat -%Y%m%d-%s
create 0644 root root
}
EOF
fi
}
# Finds the master PD device; returns it in MASTER_PD_DEVICE.
# Leaves MASTER_PD_DEVICE empty when the /dev/disk/by-id/google-master-pd
# symlink does not exist (i.e. no master PD is attached).
function find-master-pd {
MASTER_PD_DEVICE=""
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
# NOTE(review): this recovers the symlink target by parsing `ls -l` output
# (everything after the last space), yielding a path like
# "/dev/disk/by-id/../../sdb". `readlink -f` would be sturdier, but it would
# change the string stored in MASTER_PD_DEVICE — confirm no consumer compares
# the exact path before switching.
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
# safe-format-and-mount only formats an unformatted disk, and mkdir -p will
# leave a directory be if it already exists.
#
# No-ops when find-master-pd finds no attached master PD. After mounting,
# directories on the PD are symlinked into their conventional locations
# (/var/etcd, /etc/srv/kubernetes, /etc/srv/sshproxy) so the rest of the
# system is agnostic to where the data actually lives.
function mount-master-pd {
find-master-pd
if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
return
fi
echo "Mounting master-pd"
local -r pd_path="/dev/disk/by-id/google-master-pd"
local -r mount_point="/mnt/disks/master-pd"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
mkdir -p "${mount_point}"
safe-format-and-mount "${pd_path}" "${mount_point}"
echo "Mounted master-pd '${pd_path}' at '${mount_point}'"
# NOTE: These locations on the PD store persistent data, so to maintain
# upgradeability, these locations should not change. If they do, take care
# to maintain a migration path from these locations to whatever new
# locations.
# Contains all the data stored in etcd.
# 700: etcd data includes secrets; restrict to the owner only.
mkdir -p "${mount_point}/var/etcd"
chmod 700 "${mount_point}/var/etcd"
ln -s -f "${mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Contains the dynamically generated apiserver auth certs and keys.
mkdir -p "${mount_point}/srv/kubernetes"
ln -s -f "${mount_point}/srv/kubernetes" /etc/srv/kubernetes
# Directory for kube-apiserver to store SSH key (if necessary).
mkdir -p "${mount_point}/srv/sshproxy"
ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy
# etcd runs as the 'etcd' user; make sure it owns its data directory.
chown -R etcd "${mount_point}/var/etcd"
chgrp -R etcd "${mount_point}/var/etcd"
}
# append_or_replace_prefixed_line ensures that ${file} exists and contains
# exactly one line beginning with ${prefix}:
#  1. the file is created if missing
#  2. every existing line starting with ${prefix} is removed
#  3. a single new line ${prefix}${suffix} is appended
function append_or_replace_prefixed_line {
  local -r target="${1:-}"
  local -r line_prefix="${2:-}"
  local -r line_suffix="${3:-}"
  local -r target_dir=$(dirname "${target}")
  # Scratch file lives in the same directory so the final mv is atomic
  # (same filesystem).
  local -r scratch=$(mktemp "${target_dir}/filtered.XXXX")
  touch "${target}"
  # Keep only lines that do NOT begin with the prefix. The comparison is a
  # literal substring match, not a regex, so prefixes may contain metacharacters.
  awk -v pfx="${line_prefix}" 'substr($0,1,length(pfx)) != pfx { print }' "${target}" > "${scratch}"
  echo "${line_prefix}${line_suffix}" >> "${scratch}"
  mv "${scratch}" "${target}"
}
# Decodes base64-encoded PKI material ($1) into the file at $2 with
# restrictive permissions. When KUBE_PKI_READERS_GROUP is set, the file is
# written group-readable (umask 027) and chgrp'd to that group; otherwise it
# is readable by the owner only (umask 077).
function write-pki-data {
  local encoded="${1}"
  local dest="${2}"
  # Remove any pre-existing file so stale modes/ownership never survive.
  rm -f "${dest}"
  local readers_group="${KUBE_PKI_READERS_GROUP:-}"
  if [[ -z "${readers_group}" ]]; then
    # Owner-only: the subshell keeps the umask change from leaking out.
    (umask 077; echo "${encoded}" | base64 --decode > "${dest}")
  else
    (umask 027; echo "${encoded}" | base64 --decode > "${dest}")
    chgrp "${readers_group}" "${dest}"
    chmod g+r "${dest}"
  fi
}
# Creates the node's PKI files under /etc/srv/kubernetes/pki.
#
# Writes (via write-pki-data, so with restrictive modes):
#  * ca-certificates.crt — CA_CERT_BUNDLE, falling back to CA_CERT
#  * kubelet.crt / kubelet.key — only when both KUBELET_CERT and KUBELET_KEY
#    are provided
#  * konnectivity-agent client CA/cert/key — only in 'http-connect' proxy mode
# Exports the *_PATH variables for later kubeconfig/manifest generation.
function create-node-pki {
echo "Creating node pki files"
local -r pki_dir="/etc/srv/kubernetes/pki"
mkdir -p "${pki_dir}"
if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then
CA_CERT_BUNDLE="${CA_CERT}"
fi
CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
write-pki-data "${CA_CERT_BUNDLE}" "${CA_CERT_BUNDLE_PATH}"
if [[ -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
write-pki-data "${KUBELET_CERT}" "${KUBELET_CERT_PATH}"
KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
write-pki-data "${KUBELET_KEY}" "${KUBELET_KEY_PATH}"
fi
# http-connect mode authenticates the konnectivity agent to the server with
# client certs; grpc mode (default) does not need these files.
if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
mkdir -p "${pki_dir}/konnectivity-agent"
KONNECTIVITY_AGENT_CA_CERT_PATH="${pki_dir}/konnectivity-agent/ca.crt"
KONNECTIVITY_AGENT_CLIENT_KEY_PATH="${pki_dir}/konnectivity-agent/client.key"
KONNECTIVITY_AGENT_CLIENT_CERT_PATH="${pki_dir}/konnectivity-agent/client.crt"
write-pki-data "${KONNECTIVITY_AGENT_CA_CERT}" "${KONNECTIVITY_AGENT_CA_CERT_PATH}"
write-pki-data "${KONNECTIVITY_AGENT_CLIENT_KEY}" "${KONNECTIVITY_AGENT_CLIENT_KEY_PATH}"
write-pki-data "${KONNECTIVITY_AGENT_CLIENT_CERT}" "${KONNECTIVITY_AGENT_CLIENT_CERT_PATH}"
fi
}
# Creates the master's PKI files under /etc/srv/kubernetes/pki.
#
# Decodes base64 material from the environment via write-pki-data (which
# enforces restrictive file modes) and exports the corresponding *_PATH
# variables for manifest generation. Fallbacks:
#  * apiserver serving cert/key   -> MASTER_CERT / MASTER_KEY
#  * apiserver client cert/key    -> KUBEAPISERVER_CERT / KUBEAPISERVER_KEY
#  * service-account cert/key     -> MASTER_CERT / MASTER_KEY
# Optional sections (written only when their env vars are set): CA key,
# old apiserver cert/key, aggregator/request-header CA + proxy client pair,
# konnectivity server and agent cert sets.
function create-master-pki {
  echo "Creating master pki files"
  local -r pki_dir="/etc/srv/kubernetes/pki"
  mkdir -p "${pki_dir}"
  CA_CERT_PATH="${pki_dir}/ca.crt"
  write-pki-data "${CA_CERT}" "${CA_CERT_PATH}"
  # this is not true on GKE
  if [[ -n "${CA_KEY:-}" ]]; then
    CA_KEY_PATH="${pki_dir}/ca.key"
    write-pki-data "${CA_KEY}" "${CA_KEY_PATH}"
  fi
  if [[ -z "${APISERVER_SERVER_CERT:-}" || -z "${APISERVER_SERVER_KEY:-}" ]]; then
    APISERVER_SERVER_CERT="${MASTER_CERT}"
    APISERVER_SERVER_KEY="${MASTER_KEY}"
  fi
  APISERVER_SERVER_CERT_PATH="${pki_dir}/apiserver.crt"
  write-pki-data "${APISERVER_SERVER_CERT}" "${APISERVER_SERVER_CERT_PATH}"
  APISERVER_SERVER_KEY_PATH="${pki_dir}/apiserver.key"
  write-pki-data "${APISERVER_SERVER_KEY}" "${APISERVER_SERVER_KEY_PATH}"
  if [[ -z "${APISERVER_CLIENT_CERT:-}" || -z "${APISERVER_CLIENT_KEY:-}" ]]; then
    APISERVER_CLIENT_CERT="${KUBEAPISERVER_CERT}"
    APISERVER_CLIENT_KEY="${KUBEAPISERVER_KEY}"
  fi
  APISERVER_CLIENT_CERT_PATH="${pki_dir}/apiserver-client.crt"
  write-pki-data "${APISERVER_CLIENT_CERT}" "${APISERVER_CLIENT_CERT_PATH}"
  APISERVER_CLIENT_KEY_PATH="${pki_dir}/apiserver-client.key"
  write-pki-data "${APISERVER_CLIENT_KEY}" "${APISERVER_CLIENT_KEY_PATH}"
  if [[ -z "${SERVICEACCOUNT_CERT:-}" || -z "${SERVICEACCOUNT_KEY:-}" ]]; then
    SERVICEACCOUNT_CERT="${MASTER_CERT}"
    SERVICEACCOUNT_KEY="${MASTER_KEY}"
  fi
  if [[ -n "${OLD_MASTER_CERT:-}" && -n "${OLD_MASTER_KEY:-}" ]]; then
    OLD_MASTER_CERT_PATH="${pki_dir}/oldapiserver.crt"
    OLD_MASTER_KEY_PATH="${pki_dir}/oldapiserver.key"
    # Fix: these were previously written with a bare `echo | base64 --decode`
    # under the process's default umask, leaving the old apiserver private key
    # with looser permissions than every other key in this directory. Route
    # them through write-pki-data for consistent, restrictive modes.
    write-pki-data "${OLD_MASTER_CERT}" "${OLD_MASTER_CERT_PATH}"
    write-pki-data "${OLD_MASTER_KEY}" "${OLD_MASTER_KEY_PATH}"
  fi
  SERVICEACCOUNT_CERT_PATH="${pki_dir}/serviceaccount.crt"
  write-pki-data "${SERVICEACCOUNT_CERT}" "${SERVICEACCOUNT_CERT_PATH}"
  SERVICEACCOUNT_KEY_PATH="${pki_dir}/serviceaccount.key"
  write-pki-data "${SERVICEACCOUNT_KEY}" "${SERVICEACCOUNT_KEY_PATH}"
  # Aggregation layer: request-header CA plus the proxy client pair the
  # apiserver uses to talk to extension apiservers.
  if [[ -n "${REQUESTHEADER_CA_CERT:-}" ]]; then
    REQUESTHEADER_CA_CERT_PATH="${pki_dir}/aggr_ca.crt"
    write-pki-data "${REQUESTHEADER_CA_CERT}" "${REQUESTHEADER_CA_CERT_PATH}"
    PROXY_CLIENT_KEY_PATH="${pki_dir}/proxy_client.key"
    write-pki-data "${PROXY_CLIENT_KEY}" "${PROXY_CLIENT_KEY_PATH}"
    PROXY_CLIENT_CERT_PATH="${pki_dir}/proxy_client.crt"
    write-pki-data "${PROXY_CLIENT_CERT}" "${PROXY_CLIENT_CERT_PATH}"
  fi
  if [[ -n "${KONNECTIVITY_SERVER_CA_CERT:-}" ]]; then
    mkdir -p "${pki_dir}"/konnectivity-server
    KONNECTIVITY_SERVER_CA_CERT_PATH="${pki_dir}/konnectivity-server/ca.crt"
    write-pki-data "${KONNECTIVITY_SERVER_CA_CERT}" "${KONNECTIVITY_SERVER_CA_CERT_PATH}"
    KONNECTIVITY_SERVER_KEY_PATH="${pki_dir}/konnectivity-server/server.key"
    write-pki-data "${KONNECTIVITY_SERVER_KEY}" "${KONNECTIVITY_SERVER_KEY_PATH}"
    KONNECTIVITY_SERVER_CERT_PATH="${pki_dir}/konnectivity-server/server.crt"
    write-pki-data "${KONNECTIVITY_SERVER_CERT}" "${KONNECTIVITY_SERVER_CERT_PATH}"
    KONNECTIVITY_SERVER_CLIENT_KEY_PATH="${pki_dir}/konnectivity-server/client.key"
    write-pki-data "${KONNECTIVITY_SERVER_CLIENT_KEY}" "${KONNECTIVITY_SERVER_CLIENT_KEY_PATH}"
    KONNECTIVITY_SERVER_CLIENT_CERT_PATH="${pki_dir}/konnectivity-server/client.crt"
    write-pki-data "${KONNECTIVITY_SERVER_CLIENT_CERT}" "${KONNECTIVITY_SERVER_CLIENT_CERT_PATH}"
  fi
  if [[ -n "${KONNECTIVITY_AGENT_CA_CERT:-}" ]]; then
    mkdir -p "${pki_dir}"/konnectivity-agent
    KONNECTIVITY_AGENT_CA_KEY_PATH="${pki_dir}/konnectivity-agent/ca.key"
    write-pki-data "${KONNECTIVITY_AGENT_CA_KEY}" "${KONNECTIVITY_AGENT_CA_KEY_PATH}"
    KONNECTIVITY_AGENT_CA_CERT_PATH="${pki_dir}/konnectivity-agent/ca.crt"
    write-pki-data "${KONNECTIVITY_AGENT_CA_CERT}" "${KONNECTIVITY_AGENT_CA_CERT_PATH}"
    KONNECTIVITY_AGENT_KEY_PATH="${pki_dir}/konnectivity-agent/server.key"
    write-pki-data "${KONNECTIVITY_AGENT_KEY}" "${KONNECTIVITY_AGENT_KEY_PATH}"
    KONNECTIVITY_AGENT_CERT_PATH="${pki_dir}/konnectivity-agent/server.crt"
    write-pki-data "${KONNECTIVITY_AGENT_CERT}" "${KONNECTIVITY_AGENT_CERT_PATH}"
  fi
}
# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.) One exception is if METADATA_CLOBBERS_CONFIG is
# enabled.
#
# Generates the master's static auth and cloud-provider configuration:
#  * /etc/srv/kubernetes/known_tokens.csv — one "token,user,uid[,groups]" row
#    per component token found in the environment
#  * /etc/gce.conf — GCE cloud-provider config (removed again if no optional
#    section was emitted)
#  * /etc/gcp_authn.config, /etc/gcp_authz.config — webhook kubeconfigs
#  * /etc/srv/kubernetes/egress_selector_configuration.yaml — konnectivity
#    egress selection (grpc or http-connect mode)
#  * /etc/srv/kubernetes/webhook.kubeconfig — GKE exec-auth webhook client
#  * /etc/srv/kubernetes/admission_controller_config.yaml — admission plugins
function create-master-auth {
echo "Creating master auth files"
local -r auth_dir="/etc/srv/kubernetes"
local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
# METADATA_CLOBBERS_CONFIG=true means metadata is authoritative: start the
# token file from scratch instead of merging into the one on the master-pd.
if [[ -e "${known_tokens_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
rm "${known_tokens_csv}"
fi
if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN},"             "admin,admin,system:masters"
fi
if [[ -n "${KUBE_BOOTSTRAP_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BOOTSTRAP_TOKEN},"          "gcp:kube-bootstrap,uid:gcp:kube-bootstrap,system:masters"
fi
if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
fi
if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN},"          "system:kube-scheduler,uid:system:kube-scheduler"
fi
if [[ -n "${KUBE_CLUSTER_AUTOSCALER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}," "cluster-autoscaler,uid:cluster-autoscaler"
fi
if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN},"              "system:kube-proxy,uid:kube_proxy"
fi
if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${NODE_PROBLEM_DETECTOR_TOKEN},"   "system:node-problem-detector,uid:node-problem-detector"
fi
if [[ -n "${GCE_GLBC_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${GCE_GLBC_TOKEN},"                "system:controller:glbc,uid:system:controller:glbc"
fi
if [[ -n "${ADDON_MANAGER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${ADDON_MANAGER_TOKEN},"           "system:addon-manager,uid:system:addon-manager,system:masters"
fi
if [[ -n "${KONNECTIVITY_SERVER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KONNECTIVITY_SERVER_TOKEN},"     "system:konnectivity-server,uid:system:konnectivity-server"
create-kubeconfig "konnectivity-server" "${KONNECTIVITY_SERVER_TOKEN}"
fi
if [[ -n "${MONITORING_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${MONITORING_TOKEN},"     "system:monitoring,uid:system:monitoring,system:monitoring"
fi
if [[ -n "${EXTRA_STATIC_AUTH_COMPONENTS:-}" ]]; then
# Create a static Bearer token and kubeconfig for extra, comma-separated components.
IFS="," read -r -a extra_components <<< "${EXTRA_STATIC_AUTH_COMPONENTS:-}"
for extra_component in "${extra_components[@]}"; do
local token
token="$(secure_random 32)"
append_or_replace_prefixed_line "${known_tokens_csv}" "${token}," "system:${extra_component},uid:system:${extra_component}"
create-kubeconfig "${extra_component}" "${token}"
done
fi
# Build /etc/gce.conf incrementally: each optional section flips
# use_cloud_config to "true"; if nothing but the [global] header was written,
# the file is deleted again at the end.
local use_cloud_config="false"
cat <<EOF >/etc/gce.conf
[global]
EOF
if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then
cat <<EOF >>/etc/gce.conf
api-endpoint = ${GCE_API_ENDPOINT}
EOF
fi
if [[ -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
fi
if [[ -n "${CONTAINER_API_ENDPOINT:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
container-api-endpoint = ${CONTAINER_API_ENDPOINT}
EOF
fi
if [[ -n "${PROJECT_ID:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
project-id = ${PROJECT_ID}
EOF
fi
if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
network-project-id = ${NETWORK_PROJECT_ID}
EOF
fi
if [[ -n "${STACK_TYPE:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
stack-type = ${STACK_TYPE}
EOF
fi
if [[ -n "${NODE_NETWORK:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
network-name = ${NODE_NETWORK}
EOF
fi
if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
EOF
fi
if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
use_cloud_config="true"
if [[ -n "${NODE_TAGS:-}" ]]; then
# split NODE_TAGS into an array by comma.
IFS=',' read -r -a node_tags <<< "${NODE_TAGS}"
else
local -r node_tags=("${NODE_INSTANCE_PREFIX}")
fi
cat <<EOF >>/etc/gce.conf
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
# gce.conf accepts repeated node-tags keys, one per tag.
for tag in "${node_tags[@]}"; do
cat <<EOF >>/etc/gce.conf
node-tags = ${tag}
EOF
done
fi
if [[ -n "${MULTIZONE:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
fi
# Multimaster indicates that the cluster is HA.
# Currently the only HA clusters are regional.
# If we introduce zonal multimaster this will need to be revisited.
if [[ -n "${MULTIMASTER:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
regional = ${MULTIMASTER}
EOF
fi
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
use_cloud_config="true"
# split GCE_ALPHA_FEATURES into an array by comma.
IFS=',' read -r -a alpha_features <<< "${GCE_ALPHA_FEATURES}"
for feature in "${alpha_features[@]}"; do
cat <<EOF >>/etc/gce.conf
alpha-features = ${feature}
EOF
done
fi
if [[ -n "${SECONDARY_RANGE_NAME:-}" ]]; then
use_cloud_config="true"
cat <<EOF >> /etc/gce.conf
secondary-range-name = ${SECONDARY_RANGE_NAME}
EOF
fi
if [[ "${use_cloud_config}" != "true" ]]; then
rm -f /etc/gce.conf
fi
# Webhook kubeconfig pointing the apiserver at the GCP authentication server.
if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authn.config
clusters:
- name: gcp-authentication-server
cluster:
server: ${GCP_AUTHN_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authentication-server
user: kube-apiserver
name: webhook
EOF
fi
# Webhook kubeconfig pointing the apiserver at the GCP authorization server.
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authz.config
clusters:
- name: gcp-authorization-server
cluster:
server: ${GCP_AUTHZ_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authorization-server
user: kube-apiserver
name: webhook
EOF
fi
# Egress selector config for the konnectivity service: cluster traffic is
# proxied (over a UDS gRPC socket, or an mTLS http-connect tunnel), while
# controlplane and etcd traffic stays direct.
if [[ "${PREPARE_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'grpc' ]]; then
cat <<EOF >/etc/srv/kubernetes/egress_selector_configuration.yaml
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
- name: cluster
connection:
proxyProtocol: GRPC
transport:
uds:
udsName: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket
- name: controlplane
connection:
proxyProtocol: Direct
- name: etcd
connection:
proxyProtocol: Direct
EOF
elif [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
cat <<EOF >/etc/srv/kubernetes/egress_selector_configuration.yaml
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
- name: cluster
connection:
proxyProtocol: HTTPConnect
transport:
tcp:
url: https://127.0.0.1:8131
tlsConfig:
caBundle: /etc/srv/kubernetes/pki/konnectivity-server/ca.crt
clientKey: /etc/srv/kubernetes/pki/konnectivity-server/client.key
clientCert: /etc/srv/kubernetes/pki/konnectivity-server/client.crt
- name: controlplane
connection:
proxyProtocol: Direct
- name: etcd
connection:
proxyProtocol: Direct
EOF
else
echo "KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE must be set to either grpc or http-connect"
exit 1
fi
fi
if [[ -n "${WEBHOOK_GKE_EXEC_AUTH:-}" ]]; then
if [[ -z "${EXEC_AUTH_PLUGIN_URL:-}" ]]; then
1>&2 echo "You requested GKE exec auth support for webhooks, but EXEC_AUTH_PLUGIN_URL was not specified.  This configuration depends on gke-exec-auth-plugin for authenticating to the webhook endpoint."
exit 1
fi
if [[ -z "${TOKEN_URL:-}" || -z "${TOKEN_BODY:-}" || -z "${TOKEN_BODY_UNQUOTED:-}" ]]; then
1>&2 echo "You requested GKE exec auth support for webhooks, but TOKEN_URL, TOKEN_BODY, and TOKEN_BODY_UNQUOTED were not provided.  gke-exec-auth-plugin requires these values for its configuration."
exit 1
fi
# kubeconfig to be used by webhooks with GKE exec auth support. Note that
# the path to gke-exec-auth-plugin is the path when mounted inside the
# kube-apiserver pod.
cat <<EOF >/etc/srv/kubernetes/webhook.kubeconfig
apiVersion: v1
kind: Config
users:
- name: '*.googleapis.com'
user:
exec:
apiVersion: "client.authentication.k8s.io/v1alpha1"
command: /usr/bin/gke-exec-auth-plugin
args:
- --mode=alt-token
- --alt-token-url=${TOKEN_URL}
- --alt-token-body=${TOKEN_BODY_UNQUOTED}
EOF
fi
if [[ -n "${ADMISSION_CONTROL:-}" ]]; then
# Emit a basic admission control configuration file, with no plugins specified.
cat <<EOF >/etc/srv/kubernetes/admission_controller_config.yaml
apiVersion: apiserver.k8s.io/v1alpha1
kind: AdmissionConfiguration
plugins:
EOF
# Add resourcequota config to limit critical pods to kube-system by default
cat <<EOF >>/etc/srv/kubernetes/admission_controller_config.yaml
- name: "ResourceQuota"
configuration:
apiVersion: apiserver.config.k8s.io/v1
kind: ResourceQuotaConfiguration
limitedResources:
- resource: pods
matchScopes:
- scopeName: PriorityClass
operator: In
values: ["system-node-critical", "system-cluster-critical"]
EOF
if [[ "${ADMISSION_CONTROL:-}" == *"ImagePolicyWebhook"* ]]; then
if [[ -z "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
1>&2 echo "The ImagePolicyWebhook admission control plugin was requested, but GCP_IMAGE_VERIFICATION_URL was not provided."
exit 1
fi
1>&2 echo "ImagePolicyWebhook admission control plugin requested.  Configuring it to point at ${GCP_IMAGE_VERIFICATION_URL}"
# ImagePolicyWebhook does not use gke-exec-auth-plugin for authenticating
# to the webhook endpoint. Emit its special kubeconfig.
cat <<EOF >/etc/srv/kubernetes/gcp_image_review.kubeconfig
clusters:
- name: gcp-image-review-server
cluster:
server: ${GCP_IMAGE_VERIFICATION_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-image-review-server
user: kube-apiserver
name: webhook
EOF
# Append config for ImagePolicyWebhook to the shared admission controller
# configuration file.
cat <<EOF >>/etc/srv/kubernetes/admission_controller_config.yaml
- name: ImagePolicyWebhook
configuration:
imagePolicy:
kubeConfigFile: /etc/srv/kubernetes/gcp_image_review.kubeconfig
allowTTL: 30
denyTTL: 30
retryBackoff: 500
defaultAllow: true
EOF
fi
# If GKE exec auth for webhooks has been requested, then
# ValidatingAdmissionWebhook should use it. Otherwise, run with the default
# config.
if [[ -n "${WEBHOOK_GKE_EXEC_AUTH:-}" ]]; then
1>&2 echo "ValidatingAdmissionWebhook requested, and WEBHOOK_GKE_EXEC_AUTH specified.  Configuring ValidatingAdmissionWebhook to use gke-exec-auth-plugin."
# Append config for ValidatingAdmissionWebhook to the shared admission
# controller configuration file.
cat <<EOF >>/etc/srv/kubernetes/admission_controller_config.yaml
- name: ValidatingAdmissionWebhook
configuration:
apiVersion: apiserver.config.k8s.io/v1alpha1
kind: WebhookAdmission
kubeConfigFile: /etc/srv/kubernetes/webhook.kubeconfig
EOF
fi
fi
}
# Write the config for the audit policy.
#
# Args:
#   $1 - destination path for the policy file
#   $2 - (optional) a complete policy document; when provided it is written
#        verbatim and the built-in default policy below is skipped.
# The default policy drops known high-volume/low-risk requests, logs
# secrets/configmaps/token requests at Metadata level only, and falls back to
# RequestResponse for known API groups and Metadata for everything else.
function create-master-audit-policy {
local -r path="${1}"
local -r policy="${2:-}"
if [[ -n "${policy}" ]]; then
echo "${policy}" > "${path}"
return
fi
# Known api groups
local -r known_apis='
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "node.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "scheduling.k8s.io"
      - group: "storage.k8s.io"'
# The heredoc below is the default policy; ${known_apis} is interpolated into
# the last two rules.
cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  - level: None
    users: ["cluster-autoscaler"]
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["configmaps", "endpoints"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*
  # Don't log events requests because of performance impact.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]
  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  - level: Request
    userGroups: ["system:nodes"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    users: ["system:serviceaccount:kube-system:namespace-controller"]
    verbs: ["deletecollection"]
    omitStages:
      - "RequestReceived"
  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps", "serviceaccounts/token"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
EOF
}
# Writes the configuration file used by the webhook advanced auditing backend.
#
# Args:
#   $1 - destination path for the webhook kubeconfig
# No-ops when GCP_AUDIT_URL is unset.
function create-master-audit-webhook-config {
local -r path="${1}"
if [[ -n "${GCP_AUDIT_URL:-}" ]]; then
# The webhook config file is a kubeconfig file describing the webhook endpoint.
cat <<EOF >"${path}"
clusters:
  - name: gcp-audit-server
    cluster:
      server: ${GCP_AUDIT_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-audit-server
    user: kube-apiserver
  name: webhook
EOF
fi
}
# Creates a token-based kubeconfig for a control-plane component.
#
# Args:
#   $1 - component name (also used as the user/context name and directory)
#   $2 - bearer token for that component
# Writes /etc/srv/kubernetes/${component}/kubeconfig. When the GKE-internal
# configure helper is installed, kubeconfig generation is delegated to it.
function create-kubeconfig {
local component=$1
local token=$2
local path="/etc/srv/kubernetes/${component}/kubeconfig"
mkdir -p "/etc/srv/kubernetes/${component}"
if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
gke-internal-create-kubeconfig "${component}" "${token}" "${path}"
else
echo "Creating kubeconfig file for component ${component}"
# Talks to the local apiserver over the loopback; TLS verification is
# skipped since the connection never leaves the host.
cat <<EOF >"${path}"
apiVersion: v1
kind: Config
users:
- name: ${component}
  user:
    token: ${token}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: ${component}
  name: ${component}
current-context: ${component}
EOF
fi
}
# Arg 1: the IP address of the API server
#
# Produces the kubelet's (bootstrap-)kubeconfig, choosing one of three modes:
#  * CREATE_BOOTSTRAP_KUBECONFIG=true (default): write a cert-based
#    bootstrap-kubeconfig referencing KUBELET_CERT_PATH/KUBELET_KEY_PATH
#  * FETCH_BOOTSTRAP_KUBECONFIG=true: pull bootstrap-kubeconfig from
#    instance metadata
#  * otherwise: pull a final kubeconfig from instance metadata
# Exits the script if no apiserver address was supplied.
function create-kubelet-kubeconfig() {
local apiserver_address="${1}"
if [[ -z "${apiserver_address}" ]]; then
echo "Must provide API server address to create Kubelet kubeconfig file!"
exit 1
fi
if [[ "${CREATE_BOOTSTRAP_KUBECONFIG:-true}" == "true" ]]; then
echo "Creating kubelet bootstrap-kubeconfig file"
cat <<EOF >/var/lib/kubelet/bootstrap-kubeconfig
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate: ${KUBELET_CERT_PATH}
    client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
  cluster:
    server: https://${apiserver_address}
    certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
EOF
elif [[ "${FETCH_BOOTSTRAP_KUBECONFIG:-false}" == "true" ]]; then
echo "Fetching kubelet bootstrap-kubeconfig file from metadata"
get-metadata-value "instance/attributes/bootstrap-kubeconfig" >/var/lib/kubelet/bootstrap-kubeconfig
else
echo "Fetching kubelet kubeconfig file from metadata"
get-metadata-value "instance/attributes/kubeconfig" >/var/lib/kubelet/kubeconfig
fi
}
# Configures the kubelet that runs on the master node.
#
# The master kubelet is set up only when all of KUBELET_APISERVER,
# KUBELET_CERT and KUBELET_KEY are present in the environment; in that case
# REGISTER_MASTER_KUBELET is flipped to "true" and a kubelet kubeconfig is
# generated so the master kubelet can register with the apiserver. Otherwise
# this is a no-op.
function create-master-kubelet-auth {
  if [[ -z "${KUBELET_APISERVER:-}" || -z "${KUBELET_CERT:-}" || -z "${KUBELET_KEY:-}" ]]; then
    return 0
  fi
  REGISTER_MASTER_KUBELET="true"
  create-kubelet-kubeconfig "${KUBELET_APISERVER}"
}
# Writes /var/lib/kube-proxy/kubeconfig: a token-based kubeconfig for
# kube-proxy using KUBE_PROXY_TOKEN and the inline CA bundle CA_CERT_BUNDLE.
function create-kubeproxy-user-kubeconfig {
echo "Creating kube-proxy user kubeconfig file"
cat <<EOF >/var/lib/kube-proxy/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
}
# Writes the kube-scheduler component config
# (/etc/srv/kubernetes/kube-scheduler/config) from the KUBE_SCHEDULER_CONFIG
# environment variable, verbatim.
function create-kube-scheduler-config {
echo "Creating kube-scheduler config file"
mkdir -p /etc/srv/kubernetes/kube-scheduler
cat <<EOF >/etc/srv/kubernetes/kube-scheduler/config
${KUBE_SCHEDULER_CONFIG}
EOF
}
# TODO(#92143): Remove legacy policy config creation once kube-scheduler config is GA.
# Writes the legacy scheduler policy file
# (/etc/srv/kubernetes/kube-scheduler/policy-config) from the
# SCHEDULER_POLICY_CONFIG environment variable, verbatim.
function create-kubescheduler-policy-config {
echo "Creating kube-scheduler policy config file"
mkdir -p /etc/srv/kubernetes/kube-scheduler
cat <<EOF >/etc/srv/kubernetes/kube-scheduler/policy-config
${SCHEDULER_POLICY_CONFIG}
EOF
}
# Writes /var/lib/node-problem-detector/kubeconfig: a token-based kubeconfig
# for node-problem-detector using NODE_PROBLEM_DETECTOR_TOKEN and CA_CERT.
#
# Args:
#   $1 - the IP address (or host) of the API server; exits the script if empty.
function create-node-problem-detector-kubeconfig {
local apiserver_address="${1}"
if [[ -z "${apiserver_address}" ]]; then
echo "Must provide API server address to create node-problem-detector kubeconfig file!"
exit 1
fi
echo "Creating node-problem-detector kubeconfig file"
mkdir -p /var/lib/node-problem-detector
cat <<EOF >/var/lib/node-problem-detector/kubeconfig
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
  user:
    token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: local
  cluster:
    server: https://${apiserver_address}
    certificate-authority-data: ${CA_CERT}
contexts:
- context:
    cluster: local
    user: node-problem-detector
  name: service-account-context
current-context: service-account-context
EOF
}
# Reuses the kubelet's credentials for node-problem-detector by copying
# /var/lib/kubelet/kubeconfig into /var/lib/node-problem-detector/.
# Assumes the kubelet kubeconfig already exists.
function create-node-problem-detector-kubeconfig-from-kubelet {
echo "Creating node-problem-detector kubeconfig from /var/lib/kubelet/kubeconfig"
mkdir -p /var/lib/node-problem-detector
cp /var/lib/kubelet/kubeconfig /var/lib/node-problem-detector/kubeconfig
}
# Writes the etcd peer-to-peer TLS material into /etc/srv/kubernetes when all
# of ETCD_CA_CERT, ETCD_PEER_KEY, and ETCD_PEER_CERT are set; otherwise no-op.
# Note: the CA cert and peer cert are base64+gzip encoded in the env; the key
# is base64 only.
function create-master-etcd-auth {
if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
local -r auth_dir="/etc/srv/kubernetes"
echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt"
echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key"
echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt"
fi
}
# Writes the etcd<->apiserver TLS material into /etc/srv/kubernetes/pki and
# exports the *_PATH variables. Certs are base64+gzip encoded in the env;
# keys are base64 only.
#
# NOTE(review): the guard checks the CA cert, server pair and client pair but
# not ETCD_APISERVER_CA_KEY, which is then used unconditionally — if this
# script runs with `set -u` and that variable is unset while the others are
# set, the function would abort. Confirm whether CA key is guaranteed to
# accompany the CA cert before tightening the guard.
function create-master-etcd-apiserver-auth {
if [[ -n "${ETCD_APISERVER_CA_CERT:-}" && -n "${ETCD_APISERVER_SERVER_KEY:-}" && -n "${ETCD_APISERVER_SERVER_CERT:-}" && -n "${ETCD_APISERVER_CLIENT_KEY:-}" && -n "${ETCD_APISERVER_CLIENT_CERT:-}" ]]; then
local -r auth_dir="/etc/srv/kubernetes/pki"
ETCD_APISERVER_CA_KEY_PATH="${auth_dir}/etcd-apiserver-ca.key"
echo "${ETCD_APISERVER_CA_KEY}" | base64 --decode > "${ETCD_APISERVER_CA_KEY_PATH}"
# Keep in sync with add-replica-to-etcd/remove-replica-from-etcd in util.sh.
ETCD_APISERVER_CA_CERT_PATH="${auth_dir}/etcd-apiserver-ca.crt"
echo "${ETCD_APISERVER_CA_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_CA_CERT_PATH}"
ETCD_APISERVER_SERVER_KEY_PATH="${auth_dir}/etcd-apiserver-server.key"
echo "${ETCD_APISERVER_SERVER_KEY}" | base64 --decode > "${ETCD_APISERVER_SERVER_KEY_PATH}"
ETCD_APISERVER_SERVER_CERT_PATH="${auth_dir}/etcd-apiserver-server.crt"
echo "${ETCD_APISERVER_SERVER_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_SERVER_CERT_PATH}"
# Keep in sync with add-replica-to-etcd/remove-replica-from-etcd in util.sh.
ETCD_APISERVER_CLIENT_KEY_PATH="${auth_dir}/etcd-apiserver-client.key"
echo "${ETCD_APISERVER_CLIENT_KEY}" | base64 --decode > "${ETCD_APISERVER_CLIENT_KEY_PATH}"
# Keep in sync with add-replica-to-etcd/remove-replica-from-etcd in util.sh.
ETCD_APISERVER_CLIENT_CERT_PATH="${auth_dir}/etcd-apiserver-client.crt"
echo "${ETCD_APISERVER_CLIENT_CERT}" | base64 --decode | gunzip > "${ETCD_APISERVER_CLIENT_CERT_PATH}"
fi
}
# Returns 0 when systemd knows about a docker.service unit, 1 otherwise.
function docker-installed {
  if ! systemctl cat docker.service &> /dev/null ; then
    return 1
  fi
  return 0
}
# Appends a single docker option line ($1) to /etc/docker/daemon.json, but
# only when that file already exists. Exits the whole script when called with
# a wrong number of arguments.
function addockeropt {
  DOCKER_OPTS_FILE=/etc/docker/daemon.json
  if [ "$#" -lt 1 ]; then
    echo "No arguments are passed while adding docker options. Expect one argument"
    exit 1
  fi
  if [ "$#" -gt 1 ]; then
    echo "Only one argument is accepted"
    exit 1
  fi
  # Append the option (which may span multiple lines) plus a trailing newline.
  if [ -f "$DOCKER_OPTS_FILE" ]; then
    echo "$1" >> "${DOCKER_OPTS_FILE}"
  fi
}
# Unloads the aufs kernel module when it is currently loaded; no-op otherwise.
function disable_aufs() {
  if lsmod | grep -q "aufs"; then
    sudo modprobe -r aufs
  fi
}
# Adds the docker mtu, storage-driver, and live-restore options that Ubuntu
# already ships as defaults; on Ubuntu this function intentionally does
# nothing to avoid duplicating them.
function set_docker_options_non_ubuntu() {
  if [[ -n "$(command -v lsb_release)" && $(lsb_release -si) == "Ubuntu" ]]; then
    echo "Not adding docker options on ubuntu, as these are default on ubuntu. Bailing out..."
    return
  fi
  addockeropt "\"mtu\": 1460,"
  addockeropt "\"storage-driver\": \"overlay2\","
  echo "setting live restore"
  # DISABLE_DOCKER_LIVE_RESTORE=true turns live-restore off; it is on by default.
  local live_restore="true"
  if [[ "${DISABLE_DOCKER_LIVE_RESTORE:-false}" == "true" ]]; then
    live_restore="false"
  fi
  addockeropt "\"live-restore\": ${live_restore},"
}
# Assembles /etc/docker/daemon.json and /etc/default/docker for the docker
# runtime, raises the systemd TasksMax limit for docker.service, and restarts
# docker so the new configuration takes effect.
function assemble-docker-flags {
  echo "Assemble docker options"
  # log the contents of the /etc/docker/daemon.json if already exists
  if [ -f /etc/docker/daemon.json ]; then
    echo "Contents of the old docker config"
    cat /etc/docker/daemon.json
  fi
  # Start a fresh daemon.json; addockeropt appends fragments below and the
  # closing brace is written near the end of this function.
  cat <<EOF >/etc/docker/daemon.json
{
EOF
  addockeropt "\"pidfile\": \"/var/run/docker.pid\",
\"iptables\": false,
\"ip-masq\": false,"
  echo "setting log-level"
  if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
    addockeropt "\"log-level\": \"debug\","
  else
    addockeropt "\"log-level\": \"warn\","
  fi
  echo "setting network bridge"
  if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
    # set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
    addockeropt "\"bip\": \"169.254.123.1/24\","
  else
    addockeropt "\"bridge\": \"cbr0\","
  fi
  echo "setting registry mirror"
  # TODO (vteratipally) move the registry-mirror completely to /etc/docker/daemon.json
  # docker_opts collects legacy command-line flags for /etc/default/docker.
  local docker_opts=""
  # Decide whether to enable a docker registry mirror. This is taken from
  # the "kube-env" metadata value.
  if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
    docker_opts+="--registry-mirror=${DOCKER_REGISTRY_MIRROR_URL} "
  fi
  disable_aufs
  set_docker_options_non_ubuntu
  echo "setting docker logging options"
  # Configure docker logging
  addockeropt "\"log-driver\": \"${DOCKER_LOG_DRIVER:-json-file}\","
  addockeropt "\"log-opts\": {
\"max-size\": \"${DOCKER_LOG_MAX_SIZE:-10m}\",
\"max-file\": \"${DOCKER_LOG_MAX_FILE:-5}\"
}"
  # Close the JSON object opened at the top of this function.
  cat <<EOF >>/etc/docker/daemon.json
}
EOF
  echo "DOCKER_OPTS=\"${docker_opts}${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker
  # Ensure TasksMax is sufficient for docker.
  # (https://github.com/kubernetes/kubernetes/issues/51977)
  echo "Extend the docker.service configuration to set a higher pids limit"
  mkdir -p /etc/systemd/system/docker.service.d
  cat <<EOF >/etc/systemd/system/docker.service.d/01tasksmax.conf
[Service]
TasksMax=infinity
EOF
  systemctl daemon-reload
  echo "Docker command line is updated. Restart docker to pick it up"
  systemctl restart docker
}
# This function assembles the kubelet systemd service file and starts it
# using systemctl.
function start-kubelet {
  echo "Start kubelet"
  local kubelet_bin="${KUBE_HOME}/bin/kubelet"
  local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
  local -r builtin_kubelet="/usr/bin/kubelet"
  if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
    # Determine which binary to use on test clusters. We use the built-in
    # version only if the downloaded version is the same as the built-in
    # version. This allows GCI to run some of the e2e tests to qualify the
    # built-in kubelet.
    if [[ -x "${builtin_kubelet}" ]]; then
      local -r builtin_version="$("${builtin_kubelet}" --version=true | cut -f2 -d " ")"
      if [[ "${builtin_version}" == "${version}" ]]; then
        kubelet_bin="${builtin_kubelet}"
      fi
    fi
  fi
  echo "Using kubelet binary at ${kubelet_bin}"
  # The flag file is consumed by the unit below via EnvironmentFile.
  local -r kubelet_env_file="/etc/default/kubelet"
  local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-}"
  echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}"
  # Coverage output location for instrumented (test) kubelet builds.
  echo "KUBE_COVERAGE_FILE=\"/var/log/kubelet.cov\"" >> "${kubelet_env_file}"
  # Write the systemd service file for kubelet.
  cat <<EOF >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes kubelet
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS
[Install]
WantedBy=multi-user.target
EOF
  systemctl daemon-reload
  systemctl start kubelet.service
}
# This function assembles the node problem detector systemd service file and
# starts it using systemctl.
function start-node-problem-detector {
  echo "Start node problem detector"
  local -r npd_bin="${KUBE_HOME}/bin/node-problem-detector"
  echo "Using node problem detector binary at ${npd_bin}"
  # Custom flags from the environment take precedence; the default monitor
  # configuration below is assembled only when no custom flags are given.
  local flags="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}"
  if [[ -z "${flags}" ]]; then
    local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
    # TODO(random-liu): Handle this for alternative container runtime.
    local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
    local -r sm_config="${KUBE_HOME}/node-problem-detector/config/systemd-monitor.json"
    local -r ssm_config="${KUBE_HOME}/node-problem-detector/config/system-stats-monitor.json"
    local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json"
    local -r custom_sm_config="${KUBE_HOME}/node-problem-detector/config/systemd-monitor-counter.json"
    flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
    flags+=" --logtostderr"
    flags+=" --config.system-log-monitor=${km_config},${dm_config},${sm_config}"
    flags+=" --config.system-stats-monitor=${ssm_config}"
    flags+=" --config.custom-plugin-monitor=${custom_km_config},${custom_sm_config}"
    local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
    flags+=" --port=${npd_port}"
    if [[ -n "${EXTRA_NPD_ARGS:-}" ]]; then
      flags+=" ${EXTRA_NPD_ARGS}"
    fi
  fi
  # Always point NPD at this cluster's apiserver with its own kubeconfig,
  # regardless of which flag set was chosen above.
  flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
  # Write the systemd service file for node problem detector.
  cat <<EOF >/etc/systemd/system/node-problem-detector.service
[Unit]
Description=Kubernetes node problem detector
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
ExecStart=${npd_bin} ${flags}
[Install]
WantedBy=multi-user.target
EOF
  systemctl start node-problem-detector.service
}
# Create the log file and set its owner/permissions.
#
# $1: the file to create.
# $2: the log owner uid to set for the log file.
# $3: the log owner gid to set for the log file. Ignored (replaced by
#     $KUBE_POD_LOG_READERS_GROUP) when that variable is set.
function prepare-log-file {
  local -r logfile="$1"
  touch "${logfile}"
  if [[ -n "${KUBE_POD_LOG_READERS_GROUP:-}" ]]; then
    # Group-readable only, group forced to the pod-log readers group.
    chmod 640 "${logfile}"
    chown "${2:-root}":"${KUBE_POD_LOG_READERS_GROUP}" "${logfile}"
  else
    # World-readable; owner/group fall back to LOG_OWNER_* then root.
    chmod 644 "${logfile}"
    chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" "${logfile}"
  fi
}
# Prepares parameters for kube-proxy manifest.
# $1 source path of kube-proxy manifest.
# Assumptions: HOST_PLATFORM and HOST_ARCH are specified by calling detect_host_info.
function prepare-kube-proxy-manifest-variables {
  local -r src_file=$1;
  local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
  # Image registry may be overridden (e.g. for private mirrors).
  local kube_docker_registry="k8s.gcr.io"
  if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
    kube_docker_registry=${KUBE_DOCKER_REGISTRY}
  fi
  local -r kube_proxy_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-proxy.docker_tag)
  local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
  local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
  if [[ "${KUBE_PROXY_MODE:-}" == "ipvs" ]];then
    # use 'nf_conntrack' instead of 'nf_conntrack_ipv4' for linux kernel >= 4.19
    # https://github.com/kubernetes/kubernetes/pull/70398
    local -r kernel_version=$(uname -r | cut -d\. -f1,2)
    local conntrack_module="nf_conntrack"
    if [[ $(printf '%s\n4.18\n' "${kernel_version}" | sort -V | tail -1) == "4.18" ]]; then
      conntrack_module="nf_conntrack_ipv4"
    fi
    if sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh ${conntrack_module}; then
      params+=" --proxy-mode=ipvs"
    else
      # If IPVS modules are not present, make sure the node does not come up as
      # healthy.
      exit 1
    fi
  fi
  params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s"
  if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
    params+=" ${KUBEPROXY_TEST_ARGS}"
  fi
  if [[ -n "${DETECT_LOCAL_MODE:-}" ]]; then
    params+=" --detect-local-mode=${DETECT_LOCAL_MODE}"
  fi
  # Optional env block injected into the container spec to turn on the
  # cache mutation detector (used in test clusters).
  local container_env=""
  local kube_cache_mutation_detector_env_name=""
  local kube_cache_mutation_detector_env_value=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="env:"
    kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
    kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
  fi
  # Substitute the {{placeholder}} tokens in the manifest template in place.
  sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" "${src_file}"
  # TODO(#99245): Use multi-arch image and get rid of this.
  sed -i -e "s@{{pillar\['host_arch'\]}}@${HOST_ARCH}@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" "${src_file}"
  sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" "${src_file}"
  sed -i -e "s@{{ cpurequest }}@${KUBE_PROXY_CPU_REQUEST:-100m}@g" "${src_file}"
  sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" "${src_file}"
  sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" "${src_file}"
  if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
    sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" "${src_file}"
  fi
}
# Starts the kube-proxy static pod: prepares its log file, renders the
# manifest template, and drops it into the kubelet manifest directory.
function start-kube-proxy {
  echo "Start kube-proxy static pod"
  prepare-log-file /var/log/kube-proxy.log
  local -r manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-proxy.manifest"
  prepare-kube-proxy-manifest-variables "${manifest}"
  cp "${manifest}" /etc/kubernetes/manifests
}
# Replaces the variables in the etcd manifest file with the real values, and then
# copy the file to the manifest dir
# $1: value for variable 'suffix'
# $2: value for variable 'port'
# $3: value for variable 'server_port'
# $4: value for variable 'cpulimit'
# $5: pod name, which should be either etcd or etcd-events
function prepare-etcd-manifest {
  local host_name=${ETCD_HOSTNAME:-$(hostname -s)}
  # Python helper that retries DNS resolution for up to timeout_sec seconds;
  # the master's own name may not be resolvable immediately at boot.
  local resolve_host_script_py='
import socket
import time
import sys
timeout_sec=300
def resolve(host):
  for attempt in range(timeout_sec):
    try:
      print(socket.gethostbyname(host))
      break
    except Exception as e:
      sys.stderr.write("error: resolving host %s to IP failed: %s\n" % (host, e))
      time.sleep(1)
      continue
'
  local -r host_ip=$(python3 -c "${resolve_host_script_py}"$'\n'"resolve(\"${host_name}\")")
  local etcd_cluster=""
  local cluster_state="new"
  local etcd_protocol="http"
  local etcd_apiserver_protocol="http"
  local etcd_creds=""
  local etcd_apiserver_creds="${ETCD_APISERVER_CREDS:-}"
  local etcd_extra_args="${ETCD_EXTRA_ARGS:-}"
  local suffix="$1"
  local etcd_listen_metrics_port="$2"
  local etcdctl_certs=""
  if [[ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]]; then
    cluster_state="${INITIAL_ETCD_CLUSTER_STATE}"
  fi
  # Peer (etcd<->etcd) TLS is enabled when the peer certs are provided.
  if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
    etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth "
    etcd_protocol="https"
  fi
  # mTLS should only be enabled for etcd server but not etcd-events. if $1 suffix is empty, it's etcd server.
  if [[ -z "${suffix}" && -n "${ETCD_APISERVER_CA_KEY:-}" && -n "${ETCD_APISERVER_CA_CERT:-}" && -n "${ETCD_APISERVER_SERVER_KEY:-}" && -n "${ETCD_APISERVER_SERVER_CERT:-}" && -n "${ETCD_APISERVER_CLIENT_KEY:-}" && -n "${ETCD_APISERVER_CLIENT_CERT:-}" ]]; then
    etcd_apiserver_creds=" --client-cert-auth --trusted-ca-file ${ETCD_APISERVER_CA_CERT_PATH} --cert-file ${ETCD_APISERVER_SERVER_CERT_PATH} --key-file ${ETCD_APISERVER_SERVER_KEY_PATH} "
    etcdctl_certs="--cacert ${ETCD_APISERVER_CA_CERT_PATH} --cert ${ETCD_APISERVER_CLIENT_CERT_PATH} --key ${ETCD_APISERVER_CLIENT_KEY_PATH}"
    etcd_apiserver_protocol="https"
    # With client TLS on the main port, metrics move to a dedicated
    # plaintext port.
    etcd_listen_metrics_port="2382"
    etcd_extra_args+=" --listen-metrics-urls=http://${ETCD_LISTEN_CLIENT_IP:-127.0.0.1}:${etcd_listen_metrics_port} "
  fi
  if [[ -n "${ETCD_PROGRESS_NOTIFY_INTERVAL:-}" ]]; then
    etcd_extra_args+=" --experimental-watch-progress-notify-interval=${ETCD_PROGRESS_NOTIFY_INTERVAL}"
  fi
  # Build the initial-cluster string: "etcd-<host>=<proto>://<host>:<server_port>,..."
  for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do
    etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3"
    if [[ -n "${etcd_cluster}" ]]; then
      etcd_cluster+=","
    fi
    etcd_cluster+="${etcd_host}"
  done
  # Render the template into a temp file, substitute every placeholder, then
  # atomically move it into the kubelet manifest directory.
  local -r temp_file="/tmp/$5"
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
  sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
  sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
  sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
  sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
  sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
  sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
  sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
  sed -i -e "s@{{ *listen_client_ip *}}@${ETCD_LISTEN_CLIENT_IP:-127.0.0.1}@g" "${temp_file}"
  # Get default storage backend from manifest file.
  local -r default_storage_backend=$( \
    grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" "${temp_file}" | \
    sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
  if [[ -n "${STORAGE_BACKEND:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  # The backend quota flag only applies to the etcd3 backend.
  if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then
    sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=${ETCD_QUOTA_BACKEND_BYTES:-4294967296}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}"
  fi
  sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}"
  if [[ -n "${ETCD_IMAGE:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  if [[ -n "${ETCD_DOCKER_REPOSITORY:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@${ETCD_DOCKER_REPOSITORY}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_apiserver_protocol *}}@$etcd_apiserver_protocol@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}"
  sed -i -e "s@{{ *etcdctl_certs *}}@$etcdctl_certs@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_apiserver_creds *}}@$etcd_apiserver_creds@g" "${temp_file}"
  sed -i -e "s@{{ *etcd_extra_args *}}@$etcd_extra_args@g" "${temp_file}"
  if [[ -n "${ETCD_VERSION:-}" ]]; then
    sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}"
  else
    sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}"
  fi
  # Replace the volume host path.
  sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}"
  # Replace the run as user and run as group
  container_security_context=""
  if [[ -n "${ETCD_RUNASUSER:-}" && -n "${ETCD_RUNASGROUP:-}" ]]; then
    container_security_context="\"securityContext\": {\"runAsUser\": ${ETCD_RUNASUSER}, \"runAsGroup\": ${ETCD_RUNASGROUP}, \"allowPrivilegeEscalation\": false, \"capabilities\": {\"drop\": [\"all\"]}},"
  fi
  sed -i -e "s@{{security_context}}@${container_security_context}@g" "${temp_file}"
  mv "${temp_file}" /etc/kubernetes/manifests
}
# Starts etcd server pod (and etcd-events pod if needed).
# More specifically, it prepares dirs and files, sets the variable value
# in the manifests, and copies them to /etc/kubernetes/manifests.
function start-etcd-servers {
  echo "Start etcd pods"
  # Clear out any configuration left behind by a host-level etcd install so
  # that only the static pods rendered below run etcd on this machine.
  if [[ -d /etc/etcd ]]; then
    rm -rf /etc/etcd
  fi
  local stale_cfg
  for stale_cfg in /etc/default/etcd /etc/systemd/system/etcd.service /etc/init.d/etcd; do
    if [[ -e "${stale_cfg}" ]]; then
      rm -f "${stale_cfg}"
    fi
  done
  # Hand the data directory to the configured etcd user/group when set.
  if [[ -n "${ETCD_RUNASUSER:-}" && -n "${ETCD_RUNASGROUP:-}" ]]; then
    chown -R "${ETCD_RUNASUSER}":"${ETCD_RUNASGROUP}" /mnt/disks/master-pd/var/etcd
  fi
  prepare-log-file /var/log/etcd.log "${ETCD_RUNASUSER:-0}"
  prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"
  prepare-log-file /var/log/etcd-events.log "${ETCD_RUNASUSER:-0}"
  prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
}
# Replaces the variables in the konnectivity-server manifest file with the real values, and then
# copy the file to the manifest dir
# $1: value for variable "agent_port"
# $2: value for variable "health_port"
# $3: value for variable "admin_port"
function prepare-konnectivity-server-manifest {
  local -r temp_file="/tmp/konnectivity-server.yaml"
  params=()
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/konnectivity-server.yaml" "${temp_file}"
  params+=("--log-file=/var/log/konnectivity-server.log")
  params+=("--logtostderr=false")
  params+=("--log-file-max-size=0")
  # Frontend (apiserver-facing) listener: a UDS socket in grpc mode, server
  # mTLS certificates in http-connect mode.
  if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'grpc' ]]; then
    params+=("--uds-name=/etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket")
  elif [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
    # HTTP-CONNECT can work with either UDS or mTLS.
    # Linking them here to make sure we get good coverage with two test configurations.
    params+=("--server-ca-cert=${KONNECTIVITY_SERVER_CA_CERT_PATH}")
    params+=("--server-cert=${KONNECTIVITY_SERVER_CERT_PATH}")
    params+=("--server-key=${KONNECTIVITY_SERVER_KEY_PATH}")
    params+=("--cluster-ca-cert=${KONNECTIVITY_AGENT_CA_CERT_PATH}")
  fi
  params+=("--cluster-cert=/etc/srv/kubernetes/pki/apiserver.crt")
  params+=("--cluster-key=/etc/srv/kubernetes/pki/apiserver.key")
  if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'grpc' ]]; then
    # grpc mode: no separate TCP server port; agents are authenticated via
    # service-account tokens with the system:konnectivity-server audience.
    params+=("--mode=grpc")
    params+=("--server-port=0")
    params+=("--agent-namespace=kube-system")
    params+=("--agent-service-account=konnectivity-agent")
    params+=("--authentication-audience=system:konnectivity-server")
    params+=("--kubeconfig=/etc/srv/kubernetes/konnectivity-server/kubeconfig")
    params+=("--proxy-strategies=default")
  elif [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
    # http-connect mode: serve on TCP port 8131; the token-based agent
    # authentication flags are cleared because mTLS is used instead.
    params+=("--mode=http-connect")
    params+=("--server-port=8131")
    params+=("--agent-namespace=")
    params+=("--agent-service-account=")
    params+=("--authentication-audience=")
    # Need to fix ANP code to allow kubeconfig to be set with mtls.
    params+=("--kubeconfig=")
    params+=("--proxy-strategies=destHost,default")
  else
    echo "KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE must be set to either grpc or http-connect"
    exit 1
  fi
  params+=("--agent-port=$1")
  params+=("--health-port=$2")
  params+=("--admin-port=$3")
  params+=("--kubeconfig-qps=75")
  params+=("--kubeconfig-burst=150")
  params+=("--keepalive-time=60s")
  params+=("--frontend-keepalive-time=60s")
  # Join all flags as a quoted, comma-separated argument list consumed by
  # the manifest template's {{konnectivity_args}} placeholder.
  konnectivity_args=""
  for param in "${params[@]}"; do
    konnectivity_args+=", \"${param}\""
  done
  sed -i -e "s@{{ *konnectivity_args *}}@${konnectivity_args}@g" "${temp_file}"
  sed -i -e "s@{{ *agent_port *}}@$1@g" "${temp_file}"
  sed -i -e "s@{{ *health_port *}}@$2@g" "${temp_file}"
  sed -i -e "s@{{ *admin_port *}}@$3@g" "${temp_file}"
  sed -i -e "s@{{ *liveness_probe_initial_delay *}}@30@g" "${temp_file}"
  # Run as a dedicated user/group when configured; otherwise strip the
  # securityContext placeholders from the manifest entirely.
  if [[ -n "${KONNECTIVITY_SERVER_RUNASUSER:-}" && -n "${KONNECTIVITY_SERVER_RUNASGROUP:-}" && -n "${KONNECTIVITY_SERVER_SOCKET_WRITER_GROUP:-}" ]]; then
    sed -i -e "s@{{ *run_as_user *}}@runAsUser: ${KONNECTIVITY_SERVER_RUNASUSER}@g" "${temp_file}"
    sed -i -e "s@{{ *run_as_group *}}@runAsGroup: ${KONNECTIVITY_SERVER_RUNASGROUP}@g" "${temp_file}"
    sed -i -e "s@{{ *supplemental_groups *}}@supplementalGroups: [${KUBE_PKI_READERS_GROUP}]@g" "${temp_file}"
    sed -i -e "s@{{ *container_security_context *}}@securityContext:@g" "${temp_file}"
    sed -i -e "s@{{ *capabilities *}}@capabilities:@g" "${temp_file}"
    sed -i -e "s@{{ *drop_capabilities *}}@drop: [ ALL ]@g" "${temp_file}"
    sed -i -e "s@{{ *disallow_privilege_escalation *}}@allowPrivilegeEscalation: false@g" "${temp_file}"
    mkdir -p /etc/srv/kubernetes/konnectivity-server/
    chown -R "${KONNECTIVITY_SERVER_RUNASUSER}":"${KONNECTIVITY_SERVER_RUNASGROUP}" /etc/srv/kubernetes/konnectivity-server
    chmod g+w /etc/srv/kubernetes/konnectivity-server
  else
    sed -i -e "s@{{ *run_as_user *}}@@g" "${temp_file}"
    sed -i -e "s@{{ *run_as_group *}}@@g" "${temp_file}"
    sed -i -e "s@{{ *supplemental_groups *}}@@g" "${temp_file}"
    sed -i -e "s@{{ *container_security_context *}}@@g" "${temp_file}"
    sed -i -e "s@{{ *capabilities *}}@@g" "${temp_file}"
    sed -i -e "s@{{ *drop_capabilities *}}@@g" "${temp_file}"
    sed -i -e "s@{{ *disallow_privilege_escalation *}}@@g" "${temp_file}"
  fi
  mv "${temp_file}" /etc/kubernetes/manifests
}
# Starts the konnectivity server static pod.
# Prepares its log file, then renders the manifest into
# /etc/kubernetes/manifests with the standard agent/health/admin ports.
function start-konnectivity-server {
  echo "Start konnectivity server pods"
  local -r log_owner="${KONNECTIVITY_SERVER_RUNASUSER:-0}"
  prepare-log-file /var/log/konnectivity-server.log "${log_owner}"
  prepare-konnectivity-server-manifest "8132" "8133" "8134"
}
# Calculates the following variables based on env variables, which will be used
# by the manifests of several kube-master components.
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
#   FLEXVOLUME_HOSTPATH_MOUNT
#   FLEXVOLUME_HOSTPATH_VOLUME
#   INSECURE_PORT_MAPPING
function compute-master-manifest-variables {
  # Cloud-config flag plus matching volume/mount JSON fragments, populated
  # only when /etc/gce.conf exists on the master.
  CLOUD_CONFIG_OPT=""
  CLOUD_CONFIG_VOLUME=""
  CLOUD_CONFIG_MOUNT=""
  if [[ -f /etc/gce.conf ]]; then
    CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf"
    CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
    CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
  fi
  # Image registry may be overridden (e.g. for private mirrors).
  DOCKER_REGISTRY="k8s.gcr.io"
  if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
    DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}"
  fi
  # Flex volume plugin dir gets mounted (read-only) into master components
  # when a custom plugin dir is configured.
  FLEXVOLUME_HOSTPATH_MOUNT=""
  FLEXVOLUME_HOSTPATH_VOLUME=""
  if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
    FLEXVOLUME_HOSTPATH_MOUNT="{ \"name\": \"flexvolumedir\", \"mountPath\": \"${VOLUME_PLUGIN_DIR}\", \"readOnly\": true},"
    FLEXVOLUME_HOSTPATH_VOLUME="{ \"name\": \"flexvolumedir\", \"hostPath\": {\"path\": \"${VOLUME_PLUGIN_DIR}\"}},"
  fi
  INSECURE_PORT_MAPPING=""
  if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" == "true" ]]; then
    # INSECURE_PORT_MAPPING is used by sed
    # shellcheck disable=SC2089
    INSECURE_PORT_MAPPING='{ "name": "local", "containerPort": 8080, "hostPort": 8080},'
  fi
  # shellcheck disable=SC2090
  export INSECURE_PORT_MAPPING
}
# A helper function that bind mounts kubelet dirs for running mount in a chroot
function prepare-mounter-rootfs {
  echo "Prepare containerized mounter"
  # Self bind-mount first so the exec remount below affects only this subtree.
  mount --bind "${CONTAINERIZED_MOUNTER_HOME}" "${CONTAINERIZED_MOUNTER_HOME}"
  mount -o remount,exec "${CONTAINERIZED_MOUNTER_HOME}"
  CONTAINERIZED_MOUNTER_ROOTFS="${CONTAINERIZED_MOUNTER_HOME}/rootfs"
  # Recursively bind kubelet state into the chroot and mark the mount shared
  # so mount events propagate between host and chroot.
  mount --rbind /var/lib/kubelet/ "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
  mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
  # /proc and /dev are needed inside the chroot, read-only.
  mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc"
  mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev"
  # DNS resolution inside the chroot matches the host.
  cp /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/"
}
# Migrates legacy (beta.kubernetes.io/*) addon-readiness node labels to their
# current cloud.google.com/* and node.kubernetes.io/* names.
function update-legacy-addon-node-labels() {
  # Labeling requires a responsive apiserver; poll until it answers.
  until kubectl get nodes; do
    sleep 5
  done
  # Each entry is "<selector>|<label>": select nodes carrying the legacy
  # label but not the new one, then apply the new label.
  local pair
  for pair in \
    "beta.kubernetes.io/metadata-proxy-ready=true,cloud.google.com/metadata-proxy-ready!=true|cloud.google.com/metadata-proxy-ready=true" \
    "beta.kubernetes.io/kube-proxy-ds-ready=true,node.kubernetes.io/kube-proxy-ds-ready!=true|node.kubernetes.io/kube-proxy-ds-ready=true" \
    "beta.kubernetes.io/masq-agent-ds-ready=true,node.kubernetes.io/masq-agent-ds-ready!=true|node.kubernetes.io/masq-agent-ds-ready=true"; do
    update-node-label "${pair%|*}" "${pair#*|}"
  done
}
# A helper function for labeling all nodes matching a given selector.
# Runs: kubectl label --overwrite nodes -l "${1}" "${2}"
# Retries up to 5 times, pausing 3s between attempts; gives up silently
# (returns 0) when all attempts fail, matching the best-effort contract.
#
# $1: label selector of nodes
# $2: label to apply
function update-node-label() {
  local -r node_selector="$1"
  local -r node_label="$2"
  local attempts_left=5
  while (( attempts_left > 0 )); do
    if kubectl label --overwrite nodes -l "${node_selector}" "${node_label}"; then
      return 0
    fi
    (( attempts_left-- ))
    sleep 3
  done
}
# Starts kubernetes controller manager.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
function start-kube-controller-manager {
  # GKE-internal builds may opt out of deploying the controller manager here.
  if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
    if ! deploy-kube-controller-manager-via-kube-up; then
      echo "kube-controller-manager is configured to not be deployed through kube-up."
      return
    fi
  fi
  echo "Start kubernetes controller-manager"
  create-kubeconfig "kube-controller-manager" "${KUBE_CONTROLLER_MANAGER_TOKEN}"
  prepare-log-file /var/log/kube-controller-manager.log "${KUBE_CONTROLLER_MANAGER_RUNASUSER:-0}"
  # Calculate variables and assemble the command line.
  local params=("${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"}" "${CONTROLLER_MANAGER_TEST_ARGS:-}" "${CLOUD_CONFIG_OPT}")
  local config_path='/etc/srv/kubernetes/kube-controller-manager/kubeconfig'
  params+=("--use-service-account-credentials")
  params+=("--cloud-provider=gce")
  params+=("--kubeconfig=${config_path}" "--authentication-kubeconfig=${config_path}" "--authorization-kubeconfig=${config_path}")
  params+=("--root-ca-file=${CA_CERT_BUNDLE_PATH}")
  params+=("--service-account-private-key-file=${SERVICEACCOUNT_KEY_PATH}")
  params+=("--volume-host-allow-local-loopback=false")
  # Optional flags below are added only when their env variables are set.
  if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
    params+=("--enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}")
  fi
  if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
    params+=("--cluster-name=${INSTANCE_PREFIX}")
  fi
  if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
    params+=("--cluster-cidr=${CLUSTER_IP_RANGE}")
  fi
  if [[ -n "${CA_KEY:-}" ]]; then
    params+=("--cluster-signing-cert-file=${CA_CERT_PATH}")
    params+=("--cluster-signing-key-file=${CA_KEY_PATH}")
  fi
  if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
    params+=("--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}")
  fi
  if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then
    params+=("--concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}")
  fi
  # kubenet always allocates node CIDRs; otherwise honor ALLOCATE_NODE_CIDRS.
  if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
    params+=("--allocate-node-cidrs=true")
  elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
    params+=("--allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}")
  fi
  if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
    params+=("--terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}")
  fi
  if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
    params+=("--cidr-allocator-type=${NODE_IPAM_MODE}")
    params+=("--configure-cloud-routes=false")
  fi
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=("--feature-gates=${FEATURE_GATES}")
  fi
  if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
    params+=("--flex-volume-plugin-dir=${VOLUME_PLUGIN_DIR}")
  fi
  if [[ -n "${CLUSTER_SIGNING_DURATION:-}" ]]; then
    params+=("--cluster-signing-duration=$CLUSTER_SIGNING_DURATION")
  fi
  if [[ -n "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
    params+=("--pv-recycler-pod-template-filepath-nfs=$PV_RECYCLER_OVERRIDE_TEMPLATE")
    params+=("--pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE")
  fi
  if [[ -n "${RUN_CONTROLLERS:-}" ]]; then
    params+=("--controllers=${RUN_CONTROLLERS}")
  fi
  local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
  # Optional env block enabling the cache mutation detector (test clusters).
  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
  fi
  local paramstring
  paramstring="$(convert-manifest-params "${params[*]}")"
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
  # Evaluate variables.
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{params}}@${paramstring}@g" "${src_file}"
  sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
  sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
  sed -i -e "s@{{pv_recycler_mount}}@${PV_RECYCLER_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
  sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
  sed -i -e "s@{{cpurequest}}@${KUBE_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"
  # Fill in a securityContext only when both run-as user and group are set;
  # otherwise strip the placeholders.
  if [[ -n "${KUBE_CONTROLLER_MANAGER_RUNASUSER:-}" && -n "${KUBE_CONTROLLER_MANAGER_RUNASGROUP:-}" ]]; then
    sed -i -e "s@{{runAsUser}}@\"runAsUser\": ${KUBE_CONTROLLER_MANAGER_RUNASUSER},@g" "${src_file}"
    sed -i -e "s@{{runAsGroup}}@\"runAsGroup\":${KUBE_CONTROLLER_MANAGER_RUNASGROUP},@g" "${src_file}"
    sed -i -e "s@{{supplementalGroups}}@\"supplementalGroups\": [ ${KUBE_PKI_READERS_GROUP} ],@g" "${src_file}"
  else
    sed -i -e "s@{{runAsUser}}@@g" "${src_file}"
    sed -i -e "s@{{runAsGroup}}@@g" "${src_file}"
    sed -i -e "s@{{supplementalGroups}}@@g" "${src_file}"
  fi
  cp "${src_file}" /etc/kubernetes/manifests
}
# Starts kubernetes scheduler.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in compute-master-manifest-variables)
#   DOCKER_REGISTRY
function start-kube-scheduler {
  # GKE-internal builds may opt out of deploying the scheduler here.
  if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
    if ! deploy-kube-scheduler-via-kube-up; then
      echo "kube-scheduler is configured to not be deployed through kube-up."
      return
    fi
  fi
  echo "Start kubernetes scheduler"
  create-kubeconfig "kube-scheduler" "${KUBE_SCHEDULER_TOKEN}"
  # User and group should never contain characters that need to be quoted
  # shellcheck disable=SC2086
  prepare-log-file /var/log/kube-scheduler.log ${KUBE_SCHEDULER_RUNASUSER:-2001}
  # Calculate variables and set them in the manifest.
  params=("${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"}" "${SCHEDULER_TEST_ARGS:-}")
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=("--feature-gates=${FEATURE_GATES}")
  fi
  # Scheduler Component Config takes precedence over some flags.
  if [[ -n "${KUBE_SCHEDULER_CONFIG:-}" ]]; then
    create-kube-scheduler-config
    params+=("--config=/etc/srv/kubernetes/kube-scheduler/config")
  else
    params+=("--kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig")
    # Legacy policy config is only honored in the non-component-config path.
    if [[ -n "${SCHEDULER_POLICY_CONFIG:-}" ]]; then
      create-kubescheduler-policy-config
      params+=("--use-legacy-policy-config")
      params+=("--policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config")
    fi
  fi
  local config_path
  config_path='/etc/srv/kubernetes/kube-scheduler/kubeconfig'
  params+=("--authentication-kubeconfig=${config_path}" "--authorization-kubeconfig=${config_path}")
  local paramstring
  paramstring="$(convert-manifest-params "${params[*]}")"
  local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
  # Remove salt comments and replace variables with values.
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
  sed -i -e "s@{{params}}@${paramstring}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
  sed -i -e "s@{{cpurequest}}@${KUBE_SCHEDULER_CPU_REQUEST}@g" "${src_file}"
  sed -i -e "s@{{runAsUser}}@${KUBE_SCHEDULER_RUNASUSER:-2001}@g" "${src_file}"
  sed -i -e "s@{{runAsGroup}}@${KUBE_SCHEDULER_RUNASGROUP:-2001}@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}
# Starts cluster autoscaler.
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
function start-cluster-autoscaler {
# Only deployed when explicitly enabled for this cluster.
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Start kubernetes cluster autoscaler"
setup-addon-manifests "addons" "rbac/cluster-autoscaler"
create-kubeconfig "cluster-autoscaler" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}"
prepare-log-file /var/log/cluster-autoscaler.log
# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
# Flag list: MIG config (word-split into separate flags by `read`), cloud
# config, expander policy (defaults to price-based), kubeconfig path.
local params
read -r -a params <<< "${AUTOSCALER_MIG_CONFIG}"
params+=("${CLOUD_CONFIG_OPT}" "${AUTOSCALER_EXPANDER_CONFIG:---expander=price}")
params+=("--kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig")
# split the params into separate arguments passed to binary
# (renders the array as `"flag1","flag2",...` for the JSON args list,
# then strips the trailing comma).
local params_split
params_split=$(eval 'for param in "${params[@]}"; do echo -n \""$param"\",; done')
params_split=${params_split%?}
sed -i -e "s@{{params}}@${params_split}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
# Drop any leftover {% ... %} salt template directives.
sed -i -e "s@{%.*%}@@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
fi
}
# A helper function for setting up addon manifests.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: (optional) auxiliary manifest source dir
function setup-addon-manifests {
  local -r manifest_root="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
  local -r target_dir="/etc/kubernetes/$1/$2"
  # Install the addon's own manifests.
  copy-manifests "${manifest_root}/$2" "${target_dir}"
  # With the PodSecurityPolicy admission controller enabled, also install
  # the addon's PSP manifests when a podsecuritypolicies/ subdir exists
  # (looked up under $3 when given, otherwise under $2).
  if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
    local -r psp_src="${manifest_root}/${3:-$2}/podsecuritypolicies"
    if [[ -d "${psp_src}" ]]; then
      copy-manifests "${psp_src}" "${target_dir}"
    fi
  fi
}
# A function that downloads extra addons from a URL and puts them in the GCI
# manifests directory.
function download-extra-addons {
  # Fetches EXTRA_ADDONS_URL into the gce-extras manifest directory as
  # extras.json, passing through CURL_FLAGS and an optional auth header.
  local -r extras_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/gce-extras"
  mkdir -p "${extras_dir}"
  # CURL_FLAGS is intentionally unquoted so it word-splits into options.
  # shellcheck disable=SC2206
  local fetch=(
    "curl"
    ${CURL_FLAGS}
  )
  if [[ -n "${EXTRA_ADDONS_HEADER:-}" ]]; then
    fetch+=("-H" "${EXTRA_ADDONS_HEADER}")
  fi
  fetch+=("-o" "${extras_dir}/extras.json" "${EXTRA_ADDONS_URL}")
  "${fetch[@]}"
}
# A function that fetches a GCE metadata value and echoes it out.
# Args:
# $1 : URL path after /computeMetadata/v1/ (without heading slash).
# $2 : An optional default value to echo out if the fetch fails.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function get-metadata-value {
local default="${2:-}"
local status
# CURL_FLAGS is intentionally unquoted so it word-splits into options.
# On success, curl writes the metadata value straight to stdout.
# shellcheck disable=SC2086
curl ${CURL_FLAGS} \
-H 'Metadata-Flavor: Google' \
"http://metadata/computeMetadata/v1/${1}" \
|| status="$?"
status="${status:-0}"
if [[ "${status}" -eq 0 || -z "${default}" ]]; then
# Fetch succeeded, or it failed with no fallback: propagate curl's status.
return "${status}"
else
# Fetch failed but a default was supplied: emit it and return 0
# (the function's status becomes the echo's status).
echo "${default}"
fi
}
# A helper function for copying manifests and setting dir/files
# permissions.
#
# $1: absolute source dir
# $2: absolute destination dir
function copy-manifests {
  # Copies all *.yaml, *.json and *.yaml.in manifests from the source dir
  # into the destination dir, then normalizes ownership (root:root) and
  # permissions (0755 on the dir, 0644 on its entries).
  local -r from_dir="$1"
  local -r to_dir="$2"
  if [[ ! -d "${to_dir}" ]]; then
    mkdir -p "${to_dir}"
  fi
  local ext
  for ext in yaml json yaml.in; do
    # Only copy when at least one file of this type exists, to avoid a
    # hard failure on an unexpanded glob.
    if [[ -n "$(ls "${from_dir}"/*."${ext}" 2>/dev/null)" ]]; then
      cp "${from_dir}/"*."${ext}" "${to_dir}"
    fi
  done
  chown -R root:root "${to_dir}"
  chmod 755 "${to_dir}"
  chmod 644 "${to_dir}"/*
}
# Fluentd resources are modified using ScalingPolicy CR, which may not be
# available at this point. Run this as a background process.
function wait-for-apiserver-and-update-fluentd {
# Determine whether any fluentd resource override was requested at all.
local any_overrides=false
if [[ -n "${FLUENTD_GCP_MEMORY_LIMIT:-}" ]]; then
any_overrides=true
fi
if [[ -n "${FLUENTD_GCP_CPU_REQUEST:-}" ]]; then
any_overrides=true
fi
if [[ -n "${FLUENTD_GCP_MEMORY_REQUEST:-}" ]]; then
any_overrides=true
fi
if ! $any_overrides; then
# Nothing to do here.
# (`exit`, not `return`: this function runs as a background job.)
exit
fi
# Wait until ScalingPolicy CRD is in place.
until kubectl get scalingpolicies.scalingpolicy.kope.io
do
sleep 10
done
# Single-shot, not managed by addon manager. Can be later modified or removed
# at will.
# Unset overrides expand empty below, leaving that resource unconstrained.
cat <<EOF | kubectl apply -f -
apiVersion: scalingpolicy.kope.io/v1alpha1
kind: ScalingPolicy
metadata:
name: fluentd-gcp-scaling-policy
namespace: kube-system
spec:
containers:
- name: fluentd-gcp
resources:
requests:
- resource: cpu
base: ${FLUENTD_GCP_CPU_REQUEST:-}
- resource: memory
base: ${FLUENTD_GCP_MEMORY_REQUEST:-}
limits:
- resource: memory
base: ${FLUENTD_GCP_MEMORY_LIMIT:-}
EOF
}
# Trigger background process that will ultimately update fluentd resource
# requirements.
function start-fluentd-resource-update {
# Fire-and-forget: the worker polls for the ScalingPolicy CRD and exits on
# its own. The yaml path argument passed by setup-fluentd is unused here.
wait-for-apiserver-and-update-fluentd &
}
# VolumeSnapshot CRDs and controller are installed by cluster addon manager,
# which may not be available at this point. Run this as a background process.
function wait-for-volumesnapshot-crd-and-controller {
  # Block until each VolumeSnapshot CRD is registered with the API server.
  echo "Wait until volume snapshot CRDs are installed"
  local crd
  for crd in volumesnapshotclasses volumesnapshotcontents volumesnapshots; do
    until kubectl get "${crd}.snapshot.storage.k8s.io"; do
      sleep 10
    done
  done
  # Block until the controller's RBAC binding exists.
  echo "Wait until volume snapshot RBAC rules are installed"
  until kubectl get clusterrolebinding volume-snapshot-controller-role; do
    sleep 10
  done
  # Block until the controller statefulset reports 1/1 ready replicas.
  echo "Wait until volume snapshot controller is installed"
  until kubectl get statefulset volume-snapshot-controller | grep volume-snapshot-controller | grep "1/1"; do
    sleep 10
  done
}
# Trigger background process that will wait for volumesnapshot CRDs
# and snapshot-controller to be installed
function start-volumesnapshot-crd-and-controller {
# Fire-and-forget: the waiter polls kubectl until the snapshot CRDs and
# controller (installed separately by the addon manager) are ready.
wait-for-volumesnapshot-crd-and-controller &
}
# Update {{ fluentd_container_runtime_service }} with actual container runtime name,
# and {{ container_runtime_endpoint }} with actual container runtime
# endpoint.
function update-container-runtime {
  # Substitutes the container runtime placeholders in the given file: the
  # runtime service name and the runtime endpoint (with its unix:// scheme
  # prefix stripped).
  local -r target_file="$1"
  local -r runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}"
  local -r runtime_service="${FLUENTD_CONTAINER_RUNTIME_SERVICE:-${CONTAINER_RUNTIME_NAME:-docker}}"
  sed -i \
    -e "s@{{ *fluentd_container_runtime_service *}}@${runtime_service}@g" \
    -e "s@{{ *container_runtime_endpoint *}}@${runtime_endpoint#unix://}@g" \
    "${target_file}"
}
# Remove configuration in yaml file if node journal is not enabled.
function update-node-journal {
  # Strips the node-journal section from the given configmap yaml unless
  # ENABLE_NODE_JOURNAL is explicitly "true".
  local -r target_yaml="$1"
  if [[ "${ENABLE_NODE_JOURNAL:-}" == "true" ]]; then
    return
  fi
  # Drop every line from the BEGIN marker through the END marker, inclusive.
  sed -i -e "/# BEGIN_NODE_JOURNAL/,/# END_NODE_JOURNAL/d" "${target_yaml}"
}
# Updates parameters in yaml file for prometheus-to-sd configuration, or
# removes component if it is disabled.
function update-prometheus-to-sd-parameters {
  # Fills in prometheus-to-sd placeholders when the exporter is enabled,
  # otherwise removes the whole marked section from the manifest.
  local -r manifest="$1"
  if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" != "true" ]]; then
    # Delete every line between the BEGIN/END markers, inclusive.
    sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "${manifest}"
    return
  fi
  sed -i \
    -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" \
    -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" \
    "${manifest}"
}
# Updates parameters in yaml file for prometheus-to-sd configuration in daemon sets, or
# removes component if it is disabled.
function update-daemon-set-prometheus-to-sd-parameters {
  # Daemon sets get the prometheus-to-sd section stripped when explicitly
  # disabled for daemon sets; otherwise they are handled like any other
  # manifest.
  local -r ds_manifest="$1"
  if [[ "${DISABLE_PROMETHEUS_TO_SD_IN_DS:-}" != "true" ]]; then
    update-prometheus-to-sd-parameters "${ds_manifest}"
    return
  fi
  # Delete every line between the BEGIN/END markers, inclusive.
  sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "${ds_manifest}"
}
# Updates parameters in yaml file for event-exporter configuration
function update-event-exporter {
  # Substitutes event-exporter placeholders: the Stackdriver resource model
  # (defaults to the legacy "old" types) and the optional custom endpoint
  # (empty when STACKDRIVER_ENDPOINT is unset).
  local -r manifest="$1"
  local -r resource_model="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
  sed -i \
    -e "s@{{ exporter_sd_resource_model }}@${resource_model}@g" \
    -e "s@{{ exporter_sd_endpoint }}@${STACKDRIVER_ENDPOINT:-}@g" \
    "${manifest}"
}
function update-dashboard-deployment {
  # Injects a custom banner (always with WARNING severity) into the
  # dashboard deployment args, replacing the placeholder comment while
  # preserving its indentation. No-op when no banner is configured.
  local -r deployment_yaml="$1"
  if [ -z "${CUSTOM_KUBE_DASHBOARD_BANNER:-}" ]; then
    return
  fi
  sed -i -e "s@\( \+\)# PLATFORM-SPECIFIC ARGS HERE@\1- --system-banner=${CUSTOM_KUBE_DASHBOARD_BANNER}\n\1- --system-banner-severity=WARNING@" "${deployment_yaml}"
}
# Sets up the manifests of coreDNS for k8s addons.
function setup-coredns-manifest {
setup-addon-manifests "addons" "0-dns/coredns"
# NOTE: dst_dir is not local here; it is inherited from the caller
# (start-kube-addons) via bash dynamic scoping.
local -r coredns_file="${dst_dir}/0-dns/coredns/coredns.yaml"
# Promote the template (.yaml.in) to the real manifest before substitution.
mv "${dst_dir}/0-dns/coredns/coredns.yaml.in" "${coredns_file}"
# Replace the salt configurations with variable values.
sed -i -e "s@dns_domain@${DNS_DOMAIN}@g" "${coredns_file}"
sed -i -e "s@dns_server@${DNS_SERVER_IP}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}"
sed -i -e "s@dns_memory_limit@${DNS_MEMORY_LIMIT:-170Mi}@g" "${coredns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
# Point the autoscaler at the coredns deployment.
sed -i'' -e "s@{{.Target}}@${COREDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
fi
}
# Sets up the manifests of Fluentd configmap and yamls for k8s addons.
function setup-fluentd {
# $1: destination addon dir (usually /etc/kubernetes/addons).
local -r dst_dir="$1"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
local -r fluentd_gcp_scaler_yaml="${dst_dir}/fluentd-gcp/scaler-deployment.yaml"
# Ingest logs against new resources like "k8s_container" and "k8s_node" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
# Ingest logs against old resources like "gke_container" and "gce_instance" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "old".
if [[ "${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}" == "new" ]]; then
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
fluentd_gcp_configmap_name="fluentd-gcp-config"
else
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap-old.yaml"
fluentd_gcp_configmap_name="fluentd-gcp-config-old"
fi
# Stamp the configmap name, yaml version and fluentd version into the
# daemonset and scaler manifests.
sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.2.0}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-1.6.17}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-daemon-set-prometheus-to-sd-parameters "${fluentd_gcp_yaml}"
# Background job applying fluentd resource overrides once the
# ScalingPolicy CRD becomes available.
start-fluentd-resource-update "${fluentd_gcp_yaml}"
update-container-runtime "${fluentd_gcp_configmap_yaml}"
update-node-journal "${fluentd_gcp_configmap_yaml}"
}
# Sets up the manifests of kube-dns for k8s addons.
function setup-kube-dns-manifest {
setup-addon-manifests "addons" "0-dns/kube-dns"
# NOTE: dst_dir is inherited from the caller (start-kube-addons) via bash
# dynamic scoping.
local -r kubedns_file="${dst_dir}/0-dns/kube-dns/kube-dns.yaml"
# Promote the template (.yaml.in) to the real manifest.
mv "${dst_dir}/0-dns/kube-dns/kube-dns.yaml.in" "${kubedns_file}"
if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
# Replace with custom GKE kube-dns deployment.
cat > "${kubedns_file}" <<EOF
$CUSTOM_KUBE_DNS_YAML
EOF
update-prometheus-to-sd-parameters "${kubedns_file}"
fi
# Replace the salt configurations with variable values.
# (These substitutions also run on the custom yaml, if one was installed.)
sed -i -e "s@dns_domain@${DNS_DOMAIN}@g" "${kubedns_file}"
sed -i -e "s@dns_server@${DNS_SERVER_IP}@g" "${kubedns_file}"
sed -i -e "s@dns_memory_limit@${DNS_MEMORY_LIMIT:-170Mi}@g" "${kubedns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
# Point the autoscaler at the kube-dns deployment.
sed -i'' -e "s@{{.Target}}@${KUBEDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
fi
}
# Sets up the manifests of local dns cache agent for k8s addons.
function setup-nodelocaldns-manifest {
setup-addon-manifests "addons" "0-dns/nodelocaldns"
# NOTE: dst_dir is inherited from the caller (start-kube-addons) via bash
# dynamic scoping.
local -r localdns_file="${dst_dir}/0-dns/nodelocaldns/nodelocaldns.yaml"
setup-addon-custom-yaml "addons" "0-dns/nodelocaldns" "nodelocaldns.yaml" "${CUSTOM_NODELOCAL_DNS_YAML:-}"
# eventually all the __PILLAR__ stuff will be gone, but theyre still in nodelocaldns for backward compat.
sed -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" "${localdns_file}"
}
# Sets up the manifests of netd for k8s addons.
function setup-netd-manifest {
# NOTE: dst_dir is inherited from the caller (start-kube-addons) via bash
# dynamic scoping.
local -r netd_file="${dst_dir}/netd/netd.yaml"
mkdir -p "${dst_dir}/netd"
# Always create the file so the addon dir is valid even without custom yaml.
touch "${netd_file}"
if [ -n "${CUSTOM_NETD_YAML:-}" ]; then
# Replace with custom GCP netd deployment.
cat > "${netd_file}" <<EOF
$CUSTOM_NETD_YAML
EOF
fi
}
# A helper function to set up a custom yaml for a k8s addon.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: manifest file
# $4: custom yaml
function setup-addon-custom-yaml {
  # Overwrites an addon manifest with caller-supplied yaml, if any was given.
  local -r out_path="/etc/kubernetes/$1/$2/$3"
  local -r replacement="$4"
  if [ -n "${replacement:-}" ]; then
    # Replace with custom manifest (trailing newline added, matching the
    # original heredoc behavior).
    printf '%s\n' "${replacement}" > "${out_path}"
  fi
}
# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
# CLUSTER_NAME
function start-kube-addons {
  echo "Prepare kube-addons manifests and start kube addon manager"
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
  # NOTE: dst_dir is intentionally visible (via bash dynamic scoping) to the
  # setup-*-manifest helpers called below; keep the name stable.
  local -r dst_dir="/etc/kubernetes/addons"
  create-kubeconfig "addon-manager" "${ADDON_MANAGER_TOKEN}"
  # User and group should never contain characters that need to be quoted
  # shellcheck disable=SC2086
  prepare-log-file /var/log/kube-addon-manager.log ${KUBE_ADDON_MANAGER_RUNASUSER:-2002}
  # prep addition kube-up specific rbac objects
  setup-addon-manifests "addons" "rbac/kubelet-api-auth"
  setup-addon-manifests "addons" "rbac/kubelet-cert-rotation"
  if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
    setup-addon-manifests "addons" "rbac/legacy-kubelet-user"
  else
    setup-addon-manifests "addons" "rbac/legacy-kubelet-user-disable"
  fi
  if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
    setup-addon-manifests "addons" "podsecuritypolicies"
  fi
  # Set up manifests of other addons.
  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
    if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then
      # Replace with custom GKE kube proxy.
      cat > "$src_dir/kube-proxy/kube-proxy-ds.yaml" <<EOF
$CUSTOM_KUBE_PROXY_YAML
EOF
      update-daemon-set-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
    fi
    prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
    setup-addon-manifests "addons" "kube-proxy"
  fi
  # Stackdriver metadata agent (only meaningful with GCP cluster logging).
  if [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
     [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
    if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]]; then
      metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
      metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
      metadata_agent_cluster_level_cpu_request="${METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST:-40m}"
      metadata_agent_cluster_level_memory_request="${METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST:-50Mi}"
      setup-addon-manifests "addons" "metadata-agent/stackdriver"
      metadata_agent_yaml="${dst_dir}/metadata-agent/stackdriver/metadata-agent.yaml"
      sed -i -e "s@{{ metadata_agent_cpu_request }}@${metadata_agent_cpu_request}@g" "${metadata_agent_yaml}"
      sed -i -e "s@{{ metadata_agent_memory_request }}@${metadata_agent_memory_request}@g" "${metadata_agent_yaml}"
      sed -i -e "s@{{ metadata_agent_cluster_level_cpu_request }}@${metadata_agent_cluster_level_cpu_request}@g" "${metadata_agent_yaml}"
      sed -i -e "s@{{ metadata_agent_cluster_level_memory_request }}@${metadata_agent_cluster_level_memory_request}@g" "${metadata_agent_yaml}"
    fi
  fi
  if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
    setup-addon-manifests "addons" "metrics-server"
    base_metrics_server_cpu="40m"
    base_metrics_server_memory="40Mi"
    metrics_server_memory_per_node="4"
    metrics_server_min_cluster_size="16"
    if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
      base_metrics_server_cpu="40m"
      base_metrics_server_memory="35Mi"
      metrics_server_memory_per_node="4"
      metrics_server_min_cluster_size="5"
    fi
    local -r metrics_server_yaml="${dst_dir}/metrics-server/metrics-server-deployment.yaml"
    sed -i -e "s@{{ base_metrics_server_cpu }}@${base_metrics_server_cpu}@g" "${metrics_server_yaml}"
    sed -i -e "s@{{ base_metrics_server_memory }}@${base_metrics_server_memory}@g" "${metrics_server_yaml}"
    sed -i -e "s@{{ metrics_server_memory_per_node }}@${metrics_server_memory_per_node}@g" "${metrics_server_yaml}"
    sed -i -e "s@{{ metrics_server_min_cluster_size }}@${metrics_server_min_cluster_size}@g" "${metrics_server_yaml}"
  fi
  if [[ "${ENABLE_NVIDIA_GPU_DEVICE_PLUGIN:-}" == "true" ]]; then
    setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
  fi
  # Setting up the konnectivity-agent daemonset
  if [[ "${RUN_KONNECTIVITY_PODS:-false}" == "true" ]]; then
    setup-addon-manifests "addons" "konnectivity-agent"
    setup-konnectivity-agent-manifest
  fi
  if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
    # Create a new directory for the DNS addon and prepend a "0" on the name.
    # Prepending "0" to the directory ensures that add-on manager
    # creates the dns service first. This ensures no other add-on
    # can "steal" the designated DNS clusterIP.
    BASE_ADDON_DIR=${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty
    BASE_DNS_DIR=${BASE_ADDON_DIR}/dns
    NEW_DNS_DIR=${BASE_ADDON_DIR}/0-dns
    mkdir "${NEW_DNS_DIR}" && mv "${BASE_DNS_DIR}"/* "${NEW_DNS_DIR}" && rm -r "${BASE_DNS_DIR}"
    if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
      setup-coredns-manifest
    else
      setup-kube-dns-manifest
    fi
    if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
      setup-nodelocaldns-manifest
    fi
  fi
  if [[ "${ENABLE_NETD:-}" == "true" ]]; then
    setup-netd-manifest
  fi
  if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
     [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
    setup-addon-manifests "addons" "fluentd-elasticsearch"
    local -r fluentd_es_configmap_yaml="${dst_dir}/fluentd-elasticsearch/fluentd-es-configmap.yaml"
    # Quoted (was unquoted; SC2086).
    update-container-runtime "${fluentd_es_configmap_yaml}"
  fi
  if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
    setup-addon-manifests "addons" "fluentd-gcp"
    # Quoted (was unquoted; SC2086).
    setup-fluentd "${dst_dir}"
    local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
    update-event-exporter "${event_exporter_yaml}"
    update-prometheus-to-sd-parameters "${event_exporter_yaml}"
  fi
  if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
    setup-addon-manifests "addons" "dashboard"
    local -r dashboard_deployment_yaml="${dst_dir}/dashboard/dashboard-deployment.yaml"
    # Quoted (was unquoted; SC2086).
    update-dashboard-deployment "${dashboard_deployment_yaml}"
  fi
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]]; then
    setup-addon-manifests "addons" "node-problem-detector"
  fi
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
    # Setup role binding(s) for standalone node problem detector.
    if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
      setup-addon-manifests "addons" "node-problem-detector/standalone"
    fi
    setup-addon-manifests "addons" "node-problem-detector/kubelet-user-standalone" "node-problem-detector"
  fi
  if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
    setup-addon-manifests "admission-controls" "limit-range" "gce"
  fi
  setup-addon-manifests "addons" "admission-resource-quota-critical-pods"
  if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
    setup-addon-manifests "addons" "calico-policy-controller"
    setup-addon-custom-yaml "addons" "calico-policy-controller" "calico-node-daemonset.yaml" "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
    setup-addon-custom-yaml "addons" "calico-policy-controller" "typha-deployment.yaml" "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
    # Configure Calico CNI directory.
    local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
    sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
  fi
  if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
    setup-addon-manifests "addons" "storage-class/gce"
  fi
  if [[ "${ENABLE_VOLUME_SNAPSHOTS:-}" == "true" ]]; then
    setup-addon-manifests "addons" "volumesnapshots/crd"
    setup-addon-manifests "addons" "volumesnapshots/volume-snapshot-controller"
    start-volumesnapshot-crd-and-controller
  fi
  if [[ "${ENABLE_IP_MASQ_AGENT:-}" == "true" ]]; then
    setup-addon-manifests "addons" "ip-masq-agent"
  fi
  if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
    setup-addon-manifests "addons" "metadata-proxy/gce"
    local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
    # Quoted (was unquoted; SC2086).
    update-daemon-set-prometheus-to-sd-parameters "${metadata_proxy_yaml}"
  fi
  if [[ "${ENABLE_ISTIO:-}" == "true" ]]; then
    if [[ "${ISTIO_AUTH_TYPE:-}" == "MUTUAL_TLS" ]]; then
      setup-addon-manifests "addons" "istio/auth"
    else
      setup-addon-manifests "addons" "istio/noauth"
    fi
  fi
  if [[ -n "${EXTRA_ADDONS_URL:-}" ]]; then
    download-extra-addons
    setup-addon-manifests "addons" "gce-extras"
  fi
  # Place addon manager pod manifest.
  src_file="${src_dir}/kube-addon-manager.yaml"
  sed -i -e "s@{{kubectl_prune_whitelist_override}}@${KUBECTL_PRUNE_WHITELIST_OVERRIDE:-}@g" "${src_file}"
  sed -i -e "s@{{kubectl_extra_prune_whitelist}}@${ADDON_MANAGER_PRUNE_WHITELIST:-}@g" "${src_file}"
  sed -i -e "s@{{runAsUser}}@${KUBE_ADDON_MANAGER_RUNASUSER:-2002}@g" "${src_file}"
  sed -i -e "s@{{runAsGroup}}@${KUBE_ADDON_MANAGER_RUNASGROUP:-2002}@g" "${src_file}"
  cp "${src_file}" /etc/kubernetes/manifests
}
function setup-konnectivity-agent-manifest {
local -r manifest="/etc/kubernetes/addons/konnectivity-agent/konnectivity-agent-ds.yaml"
sed -i "s|__APISERVER_IP__|${KUBERNETES_MASTER_NAME}|g" "${manifest}"
# http-connect mode authenticates the agent with a client cert/key, so the
# pod gets extra flags plus a hostPath volume/mount for the pki directory;
# in (default) grpc mode the placeholders are simply removed.
if [[ "${KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}" == 'http-connect' ]]; then
sed -i "s|__EXTRA_PARAMS__|\t\t\"--agent-cert=/etc/srv/kubernetes/pki/konnectivity-agent/client.crt\",\n\t\t\"--agent-key=/etc/srv/kubernetes/pki/konnectivity-agent/client.key\",|g" "${manifest}"
sed -i "s|__EXTRA_VOL_MNTS__| - name: pki\n mountPath: /etc/srv/kubernetes/pki/konnectivity-agent|g" "${manifest}"
sed -i "s|__EXTRA_VOLS__| - name: pki\n hostPath:\n path: /etc/srv/kubernetes/pki/konnectivity-agent|g" "${manifest}"
else
sed -i "s|__EXTRA_PARAMS__||g" "${manifest}"
sed -i "s|__EXTRA_VOL_MNTS__||g" "${manifest}"
sed -i "s|__EXTRA_VOLS__||g" "${manifest}"
fi
}
# Setups manifests for ingress controller and gce-specific policies for service controller.
function start-lb-controller {
setup-addon-manifests "addons" "loadbalancing"
# Starts a l7 loadbalancing controller for ingress.
if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
echo "Start GCE L7 pod"
prepare-log-file /var/log/glbc.log
setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
setup-addon-manifests "addons" "rbac/cluster-loadbalancing/glbc"
create-kubeconfig "l7-lb-controller" "${GCE_GLBC_TOKEN}"
local -r src_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest"
local -r dest_manifest="/etc/kubernetes/manifests/glbc.manifest"
# A fully custom ingress manifest wins over the stock one.
if [[ -n "${CUSTOM_INGRESS_YAML:-}" ]]; then
echo "${CUSTOM_INGRESS_YAML}" > "${dest_manifest}"
else
cp "${src_manifest}" "${dest_manifest}"
fi
# Override the glbc image if GCE_GLBC_IMAGE is specified.
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
sed -i "s|image:.*|image: ${GCE_GLBC_IMAGE}|" "${dest_manifest}"
fi
fi
}
# Setup working directory for kubelet.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
# Bind-mount the directory onto itself so it can then be remounted with
# exec/suid/dev enabled regardless of the parent filesystem's mount options.
mount -B /var/lib/kubelet /var/lib/kubelet/
mount -B -o remount,exec,suid,dev /var/lib/kubelet
# TODO(#60123): The kubelet should create the cert-dir directory if it doesn't exist
mkdir -p /var/lib/kubelet/pki/
# Mount /var/lib/kubelet/pki on a tmpfs so it doesn't persist across
# reboots. This can help avoid some rare instances of corrupt cert files
# (e.g. created but not written during a shutdown). Kubelet crash-loops
# in these cases. Do this after above mount calls so it isn't overwritten.
echo "Mounting /var/lib/kubelet/pki on tmpfs"
mount -t tmpfs tmpfs /var/lib/kubelet/pki
}
# Override for GKE custom master setup scripts (no-op outside of GKE).
function gke-master-start {
# Prefer the GKE-internal helper script when present; otherwise fall back
# to building a local admin kubeconfig from the bearer token. No-op when
# neither is available.
if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
echo "Running GKE internal configuration script"
. "${KUBE_HOME}/bin/gke-internal-configure-helper.sh"
gke-internal-master-start
elif [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
echo "setting up local admin kubeconfig"
create-kubeconfig "local-admin" "${KUBE_BEARER_TOKEN}"
# Make login shells pick up the admin kubeconfig by default.
echo "export KUBECONFIG=/etc/srv/kubernetes/local-admin/kubeconfig" > /etc/profile.d/kubeconfig.sh
fi
}
function reset-motd {
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
# or the git hash that's in the build info.
local gitref
gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=""
if [[ "${gitref}" != "${version}" ]]; then
# Version did not reduce to a clean release tag: treat it as a development
# build and add a disclaimer to the motd.
devel="
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
"
gitref="${version//*+/}"
fi
# Unquoted EOF: the variable references below expand when the motd is written.
cat > /etc/motd <<EOF
Welcome to Kubernetes ${version}!
You can find documentation for Kubernetes at:
http://docs.kubernetes.io/
The source for this release can be found at:
/home/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
https://storage.googleapis.com/gke-release/kubernetes/release/${version}/kubernetes-src.tar.gz
It is based on the Kubernetes source at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
/home/kubernetes/LICENSES
EOF
}
function override-kubectl {
echo "overriding kubectl"
# Put ${KUBE_HOME}/bin first on PATH for all login shells.
echo "export PATH=${KUBE_HOME}/bin:\$PATH" > /etc/profile.d/kube_env.sh
# source the file explicitly otherwise we have
# issues on a ubuntu OS image finding the kubectl
# shellcheck disable=SC1091
source /etc/profile.d/kube_env.sh
# Add ${KUBE_HOME}/bin into sudoer secure path.
local sudo_path
sudo_path=$(sudo env | grep "^PATH=")
if [[ -n "${sudo_path}" ]]; then
sudo_path=${sudo_path#PATH=}
# Subshell keeps the umask change (restrictive perms for the sudoers.d
# drop-in) from leaking into the rest of the script.
(
umask 027
echo "Defaults secure_path=\"${KUBE_HOME}/bin:${sudo_path}\"" > /etc/sudoers.d/kube_secure_path
)
fi
}
function override-pv-recycler {
if [[ -z "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
echo "PV_RECYCLER_OVERRIDE_TEMPLATE is not set"
exit 1
fi
# Globals (JSON fragments) consumed elsewhere when assembling the manifest:
# a hostPath volume and matching read-only mount exposing the template
# written below.
PV_RECYCLER_VOLUME="{\"name\": \"pv-recycler-mount\",\"hostPath\": {\"path\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"type\": \"FileOrCreate\"}},"
PV_RECYCLER_MOUNT="{\"name\": \"pv-recycler-mount\",\"mountPath\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"readOnly\": true},"
# Quoted delimiter (\EOF): the template is written verbatim, no expansion.
# NOTE(review): the header reads "version: v1" rather than "apiVersion: v1"
# -- presumably what the PV recycler pod-template loader expects; confirm
# before changing.
cat > "${PV_RECYCLER_OVERRIDE_TEMPLATE}" <<\EOF
version: v1
kind: Pod
metadata:
generateName: pv-recycler-
namespace: default
spec:
activeDeadlineSeconds: 60
restartPolicy: Never
volumes:
- name: vol
containers:
- name: pv-recycler
image: k8s.gcr.io/busybox:1.27
command:
- /bin/sh
args:
- -c
- test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z $(ls -A /scrub) || exit 1
volumeMounts:
- name: vol
mountPath: /scrub
EOF
}
function wait-till-apiserver-ready() {
  # Poll the API server until a basic query succeeds.
  while ! kubectl get nodes; do
    sleep 5
  done
}
function ensure-master-bootstrap-kubectl-auth {
# By default, `kubectl` uses http://localhost:8080
# If the insecure port is disabled, kubectl will need to use an admin-authenticated kubeconfig.
if [[ -n "${KUBE_BOOTSTRAP_TOKEN:-}" ]]; then
create-kubeconfig "kube-bootstrap" "${KUBE_BOOTSTRAP_TOKEN}"
# Exported so every later kubectl call in this bootstrap process uses it.
export KUBECONFIG=/etc/srv/kubernetes/kube-bootstrap/kubeconfig
fi
}
function setup-containerd {
echo "Generate containerd config"
local config_path="${CONTAINERD_CONFIG_PATH:-"/etc/containerd/config.toml"}"
mkdir -p "$(dirname "${config_path}")"
local cni_template_path="${KUBE_HOME}/cni.template"
# Default ptp+portmap CNI config template; {{.PodCIDR}} is a template
# placeholder filled in later, not a shell expansion.
cat > "${cni_template_path}" <<EOF
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "ptp",
"mtu": 1460,
"ipam": {
"type": "host-local",
"subnet": "{{.PodCIDR}}",
"routes": [
{
"dst": "0.0.0.0/0"
}
]
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
EOF
if [[ "${KUBERNETES_MASTER:-}" != "true" ]]; then
if [[ "${NETWORK_POLICY_PROVIDER:-"none"}" != "none" || "${ENABLE_NETD:-}" == "true" ]]; then
# Use Kubernetes cni daemonset on node if network policy provider is specified
# or netd is enabled.
cni_template_path=""
fi
fi
# Base containerd config (v2 schema) with the CRI plugin enabled; the
# heredoc is unquoted so the ${...} settings expand now.
cat > "${config_path}" <<EOF
version = 2
# Kubernetes requires the cri plugin.
required_plugins = ["io.containerd.grpc.v1.cri"]
# Kubernetes doesn't use containerd restart manager.
disabled_plugins = ["io.containerd.internal.v1.restart"]
oom_score = -999
[debug]
level = "${CONTAINERD_LOG_LEVEL:-"info"}"
[plugins."io.containerd.grpc.v1.cri"]
stream_server_address = "127.0.0.1"
max_container_log_line_size = ${CONTAINERD_MAX_CONTAINER_LOG_LINE:-262144}
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "${KUBE_HOME}/bin"
conf_dir = "/etc/cni/net.d"
conf_template = "${cni_template_path}"
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://mirror.gcr.io","https://registry-1.docker.io"]
EOF
if [[ "${CONTAINER_RUNTIME_TEST_HANDLER:-}" == "true" ]]; then
cat >> "${config_path}" <<EOF
# Setup a runtime with the magic name ("test-handler") used for Kubernetes
# runtime class tests ...
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.test-handler]
runtime_type = "io.containerd.runc.v2"
EOF
fi
# Reuse docker group for containerd.
local -r containerd_gid="$(grep ^docker: /etc/group | cut -d: -f 3)"
if [[ -n "${containerd_gid:-}" ]]; then
cat >> "${config_path}" <<EOF
# reuse id of the docker group
[grpc]
gid = ${containerd_gid}
EOF
fi
chmod 644 "${config_path}"
echo "Restart containerd to load the config change"
systemctl restart containerd
}
# This function detects the platform/arch of the machine where the script runs,
# and sets the HOST_PLATFORM and HOST_ARCH environment variables accordingly.
# Callers can specify HOST_PLATFORM_OVERRIDE and HOST_ARCH_OVERRIDE to skip the detection.
# This function is adapted from the detect_client_info function in cluster/get-kube-binaries.sh
# and kube::util::host_os, kube::util::host_arch functions in hack/lib/util.sh
# This function should be synced with detect_host_info in ./configure.sh
function detect_host_info() {
  # Platform detection; HOST_PLATFORM_OVERRIDE skips calling uname.
  HOST_PLATFORM=${HOST_PLATFORM_OVERRIDE:-"$(uname -s)"}
  case "${HOST_PLATFORM}" in
    Linux|linux)
      HOST_PLATFORM="linux"
      ;;
    *)
      echo "Unknown, unsupported platform: ${HOST_PLATFORM}." >&2
      echo "Supported platform(s): linux." >&2
      echo "Bailing out." >&2
      exit 2
  esac
  # Architecture detection; HOST_ARCH_OVERRIDE skips calling uname.
  HOST_ARCH=${HOST_ARCH_OVERRIDE:-"$(uname -m)"}
  case "${HOST_ARCH}" in
    x86_64*|i?86_64*|amd64*)
      HOST_ARCH="amd64"
      ;;
    # Fixed: dropped the unmatchable pattern "aHOST_arch64*" (leftover from a
    # botched substitution of "aarch64"); the valid spellings remain.
    aarch64*|arm64*)
      HOST_ARCH="arm64"
      ;;
    *)
      echo "Unknown, unsupported architecture (${HOST_ARCH})." >&2
      echo "Supported architecture(s): amd64 and arm64." >&2
      echo "Bailing out." >&2
      exit 2
      ;;
  esac
}
# Initializes variables used by the log-* functions.
#
# get-metadata-value must be defined before calling this function.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function log-init {
  # Used by log-* functions.
  # Cluster UID comes from the GCE metadata server; the fallback string marks
  # a failed lookup in the emitted log protos instead of aborting bootstrap.
  LOG_CLUSTER_ID=$(get-metadata-value 'instance/attributes/cluster-uid' 'get-metadata-value-error')
  LOG_INSTANCE_NAME=$(hostname)
  # Boot id of the current boot: journalctl lists boots with relative index 0.
  LOG_BOOT_ID=$(journalctl --list-boots | grep -E '^ *0' | awk '{print $2}')
  # Global associative array: step name -> start timestamp (seconds.nanos).
  declare -Ag LOG_START_TIMES
  # Global stack of pending EXIT-trap commands (see log-trap-push/pop).
  declare -ag LOG_TRAP_STACK
  LOG_STATUS_STARTED='STARTED'
  LOG_STATUS_COMPLETED='COMPLETED'
  LOG_STATUS_ERROR='ERROR'
}
# Sets an EXIT trap.
# Args:
#   $1:... : the trap command.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function log-trap-push {
  local trap_cmd="${*:1}"
  # Remember the command so log-trap-pop can later restore the previous trap.
  LOG_TRAP_STACK+=("${trap_cmd}")
  # shellcheck disable=2064
  trap "${trap_cmd}" EXIT
}
# Removes and restores an EXIT trap.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function log-trap-pop {
  # Remove current trap.
  unset 'LOG_TRAP_STACK[-1]'
  # Restore previous trap.
  if [ ${#LOG_TRAP_STACK[@]} -ne 0 ]; then
    local t="${LOG_TRAP_STACK[-1]}"
    # shellcheck disable=2064
    trap "${t}" EXIT
  else
    # If no traps in stack, clear.
    # Single-argument form: bash treats a lone valid signal spec as a reset,
    # restoring the default EXIT disposition.
    trap EXIT
  fi
}
# Logs the end of a bootstrap step that errored.
# Args:
#   $1 : bootstrap step name.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function log-error {
  local step_name="$1"
  # Emit an ERROR proto with a fixed reason; the step's start time was
  # recorded earlier by log-proto when the step STARTED.
  log-proto "${step_name}" "${LOG_STATUS_ERROR}" "encountered non-zero exit code"
}
# Wraps a command with bootstrap logging.
# Args:
#   $1    : bootstrap step name.
#   $2... : the command to run.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function log-wrap {
  local bootstep="$1"
  local command="${*:2}"
  # Arm an EXIT trap first so an abort inside ${command} still logs ERROR.
  log-trap-push "log-error ${bootstep}"
  log-proto "${bootstep}" "${LOG_STATUS_STARTED}"
  # Deliberately unquoted: the joined command string is word-split back into
  # a command name plus its arguments before execution.
  $command
  log-proto "${bootstep}" "${LOG_STATUS_COMPLETED}"
  log-trap-pop
}
# Logs a bootstrap step start. Prefer log-wrap.
# Args:
#   $1 : bootstrap step name.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function log-start {
  local step_name="$1"
  # Arm the error trap before announcing the step so that a crash between
  # log-start and the matching log-end is still reported.
  log-trap-push "log-error ${step_name}"
  log-proto "${step_name}" "${LOG_STATUS_STARTED}"
}
# Logs a bootstrap step end. Prefer log-wrap.
# Args:
#   $1 : bootstrap step name.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function log-end {
  local step_name="$1"
  log-proto "${step_name}" "${LOG_STATUS_COMPLETED}"
  # Disarm the matching error trap that log-start installed.
  log-trap-pop
}
# Writes a log proto to stdout.
# Args:
#   $1: bootstrap step name.
#   $2: status. Either 'STARTED', 'COMPLETED', or 'ERROR'.
#   $3: optional status reason.
#
# NOTE: this function is duplicated in configure.sh, any changes here should be
# duplicated there as well.
function log-proto {
  local bootstep="$1"
  local status="$2"
  local status_reason="${3:-}"
  # Get current time.
  local current_time
  current_time="$(date --utc '+%s.%N')"
  # ...formatted as UTC RFC 3339.
  local timestamp
  timestamp="$(date --utc --date="@${current_time}" '+%FT%T.%NZ')"
  # Calculate latency.
  local latency='null'
  if [ "${status}" == "${LOG_STATUS_STARTED}" ]; then
    # Record the start time; latency stays the JSON literal null for STARTED.
    LOG_START_TIMES["${bootstep}"]="${current_time}"
  else
    local start_time="${LOG_START_TIMES["${bootstep}"]}"
    unset 'LOG_START_TIMES['"${bootstep}"']'
    # Bash cannot do non-integer math, shell out to awk.
    latency="$(echo "${current_time} ${start_time}" | awk '{print $1 - $2}')s"
    # The default latency is null which cannot be wrapped as a string so we must
    # do it here instead of the printf.
    latency="\"${latency}\""
  fi
  printf '[cloud.kubernetes.monitoring.proto.SerialportLog] {"cluster_hash":"%s","vm_instance_name":"%s","boot_id":"%s","timestamp":"%s","bootstrap_status":{"step_name":"%s","status":"%s","status_reason":"%s","latency":%s}}\n' \
  "${LOG_CLUSTER_ID}" "${LOG_INSTANCE_NAME}" "${LOG_BOOT_ID}" "${timestamp}" "${bootstep}" "${status}" "${status_reason}" "${latency}"
}
########### Main Function ###########

# Top-level bootstrap sequence for a GCE Kubernetes node or master:
# sources kube-env, generates component tokens, prepares the OS and the
# container runtime, then starts the kubelet and (on masters) the control
# plane components. Every step is wrapped in log-* bootstrap logging.
function main() {
  echo "Start to configure instance for kubernetes"
  log-wrap 'DetectHostInfo' detect_host_info

  readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds"
  readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
  readonly COREDNS_AUTOSCALER="Deployment/coredns"
  readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"

  # Resource requests of master components.
  KUBE_CONTROLLER_MANAGER_CPU_REQUEST="${KUBE_CONTROLLER_MANAGER_CPU_REQUEST:-200m}"
  KUBE_SCHEDULER_CPU_REQUEST="${KUBE_SCHEDULER_CPU_REQUEST:-75m}"

  KUBE_HOME="/home/kubernetes"
  KUBE_BIN=${KUBE_HOME}/bin
  CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
  PV_RECYCLER_OVERRIDE_TEMPLATE="${KUBE_HOME}/kube-manifests/kubernetes/pv-recycler-template.yaml"

  # kube-env carries the per-instance configuration; without it the node
  # cannot be configured at all.
  log-start 'SourceKubeEnv'
  if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
    echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
    exit 1
  fi
  source "${KUBE_HOME}/kube-env"
  log-end 'SourceKubeEnv'

  if [[ -f "${KUBE_HOME}/kubelet-config.yaml" ]]; then
    echo "Found Kubelet config file at ${KUBE_HOME}/kubelet-config.yaml"
    KUBELET_CONFIG_FILE_ARG="--config ${KUBE_HOME}/kubelet-config.yaml"
  fi

  if [[ -e "${KUBE_HOME}/kube-master-certs" ]]; then
    log-wrap 'SourceKubeMasterCerts' source "${KUBE_HOME}/kube-master-certs"
  fi

  log-start 'VerifyKubeUser'
  if [[ -n "${KUBE_USER:-}" ]]; then
    if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
      echo "Bad KUBE_USER format."
      exit 1
    fi
  fi
  log-end 'VerifyKubeUser'

  # One shared-secret token per control-plane client; some are conditional
  # on the corresponding feature being enabled in kube-env.
  log-start 'GenerateTokens'
  KUBE_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
  KUBE_SCHEDULER_TOKEN="$(secure_random 32)"
  KUBE_CLUSTER_AUTOSCALER_TOKEN="$(secure_random 32)"
  if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
    GCE_GLBC_TOKEN="$(secure_random 32)"
  fi
  ADDON_MANAGER_TOKEN="$(secure_random 32)"
  if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" != "true" ]]; then
    KUBE_BOOTSTRAP_TOKEN="$(secure_random 32)"
  fi
  if [[ "${PREPARE_KONNECTIVITY_SERVICE:-false}" == "true" ]]; then
    KONNECTIVITY_SERVER_TOKEN="$(secure_random 32)"
  fi
  if [[ "${ENABLE_MONITORING_TOKEN:-false}" == "true" ]]; then
    MONITORING_TOKEN="$(secure_random 32)"
  fi
  log-end 'GenerateTokens'

  log-wrap 'SetupOSParams' setup-os-params
  log-wrap 'ConfigIPFirewall' config-ip-firewall
  log-wrap 'CreateDirs' create-dirs
  log-wrap 'EnsureLocalSSDs' ensure-local-ssds
  log-wrap 'SetupKubeletDir' setup-kubelet-dir
  log-wrap 'SetupLogrotate' setup-logrotate

  # PKI and kubeconfig generation differs between masters and worker nodes.
  if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
    log-wrap 'MountMasterPD' mount-master-pd
    log-wrap 'CreateNodePKI' create-node-pki
    log-wrap 'CreateMasterPKI' create-master-pki
    log-wrap 'CreateMasterAuth' create-master-auth
    log-wrap 'EnsureMasterBootstrapKubectlAuth' ensure-master-bootstrap-kubectl-auth
    log-wrap 'CreateMasterKubeletAuth' create-master-kubelet-auth
    log-wrap 'CreateMasterEtcdAuth' create-master-etcd-auth
    log-wrap 'CreateMasterEtcdApiserverAuth' create-master-etcd-apiserver-auth
    log-wrap 'OverridePVRecycler' override-pv-recycler
    log-wrap 'GKEMasterStart' gke-master-start
  else
    log-wrap 'CreateNodePKI' create-node-pki
    log-wrap 'CreateKubeletKubeconfig' create-kubelet-kubeconfig "${KUBERNETES_MASTER_NAME}"
    if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
      log-wrap 'CreateKubeproxyUserKubeconfig' create-kubeproxy-user-kubeconfig
    fi
    if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
      if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
        log-wrap 'CreateNodeProblemDetectorKubeconfig' create-node-problem-detector-kubeconfig "${KUBERNETES_MASTER_NAME}"
      elif [[ -f "/var/lib/kubelet/kubeconfig" ]]; then
        log-wrap 'CreateNodeProblemDetectorKubeconfigFromKubelet' create-node-problem-detector-kubeconfig-from-kubelet
      else
        echo "Either NODE_PROBLEM_DETECTOR_TOKEN or /var/lib/kubelet/kubeconfig must be set"
        exit 1
      fi
    fi
  fi

  log-wrap 'OverrideKubectl' override-kubectl

  container_runtime="${CONTAINER_RUNTIME:-docker}"
  # Run the containerized mounter once to pre-cache the container image.
  if [[ "${container_runtime}" == "docker" ]]; then
    log-wrap 'AssembleDockerFlags' assemble-docker-flags
  elif [[ "${container_runtime}" == "containerd" ]]; then
    if docker-installed; then
      # We still need to configure docker so it wouldn't reserver the 172.17.0/16 subnet
      # And if somebody will start docker to build or pull something, logging will also be set up
      log-wrap 'AssembleDockerFlags' assemble-docker-flags
      # stop docker if it is present as we want to use just containerd
      log-wrap 'StopDocker' systemctl stop docker || echo "unable to stop docker"
    fi
    log-wrap 'SetupContainerd' setup-containerd
  fi

  log-start 'SetupKubePodLogReadersGroupDir'
  if [[ -n "${KUBE_POD_LOG_READERS_GROUP:-}" ]]; then
    # setgid bit so new pod log files inherit the readers group.
    mkdir -p /var/log/pods/
    chgrp -R "${KUBE_POD_LOG_READERS_GROUP:-}" /var/log/pods/
    chmod -R g+s /var/log/pods/
  fi
  log-end 'SetupKubePodLogReadersGroupDir'

  log-wrap 'StartKubelet' start-kubelet

  if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
    log-wrap 'ComputeMasterManifestVariables' compute-master-manifest-variables
    if [[ -z "${ETCD_SERVERS:-}" ]]; then
      log-wrap 'StartEtcdServers' start-etcd-servers
    fi
    log-wrap 'SourceConfigureKubeApiserver' source ${KUBE_BIN}/configure-kubeapiserver.sh
    log-wrap 'StartKubeApiserver' start-kube-apiserver
    if [[ "${RUN_KONNECTIVITY_PODS:-false}" == "true" ]]; then
      log-wrap 'StartKonnectivityServer' start-konnectivity-server
    fi
    log-wrap 'StartKubeControllerManager' start-kube-controller-manager
    log-wrap 'StartKubeScheduler' start-kube-scheduler
    log-wrap 'WaitTillApiserverReady' wait-till-apiserver-ready
    log-wrap 'StartKubeAddons' start-kube-addons
    log-wrap 'StartClusterAutoscaler' start-cluster-autoscaler
    log-wrap 'StartLBController' start-lb-controller
    # Backgrounded; the wait below collects it.
    log-wrap 'UpdateLegacyAddonNodeLabels' update-legacy-addon-node-labels &
  else
    if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
      log-wrap 'StartKubeProxy' start-kube-proxy
    fi
    if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
      log-wrap 'StartNodeProblemDetector' start-node-problem-detector
    fi
  fi

  log-wrap 'ResetMotd' reset-motd
  log-wrap 'PrepareMounterRootfs' prepare-mounter-rootfs

  # Wait for all background jobs to finish.
  wait
  echo "Done for the configuration for kubernetes"
}
# Run the bootstrap only when executed directly (not when sourced, e.g. by
# tests that want the function definitions without side effects).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
  log-init
  log-wrap 'ConfigureHelperMain' main "${@}"

  if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
    # Give kube-bootstrap-logs-forwarder.service some time to write all logs.
    sleep 3
  fi
fi
|
#!/bin/bash
# Send the startup command sequence to the motor controller board attached
# to /dev/ttyUSB0. Each command is written as its own line, opening the
# device per write exactly as the original per-command redirections did.
send() {
  echo "$1" > /dev/ttyUSB0
}

send "a.speed:10000"
send "a.turn:fwd"
send "b.cmd.param:64.kvalhold"
send "b.cmd.softstop"
send "c.speed:10000"
send "c.turn:fwd"
send "d.speed:10000"
send "d.turn:rev"
send "run"
|
#!/usr/bin/env sh
# Publish the prebuilt aws-ecr-gitlab helper image, tag 3, to Docker Hub.
image='bausparkadse/aws-ecr-gitlab:3'
docker push "$image"
#!/bin/sh
# Load the path-conversion and working-directory helper libraries.
# Both are resolved relative to $EOPEN_ROOT, which the caller must export;
# `set -u` makes a missing EOPEN_ROOT fail loudly instead of sourcing "/...".
set -eu
# shellcheck source=cygwin/path.sh
. "$EOPEN_ROOT/cygwin/path.sh"
# shellcheck source=share/wd.sh
. "$EOPEN_ROOT/share/wd.sh"
-- Count how many distinct users have placed at least five orders.
-- Portability fix: the previous version filtered with `HAVING total_orders >= 5`,
-- referencing a SELECT alias inside HAVING, which standard SQL (and e.g.
-- PostgreSQL) rejects; HAVING now repeats the aggregate expression.
SELECT COUNT(*)
FROM (SELECT user_id
      FROM orders
      GROUP BY user_id
      HAVING COUNT(*) >= 5) AS users;
# Integration specs for LucidMail: server-side rendering of a mail component
# and the exact content of the generated JavaScript asset bundle.
require 'spec_helper'

RSpec.describe 'LucidMail' do
  context 'on server' do
    it 'can create a mail' do
      # Build the mail on the server and return the rendered component HTML.
      result = on_server do
        mail = LucidMail.new(component: 'EmailComponent', props: { name: 'Werner' }, from: '<EMAIL>', to: '<EMAIL>', subject: 'Welcome')
        mail.build
        mail.rendered_component
      end
      expect(result).to include 'Welcome Werner!'
    end

    it 'asset imports are ok' do
      # The bundled loader preamble must match this exact byte sequence;
      # any change to the asset pipeline shows up as a diff here.
      result = Isomorfeus.assets['mail.js'].to_s
      expect(result).to eq <<~JAVASCRIPT
        import * as Redux from "redux";
        global.Redux = Redux;
        import * as Preact from "preact";
        global.Preact = Preact;
        import * as PreactHooks from "preact/hooks";
        global.PreactHooks = PreactHooks;
        import { Router, Link, Redirect, Route, Switch } from "wouter-preact";
        global.Router = Router;
        global.Link = Link;
        global.Redirect = Redirect;
        global.Route = Route;
        global.Switch = Switch;
        import { render } from "preact-render-to-string";
        global.Preact.renderToString = render;
        import staticLocationHook from "wouter-preact/static-location";
        global.staticLocationHook = staticLocationHook;
        import WebSocket from "ws";
        global.WebSocket = WebSocket;
        import("./mail_loader.js");
      JAVASCRIPT
    end
  end
end
#!/bin/bash
# Build the ./cmd/... packages. When a Go toolchain is present the build runs
# inside the official golang Docker image; otherwise a Go 1.16.5 toolchain is
# downloaded and installed first, then the build runs natively.
# NOTE(review): the branch choice looks inverted (a machine WITH go uses
# Docker, one WITHOUT go installs go locally) -- confirm this is intended.
set -euo pipefail

# Check if Go is installed
if command -v go &> /dev/null
then
    # Build inside the golang Docker image
    docker run --rm -v "$(pwd)":/usr/src/myapp -w /usr/src/myapp golang:latest go build -v ./cmd/...
else
    # Bootstrap a Go toolchain, then build natively.
    # -f makes curl fail (non-zero exit) on HTTP errors instead of saving an
    # error page that tar would then choke on; the tarball is removed after
    # extraction instead of being left in the working directory.
    curl -fsSL -O https://dl.google.com/go/go1.16.5.linux-amd64.tar.gz
    tar -C /usr/local -xzf go1.16.5.linux-amd64.tar.gz
    rm -f go1.16.5.linux-amd64.tar.gz
    export PATH=$PATH:/usr/local/go/bin
    go build -v ./cmd/...
fi

# Display installation message
echo "helm-hashtag installed!"
echo "See the README for usage examples."
echo
package com.learning;

import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.context.annotation.ComponentScan;

/**
 * Spring Boot configuration entry point for this module group.
 *
 * The three annotations together are equivalent to {@code @SpringBootApplication}:
 * they mark this class as the primary configuration, enable auto-configuration,
 * and scan {@code com.learning} (this class's package) for components.
 */
@SpringBootConfiguration
@EnableAutoConfiguration
@ComponentScan
public class ModuleGroupConfiguration {
}
|
#!/bin/bash
# Fire 200 length-prefixed JSON test messages at the local service on port 5000.
# Usage: $0 <name-prefix> <numeric-param>
# Robustness fix: without both arguments the interpolated payload was invalid
# JSON (e.g. `"params": [, "-1"]`); fail fast with a usage message instead.
if [ "$#" -lt 2 ]; then
  echo "Usage: $0 <name-prefix> <numeric-param>" >&2
  exit 1
fi

for ID in {1..200}
do
  # Wire format: "<payload-byte-length>~<payload>".
  message="{\"action\": \"test\", \"params\": [$2, \"$1-$ID\"]}";
  length=${#message}
  { echo "$length~$message"; } | telnet localhost 5000;
done
|
<gh_stars>1-10
import ctypes
import os

from .. import Indicators_t, Control_t

# Pick the shared-library file name for the current platform.
if os.name == 'nt':
    _LIBRARY_FILE = "dd-drivecontroller.dll"
else:
    _LIBRARY_FILE = "libdd-drivecontroller.so"

# Best effort: put the repository's default binary output directory on PATH so
# the dynamic loader can find the library when running from a source checkout.
try:
    _CURRENT_SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
    _DEFAULT_LIBRARY_PATH = os.path.join(_CURRENT_SCRIPT_PATH, "..", "..", "..", "bin")
    os.environ["PATH"] += os.pathsep + _DEFAULT_LIBRARY_PATH
except Exception:
    # Deliberately best-effort: fall back to the system search path. Narrowed
    # from a bare `except:` so SystemExit/KeyboardInterrupt are not swallowed.
    pass

_LIBRARY = ctypes.cdll.LoadLibrary(_LIBRARY_FILE)
class CDriveController():
    """ctypes wrapper owning a native ``CDriveController`` instance."""

    def __init__(self, Lanes):
        # Describe and call the C constructor; the returned opaque pointer
        # owns the native object until __del__ destroys it.
        Constructor = _LIBRARY.CDriveController_create
        Constructor.restype = ctypes.c_void_p
        Constructor.argtypes = [ctypes.c_uint32]
        self._Object = ctypes.c_void_p(Constructor(Lanes))

    def __del__(self):
        # Guard against partially constructed instances: if the constructor
        # raised before _Object was assigned, the old code raised a spurious
        # AttributeError from inside __del__ during garbage collection.
        obj = getattr(self, '_Object', None)
        if obj is not None:
            _LIBRARY.CDriveController_destroy(obj)

    def control(self, Indicators, Control):
        """Run one control step: reads Indicators, writes Control in place."""
        control = _LIBRARY.CDriveController_control
        control.argtypes = [ctypes.c_void_p, ctypes.POINTER(Indicators_t), ctypes.POINTER(Control_t)]
        control(self._Object, Indicators, Control)
#Install or upgrade NLTK and numpy
pip install --upgrade nltk
pip install --upgrade numpy
mkdir -p dependencies
mkdir -p models
cd dependencies
#Get the source of udpipe (we need it to train models)
git clone https://github.com/ufal/udpipe.git
##Download the syntactically annotated data for Latvian that has been created in the Artificial Intelligence Laboratory of the Institute of Mathematics and Computer Science of the University of Latvia (or in short: www.ailab.lv)
#Be cautious when using it though! The data has a restrictive CC BY NC license! This basically means: not for free! More information: https://github.com/UniversalDependencies/UD_Latvian
git clone https://github.com/UniversalDependencies/UD_Latvian.git
##However, if you need to get just a tagger, there is data for training a tagger that is more open - GPL (https://github.com/PeterisP/LVTagger/blob/master/LICENSE.txt).
#It may require some re-formatting though!
##Download the syntactically annotated data for English. Authors: Universal Dependencies English Web Treebank © 2013-2017 by The Board of Trustees of The Leland Stanford Junior University.
##The English data is licensed under a very open license: CC BY 4.0. More information: https://github.com/UniversalDependencies/UD_English
git clone https://github.com/UniversalDependencies/UD_English.git
##We need to train UDPipe models for Latvian and English using the available data:
#First, compile udpipe
cd udpipe/src
make
#Now, we train the Latvian and English tokenisation, morphological tagging and syntactic (dependency) parsing models.
#The tokeniser is trained using a bidirectional LSTM artificial neural network architecture that for each character predicts whether there is a token boundary after the character, whether there is a sentence boundary after the character and whether there is no boundary after the character.
#The tagger models are trained using the averaged perceptron ML methods (Semi-Supervised Training for the Averaged Perceptron POS Tagger, http://aclweb.org/anthology//E/E09/E09-1087.pdf).
#The parser is a transition-based, non-projective dependency parser that uses a neural net-work classifier for prediction (Straka et al., 2016).
#NOTE(review): --heldout is given the *train* split while the positional
#training file is the *dev* split; this looks swapped (convention is to train
#on train and hold out dev) -- confirm against the udpipe manual before reuse.
./udpipe --train ../../../models/lv.model.output --heldout=../../UD_Latvian/lv_lvtb-ud-train.conllu ../../UD_Latvian/lv_lvtb-ud-dev.conllu --tagger="models=2"
./udpipe --train ../../../models/en.model.output --heldout=../../UD_English/en_ewt-ud-train.conllu ../../UD_English/en_ewt-ud-dev.conllu --tagger="models=2"
#You can read more about the UDPipe toolkit in the paper by Straka et al. (2016): http://www.lrec-conf.org/proceedings/lrec2016/pdf/873_Paper.pdf .
#Then, we will need to set up python bindings. NOTE! YOU SHOULD SPECIFY HERE THE CORRECT PATH TO THE PYTHON INCLUDE FILES (Python.h)!
cd ../bindings/python
make PYTHON_INCLUDE=/usr/include/python2.7/
pip install ufal.udpipe
#Alternatively you could try:
#So that you accidentally do not mess up your system, I commented the following line out! But NOTE that you have to make the library visible to python!
#sudo cp -r ufal* /usr/local/lib/python2.7/dist-packages/
#If you use Anaconda, you might need to specify here:
#make PYTHON_INCLUDE=~/anaconda2/include/python2.7/
#sudo cp -r ufal* ~/anaconda2/lib/python2.7/site-packages/
cd ../../..
##We need to train a truecasing model as the UDPipe models seem to missclassify the first words in a sentence rather often due to capitalisation.
git clone https://github.com/nreimers/truecaser.git
wget http://data.statmt.org/wmt17/translation-task/europarl-v8.lv.tgz
tar zxf europarl-v8.lv.tgz
rm europarl-v8.lv.tgz
mv training-monolingual/europarl-v8.lv ./
rmdir training-monolingual
wget http://www.statmt.org/wmt14/training-monolingual-europarl-v7/europarl-v7.en.gz
gunzip europarl-v7.en.gz
cd ..
#download NLTK data (will be necessary for the truecaser)
python -m nltk.downloader -d ~/nltk_data all
#Train the truecasers on the Europarl monolingual corpora downloaded above.
python train-truecaser.py -i dependencies/europarl-v8.lv -o models/truecasing-model.lv
python train-truecaser.py -i dependencies/europarl-v7.en -o models/truecasing-model.en
#We will look at the output with a conll data visualisation tool.
cd dependencies
git clone https://github.com/spyysalo/conllu.js.git
git clone https://github.com/spyysalo/annodoc.git
#git clone https://github.com/nlplab/brat.git
cd ..
#!/bin/bash
#
# Batch-job wrapper: load the cluster module system and the user environment,
# pin thread counts, then run the uracil-trimer calculation with Julia.
source /etc/profile.d/modules.sh
source ~/.bashrc
#
# 56 OpenMP threads overall, but MKL restricted to 1 thread per call so the
# two layers of parallelism do not oversubscribe the node.
export OMP_NUM_THREADS=56
export MKL_NUM_THREADS=1
#
julia --check-bounds=no --math-mode=fast --optimize=3 --inline=yes --compiled-modules=yes 6-311++G_2d_2p/uracil_trimer.jl
public class BubbleSort {
public static void sort(int[] arr) {
int n = arr.length;
for (int i = 0; i < n-1; i++) {
for (int j = 0; j < n-i-1; j++) {
if (arr[j] > arr[j+1]) {
// swap elements
int temp = arr[j];
arr[j] = arr[j+1];
arr[j+1] = temp;
}
}
}
}
public static void main(String[] args) {
int[] arr = {3, 5, 2, 4};
// Before sort
System.out.println("Before sort:");
for (int element : arr) {
System.out.print(element + " ");
}
System.out.println();
// After sort
sort(arr);
System.out.println("After sort");
for (int element : arr) {
System.out.print(element + " ");
}
}
} |
<gh_stars>1-10
#include "stdafx.h"
#include "draw_texture_3d.h"
#include "matrix_call.h"
#include "flowvis/shader.h"
#include "mmcore/CoreInstance.h"
#include "mmcore/view/CallRender3D_2.h"
#include "mmcore/view/Camera_2.h"
#include "compositing/CompositingCalls.h"
#include "vislib/Exception.h"
#include "vislib/graphics/gl/GLSLShader.h"
#include "vislib/graphics/gl/ShaderSource.h"
#include "vislib/math/Cuboid.h"
#include "vislib/sys/Log.h"
#include "glowl/Texture2D.hpp"
#include "glm/vec4.hpp"
#include <exception>
#include <string>
namespace megamol {
namespace flowvis {
/// Constructor: registers the texture input slot (required) and the model
/// matrix input slot (optional; see GetExtents for the fallback) so the
/// module graph can connect them.
draw_texture_3d::draw_texture_3d()
    : texture_slot("texture", "Input texture")
    , model_matrix_slot("model_matrix", "Model matrix for positioning of the rendered texture quad") {

    // Connect input
    this->texture_slot.SetCompatibleCall<compositing::CallTexture2DDescription>();
    this->MakeSlotAvailable(&this->texture_slot);

    this->model_matrix_slot.SetCompatibleCall<matrix_call::matrix_call_description>();
    this->MakeSlotAvailable(&this->model_matrix_slot);
}
/// Destructor: triggers module teardown, which ends up in release().
draw_texture_3d::~draw_texture_3d() { this->Release(); }
/// One-time module initialization; GL resources are created lazily in Render().
bool draw_texture_3d::create() { return true; }
void draw_texture_3d::release() {
// Remove shader
if (this->render_data.initialized) {
glDetachShader(this->render_data.prog, this->render_data.vs);
glDetachShader(this->render_data.prog, this->render_data.fs);
glDeleteProgram(this->render_data.prog);
}
}
/// Render the connected 2D texture as an alpha-blended quad in the 3D scene.
/// Fetches the input texture, lazily compiles the quad shader program on
/// first use (the quad vertices are hard-coded in the vertex shader, so no
/// vertex buffer is needed), then draws using the model matrix cached by
/// GetExtents and the current camera view/projection matrices.
/// Returns false if no texture source is connected or on shader errors.
bool draw_texture_3d::Render(core::view::CallRender3D_2& call) {
    auto tc_ptr = this->texture_slot.CallAs<compositing::CallTexture2D>();

    // Get input texture
    if (tc_ptr == nullptr) {
        vislib::sys::Log::DefaultLog.WriteError("No input connected");
        return false;
    }
    auto& tc = *tc_ptr;

    if (!tc(0)) {
        vislib::sys::Log::DefaultLog.WriteError("Error getting texture");
        return false;
    }
    this->render_data.texture = tc.getData();

    // Get camera
    core::view::Camera_2 cam;
    call.GetCamera(cam);

    cam_type::matrix_type view, proj;
    cam.calc_matrices(view, proj);

    // Build shaders (once; guarded by render_data.initialized)
    if (!this->render_data.initialized) {
        const std::string vertex_shader =
            "#version 330 \n" \
            "uniform mat4 model_mx; \n" \
            "uniform mat4 view_mx; \n" \
            "uniform mat4 proj_mx; \n" \
            "out vec2 tex_coords; \n" \
            "void main() { \n" \
            " const vec2 vertices[6] =\n" \
            " vec2[6](vec2(0.0f, 0.0f), vec2(1.0f, 0.0f), vec2(1.0f, 1.0f),\n" \
            " vec2(0.0f, 0.0f), vec2(1.0f, 1.0f), vec2(0.0f, 1.0f));\n" \
            " tex_coords = vertices[gl_VertexID]; \n" \
            " gl_Position = proj_mx * view_mx * model_mx * vec4(tex_coords, 0.0f, 1.0f); \n" \
            "}";

        const std::string fragment_shader =
            "#version 330\n" \
            "uniform sampler2D tex2D; \n" \
            "in vec2 tex_coords; \n" \
            "out vec4 fragColor; \n" \
            "void main() { \n" \
            " fragColor = texture(tex2D, tex_coords); \n" \
            "}";

        try
        {
            this->render_data.vs = utility::make_shader(vertex_shader, GL_VERTEX_SHADER);
            this->render_data.fs = utility::make_shader(fragment_shader, GL_FRAGMENT_SHADER);

            this->render_data.prog = utility::make_program({this->render_data.vs, this->render_data.fs});
        }
        catch (const std::exception& e)
        {
            vislib::sys::Log::DefaultLog.WriteError(e.what());

            return false;
        }

        this->render_data.initialized = true;
    }

    // Draw quad with given texture and model matrix.
    // Blend enable state and blend functions are saved first and restored
    // afterwards so this module does not perturb surrounding GL state.
    const auto blend_enabled = glIsEnabled(GL_BLEND);
    if (!blend_enabled) glEnable(GL_BLEND);

    GLint blend_src_rgb, blend_src_alpha, blend_dst_rgb, blend_dst_alpha;
    glGetIntegerv(GL_BLEND_SRC_RGB, &blend_src_rgb);
    glGetIntegerv(GL_BLEND_SRC_ALPHA, &blend_src_alpha);
    glGetIntegerv(GL_BLEND_DST_RGB, &blend_dst_rgb);
    glGetIntegerv(GL_BLEND_DST_ALPHA, &blend_dst_alpha);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    glUseProgram(this->render_data.prog);

    glActiveTexture(GL_TEXTURE0);
    this->render_data.texture->bindTexture();
    glUniform1i(glGetUniformLocation(this->render_data.prog, "tex2D"), 0);

    glUniformMatrix4fv(glGetUniformLocation(this->render_data.prog, "model_mx"), 1, GL_FALSE, glm::value_ptr(this->render_data.model_matrix));
    glUniformMatrix4fv(glGetUniformLocation(this->render_data.prog, "view_mx"), 1, GL_FALSE, glm::value_ptr(static_cast<glm::mat4>(view)));
    glUniformMatrix4fv(glGetUniformLocation(this->render_data.prog, "proj_mx"), 1, GL_FALSE, glm::value_ptr(static_cast<glm::mat4>(proj)));

    glDrawArrays(GL_TRIANGLES, 0, 6);

    glBindTexture(GL_TEXTURE_2D, 0);
    glUseProgram(0);

    glBlendFuncSeparate(blend_src_rgb, blend_dst_rgb, blend_src_alpha, blend_dst_alpha);
    if (!blend_enabled) glDisable(GL_BLEND);

    return true;
}
/// Report module extents: queries the input texture's metadata, caches the
/// (optional) model matrix for Render(), and derives the bounding box from
/// the unit quad's corners transformed by that matrix.
bool draw_texture_3d::GetExtents(core::view::CallRender3D_2& call) {
    auto tc_ptr = this->texture_slot.CallAs<compositing::CallTexture2D>();
    auto mc_ptr = this->model_matrix_slot.CallAs<matrix_call>();

    // Get input texture meta data
    if (tc_ptr == nullptr) {
        vislib::sys::Log::DefaultLog.WriteError("No input connected");
        return false;
    }
    auto& tc = *tc_ptr;

    if (!tc(1)) {
        vislib::sys::Log::DefaultLog.WriteError("Error getting texture extent");
        return false;
    }

    // Get model matrix; an unconnected or failing matrix source degrades to
    // the identity matrix (texture quad stays at the unit square).
    if (mc_ptr == nullptr) {
        this->render_data.model_matrix = glm::mat4(1.0f);
    } else {
        auto& mc = *mc_ptr;

        if (!mc(0)) {
            vislib::sys::Log::DefaultLog.WriteWarn("Error getting model matrix. Using unit matrix instead");

            this->render_data.model_matrix = glm::mat4(1.0f);
        } else {
            this->render_data.model_matrix = mc.get_matrix();
        }
    }

    // Set bounding box
    // NOTE(review): the transformed corners are passed to Cuboid::Set as-is;
    // if the model matrix mirrors or rotates the quad, "origin" may not be
    // the minimum corner -- confirm Set tolerates unordered corner input.
    const auto origin = this->render_data.model_matrix * glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);
    const auto diagonal = this->render_data.model_matrix * glm::vec4(1.0f, 1.0f, 0.0f, 1.0f);

    vislib::math::Cuboid<float> bounding_box;
    bounding_box.Set(origin.x, origin.y, origin.z, diagonal.x, diagonal.y, diagonal.z);

    auto& boxes = call.AccessBoundingBoxes();
    boxes.SetBoundingBox(bounding_box);
    boxes.SetClipBox(bounding_box);

    call.SetTimeFramesCount(1);

    return true;
}
} // namespace flowvis
} // namespace megamol
|
/* ssln_adi.h - includes files needed by ssln_adi.c

ENUMS:
TYPEDEFS:
DEFINITIONS:
MACROS:
*/
#ifndef SSLN_ADI_H
/* Fix: the guard macro used to be defined as NULL, which silently depends on
   a prior <stddef.h>-style definition of NULL and gives the guard a pointer
   value; a plain (empty) definition is the conventional, robust form. It is
   also defined before the include to protect against recursive inclusion. */
#define SSLN_ADI_H

#include <ssln_cmn.h>

#endif
|
const MS_PER_SEC = 1000;
const SECONDS_PER_MINUTE = 60;
const MINUTES_PER_HOUR = 60;
const HOURS_PER_DAY = 24;

/**
 * Format a millisecond duration as a short human-readable string,
 * e.g. 90061000 -> "1d 1h 1m 1s". Negative input is clamped to 0; anything
 * under one second renders as "0s".
 *
 * Bug fix: days is the largest unit, so it must NOT be reduced modulo
 * HOURS_PER_DAY -- the old code wrapped 25 days around to "1d".
 *
 * @param {number} ms duration in milliseconds
 * @returns {string} space-separated parts, largest unit first
 */
export const msToHumanReadable = (ms) => {
  const humanTimeParts = [];
  const time = ms > 0 ? ms : 0;

  const seconds = Math.floor((time / MS_PER_SEC) % SECONDS_PER_MINUTE);
  const minutes = Math.floor((time / (MS_PER_SEC * SECONDS_PER_MINUTE)) % MINUTES_PER_HOUR);
  const hours = Math.floor(
    (time / (MS_PER_SEC * SECONDS_PER_MINUTE * MINUTES_PER_HOUR)) % HOURS_PER_DAY
  );
  // Top unit: plain floor, no modulo.
  const days = Math.floor(
    time / (MS_PER_SEC * SECONDS_PER_MINUTE * MINUTES_PER_HOUR * HOURS_PER_DAY)
  );

  if (days > 0) {
    humanTimeParts.push(`${days}d`);
  }
  if (hours > 0) {
    humanTimeParts.push(`${hours}h`);
  }
  if (minutes > 0) {
    humanTimeParts.push(`${minutes}m`);
  }
  if (seconds > 0) {
    humanTimeParts.push(`${seconds}s`);
  }

  if (humanTimeParts.length === 0) {
    return '0s';
  }
  return humanTimeParts.join(' ');
};
|
<filename>datasampler/__init__.py
import datasampler.class_random_sampler
import datasampler.random_sampler
import datasampler.rrandom_sampler
import datasampler.fid_batchmatch_sampler
import datasampler.greedy_coreset_sampler
import datasampler.spc_fid_batchmatch_sampler
import datasampler.distmoment_batchmatch_sampler
import datasampler.disthist_batchmatch_sampler
import datasampler.criterion_batchmatch_sampler
import datasampler.d2_coreset_sampler
import datasampler.full_random_sampler
import datasampler.full_greedy_coreset_sampler
def select(sampler, opt, image_dict, image_list=None, **kwargs):
    """Instantiate and return the batch sampler named by ``sampler``.

    Bug fix: the modules above are imported as ``import datasampler.x``,
    which binds only the package name ``datasampler`` -- the bare names used
    previously (e.g. ``fid_batchmatch_sampler``) raised NameError at call
    time. All references are now qualified through the package.

    Args:
        sampler: name of the sampling strategy (e.g. 'class_random',
            'fid_batchmatch', 'greedy_coreset', ...). Later, more specific
            substring checks deliberately override earlier ones.
        opt: experiment options object forwarded to the sampler.
        image_dict: mapping of class label -> image entries.
        image_list: optional flat list of image entries.
        **kwargs: ignored; accepted for call-site compatibility.

    Returns:
        The constructed ``Sampler`` instance.

    Raises:
        ValueError: if ``sampler`` matches no known strategy (previously this
            path crashed with UnboundLocalError).
    """
    sampler_lib = None
    if 'batchmatch' in sampler:
        if sampler == 'fid_batchmatch':
            sampler_lib = datasampler.fid_batchmatch_sampler
        elif sampler == 'distmoment_batchmatch':
            sampler_lib = datasampler.distmoment_batchmatch_sampler
        elif sampler == 'disthist_batchmatch':
            sampler_lib = datasampler.disthist_batchmatch_sampler
        elif sampler == 'spc_fid_batchmatch':
            sampler_lib = datasampler.spc_fid_batchmatch_sampler
        elif sampler == 'criterion_batchmatch':
            sampler_lib = datasampler.criterion_batchmatch_sampler
    if 'random' in sampler:
        sampler_lib = datasampler.random_sampler
    if 'class' in sampler:
        sampler_lib = datasampler.class_random_sampler
    if 'full' in sampler:
        sampler_lib = datasampler.full_random_sampler
    if 'rrandom' == sampler:
        sampler_lib = datasampler.rrandom_sampler
    if 'coreset' in sampler:
        if 'greedy' in sampler:
            if 'full' in sampler:
                sampler_lib = datasampler.full_greedy_coreset_sampler
            else:
                sampler_lib = datasampler.greedy_coreset_sampler
        if 'd2' in sampler:
            sampler_lib = datasampler.d2_coreset_sampler
    if sampler_lib is None:
        raise ValueError('Unknown sampler: {}'.format(sampler))
    sampler = sampler_lib.Sampler(opt, image_dict=image_dict, image_list=image_list)
    return sampler
|
/*
* Copyright 2017-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.incubator.net.l2monitoring.soam.loss;
import java.time.Duration;
import org.onosproject.incubator.net.l2monitoring.soam.MilliPct;
/**
* The default implementation of {@link LossAvailabilityStat}.
*/
public abstract class DefaultLaStat implements LossAvailabilityStat {
private final Duration elapsedTime;
private final boolean suspectStatus;
private final Long forwardHighLoss;
private final Long backwardHighLoss;
private final Long forwardConsecutiveHighLoss;
private final Long backwardConsecutiveHighLoss;
private final Long forwardAvailable;
private final Long backwardAvailable;
private final Long forwardUnavailable;
private final Long backwardUnavailable;
private final MilliPct forwardMinFrameLossRatio;
private final MilliPct forwardMaxFrameLossRatio;
private final MilliPct forwardAverageFrameLossRatio;
private final MilliPct backwardMinFrameLossRatio;
private final MilliPct backwardMaxFrameLossRatio;
private final MilliPct backwardAverageFrameLossRatio;
    /**
     * Construct an immutable stat snapshot by copying every measurement value
     * out of the supplied builder.
     *
     * @param builder populated builder holding the measurement values
     */
    protected DefaultLaStat(DefaultLaStatBuilder builder) {
        this.elapsedTime = builder.elapsedTime;
        this.suspectStatus = builder.suspectStatus;
        this.forwardHighLoss = builder.forwardHighLoss;
        this.backwardHighLoss = builder.backwardHighLoss;
        this.forwardConsecutiveHighLoss = builder.forwardConsecutiveHighLoss;
        this.backwardConsecutiveHighLoss = builder.backwardConsecutiveHighLoss;
        this.forwardAvailable = builder.forwardAvailable;
        this.backwardAvailable = builder.backwardAvailable;
        this.forwardUnavailable = builder.forwardUnavailable;
        this.backwardUnavailable = builder.backwardUnavailable;
        this.forwardMinFrameLossRatio = builder.forwardMinFrameLossRatio;
        this.forwardMaxFrameLossRatio = builder.forwardMaxFrameLossRatio;
        this.forwardAverageFrameLossRatio = builder.forwardAverageFrameLossRatio;
        this.backwardMinFrameLossRatio = builder.backwardMinFrameLossRatio;
        this.backwardMaxFrameLossRatio = builder.backwardMaxFrameLossRatio;
        this.backwardAverageFrameLossRatio = builder.backwardAverageFrameLossRatio;
    }
    // Read-only accessors: each simply exposes the corresponding immutable
    // field captured from the builder at construction time.
    @Override
    public Duration elapsedTime() {
        return elapsedTime;
    }

    @Override
    public boolean suspectStatus() {
        return suspectStatus;
    }

    @Override
    public Long forwardHighLoss() {
        return forwardHighLoss;
    }

    @Override
    public Long backwardHighLoss() {
        return backwardHighLoss;
    }

    @Override
    public Long forwardConsecutiveHighLoss() {
        return forwardConsecutiveHighLoss;
    }

    @Override
    public Long backwardConsecutiveHighLoss() {
        return backwardConsecutiveHighLoss;
    }

    @Override
    public Long forwardAvailable() {
        return forwardAvailable;
    }

    @Override
    public Long backwardAvailable() {
        return backwardAvailable;
    }

    @Override
    public Long forwardUnavailable() {
        return forwardUnavailable;
    }

    @Override
    public Long backwardUnavailable() {
        return backwardUnavailable;
    }

    @Override
    public MilliPct forwardMinFrameLossRatio() {
        return forwardMinFrameLossRatio;
    }

    @Override
    public MilliPct forwardMaxFrameLossRatio() {
        return forwardMaxFrameLossRatio;
    }

    @Override
    public MilliPct forwardAverageFrameLossRatio() {
        return forwardAverageFrameLossRatio;
    }

    @Override
    public MilliPct backwardMinFrameLossRatio() {
        return backwardMinFrameLossRatio;
    }

    @Override
    public MilliPct backwardMaxFrameLossRatio() {
        return backwardMaxFrameLossRatio;
    }

    @Override
    public MilliPct backwardAverageFrameLossRatio() {
        return backwardAverageFrameLossRatio;
    }
/**
 * Abstract base class for builders of {@link LossAvailabilityStat}.
 * The elapsed time and suspect status are mandatory and fixed at
 * construction; every other attribute is optional and set fluently.
 */
protected abstract static class DefaultLaStatBuilder implements LaStatBuilder {
    // Mandatory attributes, supplied once through the constructor.
    private final Duration elapsedTime;
    private final boolean suspectStatus;
    // Optional attributes; remain null unless the corresponding setter is called.
    private Long forwardHighLoss;
    private Long backwardHighLoss;
    private Long forwardConsecutiveHighLoss;
    private Long backwardConsecutiveHighLoss;
    private Long forwardAvailable;
    private Long backwardAvailable;
    private Long forwardUnavailable;
    private Long backwardUnavailable;
    private MilliPct forwardMinFrameLossRatio;
    private MilliPct forwardMaxFrameLossRatio;
    private MilliPct forwardAverageFrameLossRatio;
    private MilliPct backwardMinFrameLossRatio;
    private MilliPct backwardMaxFrameLossRatio;
    private MilliPct backwardAverageFrameLossRatio;

    /**
     * @param elapsedTime   duration the statistics were collected over
     * @param suspectStatus whether the interval may contain unreliable data
     */
    protected DefaultLaStatBuilder(Duration elapsedTime, boolean suspectStatus) {
        this.elapsedTime = elapsedTime;
        this.suspectStatus = suspectStatus;
    }

    // --- Fluent setters: each stores the given value and returns this builder. ---

    @Override
    public LaStatBuilder forwardHighLoss(Long forwardHighLoss) {
        this.forwardHighLoss = forwardHighLoss;
        return this;
    }

    @Override
    public LaStatBuilder backwardHighLoss(Long backwardHighLoss) {
        this.backwardHighLoss = backwardHighLoss;
        return this;
    }

    @Override
    public LaStatBuilder forwardConsecutiveHighLoss(
            Long forwardConsecutiveHighLoss) {
        this.forwardConsecutiveHighLoss = forwardConsecutiveHighLoss;
        return this;
    }

    @Override
    public LaStatBuilder backwardConsecutiveHighLoss(
            Long backwardConsecutiveHighLoss) {
        this.backwardConsecutiveHighLoss = backwardConsecutiveHighLoss;
        return this;
    }

    @Override
    public LaStatBuilder forwardAvailable(Long forwardAvailable) {
        this.forwardAvailable = forwardAvailable;
        return this;
    }

    @Override
    public LaStatBuilder backwardAvailable(Long backwardAvailable) {
        this.backwardAvailable = backwardAvailable;
        return this;
    }

    @Override
    public LaStatBuilder forwardUnavailable(Long forwardUnavailable) {
        this.forwardUnavailable = forwardUnavailable;
        return this;
    }

    @Override
    public LaStatBuilder backwardUnavailable(Long backwardUnavailable) {
        this.backwardUnavailable = backwardUnavailable;
        return this;
    }

    @Override
    public LaStatBuilder forwardMinFrameLossRatio(
            MilliPct forwardMinFrameLossRatio) {
        this.forwardMinFrameLossRatio = forwardMinFrameLossRatio;
        return this;
    }

    @Override
    public LaStatBuilder forwardMaxFrameLossRatio(
            MilliPct forwardMaxFrameLossRatio) {
        this.forwardMaxFrameLossRatio = forwardMaxFrameLossRatio;
        return this;
    }

    @Override
    public LaStatBuilder forwardAverageFrameLossRatio(
            MilliPct forwardAverageFrameLossRatio) {
        this.forwardAverageFrameLossRatio = forwardAverageFrameLossRatio;
        return this;
    }

    @Override
    public LaStatBuilder backwardMinFrameLossRatio(
            MilliPct backwardMinFrameLossRatio) {
        this.backwardMinFrameLossRatio = backwardMinFrameLossRatio;
        return this;
    }

    @Override
    public LaStatBuilder backwardMaxFrameLossRatio(
            MilliPct backwardMaxFrameLossRatio) {
        this.backwardMaxFrameLossRatio = backwardMaxFrameLossRatio;
        return this;
    }

    @Override
    public LaStatBuilder backwardAverageFrameLossRatio(
            MilliPct backwardAverageFrameLossRatio) {
        this.backwardAverageFrameLossRatio = backwardAverageFrameLossRatio;
        return this;
    }
}
}
|
// constant_ops.rs
pub mod constant_ops {
    /// Adds two 32-bit integers and returns their sum.
    pub fn perform_mathematical_operation(lhs: i32, rhs: i32) -> i32 {
        lhs + rhs
    }

    /// Returns `true` when the two operands compare equal.
    pub fn compare_values<T: PartialEq>(left: T, right: T) -> bool {
        left == right
    }

    /// Renders any `ToString` value as an owned `String`.
    pub fn convert_to_string<T: ToString>(item: T) -> String {
        item.to_string()
    }
}
// env_ops.rs
pub mod env_ops {
    use std::env;

    /// Returns the value of process environment variable `key`,
    /// or `None` when it is unset or not valid Unicode
    /// (`env::var` errors are collapsed via `.ok()`).
    pub fn get_env_variable(key: &str) -> Option<String> {
        env::var(key).ok()
    }

    /// Sets `key` to `value` in this process's environment.
    /// NOTE(review): mutating the environment is not thread-safe on all
    /// platforms — confirm callers only do this before spawning threads.
    pub fn set_env_variable(key: &str, value: &str) {
        env::set_var(key, value);
    }

    /// Removes `key` from this process's environment (no-op if unset).
    pub fn delete_env_variable(key: &str) {
        env::remove_var(key);
    }
}
// local_ops.rs
pub mod local_ops {
    /// Identity helper: hands ownership of `initial` straight back.
    pub fn declare_variable<T>(initial: T) -> T {
        initial
    }

    /// Overwrites the value behind `slot` with `new_value`, dropping the
    /// previous contents.
    pub fn assign_variable<T>(slot: &mut T, new_value: T) {
        *slot = new_value;
    }

    // Scope management is handled implicitly by Rust's ownership and
    // borrowing rules, so no explicit helper is provided.
}
// mem_ops.rs
pub mod mem_ops {
    use std::alloc::{alloc, dealloc, Layout};
    use std::ptr;

    /// Allocates `size` bytes with byte alignment and returns the raw pointer.
    ///
    /// Returns a null pointer when `size` is zero — allocating through a
    /// zero-sized layout is undefined behaviour under the `GlobalAlloc`
    /// contract — or when the underlying allocator fails.
    ///
    /// The caller owns the returned memory and must release it with
    /// `deallocate_memory`, passing the same `size`.
    pub fn allocate_memory(size: usize) -> *mut u8 {
        if size == 0 {
            return ptr::null_mut();
        }
        unsafe {
            let layout = Layout::from_size_align(size, std::mem::align_of::<u8>()).unwrap();
            alloc(layout)
        }
    }

    /// Releases memory previously obtained from `allocate_memory`.
    ///
    /// `size` must match the original allocation size. Null pointers and
    /// zero sizes are ignored, so the zero-size sentinel returned by
    /// `allocate_memory(0)` round-trips safely.
    pub fn deallocate_memory(ptr: *mut u8, size: usize) {
        if ptr.is_null() || size == 0 {
            return;
        }
        unsafe {
            let layout = Layout::from_size_align(size, std::mem::align_of::<u8>()).unwrap();
            dealloc(ptr, layout);
        }
    }
}
#!/bin/bash
# Regenerate vmr.xsd and datatypes.xsd from ./original/, stripping every
# <xs:annotation>...</xs:annotation> block to keep the schemas compact.
set -euo pipefail

# strip_annotations SRC DEST — copy SRC to DEST minus xs:annotation blocks.
strip_annotations() {
    # -f: do not fail when DEST does not exist yet (the bare rm did).
    rm -f "$2"
    # h=1 while inside an annotation block; the bare "!h" pattern prints
    # every line that is outside one.
    awk '/<xs:annotation>/{h=1};!h;/<\/xs:annotation>/{h=0}' "$1" > "$2"
}

strip_annotations ./original/vmr.xsd ./vmr.xsd
strip_annotations ./original/datatypes.xsd ./datatypes.xsd
|
<reponame>OsakaStarbux/Stock-Price-Checker
/*
*
*
* FILL IN EACH UNIT TEST BELOW COMPLETELY
* -----[Keep the tests in the same order!]----
* (if additional are added, keep them at the very end!)
*/
var chai = require('chai');
//var StockHandler = require('../controllers/stockHandler.js');
//var stockPrices = new StockHandler();
// Intentionally empty suite: this project exposes no units that are
// testable in isolation (see the commented-out StockHandler above).
suite('Unit Tests', function(){
    // none required
});
<filename>test/utils/utils.test.js
/**
* @file Tests from Utils Module to ensure compatibility
* @since 1.0.0-alpha3
*/
import { Utils } from '@lib/ootk-utils.js'; // eslint-disable-line
const numDigits = 8;
const earthRadius = 6378.137;
const sincos45deg = Math.sqrt(2) / 2;
describe('Doppler factor', () => {
  it('without observer movement', () => {
    // Observer fixed at the North Pole, on the surface.
    const observerEcf = {
      x: 0,
      y: 0,
      z: earthRadius,
    };
    // Satellite 500 km directly above the observer.
    const positionEcf = {
      x: 0,
      y: 0,
      z: earthRadius + 500,
    };
    // ~7.91 km/s — roughly low-Earth orbital speed (escape velocity would
    // be ~11.2 km/s, so the original "escape velocity" label was wrong).
    // Motion is purely tangential here, so range rate — and Doppler — is zero.
    const velocityEcf = {
      x: 7.91,
      y: 0,
      z: 0,
    };
    const dopFactor = Utils.dopplerFactor(observerEcf, positionEcf, velocityEcf);
    expect(dopFactor).toBeCloseTo(1, numDigits);
  });
  it('movement of observer is not affected', () => {
    const observerEcf = {
      x: earthRadius,
      y: 0,
      z: 0,
    };
    const positionEcf = {
      x: earthRadius + 500,
      y: 0,
      z: 0,
    };
    // Velocity perpendicular to the line of sight: no Doppler shift expected.
    const velocityEcf = {
      x: 0,
      y: 7.91,
      z: 0,
    };
    const dopFactor = Utils.dopplerFactor(observerEcf, positionEcf, velocityEcf);
    expect(dopFactor).toBeCloseTo(1, numDigits);
  });
  it('special case', () => {
    const observerEcf = {
      x: earthRadius,
      y: 0,
      z: 0,
    };
    // Satellite at 45° in the x/y plane, radius earthRadius + 500.
    const positionEcf = {
      x: (earthRadius + 500) * sincos45deg, // r * sin(45°)
      y: (earthRadius + 500) * sincos45deg, // r * cos(45°)
      z: 0,
    };
    // Velocity components split equally along x and y (receding geometry).
    const velocityEcf = {
      x: 7.91 * sincos45deg,
      y: 7.91 * sincos45deg,
      z: 0,
    };
    const dopFactor = Utils.dopplerFactor(observerEcf, positionEcf, velocityEcf);
    expect(dopFactor).toBeCloseTo(1.0000107847789212, numDigits);
  });
  test('if negative range rate works', () => {
    const observerEcf = {
      x: earthRadius,
      y: 0,
      z: 0,
    };
    const positionEcf = {
      x: (earthRadius + 500) * sincos45deg, // r * sin(45°)
      y: (earthRadius + 500) * sincos45deg, // r * cos(45°)
      z: 0,
    };
    // Same geometry as above but velocity reversed (approaching).
    const velocityEcf = {
      x: -7.91 * sincos45deg,
      y: -7.91 * sincos45deg,
      z: 0,
    };
    const dopFactor = Utils.dopplerFactor(observerEcf, positionEcf, velocityEcf);
    expect(dopFactor).toBeCloseTo(1.000013747277977, numDigits);
  });
});
describe('Distance function', () => {
  // Straight-line (Euclidean) distance between two 3-D points.
  test('if distance calculation is correct', () => {
    // Identical points: zero distance.
    expect(Utils.distance({ x: 1000, y: 1000, z: 1000 }, { x: 1000, y: 1000, z: 1000 })).toEqual(0);
    // Points differing only on one axis: distance equals the axis delta.
    expect(Utils.distance({ x: 1000, y: 1000, z: 1000 }, { x: 1000, y: 1000, z: 1100 })).toEqual(
      100,
    );
  });
});
|
class BoxManager:
    # Help text consumed by the surrounding argument-parsing framework.
    __arghelp__ = "Class for managing parent-child relationship and boxing rules"

    def __init__(self):
        # NOTE(review): BoxManager declares no explicit base class, so this
        # super() call only works if the class is used as a mixin whose MRO
        # supplies box_parent() and __argname__ — confirm with the framework.
        super().__init__()
        # Export plug created lazily in box_parent().
        self._load_plug = None

    def box_parent(self, parent, box):
        """
        Method to set up the parent-child relationship and create a plug for loading the box.

        Args:
            parent: The parent box
            box: The child box
        """
        # Delegate to the cooperating base implementation first (see the
        # mixin note in __init__).
        super().box_parent(parent, box)
        # Register a weak export on the parent that loads this child box;
        # self.__argname__ is presumably injected by the framework.
        self._load_plug = parent.addexport(
            '__box_%s_load' % box.name, 'fn() -> err',
            scope=parent.name, source=self.__argname__, weak=True)

    def build_mk(self, output, box):
        """
        Method to create a boxing rule, to be invoked if embedding an elf is needed.

        Args:
            output: The output of the boxing rule
            box: The box for which the boxing rule is created
        """
        data_init = None
        # Only boxes that carry a 'data' section need an embedded-elf rule.
        if any(section.name == 'data' for section in box.sections):
            # Implement the logic to create a boxing rule if embedding an elf is needed
            # Use the 'output' and 'box' parameters to create the boxing rule
            # NOTE(review): placeholder string — real rule generation is
            # unimplemented; callers currently receive this marker value.
            data_init = "create boxing rule for embedding an elf"
        return data_init
<filename>lib/praxis/controller.rb
require 'active_support/concern'
require 'active_support/inflector'
module Praxis
  # Mixin that turns an including class into a Praxis controller.
  # Registers the class with the application and provides callback
  # registration plus binding to a resource definition.
  module Controller
    extend ActiveSupport::Concern

    included do
      attr_reader :request
      attr_accessor :response

      # Self-register every including class with the application.
      Application.instance.controllers << self

      # Per-class callback registries, keyed by stage path.
      self.instance_eval do
        @before_callbacks = Hash.new
        @after_callbacks = Hash.new
      end
    end

    module ClassMethods
      attr_reader :before_callbacks, :after_callbacks

      # Bind this controller to its resource definition (and vice versa);
      # `definition` becomes available as a singleton method.
      def implements(definition)
        define_singleton_method(:definition) do
          definition
        end
        definition.controller = self
      end

      # All actions declared on the bound definition.
      def actions
        definition.actions
      end

      # Single action lookup; raises KeyError for unknown names (fetch).
      def action(name)
        actions.fetch(name)
      end

      # Register a callback to run before the given stage (default :action).
      def before(*stage_path, **conditions, &block)
        stage_path = [:action] if stage_path.empty?
        @before_callbacks[stage_path] ||= Array.new
        @before_callbacks[stage_path] << [conditions, block]
      end

      # Register a callback to run after the given stage (default :action).
      def after(*stage_path, **conditions, &block)
        stage_path = [:action] if stage_path.empty?
        @after_callbacks[stage_path] ||= Array.new
        @after_callbacks[stage_path] << [conditions, block]
      end
    end

    # Response defaults to 200 OK unless one is supplied.
    def initialize(request, response=Responses::Ok.new)
      @request = request
      @response = response
    end
  end
end
|
<filename>ch13-ajax/ch13-rhinounit/rhinounit_1_2_1/jslint/jslintant.js
// jslintant.js
// The following is copied almost completely from <NAME>' blog at
//
// http://dev2dev.bea.com/blog/jsnyders/archive/2007/11/using_jslint_from_ant.html
//
// It is included as part of the rhinounit testsuite for completeness
importClass(java.io.File);
importClass(Packages.org.apache.tools.ant.util.FileUtils);
importClass(java.io.FileReader);

// Ant task attributes: JSLint options plus an optional path to fulljslint.js.
var options = attributes.get("options");
var fileset;
var ds;
var srcFiles;
var jsfile;
var lintfailed = false;

// Locate fulljslint.js (attribute override wins; relative paths resolve
// against the Ant basedir) and eval it into this script's scope.
var jsLintPath = "jslint/fulljslint.js";
if (attributes.get("jslintpath")) {
    jsLintPath = attributes.get("jslintpath");
}
var jsLintFile = new File(jsLintPath);
if (jsLintFile.isAbsolute() === false) {
    jsLintFile = new File(project.getProperty("basedir"), jsLintPath);
}
var readerLint = new FileReader(jsLintFile);
// Single declaration of fulljslint (it was previously declared twice).
var fulljslint = new String(FileUtils.readFully(readerLint));
readerLint.close(); // release the file handle (was leaked before)
eval(fulljslint.toString());

// Parse the options attribute into a real options object.
self.log("Attribute options = " + options);
eval("options = " + options + ";");

// Lint every file in every nested <fileset>.
var filesets = elements.get("fileset");
for (var j = 0; j < filesets.size(); j++) {
    fileset = filesets.get(j);
    ds = fileset.getDirectoryScanner(project);
    srcFiles = ds.getIncludedFiles();
    // `var i` keeps the loop index out of the global scope (it was implicit).
    for (var i = 0; i < srcFiles.length; i++) {
        jsfile = new File(fileset.getDir(project), srcFiles[i]);
        checkFile(jsfile);
    }
}
if (lintfailed) {
    self.fail("JS Lint validation failed.");
}
// Lint a single file with JSLINT and report problems through the Ant task
// logger. Sets the shared `lintfailed` flag instead of failing immediately
// so that every file in the fileset gets reported in one run.
function checkFile(file) {
    // Read the file into a genuine JavaScript string: readFully returns a
    // java.lang.String, and JSLINT assumes an array unless typeof is "string".
    var reader = new FileReader(file);
    var input;
    try {
        input = new String(FileUtils.readFully(reader));
    } finally {
        // Always release the underlying file handle (was leaked before).
        reader.close();
    }
    input = input.toString().replace(/\t/g, "    ");
    if (!input) {
        // Use the task logger consistently (this branch used a bare print()).
        self.log("jslint: Couldn't open file '" + file.toString() + "'.");
        return;
    }
    if (!JSLINT(input, options)) {
        self.log("jslint: " + JSLINT.errors.length + " Problems found in " + file.toString());
        for (var i = 0; i < JSLINT.errors.length; i += 1) {
            var e = JSLINT.errors[i];
            if (e) {
                self.log('Lint at line ' + (e.line + 1) + ' character ' +
                    (e.character + 1) + ': ' + e.reason);
                self.log((e.evidence || '').
                    replace(/^\s*(\S*(\s+\S+)*)\s*$/, "$1"));
                self.log('');
            }
        }
        // Emit a ready-to-paste exclude element for the build file.
        self.log("<exclude name=\"" + file.toString() + "\"/>");
        lintfailed = true;
    } else {
        self.log("jslint: No problems found in " + file.toString());
    }
}
|
<reponame>NikVogri/manineta-site<filename>src/pages/itemTemplate.js<gh_stars>0
import React from "react"
import Layout from "../components/Layout/Layout.component"
import { Container } from "react-bootstrap"
import { graphql } from "gatsby"
import ItemInfo from "../components/ItemInfo/ItemInfo.component"
import SimilarContent from "../components/SimilarContent/SimilarContent.component"
import SEO from "../components/SEO/SEO"
const itemTemplate = ({ data }) => {
let similarItems
if (data) {
similarItems = data.allContentfulIzdelki.edges.filter(
item => item.izdelek.imeIzdelka !== data.contentfulIzdelki.imeIzdelka
)
}
return (
<Layout darkNav>
<SEO
title={data.contentfulIzdelki.imeIzdelka}
description={`${data.contentfulIzdelki.opisIzdelka}`}
/>
<Container>
<ItemInfo data={data.contentfulIzdelki} />
{similarItems.length > 0 && (
<h4 style={{ margin: "2rem 0 1rem 0" }}>Podobno</h4>
)}
<SimilarContent data={similarItems} />
</Container>
</Layout>
)
}
// Page query: fetches the displayed product by slug plus every product in
// the same sub-category (feeds the "Podobno" similar-products section).
export const getTempData = graphql`
  query itemPages($slug: String, $podzavihek: String) {
    contentfulIzdelki(slugIzdelka: { eq: $slug }) {
      imeIzdelka
      cenaIzdelka
      prejsnjaCena
      slugIzdelka
      materijalIzdelka
      contentful_id
      zalogaIzdelka
      podzavihek
      velikostIzdelka
      opisIzdelka {
        internal {
          content
        }
      }
      slikeIzdelka {
        fluid(quality: 90) {
          ...GatsbyContentfulFluid
        }
      }
    }
    allContentfulIzdelki(filter: { podzavihek: { eq: $podzavihek } }) {
      edges {
        izdelek: node {
          imeIzdelka
          cenaIzdelka
          prejsnjaCena
          slugIzdelka
          podzavihek
          contentful_id
          slikeIzdelka {
            fixed(width: 180, height: 210) {
              ...GatsbyContentfulFixed
            }
          }
        }
      }
    }
  }
`

// NOTE(review): React components are conventionally PascalCase; the
// lowercase export is kept because page-creation config outside this file
// may reference it by name — confirm before renaming.
export default itemTemplate
|
package org.bian.dto;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonCreator;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import javax.validation.Valid;
/**
 * BQInternalPublicationRetrieveOutputModelInternalPublicationInstanceAnalysis
 *
 * Plain DTO holding the analysis attributes of an internal publication
 * instance; all fields are optional and default to null.
 */
public class BQInternalPublicationRetrieveOutputModelInternalPublicationInstanceAnalysis {

    private Object internalPublicationInstanceAnalysisRecord = null;

    private String internalPublicationInstanceAnalysisReportType = null;

    private String internalPublicationInstanceAnalysisParameters = null;

    private Object internalPublicationInstanceAnalysisReport = null;

    /**
     * `status: Not Mapped`  core-data-type-reference: BIAN::DataTypesLibrary::CoreDataTypes::UNCEFACT::Binary  general-info: The inputs and results of the instance analysis that can be on-going, periodic and actual and projected
     * @return internalPublicationInstanceAnalysisRecord
     **/
    public Object getInternalPublicationInstanceAnalysisRecord() {
        return internalPublicationInstanceAnalysisRecord;
    }

    /** @param internalPublicationInstanceAnalysisRecord the analysis record to set */
    public void setInternalPublicationInstanceAnalysisRecord(Object internalPublicationInstanceAnalysisRecord) {
        this.internalPublicationInstanceAnalysisRecord = internalPublicationInstanceAnalysisRecord;
    }

    /**
     * `status: Not Mapped`  core-data-type-reference: BIAN::DataTypesLibrary::CoreDataTypes::UNCEFACT::Code  general-info: The type of external performance analysis report available
     * @return internalPublicationInstanceAnalysisReportType
     **/
    public String getInternalPublicationInstanceAnalysisReportType() {
        return internalPublicationInstanceAnalysisReportType;
    }

    /** @param internalPublicationInstanceAnalysisReportType the report type to set */
    public void setInternalPublicationInstanceAnalysisReportType(String internalPublicationInstanceAnalysisReportType) {
        this.internalPublicationInstanceAnalysisReportType = internalPublicationInstanceAnalysisReportType;
    }

    /**
     * `status: Not Mapped`  core-data-type-reference: BIAN::DataTypesLibrary::CoreDataTypes::UNCEFACT::Text  general-info: The selection parameters for the analysis (e.g. period, algorithm type)
     * @return internalPublicationInstanceAnalysisParameters
     **/
    public String getInternalPublicationInstanceAnalysisParameters() {
        return internalPublicationInstanceAnalysisParameters;
    }

    /** @param internalPublicationInstanceAnalysisParameters the selection parameters to set */
    public void setInternalPublicationInstanceAnalysisParameters(String internalPublicationInstanceAnalysisParameters) {
        this.internalPublicationInstanceAnalysisParameters = internalPublicationInstanceAnalysisParameters;
    }

    /**
     * `status: Not Mapped`  core-data-type-reference: BIAN::DataTypesLibrary::CoreDataTypes::UNCEFACT::Binary  general-info: The external analysis report in any suitable form including selection filters where appropriate
     * @return internalPublicationInstanceAnalysisReport
     **/
    public Object getInternalPublicationInstanceAnalysisReport() {
        return internalPublicationInstanceAnalysisReport;
    }

    /** @param internalPublicationInstanceAnalysisReport the analysis report to set */
    public void setInternalPublicationInstanceAnalysisReport(Object internalPublicationInstanceAnalysisReport) {
        this.internalPublicationInstanceAnalysisReport = internalPublicationInstanceAnalysisReport;
    }
}
|
<gh_stars>0
import os
import requests
from discord import Member
from discord_slash.utils.manage_commands import create_option, SlashCommandOptionType
from .infrastructure import record_usage, registered_guild_and_admin_or_mod_only, CommandDefinition, defer_cmd
# Shared auth header for calls to the MASZ backend's internal API.
# NOTE(review): the bot token is sent bare; Discord's own API expects
# "Bot <token>", so presumably the internal backend accepts the raw
# value — confirm against the backend's auth middleware.
headers = {
    'Authorization': os.getenv("DISCORD_BOT_TOKEN")
}
async def _kick(ctx, member: Member, *, reason):
    """Kick a member and create a matching modcase in the MASZ backend.

    Args:
        ctx: The slash-command invocation context.
        member: The member to kick.
        reason: Mandatory kick reason (also becomes the modcase title).
    """
    await registered_guild_and_admin_or_mod_only(ctx)
    record_usage(ctx)
    await defer_cmd(ctx)
    if not reason:
        return await ctx.send("Please provide a reason.")
    modCase = {
        "title": reason[:99],  # backend title field is length-limited
        "description": reason,
        "modid": ctx.author.id,
        "userid": member.id,
        "punishment": "Kick",
        "labels": [],
        "PunishmentType": 2,
        "PunishmentActive": True
    }
    try:
        # NOTE: `requests` blocks the event loop while the call runs; the
        # timeout keeps a slow/unreachable backend from hanging the bot
        # forever (previously there was no timeout at all).
        r = requests.post(
            f"http://masz_backend/internalapi/v1/guilds/{ctx.guild.id}/modcases",
            json=modCase, headers=headers, timeout=10)
    except requests.RequestException as exc:
        # Connection failures used to propagate and crash the command.
        return await ctx.send(f"Something went wrong.\nError: {exc}")
    if r.status_code == 201:
        await ctx.send(f"Case #{r.json()['caseid']} created and user kicked.\nFollow this link for more information: {os.getenv('META_SERVICE_BASE_URL', 'URL not set.')}")
    elif r.status_code == 401:
        await ctx.send("You are not allowed to do this.")
    else:
        await ctx.send(f"Something went wrong.\nCode: {r.status_code}\nText: {r.text}")
# Slash-command registration metadata consumed by the bot's command loader;
# both options are required by Discord (True flags).
kick = CommandDefinition(
    func=_kick,
    short_help="Kick a member.",
    long_help="Kick a member. This also creates a modcase.",
    usage="kick <username|userid|usermention> <reason>",
    options=[
        create_option("member", "Member to kick.", SlashCommandOptionType.USER, True),
        create_option("reason", "Reason to kick.", SlashCommandOptionType.STRING, True),
    ]
)
|
package com.company;

import java.util.Scanner;

/**
 * Exercise 4.5: reads the number of sides and the side length of a regular
 * polygon and prints its area.
 */
public class Exercise_4_5 {

    /**
     * Area of a regular polygon with {@code n} sides of length {@code s},
     * via area = n * s^2 / (4 * tan(PI / n)).
     *
     * @param n number of sides (n >= 3 for a meaningful polygon)
     * @param s side length
     * @return the polygon's area
     */
    static double polygonArea(int n, double s) {
        return n * s * s / (4 * Math.tan(Math.PI / n));
    }

    public static void main(String[] args) {
        // try-with-resources closes the Scanner (it was previously leaked).
        try (Scanner input = new Scanner(System.in)) {
            System.out.print("Enter the number of sides: ");
            int n = input.nextInt();
            System.out.print("Enter the side: ");
            double s = input.nextDouble();
            System.out.println("The area of the polygon is " + polygonArea(n, s));
        }
    }
}
<gh_stars>0
module Teamweek
  module Pipes
    module Trello
      # Abstract base for Trello data repositories: subclasses implement
      # +pull_data+ (fetch the raw items) and +map_data+ (convert one item).
      class Repository
        attr_reader :client

        def initialize(options)
          @client = build_client(options)
        end

        # Fetch the raw collection from the remote source.
        # Subclass responsibility.
        def pull_data
          raise NotImplementedError
        end

        # Convert a single raw item into its domain representation.
        # Subclass responsibility. The parameter was missing before, so a
        # subclass lacking the override raised ArgumentError from #pull
        # instead of NotImplementedError.
        def map_data(_item)
          raise NotImplementedError
        end

        # Template method: pull everything and map each item.
        def pull
          pull_data.map { |item| map_data(item) }
        end

        private

        # Build the API client from the pipe options.
        def build_client(options)
          Client.new(
            options[:http_client],
            options[:foreign_workspace_id]
          )
        end
      end
    end
  end
end
|
<reponame>lananh265/social-network
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.ic_flash_auto_twotone = void 0;
var ic_flash_auto_twotone = {
"viewBox": "0 0 24 24",
"children": [{
"name": "path",
"attribs": {
"d": "M0 0h24v24H0V0z",
"fill": "none"
},
"children": []
}, {
"name": "path",
"attribs": {
"d": "M3 2v12h3v9l7-12H9l4-9zm14 0l-3.2 9h1.9l.7-2h3.2l.7 2h1.9L19 2h-2zm-.15 5.65L18 4l1.15 3.65h-2.3z"
},
"children": []
}]
};
exports.ic_flash_auto_twotone = ic_flash_auto_twotone; |
/** Common shape for anything that moves on wheels. */
interface Vehicle {
  name: string;
  wheels: number;
}

/** A Vehicle with a door count (cars). */
interface Car extends Vehicle {
  door: number;
}

/** A Vehicle with a "jok" count — presumably Indonesian for seat (motorcycles). */
interface Motor extends Vehicle {
  jok: number;
}
// Civix implements Car, which in turn extends Vehicle.
class Civix implements Car {
  name: string = "Civix";
  wheels: number = 4;
  door: number = 2;
}

// Supra implements Motor: a two-wheeled Vehicle with one seat.
class Supra implements Motor {
  name: string = "Supra";
  wheels: number = 2;
  jok: number = 1;
}
|
def find_div_by_five(nums):
    """Return every number in ``nums`` divisible by 5, preserving order."""
    return [value for value in nums if value % 5 == 0]
# Demo: expected output is [10, 20, 25].
result = find_div_by_five([2,4,10,20,25])
print(result)
<reponame>tdm1223/Algorithm
// BOJ 15917 — "no-solve prevention problem!!"
// 2021.08.26
// Topic: math (power-of-two test)
#include <cstdio>

// k is a power of two exactly when it is positive and has a single set bit.
// NOTE: the previous test `(k & -k) == k` also accepted k == 0; the k > 0
// guard rejects zero and negatives.
static bool isPowerOfTwo(int k)
{
    return k > 0 && (k & (k - 1)) == 0;
}

int main()
{
    int t;
    // Check scanf's result instead of ignoring it (malformed input would
    // previously leave t/k uninitialized).
    if (scanf("%d", &t) != 1)
    {
        return 1;
    }
    for (int i = 0; i < t; i++)
    {
        int k;
        if (scanf("%d", &k) != 1)
        {
            return 1;
        }
        printf("%d\n", isPowerOfTwo(k) ? 1 : 0);
    }
    return 0;
}
|
/*!
* vuex-smart-module v0.4.6
* https://github.com/ktsn/vuex-smart-module
*
* @license
* Copyright (c) 2018 katashin
* Released under the MIT license
* https://github.com/ktsn/vuex-smart-module/blob/master/LICENSE
*/
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('vuex')) :
typeof define === 'function' && define.amd ? define(['exports', 'vuex'], factory) :
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.VuexSmartModule = {}, global.Vuex));
}(this, (function (exports, vuex) { 'use strict';
/*! *****************************************************************************
Copyright (c) Microsoft Corporation.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
***************************************************************************** */
// tslib helper inlined by the TypeScript compiler: lazily picks
// Object.assign when available, else an ES5 fallback that copies own
// enumerable properties from each source left-to-right into the target.
var __assign = function() {
    __assign = Object.assign || function __assign(t) {
        for (var s, i = 1, n = arguments.length; i < n; i++) {
            s = arguments[i];
            for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
        }
        return t;
    };
    return __assign.apply(this, arguments);
};
/**
 * Creates an object whose prototype is F.prototype and whose own
 * properties are the entries of `injection` (each configurable,
 * enumerable and writable).
 */
function inject(F, injection) {
    var proto = F.prototype;
    var descriptors = {};
    var keys = Object.keys(injection);
    for (var i = 0; i < keys.length; i++) {
        var key = keys[i];
        descriptors[key] = {
            configurable: true,
            enumerable: true,
            writable: true,
            value: injection[key]
        };
    }
    return Object.create(proto, descriptors);
}
// Base class for user-defined getters modules; `state`/`getters` proxy to
// the vuex module context injected as __ctx__.
var Getters = /** @class */ (function () {
    function Getters() {
    }
    // Hook called once the store exists; subclasses may override.
    Getters.prototype.$init = function (_store) { };
    Object.defineProperty(Getters.prototype, "state", {
        get: function () {
            return this.__ctx__.state;
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Getters.prototype, "getters", {
        get: function () {
            return this.__ctx__.getters;
        },
        enumerable: false,
        configurable: true
    });
    return Getters;
}());

// Base class for user-defined mutations modules; only `state` is exposed,
// since vuex mutations must stay synchronous state writes.
var Mutations = /** @class */ (function () {
    function Mutations() {
    }
    Object.defineProperty(Mutations.prototype, "state", {
        get: function () {
            return this.__ctx__.state;
        },
        enumerable: false,
        configurable: true
    });
    return Mutations;
}());

// Base class for user-defined actions modules; exposes the full module
// context (state, getters, commit, dispatch, typed actions/mutations maps).
var Actions = /** @class */ (function () {
    function Actions() {
    }
    // Hook called once the store exists; subclasses may override.
    Actions.prototype.$init = function (_store) { };
    Object.defineProperty(Actions.prototype, "state", {
        get: function () {
            return this.__ctx__.state;
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Actions.prototype, "getters", {
        get: function () {
            return this.__ctx__.getters;
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Actions.prototype, "commit", {
        get: function () {
            return this.__ctx__.commit;
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Actions.prototype, "dispatch", {
        get: function () {
            return this.__ctx__.dispatch;
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Actions.prototype, "actions", {
        /**
         * IMPORTANT: Each action type maybe incorrect - return type of all actions should be `Promise<any>`
         * but the ones under `actions` are same as what you declared in this actions class.
         * The reason why we declare the type in such way is to avoid recursive type error.
         * See: https://github.com/ktsn/vuex-smart-module/issues/30
         */
        get: function () {
            return this.__ctx__.actions;
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Actions.prototype, "mutations", {
        get: function () {
            return this.__ctx__.mutations;
        },
        enumerable: false,
        configurable: true
    });
    return Actions;
}());
/** Shared do-nothing function. */
var noop = function () {};

/**
 * Merges any number of one-argument functions into a single function that
 * invokes each of them, in order, with the same argument.
 */
function combine() {
    var handlers = Array.prototype.slice.call(arguments);
    return function (value) {
        for (var index = 0; index < handlers.length; index++) {
            handlers[index](value);
        }
    };
}
/** Walks `path` (an array of keys) into `value` and returns the leaf. */
function get(path, value) {
    var node = value;
    for (var i = 0; i < path.length; i++) {
        node = node[path[i]];
    }
    return node;
}
/**
 * Object map: returns a new record with `fn(value, key)` applied to every
 * entry of `record`.
 */
function mapValues(record, fn) {
    return Object.keys(record).reduce(function (acc, key) {
        acc[key] = fn(record[key], key);
        return acc;
    }, {});
}
// Logs a library-tagged error message without throwing.
function error(message) {
    console.error("[vuex-smart-module] " + message);
}

// Throws a library-tagged Error when `condition` is falsy.
function assert(condition, message) {
    if (!condition) {
        throw new Error("[vuex-smart-module] " + message);
    }
}

// Emits a library-tagged deprecation warning.
function deprecated(message) {
    console.warn("[vuex-smart-module] DEPRECATED: " + message);
}

// Walks the prototype chain from `proto` up to (but excluding) `Base`,
// invoking `fn(descriptor, name)` once per property name; the `exclude`
// map guarantees only the most-derived definition of a name is visited.
function traverseDescriptors(proto, Base, fn, exclude) {
    if (exclude === void 0) { exclude = { constructor: true }; }
    if (proto.constructor === Base) {
        return;
    }
    Object.getOwnPropertyNames(proto).forEach(function (key) {
        // Ensure to only choose most extended properties
        if (exclude[key])
            return;
        exclude[key] = true;
        var desc = Object.getOwnPropertyDescriptor(proto, key);
        fn(desc, key);
    });
    traverseDescriptors(Object.getPrototypeOf(proto), Base, fn, exclude);
}

// Collects the names of all function-valued members declared between
// `proto` and `Base` (i.e. the user-defined handler methods).
function gatherHandlerNames(proto, Base) {
    var ret = [];
    traverseDescriptors(proto, Base, function (desc, name) {
        if (typeof desc.value !== 'function') {
            return;
        }
        ret.push(name);
    });
    return ret;
}

// Builds a position object whose `path`/`namespace` getters resolve lazily
// and assert that the module has been registered to a store first.
function createLazyContextPosition(module) {
    var message = 'The module need to be registered a store before using `Module#context` or `createMapper`';
    return {
        get path() {
            assert(module.path !== undefined, message);
            return module.path;
        },
        get namespace() {
            assert(module.namespace !== undefined, message);
            return module.namespace;
        },
    };
}
/**
 * Invokes `dispatch` with the module namespace prepended to the type.
 * Accepts either the string form (`type, payload, options`) or the
 * object form (`{ type, ...payload }`); for the object form a shallow
 * copy is made with the namespaced type, and `options` is not forwarded
 * (matching vuex's object-style signature).
 */
function normalizedDispatch(dispatch, namespace, type, payload, options) {
    if (typeof type === 'string') {
        return dispatch(namespace + type, payload, options);
    }
    var namespacedType = {};
    for (var key in type) {
        if (Object.prototype.hasOwnProperty.call(type, key)) {
            namespacedType[key] = type[key];
        }
    }
    namespacedType.type = namespace + type.type;
    return dispatch(namespacedType, payload);
}
// Commits a mutation on `store` with the module namespace prepended.
function commit(store, namespace, type, payload, options) {
    normalizedDispatch(store.commit, namespace, type, payload, options);
}

// Dispatches an action on `store` with the module namespace prepended;
// returns whatever store.dispatch returns.
function dispatch(store, namespace, type, payload, options) {
    return normalizedDispatch(store.dispatch, namespace, type, payload, options);
}
/**
 * Builds a live view of the store getters that belong to `namespace`:
 * keys are the local (un-prefixed) getter names, and each property reads
 * through to the store so values stay reactive.
 */
function getters(store, namespace) {
    var prefixLength = namespace.length;
    var result = {};
    Object.keys(store.getters).forEach(function (key) {
        if (key.slice(0, prefixLength) !== namespace) {
            return;
        }
        var localName = key.slice(prefixLength);
        if (!localName) {
            return;
        }
        Object.defineProperty(result, localName, {
            get: function () { return store.getters[key]; },
            enumerable: true,
        });
    });
    return result;
}
// Runtime handle onto one registered module: namespaced commit/dispatch,
// module-local state/getters, typed mutation/action wrappers (built
// lazily and cached), and child-module contexts.
var Context = /** @class */ (function () {
    /** @internal */
    function Context(pos, store, moduleOptions) {
        var _this = this;
        this.pos = pos;
        this.store = store;
        this.moduleOptions = moduleOptions;
        // Bound helpers that automatically prepend the module namespace.
        this.commit = function (type, payload, options) {
            return commit(_this.store, _this.pos.namespace, type, payload, options);
        };
        this.dispatch = function (type, payload, options) {
            return dispatch(_this.store, _this.pos.namespace, type, payload, options);
        };
    }
    Object.defineProperty(Context.prototype, "mutations", {
        // Map of mutation name -> committing wrapper, built from the
        // declared mutations class on first access and then cached.
        get: function () {
            var _this = this;
            if (this.__mutations__) {
                return this.__mutations__;
            }
            var mutations = {};
            var mutationsClass = this.moduleOptions.mutations;
            if (mutationsClass) {
                var mutationNames = gatherHandlerNames(mutationsClass.prototype, Mutations);
                mutationNames.forEach(function (name) {
                    Object.defineProperty(mutations, name, {
                        value: function (payload) { return _this.commit(name, payload); },
                        enumerable: true,
                    });
                });
            }
            return (this.__mutations__ = mutations);
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Context.prototype, "actions", {
        // Map of action name -> dispatching wrapper, built from the
        // declared actions class on first access and then cached.
        get: function () {
            var _this = this;
            if (this.__actions__) {
                return this.__actions__;
            }
            var actions = {};
            var actionsClass = this.moduleOptions.actions;
            if (actionsClass) {
                var actionNames = gatherHandlerNames(actionsClass.prototype, Actions);
                actionNames.forEach(function (name) {
                    Object.defineProperty(actions, name, {
                        value: function (payload) { return _this.dispatch(name, payload); },
                        enumerable: true,
                    });
                });
            }
            return (this.__actions__ = actions);
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Context.prototype, "state", {
        // Module-local state, resolved by walking the module path from the
        // root store state on every access (stays reactive).
        get: function () {
            return get(this.pos.path, this.store.state);
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Context.prototype, "getters", {
        // Module-local view over the store's namespaced getters.
        get: function () {
            return getters(this.store, this.pos.namespace);
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(Context.prototype, "modules", {
        // Lazily exposes a child Context per nested module declaration.
        get: function () {
            var _this = this;
            var modules = {};
            var children = this
                .moduleOptions.modules;
            if (!children) {
                return modules;
            }
            Object.keys(children).forEach(function (key) {
                var child = children[key];
                Object.defineProperty(modules, key, {
                    get: function () {
                        return new Context(createLazyContextPosition(child), _this.store, child.options);
                    },
                });
            });
            return modules;
        },
        enumerable: false,
        configurable: true
    });
    return Context;
}());
// Public factory: a component mapper bound to a module's (lazy) position.
function createMapper(module) {
    return new ComponentMapper(createLazyContextPosition(module));
}

// Produces Vue component options (computed props / methods) wired to the
// namespaced state, getters, mutations and actions of one module.
var ComponentMapper = /** @class */ (function () {
    function ComponentMapper(pos) {
        this.pos = pos;
    }
    // Map state entries to computed properties; a function value receives
    // (state, getters) with the component as `this`.
    ComponentMapper.prototype.mapState = function (map) {
        var pos = this.pos;
        return createMappedObject(map, function (value) {
            return function mappedStateComputed() {
                var state = get(pos.path, this.$store.state);
                if (typeof value === 'function') {
                    var getters$1 = getters(this.$store, pos.namespace);
                    return value.call(this, state, getters$1);
                }
                else {
                    return state[value];
                }
            };
        });
    };
    // Map getter names to computed properties reading the namespaced getter.
    ComponentMapper.prototype.mapGetters = function (map) {
        var pos = this.pos;
        return createMappedObject(map, function (value) {
            function mappedGetterComputed() {
                return this.$store.getters[pos.namespace + value];
            }
            // mark vuex getter for devtools
            mappedGetterComputed.vuex = true;
            return mappedGetterComputed;
        });
    };
    // Map mutation entries to methods; a function value receives the bound
    // commit followed by the method's own arguments.
    ComponentMapper.prototype.mapMutations = function (map) {
        var pos = this.pos;
        return createMappedObject(map, function (value) {
            return function mappedMutationMethod() {
                var _this = this;
                var args = [];
                for (var _i = 0; _i < arguments.length; _i++) {
                    args[_i] = arguments[_i];
                }
                var commit$1 = function (type, payload) {
                    return commit(_this.$store, pos.namespace, type, payload);
                };
                return typeof value === 'function'
                    ? value.apply(this, [commit$1].concat(args))
                    : commit$1(value, args[0]);
            };
        });
    };
    // Map action entries to methods; a function value receives the bound
    // dispatch followed by the method's own arguments.
    ComponentMapper.prototype.mapActions = function (map) {
        var pos = this.pos;
        return createMappedObject(map, function (value) {
            return function mappedActionMethod() {
                var _this = this;
                var args = [];
                for (var _i = 0; _i < arguments.length; _i++) {
                    args[_i] = arguments[_i];
                }
                var dispatch$1 = function (type, payload) {
                    return dispatch(_this.$store, pos.namespace, type, payload);
                };
                return typeof value === 'function'
                    ? value.apply(this, [dispatch$1].concat(args))
                    : dispatch$1(value, args[0]);
            };
        });
    };
    return ComponentMapper;
}());

// Normalizes an array map (['a','b'] -> {a:'a', b:'b'}) then applies `fn`
// to every value, producing the final component-options object.
function createMappedObject(map, fn) {
    var normalized = !Array.isArray(map)
        ? map
        : map.reduce(function (acc, key) {
            acc[key] = key;
            return acc;
        }, {});
    return mapValues(normalized, fn);
}
var Module = /** @class */ (function () {
// One vuex-smart-module module definition: wraps the class-based options
// and a lazily-positioned component mapper.
function Module(options) {
    if (options === void 0) { options = {}; }
    this.options = options;
    this.mapper = new ComponentMapper(createLazyContextPosition(this));
}
// Deep-clones the module (and its nested modules) so the same definition
// can be registered at several places in a store.
Module.prototype.clone = function () {
    var options = __assign({}, this.options);
    if (options.modules) {
        options.modules = mapValues(options.modules, function (m) { return m.clone(); });
    }
    return new Module(options);
};
// Returns a runtime Context for this module bound to `store`.
Module.prototype.context = function (store) {
    return new Context(createLazyContextPosition(this), store, this.options);
};
// --- Deprecated pass-throughs to the mapper (kept for back-compat). ---
Module.prototype.mapState = function (map) {
    deprecated('`Module#mapState` is deprecated. Use `createMapper` instead.');
    return this.mapper.mapState(map);
};
Module.prototype.mapGetters = function (map) {
    deprecated('`Module#mapGetters` is deprecated. Use `createMapper` instead.');
    return this.mapper.mapGetters(map);
};
Module.prototype.mapMutations = function (map) {
    deprecated('`Module#mapMutations` is deprecated. Use `createMapper` instead.');
    return this.mapper.mapMutations(map);
};
Module.prototype.mapActions = function (map) {
    deprecated('`Module#mapActions` is deprecated. Use `createMapper` instead.');
    return this.mapper.mapActions(map);
};
// Builds the plain vuex store options for this module tree, plus a plugin
// that injects the store into every module (re-run after hotUpdate).
Module.prototype.getStoreOptions = function () {
    var injectStoreActionName = 'vuex-smart-module/injectStore';
    var _a = this.create([], ''), options = _a.options, injectStore = _a.injectStore;
    if (!options.actions) {
        options.actions = {};
    }
    options.actions[injectStoreActionName] = function () {
        injectStore(this);
    };
    var plugin = function (store) {
        store.dispatch(injectStoreActionName);
        var originalHotUpdate = store.hotUpdate;
        // Wrap hotUpdate so module classes get re-injected after HMR.
        store.hotUpdate = function (options) {
            originalHotUpdate.call(store, options);
            store.dispatch(injectStoreActionName);
        };
    };
    return __assign(__assign({}, options), { plugins: [plugin] });
};
/* @internal */
Module.prototype.create = function (path, namespace) {
assert(!this.path || this.path.join('.') === path.join('.'), 'You are reusing one module on multiple places in the same store.\n' +
'Clone it by `module.clone()` method to make sure every module in the store is unique.');
this.path = path;
this.namespace = namespace;
var _a = this.options, _b = _a.namespaced, namespaced = _b === void 0 ? true : _b, state = _a.state, getters = _a.getters, mutations = _a.mutations, actions = _a.actions, modules = _a.modules;
var children = !modules
? undefined
: Object.keys(modules).reduce(function (acc, key) {
var _a;
var m = modules[key];
var nextNamespaced = (_a = m.options.namespaced) !== null && _a !== void 0 ? _a : true;
var nextNamespaceKey = nextNamespaced ? key + '/' : '';
var res = m.create(path.concat(key), namespaced ? namespace + nextNamespaceKey : nextNamespaceKey);
acc.options[key] = res.options;
acc.injectStore = combine(acc.injectStore, res.injectStore);
return acc;
}, {
options: {},
injectStore: noop,
});
var gettersInstance = getters && initGetters(getters, this);
var mutationsInstance = mutations && initMutations(mutations, this);
var actionsInstance = actions && initActions(actions, this);
return {
options: {
namespaced: namespaced,
state: function () { return (state ? new state() : {}); },
getters: gettersInstance && gettersInstance.getters,
mutations: mutationsInstance && mutationsInstance.mutations,
actions: actionsInstance && actionsInstance.actions,
modules: children && children.options,
},
injectStore: combine(children ? children.injectStore : noop, gettersInstance ? gettersInstance.injectStore : noop, mutationsInstance ? mutationsInstance.injectStore : noop, actionsInstance ? actionsInstance.injectStore : noop),
};
};
return Module;
}());
// Rebuild the root module's options, apply them to an existing store via
// Vuex's hotUpdate, then re-inject the store into the module's class-based
// getters/mutations/actions instances.
function hotUpdate(store, module) {
    var created = module.create([], '');
    store.hotUpdate(created.options);
    created.injectStore(store);
}
// Instantiate a Getters class and convert it into a Vuex `getters` option
// object, plus an `injectStore` hook that binds the store afterwards.
function initGetters(Getters$1, module) {
    var getters = new Getters$1();
    var options = {};
    // Proxy all getters to print useful warning on development
    // (reading a sibling getter through the proxy still works, but logs
    // the warning and suggests `this.getters.<key>` instead).
    function proxyGetters(getters, origin) {
        var proxy = Object.create(getters);
        Object.keys(options).forEach(function (key) {
            Object.defineProperty(proxy, key, {
                get: function () {
                    error("You are accessing " + Getters$1.name + "#" + key + " from " + Getters$1.name + "#" + origin +
                        ' but direct access to another getter is prohibitted.' +
                        (" Access it via this.getters." + key + " instead."));
                    return getters[key];
                },
                configurable: true,
            });
        });
        return proxy;
    }
    // Walk the prototype chain up to (but excluding) the library base class
    // `Getters`, turning each method or accessor into a Vuex getter.
    traverseDescriptors(Getters$1.prototype, Getters, function (desc, key) {
        // Only plain methods and `get` accessors become getters.
        if (typeof desc.value !== 'function' && !desc.get) {
            return;
        }
        var methodFn = desc.value;
        var getterFn = desc.get;
        options[key] = function () {
            var proxy = proxyGetters(getters, key);
            if (getterFn) {
                // Accessor getter: evaluate eagerly.
                return getterFn.call(proxy);
            }
            if (methodFn) {
                // Method-style getter: expose a bound function to callers.
                return methodFn.bind(proxy);
            }
        };
    }, {
        // Skip lifecycle members that must not become Vuex getters.
        constructor: true,
        $init: true,
    });
    return {
        getters: options,
        injectStore: function (store) {
            var context = module.context(store);
            // Define the context lazily, exactly once per instance.
            if (!getters.hasOwnProperty('__ctx__')) {
                Object.defineProperty(getters, '__ctx__', {
                    get: function () { return context; },
                });
            }
            // User hook: runs once the store is available.
            getters.$init(store);
        },
    };
}
// Instantiate a Mutations class and convert its methods into a Vuex
// `mutations` option object, plus an `injectStore` hook binding the store.
function initMutations(Mutations$1, module) {
    var mutations = new Mutations$1();
    var options = {};
    // Proxy all mutations to print useful warning on development
    // (calling a sibling mutation still executes it, but logs a warning).
    function proxyMutations(mutations, origin) {
        var proxy = Object.create(mutations);
        Object.keys(options).forEach(function (key) {
            proxy[key] = function () {
                var args = [];
                for (var _i = 0; _i < arguments.length; _i++) {
                    args[_i] = arguments[_i];
                }
                error("You are accessing " + Mutations$1.name + "#" + key + " from " + Mutations$1.name + "#" + origin +
                    ' but accessing another mutation is prohibitted.' +
                    ' Use an action to consolidate the mutation chain.');
                mutations[key].apply(mutations, args);
            };
        });
        return proxy;
    }
    // Walk the prototype chain up to (but excluding) the library base class
    // `Mutations`; each method becomes a Vuex mutation handler.
    traverseDescriptors(Mutations$1.prototype, Mutations, function (desc, key) {
        if (typeof desc.value !== 'function') {
            return;
        }
        // Vuex passes (state, payload); state is ignored here because the
        // class instance reaches state through its injected context.
        options[key] = function (_, payload) {
            var proxy = proxyMutations(mutations, key);
            return mutations[key].call(proxy, payload);
        };
    });
    return {
        mutations: options,
        injectStore: function (store) {
            var context = module.context(store);
            // Define the context lazily, exactly once per instance.
            // NOTE: unlike getters/actions, no `$init` hook is invoked here.
            if (!mutations.hasOwnProperty('__ctx__')) {
                Object.defineProperty(mutations, '__ctx__', {
                    get: function () { return context; },
                });
            }
        },
    };
}
// Instantiate an Actions class and convert its methods into a Vuex
// `actions` option object, plus an `injectStore` hook binding the store.
function initActions(Actions$1, module) {
    var actions = new Actions$1();
    var options = {};
    // Proxy all actions to print useful warning on development
    // (calling a sibling action still executes it, but logs a warning and
    // suggests `this.dispatch(...)` instead).
    function proxyActions(actions, origin) {
        var proxy = Object.create(actions);
        Object.keys(options).forEach(function (key) {
            proxy[key] = function () {
                var args = [];
                for (var _i = 0; _i < arguments.length; _i++) {
                    args[_i] = arguments[_i];
                }
                error("You are accessing " + Actions$1.name + "#" + key + " from " + Actions$1.name + "#" + origin +
                    ' but direct access to another action is prohibitted.' +
                    (" Access it via this.dispatch('" + key + "') instead."));
                actions[key].apply(actions, args);
            };
        });
        return proxy;
    }
    // Walk the prototype chain up to (but excluding) the library base class
    // `Actions`; each method becomes a Vuex action handler.
    traverseDescriptors(Actions$1.prototype, Actions, function (desc, key) {
        if (typeof desc.value !== 'function') {
            return;
        }
        // Vuex passes (context, payload); context is ignored here because
        // the class instance reaches it through `__ctx__`.
        options[key] = function (_, payload) {
            var proxy = proxyActions(actions, key);
            return actions[key].call(proxy, payload);
        };
    }, {
        // Skip lifecycle members that must not become Vuex actions.
        constructor: true,
        $init: true,
    });
    return {
        actions: options,
        injectStore: function (store) {
            var context = module.context(store);
            // Define the context lazily, exactly once per instance.
            if (!actions.hasOwnProperty('__ctx__')) {
                Object.defineProperty(actions, '__ctx__', {
                    get: function () { return context; },
                });
            }
            // User hook: runs once the store is available.
            actions.$init(store);
        },
    };
}
// Dynamically register `module` under `path` on an existing store, then
// inject the store into the module's class-based instances.
function registerModule(store, path, namespace, module, options) {
    var pathArray;
    if (typeof path === 'string') {
        pathArray = [path];
    }
    else {
        pathArray = path;
    }
    var created = module.create(pathArray, normalizeNamespace(namespace));
    store.registerModule(pathArray, created.options, options);
    created.injectStore(store);
}
// Remove a previously registered module from the store. The library
// `assert` helper fires when `module` was never mounted (no path recorded).
function unregisterModule(store, module) {
    var path = module.path;
    assert(path, 'The module seems not registered in the store');
    store.unregisterModule(path);
}
/**
 * Ensure a namespace string ends with a trailing slash.
 * Empty, null or undefined namespaces normalize to '' (root namespace).
 * Previously only `null` was checked, so an `undefined` namespace fell
 * through and threw a TypeError on the indexing below.
 */
function normalizeNamespace(namespace) {
    // `== null` intentionally matches both null and undefined.
    if (namespace == null || namespace === '') {
        return '';
    }
    return namespace[namespace.length - 1] === '/' ? namespace : namespace + '/';
}
// Build a Vuex store from a root Module. User-supplied `options` are merged
// over the generated root options; `modules` are merged key-by-key, and the
// module tree's `injectStore` runs first in the plugin list so class
// instances receive the store before any user plugin executes.
function createStore(rootModule, options) {
    if (options === void 0) { options = {}; }
    var _a = rootModule.create([], ''), rootModuleOptions = _a.options, injectStore = _a.injectStore;
    var store = new vuex.Store(__assign(__assign(__assign({}, rootModuleOptions), options), { modules: __assign(__assign({}, rootModuleOptions.modules), options.modules), plugins: [injectStore].concat(options.plugins || []) }));
    return store;
}
// Public API surface of the vuex-smart-module UMD bundle.
exports.Actions = Actions;
exports.Context = Context;
exports.Getters = Getters;
exports.Module = Module;
exports.Mutations = Mutations;
exports.createMapper = createMapper;
exports.createStore = createStore;
exports.hotUpdate = hotUpdate;
exports.inject = inject;
exports.registerModule = registerModule;
exports.unregisterModule = unregisterModule;
// Mark the namespace as an ES module for interop tooling.
Object.defineProperty(exports, '__esModule', { value: true });
})));
|
#!/bin/bash
set -e
# Map local docker development service names to loopback so tooling resolves
# them the same way the docker network does.
echo "Adding service hosts records"
services=("localstack")
for svc in "${services[@]}"; do
    printf '127.0.0.1 %s\n' "$svc" | tee -a /etc/hosts
done
|
#!/bin/sh
# CocoaPods "Copy Pods Resources" build phase script (generated).
set -e
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
# Manifest of plain files to rsync into the product bundle at the end.
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
# Translate Xcode's TARGETED_DEVICE_FAMILY codes into ibtool/actool
# --target-device arguments (1=iPhone, 2=iPad, 3=TV, default=Mac).
case "${TARGETED_DEVICE_FAMILY}" in
  1,2)
    TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
    ;;
  1)
    TARGET_DEVICE_ARGS="--target-device iphone"
    ;;
  2)
    TARGET_DEVICE_ARGS="--target-device ipad"
    ;;
  3)
    TARGET_DEVICE_ARGS="--target-device tv"
    ;;
  *)
    TARGET_DEVICE_ARGS="--target-device mac"
    ;;
esac
# Compile or stage one pod resource according to its extension.
# Storyboards/xibs are compiled with ibtool, Core Data models with momc,
# mapping models with mapc, frameworks rsynced, xcassets queued for a single
# actool invocation later, and everything else appended to the copy manifest.
install_resource()
{
  # Resolve relative paths against PODS_ROOT.
  if [[ "$1" = /* ]] ; then
    RESOURCE_PATH="$1"
  else
    RESOURCE_PATH="${PODS_ROOT}/$1"
  fi
  if [[ ! -e "$RESOURCE_PATH" ]] ; then
    cat << EOM
error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script.
EOM
    exit 1
  fi
  case $RESOURCE_PATH in
    *.storyboard)
      echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}"
      ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
      ;;
    *.xib)
      echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}"
      ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
      ;;
    *.framework)
      echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      echo "rsync -av $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      rsync -av "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      ;;
    *.xcdatamodel)
      # BUGFIX: the echo previously omitted the `.xcdatamodel` suffix argument
      # to basename, so the logged path ("model.xcdatamodel.mom") did not match
      # the file the command below actually produces ("model.mom").
      echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom\""
      xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom"
      ;;
    *.xcdatamodeld)
      echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\""
      xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd"
      ;;
    *.xcmappingmodel)
      echo "xcrun mapc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\""
      xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm"
      ;;
    *.xcassets)
      # Queued: all asset catalogs are compiled in one actool run later.
      ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH"
      XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
      ;;
    *)
      echo "$RESOURCE_PATH"
      echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY"
      ;;
  esac
}
# Stage bundle resources for the active build configuration.
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_resource "IQKeyboardManager/IQKeyboardManager/Resources/IQKeyboardManager.bundle"
  install_resource "ShareSDK3/SDK/ShareSDK/Support/Required/ShareSDK.bundle"
  install_resource "ShareSDK3/SDK/ShareSDK/Support/PlatformSDK/QQSDK/TencentOpenApi_IOS_Bundle.bundle"
  install_resource "ShareSDK3/SDK/ShareSDK/Support/PlatformSDK/SinaWeiboSDK/WeiboSDK.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_resource "IQKeyboardManager/IQKeyboardManager/Resources/IQKeyboardManager.bundle"
  install_resource "ShareSDK3/SDK/ShareSDK/Support/Required/ShareSDK.bundle"
  install_resource "ShareSDK3/SDK/ShareSDK/Support/PlatformSDK/QQSDK/TencentOpenApi_IOS_Bundle.bundle"
  install_resource "ShareSDK3/SDK/ShareSDK/Support/PlatformSDK/SinaWeiboSDK/WeiboSDK.bundle"
fi
# Copy all plain-file resources collected in the manifest into the product.
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
  mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
  rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ]
then
  # Find all other xcassets (this unfortunately includes those of path pods and other targets).
  OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
  while read line; do
    # BUGFIX: the glob must sit OUTSIDE the quotes. The original
    # `!= "${PODS_ROOT}*"` quoted the `*`, making this a literal string
    # comparison that never matched, so Pods-owned catalogs were added twice.
    if [[ $line != "${PODS_ROOT}"* ]]; then
      XCASSET_FILES+=("$line")
    fi
  done <<<"$OTHER_XCASSETS"
  printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
<reponame>quarkfin/QF-Lib
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import sign
from qf_lib.backtesting.portfolio.backtest_position import BacktestPosition
from qf_lib.backtesting.portfolio.transaction import Transaction
class BacktestEquityPosition(BacktestPosition):
    """Equity-specific position arithmetic for the backtester."""

    def market_value(self) -> float:
        """Current worth of the position: signed share count times last price."""
        return self.current_price * self.quantity()

    def total_exposure(self) -> float:
        """Market exposure of the position based on the raw share count."""
        return self.current_price * self._quantity

    def _cash_to_buy_or_proceeds_from_sale(self, transaction: Transaction) -> float:
        """Cash flow of a fill: negative when buying, positive when selling.

        Commission always reduces the cash received / increases cash paid.
        """
        gross = transaction.price * transaction.quantity
        return -(gross + transaction.commission)

    def _compute_profit_and_loss_fraction(self, price: float, quantity: int):
        """Realized P&L for the part of a trade that goes against the position.

        Anything that does not reduce the position realizes nothing (0.0).
        """
        if sign(quantity) * self.direction() != -1:
            return 0.0
        price_pnl = price - self._avg_price_per_unit
        # We multiply by the direction, so that in case of finding a pair of
        # transactions going in opposite directions, the realized pnl of this
        # operation considers the direction of the position.
        closing_quantity = self.direction() * abs(quantity)
        return price_pnl * closing_quantity
|
import React from "react";
import { Container } from "reactstrap";
const Contactus = () => {
return(
<div id="contactus">
<Container>
<br/><br/><br/><br/>
<div className="ui conatiner">
<div className="ui black segment center aligned">
<h2><b>CONTACT US</b></h2>
</div>
<br/><br/>
<div className="doubling stackable ui three column grid center aligned">
<div className="column">
<i className="circular home huge icon"></i>
<h4><b>ADDRESS</b></h4>
<br/>
<p>702 Gulmohar,<NAME>, Virar West - 401303.</p>
</div>
<div className="column">
<i className="circular phone huge icon"></i>
<h4><b>PHONE NUMBER</b></h4>
<br/>
<p><NAME> :- +973 3538 5486</p>
<p><NAME> :- +91 88790 76270</p>
<p><NAME> :- +91 93736 73889</p>
<p><NAME> :- +91 89755 81443</p>
</div>
<div className="column">
<i className="circular mail huge icon"></i>
<h4><b>EMAIL</b></h4>
<br/>
<p><EMAIL></p>
</div>
</div>
</div>
<br/><br/>
<div className="container">
<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3762.272521505711!2d72.79802851468722!3d19.44381368687795!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x3be7abd4ef5be8ef%3A0xc0197fcdc3c88c20!2sMayfair+Virar+Gardens!5e0!3m2!1sen!2sbh!4v1478336462616" width="100%" height="380" frameborder="0" allowfullscreen></iframe>
</div>
<br/><br/><br/><br/>
</Container>
</div>
)
}
export default Contactus; |
#!/bin/sh
# Sharness test: exercise ipfs repo migrations across several releases,
# including a manual downgrade, inside a throwaway docker container.
test_description="Migration to many different versions"
# Path of the 2->3 migration binary inside the guest container.
GUEST_IPFS_2_TO_3="sharness/bin/fs-repo-2-to-3"
. lib/test-lib.sh
test_expect_success "start a docker container" '
  DOCID=$(start_docker)
'
# Start on an old release and initialize a fresh repo in it.
test_install_version "v0.3.7"
test_expect_success "'ipfs init' succeeds" '
  export IPFS_PATH=/root/.ipfs &&
  exec_docker "$DOCID" "IPFS_PATH=$IPFS_PATH BITS=2048 ipfs init" >actual 2>&1 ||
  test_fsh cat actual
'
test_expect_success ".ipfs/ has been created" '
  exec_docker "$DOCID" "test -d /root/.ipfs && test -f /root/.ipfs/config"
  exec_docker "$DOCID" "test -d /root/.ipfs/datastore && test -d /root/.ipfs/blocks"
'
test_repo_version "0.3.7"
# Upgrade path: installing newer versions migrates the repo forward.
test_install_version "v0.3.10"
test_repo_version "0.3.10"
test_install_version "v0.4.0"
test_repo_version "0.4.0"
# Downgrade path: install an older release, then revert the repo by hand.
test_install_version "v0.3.8"
# By design reverting a migration has to be run manually
test_expect_success "'fs-repo-2-to-3 -revert' succeeds" '
  exec_docker "$DOCID" "$GUEST_IPFS_2_TO_3 -revert -path=/root/.ipfs" >actual
'
test_repo_version "0.3.8"
# And forward again after the revert.
test_install_version "v0.3.10"
test_repo_version "0.3.10"
test_expect_success "stop docker container" '
  stop_docker "$DOCID"
'
test_done
|
#!/bin/bash
# Open vidir (bulk-rename editor) in a floating urxvt window, rooted at the
# directory given as $1, with remaining arguments rewritten as paths
# relative to that directory.
if [[ -d "$1" ]]; then
    current="$1"
    cd "$1" || exit 1
    shift
else
    # First argument must be an existing directory.
    exit 1
fi
args=( "$@" )
for i in "${!args[@]}"; do
    # Strip the base-directory prefix and anchor the result with ".".
    # BUGFIX: quote "$current" so the strip is literal even when the path
    # contains glob metacharacters such as '*', '?' or '['.
    args[$i]=".${args[i]#"$current"}"
done
exec urxvt -name floating -e vidir "${args[@]}"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.