text stringlengths 1 1.05M |
|---|
#!/bin/sh
# Refresh the Travian ts19.com map snapshot:
# download map.sql, retarget its table name, reload it into Postgres,
# and record the last successful run timestamp.
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# Abort if the working directory is missing - otherwise every later
# command would run in the wrong place.
cd /opt/progs || exit 1
# -f: do not fail (or print noise) when a previous run left no file behind.
rm -f map.sql
# Bail out when the download fails so we never reload a stale/empty dump.
if ! wget http://ts19.travian.com/map.sql
then
    exit 1
fi
# The dump creates/inserts into `x_world`; rewrite that to our per-server table.
sed -i "s/\`x_world\`/ts19com/g" map.sql
# Full reload: empty the table, replay the dump, then run the site's
# aggregation function over the fresh rows.
PGPASSWORD=123456 psql tr -U www-data -c "DELETE FROM ts19com;"
PGPASSWORD=123456 psql tr -U www-data -f map.sql
PGPASSWORD=123456 psql tr -U www-data -c "SELECT * FROM populate_table( 'ts19com' );"
# Touch the marker file so monitoring can see when this last succeeded.
echo $(date) >> lastrunningts19com
exit 0
|
/*
* <NAME>
* 11/13/16
* EmployeeType.java
*/
package edu.greenriver.it.hr.employees;
/**
 * Distinguishes how an employee is compensated: paid by the hour or on
 * a fixed salary.
 */
public enum EmployeeType
{
    HOURLY,
    SALARY
}
|
<filename>src/main/java/domainentitites/PrivilegesMethods.java
package domainentitites;
import generalmethods.DeleteRequest;
import generalmethods.GetRequest;
import generalmethods.PostRequest;
import io.restassured.response.Response;
/**
 * Thin wrapper around the generic request helpers for the privileges API.
 */
public class PrivilegesMethods {

    // Shared request helpers; delete/post are kept for sibling methods added here.
    GetRequest getRequest = new GetRequest();
    DeleteRequest deleteRequest = new DeleteRequest();
    PostRequest postRequest = new PostRequest();

    /**
     * Issues a GET with the given filter query against the privileges endpoint.
     *
     * @param endPointPrivileges endpoint path for the privileges resource
     * @param filterQuery        query string used to filter the result set
     * @return the raw REST-assured response
     */
    public Response getReturnAllPrivilegesOnTheSite(String endPointPrivileges, String filterQuery) {
        return getRequest.canGiveResponseUsingQuery(endPointPrivileges, filterQuery);
    }
}
|
package scanner;
import org.jooby.mvc.GET;
import org.jooby.mvc.Path;
/**
 * Minimal Jooby MVC controller mounted at the site root.
 */
@Path("/")
public class MyController {

    /** Handles GET / by returning a fixed health-style greeting. */
    @GET
    public String index() {
        final String greeting = "It works!";
        return greeting;
    }
}
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package stubs
import com.github.tomakehurst.wiremock.client.MappingBuilder
import com.github.tomakehurst.wiremock.client.WireMock._
import com.github.tomakehurst.wiremock.stubbing.StubMapping
/**
 * WireMock helpers for stubbing a generic downstream HTTP service in tests.
 * Mix into a suite that provides a running WireMock server via WireMockRunner.
 */
trait MockGenericDownstreamService extends WireMockRunner {
/**
 * Stubs a GET whose URL matches the given regex, answering with `status`,
 * an optional body and an optional fixed delay (milliseconds per WireMock API).
 */
def getFromDownstreamService(url: String, status: Int, body: Option[String] = None, delay: Int = 0): StubMapping =
stubForDownstreamService(get(urlMatching(url)), status, body, delay)
/** Same as getFromDownstreamService but for POST requests. */
def postToDownstreamService(url: String, status: Int, body: Option[String] = None, delay: Int = 0): StubMapping =
stubForDownstreamService(post(urlMatching(url)), status, body, delay)
// Shared implementation: removes any previous stub for the same request
// matcher before registering the new one, so repeated setup calls replace
// rather than accumulate mappings.
private def stubForDownstreamService(call: MappingBuilder, status: Int, body: Option[String], delay: Int): StubMapping = {
val response = aResponse()
.withStatus(status)
.withFixedDelay(delay)
removeStub(call)
// body.fold: no body => plain response; Some(b) => response with that body.
stubFor(call.willReturn(body.fold(response)(response.withBody)))
}
/** Asserts exactly one GET matching `url` reached the mock server. */
def verifyGetFromDownStreamService(url: String): Unit = verify(1, getRequestedFor(urlMatching(url)))
}
|
# SPDX-License-Identifier: BSD-3-Clause
# Integration test for tpm2_hierarchycontrol: first exercises the legal
# set/clear transitions for each hierarchy enable bit, then (with the ERR
# trap removed) runs the combinations the command must reject.
source helpers.sh
# Tear down the TPM test environment unless the caller asked to keep it up.
cleanup() {
if [ "$1" != "no-shut-down" ]; then
shut_down
fi
}
trap cleanup EXIT
start_up
cleanup "no-shut-down"
# Storage hierarchy: platform auth may toggle shEnable; owner auth may only clear it.
tpm2_hierarchycontrol -C p shEnable set
tpm2_hierarchycontrol -C p shEnable clear
tpm2_hierarchycontrol -C p shEnable set
tpm2_hierarchycontrol -C o shEnable clear
# Endorsement hierarchy: platform auth toggles ehEnable; endorsement auth may clear it.
tpm2_hierarchycontrol -C p ehEnable set
tpm2_hierarchycontrol -C p ehEnable clear
tpm2_hierarchycontrol -C p ehEnable set
tpm2_hierarchycontrol -C e ehEnable clear
# Platform NV: only platform auth may toggle phEnableNV.
tpm2_hierarchycontrol -C p phEnableNV set
tpm2_hierarchycontrol -C p phEnableNV clear
tpm2_hierarchycontrol -C p phEnableNV set
# Platform hierarchy: platform auth may clear phEnable (but never set it back).
tpm2_hierarchycontrol -C p phEnable clear
# 0 the handler - everything below is EXPECTED to fail, so stop trapping ERR.
trap - ERR
# ERROR: phEnable may not be 1 using this command
tpm2_hierarchycontrol -C p phEnable set
# ERROR: Only platform hierarchy handle can be specified for 1
tpm2_hierarchycontrol -C o shEnable set
tpm2_hierarchycontrol -C o ehEnable set
tpm2_hierarchycontrol -C o phEnable set
tpm2_hierarchycontrol -C o phEnableNV set
tpm2_hierarchycontrol -C e shEnable set
tpm2_hierarchycontrol -C e ehEnable set
tpm2_hierarchycontrol -C e phEnable set
tpm2_hierarchycontrol -C e phEnableNV set
# ERROR: Permanent handle lockout not supported by this command
tpm2_hierarchycontrol -C l shEnable set
tpm2_hierarchycontrol -C l ehEnable set
tpm2_hierarchycontrol -C l phEnable set
tpm2_hierarchycontrol -C l phEnableNV set
tpm2_hierarchycontrol -C l shEnable clear
tpm2_hierarchycontrol -C l ehEnable clear
tpm2_hierarchycontrol -C l phEnable clear
tpm2_hierarchycontrol -C l phEnableNV clear
# ERROR: Only platform and its authorization can be specified for 0
tpm2_hierarchycontrol -C o ehEnable clear
tpm2_hierarchycontrol -C o phEnable clear
tpm2_hierarchycontrol -C o phEnableNV clear
tpm2_hierarchycontrol -C e shEnable clear
tpm2_hierarchycontrol -C e phEnable clear
tpm2_hierarchycontrol -C e phEnableNV clear
exit 0
|
#ifndef _MENUITEMSPINNER_H_
#define _MENUITEMSPINNER_H_
/*
MenuItemSpinner is based on MenuItemNumeric.
https://github.com/lovyan03/M5Stack_TreeView/blob/master/src/MenuItemNumeric.h
*/
#include <MenuItem.h>
#include <WString.h>
// Spinner-style menu item for selecting an integer value
// (derived from MenuItemNumeric, see header comment above).
class MenuItemSpinner : public MenuItem {
public:
// Currently selected value.
int value = 0;
// When true, stepping presumably wraps between minimum and maximum —
// TODO confirm against the onEnter() implementation in the .cpp file.
bool canLoop = true;
// Construct with title, upper bound and initial value; tg/cb are forwarded
// to MenuItem. NOTE(review): neither constructor sets `minimum`, so the
// lower bound stays at its default of 0.
MenuItemSpinner(const String& title, int maximum, int value, int tg = 0, TCallBackEnter cb = 0)
: MenuItem(title, tg, cb), value(value), maximum(maximum) {};
// Convenience overload: callback only, no tag.
MenuItemSpinner(const String& title, int maximum, int value, TCallBackEnter cb)
: MenuItem(title, cb), value(value), maximum(maximum) {};
virtual void onAfterDraw();
virtual void onEnter();
// Text shown for a given value; override to customize formatting.
virtual String getStringOfItem(int value) {return String(value);}
void setValue(int value);
protected:
// Inclusive selection bounds used by the spinner.
int minimum = 0;
int maximum = 1;
// Renders one numeric entry; flg semantics defined in the .cpp file.
void drawNum(int value, int flg);
};
#endif |
class Solution {
    /**
     * A permutation is "ideal" when every global inversion (A[i] > A[j], i < j)
     * is also a local inversion (j == i + 1). Equivalently: no element may be
     * greater than any element two or more positions to its right.
     *
     * @param A a permutation of 0..n-1 (must be non-empty)
     * @return true when global and local inversion counts coincide
     */
    public boolean isIdealPermutation(int[] A) {
        // suffixMin tracks the minimum of A[i+2 .. n-1] as i walks backwards.
        int suffixMin = A[A.length - 1];
        int i = A.length - 3;
        while (i >= 0) {
            // A[i] beats something at least two slots away -> non-local inversion.
            if (A[i] > suffixMin) {
                return false;
            }
            suffixMin = Math.min(suffixMin, A[i + 1]);
            i--;
        }
        return true;
    }
}
<gh_stars>0
import os
import itertools
import sys
#dmdcategories = ['acpa', 'concdel','danais', 'dcppc', 'doris', 'styx']
nmin_ngram, nmaxngram = 1, 2
local_weights = ['tf']
global_weights = ['chi2', 'idf']
nfolds = 4
def main(dmd_category, wd):
    """Build preprocessed, cross-validated and vectorized datasets for one demand category.

    Shells out to the ginipls make_dataset CLI three times:
    1. preprocess the raw texts (lowercase + TreeTagger lemmatization, French),
    2. split the preprocessed file into `nfolds` train/test folds,
    3. vectorize each fold split for every (local weight, global weight) scheme.

    Args:
        dmd_category: demand category name, used as subdirectory/file prefix.
        wd: working directory containing raw/, pp/, cv/, processed/, models/.
    """
    print(dmd_category)
    # Directory/file layout rooted at the working directory.
    raw_path = os.path.join(wd, 'raw', dmd_category)
    preprocessed_path = os.path.join(wd, 'pp', '%s.tsv'%dmd_category)
    cv_path = os.path.join(wd, 'cv')
    vec_path = os.path.join(wd, 'processed')
    models_path = os.path.join(wd, 'models')
    #_dirpath = os.join(wd, 'pp')
    #os.system('''python -m ginipls.data.make_dataset select-data taj-sens-resultat-data "amende civile" "32-1 code de procédure civile + 559 code de procédure civile : pour procédure abusive" data/raw/txt-all/acpa data/raw/CASSANDRA.tsv data/raw/txt-oneclaim/acpa''')
    # Step 1: raw -> preprocessed TSV. NOTE(review): os.system with %-interpolated
    # paths breaks on paths containing spaces/shell metacharacters.
    os.system('python -m ginipls.data.make_dataset --logging preprocess taj-sens-resultat --language=fr --lowercase --lemmatizer=treetagger %s %s' % (raw_path, preprocessed_path))
    # Step 2: split into nfolds train/test pairs under cv_path.
    os.system('python -m ginipls.data.make_dataset form-evaluation-data cv-traintest-from-dataset-file %d %s %s' % (nfolds, preprocessed_path, cv_path))
    # Step 3: one vectorization per (fold, local weight, global weight) and split.
    for k, lw, gw in itertools.product(range(nfolds), local_weights, global_weights):
        for datasplit in ['train', 'test']:
            splittextdatapath = os.path.join(cv_path, '%s_cv%d_%s.tsv' %(dmd_category, k, datasplit))
            splitvecdatapath = os.path.join(vec_path, '%s_cv%d_%s_%s%s%d%d.tsv' %(dmd_category, k, datasplit, lw, gw, nmin_ngram, nmaxngram))
            # The fitted vector-space model is shared between the train and test split.
            splitvsmpath = os.path.join(models_path, '%s_cv%d_%s%s%d%d.vsm' %(dmd_category, k, lw, gw, nmin_ngram, nmaxngram))
            os.system('python -m ginipls.data.make_dataset --logging vectorize --vsm_scheme=%s%s --label_col=@label --index_col=@id --text_col=@text --ngram_nmin=%d --ngram_nmax=%d %s %s %s' % (lw,gw, nmin_ngram, nmaxngram, splittextdatapath, splitvsmpath, splitvecdatapath))
            #break
if __name__ == "__main__":
    # Usage: python -m ginipls.data.make_taj_sens_resultat_dataset [category] [working_dir]
    args = sys.argv[1:]
    demand_category = args[0] if args else 'acpa'
    wd = args[1] if len(args) > 1 else 'data/taj-sens-resultat'  # working dir
    main(demand_category, wd)
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This command checks that the built commands can function together for
# simple scenarios. It does not require Docker so it can run in travis.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/test.sh"
# Terminates the kubectl proxy launched by start-proxy, if one is running,
# and resets its bookkeeping variables.
function stop-proxy()
{
  if [[ -n "${PROXY_PID-}" ]]; then
    kill "${PROXY_PID}" 1>&2 2>/dev/null
  fi
  PROXY_PID=
  PROXY_PORT=
}
# Starts "kubectl proxy" to test the client proxy. $1: optional api_prefix.
# Tries up to 3 random ports, exports PROXY_PID/PROXY_PORT, and blocks until
# the proxy's healthz endpoint answers.
function start-proxy()
{
stop-proxy
kube::log::status "Starting kubectl proxy"
for retry in $(seq 1 3); do
PROXY_PORT=$(kube::util::get_random_port)
kube::log::status "On try ${retry}, use proxy port ${PROXY_PORT} if it's free"
# Only launch when the candidate port is free; `& break` backgrounds the
# proxy and leaves the loop so $! below refers to it.
if kube::util::test_host_port_free "127.0.0.1" "${PROXY_PORT}"; then
if [ $# -eq 0 ]; then
kubectl proxy -p ${PROXY_PORT} --www=. 1>&2 & break
else
kubectl proxy -p ${PROXY_PORT} --www=. --api-prefix="$1" 1>&2 & break
fi
fi
sleep 1;
done
PROXY_PID=$!
# Wait on the prefix-specific healthz URL when an api_prefix was given.
if [ $# -eq 0 ]; then
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy"
else
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1"
fi
}
# Kills every daemon this script started (apiserver, controller-manager,
# kubelet, proxy, etcd) and removes the temp dir. Registered below via
# kube::util::trap_add for EXIT/SIGINT.
function cleanup()
{
[[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null
[[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null
[[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null
stop-proxy
kube::etcd::cleanup
rm -rf "${KUBE_TEMP}"
kube::log::status "Clean up complete"
}
# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# HTTP status code. Prints a helpful message on failure.
function check-curl-proxy-code()
{
  local -r path=$1
  local -r expected=$2
  local -r url="${PROXY_HOST}:${PROXY_PORT}${path}"
  local actual
  # -w '%{http_code}' prints only the status; the body goes to /dev/null.
  actual=$(curl -w "%{http_code}" --silent --output /dev/null "${url}")
  if [ "${actual}" != "${expected}" ]; then
    echo "For address ${url}, got ${actual} but wanted ${expected}"
    return 1
  fi
  return 0
}
# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
# Runs `kubectl "$@"`, retrying up to 4 times with exponential backoff when
# the failure is an optimistic-concurrency conflict ("the object has been
# modified"); any other outcome ends the loop immediately.
function kubectl-with-retry()
{
ERROR_FILE="${KUBE_TEMP}/kubectl-error"
for count in $(seq 0 3); do
# `|| true` keeps errexit from aborting; the error text decides the retry.
kubectl "$@" 2> ${ERROR_FILE} || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
# NOTE(review): logs only $1 (the first kubectl argument), not the full command.
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
rm "${ERROR_FILE}"
break
fi
done
}
# --- Environment setup: traps, etcd, ports, and kubelet smoke tests ---
kube::util::trap_add cleanup EXIT SIGINT
kube::util::ensure-temp-dir
kube::etcd::start
# Component endpoints; each may be overridden from the environment.
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-4001}
API_PORT=${API_PORT:-8080}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_PORT=${KUBELET_PORT:-10250}
KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.
# ensure ~/.kube/config isn't loaded by tests
HOME="${KUBE_TEMP}"
# Check kubectl runs at all with no options before doing anything else.
kube::log::status "Running kubectl with no options"
"${KUBE_OUTPUT_HOSTBIN}/kubectl"
# Smoke-test the kubelet without an apiserver, wait for healthz, then kill it.
kube::log::status "Starting kubelet in masterless mode"
"${KUBE_OUTPUT_HOSTBIN}/kubelet" \
--really-crash-for-testing=true \
--root-dir=/tmp/kubelet.$$ \
--cert-dir="${TMPDIR:-/tmp/}" \
--docker-endpoint="fake://" \
--hostname-override="127.0.0.1" \
--address="127.0.0.1" \
--port="$KUBELET_PORT" \
--healthz-port="${KUBELET_HEALTHZ_PORT}" 1>&2 &
KUBELET_PID=$!
kube::util::wait_for_url "http://127.0.0.1:${KUBELET_HEALTHZ_PORT}/healthz" "kubelet(masterless)"
kill ${KUBELET_PID} 1>&2 2>/dev/null
# Restart the kubelet pointed at the (not-yet-started) apiserver; this is the
# instance the rest of the tests use.
kube::log::status "Starting kubelet in masterful mode"
"${KUBE_OUTPUT_HOSTBIN}/kubelet" \
--really-crash-for-testing=true \
--root-dir=/tmp/kubelet.$$ \
--cert-dir="${TMPDIR:-/tmp/}" \
--docker-endpoint="fake://" \
--hostname-override="127.0.0.1" \
--address="127.0.0.1" \
--api-servers="${API_HOST}:${API_PORT}" \
--port="$KUBELET_PORT" \
--healthz-port="${KUBELET_HEALTHZ_PORT}" 1>&2 &
KUBELET_PID=$!
kube::util::wait_for_url "http://127.0.0.1:${KUBELET_HEALTHZ_PORT}/healthz" "kubelet"
# Start kube-apiserver in the background and wait for its healthz endpoint.
# KUBE_API_VERSIONS restricts which API groups/versions get registered.
kube::log::status "Starting kube-apiserver"
KUBE_API_VERSIONS="v1,extensions/v1beta1" "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
--address="127.0.0.1" \
--public-address-override="127.0.0.1" \
--port="${API_PORT}" \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--kubelet-port=${KUBELET_PORT} \
--runtime-config=api/v1 \
--cert-dir="${TMPDIR:-/tmp/}" \
--runtime_config="extensions/v1beta1/deployments=true" \
--service-cluster-ip-range="10.0.0.0/24" 1>&2 &
APISERVER_PID=$!
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver"
# Start controller manager in the background and wait until it is healthy
# and the apiserver reports the local node as registered.
kube::log::status "Starting controller-manager"
"${KUBE_OUTPUT_HOSTBIN}/kube-controller-manager" \
--port="${CTLRMGR_PORT}" \
--master="127.0.0.1:${API_PORT}" 1>&2 &
CTLRMGR_PID=$!
kube::util::wait_for_url "http://127.0.0.1:${CTLRMGR_PORT}/healthz" "controller-manager"
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/api/v1/nodes/127.0.0.1" "apiserver(nodes)"
# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
kube::log::status "Checking kubectl version"
kubectl version
runTests() {
version="$1"
echo "Testing api version: $1"
if [[ -z "${version}" ]]; then
kube_flags=(
-s "http://127.0.0.1:${API_PORT}"
--match-server-version
)
[ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
else
kube_flags=(
-s "http://127.0.0.1:${API_PORT}"
--match-server-version
--api-version="${version}"
)
[ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "${version}" ]
fi
id_field=".metadata.name"
labels_field=".metadata.labels"
annotations_field=".metadata.annotations"
service_selector_field=".spec.selector"
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
port_field="(index .spec.ports 0).port"
port_name="(index .spec.ports 0).name"
second_port_field="(index .spec.ports 1).port"
second_port_name="(index .spec.ports 1).name"
image_field="(index .spec.containers 0).image"
hpa_min_field=".spec.minReplicas"
hpa_max_field=".spec.maxReplicas"
hpa_cpu_field=".spec.cpuUtilization.targetPercentage"
job_parallelism_field=".spec.parallelism"
deployment_replicas=".spec.replicas"
secret_data=".data"
secret_type=".type"
# Passing no arguments to create is an error
! kubectl create
#######################
# kubectl local proxy #
#######################
# Make sure the UI can be proxied
start-proxy
check-curl-proxy-code /ui 301
check-curl-proxy-code /metrics 200
check-curl-proxy-code /api/ui 404
if [[ -n "${version}" ]]; then
check-curl-proxy-code /api/${version}/namespaces 200
fi
check-curl-proxy-code /static/ 200
stop-proxy
# Make sure the in-development api is accessible by default
start-proxy
check-curl-proxy-code /apis 200
check-curl-proxy-code /apis/extensions/ 200
stop-proxy
# Custom paths let you see everything.
start-proxy /custom
check-curl-proxy-code /custom/ui 301
check-curl-proxy-code /custom/metrics 200
if [[ -n "${version}" ]]; then
check-curl-proxy-code /custom/api/${version}/namespaces 200
fi
stop-proxy
###########################
# POD creation / deletion #
###########################
kube::log::status "Testing kubectl(${version}:pods)"
### Create POD valid-pod from JSON
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is running
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
# Repeat above test using jsonpath template
kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
# Describe command should print detailed information
kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image(s):" "Node:" "Labels:" "Status:" "Controllers"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image(s):" "Node:" "Labels:" "Status:" "Controllers"
### Dump current valid-pod POD
output_pod=$(kubectl get pod valid-pod -o yaml --output-version=v1 "${kube_flags[@]}")
### Delete POD valid-pod by id
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from dumped YAML
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
echo "${output_pod}" | kubectl create -f - "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod from JSON
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD redis-master from JSON
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod with label
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0
# Post-condition: no POD is running
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
### Create POD valid-pod from JSON
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete PODs with no parameter mustn't kill everything
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete pods "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete PODs with --all and a label selector is not permitted
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete all PODs
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 # --all remove all the pods
# Post-condition: no POD is running
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
### Create two PODs
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl create -f examples/redis/redis-proxy.yaml "${kube_flags[@]}"
# Post-condition: valid-pod and redis-proxy PODs are running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
### Delete multiple PODs at once
# Pre-condition: valid-pod and redis-proxy PODs are running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
# Command
kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # delete multiple pods at once
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two PODs
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl create -f examples/redis/redis-proxy.yaml "${kube_flags[@]}"
# Post-condition: valid-pod and redis-proxy PODs are running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
### Stop multiple PODs at once
# Pre-condition: valid-pod and redis-proxy PODs are running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
# Command
kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # delete multiple pods at once
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create valid-pod POD
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Label the valid-pod POD
# Pre-condition: valid-pod is not labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:'
# Command
kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
# Post-conditon: valid-pod is labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
### Delete POD by label
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 "${kube_flags[@]}"
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create valid-pod POD
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## Patch pod can change image
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# prove that yaml input works too
YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n'
kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}"
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
## Patch pod from JSON can change image
# Command
kubectl patch "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "kubernetes/pause"}]}}'
# Post-condition: valid-pod POD has image kubernetes/pause
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'kubernetes/pause:'
## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected
ERROR_FILE="${KUBE_TEMP}/conflict-error"
## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
# Command
# Needs to retry because other party may change the resource.
for count in $(seq 0 3); do
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
rm "${ERROR_FILE}"
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
break
fi
done
## If the resourceVersion differs from the one stored in the server, the patch will be rejected.
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
# Skew the resourceVersion so it can no longer match the server's copy.
((resourceVersion+=100))
# Command
# "|| true" keeps the script alive (the patch is EXPECTED to fail); stderr is captured for inspection.
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the conflict
if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat ${ERROR_FILE})"
else
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat ${ERROR_FILE})"
exit 1
fi
rm "${ERROR_FILE}"
## --force replace pod can change other field, e.g., spec.container.name
# Command
# Rename the container in the dumped manifest, then force-replace (delete+recreate) the pod.
kubectl get "${kube_flags[@]}" pod valid-pod -o json | sed 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json
# Post-condition: spec.container.name = "replaced-k8s-serve-hostname"
kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
#cleaning
rm /tmp/tmp-valid-pod.json
## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
# The generated editor rewrites the image in-place in the temp manifest kubectl hands it.
# "$1" is quoted so the editor still works if kubectl's temp file path contains spaces.
echo -e '#!/bin/bash\nsed -i "s/nginx/gcr.io\/google_containers\/serve_hostname/g" "$1"' > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod
# Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:'
# cleaning
rm /tmp/tmp-editor.sh
# EDITOR=cat makes no changes, so the edit is cancelled but the object is printed.
[ "$(EDITOR=cat kubectl edit pod/valid-pod 2>&1 | grep 'Edit cancelled')" ]
[ "$(EDITOR=cat kubectl edit pod/valid-pod | grep 'name: valid-pod')" ]
# --windows-line-endings should (only) produce CRLF terminators in the edited buffer.
[ "$(EDITOR=cat kubectl edit --windows-line-endings pod/valid-pod | file - | grep CRLF)" ]
[ ! "$(EDITOR=cat kubectl edit --windows-line-endings=false pod/valid-pod | file - | grep CRLF)" ]
### Overwriting an existing label is not permitted
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
# "!" asserts the command fails: the label exists and --overwrite was not given.
! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is still valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
### --overwrite must be used to overwrite existing label, can be applied to all resources
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is valid-pod-super-sayan
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'
### Delete POD by label
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 "${kube_flags[@]}"
# Post-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two PODs from 1 yaml file
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod and redis-proxy PODs are running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'
### Delete two PODs from 1 yaml file
# Pre-condition: redis-master and redis-proxy PODs are running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'
# Command
kubectl delete -f docs/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: no PODs are running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply should update configuration annotations only if apply is already called
## 1. kubectl create doesn't set the annotation
# Pre-Condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is running
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 2. kubectl replace doesn't set the annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | sed 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 3. kubectl apply does set the annotation
# Command: apply the pod "test-pod"
kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is applied
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Snapshot the annotation so it can be compared after the replace below.
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration
## 4. kubectl replace updates an existing annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | sed 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
# The two annotation snapshots must DIFFER. `diff -q` prints a "Files ... differ"
# line only when they do, so a non-empty substitution proves the annotation changed.
# (The previous form redirected diff's output to /dev/null inside the command
# substitution, making the substitution always empty and the check always pass.)
[[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced) ]]
# Clean up
rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
kubectl delete pods test-pod "${kube_flags[@]}"
## Configuration annotations should be set when --save-config is enabled
## 1. kubectl create --save-config should generate configuration annotation
# Pre-Condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 2. kubectl edit --save-config should generate configuration annotation
# Pre-Condition: no POD is running, then create pod "test-pod", which shouldn't have configuration annotation
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: edit the pod "test-pod"
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
# The fake editor rewrites the label in the temp manifest kubectl passes to it.
# "$@" is quoted so the editor still works if the temp file path contains spaces.
echo -e '#!/bin/bash\nsed -i "s/test-pod-label/test-pod-label-edited/g" "$@"' > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 3. kubectl replace --save-config should generate configuration annotation
# Pre-Condition: no POD is running, then create pod "test-pod", which shouldn't have configuration annotation
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: replace the pod "test-pod"
kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 4. kubectl run --save-config should generate configuration annotation
# Pre-Condition: no RC is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create the rc "nginx" with image nginx
kubectl run nginx --image=nginx --save-config --generator=run/v1 "${kube_flags[@]}"
# Post-Condition: rc "nginx" has configuration annotation
[[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 5. kubectl expose --save-config should generate configuration annotation
# Pre-Condition: no service is running
kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command: expose the rc "nginx"
kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
# Post-Condition: service "nginx" has configuration annotation
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,svc nginx
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC is running, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: autoscale rc "frontend"
kubectl autoscale -f examples/guestbook/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,hpa frontend "${kube_flags[@]}"
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is running
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"
## kubectl run should create deployments or jobs
# Pre-Condition: no Job is running
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# --restart=OnFailure makes kubectl run create a Job rather than an RC/Deployment.
kubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: Job "pi" is created
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Clean up
kubectl delete jobs pi "${kube_flags[@]}"
# Pre-Condition: no Deployment is running
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl run nginx --image=nginx --generator=deployment/v1beta1 "${kube_flags[@]}"
# Post-Condition: Deployment "nginx" is created
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
# Also remove the RCs the deployment spawned (selected by its pod-template hash label).
kubectl delete rc -l deployment.kubernetes.io/podTemplateHash "${kube_flags[@]}"
##############
# Namespaces #
##############
### Create a new namespace
# Pre-condition: only the "default" namespace exists
kube::test::get_object_assert 'namespaces' "{{range.items}}{{$id_field}}:{{end}}" 'default:'
# Command
kubectl create namespace my-namespace
# Post-condition: namespace 'my-namespace' is created.
kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
# Clean up
kubectl delete namespace my-namespace
######################
# Pods in Namespaces #
######################
### Create POD valid-pod in specific namespace
# Pre-condition: no POD is running
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" --namespace=other -f docs/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is running
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod in specific namespace
# Pre-condition: valid-pod POD is running
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
# Post-condition: no POD is running
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
##############
# Secrets #
##############
### Create a generic secret in a specific namespace
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
# 'dmFsdWUx' is base64("value1") — secret data is stored base64-encoded.
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep 'key1: dmFsdWUx')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a docker-registry secret in a specific namespace
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='test-user@test.com' --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockercfg'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockercfg:')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
#################
# Pod templates #
#################
### Create PODTEMPLATE
# Pre-condition: no PODTEMPLATE
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
# Command
kubectl create -f docs/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
# Post-condition: nginx PODTEMPLATE is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
### Printing pod templates works
kubectl get podtemplates "${kube_flags[@]}"
[[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]
### Delete nginx pod template by name
# Pre-condition: nginx pod template is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
# Command
kubectl delete podtemplate nginx "${kube_flags[@]}"
# Post-condition: No templates exist
kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
############
# Services #
############
kube::log::status "Testing kubectl(${version}:services)"
### Create redis-master service from JSON
# Pre-condition: Only the default kubernetes services are running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master service is running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Describe command should print detailed information
kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
### Dump current redis-master service
# Captured JSON is re-used below to re-create the service after deletion.
output_service=$(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}")
### Delete redis-master-service by id
# Pre-condition: redis-master service is running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
# Post-condition: Only the default kubernetes services are running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create redis-master-service from dumped JSON
# Pre-condition: Only the default kubernetes services are running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
# Post-condition: redis-master service is running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
### Create redis-master-${version}-test service
# Pre-condition: redis-master-service service is running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "service-${version}-test"
},
"spec": {
"ports": [
{
"protocol": "TCP",
"port": 80,
"targetPort": 80
}
]
}
}
__EOF__
# Post-condition:redis-master-service service is running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
### Identity
# Round-trip: replacing a service with its own dump should be a no-op that succeeds.
kubectl get service "${kube_flags[@]}" service-${version}-test -o json | kubectl replace "${kube_flags[@]}" -f -
### Delete services by id
# Pre-condition: redis-master-service service is running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
kubectl delete service "service-${version}-test" "${kube_flags[@]}"
# Post-condition: Only the default kubernetes services are running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create two services
# Pre-condition: Only the default kubernetes services are running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master and redis-slave services are running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
### Delete multiple services at once
# Pre-condition: redis-master and redis-slave services are running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
# Command
kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
# Post-condition: Only the default kubernetes services are running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
###########################
# Replication controllers #
###########################
kube::log::status "Testing kubectl(${version}:replicationcontrollers)"
### Create and stop controller, make sure it doesn't leak pods
# Pre-condition: no replication controller is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend controller
kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replication controller frontend from JSON
# Pre-condition: no replication controller is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend replication controller is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rc 'frontend' "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rc "Name:" "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
### Scale replication controller frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with (wrong) current-replicas and replicas
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
# Expected to fail: --current-replicas=3 does not match the actual count of 2.
! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: nothing changed
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with replicas only
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
### Scale replication controller from JSON with replicas only
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --replicas=2 -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale multiple replication controllers
kubectl create -f examples/guestbook/redis-master-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/redis-slave-controller.yaml "${kube_flags[@]}"
# Command
kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
# Post-condition: 4 replicas each
kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
# Clean-up
kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
### Scale a job
kubectl create -f docs/user-guide/job.yaml "${kube_flags[@]}"
# Command
# For jobs, scaling adjusts spec.parallelism rather than a replica count.
kubectl scale --replicas=2 job/pi
# Post-condition: 2 replicas for pi
kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
# Clean-up
kubectl delete job/pi "${kube_flags[@]}"
### Scale a deployment
kubectl create -f examples/extensions/deployment.yaml "${kube_flags[@]}"
# Command
kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# Post-condition: 1 replica for nginx-deployment
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
# TODO: Remove once deployment reaping is implemented
kubectl delete rc --all "${kube_flags[@]}"
### Expose replication controller as service
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl expose rc frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Command
kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
# Command
kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
# Create a service using service/v1 generator
kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
# Verify that expose service works without specifying a port.
kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
# Post-condition: service exists with the same port as the original service.
kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
# Cleanup services
kubectl delete pod valid-pod "${kube_flags[@]}"
kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"
### Expose negative invalid resource test
# Pre-condition: don't need
# Command
# Exposing a node makes no sense; capture the expected failure message.
output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
# Post-condition: the error message has "cannot expose" string
kube::test::if_has_string "${output_message}" 'cannot expose'
### Try to generate a service with invalid name (exceeding maximum valid size)
# Pre-condition: use --name flag
output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: should fail due to invalid name
kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
# Pre-condition: default run without --name flag; should succeed by truncating the inherited name
output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: inherited name from pod has been truncated
kube::test::if_has_string "${output_message}" '\"kubernetes-serve-hostnam\" exposed'
# Clean-up
kubectl delete svc kubernetes-serve-hostnam "${kube_flags[@]}"
### Expose multiport object as a new service
# Pre-condition: don't use --port flag
output_message=$(kubectl expose -f docs/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
# Post-condition: expose succeeded
kube::test::if_has_string "${output_message}" '\"etcd-server\" exposed'
# Post-condition: generated service has both ports from the exposed pod
kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
kube::test::get_object_assert 'service etcd-server' "{{$second_port_name}} {{$second_port_field}}" 'port-2 4001'
# Clean-up
kubectl delete svc etcd-server "${kube_flags[@]}"
### Delete replication controller with id
# Pre-condition: frontend replication controller is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no replication controller is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replication controllers
# Pre-condition: no replication controller is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/redis-slave-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple controllers at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
# Post-condition: no replication controller is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Auto scale replication controller
# Pre-condition: no replication controller is running
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, rc specified by file
kubectl autoscale -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, default CPU utilization (80%), rc specified by name
kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rc frontend "${kube_flags[@]}"
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"
### Auto scale deployment
# Pre-condition: no deployment is running
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f examples/extensions/deployment.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
# autoscale 2~3 pods, default CPU utilization (80%)
# kubectl-with-retry retries the command to tolerate transient apiserver conflicts.
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
# Clean up
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete rc -l deployment.kubernetes.io/podTemplateHash "${kube_flags[@]}"
######################
# Multiple Resources #
######################
kube::log::status "Testing kubectl(${version}:multiple resources)"
# Base names (no extension) of the multi-resource fixtures; the loop below picks
# the .yaml variant when it exists, otherwise the .json variant.
FILES="hack/testdata/multi-resource-yaml
hack/testdata/multi-resource-list
hack/testdata/multi-resource-json
hack/testdata/multi-resource-rclist
hack/testdata/multi-resource-svclist"
YAML=".yaml"
JSON=".json"
for file in $FILES; do
if [ -f $file$YAML ]
then
file=$file$YAML
replace_file="${file%.yaml}-modify.yaml"
else
file=$file$JSON
replace_file="${file%.json}-modify.json"
fi
has_svc=true
has_rc=true
two_rcs=false
two_svcs=false
if [[ "${file}" == *rclist* ]]; then
has_svc=false
two_rcs=true
fi
if [[ "${file}" == *svclist* ]]; then
has_rc=false
two_svcs=true
fi
### Create, get, describe, replace, label, annotate, and then delete service nginxsvc and replication controller my-nginx from 5 types of files:
### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation
### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type
echo "Testing with file ${file} and replace with file ${replace_file}"
# Pre-condition: no service (other than default kubernetes services) or replication controller is running
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f "${file}" "${kube_flags[@]}"
# Post-condition: mock service (and mock2) is running
if [ "$has_svc" = true ]; then
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:mock2:'
else
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:'
fi
fi
# Post-condition: mock rc (and mock2) is running
if [ "$has_rc" = true ]; then
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
else
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
fi
fi
# Command
kubectl get -f "${file}" "${kube_flags[@]}"
# Command: watching multiple resources should return "not supported" error
WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error"
kubectl get -f "${file}" "${kube_flags[@]}" "--watch" 2> ${WATCH_ERROR_FILE} || true
if ! grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then
kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat ${WATCH_ERROR_FILE})" "1"
fi
kubectl describe -f "${file}" "${kube_flags[@]}"
# Command
kubectl replace -f $replace_file --force "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced'
fi
fi
# Command: kubectl edit multiple resources
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e '#!/bin/bash\nsed -i "s/status\:\ replaced/status\:\ edited/g" $@' > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are edited
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited'
fi
fi
# cleaning
rm "${temp_editor}"
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing labels.
kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service and mock rc (and mock2) are labeled
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
fi
fi
# Command
# Command
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
# fails on some, but not all, of the resources, retries will fail because it tries to modify
# existing annotations.
kubectl-with-retry annotate -f $file annotated=true --overwrite "${kube_flags[@]}"
# Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
if [ "$has_svc" = true ]; then
kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_svcs" = true ]; then
kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
if [ "$has_rc" = true ]; then
kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
if [ "$two_rcs" = true ]; then
kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
fi
fi
# Cleanup resources created
kubectl delete -f "${file}" "${kube_flags[@]}"
done
######################
# Persistent Volumes #
######################
### Create and delete persistent volume examples
# Pre-condition: no persistent volumes currently exist
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
kubectl delete pv pv0001 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
kubectl delete pv pv0002 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
kubectl delete pv pv0003 "${kube_flags[@]}"
# Post-condition: no PVs
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
############################
# Persistent Volume Claims #
############################
### Create and delete persistent volume claim examples
# Pre-condition: no persistent volume claims currently exist
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
kubectl delete pvc myclaim-1 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
kubectl delete pvc myclaim-2 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
kubectl delete pvc myclaim-3 "${kube_flags[@]}"
# Post-condition: no PVCs
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
#########
# Nodes #
#########
kube::log::status "Testing kubectl(${version}:nodes)"
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
### kubectl patch update can mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
# Post-condition: node is unschedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
# Post-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
#####################
# Retrieve multiple #
#####################
kube::log::status "Testing kubectl(${version}:multiget)"
kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'
#####################
# Resource aliasing #
#####################
kube::log::status "Testing resource aliasing"
kubectl create -f examples/cassandra/cassandra-controller.yaml "${kube_flags[@]}"
kubectl scale rc cassandra --replicas=1 "${kube_flags[@]}"
kubectl create -f examples/cassandra/cassandra-service.yaml "${kube_flags[@]}"
kube::test::get_object_assert "all -l'app=cassandra'" "{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}" 'cassandra:cassandra:cassandra:'
kubectl delete all -l app=cassandra "${kube_flags[@]}"
###########
# Explain #
###########
kube::log::status "Testing kubectl(${version}:explain)"
kubectl explain pods
# shortcuts work
kubectl explain po
kubectl explain po.status.message
###########
# Swagger #
###########
if [[ -n "${version}" ]]; then
# Verify schema
file="${KUBE_TEMP}/schema-${version}.json"
curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/${version}" > "${file}"
[[ "$(grep "list of returned" "${file}")" ]]
[[ "$(grep "List of pods" "${file}")" ]]
[[ "$(grep "Watch for changes to the described resources" "${file}")" ]]
fi
kube::test::clear_all
}
# Run the kubectl test suite once per API version.
# An empty string means "let the client use its default API version".
kube_api_versions=(
""
v1
)
for version in "${kube_api_versions[@]}"; do
# KUBE_API_VERSIONS restricts which API groups the test server enables.
KUBE_API_VERSIONS="v1,extensions/v1beta1" runTests "${version}"
done
kube::log::status "TEST PASSED"
|
from django.contrib.auth.models import Group
from rest_framework import serializers
from .models import Status, Criticality
class StatusSerializer(serializers.ModelSerializer):
    """Serializes every field of the Status model."""

    class Meta:
        model = Status
        fields = "__all__"
class GroupSerializer(serializers.ModelSerializer):
    """Serializes every field of Django's built-in auth Group model."""

    class Meta:
        model = Group
        fields = "__all__"
class CriticalitySerializer(serializers.ModelSerializer):
    """Serializes every field of the Criticality model."""

    class Meta:
        model = Criticality
        fields = "__all__"
|
package cormoran.pepper.primitives;
/**
 * Consumer of primitive int values. Using the primitive overload avoids
 * Integer boxing on hot paths.
 */
public interface IIntAppendable {
	/**
	 * Receives a single int value.
	 *
	 * @param value the value to append
	 */
	void appendInt(int value);
}
|
def calculate_radar_position(offset, car_coordinate_system):
    """Translate a radar mounting offset into global coordinates.

    The offset (x, y, z) is expressed in the car's coordinate system and
    is added component-wise to the global position of that system's origin.

    NOTE(review): assumes the car's axes are aligned with the global frame
    (pure translation, no rotation) -- confirm against get_global_position.
    """
    # Global position of the car's coordinate-system origin.
    origin = get_global_position(car_coordinate_system)

    # Component-wise translation of the mounting offset into global space.
    return (
        origin[0] + offset[0],
        origin[1] + offset[1],
        origin[2] + offset[2],
    )
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) 2019 Petr Vorel <pvorel@suse.cz>
# Copyright (c) 2018-2019 ARM Ltd. All Rights Reserved.
. tst_test.sh
# Find mountpoint to given subsystem
# get_cgroup_mountpoint SUBSYSTEM
# RETURN: 0 if mountpoint found, otherwise 1
# Find the mountpoint of a given cgroup subsystem.
# Usage: get_cgroup_mountpoint SUBSYSTEM
# Prints the mountpoint on stdout.
# RETURN: 0 if mountpoint found, otherwise 1
get_cgroup_mountpoint()
{
	local subsystem="$1"
	local mntpoint

	[ $# -eq 0 ] && tst_brk TBROK "get_cgroup_mountpoint: subsystem not defined"

	# Quote the expansion so odd subsystem strings cannot word-split, and
	# stop at the first match: callers expect a single path, but a subsystem
	# can appear in more than one cgroup mount line.
	mntpoint=$(grep cgroup /proc/mounts | grep -w "$subsystem" | awk '{ print $2; exit }')
	[ -z "$mntpoint" ] && return 1

	echo "$mntpoint"
	return 0
}
# Check if given subsystem is supported and enabled
# is_cgroup_subsystem_available_and_enabled SUBSYSTEM
# RETURN: 0 if subsystem supported and enabled, otherwise 1
# Check if given subsystem is supported and enabled
# Usage: is_cgroup_subsystem_available_and_enabled SUBSYSTEM
# RETURN: 0 if subsystem supported and enabled, otherwise 1
is_cgroup_subsystem_available_and_enabled()
{
	local val
	local subsystem="$1"

	[ $# -eq 0 ] && tst_brk TBROK "is_cgroup_subsystem_available_and_enabled: subsystem not defined"

	# Column 4 of /proc/cgroups is the subsystem's "enabled" flag.
	# Quote the expansion to avoid word splitting of the pattern.
	val=$(grep -w "$subsystem" /proc/cgroups | awk '{ print $4 }')
	[ "$val" = "1" ] && return 0

	return 1
}
|
/// <summary>
/// Returns the input string with its first character converted to upper case;
/// the remainder of the string is left unchanged.
/// </summary>
/// <param name="inputString">The string to capitalize; may be null or empty.</param>
/// <returns>The capitalized string, or the input unchanged when null or empty.</returns>
public static string CapitalizeFirstLetter(string inputString)
{
    // Guard: the previous implementation called First() which throws
    // InvalidOperationException on an empty string (and NRE on null).
    if (string.IsNullOrEmpty(inputString))
        return inputString;

    // char.ToUpper avoids the LINQ dependency of First().ToString().ToUpper().
    return char.ToUpper(inputString[0]) + inputString.Substring(1);
}
string inputString = "This is a string";
string outputString = CapitalizeFirstLetter(inputString);
Console.WriteLine("Input String: " + inputString);
Console.WriteLine("Output String: " + outputString);
Output:
Input String: This is a string
Output String: This is a string
(The first character is already upper case, so the string is unchanged;
the function capitalizes only the first letter, not every word.)
# Binary search (the original header used a C-style "//" comment, which is a
# SyntaxError in Python).
def binarySearch(arr, target):
    """Iterative binary search over an ascending-sorted sequence.

    Args:
        arr: A sequence sorted in ascending order.
        target: The value to locate.

    Returns:
        The index of target in arr, or -1 if target is not present.
    """
    low = 0
    high = len(arr) - 1
    while low <= high:
        # Integer midpoint; // keeps the index integral.
        mid = (low + high) // 2
        if arr[mid] == target:
            return mid
        elif target < arr[mid]:
            high = mid - 1
        else:
            low = mid + 1
    return -1
package com.androidapp.share.dialog;
import android.annotation.SuppressLint;
import android.app.Dialog;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.drawable.BitmapDrawable;
import android.graphics.drawable.Drawable;
import android.os.Build;
import android.support.annotation.RequiresApi;
import android.support.v4.content.ContextCompat;
import android.support.v4.graphics.drawable.DrawableCompat;
import android.support.v7.widget.GridLayoutManager;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.OrientationHelper;
import android.support.v7.widget.RecyclerView;
import android.text.TextUtils;
import android.util.TypedValue;
import android.view.Gravity;
import android.view.View;
import android.view.ViewGroup;
import android.view.WindowManager;
import android.widget.LinearLayout;
import android.widget.TextView;
import com.androidapp.share.R;
import com.androidapp.share.bean.ShareEnum;
import com.androidapp.share.util.Utils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Bottom-sheet share dialog with a fluent builder API.
 *
 * Typical usage:
 * new ShareDialog(context).title(...).layout(GRID).inflateMenu(listener).show();
 *
 * All builder methods delegate to an internal {@link CustomDialog} and return
 * {@code this} for chaining.
 */
public class ShareDialog {

    // Item orientations, mirroring RecyclerView's OrientationHelper values.
    public static final int HORIZONTAL = OrientationHelper.HORIZONTAL;
    public static final int VERTICAL = OrientationHelper.VERTICAL;
    // Layout modes: LINEAR is a single row/column, GRID is a 5-column grid.
    public static final int LINEAR = 0;
    public static final int GRID = 1;

    // The dialog that all builder calls are forwarded to.
    private CustomDialog customDialog;

    public ShareDialog(Context context) {
        customDialog = new CustomDialog(context);
    }

    /** Sets the dialog title text. */
    public ShareDialog title(String title) {
        customDialog.title(title);
        return this;
    }

    /** Sets the dialog title from a string resource id. */
    public ShareDialog title(int title) {
        customDialog.title(title);
        return this;
    }

    /** Sets the dialog's background drawable resource. */
    public ShareDialog background(int res) {
        customDialog.background(res);
        return this;
    }

    /** Fills the dialog with the default share targets (moments, wechat, weibo, qq, qzone). */
    public ShareDialog inflateMenu(OnShareItemClickListener onShareItemClickListener) {
        customDialog.inflateMenu(onShareItemClickListener);
        return this;
    }

    /** Chooses the layout mode: {@link #LINEAR} or {@link #GRID}. */
    public ShareDialog layout(int layout) {
        customDialog.layout(layout);
        return this;
    }

    /** Chooses the item orientation: {@link #HORIZONTAL} or {@link #VERTICAL}. */
    public ShareDialog orientation(int orientation) {
        customDialog.orientation(orientation);
        return this;
    }

    /** Adds a custom list of share targets with a click callback. */
    public ShareDialog addItems(List<ShareEnum> items, OnShareItemClickListener onShareItemClickListener) {
        customDialog.addItems(items, onShareItemClickListener);
        return this;
    }

    /**
     * @deprecated Pass the listener to {@link #addItems} or {@link #inflateMenu}
     * instead; this method requires the adapter to already exist.
     */
    public ShareDialog itemClick(OnShareItemClickListener listener) {
        customDialog.setItemClick(listener);
        return this;
    }

    /** Shows the dialog. */
    public void show() {
        customDialog.show();
    }

    /**
     * The actual bottom-anchored Dialog. Holds the title, the item container
     * and the RecyclerView that renders the share targets.
     */
    private final class CustomDialog extends Dialog {

        private LinearLayout background;
        private LinearLayout container;
        private TextView titleView;
        private DialogAdapter adapter;
        // Dimensions resolved once from resources in init().
        private int padding;
        private int topPadding;
        private int leftPadding;
        private int topIcon;
        private int leftIcon;
        // Current layout/orientation settings used when the adapter is built.
        private int orientation;
        private int layout;

        CustomDialog(Context context) {
            super(context, R.style.BottomDialog);
            init();
        }

        // One-time view setup: resolve dimensions, anchor the window to the
        // bottom of the screen and wire the cancel button.
        private void init() {
            padding = getContext().getResources().getDimensionPixelSize(R.dimen.app_normal_margin);
            topPadding = getContext().getResources().getDimensionPixelSize(R.dimen.app_tiny_margin);
            leftPadding = getContext().getResources().getDimensionPixelSize(R.dimen.app_normal_margin);
            topIcon = getContext().getResources().getDimensionPixelSize(R.dimen.bottom_dialog_top_icon);
            leftIcon = getContext().getResources().getDimensionPixelSize(R.dimen.bottom_dialog_left_icon);
            setContentView(R.layout.share_dialog);
            setCancelable(true);
            setCanceledOnTouchOutside(true);
            getWindow().setGravity(Gravity.BOTTOM);
            getWindow().setLayout(WindowManager.LayoutParams.MATCH_PARENT, WindowManager.LayoutParams.WRAP_CONTENT);
            background = (LinearLayout) findViewById(R.id.background);
            titleView = (TextView) findViewById(R.id.title);
            container = (LinearLayout) findViewById(R.id.container);
            findViewById(R.id.cancel).setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View view) {
                    dismiss();
                }
            });
        }

        // Builds a RecyclerView for the given items using the configured
        // layout/orientation and appends it to the container.
        void addItems(List<ShareEnum> items, OnShareItemClickListener onShareItemClickListener) {
            ViewGroup.LayoutParams params = new ViewGroup.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.WRAP_CONTENT);
            RecyclerView.LayoutManager manager;
            adapter = new DialogAdapter(items, layout, orientation);
            adapter.setItemClick(onShareItemClickListener);
            // GRID uses a fixed span of 5 columns; anything else falls back
            // to a linear layout in the configured orientation.
            if (layout == LINEAR)
                manager = new LinearLayoutManager(getContext(), orientation, false);
            else if (layout == GRID)
                manager = new GridLayoutManager(getContext(), 5, orientation, false);
            else manager = new LinearLayoutManager(getContext(), orientation, false);
            RecyclerView recyclerView = new RecyclerView(getContext());
            recyclerView.setLayoutParams(params);
            recyclerView.setLayoutManager(manager);
            recyclerView.setAdapter(adapter);
            container.addView(recyclerView);
        }

        public void title(int title) {
            title(getContext().getString(title));
        }

        // Sets the title and makes the (initially hidden) title view visible.
        public void title(String title) {
            titleView.setText(title);
            titleView.setVisibility(View.VISIBLE);
        }

        public void layout(int layout) {
            this.layout = layout;
            if (adapter != null) adapter.setLayout(layout);
        }

        public void orientation(int orientation) {
            this.orientation = orientation;
            if (adapter != null) adapter.setOrientation(orientation);
        }

        public void background(int res) {
            background.setBackgroundResource(res);
        }

        // Populates the dialog with the default share targets.
        @SuppressLint("RestrictedApi")
        void inflateMenu(OnShareItemClickListener onShareItemClickListener) {
            List<ShareEnum> items = new ArrayList<>();
            items.add(ShareEnum.moments);
            items.add(ShareEnum.wechat);
            items.add(ShareEnum.weibo);
            items.add(ShareEnum.qq);
            items.add(ShareEnum.qzone);
            addItems(items, onShareItemClickListener);
        }

        // NOTE: requires addItems()/inflateMenu() to have been called first,
        // otherwise adapter is null and this throws.
        void setItemClick(OnShareItemClickListener onShareItemClickListener) {
            adapter.setItemClick(onShareItemClickListener);
        }

        /**
         * recycler view adapter, provide HORIZONTAL and VERTICAL item style
         */
        private class DialogAdapter extends RecyclerView.Adapter<RecyclerView.ViewHolder> {

            private List<ShareEnum> mItems = Collections.emptyList();
            private OnShareItemClickListener itemClickListener;
            private int orientation;
            private int layout;

            DialogAdapter(List<ShareEnum> mItems, int layout, int orientation) {
                setList(mItems);
                this.layout = layout;
                this.orientation = orientation;
            }

            // Defensive copy of the null case only: a null list becomes empty.
            private void setList(List<ShareEnum> items) {
                mItems = items == null ? new ArrayList<ShareEnum>() : items;
            }

            void setItemClick(OnShareItemClickListener onShareItemClickListener) {
                this.itemClickListener = onShareItemClickListener;
            }

            public void setOrientation(int orientation) {
                this.orientation = orientation;
                notifyDataSetChanged();
            }

            public void setLayout(int layout) {
                this.layout = layout;
                notifyDataSetChanged();
            }

            // GRID and HORIZONTAL both use the icon-above-text holder;
            // VERTICAL uses the icon-left-of-text holder.
            @Override
            public RecyclerView.ViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
                if (layout == GRID)
                    return new TopHolder(new LinearLayout(parent.getContext()));
                else if (orientation == HORIZONTAL)
                    return new TopHolder(new LinearLayout(parent.getContext()));
                else return new LeftHolder(new LinearLayout(parent.getContext()));
            }

            @RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
            @Override
            public void onBindViewHolder(RecyclerView.ViewHolder holder, int position) {
                final ShareEnum item = mItems.get(position);
                TopHolder topHolder;
                LeftHolder leftHolder;
                // The GRID and HORIZONTAL branches are intentionally identical:
                // both bind a TopHolder (icon drawn above the label).
                if (layout == GRID) {
                    topHolder = (TopHolder) holder;
                    topHolder.item.setText(item.getItemTitle());
                    topHolder.item.setCompoundDrawablesWithIntrinsicBounds(null, topHolder.icon(getContext().getDrawable(item.getItemIcon())), null, null);
                    topHolder.item.setOnClickListener(new View.OnClickListener() {
                        @Override
                        public void onClick(View view) {
                            if (itemClickListener != null) itemClickListener.click(item);
                        }
                    });
                } else if (orientation == HORIZONTAL) {
                    topHolder = (TopHolder) holder;
                    topHolder.item.setText(item.getItemTitle());
                    topHolder.item.setCompoundDrawablesWithIntrinsicBounds(null, topHolder.icon(getContext().getDrawable(item.getItemIcon())), null, null);
                    topHolder.item.setOnClickListener(new View.OnClickListener() {
                        @Override
                        public void onClick(View view) {
                            if (itemClickListener != null) itemClickListener.click(item);
                        }
                    });
                } else {
                    // VERTICAL list: icon drawn to the left of the label.
                    leftHolder = (LeftHolder) holder;
                    leftHolder.item.setText(item.getItemTitle());
                    leftHolder.item.setCompoundDrawablesWithIntrinsicBounds(leftHolder.icon(getContext().getDrawable(item.getItemIcon())), null, null, null);
                    leftHolder.item.setOnClickListener(new View.OnClickListener() {
                        @Override
                        public void onClick(View view) {
                            if (itemClickListener != null) itemClickListener.click(item);
                        }
                    });
                }
            }

            @Override
            public int getItemCount() {
                return mItems.size();
            }

            /**
             * horizontal item adapter
             */
            class TopHolder extends RecyclerView.ViewHolder {

                private TextView item;

                TopHolder(View view) {
                    super(view);
                    ViewGroup.LayoutParams params = new ViewGroup.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.WRAP_CONTENT);
                    // Each cell takes one fifth of the screen width (matches
                    // the 5-column grid span used in addItems()).
                    params.width = Utils.getScreenWidth(getContext()) / 5;
                    item = new TextView(view.getContext());
                    item.setLayoutParams(params);
                    item.setMaxLines(1);
                    item.setEllipsize(TextUtils.TruncateAt.END);
                    item.setGravity(Gravity.CENTER);
                    item.setTextColor(ContextCompat.getColor(view.getContext(), R.color.gray_font_dark));
                    item.setTextSize(TypedValue.COMPLEX_UNIT_PX, getContext().getResources().getDimension(R.dimen.font_small));
                    item.setCompoundDrawablePadding(topPadding);
                    item.setPadding(0, padding, 0, padding);
                    // Ripple/selection feedback from the current theme.
                    TypedValue typedValue = new TypedValue();
                    view.getContext().getTheme().resolveAttribute(android.R.attr.selectableItemBackground, typedValue, true);
                    item.setBackgroundResource(typedValue.resourceId);
                    ((LinearLayout) view).addView(item);
                }

                // Scales the drawable to the configured top-icon size and wraps
                // it for tinting compatibility. Returns null for a null input.
                private Drawable icon(Drawable drawable) {
                    if (drawable != null) {
                        Bitmap bitmap = ((BitmapDrawable) drawable).getBitmap();
                        @SuppressWarnings("SuspiciousNameCombination") Drawable resizeIcon = new BitmapDrawable(getContext().getResources(), Bitmap.createScaledBitmap(bitmap, topIcon, topIcon, true));
                        Drawable.ConstantState state = resizeIcon.getConstantState();
                        resizeIcon = DrawableCompat.wrap(state == null ? resizeIcon : state.newDrawable().mutate());
                        return resizeIcon;
                    }
                    return null;
                }
            }

            /**
             * Vertical-list holder: icon to the left of the label.
             */
            class LeftHolder extends RecyclerView.ViewHolder {

                private TextView item;

                LeftHolder(View view) {
                    super(view);
                    ViewGroup.LayoutParams params = new ViewGroup.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.WRAP_CONTENT);
                    view.setLayoutParams(params);
                    item = new TextView(view.getContext());
                    item.setLayoutParams(params);
                    item.setMaxLines(1);
                    item.setEllipsize(TextUtils.TruncateAt.END);
                    item.setGravity(Gravity.CENTER_VERTICAL);
                    item.setTextColor(ContextCompat.getColor(view.getContext(), R.color.black));
                    item.setTextSize(TypedValue.COMPLEX_UNIT_PX, getContext().getResources().getDimension(R.dimen.font_normal));
                    item.setCompoundDrawablePadding(leftPadding);
                    item.setPadding(padding, padding, padding, padding);
                    // Ripple/selection feedback from the current theme.
                    TypedValue typedValue = new TypedValue();
                    view.getContext().getTheme().resolveAttribute(android.R.attr.selectableItemBackground, typedValue, true);
                    item.setBackgroundResource(typedValue.resourceId);
                    ((LinearLayout) view).addView(item);
                }

                // Scales the drawable to the configured left-icon size and wraps
                // it for tinting compatibility. Returns null for a null input.
                private Drawable icon(Drawable drawable) {
                    if (drawable != null) {
                        Bitmap bitmap = ((BitmapDrawable) drawable).getBitmap();
                        @SuppressWarnings("SuspiciousNameCombination") Drawable resizeIcon = new BitmapDrawable(getContext().getResources(), Bitmap.createScaledBitmap(bitmap, leftIcon, leftIcon, true));
                        Drawable.ConstantState state = resizeIcon.getConstantState();
                        resizeIcon = DrawableCompat.wrap(state == null ? resizeIcon : state.newDrawable().mutate());
                        return resizeIcon;
                    }
                    return null;
                }
            }
        }
    }
}
|
// repo: idjevm/FightPandemics
import React, { useState, createContext, useContext } from "react";
import { Row, Col } from "antd";
import {
Container,
Option,
TitleStep,
OptionButton,
BackButton,
CreateProfileButton,
CreateOrgLink,
} from "components/CreatePost/StyledPostAs";
import SubmitButton from "components/Button/SubmitButton";
import Form from "./Form/Form";
import SvgIcon from "components/Icon/SvgIcon";
import person from "assets/icons/person.svg";
import organization from "assets/icons/organization.svg";
import back from "assets/icons/back-arrow-gray.svg";
import closeButton from "assets/icons/close-btn.svg";
import { theme } from "constants/theme";
const { typography } = theme;
const CreatePostContext = createContext();
const organizations = [
{ id: 1, title: "Notion" },
{ id: 2, title: "Notion" },
];
// Step 1: choose whether to post as an individual or as an organisation.
// "Individual" jumps straight to the form (step 3); "Organisation" goes to
// the organisation picker (step 2). Renders nothing unless on step 1.
const Step1 = () => {
  const createPostContext = useContext(CreatePostContext);
  const { currentStep, setCurrentStep } = createPostContext;
  return (
    currentStep === 1 && (
      <>
        <TitleStep>Continue Posting As</TitleStep>
        <Row gutter={14} justify="center">
          <Col span={12}>
            <Option
              img={person}
              text="Individual"
              onClick={() => setCurrentStep(3)}
            />
          </Col>
          <Col span={12}>
            <Option
              img={organization}
              text="Organisation"
              onClick={() => setCurrentStep(2)}
            />
          </Col>
        </Row>
      </>
    )
  );
};
// Step 2: pick which organisation to post as, then advance to the form.
// NOTE(review): `organizations` is a hard-coded module-level placeholder
// (two entries both titled "Notion") -- presumably to be replaced by API
// data; verify before shipping.
const Step2 = () => {
  const createPostContext = useContext(CreatePostContext);
  const { setForm, currentStep, setCurrentStep } = createPostContext;
  return (
    currentStep === 2 && (
      <>
        <TitleStep>Posting as an Organisation</TitleStep>
        <BackButton src={back} onClick={() => setCurrentStep(1)} />
        {organizations.map((item) => {
          return (
            <OptionButton
              key={item.id}
              onClick={() => {
                // Remember the chosen organisation, then open the form.
                setForm({ organization: item });
                setCurrentStep(3);
              }}
            >
              {item.title}
            </OptionButton>
          );
        })}
        <CreateOrgLink to={""}>Create new one</CreateOrgLink>
      </>
    )
  );
};
// Step 3: the actual post form. Rendered outside the modal Wrapper (which
// hides itself on step 3). Closing the form resets the wizard to step 1
// and forwards the cancel event to the parent.
const Step3 = ({ onCancel }) => {
  const { currentStep, setCurrentStep } = useContext(CreatePostContext);
  if (currentStep !== 3) return null;
  return (
    <Form
      setCurrentStep={setCurrentStep}
      onClose={() => {
        setCurrentStep(1);
        onCancel();
      }}
    />
  );
};
const Wrapper = ({ onCancel, visible, children }) => {
const { currentStep } = useContext(CreatePostContext);
return (
<Container
title={" "}
style={{ textAlign: "center" }}
footer={null}
visible={visible && currentStep !== 3}
destroyOnClose={true}
overflow-y: hidden;
closeIcon={
<SvgIcon
src={closeButton}
style={{
position: "absolute",
right: "4.0rem",
top: "1.7rem",
filter: currentStep === 4 ? "" : "brightness(0.6)",
}}
/>
}
onCancel={onCancel}
currentStep={currentStep}
>
{children}
</Container>
);
};
// Step 4: success screen shown after the post has been created, offering
// profile creation or a skip link.
// NOTE(review): setForm/setCurrentStep are destructured but unused here,
// and no handler currently advances the wizard to step 4 -- confirm the
// intended flow with the form component.
const Step4 = () => {
  const createPostContext = useContext(CreatePostContext);
  const { setForm, currentStep, setCurrentStep } = createPostContext;
  return (
    currentStep === 4 && (
      <>
        <TitleStep fontSize={typography.size.xlarge} currentStep={currentStep}>
          Success
        </TitleStep>
        <CreateProfileButton primary>Create Profile</CreateProfileButton>
        <CreateOrgLink to={""}>Skip</CreateOrgLink>
      </>
    )
  );
};
// Wizard entry point: owns the current step and the in-progress form data
// and shares them with the step components via CreatePostContext.
// Step3 renders outside the modal Wrapper so the form can use its own layout.
const CreatePost = (props) => {
  const [currentStep, setCurrentStep] = useState(1);
  const [form, setForm] = useState({});
  return (
    <CreatePostContext.Provider
      value={{ form, setForm, currentStep, setCurrentStep }}
    >
      <Wrapper {...props}>
        <Step1 />
        <Step2 />
        <Step4 />
      </Wrapper>
      <Step3 {...props} />
    </CreatePostContext.Provider>
  );
};
|
#!/bin/sh
# Build the image and run it detached, publishing port 9090.
sudo docker build -t node-images-server .
# --rm removes the container when it stops; -d runs it in the background.
sudo docker run -p 9090:9090 -d -it --rm --name node-images-server node-images-server
|
import { commit, modify } from '@collectable/core';
import { CONST, ListStructure, OFFSET_ANCHOR, appendValues } from '../internals';
/**
* Appends a new value to the end of a list, growing the size of the list by one.
*
* @template T - The type of value contained by the list
* @param value - The value to append to the list
* @param list - The list to which the value should be appended
* @returns A list containing the appended value
*/
export function append<T> (value: T, list: ListStructure<T>): ListStructure<T> {
  list = modify(list);
  var tail = list._right;
  var slot = tail.slot;
  // Fast path: the right (tail) view points at a live, non-full leaf slot,
  // so the value can be written in place instead of running the general
  // appendValues() machinery.
  if(tail.group !== 0 && tail.offset === 0 && slot.group !== 0 && slot.size < CONST.BRANCH_FACTOR) {
    list._lastWrite = OFFSET_ANCHOR.RIGHT;
    list._size++;
    if(slot.group === list._group) {
      // Slot already belongs to this list's transaction group; grow in place.
      slot.adjustRange(0, 1, true);
    }
    else {
      // Copy-on-write: clone the slot (and the tail view, if foreign) into
      // this list's group before mutating.
      slot = slot.cloneWithAdjustedRange(list._group, 0, 1, true, true);
      if(tail.group !== list._group) {
        tail = tail.cloneToGroup(list._group);
        list._right = tail;
      }
      tail.slot = slot;
    }
    tail.sizeDelta++;
    tail.slotsDelta++;
    // Was `arguments[0]`: use the named parameter directly. `arguments` is
    // fragile (breaks if this is ever converted to an arrow function) and
    // defeats type checking; arguments[0] and `value` are the same binding.
    slot.slots[slot.slots.length - 1] = value;
  }
  else {
    // Slow path: delegate to the general multi-value append.
    appendValues(list, [value]);
  }
  return commit(list);
}
|
#!/bin/sh
#
# strip.sh
#
# Copyright (c) 2013 repk
#
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# <repk@triplefau.lt> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return repk
# ----------------------------------------------------------------------------
#
# Source the shared helper functions (check_file, ...) from the tools dir.
load_utils() {
	. ${TOOLS_BASEDIR}/utils.sh
}
# Strip debug symbols from every unstripped executable under CLFS_DIR.
# Loads the common, target and cross configuration first.
main() {
	load_utils

	check_file "${PKGMK_COMMONCONF}"
	check_file "${PKGMK_CROSSCONF}"
	. "${PKGMK_COMMONCONF}"
	PKGMK_TARGETCONF="${PKGMK_BASEDIR}/config/${CONFTARGET}/target.conf"
	check_file "${PKGMK_TARGETCONF}"
	. "${PKGMK_TARGETCONF}"
	. "${PKGMK_CROSSCONF}"

	# Avoid picking up target libraries while running host tools.
	unset LD_LIBRARY_PATH

	# NOTE(review): $(find ...) word-splits on paths containing spaces;
	# acceptable here since sysroot paths are controlled by the build.
	for f in $(find ${CLFS_DIR} -type f -executable); do
		# Fix: `>& /dev/null` is a bash/csh-ism that is invalid under
		# #!/bin/sh; POSIX requires explicit fd duplication.
		${FILEPROG} ${f} | grep -i "not stripped" > /dev/null 2>&1
		if [ ${?} -eq 0 ]; then
			${STRIP} --strip-debug ${f}
		fi
	done
}
# Resolve the script's own location so config paths work from any CWD.
TOOLS_BASEDIR=$(dirname $(readlink -e $0))
PKGMK_BASEDIR=$(dirname ${TOOLS_BASEDIR})
PKGMK_COMMONCONF="${PKGMK_BASEDIR}/config/common.conf"
PKGMK_CROSSCONF="${PKGMK_BASEDIR}/config/cross.conf"
FILEPROG=file

#Debug
DBGLVL=2

main "$@"
|
// repo: serginij/project-manager
import { styled } from 'linaria/react'
// Left-aligned h3 heading used at the top of forms.
export const FormTitle = styled.h3`
  text-align: left;
`
|
// file: console/src/boost_1_78_0/libs/spirit/example/qi/nabialek.cpp
/*=============================================================================
Copyright (c) 2003 <NAME>
Copyright (c) 2001-2010 <NAME>
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
///////////////////////////////////////////////////////////////////////////////
//
// The Nabialek trick.
//
// [ <NAME>; Somewhere, sometime in 2003... ] spirit1
// [ JDG November 17, 2009 ] spirit2
// [ JDG January 10, 2010 ] Updated to use rule pointers
// for efficiency.
//
///////////////////////////////////////////////////////////////////////////////
#include <boost/spirit/include/qi.hpp>
#include <boost/phoenix/operator.hpp>
#include <iostream>
#include <string>
namespace client
{
namespace qi = boost::spirit::qi;
namespace ascii = boost::spirit::ascii;
///////////////////////////////////////////////////////////////////////////////
// Our nabialek_trick grammar
///////////////////////////////////////////////////////////////////////////////
    template <typename Iterator>
    struct nabialek_trick : qi::grammar<
        Iterator, ascii::space_type, qi::locals<qi::rule<Iterator, ascii::space_type>*> >
    {
        nabialek_trick() : nabialek_trick::base_type(start)
        {
            using ascii::alnum;
            using qi::lexeme;
            using qi::lazy;
            using qi::_a;
            using qi::_1;

            // An identifier: a run of alphanumerics and underscores.
            id = lexeme[*(ascii::alnum | '_')];

            // Per-keyword argument rules: "one" takes a single identifier,
            // "two" takes a comma-separated pair.
            one = id;
            two = id >> ',' >> id;

            // Map each keyword to a POINTER to its argument rule; storing
            // pointers avoids copying the rules into the symbol table.
            keyword.add
                ("one", &one)
                ("two", &two)
                ;

            // The "Nabialek trick": match a keyword, store the selected rule
            // pointer in the local _a, then lazily invoke that rule to parse
            // the keyword's arguments.
            start = *(keyword[_a = _1] >> lazy(*_a));
        }

        qi::rule<Iterator, ascii::space_type> id, one, two;
        qi::rule<Iterator, ascii::space_type, qi::locals<qi::rule<Iterator, ascii::space_type>*> > start;
        qi::symbols<char, qi::rule<Iterator, ascii::space_type>*> keyword;
    };
}
///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int
main()
{
    using boost::spirit::ascii::space;
    typedef std::string::const_iterator iterator_type;
    typedef client::nabialek_trick<iterator_type> nabialek_trick;

    nabialek_trick g; // Our grammar

    // Each line starts with a keyword ("one" or "two") that selects which
    // sub-rule parses the rest of the line.
    std::string str = "one only\none again\ntwo first,second";

    std::string::const_iterator iter = str.begin();
    std::string::const_iterator end = str.end();
    bool r = phrase_parse(iter, end, g, space);

    // Success requires both a grammar match and full input consumption.
    if (r && iter == end)
    {
        std::cout << "-------------------------\n";
        std::cout << "Parsing succeeded\n";
        std::cout << "-------------------------\n";
    }
    else
    {
        // Report the unconsumed remainder to show where parsing stopped.
        std::string rest(iter, end);
        std::cout << "-------------------------\n";
        std::cout << "Parsing failed\n";
        std::cout << "stopped at: \": " << rest << "\"\n";
        std::cout << "-------------------------\n";
    }

    return 0;
}
|
import { Component, OnInit } from '@angular/core';
import { UtilService } from 'src/app/util/util.service';
import { Router, ActivatedRoute } from '@angular/router';
import { ProjectService } from '../service/project.service';
import { Team } from './team';
import { User } from '../user/user';
import { Modal } from '../util/modal';
@Component({
selector: 'app-team',
templateUrl: './team.component.html',
styleUrls: ['./team.component.css']
})
/**
 * Team management page for a project: lists the project's teams and their
 * members, and supports adding/removing users to/from a team.
 *
 * NOTE(review): the subscriptions opened in ngOnInit (paramMap, current-project
 * changes) are never unsubscribed — confirm whether this component is destroyed
 * and re-created often enough to warrant implementing OnDestroy.
 */
export class TeamComponent implements OnInit {
  projectId: string;
  teams: Array<Team>;
  // Synthetic "All Team" entry shown at the top of the team selector.
  allTeam: Team = new Team({"id": "allTeam", "name": "All Team"});
  users: Array<User>;
  currTeam: Team = new Team();
  showTeam: boolean = false;
  showUser: boolean = false;
  loadingUser: boolean = false;
  savingUser: boolean = false;
  noTeam: boolean = false;
  showSearchUserUl: boolean = false;
  addUserToTeam: Team = new Team();
  newUser: User;
  newUserId: string;
  searchUsers: Array<User> = new Array<User>();
  loadingUserSearch: boolean = false;
  notificationMessage: string = "";
  showSuccessNotification: boolean = false;
  showFailureNotification: boolean = false;
  removingUser: boolean = false;
  modal: Modal = new Modal();

  constructor(private projectService: ProjectService,
        private route: ActivatedRoute,
        private router: Router,
        private utilService: UtilService) { }

  ngOnInit() {
    // Resolve the project from the route; without an id we bounce back to the list.
    this.route.paramMap.subscribe(params => {
      console.log(params.get('id'));
      this.projectId = params.get("id");
      if (this.projectId) {
        this.loadTeamDetails(this.projectId);
      } else {
        this.navigateToProjectList();
      }
    });
    // Show Project Drop down in header
    this.utilService.showProjectDropDown();
    this.utilService.currPage = "project";
    this.utilService.listenToCurrentProjectChanged()
      .subscribe(
        (projectId) => {
          this.navigateToTeam(projectId);
        }
      );
  }

  /** Load all teams of the project and default the view to "All Team". */
  loadTeamDetails(projectId: string) {
    this.projectService.fetchTeamsByProjectIds([projectId]).subscribe(
      (teams: Array<Team>) => {
        this.teams = teams;
        if (teams.length > 0) {
          this.fetchUsersForAllTeam();
          this.currTeam = this.allTeam;
          this.showTeam = true;
          this.showUser = true;
        } else {
          this.noTeam = true;
        }
        console.log("teams", teams);
      }
    )
  }

  /** Union of the members of every team, de-duplicated by user id. */
  fetchUsersForAllTeam() {
    this.users = new Array<User>();
    console.log("fetchForAllTeams", this.teams);
    this.teams.forEach((team: Team) => {
      console.log(team.name);
      this.projectService.fetchUsersByTeamId(team.id)
        .subscribe((users: Array<User>) => {
          users.forEach((u) => {
            // De-dupe by id: users served by separate requests are distinct objects.
            if (!this.checkUserExistsInArrayById(u.id, this.users)) this.users.push(u);
          });
          console.log("users", this.users);
        });
    });
  }

  /** True when a user with the given id is already present in `users`. */
  checkUserExistsInArrayById(id: string, users: Array<User>): boolean {
    return users.some(el => el.id == id);
  }

  /** Replace the member list with the members of a single team. */
  fetchUsersForTeam(team: Team) {
    this.users = new Array<User>();
    this.projectService.fetchUsersByTeamId(team.id)
      .subscribe((users: Array<User>) => {
        this.users = users;
      });
  }

  /** Refresh the member list for whichever team is currently selected. */
  updateCurrTeam() {
    if (this.currTeam.id == "allTeam") {
      this.fetchUsersForAllTeam();
    } else
      this.fetchUsersForTeam(this.currTeam);
  }

  /** Open the add-user modal, defaulting the target team sensibly. */
  startAddUser() {
    if (this.currTeam.id == "allTeam")
      this.addUserToTeam = this.teams[0];
    else
      this.addUserToTeam = this.currTeam;
    this.newUser = undefined;
    this.newUserId = "";
    this.loadingUserSearch = true;
    this.showSuccessNotification = false;
    this.fetchUsersForTeam(this.addUserToTeam);
    this.modal.name = "addUser";
    this.showModal(this.modal);
  }

  cancelAddUser() {
    this.addUserToTeam = new Team();
    this.loadingUserSearch = false;
    this.hideModal();
  }

  selectTeam() {
    this.fetchUsersForTeam(this.addUserToTeam);
    this.searchUsers = [];
  }

  /**
   * Search users by id/name, excluding users already in the selected team.
   * BUG FIX: the previous `this.users.indexOf(u) == -1` compared object
   * references; users fetched by separate requests are never identical, so
   * existing members were never filtered out. Compare by id instead.
   */
  searchUser(userId: string) {
    this.showSearchUserUl = false;
    if (userId && userId.trim().length > 0) {
      this.projectService.fetchUserByUserIdOrNameLike(userId.trim())
        .subscribe((users: Array<User>) => {
          this.searchUsers = users.filter((u) => !this.checkUserExistsInArrayById(u.id, this.users));
          this.showSearchUserUl = true;
        });
    }
  }

  setNewUser(user: User) {
    this.newUser = user;
    this.showSearchUserUl = false;
  }

  /** Persist the selected user into the selected team and notify. */
  saveAddUser() {
    this.showSuccessNotification = false;
    this.showFailureNotification = false;
    console.log("newUser", this.newUser, "addUserToTeam", this.addUserToTeam);
    this.projectService.addUserToTeam(this.addUserToTeam.id, this.newUser).subscribe(
      () => {
        this.notificationMessage = "User " + this.newUser.userId + ", successfully added to team "
          + this.addUserToTeam.name;
        this.showSuccessNotification = true;
        this.updateCurrTeam();
        this.hideModal();
      }, (err) => {
        this.notificationMessage = "Unable to add User " + this.newUser.userId + ", to team "
          + this.addUserToTeam.name + ". Please try again later..!!";
        this.showFailureNotification = true;
        this.updateCurrTeam();
        this.hideModal();
      }
    )
  }

  /** Remove a user from a team; the success path delays to let the UI settle. */
  removeUserFromTeam(user: User, currTeam: Team) {
    this.removingUser = true;
    user["isRemoving"] = true;
    this.showSuccessNotification = false;
    this.showFailureNotification = false;
    this.projectService.removeUserFromTeam(currTeam.id, user.id).subscribe(
      () => {
        setTimeout(() => {
          this.removingUser = false;
          this.notificationMessage = "User " + user.userId + ", successfully removed from team "
            + currTeam.name;
          this.showSuccessNotification = true;
          this.updateCurrTeam();
          this.hideModal();
        }, 3000);
      }, (err) => {
        this.notificationMessage = "Unable to remove User " + user.userId + ", from team "
          + currTeam.name + ". Please try again later..!!";
        this.showFailureNotification = true;
        this.updateCurrTeam();
        this.hideModal();
      }
    )
  }

  private navigateToProjectList() {
    this.router.navigateByUrl("/project-list");
  }

  /**
   * Navigate to /project/:id/user-list.
   * BUG FIX: the segment was previously "/user-list"; a leading slash inside a
   * non-first navigate() segment is URL-encoded (%2F) rather than treated as a
   * path separator, producing a broken route.
   */
  private navigateToTeam(projectId: string) {
    this.router.navigate(["/project", projectId, "user-list"]).then((e) => {
      if (e) {
        console.log("Navigation is successful!");
      } else {
        console.log("Navigation has failed!");
      }
    });
  }

  showModal(modal: Modal) {
    this.modal = modal;
    this.modal.isShow = true;
  }

  hideModal() {
    this.modal.isShow = false;
  }
}
|
# settings.py
...
INSTALLED_APPS = [
...
'cart',
...
]
# urls.py
from django.contrib import admin
from django.urls import path
from cart.views import cart_view
# URL routes: the Django admin plus the shopping-cart page.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('cart/', cart_view),
]
# views.py
from django.shortcuts import render
def cart_view(request):
    # Render the shopping-cart page.
    # NOTE(review): cart.html iterates over `cart.items`, but no context is
    # passed here, so the list renders empty — confirm whether a context
    # processor supplies `cart`; otherwise pass it explicitly.
    return render(request, 'cart.html')
# cart.html
<h1>My Shopping Cart</h1>
<ul>
{% for item in cart.items %}
<li>{{ item.name }} - {{ item.price }}</li>
{% endfor %}
</ul> |
/**
* hub-detect
*
* Copyright (C) 2018 Black Duck Software, Inc.
* http://www.blackducksoftware.com/
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.blackducksoftware.integration.hub.detect.detector.nuget;
import com.blackducksoftware.integration.hub.detect.detector.Detector;
import com.blackducksoftware.integration.hub.detect.detector.DetectorEnvironment;
import com.blackducksoftware.integration.hub.detect.detector.DetectorException;
import com.blackducksoftware.integration.hub.detect.detector.DetectorType;
import com.blackducksoftware.integration.hub.detect.detector.ExtractionId;
import com.blackducksoftware.integration.hub.detect.detector.nuget.inspector.NugetInspector;
import com.blackducksoftware.integration.hub.detect.workflow.extraction.Extraction;
import com.blackducksoftware.integration.hub.detect.workflow.file.DetectFileFinder;
import com.blackducksoftware.integration.hub.detect.workflow.search.result.DetectorResult;
import com.blackducksoftware.integration.hub.detect.workflow.search.result.FilesNotFoundDetectorResult;
import com.blackducksoftware.integration.hub.detect.workflow.search.result.InspectorNotFoundDetectorResult;
import com.blackducksoftware.integration.hub.detect.workflow.search.result.PassedDetectorResult;
/**
 * Detector that fires when the target directory contains any Visual Studio
 * style project file (*.csproj, *.fsproj, ...). Extraction of the dependency
 * graph is delegated to the external Nuget inspector.
 */
public class NugetProjectDetector extends Detector {
    /** Every project-file glob the Nuget inspector understands. */
    static final String[] SUPPORTED_PROJECT_PATTERNS = new String[] {
        "*.csproj",      // C#
        "*.fsproj",      // F#
        "*.vbproj",      // VB
        "*.asaproj",     // Azure Stream Analytics
        "*.dcproj",      // Docker Compose
        "*.shproj",      // Shared Projects
        "*.ccproj",      // Cloud Computing
        "*.sfproj",      // Fabric Application
        "*.njsproj",     // Node.js
        "*.vcxproj",     // VC++
        "*.vcproj",      // VC++
        "*.xproj",       // .NET Core
        "*.pyproj",      // Python
        "*.hiveproj",    // Hive
        "*.pigproj",     // Pig
        "*.jsproj",      // JavaScript
        "*.usqlproj",    // U-SQL
        "*.deployproj",  // Deployment
        "*.msbuildproj", // Common Project System Files
        "*.sqlproj",     // SQL
        "*.dbproj",      // SQL Project Files
        "*.rproj"        // RStudio
    };

    private final DetectFileFinder fileFinder;
    private final NugetInspectorManager nugetInspectorManager;
    private final NugetInspectorExtractor nugetInspectorExtractor;

    /** Resolved lazily in {@link #extractable()}; consumed by {@link #extract}. */
    private NugetInspector inspector;

    public NugetProjectDetector(final DetectorEnvironment environment, final DetectFileFinder fileFinder, final NugetInspectorManager nugetInspectorManager, final NugetInspectorExtractor nugetInspectorExtractor) {
        super(environment, "Project", DetectorType.NUGET);
        this.fileFinder = fileFinder;
        this.nugetInspectorManager = nugetInspectorManager;
        this.nugetInspectorExtractor = nugetInspectorExtractor;
    }

    /** Applicable when at least one supported project file exists in the directory. */
    @Override
    public DetectorResult applicable() {
        for (int i = 0; i < SUPPORTED_PROJECT_PATTERNS.length; i++) {
            final String candidatePattern = SUPPORTED_PROJECT_PATTERNS[i];
            if (fileFinder.findFile(environment.getDirectory(), candidatePattern) != null) {
                return new PassedDetectorResult();
            }
        }
        return new FilesNotFoundDetectorResult(SUPPORTED_PROJECT_PATTERNS);
    }

    /** Extractable only when a Nuget inspector binary can be resolved. */
    @Override
    public DetectorResult extractable() throws DetectorException {
        inspector = nugetInspectorManager.findNugetInspector();
        return inspector == null
            ? new InspectorNotFoundDetectorResult("nuget")
            : new PassedDetectorResult();
    }

    /** Run the resolved inspector against the detector's directory. */
    @Override
    public Extraction extract(final ExtractionId extractionId) {
        return nugetInspectorExtractor.extract(environment.getDirectory(), inspector, extractionId);
    }
}
|
module Webpack
  # :nodoc:
  # Namespace for the webpack-rails integration.
  module Rails
    # Gem version string (semantic versioning).
    VERSION = "0.9.12"
  end
end
|
# Train ERM on DomainNet with DomainBed: envs 0-4 held out as test envs,
# 20% of each training env held out for model selection, logging to
# TensorBoard, running on GPU 3.
# BUG FIX: the final argument previously ended with a dangling "\" line
# continuation, which silently splices the next line (or EOF) into this
# command; spaces before each "\" also keep the arguments from fusing.
python -m domainbed.scripts.train \
    --data_dir=./domainbed/data/ \
    --algorithm ERM \
    --dataset DomainNet \
    --holdout_fraction 0.2 \
    --skip_model_save False \
    --tensorboard_use True \
    --output_dir=../result_domainbed/ERM_DNET/5 \
    --test_env 0 1 2 3 4 \
    --gpu 3
|
/**
 * Value object holding a user's public slug, login name, and password hash.
 * The hash is expected to come from password_hash().
 */
class User {
    /** @var string URL-friendly identifier */
    private $slug;
    /** @var string login name (e-mail address) */
    private $login;
    /** @var string password hash produced by password_hash() */
    private $password;

    public function __construct($slug, $login, $password) {
        $this->slug     = $slug;
        $this->login    = $login;
        $this->password = $password;
    }

    /** @return string the URL-friendly identifier */
    public function getSlug() { return $this->slug; }

    /** @return string the login name */
    public function getLogin() { return $this->login; }

    /** @return string the stored password hash */
    public function getPassword() { return $this->password; }

    /**
     * Check a plain-text password against the stored hash.
     *
     * @param string $inputPassword candidate password
     * @return bool true when the candidate matches the stored hash
     */
    public function validatePassword($inputPassword) {
        return password_verify($inputPassword, $this->password);
    }
}
// Example usage.
// NOTE: echoing a boolean prints "1" for true and "" (nothing) for false;
// use var_dump() to see a literal true/false.
$user = new User("john-doe", "johndoe@example.com", password_hash("password123", PASSWORD_DEFAULT));
echo $user->getSlug(); // Output: john-doe
echo $user->getLogin(); // Output: johndoe@example.com
echo $user->validatePassword("password123"); // Output: "1" (truthy)
echo $user->validatePassword("wrongpassword"); // Output: "" (falsy — prints nothing)
<gh_stars>0
print list(all_files('*.pye', os.environ['PATH']))
|
package integration
import (
"fmt"
"net"
"net/http"
"strconv"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
restclient "k8s.io/client-go/rest"
kapi "k8s.io/kubernetes/pkg/api"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"github.com/openshift/origin/pkg/authorization/authorizer/scope"
authorizationclient "github.com/openshift/origin/pkg/authorization/generated/internalclientset"
configapi "github.com/openshift/origin/pkg/cmd/server/api"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
"github.com/openshift/origin/pkg/cmd/util/clientcmd"
oauthapi "github.com/openshift/origin/pkg/oauth/apis/oauth"
oauthapiserver "github.com/openshift/origin/pkg/oauth/apiserver"
oauthclient "github.com/openshift/origin/pkg/oauth/generated/internalclientset/typed/oauth/internalversion"
"github.com/openshift/origin/pkg/oc/admin/policy"
userclient "github.com/openshift/origin/pkg/user/generated/internalclientset/typed/user/internalversion"
testutil "github.com/openshift/origin/test/util"
testserver "github.com/openshift/origin/test/util/server"
)
// testRequest describes one HTTP call against the kubelet and the status
// code it is expected to return for the user under test.
type testRequest struct {
	Method string // HTTP verb
	Path   string // kubelet endpoint path
	Result int    // expected HTTP status code
}
// TestNodeAuth verifies that the kubelet endpoints enforce the expected
// authorization levels: node admins can hit everything, node viewers only
// metrics/stats, scoped tokens and unauthorized users are rejected, and bad
// tokens yield 401.
func TestNodeAuth(t *testing.T) {
	// Server config
	masterConfig, nodeConfig, adminKubeConfigFile, err := testserver.StartTestAllInOne()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer testserver.CleanupMasterEtcd(t, masterConfig)

	// Cluster admin clients and client configs
	adminClient, err := testutil.GetClusterAdminKubeClient(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	adminConfig, err := testutil.GetClusterAdminClientConfig(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Client configs for lesser users
	masterKubeletClientConfig := configapi.GetKubeletClientConfig(*masterConfig)

	_, nodePort, err := net.SplitHostPort(nodeConfig.ServingInfo.BindAddress)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// BUG FIX: base 0 would parse a zero-padded port as octal; ports are decimal.
	nodePortInt, err := strconv.ParseInt(nodePort, 10, 0)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	masterKubeletClientConfig.Port = uint(nodePortInt)

	anonymousConfig := clientcmd.AnonymousClientConfig(adminConfig)

	badTokenConfig := clientcmd.AnonymousClientConfig(adminConfig)
	badTokenConfig.BearerToken = "<PASSWORD>"

	// BUG FIX: these setup errors were previously discarded, turning a failed
	// client setup into confusing downstream assertion failures.
	bobKubeClient, bobConfig, err := testutil.GetClientForUser(adminConfig, "bob")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, aliceConfig, err := testutil.GetClientForUser(adminConfig, "alice")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	sa1KubeClient, sa1Config, err := testutil.GetClientForServiceAccount(adminClient, *adminConfig, "default", "sa1")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, sa2Config, err := testutil.GetClientForServiceAccount(adminClient, *adminConfig, "default", "sa2")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Grant Bob system:node-reader, which should let them read metrics and stats
	addBob := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.NodeReaderRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(authorizationclient.NewForConfigOrDie(adminConfig)),
		Subjects:            []kapi.ObjectReference{{Kind: "User", Name: "bob"}},
	}
	if err := addBob.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// create a scoped token for bob that is only good for getting user info
	bobUser, err := userclient.NewForConfigOrDie(bobConfig).Users().Get("~", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	whoamiOnlyBobToken := &oauthapi.OAuthAccessToken{
		ObjectMeta: metav1.ObjectMeta{Name: "whoami-token-plus-some-padding-here-to-make-the-limit"},
		ClientName: oauthapiserver.OpenShiftCLIClientID,
		ExpiresIn:  200,
		Scopes:     []string{scope.UserInfo},
		UserName:   bobUser.Name,
		UserUID:    string(bobUser.UID),
	}
	if _, err := oauthclient.NewForConfigOrDie(adminConfig).OAuthAccessTokens().Create(whoamiOnlyBobToken); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, bobWhoamiOnlyConfig, err := testutil.GetClientForUser(adminConfig, "bob")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	bobWhoamiOnlyConfig.BearerToken = whoamiOnlyBobToken.Name

	// Grant sa1 system:cluster-reader, which should let them read metrics and stats
	addSA1 := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterReaderRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(authorizationclient.NewForConfigOrDie(adminConfig)),
		Subjects:            []kapi.ObjectReference{{Kind: "ServiceAccount", Namespace: "default", Name: "sa1"}},
	}
	if err := addSA1.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Wait for policy cache
	if err := testutil.WaitForClusterPolicyUpdate(bobKubeClient.Authorization(), "get", kapi.Resource("nodes/metrics"), true); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := testutil.WaitForClusterPolicyUpdate(sa1KubeClient.Authorization(), "get", kapi.Resource("nodes/metrics"), true); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Build a kubelet client config that targets the test node with the
	// credentials of the given rest config.
	kubeletClientConfig := func(config *restclient.Config) *kubeletclient.KubeletClientConfig {
		return &kubeletclient.KubeletClientConfig{
			Port:            uint(nodePortInt),
			EnableHttps:     true,
			TLSClientConfig: config.TLSClientConfig,
			BearerToken:     config.BearerToken,
		}
	}

	testCases := map[string]struct {
		KubeletClientConfig *kubeletclient.KubeletClientConfig
		Forbidden           bool
		NodeViewer          bool
		NodeAdmin           bool
	}{
		"bad token": {
			KubeletClientConfig: kubeletClientConfig(&badTokenConfig),
		},
		"anonymous": {
			KubeletClientConfig: kubeletClientConfig(&anonymousConfig),
			Forbidden:           true,
		},
		"cluster admin": {
			KubeletClientConfig: kubeletClientConfig(adminConfig),
			NodeAdmin:           true,
		},
		"master kubelet client": {
			KubeletClientConfig: masterKubeletClientConfig,
			NodeAdmin:           true,
		},
		"bob": {
			KubeletClientConfig: kubeletClientConfig(bobConfig),
			NodeViewer:          true,
		},
		// bob is normally a viewer, but when using a scoped token, he should end up denied
		"bob-scoped": {
			KubeletClientConfig: kubeletClientConfig(bobWhoamiOnlyConfig),
			Forbidden:           true,
		},
		"alice": {
			KubeletClientConfig: kubeletClientConfig(aliceConfig),
			Forbidden:           true,
		},
		"sa1": {
			KubeletClientConfig: kubeletClientConfig(sa1Config),
			NodeViewer:          true,
		},
		"sa2": {
			KubeletClientConfig: kubeletClientConfig(sa2Config),
			Forbidden:           true,
		},
	}

	for k, tc := range testCases {
		var (
			// expected result for requests a viewer should be able to make
			viewResult int
			// expected result for requests an admin should be able to make (that can actually complete with a 200 in our tests)
			adminResultOK int
			// expected result for requests an admin should be able to make (that return a 404 in this test if the authn/authz layer is completed)
			adminResultMissing int
		)
		switch {
		case tc.NodeAdmin:
			viewResult = http.StatusOK
			adminResultOK = http.StatusOK
			adminResultMissing = http.StatusNotFound
		case tc.NodeViewer:
			viewResult = http.StatusOK
			adminResultOK = http.StatusForbidden
			adminResultMissing = http.StatusForbidden
		case tc.Forbidden:
			viewResult = http.StatusForbidden
			adminResultOK = http.StatusForbidden
			adminResultMissing = http.StatusForbidden
		default:
			viewResult = http.StatusUnauthorized
			adminResultOK = http.StatusUnauthorized
			adminResultMissing = http.StatusUnauthorized
		}

		requests := []testRequest{
			// Responses to invalid paths are the same for all users
			{"GET", "/", http.StatusNotFound},
			{"GET", "/stats", http.StatusMovedPermanently}, // ServeMux redirects to the directory
			{"GET", "/logs", http.StatusMovedPermanently},  // ServeMux redirects to the directory
			{"GET", "/invalid", http.StatusNotFound},

			// viewer requests
			{"GET", "/metrics", viewResult},
			{"GET", "/stats/", viewResult},
			{"POST", "/stats/", viewResult}, // stats requests can be POSTs which contain query options

			// successful admin requests
			{"GET", "/healthz", adminResultOK},
			{"GET", "/pods", adminResultOK},
			{"GET", "/logs/", adminResultOK},

			// not found admin requests
			{"GET", "/containerLogs/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/exec/mynamespace/mypod/mycontainer?output=1", adminResultMissing},
			{"POST", "/run/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/attach/mynamespace/mypod/mycontainer?output=1", adminResultMissing},
			{"POST", "/portForward/mynamespace/mypod/mycontainer", adminResultMissing},

			// GET is supported in origin on /exec and /attach for backwards compatibility
			// make sure node admin permissions are required
			{"GET", "/exec/mynamespace/mypod/mycontainer?output=1", adminResultMissing},
			{"GET", "/attach/mynamespace/mypod/mycontainer?output=1", adminResultMissing},
		}

		rt, err := kubeletclient.MakeTransport(tc.KubeletClientConfig)
		if err != nil {
			t.Errorf("%s: unexpected error: %v", k, err)
			continue
		}
		for _, r := range requests {
			req, err := http.NewRequest(r.Method, fmt.Sprintf("https://%s:%d", nodeConfig.NodeName, nodePortInt)+r.Path, nil)
			if err != nil {
				t.Errorf("%s: %s: unexpected error: %v", k, r.Path, err)
				continue
			}
			resp, err := rt.RoundTrip(req)
			if err != nil {
				t.Errorf("%s: %s: unexpected error: %v", k, r.Path, err)
				continue
			}
			resp.Body.Close()
			if resp.StatusCode != r.Result {
				t.Errorf("%s: token=%s %s: expected %d, got %d", k, tc.KubeletClientConfig.BearerToken, r.Path, r.Result, resp.StatusCode)
				continue
			}
		}
	}
}
|
import React from 'react';
import { registerRootComponent } from 'expo';
import App from './storybook';
const ExampleApp = () => {
return <App />;
};
// registerRootComponent calls AppRegistry.registerComponent('main', () => App);
// It also ensures that whether you load the app in the Expo client or in a native build,
// the environment is set up appropriately
registerRootComponent(ExampleApp);
|
<reponame>duncte123/botAudioManager
/*
* MIT License
*
* Copyright (c) 2018 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package me.duncte123.botAudioManager.audioManagers;
import fredboat.audio.player.LavalinkManager;
import lavalink.client.player.IPlayer;
import net.dv8tion.jda.core.entities.Guild;
/**
 * Per-guild holder that wires together the Lavalink player, its track
 * scheduler, and the JDA send handler that streams the audio.
 */
public class GuildMusicManager {
    /**
     * The Lavalink player bound to this guild
     */
    public final IPlayer player;
    /**
     * The scheduler that queues tracks and reacts to player events
     */
    public final TrackScheduler scheduler;
    /**
     * The bridge that feeds this player's audio frames to JDA
     */
    private final AudioPlayerSenderHandler sendHandler;

    /**
     * Constructor
     *
     * @param g The guild that we want the manager for
     */
    public GuildMusicManager(Guild g) {
        // Order matters: the player must exist before the scheduler and
        // send handler can wrap it, and the scheduler is registered last.
        player = LavalinkManager.ins.createPlayer(g.getId());
        scheduler = new TrackScheduler(player);
        sendHandler = new AudioPlayerSenderHandler(player);
        player.addListener(scheduler);
    }

    /**
     * This will get our send handler
     *
     * @return The {@link AudioPlayerSenderHandler send handler} that streams our audio
     */
    public AudioPlayerSenderHandler getSendHandler() {
        return sendHandler;
    }
}
|
#!/bin/bash
LIBVPX_REPO="https://chromium.googlesource.com/webm/libvpx"
LIBVPX_COMMIT="16154dae714c3e88bfa17c25d5d6ad8198fac63a"
# Whether this package participates in the build; exit status 0 = enabled.
ffbuild_enabled() {
    return 0
}
# Emit the Dockerfile lines that copy this script into the image and run its
# build stage ($SELF and to_df/run_stage are provided by the build harness).
ffbuild_dockerstage() {
    to_df "ADD $SELF /stage.sh"
    to_df "RUN run_stage"
}
# Clone libvpx at the pinned commit, configure a static PIC build (with
# 10/12-bit VP9), cross-compile for the selected Windows target, and install
# into $FFBUILD_PREFIX. Returns non-zero on any failure.
ffbuild_dockerbuild() {
    git-mini-clone "$LIBVPX_REPO" "$LIBVPX_COMMIT" libvpx
    cd libvpx

    local myconf=(
        --disable-shared
        --enable-static
        --enable-pic
        --disable-examples
        --disable-tools
        --disable-docs
        --enable-vp9-highbitdepth
        --prefix="$FFBUILD_PREFIX"
    )

    if [[ $TARGET == win64 ]]; then
        myconf+=(
            --target=x86_64-win64-gcc
        )
        export CROSS="$FFBUILD_CROSS_PREFIX"
    elif [[ $TARGET == win32 ]]; then
        myconf+=(
            --target=x86-win32-gcc
        )
        export CROSS="$FFBUILD_CROSS_PREFIX"
    else
        echo "Unknown target"
        # BUG FIX: `return -1` is rejected by bash ("-1: invalid option") and
        # exit statuses are limited to 0-255; signal failure with 1.
        return 1
    fi

    ./configure "${myconf[@]}" || return 1
    make -j$(nproc) || return 1
    make install || return 1

    cd ..
    rm -rf libvpx
}
# FFmpeg configure flag enabling libvpx support.
ffbuild_configure() {
    echo --enable-libvpx
}

# FFmpeg configure flag disabling libvpx support.
ffbuild_unconfigure() {
    echo --disable-libvpx
}
|
<reponame>waleedmashaqbeh/freequartz
/* Copyright 2010 Smartmobili SARL
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <pthread.h>
#include "CGBasePriv.h"
#include "CGTypesPriv.h"
static pthread_mutex_t type_register_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t nextID_lock = PTHREAD_MUTEX_INITIALIZER;
CG_CONST_STRING_DECL(kCGCTypeCopyDescFormat, "<%s %p>");
/* Build a "<ClassName 0xADDR>" description string for any CF object using
 * the class name from its runtime class. Returns NULL when `cf` is NULL or
 * its class cannot be resolved; caller owns the returned string. */
CFStringRef copy_description(CFTypeRef cf, CFDictionaryRef formatOptions)
{
    const CFRuntimeClass* cls;

    if (!cf)
        return NULL;

    cls = _CFRuntimeGetClassWithTypeID(CFGetTypeID(cf));
    if (!cls)
        return NULL;

    return CFStringCreateWithFormat(CFGetAllocator(cf),
        formatOptions,
        kCGCTypeCopyDescFormat,
        cls->className,
        cf);
}
/* Convenience wrapper: description of `cf` with default formatting options. */
CFStringRef copy_debug_description(CFTypeRef cf)
{
    return copy_description(cf, NULL);
}
/* Register a CF runtime class exactly once, caching the resulting type id in
 * *typeID. Copies `rtc` into a heap-allocated CFRuntimeClass, substituting
 * default description callbacks where none are supplied. Thread-safe.
 * Returns the registered id, or _kCFRuntimeNotATypeID on allocation failure. */
CFTypeID CGTypeRegisterWithCallbacks(CFTypeID* typeID, CFRuntimeClass* rtc)
{
    CFRuntimeClass* l_rtc;

    /* Fast path without taking the lock. */
    if (*typeID != _kCFRuntimeNotATypeID) { return *typeID; }

    pthread_mutex_lock(&type_register_lock);

    /* BUG FIX: re-check under the lock — another thread may have completed
     * registration between the unlocked check and lock acquisition, and
     * registering twice would leak a class slot and change *typeID. */
    if (*typeID != _kCFRuntimeNotATypeID) {
        pthread_mutex_unlock(&type_register_lock);
        return *typeID;
    }

    l_rtc = (CFRuntimeClass*) malloc(sizeof(CFRuntimeClass));
    /* BUG FIX: malloc failure was previously dereferenced unconditionally. */
    if (!l_rtc) {
        pthread_mutex_unlock(&type_register_lock);
        return *typeID; /* still _kCFRuntimeNotATypeID */
    }

    l_rtc->version = 0;
    l_rtc->className = rtc->className;
    l_rtc->init = rtc->init;
    l_rtc->copy = rtc->copy;
    l_rtc->finalize = rtc->finalize;
    l_rtc->equal = rtc->equal;
    l_rtc->hash = rtc->hash;
    /* Fall back to the generic "<Class 0x...>" descriptions when unset. */
    l_rtc->copyFormattingDesc = (!rtc->copyFormattingDesc) ? copy_description : rtc->copyFormattingDesc;
    l_rtc->copyDebugDesc = (!rtc->copyDebugDesc) ? copy_debug_description : rtc->copyDebugDesc;
    l_rtc->reclaim = rtc->reclaim;
    /* Version 4 tells CFRuntime the reclaim callback is present. */
    if (rtc->reclaim)
        l_rtc->version = 4;

    *typeID = _CFRuntimeRegisterClass(l_rtc);

    pthread_mutex_unlock(&type_register_lock);
    return *typeID;
}
/* Create an instance of `id` with `size` extra payload bytes using the
 * default allocator (0). See CGTypeCreateInstanceWithAllocator. */
CFTypeRef CGTypeCreateInstance(CFTypeID id, CFIndex size)
{
    return CGTypeCreateInstanceWithAllocator(0, id, size);
}
/* Atomically increment *id under nextID_lock and return the new value;
 * serves as a process-wide monotonically increasing identifier source. */
CFTypeID CGTypeGetNextIdentifier(CFTypeID* id)
{
    CFTypeID next;

    pthread_mutex_lock(&nextID_lock);
    *id += 1;
    next = *id;
    pthread_mutex_unlock(&nextID_lock);

    return next;
}
/* Create an instance of type `id` via CFRuntime and zero-initialize the
 * `size` payload bytes that follow the CFRuntimeBase header.
 * Returns NULL when CFRuntime fails to allocate. */
CFTypeRef CGTypeCreateInstanceWithAllocator(CFAllocatorRef allocator, CFTypeID id, CFIndex size)
{
    CFTypeRef instance = _CFRuntimeCreateInstance(allocator, id, size, NULL);
    if (!instance)
        return NULL;

    /* Zero every field that follows the base object. */
    memset((void*)((char*)instance + sizeof(CFRuntimeBase)), 0, size);
    return instance;
}
/* Initialize (allocating first if `memory` is NULL) a static singleton
 * instance of `typeID`. `size` is the payload size excluding the
 * CFRuntimeBase header. Returns the instance memory, or NULL when the
 * allocation fails. */
CFTypeRef CGTypeCreateSingleton(CFTypeID typeID, void *memory, CFIndex size)
{
    if (memory == NULL) {
        memory = malloc(size + sizeof(CFRuntimeBase));
        if (!memory) {
            /* BUG FIX: the function pointer CGTypeCreateSingleton was passed
             * where %s expects a C string (undefined behavior), and %jd was
             * given a CFTypeID without an intmax_t cast; log the name as a
             * string and the id as an unsigned long. */
            CGPostError("%s: failed to create instance of type %lu",
                "CGTypeCreateSingleton", (unsigned long)typeID);
            return NULL;
        }
    }

    memset(memory, 0, size + sizeof(CFRuntimeBase));
    _CFRuntimeInitStaticInstance(memory, typeID);
    return memory;
}
//CFMutableDictionaryRef CGCFDictionaryCreate() |
<gh_stars>1-10
import React from 'react';
import Navbar from '../components/Navbar';
import styles from '../styles/main.scss';
import PostLink from '../components/PostLink';
import { Helmet } from 'react-helmet';
import { graphql } from 'gatsby';
const IndexPage = ({
data: {
allMarkdownRemark: { edges },
},
}) => {
// this takes all the posts and creates a list of PostLink components for each
// it takes category into account
const sciencePosts = edges
.filter(edge => !!edge.node.frontmatter.date && edge.node.frontmatter.category === 'science')
.map(edge => <PostLink key={edge.node.id} post={edge.node} />)
const culturePosts = edges
.filter(edge => !!edge.node.frontmatter.date && edge.node.frontmatter.category === 'culture')
.map(edge => <PostLink key={edge.node.id} post={edge.node} />)
const wisdomPosts = edges
.filter(edge => !!edge.node.frontmatter.date && edge.node.frontmatter.category === 'wisdom')
.map(edge => <PostLink key={edge.node.id} post={edge.node} />)
return (
<div className="main">
<Helmet
title="The Blog Where Dreams Come Alive"
/>
<Navbar />
<div className="blog-index">
<h1 className="blog-section__heading" id="science">Science</h1>
<div className="blog-section">
{sciencePosts}
</div>
<h1 className="blog-section__heading" id="culture">Culture</h1>
<div className="blog-section">
{culturePosts}
</div>
<h1 className="blog-section__heading" id="wisdom">Wisdom</h1>
<div className="blog-section">
{wisdomPosts}
</div>
</div>
</div>
)
}
export default IndexPage;
// Page query: all markdown posts, newest first, with the frontmatter fields
// PostLink consumes (path, title, image, category) plus a short excerpt.
export const pageQuery = graphql`
  query {
    allMarkdownRemark(sort: { order: DESC, fields: [frontmatter___date] }) {
      edges {
        node {
          id
          excerpt(pruneLength: 97)
          frontmatter {
            date(formatString: "MMMM DD, YYYY")
            path
            title
            image
            category
          }
        }
      }
    }
  }
`
|
/**
 * Normalize a file path given either as a string or a URL into its string
 * form: URLs are serialized via URL#toString(), plain strings are returned
 * unchanged.
 */
export const MANDARINE_GET_FILE_PATH = (path: string | URL): string => {
    return path instanceof URL ? path.toString() : path;
};
export const getFilePathString = ["getFilePath", MANDARINE_GET_FILE_PATH.toString()]; |
package main
import (
"bytes"
"github.com/stretchr/testify/assert"
"log"
"os"
"testing"
"github.com/bjartek/go-with-the-flow/v2/gwtf"
)
/*
Tests must be in the same folder as flow.json with contracts and transactions/scripts in subdirectories in order for the path resolver to work correctly
*/
// TestTransaction exercises the gwtf transaction DSL against the Flow
// emulator: signer validation, file resolution, event assertions, inline
// Cadence with debug logging, raw address arguments, expected failures,
// and event printing.
func TestTransaction(t *testing.T) {
	g := gwtf.NewTestingEmulator()
	t.Parallel()

	// A transaction without a main signer must be rejected before execution.
	t.Run("fail on missing signer", func(t *testing.T) {
		g.TransactionFromFile("create_nft_collection").
			Test(t).                                    //This method will return a TransactionResult that we can assert upon
			AssertFailure("You need to set the main signer") //we assert that there is a failure
	})

	// A typo'd file name ("create_nf_collection") must surface a read error.
	t.Run("fail on wrong transaction name", func(t *testing.T) {
		g.TransactionFromFile("create_nf_collection").
			SignProposeAndPayAs("first").
			Test(t).                                    //This method will return a TransactionResult that we can assert upon
			AssertFailure("Could not read transaction file from path=./transactions/create_nf_collection.cdc") //we assert that there is a failure
	})

	t.Run("Create NFT collection", func(t *testing.T) {
		g.TransactionFromFile("create_nft_collection").
			SignProposeAndPayAs("first").
			Test(t).         //This method will return a TransactionResult that we can assert upon
			AssertSuccess(). //Assert that there are no errors and that the transactions succeeds
			AssertNoEvents() //Assert that we did not emit any events.
	})

	t.Run("Mint tokens assert events", func(t *testing.T) {
		g.TransactionFromFile("mint_tokens").
			SignProposeAndPayAsService().
			AccountArgument("first").
			UFix64Argument("100.0").
			Test(t).
			AssertSuccess().
			AssertEventCount(3).                                                //assert the number of events returned
			AssertEmitEventName("A.0ae53cb6e3f42a79.FlowToken.TokensMinted").   //assert the name of a single event
			AssertEmitEventName("A.0ae53cb6e3f42a79.FlowToken.TokensMinted", "A.0ae53cb6e3f42a79.FlowToken.TokensDeposited", "A.0ae53cb6e3f42a79.FlowToken.MinterCreated"). //or assert more then one eventname in a go
			AssertEmitEvent(gwtf.NewTestEvent("A.0ae53cb6e3f42a79.FlowToken.TokensMinted", map[string]interface{}{"amount": "100.00000000"})). //assert a given event, can also take multiple events if you like
			AssertEmitEventJson("{\n  \"name\": \"A.0ae53cb6e3f42a79.FlowToken.MinterCreated\",\n  \"time\": \"1970-01-01T00:00:00Z\",\n  \"fields\": {\n    \"allowedAmount\": \"100.00000000\"\n  }\n}") //assert a given event using json, can also take multiple events if you like
	})

	// Inline Cadence with a payload signer in addition to the main signer.
	t.Run("Inline transaction with debug log", func(t *testing.T) {
		g.Transaction(`
import Debug from "../contracts/Debug.cdc"
transaction(message:String) {
  prepare(acct: AuthAccount, account2: AuthAccount) {
    Debug.log(message)
  }
}`).
			SignProposeAndPayAs("first").
			PayloadSigner("second").
			StringArgument("foobar").
			Test(t).
			AssertSuccess().
			AssertDebugLog("foobar") //assert that we have debug logged something. The assertion is contains so you do not need to write the entire debug log output if you do not like
	})

	// Addresses can be passed verbatim instead of resolving an account name.
	t.Run("Raw account argument", func(t *testing.T) {
		g.Transaction(`
import Debug from "../contracts/Debug.cdc"
transaction(user:Address) {
  prepare(acct: AuthAccount) {
    Debug.log(user.toString())
  }
}`).
			SignProposeAndPayAsService().
			RawAccountArgument("0x1cf0e2f2f715450").
			Test(t).
			AssertSuccess().
			AssertDebugLog("0x1cf0e2f2f715450")
	})

	// Cadence type-checker error (`toStrig`) must be reported as a failure.
	t.Run("transaction that should fail", func(t *testing.T) {
		g.Transaction(`
import Debug from "../contracts/Debug.cdc"
transaction(user:Address) {
  prepare(acct: AuthAccount) {
    Debug.log(user.toStrig())
  }
}`).
			SignProposeAndPayAsService().
			RawAccountArgument("0x1cf0e2f2f715450").
			Test(t).
			AssertFailure("has no member `toStrig`") //assert failure with an error message. uses contains so you do not need to write entire message
	})

	// Capture the standard logger to verify full event printing.
	t.Run("Assert print events", func(t *testing.T) {
		var str bytes.Buffer
		log.SetOutput(&str)
		defer log.SetOutput(os.Stdout)
		g.TransactionFromFile("mint_tokens").
			SignProposeAndPayAsService().
			AccountArgument("first").
			UFix64Argument("100.0").RunPrintEventsFull()
		assert.Contains(t, str.String(), "A.0ae53cb6e3f42a79.FlowToken.MinterCreated")
	})

	// Field-filtered event printing must omit fields not in the filter.
	t.Run("Assert print events", func(t *testing.T) {
		var str bytes.Buffer
		log.SetOutput(&str)
		defer log.SetOutput(os.Stdout)
		g.TransactionFromFile("mint_tokens").
			SignProposeAndPayAsService().
			AccountArgument("first").
			UFix64Argument("100.0").
			RunPrintEvents(map[string][]string{"A.0ae53cb6e3f42a79.FlowToken.TokensDeposited": {"to"}})
		assert.NotContains(t, str.String(), "0x1cf0e2f2f715450")
	})
}
|
<gh_stars>0
package es.redmic.test.integration.common;
import java.io.IOException;
import java.io.InputStream;
/*-
* #%L
* API
* %%
* Copyright (C) 2019 REDMIC Project / Server
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.util.HashMap;
import java.util.Map;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.junit.Before;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.security.web.FilterChainProxy;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.context.WebApplicationContext;
import es.redmic.utils.httpclient.HttpClient;
@ActiveProfiles("test")
public abstract class IntegrationTestBase {

    /** Spring web context used to build the {@link MockMvc} instance. */
    @Autowired
    protected WebApplicationContext webApplicationContext;

    /** Security filter chain applied to every mocked request. */
    @Autowired
    protected FilterChainProxy springSecurityFilterChain;

    /** JSON (de)serializer shared by subclasses. */
    @Autowired
    protected ObjectMapper mapper;

    /** Entry point for issuing mocked HTTP requests from test subclasses. */
    protected MockMvc mockMvc;

    /** Plain HTTP client used to call the external OAuth token endpoint. */
    HttpClient client = new HttpClient();

    @Value("${oauth.server}")
    private String OAUTH_SERVER_PATH;

    // TEST USERS
    @Value("${test.user.ADMINISTRATOR}")
    private String ADMINISTRATOR_USER;

    @Value("${test.user.OAG}")
    private String OAG_USER;

    @Value("${test.user.COLLABORATOR}")
    private String COLLABORATOR_USER;

    @Value("${test.user.USER}")
    private String USER;

    @Value("${test.user.PASSWORD}")
    private String PASSWORD;

    @Value("${test.oauth.AUTHORIZATION}")
    private String AUTHORIZATION;

    /**
     * Builds a {@link MockMvc} wired with the application context and the
     * Spring Security filter chain before each test.
     */
    @Before
    public void setUp() {
        this.mockMvc = MockMvcBuilders.webAppContextSetup(webApplicationContext)
                .addFilters(springSecurityFilterChain).build();
    }

    /** @return an access token for the administrator test user. */
    protected String getTokenAdministratorUser() {
        return obtainAccessToken(ADMINISTRATOR_USER, PASSWORD);
    }

    /** @return an access token for the OAG test user. */
    protected String getTokenOAGUser() {
        return obtainAccessToken(OAG_USER, PASSWORD);
    }

    /** @return an access token for the collaborator test user. */
    protected String getTokenCollaboratorUser() {
        return obtainAccessToken(COLLABORATOR_USER, PASSWORD);
    }

    /** @return an access token for the plain test user. */
    protected String getTokenUser() {
        return obtainAccessToken(USER, PASSWORD);
    }

    /**
     * Performs a resource-owner-password grant against the OAuth server and
     * extracts the issued access token from the response body.
     *
     * @param username account to authenticate
     * @param password account password
     * @return the {@code access_token} value returned by the token endpoint
     */
    @SuppressWarnings("unchecked")
    private String obtainAccessToken(String username, String password) {
        MultiValueMap<String, String> form = new LinkedMultiValueMap<>();
        form.add("grant_type", "password");
        form.add("username", username);
        form.add("password", password);
        form.add("scope", "write");

        Map<String, String> requestHeaders = new HashMap<>();
        requestHeaders.put("Authorization", "Basic " + AUTHORIZATION);

        Map<String, String> tokenResponse = (Map<String, String>) client.post(
                OAUTH_SERVER_PATH + "/api/oauth/token", form, requestHeaders, HashMap.class);
        return tokenResponse.get("access_token");
    }

    /**
     * Deserializes a classpath JSON resource into an instance of the given type.
     *
     * @param resourcePath classpath location of the JSON fixture
     * @param resultClass target type for deserialization
     * @return the deserialized object
     * @throws IOException if the resource cannot be read or parsed
     */
    protected Object getModelToResource(String resourcePath, Class<?> resultClass)
            throws JsonParseException, JsonMappingException, IOException {
        InputStream stream = getClass().getResourceAsStream(resourcePath);
        return mapper.readValue(stream, resultClass);
    }
}
|
import mldag
def test_args():
    """Verify that every Python parameter kind can be wired into an MLDag node."""

    # foo exposes positional, defaulted, var-positional, keyword-only and
    # var-keyword parameters so the wiring below covers them all.
    @mldag.returns(['some_result'])
    def foo(a, a_default=None, *var_pos, b, b_default=None, **var_key):
        return a, a_default, var_pos, b, b_default, var_key

    dag = mldag.mldag.MLDag()
    foo_node = mldag.as_node(foo, name='a')

    # Route each MLDag input to the matching parameter of the node.
    dag['a'] >> foo_node['a']
    dag['a_default'] >> foo_node['a_default']
    dag['args'] >> foo_node['var_pos']
    dag['b'] >> foo_node['b']
    dag['kwargs'] >> foo_node['var_key']
    foo_node['some_result'] >> dag['result']

    outcome = dag.transform(1, 2, 3, 4, b=10, kwarg=10)
    assert outcome.outputs['result'] == (1, 2, (3, 4), 10, None, {'kwarg': 10})
def test_generic_1():
    # NOTE: foo's numpydoc docstring below is runtime-significant -- mldag
    # parses its "Returns" section to derive the node's output names
    # (res1, res2). Do not edit it cosmetically.
    def foo(arg1, arg2):
        """
        Parameters
        ----------
        arg1 : int
            some argument
        arg2 : some type

        Returns
        ----------
        res1 : int
        res2 : int
        """
        return arg1, arg2

    dag = mldag.MLDag()
    foo_node = mldag.as_node(foo, 'test_foo1')
    # Wire the dag's inputs into the node and the node's outputs back out.
    dag >> foo_node
    foo_node >> dag

    # Input names are suffixed with the node name; 'run_id' is always appended.
    assert dag.input_names == ['arg1_test_foo1', 'arg2_test_foo1', 'run_id']
    assert [i.name for i in dag.outputs] == ['res1', 'res2']
    assert dag.output_names == ['res1', 'res2']
    assert dag.transform(1, 2).outputs == {'res1': 1, 'res2': 2}
    assert dag.transform(3, 4).outputs == {'res1': 3, 'res2': 4}
def test_generic_2():
    """Output names supplied via the @returns decorator instead of a docstring."""

    @mldag.returns(['res1', 'res2'])
    def foo(arg1, arg2):
        return arg1, arg2

    pipeline = mldag.MLDag()
    node = mldag.as_node(foo, 'test_foo1')
    pipeline >> node
    node >> pipeline

    assert pipeline.input_names == ['arg1_test_foo1', 'arg2_test_foo1', 'run_id']
    assert [out.name for out in pipeline.outputs] == ['res1', 'res2']
    assert pipeline.output_names == ['res1', 'res2']
    assert pipeline.transform(1, 2).outputs == {'res1': 1, 'res2': 2}
    assert pipeline.transform(3, 4).outputs == {'res1': 3, 'res2': 4}
def test_generic_3():
    """With no output metadata at all, the dag falls back to a single 'result'."""

    def foo(arg1, arg2):
        return arg1, arg2

    pipeline = mldag.MLDag()
    node = mldag.as_node(foo, 'test_foo1')
    pipeline >> node
    node >> pipeline

    assert pipeline.input_names == ['arg1_test_foo1', 'arg2_test_foo1', 'run_id']
    assert [out.name for out in pipeline.outputs] == ['result']
    assert pipeline.transform(1, 2).outputs == {'result': (1, 2)}
    assert pipeline.transform(3, 4).outputs == {'result': (3, 4)}
def test_nested_1():
    """A whole MLDag can itself be used as a node of an outer MLDag."""

    def foo(arg1: int, arg2: int) -> tuple:
        return arg1, arg2

    inner = mldag.MLDag()
    inner_node = mldag.as_node(foo, 'f1')
    inner >> inner_node >> inner

    outer = mldag.MLDag()
    nested = mldag.as_node(inner, 'subdag')
    outer >> nested >> outer

    # Input names accumulate each enclosing node name as a suffix.
    assert outer.input_names == ['arg1_f1_subdag', 'arg2_f1_subdag', 'run_id']
    assert [out.name for out in outer.outputs] == ['result']
    assert outer.transform(1, 2).outputs == {'result': (1, 2)}
    assert outer.transform(3, 4).outputs == {'result': (3, 4)}
if __name__ == '__main__':
    # Run every test in the module; the original invoked only test_args(),
    # silently skipping the other four when executed as a script.
    test_args()
    test_generic_1()
    test_generic_2()
    test_generic_3()
    test_nested_1()
|
#!/bin/bash
# Configure and build the project out-of-tree in ./build (Debug configuration,
# with Eclipse CDT project files generated alongside the Unix makefiles).
set -e

# NOTE: the original guarded `rm -rf build/` behind `[[ ! -d build ]]`, which
# made the rm a dead no-op (it only ran when the directory did not exist).
# `mkdir -p` alone is sufficient and preserves incremental builds.
mkdir -p build
cd build

cmake .. -G"Eclipse CDT4 - Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug
make -j8
|
def decimalToBinary(num):
    """Print the binary representation of ``num`` to stdout (no newline).

    Works recursively: prints the higher-order bits first, then the current
    remainder. ``decimalToBinary(10)`` prints ``1010``.

    Parameters
    ----------
    num : int
        Non-negative integer to convert.

    Raises
    ------
    ValueError
        If ``num`` is negative. The original silently printed wrong digits
        for negative input because Python's ``%`` returns non-negative
        remainders (e.g. ``-5 % 2 == 1``).
    """
    if num < 0:
        raise ValueError("decimalToBinary() requires a non-negative integer")
    if num > 1:
        decimalToBinary(num // 2)
    print(num % 2, end='')


# Driver code
decimal = 10
decimalToBinary(decimal)
# Publish the FlowerBI packages for the version recorded in ../../.version.
# $(...) replaces the legacy backtick substitution; quoting guards the paths
# and the API key against word splitting.
VER=$(cat ../../.version)
export VER
dotnet nuget push "FlowerBI.Engine/nupkg/FlowerBI.Engine.$VER.nupkg" -k "$NUGET_API_KEY" -s https://api.nuget.org/v3/index.json
dotnet nuget push "FlowerBI.Tools/nupkg/FlowerBI.Tools.$VER.nupkg" -k "$NUGET_API_KEY" -s https://api.nuget.org/v3/index.json
|
<gh_stars>0
/**
* Copyright 2018-2020 Dynatrace LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dynatrace.openkit.util.json.objects;
import org.junit.Test;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
/**
 * Unit tests for {@code JSONStringValue}: verifies the type-predicate methods
 * ({@code isNull}, {@code isBoolean}, ...) classify a string value correctly,
 * and that the {@code fromString} factory round-trips its argument.
 */
public class JSONStringValueTest {

    @Test
    public void isNullReturnsFalse() {
        // then
        assertThat(JSONStringValue.fromString("").isNull(), is(false));
    }

    @Test
    public void isBooleanReturnsFalse() {
        // then
        assertThat(JSONStringValue.fromString("").isBoolean(), is(false));
    }

    @Test
    public void isNumberReturnsFalse() {
        // then
        assertThat(JSONStringValue.fromString("").isNumber(), is(false));
    }

    @Test
    public void isStringReturnsTrue() {
        // then
        assertThat(JSONStringValue.fromString("").isString(), is(true));
    }

    @Test
    public void isArrayReturnsFalse() {
        // then
        assertThat(JSONStringValue.fromString("").isArray(), is(false));
    }

    @Test
    public void isObjectReturnsFalse() {
        // then
        assertThat(JSONStringValue.fromString("").isObject(), is(false));
    }

    /** The factory maps a null argument to null rather than throwing. */
    @Test
    public void fromStringReturnsNullIfArgumentIsNull() {
        // then
        assertThat(JSONStringValue.fromString(null), is(nullValue()));
    }

    /** getValue() returns exactly the string passed to the factory. */
    @Test
    public void getValueGivesValueOfFactoryMethodArgument() {
        // then
        assertThat(JSONStringValue.fromString("").getValue(), is(equalTo("")));
        assertThat(JSONStringValue.fromString("a").getValue(), is(equalTo("a")));
        assertThat(JSONStringValue.fromString("foobar").getValue(), is(equalTo("foobar")));
    }
}
|
<reponame>msh9/shorturl
package main
// main is the program's entry point. It intentionally performs no work yet;
// the redundant bare `return` from the original has been dropped.
func main() {}
|
#!/bin/bash
# Container entrypoint: configures cron, rsyslog, sendmail, PHP and composer,
# then execs the container's command so it becomes PID 1.

[ "$DEBUG" = "true" ] && set -x

CRON_LOG=/var/log/cron.log

# Setup Magento cron
echo "* * * * * root /usr/local/bin/php ${MAGENTO_ROOT}/bin/magento cron:run | grep -v \"Ran jobs by schedule\" >> ${MAGENTO_ROOT}/var/log/magento.cron.log" > /etc/cron.d/magento

# Get rsyslog running for cron output
touch "$CRON_LOG"
echo "cron.* $CRON_LOG" > /etc/rsyslog.d/cron.conf
service rsyslog start

# Configure Sendmail if required (POSIX `=` replaces the bash-only `==`)
if [ "$ENABLE_SENDMAIL" = "true" ]; then
    /etc/init.d/sendmail start
fi

# Substitute in php.ini values ([ -n ... ] replaces the legacy [ ! -z ... ])
[ -n "${PHP_MEMORY_LIMIT}" ] && sed -i "s/!PHP_MEMORY_LIMIT!/${PHP_MEMORY_LIMIT}/" /usr/local/etc/php/conf.d/zz-magento.ini
[ -n "${UPLOAD_MAX_FILESIZE}" ] && sed -i "s/!UPLOAD_MAX_FILESIZE!/${UPLOAD_MAX_FILESIZE}/" /usr/local/etc/php/conf.d/zz-magento.ini

[ "$PHP_ENABLE_XDEBUG" = "true" ] && \
    docker-php-ext-enable xdebug && \
    echo "Xdebug is enabled"

# Configure composer (credentials quoted in case they contain shell metacharacters)
[ -n "${COMPOSER_GITHUB_TOKEN}" ] && \
    composer config --global github-oauth.github.com "$COMPOSER_GITHUB_TOKEN"

[ -n "${COMPOSER_MAGENTO_USERNAME}" ] && \
    composer config --global http-basic.repo.magento.com \
        "$COMPOSER_MAGENTO_USERNAME" "$COMPOSER_MAGENTO_PASSWORD"

exec "$@"
|
package io.opensphere.core.control;
import java.awt.Point;
import java.awt.event.InputEvent;
import java.awt.event.KeyEvent;
import java.awt.event.MouseEvent;
import java.awt.event.MouseWheelEvent;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import io.opensphere.core.control.PickListener.PickEvent;
import io.opensphere.core.geometry.Geometry;
import io.opensphere.core.util.MathUtil;
import io.opensphere.core.util.collections.New;
import io.opensphere.core.util.collections.WeakHashSet;
import io.opensphere.core.util.lang.Pair;
/**
 * Implementation of {@link ControlContext}.
 *
 * Maintains weak collections of key/mouse bindings and dispatches AWT input
 * events to them. Dispatch is generally two-pass: bindings whose listeners
 * must be targeted are offered the event first; un-targeted bindings only see
 * it if no targeted binding consumed it.
 */
@SuppressWarnings("PMD.GodClass")
class ControlContextImpl implements ControlContext
{
    /**
     * The bindings that are activated during a compound event.
     */
    private final Collection<Binding> myActivatedBindings = Collections.synchronizedCollection(new WeakHashSet<Binding>());

    /**
     * Listeners for control events. Using weak references to the listeners
     * requires the registering class to maintain a reference to the listener.
     */
    private final Map<BoundEventListener, BindingsToListener> myListeners = Collections
            .synchronizedMap(New.<BoundEventListener, BindingsToListener>weakMap());

    /** Bindings for mouse events. */
    private final Collection<MouseBindingAbs> myMouseBindings = Collections
            .synchronizedCollection(new WeakHashSet<MouseBindingAbs>());

    /** The context's name, it must be unique among contexts. */
    private final String myName;

    /**
     * Listeners for pick events. Using weak references to the listeners
     * requires the registering class to maintain a reference to the listener.
     */
    private final Collection<PickListener> myPickListeners = Collections.synchronizedCollection(new WeakHashSet<PickListener>());

    /** Bindings for key press and release events. */
    private final Collection<PressedReleasedKeyBindAbs> myPressedReleasedKeyBindings = Collections
            .synchronizedCollection(new WeakHashSet<PressedReleasedKeyBindAbs>());

    /** Bindings for key typed events. */
    private final Collection<KeyTypedBind> myTypedKeyBindings = Collections
            .synchronizedCollection(new WeakHashSet<KeyTypedBind>());

    /**
     * Construct a ControlContext.
     *
     * @param name Unique name of this context.
     */
    ControlContextImpl(String name)
    {
        myName = name;
    }

    /**
     * {@inheritDoc}
     *
     * Only key-pressed and default-mouse bindings may back a compound
     * listener; any other binding type is rejected with an exception.
     */
    @Override
    public void addListener(CompoundEventListener listener, DefaultBinding... bindings)
    {
        List<Binding> converted = New.list();
        for (DefaultBinding defaultBinding : bindings)
        {
            if (defaultBinding instanceof DefaultKeyPressedBinding)
            {
                CompoundPressedReleasedBind compoundBind = new CompoundPressedReleasedBind(
                        (DefaultKeyPressedBinding)defaultBinding, listener, this);
                converted.add(compoundBind);
                myPressedReleasedKeyBindings.add(compoundBind);
            }
            else if (defaultBinding instanceof DefaultMouseBinding)
            {
                CompoundMouseBind mouseBind = new CompoundMouseBind((DefaultMouseBinding)defaultBinding, listener, this);
                converted.add(mouseBind);
                myMouseBindings.add(mouseBind);
            }
            else
            {
                throw new IllegalArgumentException("Bindings of type " + defaultBinding.getClass().getName()
                        + " are not supported with CompoundEventListeners");
            }
        }
        BindingsToListener bindingToListener = new BindingsToListener(this, listener,
                converted.toArray(new Binding[converted.size()]));
        myListeners.put(listener, bindingToListener);
    }

    /**
     * {@inheritDoc}
     *
     * Note: the mouse-wheel case is tested before the general mouse case
     * because the wheel binding is a more specific variant. Unlike the
     * compound overload above, unrecognized binding types are silently
     * ignored here (there is no trailing else branch).
     */
    @Override
    public void addListener(DiscreteEventListener listener, DefaultBinding... bindings)
    {
        List<Binding> converted = New.list();
        for (DefaultBinding defaultBinding : bindings)
        {
            if (defaultBinding instanceof DefaultKeyPressedBinding)
            {
                DiscretePressedReleasedBind keybinding = new DiscretePressedReleasedBind((DefaultKeyPressedBinding)defaultBinding,
                        listener, this);
                converted.add(keybinding);
                myPressedReleasedKeyBindings.add(keybinding);
            }
            else if (defaultBinding instanceof DefaultKeyTypedBinding)
            {
                KeyTypedBind typedBind = new KeyTypedBind((DefaultKeyTypedBinding)defaultBinding, listener, this);
                converted.add(typedBind);
                myTypedKeyBindings.add(typedBind);
            }
            else if (defaultBinding instanceof DefaultMouseWheelBinding)
            {
                DiscreteMouseBind mouseBind = new MouseWheelBind((DefaultMouseWheelBinding)defaultBinding, listener, this);
                converted.add(mouseBind);
                myMouseBindings.add(mouseBind);
            }
            else if (defaultBinding instanceof DefaultMouseBinding)
            {
                DiscreteMouseBind mouseBind = new DiscreteMouseBind((DefaultMouseBinding)defaultBinding, listener, this);
                converted.add(mouseBind);
                myMouseBindings.add(mouseBind);
            }
        }
        BindingsToListener bindingToListener = new BindingsToListener(this, listener,
                converted.toArray(new Binding[converted.size()]));
        myListeners.put(listener, bindingToListener);
    }

    /**
     * Add a mouse binding to my list of bindings.
     *
     * @param bind Binding to add.
     */
    public void addMouseBinding(MouseBindingAbs bind)
    {
        synchronized (myMouseBindings)
        {
            myMouseBindings.add(bind);
        }
    }

    @Override
    public void addPickListener(PickListener listen)
    {
        myPickListeners.add(listen);
    }

    /**
     * {@inheritDoc}
     *
     * Insertion order of categories is preserved (LinkedHashMap).
     */
    @Override
    public Map<String, List<BindingsToListener>> getEventListenersByCategory()
    {
        LinkedHashMap<String, List<BindingsToListener>> listenersByCategory = new LinkedHashMap<>();
        for (BindingsToListener btl : getBindingsToListeners())
        {
            String category = btl.getListener().getCategory();
            if (listenersByCategory.containsKey(category))
            {
                List<BindingsToListener> insertedList = listenersByCategory.get(category);
                insertedList.add(btl);
            }
            else
            {
                List<BindingsToListener> toInsert = New.list();
                toInsert.add(btl);
                listenersByCategory.put(btl.getListener().getCategory(), toInsert);
            }
        }
        return listenersByCategory;
    }

    @Override
    public String getName()
    {
        return myName;
    }

    /**
     * {@inheritDoc}
     *
     * Two-pass dispatch: targeted bindings first, then un-targeted ones if
     * the event was not consumed. Matching bindings are remembered in
     * {@code myActivatedBindings} until the corresponding release.
     */
    @Override
    public void keyPressed(KeyEvent e)
    {
        List<PressedReleasedKeyBindAbs> bindings = New.list();
        synchronized (myPressedReleasedKeyBindings)
        {
            bindings.addAll(myPressedReleasedKeyBindings);
        }
        List<PressedReleasedKeyBindAbs> untargetedBinds = New.list();
        for (PressedReleasedKeyBindAbs bind : bindings)
        {
            if (keyBindIsTarget(bind, e, untargetedBinds))
            {
                bind.keyPressed(e);
                myActivatedBindings.add(bind);
            }
            if (e.isConsumed())
            {
                return;
            }
        }
        for (PressedReleasedKeyBindAbs bind : untargetedBinds)
        {
            bind.keyPressed(e);
            myActivatedBindings.add(bind);
            if (e.isConsumed())
            {
                return;
            }
        }
    }

    /**
     * {@inheritDoc}
     *
     * Mirror of {@link #keyPressed(KeyEvent)}: notifies matching bindings and
     * removes them from the activated set.
     */
    @Override
    public void keyReleased(KeyEvent e)
    {
        List<PressedReleasedKeyBindAbs> relBindings = New.list();
        synchronized (myPressedReleasedKeyBindings)
        {
            relBindings.addAll(myPressedReleasedKeyBindings);
        }
        List<PressedReleasedKeyBindAbs> untargetedBinds = New.list();
        for (PressedReleasedKeyBindAbs bind : relBindings)
        {
            if (keyBindIsTarget(bind, e, untargetedBinds))
            {
                bind.keyReleased(e);
                myActivatedBindings.remove(bind);
            }
            if (e.isConsumed())
            {
                return;
            }
        }
        for (PressedReleasedKeyBindAbs prBind : untargetedBinds)
        {
            prBind.keyReleased(e);
            myActivatedBindings.remove(prBind);
            if (e.isConsumed())
            {
                return;
            }
        }
    }

    /**
     * {@inheritDoc}
     *
     * Same two-pass scheme as the pressed/released handlers, but matched on
     * key character plus extended modifiers.
     */
    @Override
    public void keyTyped(KeyEvent e)
    {
        char keyChar = e.getKeyChar();
        int modifiersEx = e.getModifiersEx();
        List<KeyTypedBind> bindings = New.list();
        synchronized (myTypedKeyBindings)
        {
            bindings.addAll(myTypedKeyBindings);
        }
        List<KeyTypedBind> untargetedBinds = New.list();
        for (KeyTypedBind bind : bindings)
        {
            if (bind.getKeyChar() == keyChar && bind.getModifiersEx() == modifiersEx)
            {
                if (!bind.getListener().mustBeTargeted())
                {
                    untargetedBinds.add(bind);
                }
                else if (bind.getListener().isTargeted())
                {
                    bind.keyTyped(e);
                }
            }
            if (e.isConsumed())
            {
                return;
            }
        }
        for (KeyTypedBind ktBind : untargetedBinds)
        {
            ktBind.keyTyped(e);
            if (e.isConsumed())
            {
                return;
            }
        }
    }

    @Override
    public void mouseClicked(MouseEvent e)
    {
        performMouseBindAction(e, (bind, event) -> ((MouseBindingAbs)bind).mouseClicked((MouseEvent)event), false);
    }

    @Override
    public void mouseDragged(MouseEvent e)
    {
        performMouseBindAction(e, (bind, event) -> ((MouseBindingAbs)bind).mouseDragged((MouseEvent)event), false);
    }

    @Override
    public void mouseEntered(MouseEvent e)
    {
        performMouseBindAction(e, (bind, event) -> ((MouseBindingAbs)bind).mouseEntered((MouseEvent)event), true);
    }

    @Override
    public void mouseExited(MouseEvent e)
    {
        performMouseBindAction(e, (bind, event) -> ((MouseBindingAbs)bind).mouseExited((MouseEvent)event), true);
    }

    @Override
    public void mouseMoved(MouseEvent e)
    {
        performMouseBindAction(e, (bind, event) -> ((MouseBindingAbs)bind).mouseMoved((MouseEvent)event), false);
    }

    /**
     * {@inheritDoc}
     *
     * Dispatches the press and marks the binding active so later events in
     * the gesture can account for its modifiers (see
     * {@link #getApplicableModifiers(int)}).
     */
    @Override
    public void mousePressed(MouseEvent e)
    {
        MouseBindActionWorker worker = (bind, event) ->
        {
            ((MouseBindingAbs)bind).mousePressed((MouseEvent)event);
            myActivatedBindings.add(bind);
        };
        performMouseBindAction(e, worker, false);
    }

    @Override
    public void mouseReleased(MouseEvent e)
    {
        MouseBindActionWorker worker = (bind, event) ->
        {
            ((MouseBindingAbs)bind).mouseReleased((MouseEvent)event);
            myActivatedBindings.remove(bind);
        };
        performMouseBindAction(e, worker, true);
    }

    @Override
    public void mouseWheelMoved(MouseWheelEvent e)
    {
        // NOTE(review): this worker invokes mouseReleased() and removes the
        // binding from the activated set, duplicating the mouseReleased worker
        // above rather than forwarding a wheel-specific callback. This looks
        // like a copy/paste -- confirm against MouseBindingAbs whether wheel
        // events are intentionally routed through mouseReleased() or a
        // dedicated method is missing.
        MouseBindActionWorker worker = (bind, event) ->
        {
            ((MouseBindingAbs)bind).mouseReleased((MouseEvent)event);
            myActivatedBindings.remove(bind);
        };
        performMouseBindAction(e, worker, false);
    }

    @Override
    public void notifyPicked(Geometry pickedGeom, Point position)
    {
        PickEvent evt = new PickEvent(pickedGeom, position);
        synchronized (myPickListeners)
        {
            for (PickListener listen : myPickListeners)
            {
                listen.handlePickEvent(evt);
            }
        }
    }

    /**
     * {@inheritDoc}
     *
     * Also purges every binding that was registered for the listener from the
     * activated, mouse, key press/release and key typed collections.
     */
    @Override
    public void removeListener(BoundEventListener listener)
    {
        if (listener == null)
        {
            return;
        }
        BindingsToListener btl = myListeners.remove(listener);
        if (btl != null)
        {
            Collection<Binding> binds = btl.getBindings();
            myActivatedBindings.removeAll(binds);
            myMouseBindings.removeAll(binds);
            myPressedReleasedKeyBindings.removeAll(binds);
            myTypedKeyBindings.removeAll(binds);
        }
    }

    @Override
    public void removeListeners(Collection<? extends BoundEventListener> listeners)
    {
        for (BoundEventListener listener : listeners)
        {
            removeListener(listener);
        }
    }

    /**
     * Remove a mouse binding from my list of bindings.
     *
     * @param bind Binding to remove.
     */
    public void removeMouseBinding(Binding bind)
    {
        synchronized (myMouseBindings)
        {
            myMouseBindings.remove(bind);
        }
    }

    @Override
    public void removePickListener(PickListener listen)
    {
        myPickListeners.remove(listen);
    }

    /**
     * Get the Listener to bindings associations.
     *
     * @return Listener to bindings associations (a defensive snapshot).
     */
    Collection<BindingsToListener> getBindingsToListeners()
    {
        synchronized (myListeners)
        {
            return New.list(myListeners.values());
        }
    }

    /**
     * Get all key press and release binding for the context.
     *
     * @return key press and release binding for the context.
     */
    Collection<PressedReleasedKeyBindAbs> getPressedReleasedKeyBindings()
    {
        return myPressedReleasedKeyBindings;
    }

    /**
     * Get all key typed binding for the context.
     *
     * @return key typed binding for the context.
     */
    Collection<KeyTypedBind> getTypedKeyBindings()
    {
        return myTypedKeyBindings;
    }

    /**
     * Get the modifier bits that are not already in use by other activated
     * bindings.
     *
     * @param incomingModifiers The modifiers for the incoming event.
     * @return The new modifiers.
     */
    private int getApplicableModifiers(int incomingModifiers)
    {
        int usedModifiers = 0;
        for (Binding binding : myActivatedBindings)
        {
            if (binding instanceof MouseBindingAbs)
            {
                usedModifiers |= ((MouseBindingAbs)binding).getModifiersEx();
            }
        }
        // Non-carrying binary mask subtract. For example: 100101 - 000111 =
        // 100000. This removes modifiers that are currently being used by an
        // action from the modifiers used to evaluate incoming events.
        return usedModifiers & incomingModifiers ^ incomingModifiers;
    }

    /**
     * Get a copy of the mouse bindings, partitioned by whether the listener
     * must be targeted.
     *
     * @return a pair whose first element is the targeted bindings sorted by
     *         descending target priority and whose second element is the
     *         un-targeted bindings.
     */
    private Pair<Collection<MouseBindingAbs>, Collection<MouseBindingAbs>> getMouseBindings()
    {
        List<MouseBindingAbs> untargetedBindings = New.list(myMouseBindings.size());
        List<MouseBindingAbs> targetedBindings = New.list(myMouseBindings.size());
        synchronized (myMouseBindings)
        {
            for (MouseBindingAbs bind : myMouseBindings)
            {
                if (bind.getListener().mustBeTargeted())
                {
                    targetedBindings.add(bind);
                }
                else
                {
                    untargetedBindings.add(bind);
                }
            }
        }
        Collections.sort(targetedBindings, ControlContextImpl::compare);
        return new Pair<>(targetedBindings, untargetedBindings);
    }

    /**
     * Compares mouse bindings based on listener's target priority, in
     * descending order (higher priority sorts first).
     *
     * @param o1 first mouse binding
     * @param o2 second mouse binding
     * @return a negative integer, zero, or a positive integer as o1's
     *         priority is greater than, equal to, or less than o2's.
     */
    private static int compare(MouseBindingAbs o1, MouseBindingAbs o2)
    {
        int pri1 = o1.getListener().getTargetPriority();
        int pri2 = o2.getListener().getTargetPriority();
        return pri1 > pri2 ? -1 : pri1 == pri2 ? 0 : 1;
    }

    /**
     * Determine whether the binding is the target for the event. Bindings which
     * have no pick geometries will be added to the un-targeted binds list.
     * Un-targeted binds expect to receive the event whenever it is not consumed
     * by a targeted bind.
     *
     * @param bind The binding to check.
     * @param event The event.
     * @param untargetedBinds The list of bindings which cannot be targeted.
     * @return true when the given bindings is the target of the event.
     */
    private boolean keyBindIsTarget(PressedReleasedKeyBindAbs bind, KeyEvent event,
            List<PressedReleasedKeyBindAbs> untargetedBinds)
    {
        // if the key is a modifier, ignore modifiers.
        boolean ignoreModifiers = bind.getKeyCode() == KeyEvent.VK_SHIFT || bind.getKeyCode() == KeyEvent.VK_ALT
                || bind.getKeyCode() == KeyEvent.VK_CONTROL;
        // TODO why do we use getAppicableModifiers()?
        int modifiersEx = getApplicableModifiers(event.getModifiersEx());
        if (bind.getKeyCode() == event.getKeyCode() && (ignoreModifiers || bind.getModifiersEx() == modifiersEx))
        {
            if (!bind.getListener().mustBeTargeted())
            {
                untargetedBinds.add(bind);
            }
            else if (bind.getListener().isTargeted())
            {
                return true;
            }
        }
        return false;
    }

    /**
     * Check to see if the binding is applicable for the event.
     *
     * @param bind The binding to check.
     * @param event The event to check against.
     * @param ignoreModifiers when true, ignore the event modifiers when
     *            determining binding applicability.
     * @return true when the the binding is applicable to the event.
     */
    private boolean mouseBindIsApplicable(MouseBindingAbs bind, MouseEvent event, boolean ignoreModifiers)
    {
        boolean eventMatches = false;
        // if this is a release or a drag, compound mouse event listeners want
        // this event even though they only registered for the press.
        if ((event.getID() == MouseEvent.MOUSE_RELEASED || event.getID() == MouseEvent.MOUSE_DRAGGED)
                && bind.getListener() instanceof CompoundEventListener && bind.getEventId() == MouseEvent.MOUSE_PRESSED)
        {
            eventMatches = true;
        }
        else
        {
            eventMatches = bind.getEventId() == event.getID();
        }
        boolean modifierMatches = bind.getModifiersEx() == event.getModifiersEx() || ignoreModifiers;
        return eventMatches && modifierMatches;
    }

    /**
     * Helper method to perform an action against a particular binding if the
     * binding is applicable for the event.
     *
     * @param bind The binding to perform the action against.
     * @param event The event associated with the action.
     * @param worker The worker which will perform the action.
     * @param ignoreModifiers when true, ignore the event modifiers when
     *            determining binding applicability.
     */
    private void performActionIfApplicable(MouseBindingAbs bind, MouseEvent event, MouseBindActionWorker worker,
            boolean ignoreModifiers)
    {
        if (bind.getListener().mustBeTargeted() && !bind.getListener().isTargeted())
        {
            return;
        }
        if (event instanceof MouseWheelEvent)
        {
            // Wheel events only ever match MouseWheelBind instances whose
            // configured direction has the same sign as the rotation.
            if (bind instanceof MouseWheelBind)
            {
                final MouseWheelBind mwBind = (MouseWheelBind)bind;
                if (mwBind.getEventId() == event.getID() && mwBind.getModifiersEx() == event.getModifiersEx()
                        && MathUtil.sameSign(mwBind.getWheelDirection(), ((MouseWheelEvent)event).getWheelRotation()))
                {
                    worker.doAction(bind, event);
                    return;
                }
            }
        }
        else if (mouseBindIsApplicable(bind, event, ignoreModifiers))
        {
            worker.doAction(bind, event);
            if (event.isConsumed())
            {
                return;
            }
        }
    }

    /**
     * Find the appropriate binding for the event and have the worker execute
     * the correct for the event against the binding.
     *
     * @param event The event which has occurred.
     * @param worker The worker which will execute the action.
     * @param ignoreModifiers When true, ignore the event modifiers when
     *            choosing the binding to execute against.
     */
    private void performMouseBindAction(MouseEvent event, MouseBindActionWorker worker, boolean ignoreModifiers)
    {
        Pair<Collection<MouseBindingAbs>, Collection<MouseBindingAbs>> bindings = getMouseBindings();
        for (MouseBindingAbs bind : bindings.getFirstObject())
        {
            performActionIfApplicable(bind, event, worker, ignoreModifiers);
            if (event.isConsumed())
            {
                return;
            }
        }
        for (MouseBindingAbs bind : bindings.getSecondObject())
        {
            performActionIfApplicable(bind, event, worker, ignoreModifiers);
            if (event.isConsumed())
            {
                return;
            }
        }
    }

    /** A worker to perform a particular event action against a binding. */
    @FunctionalInterface
    interface MouseBindActionWorker
    {
        /**
         * Perform the action on the binding.
         *
         * @param bind The binding for which the action is being executed.
         * @param event The event associated with the binding.
         */
        void doAction(Binding bind, InputEvent event);
    }
}
|
import { expect } from 'chai';
import { mount } from 'enzyme';
import React from 'react';
import sinon from 'sinon';
import TakeSnapshot from '~/linodes/linode/backups/components/TakeSnapshot';
import { expectDispatchOrStoreErrors, expectRequest } from '@/common';
import { testLinode } from '@/data/linodes';
describe('linodes/linode/backups/components/TakeSnapshot', () => {
const sandbox = sinon.sandbox.create();
const dispatch = sandbox.spy();
afterEach(() => {
dispatch.reset();
sandbox.restore();
});
it('does not show take snapshot button for an auto backup', () => {
const page = mount(
<TakeSnapshot
dispatch={dispatch}
linode={testLinode}
/>
);
const takeSnapshot = page.find('button[name="takeSnapshot"]');
expect(takeSnapshot.length).to.equal(0);
});
it('should dispatch a snapshot request', async () => {
const page = mount(
<TakeSnapshot
dispatch={dispatch}
linode={testLinode}
/>
);
dispatch.reset();
await page.find('Form').props().onSubmit({ preventDefault() {} });
expect(dispatch.callCount).to.equal(1);
await expectDispatchOrStoreErrors(dispatch.firstCall.args[0], [
([fn]) => expectRequest(fn, '/linode/instances/1234/backups', { method: 'POST' }),
], 1);
});
});
|
<filename>hyperparameter_hunter/feature_engineering.py
"""This module is still in an experimental stage and should not be assumed to be "reliable", or
"useful", or anything else that might be expected of a normal module"""
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.keys.hashing import make_hash_sha256
from hyperparameter_hunter.utils.boltons_utils import remap, default_visit, default_enter
from hyperparameter_hunter.utils.general_utils import subdict
##################################################
# Import Miscellaneous Assets
##################################################
import ast
from contextlib import suppress
from inspect import getsource
import pandas as pd
from typing import List, Callable, Dict, Union
##################################################
# Global Variables
##################################################
# Sentinel class used to mark "no value supplied" without colliding with None.
EMPTY_SENTINEL = type("EMPTY_SENTINEL", tuple(), {})

# Alias: mapping of dataset name -> DataFrame.
DFDict = Dict[str, pd.DataFrame]
# Recursive mapping describing dataset descent; leaf values are None.
DescendantsType = Dict[str, Union["DescendantsType", None]]

# Standard per-split dataset names. The "*_data" name in each triple couples
# the corresponding "*_inputs" and "*_targets" datasets.
N_DATASET_TRAIN = ["train_data", "train_inputs", "train_targets"]
N_DATASET_VALIDATION = ["validation_data", "validation_inputs", "validation_targets"]
N_DATASET_HOLDOUT = ["holdout_data", "holdout_inputs", "holdout_targets"]
N_DATASET_TEST = ["test_inputs"]

# Merged dataset names: "all_*" and "non_train_*" combine multiple splits.
N_DATASET_ALL = ["all_data", "all_inputs", "all_targets"]
N_DATASET_NON_TRAIN = ["non_train_data", "non_train_inputs", "non_train_targets"]

STANDARD_DATASET_NAMES = N_DATASET_TRAIN + N_DATASET_VALIDATION + N_DATASET_HOLDOUT + N_DATASET_TEST
MERGED_DATASET_NAMES = N_DATASET_ALL + N_DATASET_NON_TRAIN
# Triples that may appear as coupled ("data" + "inputs" + "targets") groups.
COUPLED_DATASET_CANDIDATES = [
    N_DATASET_TRAIN,
    N_DATASET_VALIDATION,
    N_DATASET_HOLDOUT,
    N_DATASET_ALL,
    N_DATASET_NON_TRAIN,
]
class DatasetNameReport:
    def __init__(self, params: List[str], stage: str):
        """Characterize the relationships between the dataset names `params`

        Parameters
        ----------
        params: List[str]
            Dataset names requested by a feature engineering step callable. Must be a subset of
            {"train_data", "train_inputs", "train_targets", "validation_data", "validation_inputs",
            "validation_targets", "holdout_data", "holdout_inputs", "holdout_targets",
            "test_inputs", "all_data", "all_inputs", "all_targets", "non_train_data",
            "non_train_inputs", "non_train_targets"}
        stage: String in {"pre_cv", "intra_cv"}
            Feature engineering stage during which the datasets `params` are requested

        Attributes
        ----------
        merged_datasets: List[tuple]
            Tuples of strings denoting paths to datasets that represent a merge between multiple
            datasets. Merged datasets are those prefixed with either "all" or "non_train". These
            paths are locations in `descendants`
        coupled_datasets: List[tuple]
            Tuples of strings denoting paths to datasets that represent a coupling of "inputs" and
            "targets" datasets. Coupled datasets are those suffixed with "data". These paths are
            locations in `descendants`, and the values at each path should be a dict containing keys
            with "inputs" and "targets" suffixes
        leaves: Dict[tuple, str]
            Mapping of full path tuples in `descendants` to their leaf values. Tuple paths represent
            the steps necessary to reach the standard dataset leaf value in `descendants` by
            traversing merged and coupled datasets. Values in `leaves` should be identical to the
            last element of the corresponding tuple key
        descendants: DescendantsType
            Nested dict in which all keys are dataset name strings, and all leaf values are `None`.
            Represents the structure of the requested dataset names, traversing over merged and
            coupled datasets (if necessary) in order to reach the standard dataset leaves"""
        self.params: List[str] = params
        self.stage: str = stage
        self.merged_datasets: List[tuple] = []
        self.coupled_datasets: List[tuple] = []
        self.leaves: Dict[tuple, str] = dict()
        # `remap` walks the initially flat {name: name} dict. `_enter` expands merged/coupled
        # names into nested sub-dicts (recording their paths as a side effect), and `_visit`
        # turns the remaining `name: name` pairs into `name: None` leaves
        self.descendants: DescendantsType = remap(
            {_: _ for _ in self.params}, visit=self._visit, enter=self._enter, use_registry=False
        )

    @staticmethod
    def _visit(path, key, value):
        """If `key` == `value`, return tuple of (`key`, None). Else `default_visit`"""
        # A `key == value` pair is an un-expanded standard dataset; make it a `None` leaf
        if key and key == value:
            return (key, None)
        return default_visit(path, key, value)

    def _enter(self, path, key, value):
        """Update contents of `merged_datasets`, `coupled_datasets`, and `leaves` and direct
        traversal of the sub-datasets that compose the current dataset name"""
        #################### Merged Datasets ####################
        # "all_*"/"non_train_*" names expand into the standard names they merge for `stage`
        if value in MERGED_DATASET_NAMES:
            self.merged_datasets.append(path + (key,))
            _names_for_merge = names_for_merge(value, self.stage)
            # Returning (new_parent, items) directs `remap` to recurse into these children
            return dict(), zip(_names_for_merge, _names_for_merge)
        #################### Coupled Datasets ####################
        # "<x>_data" names expand into their "<x>_inputs"/"<x>_targets" components
        for coupled_candidate in COUPLED_DATASET_CANDIDATES:
            if value == coupled_candidate[0]:
                self.coupled_datasets.append(path + (key,))
                return dict(), zip(coupled_candidate[1:], coupled_candidate[1:])
        #################### Leaf Datasets ####################
        # Anything else is a standard dataset; record its full path for duplicate detection
        if key:
            self.leaves[path + (key,)] = key
        return default_enter(path, key, value)
def names_for_merge(merge_to: str, stage: str) -> List[str]:
    """List the standard dataset names eligible for inclusion in a merged DataFrame of type
    `merge_to` during feature engineering stage `stage`

    Parameters
    ----------
    merge_to: String
        Type of merged dataframe to produce. Should be one of the following: {"all_data",
        "all_inputs", "all_targets", "non_train_data", "non_train_inputs", "non_train_targets"}
    stage: String in {"pre_cv", "intra_cv"}
        Feature engineering stage for which the merged dataframe is requested. The only
        difference between the two options is that `stage="pre_cv"` never yields "validation"
        names, since validation data does not exist before cross-validation begins, whereas
        `stage="intra_cv"` includes the appropriate "validation" names

    Returns
    -------
    names: List
        Subset of {"train_data", "train_inputs", "train_targets", "validation_data",
        "validation_inputs", "validation_targets", "holdout_data", "holdout_inputs",
        "holdout_targets", "test_inputs"}

    Examples
    --------
    >>> names_for_merge("all_data", "intra_cv")
    ['train_data', 'validation_data', 'holdout_data']
    >>> names_for_merge("all_inputs", "intra_cv")
    ['train_inputs', 'validation_inputs', 'holdout_inputs', 'test_inputs']
    >>> names_for_merge("all_targets", "intra_cv")
    ['train_targets', 'validation_targets', 'holdout_targets']
    >>> names_for_merge("all_data", "pre_cv")
    ['train_data', 'holdout_data']
    >>> names_for_merge("all_inputs", "pre_cv")
    ['train_inputs', 'holdout_inputs', 'test_inputs']
    >>> names_for_merge("all_targets", "pre_cv")
    ['train_targets', 'holdout_targets']
    >>> names_for_merge("non_train_data", "intra_cv")
    ['validation_data', 'holdout_data']
    >>> names_for_merge("non_train_inputs", "intra_cv")
    ['validation_inputs', 'holdout_inputs', 'test_inputs']
    >>> names_for_merge("non_train_targets", "intra_cv")
    ['validation_targets', 'holdout_targets']
    >>> names_for_merge("non_train_data", "pre_cv")
    ['holdout_data']
    >>> names_for_merge("non_train_inputs", "pre_cv")
    ['holdout_inputs', 'test_inputs']
    >>> names_for_merge("non_train_targets", "pre_cv")
    ['holdout_targets']"""
    # "non_train_inputs" -> merge_type="non_train", data_group="inputs"
    merge_type, data_group = merge_to.rsplit("_", 1)

    def _eligible(name: str) -> bool:
        """One-pass predicate combining the data-group, stage, and merge-type filters"""
        if not name.endswith(data_group):
            return False
        # Validation data does not exist before cross-validation starts
        if stage == "pre_cv" and name in N_DATASET_VALIDATION:
            return False
        if merge_type == "non_train" and name.startswith("train"):
            return False
        return True

    return [name for name in STANDARD_DATASET_NAMES if _eligible(name)]
def merge_dfs(merge_to: str, stage: str, dfs: "DFDict") -> pd.DataFrame:
    """Build a multi-indexed DataFrame from the members of `dfs` selected by `merge_to` and
    `stage`. This is the opposite of `split_merged_df`

    Parameters
    ----------
    merge_to: String
        Type of `merged_df` to produce. Should be one of the following: {"all_data", "all_inputs",
        "all_targets", "non_train_data", "non_train_inputs", "non_train_targets"}
    stage: String in {"pre_cv", "intra_cv"}
        Feature engineering stage for which `merged_df` is requested
    dfs: Dict
        Mapping of dataset names to their DataFrame values. Keys in `dfs` should be a subset of
        {"train_data", "train_inputs", "train_targets", "validation_data", "validation_inputs",
        "validation_targets", "holdout_data", "holdout_inputs", "holdout_targets", "test_inputs"}

    Returns
    -------
    merged_df: pd.DataFrame
        Multi-indexed DataFrame whose first index level is the name of the source dataset in
        `dfs`, and whose remaining index level(s) are the original index(es) of that dataset

    Raises
    ------
    ValueError
        If every DataFrame that would contribute to `merged_df` is None. For example, requesting
        `merge_to="non_train_targets"` during `stage="pre_cv"` with no holdout dataset available
        leaves nothing to merge, so no DataFrame can be produced

    See Also
    --------
    names_for_merge: Describes how `stage` values differ"""
    # Keep only the eligible names that actually have data behind them
    df_names = [name for name in names_for_merge(merge_to, stage) if dfs.get(name, None) is not None]
    try:
        # `keys` labels each source frame, producing the first level of the MultiIndex
        return pd.concat([dfs[name] for name in df_names], keys=df_names)
    except ValueError as _ex:
        raise ValueError(f"Merging {df_names} into {merge_to} does not produce DataFrame") from _ex
def split_merged_df(merged_df: pd.DataFrame) -> "DFDict":
    """Break a multi-indexed DataFrame into a dict keyed by its primary index values, where each
    value is a DataFrame with one fewer index dimension. This is the opposite of `merge_dfs`

    Parameters
    ----------
    merged_df: pd.DataFrame
        Multi-indexed DataFrame of the form returned by :func:`merge_dfs` to split into the
        separate DataFrames named by the primary indexes of `merged_df`

    Returns
    -------
    dfs: Dict
        Mapping of dataset names to their DataFrame values. Keys in `dfs` will be a subset of
        {"train_data", "train_inputs", "train_targets", "validation_data", "validation_inputs",
        "validation_targets", "holdout_data", "holdout_inputs", "holdout_targets", "test_inputs"}
        containing only those values that are also primary indexes in `merged_df`"""
    # `.copy()` detaches each slice from `merged_df` so later mutation is independent
    return {
        dataset_name: merged_df.loc[dataset_name, :].copy()
        for dataset_name in merged_df.index.levels[0]
    }
def validate_dataset_names(params: List[str], stage: str) -> List[str]:
    """Identify the merged datasets named in `params` while verifying that no dataset is
    referenced more than once, whether directly or via merging/coupling

    Parameters
    ----------
    params: List[str]
        Dataset names requested by a feature engineering step callable. Must be a subset of
        {"train_data", "train_inputs", "train_targets", "validation_data", "validation_inputs",
        "validation_targets", "holdout_data", "holdout_inputs", "holdout_targets",
        "test_inputs", "all_data", "all_inputs", "all_targets", "non_train_data",
        "non_train_inputs", "non_train_targets"}
    stage: String in {"pre_cv", "intra_cv"}
        Feature engineering stage for which the dataset names are validated

    Returns
    -------
    List[str]
        Names of merged datasets in `params`

    Raises
    ------
    ValueError
        If requested `params` contain a duplicate reference to any dataset, either by way of
        merging/coupling or not"""
    report = DatasetNameReport(params, stage)

    # Invert `leaves` so every standard dataset maps to the set of paths that reach it
    paths_by_leaf = dict()
    for leaf_path, leaf_name in report.leaves.items():
        paths_by_leaf.setdefault(leaf_name, set()).add(leaf_path)

    # More than one path to the same leaf means the dataset was requested twice
    for leaf_name, leaf_paths in paths_by_leaf.items():
        if len(leaf_paths) <= 1:
            continue
        err_str = f"Requested params include duplicate references to `{leaf_name}` by way of:"
        err_str += "".join([f"\n - {a_path}" for a_path in leaf_paths])
        err_str += "\nEach dataset may only be requested by a single param for each function"
        raise ValueError(err_str)

    # Top-level merged datasets (path length 1) are reported by name; nested ones by path
    return [path[0] if len(path) == 1 else path for path in report.merged_datasets]
class EngineerStep:
    def __init__(self, f: Callable, stage=None, name=None, params=None, do_validate=False):
        """:class:`FeatureEngineer` helper, compartmentalizing functions of singular engineer steps

        Parameters
        ----------
        f: Callable
            Feature engineering step function that requests, modifies, and returns datasets `params`
        stage: String in {"pre_cv", "intra_cv"}, or None, default=None
            Feature engineering stage during which the callable `f` will be given the datasets
            `params` to modify and return. If None, will be inferred based on `params`
        name: String, or None, default=None
            Identifier for the transformation applied by this engineering step. If None,
            `f.__name__` will be used
        params: List[str], or None, default=None
            Dataset names requested by feature engineering step callable `f`. If None, will be
            inferred by parsing the abstract syntax tree of `f`. Else, must be a subset of
            {"train_data", "train_inputs", "train_targets", "validation_data", "validation_inputs",
            "validation_targets", "holdout_data", "holdout_inputs", "holdout_targets",
            "test_inputs", "all_data", "all_inputs", "all_targets", "non_train_data",
            "non_train_inputs", "non_train_targets"}
        do_validate: Boolean, or "strict", default=False
            ... Experimental...
            Whether to validate the datasets resulting from feature engineering steps. If True,
            hashes of the new datasets will be compared to those of the originals to ensure they
            were actually modified. Results will be logged. If `do_validate`="strict", an exception
            will be raised if any anomalies are found, rather than logging a message. If
            `do_validate`=False, no validation will be performed"""
        self._f = f
        self._name = name
        # Property setter: when `params` is None, infers the names by parsing the AST of `f`
        self.params = params
        self._stage = stage
        self.do_validate = do_validate
        # Merged dataset names requested by `f`; populated by `get_datasets_for_f`
        self.merged_datasets = []
        # Dataset hashes captured before/after `f` runs; populated by `__call__`
        self.original_hashes = dict()
        self.updated_hashes = dict()

    def __call__(self, **datasets: DFDict) -> DFDict:
        """Apply :attr:`f` to `datasets` to produce updated datasets. If `f` requests any
        merged/coupled datasets (as reflected by :attr:`params`), conversions to accommodate those
        requests will take place here

        Parameters
        ----------
        **datasets: DFDict
            Original dict of datasets, containing all datasets, some of which may be superfluous, or
            may require additional processing to resolve merged/coupled datasets

        Returns
        -------
        new_datasets: DFDict
            Dict of datasets, which have been updated by :attr:`f`. Any datasets that may have been
            merged prior to being given to :attr:`f` have been split back into the original
            datasets, with the updates made by :attr:`f`"""
        # Hash the inputs up front so before/after states can later be compared
        self.original_hashes = hash_datasets(datasets)
        datasets_for_f = self.get_datasets_for_f(datasets)
        step_result = self.f(**datasets_for_f)
        # Normalize a single-DataFrame return to a 1-tuple so the `zip` below works uniformly
        step_result = (step_result,) if not isinstance(step_result, tuple) else step_result
        # Pair returned values with the requested param names, relying on `f` returning its
        # datasets in the same order it declared them
        new_datasets = dict(zip(self.params, step_result))
        for dataset_name, dataset_value in new_datasets.items():
            if dataset_name in self.merged_datasets:
                # Split a merged result (e.g. "all_data") back into its standard datasets.
                # Rebinding `new_datasets` is safe here: iteration continues over the dict
                # object created above, not the rebound one
                new_datasets = dict(new_datasets, **split_merged_df(dataset_value))
        # Carry through any datasets `f` did not touch, overridden by the updated ones.
        # NOTE(review): merged keys (e.g. "all_data") remain in the returned dict -- confirm
        # that downstream consumers expect them to persist
        new_datasets = dict(datasets, **new_datasets)
        self.updated_hashes = hash_datasets(new_datasets)
        # TODO: Check `self.do_validate` here to decide whether to `compare_dataset_columns`
        return new_datasets

    def get_datasets_for_f(self, datasets: DFDict) -> DFDict:
        """Produce a dict of DataFrames containing only the merged datasets and standard datasets
        requested in :attr:`params`. In other words, add the requested merged datasets and remove
        unnecessary standard datasets

        Parameters
        ----------
        datasets: DFDict
            Original dict of datasets, containing all datasets provided to
            :meth:`EngineerStep.__call__`, some of which may be superfluous, or may require
            additional processing to resolve merged/coupled datasets

        Returns
        -------
        DFDict
            Updated version of `datasets`, in which unnecessary datasets have been filtered out, and
            the requested merged datasets have been added"""
        # Also validates `params` (raises ValueError on duplicate dataset references)
        self.merged_datasets: List[str] = validate_dataset_names(self.params, self.stage)
        # NOTE(review): this aliases the input dict, so the merged datasets added below also
        # mutate the caller's `datasets` -- confirm this side effect is intended
        datasets_for_f = datasets
        for _dataset_name in self.merged_datasets:
            datasets_for_f[_dataset_name] = merge_dfs(_dataset_name, self.stage, datasets)
        # Keep only the datasets `f` actually requested
        return subdict(datasets_for_f, keep=self.params)

    def get_key_data(self) -> dict:
        """Produce a dict of critical attributes describing the :class:`EngineerStep` instance for
        use by key-making classes

        Returns
        -------
        Dict
            Important attributes describing this :class:`EngineerStep` instance"""
        return dict(
            name=self.name,
            f=self.f,
            params=self.params,
            stage=self.stage,
            do_validate=self.do_validate,
            original_hashes=self.original_hashes,
            updated_hashes=self.updated_hashes,
        )

    @property
    def f(self) -> Callable:
        """Feature engineering step callable that requests, modifies, and returns datasets"""
        return self._f

    @property
    def name(self) -> str:
        """Identifier for the transformation applied by this engineering step"""
        # Lazily default to the wrapped callable's own name
        if self._name is None:
            self._name = self.f.__name__
        return self._name

    @property
    def params(self) -> list:
        """Dataset names requested by feature engineering step callable :attr:`f`. See documentation
        in :meth:`EngineerStep.__init__` for more information/restrictions"""
        return self._params

    @params.setter
    def params(self, value):
        # When no explicit `params` are given, parse `f`'s AST to discover the dataset names
        self._params = value if value is not None else get_engineering_step_params(self.f)

    @property
    def stage(self) -> str:
        """Feature engineering stage during which the `EngineerStep` will be executed"""
        # Lazily inferred from `params` ("validation_"/"non_train_" names force "intra_cv")
        if self._stage is None:
            self._stage = get_engineering_step_stage(self.params)
        return self._stage
class FeatureEngineer:
    def __init__(self, steps=None, do_validate=False, **datasets: DFDict):
        """Container that sequences feature engineering step callables (:class:`EngineerStep`
        instances) and holds the datasets those steps request and return

        Parameters
        ----------
        steps: List, or None, default=None
            If not None, list whose elements are :class:`EngineerStep` instances, or callables
            from which :class:`EngineerStep` instances are built
        do_validate: Boolean, or "strict", default=False
            ... Experimental...
            Whether to validate the datasets resulting from feature engineering steps. If True,
            hashes of the new datasets will be compared to those of the originals to ensure they
            were actually modified. Results will be logged. If `do_validate`="strict", an
            exception will be raised if any anomalies are found, rather than logging a message.
            If `do_validate`=False, no validation will be performed
        **datasets: DFDict
            Mapping of datasets necessary to perform feature engineering steps. This is not
            expected to be provided on initialization and is offered primarily for
            debugging/testing"""
        self.do_validate = do_validate
        self.datasets = datasets or {}
        self.steps = []
        if steps:
            for a_step in steps:
                self.add_step(a_step)

    def __call__(self, stage: str, **datasets: DFDict):
        """Run every step in :attr:`steps` whose stage matches `stage`, feeding each one the
        current :attr:`datasets` and storing whatever it returns

        Parameters
        ----------
        stage: String in {"pre_cv", "intra_cv"}
            Feature engineering stage, specifying which :class:`EngineerStep` instances in
            :attr:`steps` should be executed
        datasets: DFDict
            Original dict of datasets, containing all datasets, some of which may be superfluous,
            or may require additional processing to resolve merged/coupled datasets. If non-empty,
            replaces :attr:`datasets` before any step runs"""
        if datasets:
            self.datasets = datasets
        for a_step in self.steps:
            if a_step.stage != stage:
                continue
            self.datasets = a_step(**self.datasets)

    @property
    def steps(self) -> List[EngineerStep]:
        """Feature engineering steps executed in sequence by :meth:`FeatureEngineer.__call__`"""
        return self._steps

    @steps.setter
    def steps(self, value: list):
        self._steps = value

    def get_key_data(self) -> dict:
        """Produce a dict of critical attributes describing the :class:`FeatureEngineer` instance
        for use by key-making classes

        Returns
        -------
        Dict
            Important attributes describing this :class:`FeatureEngineer` instance"""
        return {
            "steps": [a_step.get_key_data() for a_step in self.steps],
            "do_validate": self.do_validate,
            "datasets": self.datasets,
        }

    def add_step(
        self,
        step: Union[Callable, EngineerStep],
        stage: str = None,
        name: str = None,
        before: str = EMPTY_SENTINEL,
        after: str = EMPTY_SENTINEL,
        number: int = EMPTY_SENTINEL,
    ):
        """Append an engineering step to :attr:`steps`, to be executed with the other contents of
        :attr:`steps` on :meth:`FeatureEngineer.__call__`

        Parameters
        ----------
        step: Callable, or `EngineerStep`
            An `EngineerStep` instance is appended as-is. Any other callable must be a feature
            engineering step function that requests, modifies, and returns datasets, and is
            wrapped in a new :class:`EngineerStep` before being appended
        stage: String in {"pre_cv", "intra_cv"}, or None, default=None
            Feature engineering stage during which the callable `step` will be executed. Ignored
            when `step` is already an `EngineerStep`
        name: String, or None, default=None
            Identifier for the transformation applied by this engineering step. Ignored when
            `step` is already an `EngineerStep`; otherwise inferred during :class:`EngineerStep`
            instantiation if None
        before: String, default=EMPTY_SENTINEL
            ... Experimental... (currently unused)
        after: String, default=EMPTY_SENTINEL
            ... Experimental... (currently unused)
        number: String, default=EMPTY_SENTINEL
            ... Experimental... (currently unused)"""
        if isinstance(step, EngineerStep):
            new_step = step
        else:
            new_step = EngineerStep(step, name=name, stage=stage, do_validate=self.do_validate)
        self._steps.append(new_step)
    # FLAG: Tally number of columns "transformed" and "added" at each step and report
def get_engineering_step_stage(datasets: List[str]) -> str:
    """Determine the stage in which a feature engineering step that requests `datasets` as input
    should be executed

    Parameters
    ----------
    datasets: List[str]
        Dataset names requested by a feature engineering step callable

    Returns
    -------
    stage: {"pre_cv", "intra_cv"}
        "intra_cv" if any element of `datasets` is prefixed with "validation_" or "non_train_",
        since such datasets only exist once cross-validation splitting has happened. "pre_cv"
        otherwise

    Notes
    -----
    Generally, feature engineering conducted in the "pre_cv" stage should regard each sample/row
    as independent (e.g. one-hot encoding a day-of-week string, or imputing missing values with
    -1), since such steps are unlikely to leak information between rows. Steps whose result for
    one row depends on other rows (scaling/normalization) belong in "intra_cv", so the values are
    recomputed for each cross-validation split.

    A "non_train_"-prefixed name forces "intra_cv" even when there happens to be no validation
    data, which may be unnecessary -- but it is safer than running "pre_cv" and leaking validation
    rows that are still a subset of the train data. Pass an explicit `stage` to
    :class:`EngineerStep` to bypass this inference.

    Examples
    --------
    >>> get_engineering_step_stage(["train_inputs", "validation_inputs", "holdout_inputs"])
    'intra_cv'
    >>> get_engineering_step_stage(["all_data"])
    'pre_cv'
    >>> get_engineering_step_stage(["all_inputs", "all_targets"])
    'pre_cv'
    >>> get_engineering_step_stage(["train_data", "non_train_data"])
    'intra_cv'"""
    needs_cv_split = any(name.startswith(("validation_", "non_train_")) for name in datasets)
    return "intra_cv" if needs_cv_split else "pre_cv"
class ParameterParser(ast.NodeVisitor):
    """`ast.NodeVisitor` subclass that records, in :attr:`args`, the argument names declared in
    the signature of a module-level function, and, in :attr:`returns`, the names of the values it
    returns. Expects every node of the visited tree to carry a `parent` back-link attribute"""

    def __init__(self):
        self.args = []  # Signature argument names, in declaration order
        self.returns = []  # Returned variable names, in return order

    def visit_arg(self, node):
        # Only record args whose grandparent is a FunctionDef defined directly at module level;
        # missing `parent` links simply skip the node instead of raising
        with suppress(AttributeError):
            owner = node.parent.parent
            if isinstance(owner, ast.FunctionDef) and isinstance(owner.parent, ast.Module):
                self.args.append(node.arg)
        self.generic_visit(node)

    def visit_Return(self, node):
        try:
            # A bare `return name` exposes the name via `.id`
            self.returns.append(node.value.id)
        except AttributeError:
            # A `return a, b` tuple exposes its names via `.elts`
            self.returns.extend(element.id for element in node.value.elts)
        self.generic_visit(node)
def get_engineering_step_params(f: callable) -> List[str]:
    """Verify that callable `f` requests valid input parameters and returns a tuple of the same
    parameters, with the assumption that the parameters are modified by `f`

    Parameters
    ----------
    f: Callable
        Feature engineering step function that requests, modifies, and returns datasets

    Returns
    -------
    List
        Argument/return value names declared by `f`

    Raises
    ------
    ValueError
        If the names `f` returns differ from the names it accepts, or if any name is not a
        recognized standard/merged dataset name

    Examples
    --------
    >>> def impute_negative_one(all_data):
    ...     all_data.fillna(-1, inplace=True)
    ...     return all_data
    >>> get_engineering_step_params(impute_negative_one)
    ['all_data']
    >>> def standard_scale(train_inputs, non_train_inputs):
    ...     scaler = StandardScaler()
    ...     train_inputs[train_inputs.columns] = scaler.fit_transform(train_inputs.values)
    ...     non_train_inputs[train_inputs.columns] = scaler.transform(non_train_inputs.values)
    ...     return train_inputs, non_train_inputs
    >>> get_engineering_step_params(standard_scale)
    ['train_inputs', 'non_train_inputs']
    >>> def error_mismatch(train_inputs, non_train_inputs):
    ...     return validation_inputs, holdout_inputs
    >>> get_engineering_step_params(error_mismatch)
    Traceback (most recent call last):
        File "feature_engineering.py", line ?, in get_engineering_step_params
    ValueError: Mismatched `f` inputs (['train_inputs', 'non_train_inputs']), and returns (['validation_inputs', 'holdout_inputs'])
    >>> def error_invalid_dataset(train_inputs, foo):
    ...     return train_inputs, foo
    >>> get_engineering_step_params(error_invalid_dataset)
    Traceback (most recent call last):
        File "feature_engineering.py", line ?, in get_engineering_step_params
    ValueError: Invalid dataset name in ['train_inputs', 'foo']"""
    valid_datasets = MERGED_DATASET_NAMES + STANDARD_DATASET_NAMES
    tree = ast.parse(getsource(f))

    #################### Add Links to Nodes' Parents ####################
    # `ParameterParser.visit_arg` needs each node's ancestry to identify module-level functions
    for node in ast.walk(tree):
        for child in ast.iter_child_nodes(node):
            child.parent = node

    #################### Collect Parameters and Returns ####################
    parser = ParameterParser()
    parser.visit(tree)

    if parser.args != parser.returns:
        raise ValueError(f"Mismatched `f` inputs ({parser.args}), and returns ({parser.returns})")
    if not all(name in valid_datasets for name in parser.args):
        raise ValueError(f"Invalid dataset name in {parser.args}")
    return parser.args
def _hash_dataset(dataset: pd.DataFrame) -> dict:
    """Describe `dataset` with hashes at three levels of specificity: the whole frame, its
    column names, and each column's values

    Parameters
    ----------
    dataset: pandas.DataFrame
        DataFrame to be described with a dict of hashes. May be None

    Returns
    -------
    dict
        "dataset" (str): Hash of `dataset`, itself
        "column_names" (str): Hash of `dataset.columns`, capturing names, order, and add/drops
        "column_values" (dict): Keys are `dataset.columns`, and values are hashes for each column.
        All three values are None when `dataset` is None

    Examples
    --------
    >>> _hash_dataset(None)
    {'dataset': None, 'column_names': None, 'column_values': None}"""
    if dataset is None:
        return {"dataset": None, "column_names": None, "column_values": None}

    per_column = {column: make_hash_sha256(dataset[column]) for column in dataset.columns}
    return {
        "dataset": make_hash_sha256(dataset),
        "column_names": make_hash_sha256(dataset.columns),
        "column_values": per_column,
    }
def hash_datasets(datasets: dict) -> dict:
    """Describe every DataFrame in `datasets` with the hash dict produced by :func:`_hash_dataset`

    Parameters
    ----------
    datasets: Dict
        Mapping of dataset names to `pandas.DataFrame` instances

    Returns
    -------
    hashes: Dict
        Mapping with same keys as `datasets`, whose values are dicts returned from
        :func:`_hash_dataset` that provide hashes for each DataFrame and its column names/values

    Examples
    --------
    >>> df_x = pd.DataFrame(dict(a=[0, 1], b=[2, 3], c=[4, 5]))
    >>> df_y = pd.DataFrame(dict(a=[0, 1], b=[6, 7], d=[8, 9]))
    >>> hash_datasets(dict(x=df_x, y=df_y)) == dict(x=_hash_dataset(df_x), y=_hash_dataset(df_y))
    True"""
    return {dataset_name: _hash_dataset(df) for dataset_name, df in datasets.items()}
# def _compare_hash_(columns_a: dict, columns_b: dict):
# """
#
# Parameters
# ----------
# columns_a
# columns_b
#
# Returns
# -------
#
# """
# columns_added = dict()
# columns_dropped = dict()
# columns_modified = dict()
# columns_unchanged = dict()
#
#
# def compare_dataset_columns(datasets_a: dict, datasets_b: dict):
# compare_column_hashes(..., ...)
# def step(order=None, before=None, after=None, returns="frame"):
# """
#
# Parameters
# ----------
# order: Integer, or None, default=None
# ...
# before: String, or None, default=None
# ...
# after: String, or None, default=None
# ...
# returns: {"frame", "cols"}, default="frame"
# ...
#
# Returns
# -------
#
# """
# ...
|
package Operations;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Collections;
/**
 * Reads an integer n from standard input and prints every prime number
 * less than or equal to n, space-separated, using the Sieve of Eratosthenes.
 */
public class SieveOfEratosthenes
{
    public static void main(String [] args) throws IOException
    {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        int n = Integer.parseInt(br.readLine());

        // boolean[] instead of ArrayList<Boolean> avoids per-element boxing;
        // it also handles n < 2 gracefully (the original crashed for n < 1).
        // Removed the unused variable `int s = 2;` from the original.
        boolean[] isPrime = new boolean[n + 1];
        for (int i = 2; i <= n; i++)
            isPrime[i] = true;

        // Integer bound (i * i <= n) replaces the floating-point Math.sqrt check;
        // the (long) cast guards against i * i overflowing for large n.
        for (int i = 2; (long) i * i <= n; i++)
        {
            if (isPrime[i])
            {
                // Start crossing off at i * i: smaller multiples of i were
                // already eliminated by smaller prime factors
                for (int j = i * i; j <= n; j += i)
                    isPrime[j] = false;
            }
        }

        // Print primes in ascending order, separated by single spaces
        for (int i = 2; i <= n; i++)
        {
            if (isPrime[i])
                System.out.print(i + " ");
        }
        System.out.println();
    }
}
|
import React, { Fragment } from 'react';
import { Route, Switch } from 'react-router-dom';
import Collection from '../Components/Collection';
import Collections from '../Components/Collections';
import Base from '../Components/Base';
import NavBar from '../Components/Navbar';
import NotFound from '../Components/NotFound';
import Places from '../Components/Places';
import Publications from '../Components/Publications';
const Routes = () => (
<Fragment>
<NavBar />
<Switch>
<Route path='/' component={Base} exact={true} />
<Route path='/collections' component={Collections} exact={true} />
<Route path='/collections/:id' component={Collection} exact={true} />
<Route path='/publications' component={Publications} exact={true} />
<Route path='/places' component={Places} exact={true} />
<Route component={NotFound} />
</Switch>
</Fragment>
);
export default Routes;
|
#include <stdio.h>
#include "ConfigFile.h"
#include "../Utilities/Operations.h"
namespace DivaHook::FileSystem
{
ConfigFile::ConfigFile(const std::string &path) : TextFile(path)
{
return;
}
ConfigFile::ConfigFile(const std::string &directory, const std::string &file) : TextFile(directory, file)
{
return;
}
bool ConfigFile::TryGetValue(const std::string &key, std::string *&value)
{
auto pair = ConfigMap.find(key);
bool found = pair != ConfigMap.end();
value = found ? new std::string(pair->second) : nullptr;
return found;
}
void ConfigFile::Parse(std::ifstream &fileStream)
{
std::string line;
while (std::getline(fileStream, line))
{
if (IsComment(line))
continue;
auto splitline = Utilities::Split(line, "=");
for (auto &line : splitline)
Utilities::Trim(line);
ConfigMap.insert(std::make_pair(splitline[0], splitline[1]));
}
}
// A line is ignored by Parse() when it is empty or starts with '#' or "//".
bool ConfigFile::IsComment(const std::string &line)
{
	// rfind(prefix, 0) == 0 is the portable "starts with" idiom; the previous
	// code used std::string::_Starts_with, an MSVC-internal extension that
	// does not compile on other standard libraries.
	return line.empty() || line[0] == '#' || line.rfind("//", 0) == 0;
}
}
|
#!/bin/bash
# This script downloads and installs the latest available Oracle Java 8 JDK CPU release or PSU release for compatible Macs
# Save current IFS state
OLDIFS=$IFS
# Split the macOS version string (e.g. "10.13.6") into major/minor/patch parts.
IFS='.' read osvers_major osvers_minor osvers_dot_version <<< "$(/usr/bin/sw_vers -productVersion)"
# restore IFS to previous state
IFS=$OLDIFS
# Scrapes Oracle's JDK 8 download page for the newest disk-image URLs and,
# based on $Version (CPU or PSU), sets $fileURL for the download step.
# Exits when no URL matching the requested release type could be found.
IdentifyLatestJDKRelease(){

# Determine the download URL for the latest CPU release or PSU release.
# The CPU build is listed first on the page (head -1), the PSU build last (tail -1).
Java_8_JDK_CPU_URL=`/usr/bin/curl -s https://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html | grep -ioE "https://download.oracle.com/otn-pub/java/jdk/.*?/jdk-8u.*?x64.dmg" | head -1`
Java_8_JDK_PSU_URL=`/usr/bin/curl -s https://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html | grep -ioE "https://download.oracle.com/otn-pub/java/jdk/.*?/jdk-8u.*?x64.dmg" | tail -1`

# Use the Version variable to determine if the script should install the latest CPU release or PSU release.
if [[ "$Version" = "PSU" ]] && [[ "$Java_8_JDK_PSU_URL" != "" ]]; then
   fileURL="$Java_8_JDK_PSU_URL"
   /bin/echo "Installing Oracle Java 8 JDK Patch Set Update (PSU) -" "$Java_8_JDK_PSU_URL"
elif [[ "$Version" = "PSU" ]] && [[ "$Java_8_JDK_PSU_URL" = "" ]]; then
   /bin/echo "Unable to identify download URL for requested Oracle Java 8 JDK Patch Set Update (PSU). Exiting."
   # NOTE(review): exiting 0 on failure is kept for management-tool compatibility — confirm.
   exit 0
fi

if [[ "$Version" = "CPU" ]] && [[ "$Java_8_JDK_CPU_URL" != "" ]]; then
   fileURL="$Java_8_JDK_CPU_URL"
   # BUGFIX: this message previously printed $Java_8_JDK_PSU_URL in the CPU branch.
   /bin/echo "Installing Oracle Java 8 JDK Critical Patch Update (CPU) -" "$Java_8_JDK_CPU_URL"
elif [[ "$Version" = "CPU" ]] && [[ "$Java_8_JDK_CPU_URL" = "" ]]; then
   /bin/echo "Unable to identify download URL for requested Oracle Java 8 JDK Critical Patch Update (CPU). Exiting."
   exit 0
fi
}
# Only macOS 10.8+ is supported by the Oracle Java 8 JDK installer.
if [[ ( ${osvers_major} -eq 10 && ${osvers_minor} -lt 8 ) ]]; then
echo "Oracle Java 8 JDK is not available for Mac OS X 10.7.5 or earlier."
else
# Specify name of downloaded disk image
java_eight_jdk_dmg="/tmp/java_eight_jdk.dmg"
# Use the Version variable to set if you want to download the latest CPU release or the latest PSU release.
# The difference between CPU and PSU releases is as follows:
#
# Critical Patch Update (CPU): contains both fixes to security vulnerabilities and critical bug fixes.
#
# Patch Set Update (PSU): contains all the fixes in the corresponding CPU, plus additional fixes to non-critical problems.
#
# For more details on the differences between CPU and PSU updates, please see the link below:
#
# http://www.oracle.com/technetwork/java/javase/cpu-psu-explained-2331472.html
#
# Setting the variable as shown below will set the script to install the CPU release:
#
# Version=CPU
#
# Setting the variable as shown below will set the script to install the PSU release:
#
# Version=PSU
#
# By default, the script is set to install the CPU release.
Version=CPU
# Identify the URL of the latest Oracle Java 8 JDK software disk image
# using the IdentifyLatestJDKRelease function.
IdentifyLatestJDKRelease
# Download the latest Oracle Java 8 JDK software disk image
# The curl -L option is needed because there is a redirect
# that the requested page has moved to a different location.
# The Cookie header accepts Oracle's license so the download is permitted.
/usr/bin/curl --retry 3 -Lo "$java_eight_jdk_dmg" "$fileURL" -H "Cookie: oraclelicense=accept-securebackup-cookie"
# Specify a /tmp/java_eight_jdk.XXXX mountpoint for the disk image
# NOTE(review): "$3" is prepended to the mktemp template and used as the
# installer -target below — presumably the Jamf/Casper target-volume
# parameter; confirm, since a non-empty $3 puts the mountpoint under "$3/tmp".
TMPMOUNT=`/usr/bin/mktemp -d "$3"/tmp/java_eight_jdk.XXXX`
# Mount the latest Oracle Java 8 disk image to /tmp/java_eight_jdk.XXXX mountpoint
hdiutil attach "$java_eight_jdk_dmg" -mountpoint "$TMPMOUNT" -nobrowse -noverify -noautoopen
# Install Oracle Java 8 JDK from the installer package. This installer may
# be stored inside an install application on the disk image, or there
# may be an installer package available at the root of the mounted disk
# image.
if [[ -e "$(/usr/bin/find $TMPMOUNT -maxdepth 1 \( -iname \*JDK*\.pkg -o -iname \*JDK*\.mpkg \))" ]]; then
pkg_path="$(/usr/bin/find $TMPMOUNT -maxdepth 1 \( -iname \*JDK*\.pkg -o -iname \*JDK*\.mpkg \))"
fi
# Before installation, the installer's developer certificate is checked to
# see if it has been signed by Oracle's developer certificate. Once the
# certificate check has been passed, the package is then installed.
# NOTE(review): when the signature check fails, installation is skipped
# silently and the script still exits 0 — confirm that is intended.
if [[ "${pkg_path}" != "" ]]; then
signature_check=`/usr/sbin/pkgutil --check-signature "$pkg_path" | awk /'Developer ID Installer/{ print $5 }'`
if [[ ${signature_check} = "Oracle" ]]; then
/bin/echo "The downloaded Oracle Java 8 JDK installer package is signed by Oracle's Developer ID Installer certificate."
/bin/echo "Proceeding with installation of the latest Oracle Java 8 JDK."
# Install Oracle Java 8 JDK from the installer package stored inside the disk image
/usr/sbin/installer -dumplog -verbose -pkg "${pkg_path}" -target "$3"
# Report on the currently installed version of the Oracle Java 8 JDK
javaJDKVersion=`/usr/bin/java -version 2>&1 | awk 'NR==1{ gsub(/"/,""); print $3 }'`
/bin/echo "Oracle Java 8 JDK $javaJDKVersion has been installed."
fi
fi
# Clean-up
# Unmount the Oracle Java 8 JDK disk image from /tmp/java_eight_jdk.XXXX
/usr/bin/hdiutil detach -force "$TMPMOUNT"
# Remove the /tmp/java_eight_jdk.XXXX mountpoint
/bin/rm -rf "$TMPMOUNT"
# Remove the downloaded disk image
/bin/rm -rf "$java_eight_jdk_dmg"
fi
exit 0
<reponame>lklots/fungi-quiz-api<filename>mock.js
const _ = require('lodash');
// Builds a deterministic mock quiz question. The correct answer's taxonId is
// embedded in the questionId string as "questionId-<taxonId>" so the
// makeGuess mock can recover it later. Defaults to taxonId 47347.
function mockQuestion(taxonId) {
  const answer = taxonId || 47347;
  const photo = (url) => ({ url, origWidth: 500, origHeight: 500 });
  return {
    questionId: `questionId-${answer}`,
    photos: [photo('mushroom1.jpg'), photo('mushroom2.jpg')],
    choices: [
      {
        taxonId: 47347,
        name: '<NAME>',
        commonName: '<NAME>',
      },
      {
        taxonId: 67752,
        name: 'Omphalotus olivascens',
        commonName: 'Western American Jack-o\'-lantern Mushroom',
      },
      {
        taxonId: 63538,
        name: 'Hygrophoropsis aurantiaca',
        commonName: 'False Chanterelle',
      },
    ],
  };
}
// Mock GraphQL resolvers, keyed by GraphQL type name.
module.exports = {
Mutation: () => ({
// The correct taxonId was embedded in questionId as "questionId-<taxonId>"
// by mockQuestion, so a guess is resolved by splitting it back out.
makeGuess: (_root, { questionId }) => _.last(_.split(questionId, '-')),
createQuiz: (_root, { taxonIds }) => {
return {
questions: taxonIds.map(mockQuestion),
};
},
}),
Question: () => mockQuestion(),
Choice: () => ({
taxonId: () => 47347,
name: () => '<NAME>',
commonName: () => '<NAME>',
}),
// Fixed-size placeholder used for every Photos field.
Photos: () => ({
origWidth: 500,
origHeight: 500,
url: 'mushroom.png',
}),
};
|
require 'rubygems'
require 'rake'
require 'rake/clean'
require 'rake/testtask'
# Prefer the modern Gem::PackageTask; very old environments only ship the
# deprecated Rake::GemPackageTask, which we fall back to on LoadError.
begin
require 'rubygems/package_task'
rescue LoadError
require 'rake/gempackagetask'
rake_gempackage = true
end
# Load the gemspec that drives packaging.
spec = Gem::Specification.load('rhc.gemspec')
# Define a :package task that bundles the gem
if rake_gempackage
Rake::GemPackageTask.new(spec) do |pkg, args|
pkg.need_tar = false
end
else
Gem::PackageTask.new(spec) do |pkg, args|
pkg.need_tar = false
end
end
desc "Output the current version"
task :version, :version do |t, args|
# Fall back to the "Version:" field of client.spec when no argument is given.
version = args[:version] || /(Version: )(.*)/.match(File.read("client.spec"))[2]
raise "No version specified" unless version
puts "RPM version #{version}"
end
# Add the 'pkg' directory to the clean task
CLEAN.include("pkg")
desc "Build autocomplete script"
task :autocomplete do
require 'rhc'
# Loading the command tree registers the completions AutoComplete reads.
RHC::Commands.load.to_commander
IO.write('autocomplete/rhc_bash', RHC::AutoComplete.new.to_s)
end
// Advent of Code runner: each ./src/dayN module exports [part1, part2].
const [day1Part1, day1Part2] = require('./src/day1');
const [day2Part1, day2Part2] = require('./src/day2');
const [day3Part1, day3Part2] = require('./src/day3');
const [day4Part1, day4Part2] = require('./src/day4');
const [day5Part1, day5Part2] = require('./src/day5');
const [day6Part1, day6Part2] = require('./src/day6');
// NOTE(review): day 7 is neither required nor printed — presumably it was
// never implemented; confirm before treating this as an omission.
const [day8Part1, day8Part2] = require('./src/day8');
const [day9Part1, day9Part2] = require('./src/day9');
const [day10Part1, day10Part2] = require('./src/day10');
const [day11Part1, day11Part2] = require('./src/day11');
const [day12Part1, day12Part2] = require('./src/day12');
console.log('Day 1 answers: ', day1Part1(), day1Part2());
console.log('Day 2 answers: ', day2Part1(), day2Part2());
// Day 3 part 1 takes the toboggan slope (right 3, down 1) as arguments.
console.log('Day 3 answers: ', day3Part1(3, 1), day3Part2());
console.log('Day 4 answers: ', day4Part1(), day4Part2());
console.log('Day 5 answers: ', day5Part1(), day5Part2());
console.log('Day 6 answers: ', day6Part1(), day6Part2());
console.log('Day 8 answers: ', day8Part1(), day8Part2());
console.log('Day 9 answers: ', day9Part1(), day9Part2());
console.log('Day 10 answers: ', day10Part1(), day10Part2());
console.log('Day 11 answers: ', day11Part1(), day11Part2());
console.log('Day 12 answers: ', day12Part1(), day12Part2());
|
package org.niuzuo.criminalintent;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.view.Window;
import android.view.WindowManager;
/**
* Created by zdns on 16/7/13.
*/
/**
 * Hosts a {@link CrimeCameraFragment} inside the shared single-fragment
 * activity shell provided by {@link SingleFragmentActivity}.
 */
public class CrimeCameraActivity extends SingleFragmentActivity {

    /** Supplies the fragment the base activity will attach. */
    @Override
    protected Fragment createFragment() {
        return new CrimeCameraFragment();
    }

    // The previous onCreate(Bundle) override only delegated to super and was
    // removed as dead code; the base class implementation runs unchanged.
}
|
import "./src/components/styles/global.css"
|
/***********************************************************************************************************************
* OpenStudio(R), Copyright (c) 2008-2021, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
*
* (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
*
* (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission from the respective party.
*
* (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works
* may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior
* written permission from Alliance for Sustainable Energy, LLC.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED
* STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************************************************************/
#ifndef MODEL_HEATEXCHANGERDESICCANTBALANCEDFLOWPERFORMANCEDATATYPE1_IMPL_HPP
#define MODEL_HEATEXCHANGERDESICCANTBALANCEDFLOWPERFORMANCEDATATYPE1_IMPL_HPP
#include <model/ModelAPI.hpp>
#include "ResourceObject_Impl.hpp"
namespace openstudio {
namespace model {
class HeatExchangerDesiccantBalancedFlow;
namespace detail {
/** HeatExchangerDesiccantBalancedFlowPerformanceDataType1_Impl is a ResourceObject_Impl that is the implementation class for HeatExchangerDesiccantBalancedFlowPerformanceDataType1.*/
// Declaration-only Impl class: the getters/setters below mirror the numeric
// fields of the underlying data object one-for-one.
// NOTE(review): presumably maps to the EnergyPlus
// HeatExchanger:Desiccant:BalancedFlow:PerformanceDataType1 object — confirm
// against the IDD before relying on field semantics.
class MODEL_API HeatExchangerDesiccantBalancedFlowPerformanceDataType1_Impl : public ResourceObject_Impl
{
public:
/** @name Constructors and Destructors */
//@{
HeatExchangerDesiccantBalancedFlowPerformanceDataType1_Impl(const IdfObject& idfObject, Model_Impl* model, bool keepHandle);
HeatExchangerDesiccantBalancedFlowPerformanceDataType1_Impl(const openstudio::detail::WorkspaceObject_Impl& other, Model_Impl* model,
bool keepHandle);
HeatExchangerDesiccantBalancedFlowPerformanceDataType1_Impl(const HeatExchangerDesiccantBalancedFlowPerformanceDataType1_Impl& other,
Model_Impl* model, bool keepHandle);
virtual ~HeatExchangerDesiccantBalancedFlowPerformanceDataType1_Impl() {}
//@}
/** @name Virtual Methods */
//@{
virtual const std::vector<std::string>& outputVariableNames() const override;
virtual IddObjectType iddObjectType() const override;
virtual ModelObject clone(Model model) const override;
// If this object is still used by at least one HeatExchangerDesiccantBalancedFlow, refuse to remove to avoid putting the HeatExchangerDesiccantBalancedFlow(s) in a broken state
virtual std::vector<IdfObject> remove() override;
//@}
/** @name Getters */
//@{
// All HeatExchangerDesiccantBalancedFlow objects that reference this performance data.
std::vector<HeatExchangerDesiccantBalancedFlow> heatExchangerDesiccantBalancedFlows() const;
// Autosizable fields: the is*Autosized() flag reports whether the field is
// set to autosize; the optional getter is empty while it is.
bool isNominalAirFlowRateAutosized() const;
boost::optional<double> nominalAirFlowRate() const;
bool isNominalAirFaceVelocityAutosized() const;
boost::optional<double> nominalAirFaceVelocity() const;
double nominalElectricPower() const;
// Coefficients 1-8 and applicability limits of the regeneration outlet air
// temperature correlation.
double temperatureEquationCoefficient1() const;
double temperatureEquationCoefficient2() const;
double temperatureEquationCoefficient3() const;
double temperatureEquationCoefficient4() const;
double temperatureEquationCoefficient5() const;
double temperatureEquationCoefficient6() const;
double temperatureEquationCoefficient7() const;
double temperatureEquationCoefficient8() const;
double minimumRegenerationInletAirHumidityRatioforTemperatureEquation() const;
double maximumRegenerationInletAirHumidityRatioforTemperatureEquation() const;
double minimumRegenerationInletAirTemperatureforTemperatureEquation() const;
double maximumRegenerationInletAirTemperatureforTemperatureEquation() const;
double minimumProcessInletAirHumidityRatioforTemperatureEquation() const;
double maximumProcessInletAirHumidityRatioforTemperatureEquation() const;
double minimumProcessInletAirTemperatureforTemperatureEquation() const;
double maximumProcessInletAirTemperatureforTemperatureEquation() const;
double minimumRegenerationAirVelocityforTemperatureEquation() const;
double maximumRegenerationAirVelocityforTemperatureEquation() const;
double minimumRegenerationOutletAirTemperatureforTemperatureEquation() const;
double maximumRegenerationOutletAirTemperatureforTemperatureEquation() const;
double minimumRegenerationInletAirRelativeHumidityforTemperatureEquation() const;
double maximumRegenerationInletAirRelativeHumidityforTemperatureEquation() const;
double minimumProcessInletAirRelativeHumidityforTemperatureEquation() const;
double maximumProcessInletAirRelativeHumidityforTemperatureEquation() const;
// Coefficients 1-8 and applicability limits of the regeneration outlet air
// humidity ratio correlation.
double humidityRatioEquationCoefficient1() const;
double humidityRatioEquationCoefficient2() const;
double humidityRatioEquationCoefficient3() const;
double humidityRatioEquationCoefficient4() const;
double humidityRatioEquationCoefficient5() const;
double humidityRatioEquationCoefficient6() const;
double humidityRatioEquationCoefficient7() const;
double humidityRatioEquationCoefficient8() const;
double minimumRegenerationInletAirHumidityRatioforHumidityRatioEquation() const;
double maximumRegenerationInletAirHumidityRatioforHumidityRatioEquation() const;
double minimumRegenerationInletAirTemperatureforHumidityRatioEquation() const;
double maximumRegenerationInletAirTemperatureforHumidityRatioEquation() const;
double minimumProcessInletAirHumidityRatioforHumidityRatioEquation() const;
double maximumProcessInletAirHumidityRatioforHumidityRatioEquation() const;
double minimumProcessInletAirTemperatureforHumidityRatioEquation() const;
double maximumProcessInletAirTemperatureforHumidityRatioEquation() const;
double minimumRegenerationAirVelocityforHumidityRatioEquation() const;
double maximumRegenerationAirVelocityforHumidityRatioEquation() const;
double minimumRegenerationOutletAirHumidityRatioforHumidityRatioEquation() const;
double maximumRegenerationOutletAirHumidityRatioforHumidityRatioEquation() const;
double minimumRegenerationInletAirRelativeHumidityforHumidityRatioEquation() const;
double maximumRegenerationInletAirRelativeHumidityforHumidityRatioEquation() const;
double minimumProcessInletAirRelativeHumidityforHumidityRatioEquation() const;
double maximumProcessInletAirRelativeHumidityforHumidityRatioEquation() const;
//@}
/** @name Setters */
//@{
void autosizeNominalAirFlowRate();
bool setNominalAirFlowRate(double nominalAirFlowRate);
void autosizeNominalAirFaceVelocity();
bool setNominalAirFaceVelocity(double nominalAirFaceVelocity);
bool setNominalElectricPower(double nominalElectricPower);
bool setTemperatureEquationCoefficient1(double temperatureEquationCoefficient1);
bool setTemperatureEquationCoefficient2(double temperatureEquationCoefficient2);
bool setTemperatureEquationCoefficient3(double temperatureEquationCoefficient3);
bool setTemperatureEquationCoefficient4(double temperatureEquationCoefficient4);
bool setTemperatureEquationCoefficient5(double temperatureEquationCoefficient5);
bool setTemperatureEquationCoefficient6(double temperatureEquationCoefficient6);
bool setTemperatureEquationCoefficient7(double temperatureEquationCoefficient7);
bool setTemperatureEquationCoefficient8(double temperatureEquationCoefficient8);
bool setMinimumRegenerationInletAirHumidityRatioforTemperatureEquation(double minimumRegenerationInletAirHumidityRatioforTemperatureEquation);
bool setMaximumRegenerationInletAirHumidityRatioforTemperatureEquation(double maximumRegenerationInletAirHumidityRatioforTemperatureEquation);
bool setMinimumRegenerationInletAirTemperatureforTemperatureEquation(double minimumRegenerationInletAirTemperatureforTemperatureEquation);
bool setMaximumRegenerationInletAirTemperatureforTemperatureEquation(double maximumRegenerationInletAirTemperatureforTemperatureEquation);
bool setMinimumProcessInletAirHumidityRatioforTemperatureEquation(double minimumProcessInletAirHumidityRatioforTemperatureEquation);
bool setMaximumProcessInletAirHumidityRatioforTemperatureEquation(double maximumProcessInletAirHumidityRatioforTemperatureEquation);
bool setMinimumProcessInletAirTemperatureforTemperatureEquation(double minimumProcessInletAirTemperatureforTemperatureEquation);
bool setMaximumProcessInletAirTemperatureforTemperatureEquation(double maximumProcessInletAirTemperatureforTemperatureEquation);
bool setMinimumRegenerationAirVelocityforTemperatureEquation(double minimumRegenerationAirVelocityforTemperatureEquation);
bool setMaximumRegenerationAirVelocityforTemperatureEquation(double maximumRegenerationAirVelocityforTemperatureEquation);
bool setMinimumRegenerationOutletAirTemperatureforTemperatureEquation(double minimumRegenerationOutletAirTemperatureforTemperatureEquation);
bool setMaximumRegenerationOutletAirTemperatureforTemperatureEquation(double maximumRegenerationOutletAirTemperatureforTemperatureEquation);
bool setMinimumRegenerationInletAirRelativeHumidityforTemperatureEquation(
double minimumRegenerationInletAirRelativeHumidityforTemperatureEquation);
bool setMaximumRegenerationInletAirRelativeHumidityforTemperatureEquation(
double maximumRegenerationInletAirRelativeHumidityforTemperatureEquation);
bool setMinimumProcessInletAirRelativeHumidityforTemperatureEquation(double minimumProcessInletAirRelativeHumidityforTemperatureEquation);
bool setMaximumProcessInletAirRelativeHumidityforTemperatureEquation(double maximumProcessInletAirRelativeHumidityforTemperatureEquation);
bool setHumidityRatioEquationCoefficient1(double humidityRatioEquationCoefficient1);
bool setHumidityRatioEquationCoefficient2(double humidityRatioEquationCoefficient2);
bool setHumidityRatioEquationCoefficient3(double humidityRatioEquationCoefficient3);
bool setHumidityRatioEquationCoefficient4(double humidityRatioEquationCoefficient4);
bool setHumidityRatioEquationCoefficient5(double humidityRatioEquationCoefficient5);
bool setHumidityRatioEquationCoefficient6(double humidityRatioEquationCoefficient6);
bool setHumidityRatioEquationCoefficient7(double humidityRatioEquationCoefficient7);
bool setHumidityRatioEquationCoefficient8(double humidityRatioEquationCoefficient8);
bool
setMinimumRegenerationInletAirHumidityRatioforHumidityRatioEquation(double minimumRegenerationInletAirHumidityRatioforHumidityRatioEquation);
bool
setMaximumRegenerationInletAirHumidityRatioforHumidityRatioEquation(double maximumRegenerationInletAirHumidityRatioforHumidityRatioEquation);
bool setMinimumRegenerationInletAirTemperatureforHumidityRatioEquation(double minimumRegenerationInletAirTemperatureforHumidityRatioEquation);
bool setMaximumRegenerationInletAirTemperatureforHumidityRatioEquation(double maximumRegenerationInletAirTemperatureforHumidityRatioEquation);
bool setMinimumProcessInletAirHumidityRatioforHumidityRatioEquation(double minimumProcessInletAirHumidityRatioforHumidityRatioEquation);
bool setMaximumProcessInletAirHumidityRatioforHumidityRatioEquation(double maximumProcessInletAirHumidityRatioforHumidityRatioEquation);
bool setMinimumProcessInletAirTemperatureforHumidityRatioEquation(double minimumProcessInletAirTemperatureforHumidityRatioEquation);
bool setMaximumProcessInletAirTemperatureforHumidityRatioEquation(double maximumProcessInletAirTemperatureforHumidityRatioEquation);
bool setMinimumRegenerationAirVelocityforHumidityRatioEquation(double minimumRegenerationAirVelocityforHumidityRatioEquation);
bool setMaximumRegenerationAirVelocityforHumidityRatioEquation(double maximumRegenerationAirVelocityforHumidityRatioEquation);
bool setMinimumRegenerationOutletAirHumidityRatioforHumidityRatioEquation(
double minimumRegenerationOutletAirHumidityRatioforHumidityRatioEquation);
bool setMaximumRegenerationOutletAirHumidityRatioforHumidityRatioEquation(
double maximumRegenerationOutletAirHumidityRatioforHumidityRatioEquation);
bool setMinimumRegenerationInletAirRelativeHumidityforHumidityRatioEquation(
double minimumRegenerationInletAirRelativeHumidityforHumidityRatioEquation);
bool setMaximumRegenerationInletAirRelativeHumidityforHumidityRatioEquation(
double maximumRegenerationInletAirRelativeHumidityforHumidityRatioEquation);
bool setMinimumProcessInletAirRelativeHumidityforHumidityRatioEquation(double minimumProcessInletAirRelativeHumidityforHumidityRatioEquation);
bool setMaximumProcessInletAirRelativeHumidityforHumidityRatioEquation(double maximumProcessInletAirRelativeHumidityforHumidityRatioEquation);
//@}
/** @name Other */
//@{
// Sizing-run results for the two autosizable fields above.
boost::optional<double> autosizedNominalAirFlowRate();
boost::optional<double> autosizedNominalAirFaceVelocity();
void autosize();
void applySizingValues();
//@}
protected:
private:
REGISTER_LOGGER("openstudio.model.HeatExchangerDesiccantBalancedFlowPerformanceDataType1");
};
} // namespace detail
} // namespace model
} // namespace openstudio
#endif // MODEL_HEATEXCHANGERDESICCANTBALANCEDFLOWPERFORMANCEDATATYPE1_IMPL_HPP
|
<gh_stars>0
/**
* Handle domain zones database
*
*/
'use strict';
const
path = require('path'),
fs = require('fs'),
child_process = require('child_process');
const
// Template for one zone clause in the BIND config file (filled by subst()).
ZONE_REC = 'zone "${domain}" { type master; file "${data_folder}/${domain}"; };\n',
ZONE_START = 'zone "${domain}"', // start line for search
// SOA/NS header written at the top of every new zone data file.
DATA_HEAD = '$TTL 1h\n' +
'@ IN SOA ${ns1}. ${root}. ( ${serial} 3600 600 86400 3600 ) ;(serial refresh retry expire min TTL)\n' +
' IN NS ${ns1}.\n' +
' IN NS ${ns2}.\n',
// Captures the current serial number inside the SOA record.
REG_SERIAL = /SOA[^\(]+\(\s*(\S+)/m,
// One record template per supported record type.
DATA_TYPE_REC = {
A: '${name}. ${ttl} IN A ${data}\n', // name=provider.domain, data=192.168.1.69, ttl=1800
AAAA: '${name}. ${ttl} IN AAAA ${data}\n', // name=provider.domain, data=..IPv6.., ttl=1800
CNAME: '${name} ${ttl} IN CNAME ${data}.\n' // name=www, data = www.provider.domain, ttl=1800
};
/**
 * Manages BIND zone definitions: one include-style config file listing the
 * zones, plus one data file per zone under data_folder.
 */
class ZonesDB {

  /**
   * @param {Object} params - required keys: domain, ns1, ns2, root,
   *   config_file, data_folder.
   * @throws {Error} when any required key is missing.
   */
  constructor(params) {
    this.params = Object.assign({}, params);
    const necessary_params = 'domain,ns1,ns2,root,config_file,data_folder'.split(',');
    necessary_params.forEach(p => {
      if (!(p in this.params)) {
        // Throwing aborts construction; the unreachable `return` that
        // followed this throw in the original has been dropped.
        throw new Error('Params ' + necessary_params.join(',') + ' are necessary (' + p + ')');
      }
    });
  }

  /**
   * Create a new zone: write its data file with an initial A record, append a
   * zone clause to the config file, then reload bind.
   * @param {String} domain
   * @param {String} ip_address
   * @param {Function} cb_error - called with an Error on failure, nothing on success.
   */
  add_zone(domain, ip_address, cb_error) {
    cb_error = cb_error || function () {};
    this.create_data_file(domain, ip_address, 0, error => { // ttl 0 -> template default
      if (error) {
        cb_error(error);
        return;
      }
      // A config read error is only a warning: a missing file is treated as empty.
      fs.readFile(resolve(this.params.config_file), 'utf8', (error, data) => {
        if (error) {
          console.warn(error.message || error);
        }
        data = data || '';
        if (this._findDomain(data, domain) >= 0) {
          cb_error(new Error('Such zone has been created already'));
          return;
        }
        const params = Object.assign({}, this.params, {
          domain,
          data_folder: resolve(this.params.data_folder)
        });
        data += subst(ZONE_REC, params);
        fs.writeFile(resolve(this.params.config_file), data, error => {
          if (error) {
            cb_error(error);
            return;
          }
          setTimeout(() => {
            this.reload();
            cb_error();
            console.log('added domain', domain, ip_address);
          }, 1);
        });
      });
    });
  }

  /**
   * Remove a zone clause from the config file and delete its data file.
   * @param {String} domain
   * @param {Function} cb_error
   */
  del_zone(domain, cb_error) {
    cb_error = cb_error || function () {};
    fs.readFile(resolve(this.params.config_file), 'utf8', (error, data) => {
      if (error) {
        cb_error(error);
        return;
      }
      const start = this._findDomain(data, domain);
      if (start < 0) {
        // Kept as a plain string (not an Error) for backward compatibility.
        cb_error("Can't find domain " + domain);
        return;
      }
      // Drop the single config line that declares the zone.
      data = data.substr(0, start) + data.substr(data.indexOf('\n', start) + 1);
      fs.writeFile(resolve(this.params.config_file), data, error => {
        if (error) {
          cb_error(error);
          return;
        }
        console.log('deleted domain', domain);
        fs.unlink(resolve(this.params.data_folder + '/' + domain), error => {
          if (error) console.warn('Warning: domain file is not deleted!');
          cb_error(this.reload());
        });
      });
    });
  }

  /**
   * Add or replace a DNS record in the zone's data file and bump the SOA serial.
   * Supported types: A, AAAA, CNAME (see DATA_TYPE_REC templates).
   * @param {String} domain
   * @param {String} type
   * @param {Object} params - {name, data, ttl?}; ttl defaults to ''.
   * @param {Function} cb_error
   */
  make_record(domain, type, params, cb_error) {
    cb_error = cb_error || function () {};
    if (!(type in DATA_TYPE_REC)) {
      cb_error(new Error('Unknown record type'));
      return;
    }
    // BUGFIX: this validation was dead code (its condition only held for types
    // already rejected above). Every template substitutes ${name} and ${data},
    // so both are required for all record types.
    if (!params || !params.name || !params.data) {
      cb_error(new Error('Wrong params (name or data)'));
      return;
    }
    const _this = this;
    params = Object.assign({ ttl: '' }, params);
    // Renamed from `path` to avoid shadowing the `path` module.
    const file_path = resolve(this.params.data_folder + '/' + domain);
    fs.readFile(file_path, 'utf8', (error, data) => {
      if (error) {
        cb_error(error);
        return;
      }
      // Search for an existing record of the same type to replace.
      const reg = new RegExp('^\\S*\\s+IN\\s+' + type + '\\s+.*$', 'gm');
      let match;
      while ((match = reg.exec(data))) {
        switch (type) {
          case 'A':
          case 'AAAA':
            if (match[0].startsWith(domain)) { // only one address record is kept
              save(data, match);
              return;
            }
            break;
          case 'CNAME':
            // BUGFIX: in the original this comparison sat between a `break`
            // and the `case 'CNAME':` label and could never execute, so CNAME
            // records were always appended instead of replaced.
            if (match[0].startsWith(params.name)) {
              save(data, match);
              return;
            }
            break;
        }
      }
      // No matching record: append a new one.
      save(data);
    });

    // Writes `data` back, replacing the matched line (if any) with the new
    // record, and bumps the SOA serial so secondaries notice the change.
    function save(data, match) {
      const new_data = subst(DATA_TYPE_REC[type], Object.assign({}, _this.params, params));
      if (match) {
        data = data.substr(0, match.index) + new_data + data.substr(match.index + match[0].length + 1);
      } else {
        data += new_data;
      }
      data = data.replace(REG_SERIAL, (m, p1) => m.replace(p1, getDATE(p1)));
      fs.writeFile(file_path, data, error => {
        if (!error) _this.reload();
        cb_error(error);
      });
    }
  }

  // ----------------------------

  // Index of the zone clause for `domain` in the config text, or -1.
  _findDomain(data, domain) {
    return data.indexOf(subst(ZONE_START, Object.assign({}, this.params, { domain })));
  }

  // Write a fresh zone data file containing the SOA/NS header and one A record.
  create_data_file(domain, ip_address, ttl, cb_error) {
    cb_error = cb_error || function () {};
    const params = Object.assign({}, this.params, {
      ttl: ttl || '',
      name: domain,
      data: ip_address,
      serial: getDATE(), // date in iso format 2018111908
      data_folder: resolve(this.params.data_folder)
    });
    const data = subst(DATA_HEAD + DATA_TYPE_REC['A'], params);
    fs.writeFile(resolve(this.params.data_folder + '/' + domain), data, error => {
      if (error) {
        cb_error(error);
        return;
      }
      cb_error();
    });
  }

  // Ask the running BIND daemon to re-read its configuration; errors are
  // logged, not propagated (returns undefined either way).
  reload() {
    try {
      child_process.execSync('service bind9 reload');
    } catch (e) {
      console.error(e);
    }
  }
}
// Replaces every ${key} placeholder in `str` with the matching value from
// `context`; placeholders without a context entry are left untouched.
function subst(str, context) {
  return Object.keys(context).reduce(
    (result, key) => result.split('${' + key + '}').join(context[key]),
    str
  );
}
function resolve(filepath) {
if (filepath[0] === '~') {
return path.join(process.env.HOME, filepath.slice(1));
}
return filepath;
}
// Produces a zone serial: the current UTC timestamp compressed to ten digits
// (YYYYMMDDHH). If that does not advance past `old_value`, returns the old
// serial incremented by one instead (as a number rather than a string).
function getDATE(old_value) {
  const stamp = new Date().toISOString().replace(/\D/g, '').substr(0, 10);
  return parseInt(stamp) <= parseInt(old_value) ? parseInt(old_value) + 1 : stamp;
}
module.exports = ZonesDB; |
/**
*
*/
package models;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import javax.persistence.Entity;
import javax.persistence.CascadeType;
import javax.persistence.JoinColumn;
import javax.persistence.JoinTable;
import javax.persistence.OneToMany;
import javax.persistence.ManyToMany;
/**
* @author allen
*
*/
@Entity
public class Event extends Post {
/**
*
*/
private static final long serialVersionUID = 1L;
public boolean isClosed; // whether sign-ups for the event are closed
public Integer capacity; // maximum number of confirmed users (compared in isFull())
public Date time;
public String location;
// @OneToOne
// public MapLocation mapLocation;
// No-arg constructor; delegates to Post's default constructor.
public Event() {
super();
}
// Plain accessors for the venue text and event time.
public String getLocation() {
return location;
}
public void setLocation(String location) {
this.location = location;
}
public Date getTime() {
return time;
}
public void setTime(Date time) {
this.time = time;
}
// Users holding a confirmed place at the event (join table CMF_JOIN).
@ManyToMany
@JoinTable(
name="CMF_JOIN",
joinColumns={@JoinColumn(name="USER_ID", referencedColumnName="id")},
inverseJoinColumns={@JoinColumn(name="EVENT_ID", referencedColumnName="id")})
public List<User> confirmedUsers = new ArrayList<User>();
// Users queued for a place (join table WAT_JOIN).
@ManyToMany
@JoinTable(
name="WAT_JOIN",
joinColumns={@JoinColumn(name="USER_ID", referencedColumnName="id")},
inverseJoinColumns={@JoinColumn(name="EVENT_ID", referencedColumnName="id")})
public List<User> onWaitingListUsers = new ArrayList<User>();
// NOTE(review): uses the default JPA join table, unlike the two mappings above — confirm intended.
@ManyToMany()
public List<User> members = new ArrayList<User>();
// @ManyToMany()
// public List<User> waitingMembers = new ArrayList<User>();
// Constructor taking sign-up state, capacity, location and attendee lists;
// marks the post type as "event".
public Event(boolean isClosed, Integer capacity, MapLocation mapLocation,
List<User> confirmedUsers, List<User> onWaitingListUsers) {
super();
this.isClosed = isClosed;
this.capacity = capacity;
this.mapLocation = mapLocation;
this.confirmedUsers = confirmedUsers;
this.onWaitingListUsers = onWaitingListUsers;
this.postType = "event";
}
/**
 * Full-field constructor. Delegates to the Post super constructor and then
 * re-assigns the shared fields (mirroring the original implementation).
 */
public Event(String title, String description, String postContent,
        MapLocation mapLocation, Date postingDate, User sender,
        PostContent content, List<String> tags, Integer capacity) {
    super(title, description, postContent, mapLocation, postingDate, sender,
            content, tags);
    this.title = title;
    this.description = description;
    this.mapLocation = mapLocation;
    this.postingDate = postingDate;
    this.sender = sender;
    this.content = content;
    this.tags = tags;
    this.rating = 0.0;
    // BUGFIX: the capacity argument was previously discarded and the field
    // hard-coded to 0; honour the argument, defaulting to 0 when null.
    this.capacity = (capacity != null) ? capacity : 0;
    this.postType = "event";
}
/**
 * Constructor without post content; assigns fields directly (implicit
 * no-arg super constructor, as in the original).
 */
public Event(String title, String description, MapLocation mapLocation,
        Date postingDate, User sender, PostContent content,
        List<String> tags, Integer capacity) {
    this.title = title;
    this.description = description;
    this.mapLocation = mapLocation;
    this.postingDate = postingDate;
    this.sender = sender;
    this.content = content;
    this.tags = tags;
    this.rating = 0.0;
    // BUGFIX: the capacity argument was previously discarded and the field
    // hard-coded to 0; honour the argument, defaulting to 0 when null.
    this.capacity = (capacity != null) ? capacity : 0;
    this.postType = "event";
}
// Plain accessors for sign-up state and capacity.
public boolean isClosed() {
return isClosed;
}
public void setClosed(boolean isClosed) {
this.isClosed = isClosed;
}
public Integer getCapacity() {
return capacity;
}
public void setCapacity(Integer capacity) {
this.capacity = capacity;
}
public List<User> getConfirmedUsers() {
return confirmedUsers;
}
public void setConfirmedUsers(List<User> confirmedUsers) {
this.confirmedUsers = confirmedUsers;
}
public List<User> getOnWaitingListUsers() {
return onWaitingListUsers;
}
public void setOnWaitingListUsers(List<User> onWaitingListUsers) {
this.onWaitingListUsers = onWaitingListUsers;
}
public static long getSerialversionuid() {
return serialVersionUID;
}
/**
 * Reloads this event from the database and reports whether the number of
 * confirmed users has reached the configured capacity.
 * Returns false when the event has no id yet or cannot be found.
 */
public boolean isFull() {
if (this.id == null) {
return false;
}
Event persisted = Event.findById(this.id);
if (persisted == null) {
return false;
}
if (persisted.confirmedUsers.size() < persisted.capacity) {
System.out.println("#not full");
return false;
}
System.out.println("#is full");
return true;
}
/** @return general members of the event. */
public List<User> getMembers() {
return members;
}
public void setMembers(List<User> members) {
this.members = members;
}
@Override
public String toString() {
// Same field order and formatting as before, assembled with a StringBuilder.
StringBuilder sb = new StringBuilder("Event [");
sb.append("isClosed=").append(isClosed);
sb.append(", capacity=").append(capacity);
sb.append(", time=").append(time);
sb.append(", location=").append(location);
sb.append(", confirmedUsers=").append(confirmedUsers);
sb.append(", onWaitingListUsers=").append(onWaitingListUsers);
sb.append(", members=").append(members);
sb.append(", title=").append(title);
sb.append(", description=").append(description);
sb.append(", postingDate=").append(postingDate);
sb.append(", postContent=").append(postContent);
sb.append(", rating=").append(rating);
sb.append(", postType=").append(postType);
sb.append(", mapLocation=").append(mapLocation);
sb.append(", content=").append(content);
sb.append(", sender=").append(sender);
sb.append(", tags=").append(tags);
sb.append(", comments=").append(comments);
sb.append(", id=").append(id);
return sb.append("]").toString();
}
/**
 * Checks whether the given user already holds a confirmed spot in this event.
 *
 * @param userId id of the user to check; null yields false
 * @return true when the persisted event's confirmed list contains the user
 */
public boolean isConfirmed(Long userId) {
if (userId == null) {
return false;
}
User user = User.findById(userId);
Event post = Event.findById(this.id);
// Bug fix: previously dereferenced `post` without a null check even though
// findById is nullable (isFull() guards against exactly that).
if (user == null || post == null) {
return false;
}
if (post.confirmedUsers.contains(user)) {
System.out.println("###You are already confirmed");
return true;
}
System.out.println("###You haven't signed up");
return false;
}
/**
 * Checks whether the given user is already on this event's waiting list.
 *
 * @param userId id of the user to check; null yields false
 * @return true when the persisted event's waiting list contains the user
 */
public boolean isOnWaiting(Long userId) {
if (userId == null) {
return false;
}
User user = User.findById(userId);
Event post = Event.findById(this.id);
// Bug fix: previously dereferenced `post` without a null check even though
// findById is nullable (isFull() guards against exactly that).
if (user == null || post == null) {
return false;
}
if (post.onWaitingListUsers.contains(user)) {
System.out.println("###You are already on waiting list");
return true;
}
System.out.println("###You haven't signed up");
return false;
}
}
|
#!/bin/bash
# Starts the Kafka service on an HDF sandbox and (re)creates the trucking
# demo topics.
scriptDir="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)"
# Start via Ambari
# NOTE(review): sandbox default credentials (admin:admin) are hard-coded;
# fine for the sandbox, do not reuse elsewhere.
echo "Starting the Kafka service"
curl -u admin:admin -H "X-Requested-By: ambari" -X PUT -d '{"RequestInfo": {"context" :"Start Kafka"}, "Body": {"ServiceInfo": {"state": "STARTED"}}}' http://sandbox-hdf.hortonworks.com:8080/api/v1/clusters/Sandbox/services/KAFKA | python $scriptDir/wait-until-done.py
# Start via local package
#$(find / -type f -wholename '/usr/hd*/kafka' -print -quit 2> /dev/null) start
# Locate kafka-topics.sh wherever the HDP/HDF package installed it.
kafkaTopicsSh=$(find / -type f -wholename '/usr/hd*/kafka-topics.sh' -print -quit 2> /dev/null)
#echo "Deleting existing Kafka topics"
# Deletions are best effort (stderr suppressed); they only take effect if the
# broker has delete.topic.enable=true — TODO confirm on the sandbox.
$kafkaTopicsSh --zookeeper sandbox-hdf.hortonworks.com:2181 --delete --topic trucking_data_truck 2> /dev/null
$kafkaTopicsSh --zookeeper sandbox-hdf.hortonworks.com:2181 --delete --topic trucking_data_traffic 2> /dev/null
$kafkaTopicsSh --zookeeper sandbox-hdf.hortonworks.com:2181 --delete --topic trucking_data_joined 2> /dev/null
$kafkaTopicsSh --zookeeper sandbox-hdf.hortonworks.com:2181 --delete --topic trucking_data_driverstats 2> /dev/null
echo "Creating Kafka topics"
# Single-broker sandbox: replication factor and partitions fixed at 1.
$kafkaTopicsSh --create --zookeeper sandbox-hdf.hortonworks.com:2181 --replication-factor 1 --partitions 1 --topic trucking_data_truck
$kafkaTopicsSh --create --zookeeper sandbox-hdf.hortonworks.com:2181 --replication-factor 1 --partitions 1 --topic trucking_data_traffic
$kafkaTopicsSh --create --zookeeper sandbox-hdf.hortonworks.com:2181 --replication-factor 1 --partitions 1 --topic trucking_data_joined
$kafkaTopicsSh --create --zookeeper sandbox-hdf.hortonworks.com:2181 --replication-factor 1 --partitions 1 --topic trucking_data_driverstats
|
#!/usr/bin/env bash
# Bundle components/modules
# NOTE(review): assumes `gulp` and the npm script exist in this package —
# verify against package.json / local devDependencies.
gulp dev-aws-1
npm run start-developing-aws-1
#!/usr/bin/env bash
# Creates an Azure DevOps pipeline plus its IaC variable groups via the az CLI.
# Fail fast: abort on errors, failed pipeline stages and unset variables.
set -o errexit
set -o pipefail
set -o nounset
SCRIPT_VERSION=0.1.0
# Required external tools to be available on PATH.
REQUIRED_TOOLS=("az")
# syslog-style level names indexed by severity (0=emerg ... 7=debug).
LOG_LEVELS=([0]="emerg" [1]="alert" [2]="crit" [3]="err" [4]="warning" [5]="notice" [6]="info" [7]="debug")
# .log LEVEL MESSAGE...
# Prints "[levelname] MESSAGE" when the global __VERBOSE threshold is at
# least LEVEL. Severe messages (emerg..err, i.e. LEVEL <= 3) go to stderr,
# everything else to stdout.
# Bug fix: the test was `-ge 3`, which sent warning/notice/info/debug to
# stderr and made the stdout branch apply only to emerg/alert/crit — the
# opposite of the intended severity split.
function .log() {
  local LEVEL=${1}
  shift
  if [ ${__VERBOSE} -ge ${LEVEL} ]; then
    if [ ${LEVEL} -le 3 ]; then
      echo "[${LOG_LEVELS[$LEVEL]}]" "$@" 1>&2
    else
      echo "[${LOG_LEVELS[$LEVEL]}]" "$@"
    fi
  fi
}
# Default log verbosity: info (6), overridable from the environment.
__VERBOSE=${__VERBOSE:=6}
# check_tools TOOL...
# Verifies every named command is executable on PATH; exits 1 if any is missing.
function check_tools() {
    local tools=("$@")
    local errors_count=0
    for cmd in "${tools[@]}"
    do
        if ! [[ -x "$(command -v ${cmd})" ]]; then
            .log 3 "${cmd} is required and was not found in PATH."
            errors_count=$((errors_count + 1))
        else
            .log 6 "Found '${cmd}' in path"
        fi
    done
    # Report all missing tools before aborting.
    if [ ${errors_count} -gt 0 ]; then
        exit 1
    fi
}
# Prints the CLI help text and exits with status 1.
function usage() {
    echo ""
    echo "Usage: $0 [-t <pat_token>] -p <pipeline_name> -f <yaml_file> [-h]" 1>&2
    echo "Version: ${SCRIPT_VERSION}"
    echo ""
    echo "Options"
    echo "-t <pat_token>          Personal Access Token that should be used for creating the pipeline."
    echo "                        OR: AZURE_DEVOPS_CLI_PAT environment variable."
    echo "-o <organization>       Azure DevOps org, e.g. 'https://dev.azure.com/contoso/'"
    echo "                        OR: AZURE_DEVOPS_ORGANIZATION environment variable."
    echo "-p <project_name>       Name of the Azure DevOps project, e.g. 'MyProject'."
    echo "                        OR: AZURE_DEVOPS_PROJECT environment variable."
    echo "-x <prefix>             Short 2-10 letters prefix for the project, e.g. 'proj'."
    echo "                        OR: TF_VAR_prefix environment variable."
    echo "-g <git_repo_name>      Name of the Azure DevOps git repo, e.g. 'myrepo'."
    echo "                        OR: AZURE_DEVOPS_GIT_REPO environment variable."
    echo "-n <pipeline_name>      Name of the pipeline to create, e.g. 'MyPipeline'."
    echo "-f <yaml_file>          YAML definition location relative to the repository root, e.g. './pipeline.yaml'."
    echo "-c <service_connection> The service connection name that should be used."
    echo "                        OR: AZURE_DEVOPS_SERVICE_CONNECTION_NAME environment variable."
    echo "-h                      Help: Print this dialog and exit."
    echo ""
    exit 1
}
# =============================================
# Check Options
# Defaults come from the environment; NOTE(review): `:=` also *assigns* the
# default back into the environment variable (vs `:-`) — presumably
# intentional, confirm.
t=${AZURE_DEVOPS_CLI_PAT:=""}
o=${AZURE_DEVOPS_ORGANIZATION:=""}
p=${AZURE_DEVOPS_PROJECT:=""}
x=${TF_VAR_prefix:=""}
g=${AZURE_DEVOPS_GIT_REPO:=""}
c=${AZURE_DEVOPS_SERVICE_CONNECTION_NAME:=""}
n=""
f=""
# Command-line flags override the environment defaults above.
while getopts ":t:o:p:x:g:n:f:c:h" z; do
    case "${z}" in
        t)
            t=${OPTARG}
            ;;
        o)
            o=${OPTARG}
            ;;
        p)
            p=${OPTARG}
            ;;
        x)
            x=${OPTARG}
            ;;
        g)
            g=${OPTARG}
            ;;
        n)
            n=${OPTARG}
            ;;
        f)
            f=${OPTARG}
            ;;
        c) c=${OPTARG}
            ;;
        h)
            usage
            ;;
        *)
            usage
            ;;
    esac
done
shift $((OPTIND - 1))
# Validate that every required setting has a value; report all errors at once.
opt_errors_count=0
if [ -z "${t}" ]; then
    .log 3 "PAT Token Missing"
    opt_errors_count=$((opt_errors_count + 1))
fi
if [ -z "${o}" ]; then
    .log 3 "Organization missing."
    opt_errors_count=$((opt_errors_count + 1))
fi
if [ -z "${p}" ]; then
    .log 3 "Project name missing."
    opt_errors_count=$((opt_errors_count + 1))
fi
if [ -z "${x}" ]; then
    .log 3 "Prefix is missing."
    opt_errors_count=$((opt_errors_count + 1))
fi
if [ -z "${g}" ]; then
    .log 3 "Git Repo name missing."
    opt_errors_count=$((opt_errors_count + 1))
fi
if [ -z "${n}" ]; then
    .log 3 "Pipeline Name missing."
    opt_errors_count=$((opt_errors_count + 1))
fi
if [ -z "${f}" ]; then
    .log 3 "YAML file missing."
    opt_errors_count=$((opt_errors_count + 1))
fi
if [ -z "${c}" ]; then
    .log 3 "Service Connection Name is missing."
    opt_errors_count=$((opt_errors_count + 1))
fi
if [ ${opt_errors_count} -gt 0 ]; then
    usage
fi
.log 6 "[==== Check Required Tools ====]"
.log 6 "Found 'bash' (version: ${BASH_VERSION})"
check_tools "${REQUIRED_TOOLS[@]}"
.log 6 "[==== Login to Azure DevOps ====]"
# Feed the PAT to `az devops login` on stdin so it never appears in argv.
echo ${t} | az devops login
.log 6 "[==== Creating Pipeline ${n}... (first run is *not* triggered) ====]"
az devops configure --defaults organization="${o}" project="${p}" --use-git-aliases true
# NOTE: There is a bug in the YAML variable group authorization path:
# https://developercommunity.visualstudio.com/content/problem/729324/variables-group-in-yml-cannot-be-authorized-from-p.html
# As there is no sensible information in the variable groups we can safely use '--authorize true' for the moment.
az pipelines variable-group create --name "IaC_Shared_Variables" --authorize true --variables TF_VAR_prefix=${x} Service_Connection_Name="${c}"
az pipelines variable-group create --name "IaC_Terraform_Backend_Variables" --authorize true --variables __TF_backend_resource_group_name= __TF_backend_location= __TF_backend_storage_account_name= __TF_backend_storage_container_name=
az pipelines create --name "${n}" --repository "${g}" --branch master --yml-path "${f}" --repository-type tfsgit --skip-first-run
.log 6 "[==== All done. ====]"
chmod 777 -R . |
import express from 'express';
import {
getArticles,
getAllArticles,
getArticleDetail,
deleteArticle,
addTag,
removeTag,
} from '../controller/controller.article';
// Article API routes. Note: deletion and tag mutations use POST endpoints
// rather than DELETE/PUT verbs.
const router = express.Router();
// All articles on the site.
router.get('/article/all', getAllArticles);
// Articles belonging to the requesting user.
router.get('/article/user', getArticles);
// Single article by id.
router.get('/article/:id', getArticleDetail);
router.post('/article/delete', deleteArticle);
router.post('/article/tag/add', addTag);
router.post('/article/tag/remove', removeTag);
export default router;
|
import React, {PropTypes} from 'react';
import {observer, inject} from 'mobx-react';
import Select from 'components/lib/Select';
const Option = Select.Option;
import styles from './index.less';
function DateSelect({consumeStore, type}) {
const handleSelect = (value) => {
consumeStore.updateValue(`${type}.mothFilter`, value);
};
const createOption = () => {
const timeArr = [
{name: '最近一个月订单', value: 'one'},
{name: '最近两个月订单', value: 'two'},
{name: '最近三个月订单', value: 'three'},
{name: '最近六个月订单', value: 'six'},
{name: '最近一年订单', value: 'year'},
{name: '全部', value: 'all'},
];
return timeArr.map(({name, value}) => {
return (<Option key={value} value={value}>{name}</Option>);
});
};
return (
<div className={styles['date-select']}>
<div className={styles['main-cont']}>
<Select
placeholder="订单日期"
value={consumeStore[type].mothFilter}
width="130px"
onChange={handleSelect}
className={styles.select}
>
{createOption()}
</Select>
</div>
</div>
);
}
DateSelect.propTypes = {
  consumeStore: PropTypes.object,
  // `type` selects which store slice (`<type>.mothFilter`) is read/updated;
  // it was used by the component but missing from propTypes.
  type: PropTypes.string,
};

// NOTE(review): `PropTypes` is imported from 'react' at the top of this file,
// which is removed in React >= 16 — migrate to the 'prop-types' package when
// upgrading.
export default inject('consumeStore')(observer(DateSelect));
|
import { MR } from "./movieRatingConstants"
// Shape of the movie-rating report slice: the fetched report rows, the
// column definitions, and whether a report request has been submitted.
const initialState = {
  dataSet: [],
  columns: [],
  submittedReportRequest: false,
};
// Initial state for the per-user ratings form.
export const initialRatingsFormUserState = {
  user: '',
};
// Reducer for the movie-rating report slice. On UPDATE_DATASET it stores the
// new rows/columns and marks a report request as submitted; every other
// action leaves the state untouched.
const movieRatingReducer = (state = initialState, action) => {
  if (action.type === MR.UPDATE_DATASET) {
    return {
      ...state,
      dataSet: action.payload.dataSet,
      columns: action.payload.columns,
      submittedReportRequest: true,
    };
  }
  return state;
};
export default movieRatingReducer; |
import { Injectable } from '@nestjs/common';
import { Repository, DeepPartial } from 'typeorm';
import { OAuthClient } from './oauth-client.entity';
import { InjectRepository } from '@nestjs/typeorm';
import { OAuthCode } from './oauth-code.entity';
import { AuthorizationCode } from 'oauth2-server';
@Injectable()
export class OAuthCodeService {
  constructor(
    @InjectRepository(OAuthCode)
    private readonly repo: Repository<OAuthCode>,
  ) {}

  /**
   * Persists an authorization code together with its client/user references.
   * Scope is normalized to an array (comma-separated string accepted).
   * Returns the input code unchanged.
   */
  async save (code: AuthorizationCode) {
    await this.repo.save({
      authorizationCode: code.authorizationCode,
      redirectUri: code.redirectUri,
      client: { id: code.client.id },
      user: { id: code.user.id },
      expiresAt: code.expiresAt,
      scope: Array.isArray(code.scope) ? code.scope : code.scope.split(',')
    })
    return code
  }

  /**
   * Consumes an authorization code: looks it up (with user/client relations),
   * deletes it so it can never be replayed, and returns it only if it has not
   * expired yet. Returns null for unknown or expired codes.
   */
  async findAndDelete(code: string): Promise<OAuthCode | null> {
    const authCode = await this.repo.findOne(code, { relations: ['user', 'client'] })
    // Bug fix: an unknown code previously reached repo.remove(undefined) /
    // authCode.expiresAt and crashed with a TypeError.
    if (!authCode) {
      return null
    }
    await this.repo.remove(authCode)
    return authCode.expiresAt.getTime() > Date.now() ? authCode : null
  }
}
|
from scipy.optimize import fmin


def f(point):
    """Quadratic form 3x^2 + 2xy + 5y^2 for point = (x, y).

    The form is positive definite, so its global minimum is 0 at the origin
    (it is unbounded above and has no maximum).
    """
    x, y = point
    return 3 * x**2 + 2 * x * y + 5 * y**2


# Bug fixes versus the original:
# * fmin(func, x0, args) minimizes `func(x0, *args)` where x0 is array-like;
#   the old call fmin(f, 0, 0) passed 0 as `args` and f(x, y) could not be
#   invoked with a single packed argument.
# * fmin *minimizes*, so the message now reports the minimum, not a "maximum".
x, y = fmin(f, (1.0, 1.0), disp=False)
print(f'The minimum of f(x,y) is {f((x, y))} at (x, y) = ({x:0.5f}, {y:0.5f})')
# Installs and starts a systemd unit for the Habitat Supervisor, forwarding
# any HTTP(S) proxy settings from the current environment into the unit.
mkdir -p /etc/systemd/system
environment_proxy=""
if [ ! -z "$HTTP_PROXY" ]; then
  environment_proxy="\"HTTP_PROXY=${HTTP_PROXY}\" "
fi
# NOTE(review): this overwrites (not appends to) the HTTP_PROXY entry above,
# so only HTTPS_PROXY survives when both are set — confirm this is intended.
if [ ! -z "$HTTPS_PROXY" ]; then
  environment_proxy="\"HTTPS_PROXY=${HTTPS_PROXY}\" "
fi
if [ ! -z "${environment_proxy}" ]; then
  environment_proxy="Environment=${environment_proxy}"
fi
# Render the unit file; SSL_CERT_FILE is resolved at service start from the
# installed core/cacerts package.
cat <<EOT > /etc/systemd/system/hab-sup.service
[Unit]
Description=Habitat Supervisor

[Service]
ExecStartPre=/bin/bash -c "/bin/systemctl set-environment SSL_CERT_FILE=$(hab pkg path core/cacerts)/ssl/cert.pem"
ExecStart=/bin/hab run
${environment_proxy}

[Install]
WantedBy=default.target
EOT
systemctl daemon-reload
systemctl start hab-sup

# wait for the sup to come up before proceeding.
until hab svc status > /dev/null 2>&1; do
  sleep 1
done
<gh_stars>1-10
package com.aqzscn.www.global.controller;
import com.aqzscn.www.global.config.validation.ValidationGroup1;
import com.aqzscn.www.global.config.validation.ValidationGroup2;
import com.aqzscn.www.global.domain.co.AppException;
import com.aqzscn.www.global.domain.dto.MyPage;
import com.aqzscn.www.global.domain.dto.PageRequest;
import com.aqzscn.www.global.domain.dto.ReturnVo;
import com.aqzscn.www.global.mapper.Dispatch;
import com.aqzscn.www.global.service.DispatchService;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.validation.BindingResult;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
@RestController
@Api(tags = "请求中转服务")
@RequestMapping("/g")
public class DispatchController extends BaseController {

    private final DispatchService dispatchService;

    public DispatchController(HttpServletRequest request, HttpServletResponse response, DispatchService dispatchService) {
        super(request, response);
        this.dispatchService = dispatchService;
    }

    /**
     * Throws an AppException aggregating every binding error when validation
     * failed; no-op otherwise. Extracted from the four mutating endpoints
     * that previously duplicated this check verbatim.
     */
    private void requireValid(BindingResult result) {
        if (result.hasErrors()) {
            throw AppException.of(result.getAllErrors());
        }
    }

    /** Returns a page of configured dispatch (relay) services. */
    @ApiOperation("获取中转服务列表")
    @GetMapping("/dispatches")
    public ReturnVo selectDispatches(@Validated(ValidationGroup2.class) PageRequest pageRequest) throws RuntimeException {
        ReturnVo vo = new ReturnVo();
        MyPage page = dispatchService.selectDispatch(pageRequest.getPageNum(), pageRequest.getPageSize());
        vo.setData(page);
        return vo;
    }

    /** Creates a new dispatch service. */
    @ApiOperation("新增中转服务")
    @PostMapping("/dispatches")
    public ReturnVo insertDispatch(@Validated(ValidationGroup2.class) @RequestBody Dispatch dispatch, BindingResult result) throws RuntimeException {
        requireValid(result);
        return this.response(this.dispatchService.insertDispatch(dispatch));
    }

    /** Updates an existing dispatch service. */
    @ApiOperation("修改中转服务")
    @PutMapping("/dispatches")
    public ReturnVo updateDispatch(@Validated(ValidationGroup1.class) @RequestBody Dispatch dispatch, BindingResult result) throws RuntimeException {
        requireValid(result);
        return this.response(this.dispatchService.updateDispatch(dispatch));
    }

    /** Deletes a dispatch service. */
    @ApiOperation("删除中转服务")
    @DeleteMapping("/dispatches")
    public ReturnVo deleteDispatch(@Validated(ValidationGroup1.class) @RequestBody Dispatch dispatch, BindingResult result) throws RuntimeException {
        requireValid(result);
        return this.response(this.dispatchService.deleteDispatch(dispatch));
    }

    /** Toggles which dispatch service is active. */
    @ApiOperation("切换中转服务")
    @PutMapping("/dispatch-status")
    public ReturnVo switchService(@Validated(ValidationGroup1.class) @RequestBody Dispatch dispatch, BindingResult result) throws RuntimeException {
        requireValid(result);
        return this.response(this.dispatchService.switchDispatch(dispatch));
    }

//    @ApiOperation("中转post请求(仅支持POST JSON数据)")
//    @PostMapping("/dispatch/**")
//    public String post(@RequestBody String json) throws RuntimeException {
//        ObjectMapper objectMapper = new ObjectMapper();
//        try {
//            Dispatch dispatch = GlobalCaches.DISPATCH;
//            // Check whether a dispatch (relay) service is currently active.
//            if (dispatch == null) {
//                throw AppException.of("当前没有激活的转发服务,请激活后使用!");
//            }
//            // Check whether the request payload needs preprocessing.
//            String reqBody = json;
//            if (StringUtils.isNotBlank(dispatch.getReqTargetParam())) {
//                // Optionally extract a nested field from the request JSON (first level only).
//                JsonNode node = objectMapper.readTree(json);
//                String objStr = objectMapper.writeValueAsString(node.get(dispatch.getReqTargetParam()));
//                if (StringUtils.isNotBlank(dispatch.getReqPrefix())) {
//                    reqBody = "jsonRest=" + objStr;
//                } else {
//                    reqBody = objStr;
//                }
//            }
//            // Forward to the configured target URL.
//            ResponseEntity<String> responseEntity = this.restTemplate.postForEntity(dispatch.getServiceUrl(), reqBody, String.class);
//            // Check whether the response needs post-processing.
//            if (StringUtils.isNotBlank(dispatch.getResBody()) && StringUtils.isNotBlank(dispatch.getResDataKey())) {
//                JsonNode node = objectMapper.readTree(dispatch.getResBody());
//                Map<String, Object> map = new HashMap<>();
//                Iterator<String> names = node.fieldNames();
//                while (names.hasNext()) {
//                    String name = names.next();
//                    if (name.equals(dispatch.getResDataKey())) {
//                        map.put(name, objectMapper.readTree(responseEntity.getBody()));
//                    } else {
//                        map.put(name, node.get(name));
//                    }
//                }
//                return objectMapper.writeValueAsString(map);
//            } else {
//                return responseEntity.getBody();
//            }
//        } catch (Exception e) {
//            throw AppException.of(e.getMessage());
//        }
//    }
}
|
#!/usr/bin/env bash
# Updates a Cloudflare A record with the host's current external/internal IP.
export PATH=/sbin:/opt/bin:/usr/local/bin:/usr/contrib/bin:/bin:/usr/bin:/usr/sbin:/usr/bin/X11

### Create .update-cloudflare-dns.log file of the last run for debug
parent_path="$(dirname "${BASH_SOURCE[0]}")"
FILE=${parent_path}/update-cloudflare-dns.log
if ! [ -x "$FILE" ]; then
  touch "$FILE"
fi
LOG_FILE=${parent_path}'/update-cloudflare-dns.log'

### Write last run of STDOUT & STDERR as log file and prints to screen
exec > >(tee $LOG_FILE) 2>&1
echo "==> $(date "+%Y-%m-%d %H:%M:%S")"

### Validate if config file exists
# NOTE(review): the script exits 0 on every error path, presumably to keep a
# cron/scheduler happy — confirm before changing to non-zero codes.
if ! source ${parent_path}/update-cloudflare-dns.conf; then
  echo 'Error! Missing "update-cloudflare-dns.conf or invalid syntax"!'
  exit 0
fi

### Check validity of "ttl" parameter
# Cloudflare accepts 120-7200 seconds, or 1 meaning "automatic".
if [ "${ttl}" -lt 120 ] || [ "${ttl}" -gt 7200 ] && [ "${ttl}" -ne 1 ]; then
  echo "Error! ttl out of range (120-7200) or not set to 1"
  exit
fi

### Check validity of "proxied" parameter
if [ "${proxied}" != "false" ] && [ "${proxied}" != "true" ]; then
  echo 'Error! Incorrect "proxied" parameter choose "true" or "false"'
  exit 0
fi

### Check validity of "what_ip" parameter
if [ "${what_ip}" != "external" ] && [ "${what_ip}" != "internal" ]; then
  echo 'Error! Incorrect "what_ip" parameter choose "external" or "internal"'
  exit 0
fi

### Check if set to internal ip and proxy
# Cloudflare cannot proxy traffic to a private address.
if [ "${what_ip}" == "internal" ] && [ "${proxied}" == "true" ]; then
  echo 'Error! Internal IP cannot be Proxied'
  exit 0
fi
### Get External ip from https://checkip.amazonaws.com
if [ "${what_ip}" == "external" ]; then
  ip=$(curl -s -X GET https://checkip.amazonaws.com --max-time 10)
  if [ -z "$ip" ]; then
    echo "Error! Can't get external ip from https://checkip.amazonaws.com"
    exit 0
  fi
  echo "==> External IP is: $ip"
fi

### Get Internal ip from primary interface
if [ "${what_ip}" == "internal" ]; then
  ### Check if "IP" command is present, get the ip from interface
  if which ip >/dev/null; then
    ### "ip route get" (linux)
    # The interface used to reach 1.1.1.1 is taken as the primary interface.
    interface=$(ip route get 1.1.1.1 | awk '/dev/ { print $5 }')
    ip=$(ip -o -4 addr show ${interface} scope global | awk '{print $4;}' | cut -d/ -f 1)
  ### if no "IP" command use "ifconfig", get the ip from interface
  else
    ### "route get" (macOS, Freebsd)
    interface=$(route get 1.1.1.1 | awk '/interface:/ { print $2 }')
    ip=$(ifconfig ${interface} | grep 'inet ' | awk '{print $2}')
  fi
  if [ -z "$ip" ]; then
    echo "Error! Can't read ip from ${interface}"
    exit 0
  fi
  echo "==> Internal ${interface} IP is: $ip"
fi
### Get IP address of DNS record from 1.1.1.1 DNS server when proxied is "false"
# With proxying off the public DNS answer is the record's real content, so a
# cheap DNS query avoids an API call.
if [ "${proxied}" == "false" ]; then
  ### Check if "nsloopup" command is present
  if which nslookup >/dev/null; then
    dns_record_ip=$(nslookup ${dns_record} 1.1.1.1 | awk '/Address/ { print $2 }' | sed -n '2p')
  else
    ### if no "nslookup" command use "host" command
    dns_record_ip=$(host -t A ${dns_record} 1.1.1.1 | awk '/has address/ { print $4 }' | sed -n '1p')
  fi
  if [ -z "$dns_record_ip" ]; then
    echo "Error! Can't resolve the ${dns_record} via 1.1.1.1 DNS server"
    exit 0
  fi
  is_proxed="${proxied}"
fi

### Get the dns record id and current proxy status from cloudflare's api when proxied is "true"
# With proxying on, public DNS returns Cloudflare edge IPs, so the real
# content must come from the API.
if [ "${proxied}" == "true" ]; then
  dns_record_info=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$zoneid/dns_records?name=$dns_record" \
    -H "Authorization: Bearer $cloudflare_zone_api_token" \
    -H "Content-Type: application/json")
  if [[ ${dns_record_info} == *"\"success\":false"* ]]; then
    echo ${dns_record_info}
    echo "Error! Can't get dns record info from cloudflare's api"
    exit 0
  fi
  is_proxed=$(echo ${dns_record_info} | grep -o '"proxied":[^,]*' | grep -o '[^:]*$')
  dns_record_ip=$(echo ${dns_record_info} | grep -o '"content":"[^"]*' | cut -d'"' -f 4)
fi
### Check if ip or proxy have changed
# Fix: both sides are now quoted — the previous unquoted test
# `[ ${dns_record_ip} == ${ip} ]` degenerated into a shell error whenever a
# lookup came back empty.
if [ "${dns_record_ip}" == "${ip}" ] && [ "${is_proxed}" == "${proxied}" ]; then
  # Fix: the tail of this message was previously outside the quotes; the
  # visible output is unchanged.
  echo "==> DNS record IP of ${dns_record} is ${dns_record_ip}, no changes needed. Exiting..."
  exit
fi

echo "==> DNS record of ${dns_record} is: ${dns_record_ip}. Trying to update..."

### Get the dns record information from cloudflare's api
cloudflare_record_info=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$zoneid/dns_records?name=$dns_record" \
  -H "Authorization: Bearer $cloudflare_zone_api_token" \
  -H "Content-Type: application/json")
if [[ ${cloudflare_record_info} == *"\"success\":false"* ]]; then
  echo "${cloudflare_record_info}"
  # Fix: "inforamiton" typo corrected in the error message.
  echo "Error! Can't get ${dns_record} record information from cloudflare API"
  exit 0
fi

### Get the dns record id from response
cloudflare_dns_record_id=$(echo "${cloudflare_record_info}" | grep -o '"id":"[^"]*' | cut -d'"' -f4)

### Push new dns record information to cloudflare's api
update_dns_record=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$zoneid/dns_records/$cloudflare_dns_record_id" \
  -H "Authorization: Bearer $cloudflare_zone_api_token" \
  -H "Content-Type: application/json" \
  --data "{\"type\":\"A\",\"name\":\"$dns_record\",\"content\":\"$ip\",\"ttl\":$ttl,\"proxied\":$proxied}")
if [[ ${update_dns_record} == *"\"success\":false"* ]]; then
  echo "${update_dns_record}"
  echo "Error! Update Failed"
  exit 0
fi

echo "==> Success!"
echo "==> $dns_record DNS Record Updated To: $ip, ttl: $ttl, proxied: $proxied"
### Telegram notification
if [ "${notify_me_telegram}" == "no" ]; then
  exit 0
fi

if [ "${notify_me_telegram}" == "yes" ]; then
  telegram_notification=$(
    curl -s -X GET "https://api.telegram.org/bot${telegram_bot_API_Token}/sendMessage?chat_id=${telegram_chat_id}" --data-urlencode "text=${dns_record} DNS record updated to: ${ip}"
  )
  # Fix: the original used ${telegram_notification=} — the assign-default
  # expansion (trailing '=') — where a plain expansion was intended.
  if [[ ${telegram_notification} == *"\"ok\":false"* ]]; then
    echo "${telegram_notification}"
    echo "Error! Telegram notification failed"
    exit 0
  fi
fi
|
<gh_stars>0
package com.gemmano.dtm.repositories;
import org.springframework.data.mongodb.repository.MongoRepository;
import com.gemmano.dtm.entities.DeviceData;
/**
 * Spring Data MongoDB repository for {@code DeviceData} documents keyed by a
 * String id; inherits all CRUD operations from {@link MongoRepository}.
 */
public interface DeviceDataRepository extends MongoRepository<DeviceData, String> {
}
|
<filename>src/data/countries.js
import germany from '../data/countries/germany.json';
import turkey from '../data/countries/turkey.json';
import brazil from '../data/countries/brazil.json';
// Aggregated country datasets, keyed by country name.
export default {
germany,
turkey,
brazil,
}
<reponame>majioa/knigodej<gh_stars>0
# Gem metadata for the knigodej package.
module Knigodej
VERSION = "0.0.1"
NAME = 'knigodej'
end
|
'use strict'
const path = require('path')
const debug = require('debug')('mojoin:Mojoin')
const Datasource = require('./datasource')
const Cache = require('./cache')
const Report = require('./report')
/**
* Sync all kinds of datasources into a central SQL database
* and perform queries/ joins on the gathered data
*/
class Mojoin {
  /**
   * Mojoin constructor
   *
   * @constructor
   * @param {object | array} config | datasources
   * @param {array} config.datasources - array of datasource objects
   * @param {object} config.cache - cache config object
   */
  constructor (config) {
    // TODO: input validation
    debug('start creating mojoin instance')
    // A bare array is treated as the datasource list itself.
    const sourceConfigs = Array.isArray(config) ? config : config.datasources
    const cacheConfig = config.cache || {
      type: 'sqlite',
      location: path.join(__dirname, '../db/cache.db')
    }
    this.cache = new Cache(cacheConfig)
    this.datasources = []
    for (const sourceConfig of sourceConfigs) {
      this.addDatasource(sourceConfig)
    }
    debug(
      'finished creating mojoin instance with cache: %s and %d datasources',
      this.cache.databaseName,
      this.datasources.length
    )
  }

  /**
   * adds a datasource to the Mojoion instance
   *
   * @param {object} datasource
   */
  addDatasource (datasource) {
    datasource.cache = this.cache
    this.datasources.push(Datasource(datasource))
    debug('added %s datasource %s', datasource.type, datasource.name)
  }

  /**
   * syncronizes all datasources with the cache
   *
   * @returns {object} - syncResults
   */
  async syncAll () {
    const syncResults = []
    // Sequential on purpose: each datasource is initialized and synced in
    // turn before the next one starts.
    for (const source of this.datasources) {
      await source.init()
      syncResults.push(await source.syncDatasource())
    }
    debug('finished syncing %d datasources', syncResults.length)
    return syncResults
  }

  /**
   * queries the cache and returns the result
   *
   * @param {object} query - query object (see README.md for specification)
   * @returns {object} report
   */
  async generateReport (query) {
    const report = new Report({ cache: this.cache, query })
    const rows = await report.generate()
    debug('generated report with %d entries', rows.length)
    return rows
  }
}
|
#!/bin/sh
# Commits and pushes all working-tree changes with a given message/author.

# Resolve each setting from its positional parameter, falling back to the
# matching environment variable.
# Bug fix: the previous `[ -n "$1" ] && echo "$1" || [ -n "$VAR" ] && echo
# "$VAR" || echo ""` chains evaluate strictly left-to-right, so whenever BOTH
# the parameter and the environment variable were set, both echos ran and the
# captured value contained two lines. `${n:-$VAR}` has the intended semantics
# (empty/unset parameter falls back to the variable).
git_commit_message="${1:-$GIT_PUSH_COMMIT_MESSAGE}"
git_author_email="${2:-$GIT_PUSH_AUTHOR_EMAIL}"
git_author_name="${3:-$GIT_PUSH_AUTHOR_NAME}"

# Check that all parameters are given
if [ -z "$git_commit_message" ]; then
  echo "::error file=git_push.sh::The commit message is not given. Either pass the commit message as the first parameter to the script or set the GIT_PUSH_COMMIT_MESSAGE environment variable."
  exit 1
fi
if [ -z "$git_author_email" ]; then
  echo "::error file=git_push.sh::The author email is not given. Either pass the author email as the second parameter to the script or set the GIT_PUSH_AUTHOR_EMAIL environment variable."
  exit 1
fi
if [ -z "$git_author_name" ]; then
  echo "::error file=git_push.sh::The author name is not given. Either pass the author name as the third parameter to the script or set the GIT_PUSH_AUTHOR_NAME environment variable."
  exit 1
fi
# Check that the current working directory is a root directory of a git repo
if [ ! -d .git ]; then
  echo "::error file=git_push.sh::Current working directory is not a root directory of a git repository."
  exit 1
fi

# Check that the current working directory has a remote set
git_remote=`git remote`
if [ -z "$git_remote" ]; then
  echo "::error file=git_push.sh::No remote repository found for the git repository."
  exit 1
fi

# Set author
# The real values are masked as '***' in CI logs on purpose.
echo "Setting the git author email to '***'."
git config --global user.email "$git_author_email"
echo "Setting the git author name to '***'."
git config --global user.name "$git_author_name"

# Add all changes
echo "Adding all changes from the root."
git add .

# Commit all changes
echo "Commiting the change with the commit message: '$git_commit_message'."
git commit -m "$git_commit_message"

# Push all changes
# NOTE(review): the push result is not checked; the script exits with
# `git push`'s status — confirm that is sufficient for the calling workflow.
echo "Pushing all changes."
git push
package android.example.com.split.ui.tabfragment;
import android.example.com.split.R;
import android.example.com.split.data.entity.Group;
import android.example.com.split.data.entity.User;
import android.example.com.split.ui.recycleradapter.MembersRecyclerAdapter;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import java.util.List;
/**
 * Tab fragment listing the members of a group. The group is delivered via the
 * fragment arguments under the "group" key.
 */
public class MembersTabFragment extends BaseTabFragment<MembersRecyclerAdapter, User> implements
    MembersActions {

  private static final String TAG = "MembersTabFragment";

  // Group whose members this tab displays; read from fragment arguments.
  private Group group;

  @Nullable
  @Override
  public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container,
                           @Nullable Bundle savedInstanceState) {
    View rootView = inflater.inflate(R.layout.fragment_tab_members, container, false);
    // Bug fix: previously passed R.id.recyclerView_fragment_tab_expenses (a
    // copy-paste from the expenses tab) while setupRecyclerView ignored its
    // id parameter entirely; now the members id is passed and actually used.
    setupRecyclerView(rootView, R.id.recyclerView_fragment_tab_members);
    return rootView;
  }

  @Override
  protected void setupRecyclerView(View rootView, int recyclerViewId) {
    // Bug fix: use the supplied id instead of a hard-coded one.
    recyclerView = (RecyclerView) rootView.findViewById(recyclerViewId);
    recyclerView.setHasFixedSize(true);
    LinearLayoutManager mLayoutManager = new LinearLayoutManager(getActivity());
    recyclerView.setLayoutManager(mLayoutManager);

    // Create bundle to get the group passed from the GroupDetailActivity
    Bundle bundle = getArguments();
    group = (Group) bundle.get("group");

    // gets the members from the group
    setData(group.getUserMembers());
    setRecyclerAdapter(new MembersRecyclerAdapter(getData()));
    recyclerView.setAdapter(getRecyclerAdapter());
  }

  // The MembersActions contract below is not implemented yet; the stubs keep
  // the interface satisfied. TODO: implement.

  @Override
  public void addNewMember(User member) {
  }

  @Override
  public List<User> getContactsNotInAGroup(Group group, List<User> contacts) {
    return null;
  }

  @Override
  public User selectContactFromContacts(List<User> contacts, User contact) {
    return null;
  }

  @Override
  public boolean isContactMemberInGroup(User contact, Group group) {
    return false;
  }

  @Override
  public User initializeNewGroupMember(User newMember, Group group) {
    return null;
  }

  @Override
  public void saveNewMemberInGroup(User newMember, Group group) {
  }

  @Override
  public void saveNewMemberInGroupToRemoteDb(User newMember, Group group) {
  }

  @Override
  public void updateUiAfterAddingNewMemberToGroup() {
  }

  @Override
  public void removeGroupMember(User member, Group group) {
  }

  @Override
  public boolean groupMemberHasZeroBalanceInGroup(User user, Group group) {
    return false;
  }

  @Override
  public void removeGroupMemberInRemoteDb(User member, Group group) {
  }

  @Override
  public void updateUiAfterRemovingGroupMember() {
  }
}
|
package org.apache.tapestry5.plastic.test;
/**
 * Minimal read/write holder abstraction for a value of type {@code T}.
 */
public interface IndirectAccess<T>
{
    /** @return the current value. */
    T get();

    /** Replaces the current value with {@code newValue}. */
    void set(T newValue);
}
|
<filename>config/dbqueue.js
/**
Copyright 2019 University of Denver
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
'use strict';

// Knex connection pool for the queue database (MySQL via mysql2 driver).
// NOTE(review): `password: <PASSWORD>` is a redaction placeholder left by a
// secret-scrubbing tool and is not valid JavaScript as-is — restore the real
// config reference (presumably CONFIG.dbQueuePassword) before running.
const CONFIG = require('../config/config'),
    DBQUEUE = require('knex')({
        client: 'mysql2',
        connection: {
            host: CONFIG.dbQueueHost,
            user: CONFIG.dbQueueUser,
            password: <PASSWORD>,
            database: CONFIG.dbQueueName
        }
    });

// Returns the shared knex instance (module-level singleton).
module.exports = function () {
    return DBQUEUE;
};
// Importações.
const express = require('express');
const router = express.Router();
// Conexões.
const database = require('../../configs/database').connection;
// Models.
const Animal = require('../models/Animal');
const AlbumAnimal = require('../models/AlbumAnimal');
const FotoAnimal = require('../models/FotoAnimal');
const Anuncio = require('../models/Anuncio');
const Candidatura = require('../models/Candidatura');
const Usuario = require('../models/Usuario');
const EnderecoUsuario = require('../models/EnderecoUsuario');
const Bloqueio = require('../models/Bloqueio');
const Notificacao = require('../models/Notificacao');
const PontoEncontro = require('../models/PontoEncontro');
const DocResponsabilidade = require('../models/DocResponsabilidade');
// Utilidades.
const { Op } = require('sequelize');
const fs = require('fs');
const path = require('path');
const uuid = require('uuid'); // 'uuid' para criar os nomes únicos dos arquivos.
const moment = require('moment'); // 'moment' para manipular dados de tempo de forma mais flexível.
const multer = require('multer'); // 'multer' para receber dados via POST de um formulário com encode 'multipart/form-data' (XMLHttpRequest).
const sharp = require('sharp'); // 'sharp' para processar imagens.
const mv = require('mv'); // 'mv' para mover arquivos de forma segura.
const randomize = require('randomatic'); // 'randomatic' para gerar valores aleatórios.
// Helpers.
const checkUserBlockList = require('../../helpers/check_user_BlockList');
const generate_QRCode = require('../../helpers/generate_QRCode');
const generate_Template = require('../../helpers/generate_HTMLTemplate');
const generate_PDF = require('../../helpers/generate_PDF');
// Rotas.
router.get('/', (req, res, next) => {
    /* Two ways of fetching the notifications.
        01. List all notifications of a user. (Admins/User - owner of the resource).
        02. List all notifications of a user with filters (read/unread). (Admins/User - owner of the resource).
    */

    // Start of the route access restrictions.

        // NOTE(review): the wording below looks copy-pasted from the candidacy
        // responsibility-documents route — this route actually lists user notifications.
        // Only Pet Adote application users may access this listing.
        if (!req.dadosAuthToken){
            // If, in some unidentified case, a request reached this point without presenting its JWT credentials, deny access.
            return res.status(401).json({
                mensagem: 'Requisição inválida - Você não possui o nível de acesso adequado para esse recurso.',
                code: 'ACCESS_TO_RESOURCE_NOT_ALLOWED'
            });
        }

        // If the client is not of the "Pet Adote" type, deny access.
        if (req.dadosAuthToken.tipo_cliente !== 'Pet Adote'){
            return res.status(401).json({
                mensagem: 'Requisição inválida - Você não possui o nível de acesso adequado para esse recurso.',
                code: 'ACCESS_TO_RESOURCE_NOT_ALLOWED'
            });
        }

        // Capture the user's data, if the requester is a user of a Pet Adote application.
        const { usuario } = req.dadosAuthToken;

    // End of the route access restrictions.

    // Start of the configuration of the possible search operations.
        let operacao = undefined;   // If the operation stays undefined, send BAD_REQUEST (400).

        let { fromUser, filterBy, page, limit } = req.query;

        // The operation is selected by the exact combination of query parameters present.
        switch (Object.entries(req.query).length){
            case 0:
                // operacao = 'getAny';
                break;
            case 1:
                // if (page) { operacao = 'getAny' };
                if (fromUser) { operacao = 'getAll_fromUser' };
                break;
            case 2:
                if (fromUser && page) { operacao = 'getAll_fromUser' };
                if (fromUser && filterBy) { operacao = 'getAll_filtered_fromUser' };
                break;
            case 3:
                if (fromUser && page && limit) { operacao = 'getAll_fromUser' };
                if (fromUser && filterBy && page) { operacao = 'getAll_filtered_fromUser' };
                break;
            case 4:
                if (fromUser && filterBy && page && limit) { operacao = 'getAll_filtered_fromUser' };
                break;
            default:
                break;
        }
    // End of the configuration of the possible search operations.

    // Start of the parameter validation.
        if (fromUser){
            if (String(fromUser).match(/[^\d]+/g)){     // If "fromUser" contains anything other than digits.
                return res.status(400).json({
                    mensagem: 'Requisição inválida - O ID de um Usuário deve conter apenas dígitos.',
                    code: 'INVALID_REQUEST_QUERY'
                });
            }

            if (usuario?.e_admin == 0 && (usuario?.cod_usuario != fromUser)){
                // If the requester is a common user - they may only access notifications that belong to them.
                return res.status(401).json({
                    mensagem: 'Você não possui o nível de acesso adequado para esse recurso.',
                    code: 'ACCESS_TO_RESOURCE_NOT_ALLOWED'
                });
            }
        }

        if (filterBy){
            let allowedFilters = [
                'read',
                'unread'
            ];

            if (!allowedFilters.includes(filterBy)){
                return res.status(400).json({
                    mensagem: 'Requisição inválida - (filterBy) deve receber um dos seguintes valores [read], [unread].',
                    code: 'INVALID_REQUEST_QUERY'
                });
            }
        }
    // End of the parameter validation.

    // Start of the parameter normalization.
        req.query.page = Number(req.query.page);    // If the pagination page value was received as a String, make it a Number.
        req.query.limit = Number(req.query.limit);  // If the pagination limit value was received as a String, make it a Number.

        req.query.fromUser = String(req.query.fromUser);
        req.query.filterBy = String(req.query.filterBy);
    // End of the parameter normalization.

    // Start of the notification listing processes.

        // Start of the pagination configuration.
            let requestedPage = req.query.page || 1;        // The first page is the default (NaN from the normalization above also falls back to 1).
            let paginationLimit = req.query.limit || 10;    // Default amount of rows delivered per page = 10;

            let paginationOffset = (requestedPage - 1) * paginationLimit;   // Defines the starting index for data collection.
        // End of the pagination configuration.

        if (!operacao){
            return res.status(400).json({
                mensagem: 'Algum parâmetro inválido foi passado na URL da requisição.',
                code: 'BAD_REQUEST'
            });
        }

        if (operacao == 'getAll_fromUser'){
            // Call for Users.
            // Delivers a list containing every notification the user has (unread first, newest first).

            Notificacao.findAndCountAll({
                where: {
                    cod_usuario: req.query.fromUser
                },
                order: [['foi_lida', 'ASC'], ['data_criacao', 'DESC']],
                limit: paginationLimit,
                offset: paginationOffset
            })
            .then((resultArr) => {

                if (resultArr.count === 0){
                    return res.status(200).json({
                        mensagem: 'Este usuário não possui notificações.'
                    });
                }

                // Call usage restrictions - when some result was found.
                    if (usuario?.e_admin == 0 && (usuario?.cod_usuario != resultArr.rows[0].cod_usuario)){
                        // If the requester is a common user - they may only access notifications that belong to them.
                        return res.status(401).json({
                            mensagem: 'Você não possui o nível de acesso adequado para esse recurso.',
                            code: 'ACCESS_TO_RESOURCE_NOT_ALLOWED'
                        });
                    }
                // End of the call usage restrictions.

                // Start of the construction of the response object.
                    let total_notificacoes = resultArr.count;
                    let total_paginas = Math.ceil(total_notificacoes / paginationLimit);
                    let notificacoes = [];

                    let voltar_pagina = undefined;
                    let avancar_pagina = undefined;

                    if (requestedPage > 1 && requestedPage <= total_paginas){
                        voltar_pagina = `${req.protocol}://${req.get('host')}/usuarios/notificacoes/?fromUser=${req.query.fromUser}&page=${requestedPage - 1}&limit=${paginationLimit}`;
                    }

                    if (requestedPage < total_paginas){
                        avancar_pagina = `${req.protocol}://${req.get('host')}/usuarios/notificacoes/?fromUser=${req.query.fromUser}&page=${requestedPage + 1}&limit=${paginationLimit}`;
                    }

                    if (requestedPage > total_paginas){
                        return res.status(404).json({
                            mensagem: 'Você chegou ao final da lista de notificações deste usuário.',
                            code: 'RESOURCE_NOT_FOUND'
                        });
                    }

                    // Start of the inclusion of extra attributes.
                        resultArr.rows.forEach((notificacao) => {

                            if (usuario?.e_admin == 0){
                                if (notificacao.foi_lida == 0){
                                    // NOTE(review): fire-and-forget update — the returned Promise is not awaited,
                                    // so a failure here is silently ignored. Confirm this is acceptable.
                                    notificacao.update({
                                        foi_lida: 1,
                                        data_modificacao: new Date()
                                    });     // If the notification was displayed to the user, then it was read.
                                }
                            }

                            notificacao = notificacao.get({ plain: true });

                            // Splitting the object's data.
                                // ...
                            // End of the data splitting.

                            // Inclusion of attributes essential to the clients.
                                // ...
                            // End of the inclusion of attributes essential to the clients.

                            // Joining the data into a "dadosCandidatura" object.
                                // ...
                            // End of the joining of the data into a "dadosCandidatura" object.

                            notificacoes.push(notificacao);

                        });
                    // End of the inclusion of extra attributes.

                // End of the construction of the response object.

                // Start of the response delivery.
                    return res.status(200).json({
                        mensagem: `Lista de notificações do usuário.`,
                        total_notificacoes,
                        total_paginas,
                        notificacoes,
                        voltar_pagina,
                        avancar_pagina
                    });
                // End of the response delivery.

            })
            .catch((error) => {
                console.error('Algo inesperado aconteceu ao listar as notificações do usuário.', error);

                let customErr = new Error('Algo inesperado aconteceu ao listar as notificações do usuário. Entre em contato com o administrador.');
                customErr.status = 500;
                customErr.code = 'INTERNAL_SERVER_ERROR'

                return next( customErr );
            });

        }

        if (operacao == 'getAll_filtered_fromUser'){
            // Call for Users.
            // Delivers a list containing the user's notifications filtered by read state (read/unread).

            // Start of the filter definition.
                let filter = undefined;

                switch(req.query.filterBy){
                    case 'read':
                        filter = 1;
                        break;
                    case 'unread':
                        filter = 0;
                        break;
                    default:
                        break;
                }

                // Defensive re-check (filterBy was already validated above).
                if (filter === undefined) {
                    return res.status(400).json({
                        mensagem: 'Requisição inválida - (filterBy) deve receber um dos seguintes valores [read], [unread].',
                        code: 'INVALID_REQUEST_QUERY'
                    });
                }
            // End of the filter definition.

            Notificacao.findAndCountAll({
                where: {
                    cod_usuario: req.query.fromUser,
                    foi_lida: filter
                },
                order: [['data_criacao', 'DESC']],
                limit: paginationLimit,
                offset: paginationOffset
            })
            .then((resultArr) => {

                if (resultArr.count === 0){
                    return res.status(200).json({
                        mensagem: `Este usuário não possui notificações ${ filter == 1 ? 'lidas' : 'não lidas' }.`
                    });
                }

                // Call usage restrictions - when some result was found.
                    if (usuario?.e_admin == 0 && (usuario?.cod_usuario != resultArr.rows[0].cod_usuario)){
                        // If the requester is a common user - they may only access notifications that belong to them.
                        return res.status(401).json({
                            mensagem: 'Você não possui o nível de acesso adequado para esse recurso.',
                            code: 'ACCESS_TO_RESOURCE_NOT_ALLOWED'
                        });
                    }
                // End of the call usage restrictions.

                // Start of the construction of the response object.
                    let total_notificacoes = resultArr.count;
                    let total_paginas = Math.ceil(total_notificacoes / paginationLimit);
                    let notificacoes = [];

                    let voltar_pagina = undefined;
                    let avancar_pagina = undefined;

                    if (requestedPage > 1 && requestedPage <= total_paginas){
                        voltar_pagina = `${req.protocol}://${req.get('host')}/usuarios/notificacoes/?fromUser=${req.query.fromUser}&filterBy=${req.query.filterBy}&page=${requestedPage - 1}&limit=${paginationLimit}`;
                    }

                    if (requestedPage < total_paginas){
                        avancar_pagina = `${req.protocol}://${req.get('host')}/usuarios/notificacoes/?fromUser=${req.query.fromUser}&filterBy=${req.query.filterBy}&page=${requestedPage + 1}&limit=${paginationLimit}`;
                    }

                    if (requestedPage > total_paginas){
                        return res.status(404).json({
                            mensagem: 'Você chegou ao final da lista de notificações deste usuário.',
                            code: 'RESOURCE_NOT_FOUND'
                        });
                    }

                    // Start of the inclusion of extra attributes.
                        resultArr.rows.forEach((notificacao) => {

                            if (usuario?.e_admin == 0){
                                if (notificacao.foi_lida == 0){
                                    // NOTE(review): fire-and-forget update — the returned Promise is not awaited,
                                    // so a failure here is silently ignored. Confirm this is acceptable.
                                    notificacao.update({
                                        foi_lida: 1,
                                        data_modificacao: new Date()
                                    });     // If the notification was displayed to the user, then it was read.
                                }
                            }

                            notificacao = notificacao.get({ plain: true });

                            // Splitting the object's data.
                                // ...
                            // End of the data splitting.

                            // Inclusion of attributes essential to the clients.
                                // ...
                            // End of the inclusion of attributes essential to the clients.

                            // Joining the data into a "dadosCandidatura" object.
                                // ...
                            // End of the joining of the data into a "dadosCandidatura" object.

                            notificacoes.push(notificacao);

                        });
                    // End of the inclusion of extra attributes.

                // End of the construction of the response object.

                // Start of the response delivery.
                    return res.status(200).json({
                        mensagem: `Lista de notificações ${ filter == 1 ? 'lidas' : 'não lidas' } do usuário.`,
                        total_notificacoes,
                        total_paginas,
                        notificacoes,
                        voltar_pagina,
                        avancar_pagina
                    });
                // End of the response delivery.

            })
            .catch((error) => {
                console.error('Algo inesperado aconteceu ao listar as notificações do usuário.', error);

                let customErr = new Error('Algo inesperado aconteceu ao listar as notificações do usuário. Entre em contato com o administrador.');
                customErr.status = 500;
                customErr.code = 'INTERNAL_SERVER_ERROR'

                return next( customErr );
            });

        }

    // End of the notification listing processes.

})
// Exportações.
module.exports = router; |
# Globals: the action functions below fill these in.
# $2 selects the CMake build type (e.g. Debug/Release); $1 picks the action at the bottom.
buildSystem=""
buildType=$2
project=""
#########################################
# Clean #
#########################################
# Remove every generated build artifact under ./build.
function Clean()
{
    rm -rf ./build/*
}
#########################################
# Build lib #
#########################################
# Configure and build the static engine library (libsq.a) with CMake + Ninja.
function Lib()
{
    project="libsq.a"
    # Delete the previous artifact so a failed build is not mistaken for success.
    rm ./build/${buildSystem}/${project}
    cmake ./Engine/ -B ./build/${buildSystem} -DCMAKE_BUILD_TYPE=${buildType} -G "Ninja"
    ninja -C ./build/${buildSystem}
}
#########################################
# Build SQ #
#########################################
# Configure and build the Sandbox application with CMake + Ninja.
function SQ()
{
    project="Sandbox"
    # Delete the previous binary so a failed build is not mistaken for success.
    rm ./build/${buildSystem}/Sandbox/Sandbox
    cmake -B ./build/${buildSystem} -DCMAKE_BUILD_TYPE=${buildType} -G "Ninja"
    ninja -C ./build/${buildSystem}
}
# Launch the Sandbox binary (only if it was built) in a detached xfce4 terminal.
function Run()
{
    if [[ -f "./build/${buildSystem}/Sandbox/Sandbox" ]]; then
        xfce4-terminal -T "${project}" -e "/usr/bin/cb_console_runner ./build/${buildSystem}/Sandbox/Sandbox" --show-borders --hide-menubar --hide-toolbar --geometry=50x15+5+10&
    fi
}
# Run Sandbox, attach 'perf record' to the running process, then open the
# capture in the hotspot profiler GUI.
function perftool()
{
    Run
    # Give the application time to start before attaching by PID.
    sleep 5
    echo "Starting performance testing tools..."
    perf record -o /home/rahul/Development/Projects/SQ/perf.data --call-graph dwarf --aio -z --sample-cpu --pid $(pidof Sandbox)
    hotspot /home/rahul/Development/Projects/SQ/perf.data
}
# Dispatch on the first CLI argument: Clean | lib | SQ | Perf.
if [[ $1 == "Clean" ]]; then
    Clean
elif [[ $1 == "lib" ]]; then
    buildSystem="lib"
    Lib
    Run
elif [[ $1 == "SQ" ]]; then
    buildSystem="SQ"
    SQ
    Run
elif [[ $1 == "Perf" ]]; then
    buildSystem="SQ"
    SQ
    perftool
fi |
<filename>Code/MeshImporter/ImporterUtil.cpp
// Copyright 2001-2017 Crytek GmbH / Crytek Group. All rights reserved.
#include "StdAfx.h"
#include "ImporterUtil.h"
#include "FileUtilNew.h"
#include "PathUtil.h"
#include <QCoreApplication>
//#include <ThreadingUtils.h>
//#include "FileUtil.h"
#include <CrySystem/IProjectManager.h>
#include <CryRenderer/IRenderAuxGeom.h>
#include <QDir>
#include <QTextStream>
#include <QTemporaryDir>
#include <QTemporaryFile>
#include <QFile>
#include <CryString/CryStringUtils.h>
#include <CryString/CryPath.h>
void LogPrintf(const char* szFormat, ...);
// Absolute path of the current project's asset directory, as a QString.
QString GetAbsoluteGameFolderPath()
{
    const std::string assetDir(gEnv->pSystem->GetIProjectManager()->GetCurrentAssetDirectoryAbsolute());
    return QtUtil::ToQString(assetDir);
}
// Returns the user-specific folder used for temporary MeshImporter assets,
// creating it on disk if necessary.
static QString GetTempAssetFolder()
{
    char path[ICryPak::g_nMaxPath] = {};
    // Function-local static: AdjustFileName runs only on the first call.
    static const QString folder = QtUtil::ToQString(std::string(gEnv->pCryPak->AdjustFileName("%USER%/MeshImporter", path, ICryPak::FLAGS_PATH_REAL | ICryPak::FLAGS_FOR_WRITING | ICryPak::FLAGS_ADD_TRAILING_SLASH)));
    // mkpath runs on every call, re-creating the folder if it was deleted meanwhile.
    QDir().mkpath(folder);
    return folder;
}
//QString AppendPath(const QString& lhp, const QString& rhp)
//{
// return QDir::cleanPath(lhp + QDir::separator() + rhp);
//}
// Replaces every non-alphanumeric character of 'str' with a space,
// then trims leading/trailing whitespace from the result.
string MakeAlphaNum(const string& str)
{
    string result;
    for (int i = 0; i < str.length(); ++i)
    {
        const char ch = str[i];
        if (isalnum(ch))
        {
            result += ch;
        }
        else
        {
            result += ' ';
        }
    }
    result.Trim();
    return result;
}
//! Writes 'content' to a new temporary file created in directory 'dirPath'.
//! \param templateName Optional QTemporaryFile name template; a default is used when empty.
//! \return The (closed) temporary file on success, or an empty pointer on failure.
std::unique_ptr<QTemporaryFile> WriteTemporaryFile(const string& dirPath, const string& content, string templateName)
{
    if (templateName.empty())
    {
        templateName = "tmp_meshImporter_XXXXXX";
    }
    const QString templatePath = AppendPath(QtUtil::ToQString(dirPath), QtUtil::ToQString(templateName));
    std::unique_ptr<QTemporaryFile> pTmpFile(new QTemporaryFile(templatePath));
    if (pTmpFile->open())
    {
        // Only treat the write as successful if every byte was written.
        if (pTmpFile->write(content.begin(), content.size()) == content.size())
        {
            pTmpFile->close();
            return pTmpFile; // Implicit move of a local; 'return std::move(...)' here would inhibit copy elision.
        }
    }
    pTmpFile->close();
    LogPrintf("%s: Writing meta data failed.\n", __FUNCTION__);
    return std::unique_ptr<QTemporaryFile>();
}
//! Creates a unique temporary directory and returns it; logs on failure.
//! NOTE(review): the empty template makes QTemporaryDir fall back to its default
//! location; the commented-out code suggests %USER%/MeshImporter was once intended — confirm.
std::unique_ptr<QTemporaryDir> CreateTemporaryDirectory()
{
    const QString templatePath = "";// AppendPath(GetTempAssetFolder(), "tmp_meshImporter_XXXXXX");
    std::unique_ptr<QTemporaryDir> pTmpDir(new QTemporaryDir(templatePath));
    if (!pTmpDir->isValid())
    {
        LogPrintf("%s: Cannot create temporary directory. Template path is %s\n", __FUNCTION__, templatePath.toLocal8Bit().constData());
    }
    return pTmpDir; // Implicit move of a local; 'return std::move(...)' here would inhibit copy elision.
}
//! Returns file extension of asset meta data. Lower-case and without leading dot.
//! Example: cryasset
//! Used below by IsAssetMetaDataFile() for suffix matching.
static const char* AssetMetaDataExt()
{
    return "cryasset";
}
//! Returns true if 'filePath' ends with ".cryasset" (case-insensitive).
bool IsAssetMetaDataFile(string filePath)
{
    // ".cryasset", built once.
    static string ext = string().Format(".%s", AssetMetaDataExt());
    filePath.MakeLower();
    // Compare both strings back to front; a path shorter than the extension
    // exhausts 'filePath' first and correctly yields false.
    auto it0 = ext.rbegin();
    auto it1 = filePath.rbegin();
    for (; it0 != ext.rend(); ++it0, ++it1)
    {
        if (it1 == filePath.rend() || *it1 != *it0)
        {
            return false;
        }
    }
    return true;
}
//! Intended to return the first file of 'asset' whose extension is in 'exts'.
//! Currently stubbed out: asserts in debug builds and always returns an empty string.
string GetFileFromAsset(CAsset& asset, const std::vector<string>& exts)
{
    assert(0);
    /*const char delim = '*';
    string flatExts;
    flatExts.reserve(5 * exts.size());
    for (auto& ext : exts)
    {
        flatExts += delim;
        flatExts += ext;
    }
    flatExts.MakeLower();
    string needle;
    for (size_t i = 0, N = asset.GetFilesCount(); i < N; ++i)
    {
        needle.Format("%c%s", delim, PathUtil::GetExt(asset.GetFile(i)));
        if (flatExts.find(needle) != string::npos)
        {
            return asset.GetFile(i);
        }
    }
    */
    return string();
}
bool WriteToFile(const QString& filePath, const QString& content)
{
QFile file(filePath);
if (!file.open(QIODevice::Truncate | QIODevice::WriteOnly))
{
return false;
}
QTextStream out(&file);
out << content;
file.close();
return true;
}
// Returns true if 'path' can be opened for binary reading.
bool FileExists(const string& path)
{
    FILE* const pHandle = fopen(path.c_str(), "rb");
    const bool bExists = (pHandle != nullptr);
    if (bExists)
    {
        fclose(pHandle);
    }
    return bExists;
}
// Returns true if 'query' lies under directory 'dir' (case-insensitive, slash-normalized).
// NOTE(review): this is a pure prefix comparison — "/foo/barbaz" also matches dir
// "/foo/bar"; confirm callers pass directory paths where this cannot misfire.
static bool IsInDirectory(const string& query, const string& dir)
{
    string iquery = query;
    string idir = dir;
    return !strncmp(PathUtil::ToUnixPath(iquery.MakeLower()), PathUtil::ToUnixPath(idir.MakeLower()), idir.size());
}
bool IsPathAbsolute(const string& path)
{
return QFileInfo(QtUtil::ToQString(path)).isAbsolute();
}
//! Returns true if 'filePath' is relative, or is located under the engine path.
//! NOTE(review): the assert(0) marks this as unfinished — in release builds the
//! check below still executes; confirm the intended final implementation.
bool IsAssetPath(const string& filePath)
{
    assert(0);
    return !IsPathAbsolute(filePath) || IsInDirectory(filePath, PathUtil::GetEnginePath());
}
// Joins two path components with the native separator and normalizes the result.
QString AppendPath(const QString& lhp, const QString& rhp)
{
    const QString joined = lhp + QDir::separator() + rhp;
    return QDir::cleanPath(joined);
}
// Copies 'from' to 'to' without overwriting.
// Returns true when the file was copied, or when 'to' already exists with identical content.
// Returns false when 'to' exists with different content, or the copy itself fails.
bool CopyNoOverwrite(const QString& from, const QString& to, bool bSilent)
{
    if (QFileInfo(to).exists())
    {
        if (CFileUtil::CompareFiles(QtUtil::ToString(to).c_str(), QtUtil::ToString(from).c_str()))
        {
            // Identical target already present: nothing to do, report success.
            LogPrintf("File %s already exists and is identical to %s. Skip copying.",
                QtUtil::ToString(to).c_str(),
                QtUtil::ToString(from).c_str());
            return true;
        }
        else
        {
            if (!bSilent)
            {
                const char* what =
                    "Cannot copy source file %1 to %2, because file already exists.\n"
                    "Either choose another location, or create a CGF from %2 instead.";
                // User-facing dialog is disabled; the assert marks this unfinished UI path.
                assert(0);
                //CQuestionDialog::SWarning(QObject::tr("Source file already exists"), QObject::tr(what).arg(from).arg(to));
            }
            return false;
        }
    }
    else
    {
        return QFile::copy(from, to);
    }
}
// String-path convenience overload of CopyNoOverwrite (always silent).
// Returns true on success, matching the QString overload's convention.
// Bug fix: the result was previously negated ('return !CopyNoOverwrite(...)'),
// inverting the success/failure contract relative to the QString overload.
bool CopyNoOverwrite(const string& from, const string& to)
{
    return CopyNoOverwrite(QtUtil::ToQString(from), QtUtil::ToQString(to), true);
}
// Moves 'from' onto 'to', replacing an existing target if necessary.
static bool CopyAllowOverwriteInternal(const QString& from, const QString& to)
{
    // Fast path: a plain rename succeeds when the target does not exist yet.
    if (QFile::rename(from, to))
    {
        return true;
    }
    // The target probably exists already: remove it, then retry the rename.
    return QFile::remove(to) && QFile::rename(from, to);
}
// Copies 'from' over 'to', overwriting an existing, differing target.
// A missing source means failure; an already-identical target means success.
bool CopyAllowOverwrite(const string& from, const string& to)
{
    if (!FileExists(from))
    {
        return false;
    }
    const bool bIdentical = CFileUtil::CompareFiles(to.c_str(), from.c_str());
    return bIdentical || CopyAllowOverwriteInternal(QtUtil::ToQString(from), QtUtil::ToQString(to));
}
bool IsFileWritable(const string& path)
{
QFile f(QtUtil::ToQString(path));
return (f.permissions() & QFileDevice::WriteOwner) != 0;
}
//! Copies 'inputFilePath' into the game folder at 'outputFilePath'.
//! On success GetOutputFilePath() returns the game-relative target path;
//! on failure GetError() describes the problem.
bool CFileImporter::Import(const string& inputFilePath, const string& outputFilePath)
{
    m_error.clear();
    if (inputFilePath.empty())
    {
        m_error = "Input file path is empty";
        return false;
    }
    // Store the target as a game-relative path; build the absolute path for file operations.
    m_outputFilePath = PathUtil::MakeGamePath(outputFilePath.c_str());
    string absOutputFilePath = PathUtil::Make(PathUtil::GetGameFolder(), m_outputFilePath);
    if (absOutputFilePath.empty())
    {
        m_error = "Output file path is empty";
        return false;
    }
    // Make sure the target directory exists.
    QDir().mkpath(QtUtil::ToQString(PathUtil::GetPathWithoutFilename(absOutputFilePath)));

    // Copy source to target location.
    if (!CopyNoOverwrite(QtUtil::ToQString(inputFilePath), QtUtil::ToQString(absOutputFilePath), true))
    {
        // Target exists with different content: consult the m_mayOverwrite callback (if set).
        if (m_mayOverwrite && m_mayOverwrite(absOutputFilePath))
        {
            QFile::remove(QtUtil::ToQString(absOutputFilePath));
            return CopyNoOverwrite(QtUtil::ToQString(inputFilePath), QtUtil::ToQString(absOutputFilePath), true);
        }
        else
        {
            m_error.Format("File %s already exists and is different from %s",
                PathUtil::ToUnixPath(absOutputFilePath.c_str()),
                PathUtil::ToUnixPath(inputFilePath.c_str()));
            return false;
        }
    }
    return true;
}
// Imports 'inputFilePath': files already inside the asset directory are
// referenced in place; anything else is copied to a user-chosen location.
bool CFileImporter::Import(const string& inputFilePath)
{
    if (!IsAssetPath(inputFilePath))
    {
        return Import(inputFilePath, ShowDialog(inputFilePath));
    }
    // No need to copy.
    m_outputFilePath = PathUtil::MakeGamePath(inputFilePath);
    return true;
}
//! Sets the callback consulted before an existing target file is overwritten.
void CFileImporter::SetMayOverwriteFunc(const MayOverwriteFunc& mayOverwrite)
{
    m_mayOverwrite = mayOverwrite;
}
//! Returns the game-relative path of the last imported file (set by Import()).
string CFileImporter::GetOutputFilePath() const
{
    return m_outputFilePath;
}
//! Returns the error description of the last failed Import() call (empty on success).
string CFileImporter::GetError() const
{
    return m_error;
}
//! Intended to ask the user for a target directory and return the resulting file path.
//! Currently stubbed out: the dialog code is commented and an empty string is returned.
string CFileImporter::ShowDialog(const string& inputFilename)
{
    /*CEngineFileDialog::RunParams runParams;
    runParams.title = QObject::tr("Save file to directory...");
    const QString path = CEngineFileDialog::RunGameSelectDirectory(runParams, nullptr);
    return PathUtil::Make(PathUtil::Make(PathUtil::GetGameProjectAssetsPath(), QtUtil::ToString(path)), PathUtil::GetFile(inputFilename));*/
    return string();
}
// ==================================================
// Rendering.
// ==================================================
// ==================================================
// Qt <-> STL
// ==================================================
// Converts a vector of CryStrings into a QStringList, preserving order.
QStringList ToStringList(const std::vector<string>& strs)
{
    QStringList result;
    result.reserve(static_cast<int>(strs.size()));
    for (const string& str : strs)
    {
        result.push_back(QtUtil::ToQString(str));
    }
    return result;
}
//! Intended to asynchronously copy 'from' into game directory 'dir',
//! optionally showing a progress notification.
//! Currently stubbed out: returns a default-constructed (invalid) future immediately.
//! NOTE(review): the assert(0) after the return statement is unreachable dead code.
std::future<std::pair<bool, string>> CopySourceFileToDirectoryAsync(const string& from, const string& dir, bool bShowNotification)
{
    std::future<std::pair<bool, string>> fr;
    return fr;

    assert(0);
    /*const QString absOriginalFilePath = QtUtil::ToQString(from);
    const string absDir = PathUtil::Make(PathUtil::GetGameProjectAssetsPath(), dir);
    QFileInfo origInfo(absOriginalFilePath);
    QFileInfo dirInfo(QtUtil::ToQString(absDir));
    if (!origInfo.isFile())
    {
        std::promise<std::pair<bool, string>> promise;
        promise.set_value({ false, string().Format("Absolute path '%s' is not a file.", QtUtil::ToString(absOriginalFilePath).c_str()) });
        return promise.get_future();
    }
    if (!dirInfo.isDir())
    {
        std::promise<std::pair<bool, string>> promise;
        promise.set_value({ false, string().Format("Absolute path '%s' is not a directory.", absDir) });
        return promise.get_future();
    }
    QString targetFilePath = dirInfo.absoluteDir().absoluteFilePath(origInfo.fileName());
    auto copyFunction = [absOriginalFilePath, targetFilePath]()
    {
        CFileImporter fileImporter;
        if (!fileImporter.Import(QtUtil::ToString(absOriginalFilePath), QtUtil::ToString(targetFilePath)))
        {
            return std::make_pair(false, fileImporter.GetError());
        }
        else
        {
            return std::make_pair(true, string());
        }
    };
    return ThreadingUtils::Async([copyFunction, targetFilePath, bShowNotification]()
    {
        if (bShowNotification)
        {
            const QString message = QCoreApplication::tr("File %1").arg(targetFilePath);
            CProgressNotification notif(QCoreApplication::tr("Copying file"), message);
            const std::pair<bool, string> ret = copyFunction();
            notif.SetProgress(1.0);
            return ret;
        }
        else
        {
            return copyFunction();
        }
    });*/
}
|
#!/bin/bash
# Load the user's shell setup so 'conda' is on PATH, then non-interactively
# install the Jupyter notebook extensions package from conda-forge.
source ~/.bashrc
conda install --yes -c conda-forge jupyter_contrib_nbextensions
|
<filename>test/list/get-styles.test.js
/* eslint-env mocha */
import { expect } from 'chai';
import getStyles from '../../src/list/get-styles';
import styles from '../../src/list/styles';
// Unit tests for src/list/get-styles: each getter must return its base style
// object and merge caller-supplied override objects into the result.
describe('List.getStyles', () => {
  describe('root', () => {
    it('should get styles', () => {
      const style = getStyles.root();

      expect(style).to.deep.equal(styles.root);
    });

    it('should combine styles', () => {
      const style = getStyles.root({ color: 'red' });

      expect(style).to.have.property('color', 'red');
    });
  });

  describe('listHeader', () => {
    it('should get styles', () => {
      const style = getStyles.listHeader();

      expect(style).to.deep.equal(styles.listHeader);
    });

    it('should combine styles', () => {
      const style = getStyles.listHeader({ color: 'red' });

      expect(style).to.have.property('color', 'red');
    });
  });

  describe('list', () => {
    it('should get styles', () => {
      const style = getStyles.list();

      expect(style).to.deep.equal(styles.list);
    });

    it('should combine styles', () => {
      // Overrides are passed as the third positional argument here.
      const style = getStyles.list(null, null, { color: 'red' });

      expect(style).to.have.property('color', 'red');
    });

    it('should add header styles', () => {
      // First argument flags a visible header; the list shrinks by the header height.
      const style = getStyles.list(true, null, {});

      expect(style).to.have.property('height', 'calc(100% - 36px)');
    });

    it('should add infinite scroll styles', () => {
      // Second argument flags infinite scroll; vertical overflow is hidden.
      const style = getStyles.list(null, true, {});

      expect(style).to.have.property('overflowY', 'hidden');
    });
  });
});
|
import axios from 'axios';
const astromatch = axios.create({
baseURL:
'https://us-central1-missao-newton.cloudfunctions.net/astroMatch/victorgutierrez',
});
export default astromatch;
|
<filename>programmers/skill-test-lv1/sum_between_two_int.py
# https://programmers.co.kr/learn/courses/30/lessons/12912
def solution(a, b):
    """Return the sum of all integers between a and b, inclusive.

    Works regardless of argument order; when a == b the result is that
    value itself. Uses the closed-form arithmetic-series formula, which is
    O(1) instead of the original O(|b - a|) sum(range(...)).
    """
    lo, hi = (a, b) if a <= b else (b, a)
    return (lo + hi) * (hi - lo + 1) // 2
# Test: uncomment one pair at a time to try other cases.
# a, b = 3, 5  -> 12
# a, b = 3, 3  -> 3
a, b = 5, 3  # arguments in descending order; solution() handles the swap
print(solution(a, b))
|
"use strict";

// Auto-generated icon data module for U+1F500 (twisted rightwards arrows):
// exposes the SVG viewBox and path geometry as a plain object.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.u1F500 = void 0;
var u1F500 = {
  "viewBox": "0 0 2600 2760.837",
  "children": [{
    "name": "path",
    "attribs": {
      "d": "M300 1990l-31-1q-18 0-56-2v-192l69 3h11q220 0 425-116t385.5-324 375.5-333.5T1874 854V523l513 415-513 415v-291q-111 36-252.5 133t-293 270.5-322 292.5T659 1933.5 300 1990zm1574-556l513 415-513 414v-329q-171-44-303-116.5T1322 1646q27-27 52.5-54.5t50.5-55.5l54-59q90 81 183 142.5t212 105.5v-291zM267 989l-54 3V800l55-3q17 0 30-1 228 0 443 87t406 256q-41 39-92 94.5t-53 58.5l-14 16q-169-161-341-240t-359-79h-21z"
    },
    "children": []
  }]
};
exports.u1F500 = u1F500; |
import React, { Component } from 'react';
import PropTypes from 'prop-types';
import { Link } from 'react-router-dom';
//import CurrentlyReading from './CurrentlyReading';
//import WantToRead from './WantToRead';
import Shelf from './Shelf';
// Stateless shelves page: renders the three fixed shelves
// (Currently Reading / Want To Read / Read) and a link to the search page.
const MyReads = ({books_curr_reading, books_want_to_read, books_read, onChangeBookShelf}) => (
  <div>
    <div className="list-books">
      <div className="list-books-title">
        <h1>MyReads</h1>
      </div>
      <div className="list-books-content">
        <div>
          <Shelf title="Currently Reading" books={books_curr_reading} onChangeBookShelf={onChangeBookShelf}/>
          <Shelf title="Want To Read" books={books_want_to_read} onChangeBookShelf={onChangeBookShelf}/>
          <Shelf title="Read" books={books_read} onChangeBookShelf={onChangeBookShelf}/>
        </div>
      </div>
      <div className="open-search">
        <Link to="/search">Add a book</Link>
      </div>
    </div>
  </div>
)

// Runtime prop validation: the three shelf arrays and the shelf-change callback are required.
MyReads.propTypes = {
  books_curr_reading: PropTypes.array.isRequired,
  books_want_to_read: PropTypes.array.isRequired,
  books_read: PropTypes.array.isRequired,
  onChangeBookShelf: PropTypes.func.isRequired
}
export default MyReads |
import React from 'react';
import {
BrowserRouter as Router,
Switch,
Route,
Link
} from "react-router-dom";
import { Table, Form } from './Components';
// Root component: client-side routing between the data table (/table)
// and the entry form (/form), with a simple navigation header.
const App = () => {
  return (
    <Router>
      <div>
        <Link to="/table">View Table</Link> | <Link to="/form">Add New Data</Link>
        <hr />
        <Switch>
          <Route path="/table">
            <Table />
          </Route>
          <Route path="/form">
            <Form />
          </Route>
        </Switch>
      </div>
    </Router>
  );
}
export default App; |
class Inventory:
    """Minimal in-memory product inventory keyed by product ID."""

    def __init__(self):
        # product_id -> {'name': ..., 'price': ..., 'quantity': ...}
        self.products = {}

    def add_product(self, product_id, name, price, quantity):
        """Insert a new product; refuses to touch an existing ID."""
        if product_id not in self.products:
            self.products[product_id] = {'name': name, 'price': price, 'quantity': quantity}
        else:
            print("Product ID already exists. Use update_product to modify the product.")

    def update_product(self, product_id, name, price, quantity):
        """Replace the record of an existing product."""
        if product_id not in self.products:
            print("Product ID does not exist. Use add_product to add a new product.")
        else:
            self.products[product_id] = {'name': name, 'price': price, 'quantity': quantity}

    def remove_product(self, product_id):
        """Delete a product if present; complain otherwise."""
        if product_id not in self.products:
            print("Product ID does not exist.")
        else:
            del self.products[product_id]

    def display_inventory(self):
        """Print every product line by line, or a placeholder when empty."""
        if not self.products:
            print("Inventory is empty.")
            return
        print("Inventory:")
        for product_id, details in self.products.items():
            print(f"ID: {product_id}, Name: {details['name']}, Price: {details['price']}, Quantity: {details['quantity']}")
# Sample usage: populate, show, then modify and prune the inventory.
inventory = Inventory()
inventory.add_product(1, "Shirt", 20.0, 50)
inventory.add_product(2, "Pants", 30.0, 30)
inventory.display_inventory()
# Rename/reprice product 1, then drop product 2.
inventory.update_product(1, "T-Shirt", 15.0, 100)
inventory.remove_product(2)
inventory.display_inventory() |
#!/bin/bash
# Dev harness: prints connection info for a running bwt instance, optionally
# launches two Electrum wallets against it, then waits before teardown.
set -eo pipefail
source scripts/setup-env.sh

# Unquoted heredoc: variables and the backticked `ele… getmpk` calls are expanded.
cat <<EOL
bwt is running:
- HTTP API server on http://$BWT_HTTP_ADDR
- Electrum RPC server on $BWT_ELECTRUM_ADDR
- Bitcoin Core RPC server on 127.0.0.1:$BTC_RPC_PORT
- Logs at $DIR/{bwt,check}.log
You can access bitcoind with:
$ bitcoin-cli -datadir=$BTC_DIR -rpcwallet=internal <cmd>
$ bitcoin-cli -datadir=$BTC_DIR -rpcwallet=bwt <cmd>
Electrum wallet xpubs:
- `ele1 getmpk` (segwit)
- `ele2 getmpk` (non-segwit)
EOL

if [ -z "$NO_GUI" ]; then
  echo Starting Electrum GUI...
  # disable "Would you like to be notified when there is a newer version of Electrum available?" popup
  # and enable some advanced features
  ele setconfig check_updates false > /dev/null
  ele setconfig dont_show_testnet_warning true > /dev/null
  for opt in fee addresses_tab utxo_tab console_tab; do ele setconfig show_$opt true > /dev/null; done
  # Stop any daemonized instance, then start both wallets pinned to the bwt server.
  ele stop > /dev/null 2>&1
  ele1 --oneserver --server $BWT_ELECTRUM_ADDR:t > /dev/null &
  sleep 2
  ele2 --oneserver --server $BWT_ELECTRUM_ADDR:t > /dev/null &
else
  cat <<EOL
You can access electrum with:
$ electrum --regtest --dir $ELECTRUM_DIR --wallet $WALLET1 <cmd>
$ electrum --regtest --dir $ELECTRUM_DIR --wallet $WALLET2 <cmd>
EOL
fi

echo
read -p 'Press enter to shutdown and clean up'
|
# For every *.yml file in this directory (except the template itself), replace
# the template's placeholders with the values declared by the file's own
# "ENV_<KEY>: <value>" lines, and emit the result into ../.github/workflows/.
TEMPLATE_FILENAME="template.yml"
# NOTE(review): parsing `ls` is fragile (breaks on spaces in names) and the
# unquoted `grep .yml` treats the dot as "any character"; confirm the file
# names here are always simple before relying on this.
ls . | grep .yml | while read FILE_NAME ; do
    if [ ! $FILE_NAME = $TEMPLATE_FILENAME ];then
        #echo $FILE_NAME
        # Start from a fresh copy of the template for each target file.
        cp $TEMPLATE_FILENAME $FILE_NAME'.copy'
        #cp $FILE_NAME $FILE_NAME'.copy'
        # Each "ENV_KEY: value" line in the target file drives one substitution pass.
        cat $FILE_NAME'' | grep "ENV_" | while read ENV_VARIABLE ; do
            # KEY   = text before the first colon, with the ENV_ prefix stripped.
            KEY=$(echo $ENV_VARIABLE | sed 's/ENV_//g' | sed 's/:.*//g')
            # VALUE = text after the first colon, with one leading space removed.
            VALUE=$(echo $ENV_VARIABLE | sed 's/ENV_//g' | sed 's/:/#firstdoublepoints#/1' | sed 's/.*#firstdoublepoints#//g' | sed 's/ //1')
            #echo $VALUE
            # Replace whole-word occurrences of KEY with VALUE in the working copy.
            cat $FILE_NAME'.copy' | sed "s#\<$KEY\>#$VALUE#g">$FILE_NAME'.new'
            mv $FILE_NAME'.new' $FILE_NAME'.copy'
        done
        mv $FILE_NAME'.copy' ../.github/workflows/$FILE_NAME
    fi
done
|
#!/bin/bash
# Harden this host: install an SSH public key, move sshd to a custom port
# with key-only authentication, disable SELinux, and replace firewalld
# with a default-DROP iptables ruleset whitelisting specific ports.
SELINUX_CONF="/etc/selinux/config"
SSH_CONF="/etc/ssh/sshd_config"
SSH_PORT=22
echo "========= setting SSH and internat ip and ipsec secrets ======"
echo "please input SSH PORT: "
read SSH_PORT
echo "========= SSH PORT ============="
echo "${SSH_PORT}"
echo "please input SSH: "
read SSH_KEY
echo "========= SSH ============="
echo "${SSH_KEY}"
# FIX: "~" does not expand inside quotes, so the original test
# [ ! -x "~/.ssh" ] checked a literal path named "~" and was always true.
# Test the real directory under $HOME instead (-d: exists and is a dir).
if [ ! -d "$HOME/.ssh" ];then
mkdir -p ~/.ssh
fi
# Install the supplied public key (overwrites any existing authorized_keys).
echo -e "${SSH_KEY}" > ~/.ssh/authorized_keys
echo "====== read ~/.ssh/authorized_keys ========"
cat ~/.ssh/authorized_keys
# Flush everything back to accept-all before building the new ruleset,
# so we cannot lock ourselves out mid-script.
echo "===== reset iptables rules======"
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
iptables -F
iptables -X
iptables -Z
service iptables save
service iptables restart
iptables -nvL
# Switch SSH to certificate/key-based login (original comment: 修改SSH为证书登录).
setenforce 0
echo -e "#SELINUX=enforcing\n#SELINUXTYPE=targeted\nSELINUX=disabled\nSETLOCALDEFS=0" > ${SELINUX_CONF}
sed -i '/Port /c Port '"$SSH_PORT"'' ${SSH_CONF}
sed -i '/PermitEmptyPasswords no/c #PermitEmptyPasswords no' ${SSH_CONF}
sed -i '/PermitRootLogin yes/c #PermitRootLogin yes' ${SSH_CONF}
sed -i 's/PasswordAuthentication yes/PasswordAuthentication no/g' ${SSH_CONF}
echo "======== set iptables rules================"
# Replace firewalld with the classic iptables service (CentOS/RHEL 7+).
yum -y install iptables-services
systemctl mask firewalld.service
systemctl enable iptables.service
systemctl stop firewalld
systemctl start iptables
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -F
iptables -X
iptables -Z
#iptables -t nat -F
# INPUT
iptables -A INPUT -i lo -j ACCEPT
# iptables -A INPUT -m icmp -p icmp --icmp-type any -j ACCEPT
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# iptables -A INPUT -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -m state --state NEW -m tcp -p tcp --dport ${SSH_PORT} -j ACCEPT
iptables -A INPUT -m state --state NEW -m udp -p udp --sport 123 -j ACCEPT
iptables -A INPUT -p tcp --sport 16630 -j ACCEPT
# OUTPUT
iptables -A OUTPUT -o lo -j ACCEPT
iptables -A OUTPUT -m icmp -p icmp --icmp-type any -j ACCEPT
iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -p tcp --sport ${SSH_PORT} -j ACCEPT
iptables -A OUTPUT -p tcp --dport 21 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p udp --dport 123 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 80 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 443 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 1080 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 5222 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 5228 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 5229 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 5230 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 8080 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 14000 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 16630 -j ACCEPT
### Outlook.com
iptables -A OUTPUT -p tcp --dport 993 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 587 -j ACCEPT
### gmail
iptables -A OUTPUT -p tcp --dport 995 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 465 -j ACCEPT
# Default-deny everything not explicitly allowed above.
iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT DROP
service iptables save
service iptables restart
iptables -nvL
systemctl restart sshd.service
systemctl status sshd.service
#!/bin/bash
# AppImage desktop-integration helper (electron-builder template): locates
# the AppDir, exports the environment the bundled application needs, then
# (below) installs the .desktop file and icons into the XDG directories.
set -e
# be verbose if $DEBUG=1 is set
if [ ! -z "$DEBUG" ] ; then
env
set -x
fi
THIS="$0"
# http://stackoverflow.com/questions/3190818/
args=("$@")
NUMBER_OF_ARGS="$#"
# please do not change $VENDORPREFIX as it will allow for desktop files
# belonging to AppImages to be recognized by future AppImageKit components
# such as desktop integration daemons
VENDORPREFIX=appimagekit
if [ -z $APPDIR ] ; then
# Find the AppDir. It is the directory that contains AppRun.
# This assumes that this script resides inside the AppDir or a subdirectory.
# If this script is run inside an AppImage, then the AppImage runtime likely has already set $APPDIR
path="$(dirname "$(readlink -f "${THIS}")")"
# Walk upward until a directory containing "$1" is found.
# NOTE(review): "$1" is the script's first argument, presumably intended
# to name AppRun — confirm against the caller.
while [[ "$path" != "" && ! -e "$path/$1" ]]; do
path=${path%/*}
done
APPDIR="$path"
fi
# Prepend the bundled binaries, libraries and data dirs so the
# application prefers them over the host system's copies.
export PATH="${APPDIR}:${APPDIR}/usr/sbin:${PATH}"
export XDG_DATA_DIRS="./share/:/usr/share/gnome:/usr/local/share/:/usr/share/:${XDG_DATA_DIRS}"
export LD_LIBRARY_PATH="${APPDIR}/usr/lib:${LD_LIBRARY_PATH}"
export XDG_DATA_DIRS="${APPDIR}"/usr/share/:"${XDG_DATA_DIRS}":/usr/share/gnome/:/usr/local/share/:/usr/share/
export GSETTINGS_SCHEMA_DIR="${APPDIR}/usr/share/glib-2.0/schemas:${GSETTINGS_SCHEMA_DIR}"
# <%= %> placeholders are filled in by electron-builder at package time.
DESKTOP_FILE="$APPDIR/<%= desktopFileName %>"
BIN="$APPDIR/<%= executableName %>"
# On every exit path, hand control to the real application — unless the
# caller only wanted the install step ($APPIMAGE_EXIT_AFTER_INSTALL set).
trap atexit EXIT
atexit()
{
if [ -z "$APPIMAGE_EXIT_AFTER_INSTALL" ] ; then
# exec replaces this shell with the app, preserving the original args.
if [ $NUMBER_OF_ARGS -eq 0 ] ; then
exec "$BIN"
else
exec "$BIN" "${args[@]}"
fi
fi
}
error()
{
# Report a message through the first available GUI dialog tool
# (zenity, then kdialog, then Xdialog), falling back to stdout,
# then abort the script with a failure status.
MESSAGE="${1}"
if [ -x /usr/bin/zenity ] ; then
LD_LIBRARY_PATH="" zenity --error --text "${MESSAGE}" 2>/dev/null
elif [ -x /usr/bin/kdialog ] ; then
LD_LIBRARY_PATH="" kdialog --msgbox "${MESSAGE}" 2>/dev/null
elif [ -x /usr/bin/Xdialog ] ; then
LD_LIBRARY_PATH="" Xdialog --msgbox "${MESSAGE}" 2>/dev/null
else
echo "${MESSAGE}"
fi
exit 1
}
yesno()
{
# Ask TITLE/TEXT as a yes/no question with the first available dialog
# tool. Answering "No" — or having no dialog tool at all — exits 0,
# which (via the EXIT trap above) still launches the application;
# "Yes" returns normally so the caller proceeds with integration.
TITLE=$1
TEXT=$2
if [ -x /usr/bin/zenity ] ; then
LD_LIBRARY_PATH="" zenity --question --title="$TITLE" --text="$TEXT" 2>/dev/null || exit 0
elif [ -x /usr/bin/kdialog ] ; then
LD_LIBRARY_PATH="" kdialog --title "$TITLE" --yesno "$TEXT" || exit 0
elif [ -x /usr/bin/Xdialog ] ; then
LD_LIBRARY_PATH="" Xdialog --title "$TITLE" --clear --yesno "$TEXT" 10 80 || exit 0
else
echo "zenity, kdialog, Xdialog missing. Skipping ${THIS}."
exit 0
fi
}
check_prevent()
{
# Bail out — successfully, so the EXIT trap still launches the app —
# when the given desktop-integration opt-out marker file exists.
MARKER="$1"
if [ -e "$MARKER" ] ; then
exit 0
fi
}
# Exit immediately if one of these files is present
# (e.g., because the desktop environment wants to handle desktop integration itself)
check_prevent "$HOME/.local/share/$VENDORPREFIX/no_desktopintegration"
check_prevent "/usr/share/$VENDORPREFIX/no_desktopintegration"
check_prevent "/etc/$VENDORPREFIX/no_desktopintegration"
# Exit immediately if appimaged is running
# (the daemon performs the same integration itself)
pidof appimaged 2>/dev/null && exit 0
# Exit immediately if $DESKTOPINTEGRATION is not empty
if [ ! -z "$DESKTOPINTEGRATION" ] ; then
exit 0
fi
check_dep()
{
# Skip integration entirely (exit 0, app still launches via the EXIT
# trap) when a required base-system tool is not on the PATH.
DEP=$1
# FIX: quote the command substitution — when `which` prints nothing the
# unquoted form left a one-argument "[ -z ]" test that was true only by
# accident, and a tool path containing spaces would break the test.
if [ -z "$(which $DEP)" ] ; then
echo "$DEP is missing. Skipping ${THIS}."
exit 0
fi
}
# Check whether dependencies are present in base system (we do not bundle these)
# http://cgit.freedesktop.org/xdg/desktop-file-utils/
check_dep desktop-file-install
check_dep xdg-icon-resource
check_dep xdg-mime
check_dep xdg-desktop-menu
if [ ! -f "$DESKTOP_FILE" ] ; then
echo "Desktop file is missing. Please run ${THIS} from within an AppImage."
exit 0
fi
if [ -z "$APPIMAGE" ] ; then
APPIMAGE="$APPDIR/AppRun"
# Not running from within an AppImage; hence using the AppRun for Exec=
fi
# Determine where the desktop file should be installed
if [[ $EUID -ne 0 ]]; then
DESTINATION_DIR_DESKTOP="$HOME/.local/share/applications"
SYSTEM_WIDE=""
else
DESTINATION_DIR_DESKTOP="/usr/local/share/applications"
# for xdg-mime and xdg-icon-resource
SYSTEM_WIDE="--mode system"
fi
# check if the desktop file is already there and if so, whether it points to the same AppImage
if [ -e "$DESTINATION_DIR_DESKTOP/$VENDORPREFIX-<%= desktopFileName %>" ] ; then
# NOTE(review): cut -d " " -f 1 keeps the whole "X-AppImage-BuildId=..."
# line (it contains no space); the comparison below still works because
# both sides keep the identical key prefix — but -d "=" -f 2 was likely
# intended. Confirm before changing.
INSTALLED_APP_VERSION=$(grep "^X-AppImage-BuildId=" "$DESTINATION_DIR_DESKTOP/$VENDORPREFIX-<%= desktopFileName %>" | head -n 1 | cut -d " " -f 1)
APP_VERSION=$(grep "^X-AppImage-BuildId=" "$DESKTOP_FILE" | head -n 1 | cut -d " " -f 1)
#echo "installed: $INSTALLED_APP_VERSION image: $APP_VERSION"
# Same build already integrated -> nothing left to do.
if [ "$INSTALLED_APP_VERSION" == "$APP_VERSION" ] ; then
exit 0
fi
fi
<% if (systemIntegration === "ask") { %>
# we ask the user only if we have found no reason to skip until here
if [ -z "$APPIMAGE_SILENT_INSTALL" ] ; then
yesno "Install" "Would you like to integrate $APPIMAGE with your system?\n\nThis will add it to your applications menu and install icons.\nIf you don't do this you can still launch the application by double-clicking on the AppImage."
fi
<% } %>
# desktop-file-install is supposed to install .desktop files to the user's
# applications directory when run as a non-root user,
# and to /usr/share/applications if run as root
# but that does not really work for me...
# Rewrite Exec=/TryExec= to point at this AppImage (spaces in the path
# escaped as \s for TryExec) and install into $DESTINATION_DIR_DESKTOP.
desktop-file-install --rebuild-mime-info-cache \
--vendor=$VENDORPREFIX --set-key=Exec --set-value="\"${APPIMAGE}\" %U" \
--set-key=X-AppImage-Comment --set-value="Generated by ${THIS}" \
--set-icon="<%= resourceName %>" --set-key=TryExec --set-value=${APPIMAGE// /\\s} "$DESKTOP_FILE" \
--dir "$DESTINATION_DIR_DESKTOP" \
--mode=755
# uninstall previous icons
xdg-icon-resource uninstall --noupdate --size 16 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 24 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 32 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 48 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 64 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 72 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 96 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 128 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 256 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 512 "<%= resourceName %>"
xdg-icon-resource uninstall --noupdate --size 1024 "<%= resourceName %>"
# Install the icon files for the application
<%- additionalInstall %>
xdg-icon-resource forceupdate
# Install the icon files for the mime type
# ICON_SIZE is parsed from the ".../<N>x<N>/mimetypes/..." path component.
ICONS=$(find "${APPDIR}/usr/share/icons/" -wholename "*/mimetypes/*.png" 2>/dev/null || true)
for ICON in $ICONS ; do
ICON_SIZE=$(echo "${ICON}" | rev | cut -d "/" -f 3 | rev | cut -d "x" -f 1)
xdg-icon-resource install --context mimetypes --size ${ICON_SIZE} "${ICON}" $(basename $ICON | sed -e 's/.png//g')
done
xdg-desktop-menu forceupdate
# for MIME
gtk-update-icon-cache
# Neural network model: a simple MLP classifier (two 128-unit hidden
# layers with dropout, softmax output over NUM_CLASSES).
# FIX: the original used C-style "//" comments, which are a SyntaxError
# in Python ("//" parses as floor division); converted to "#".
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(NUM_FEATURES,)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
])

# Hyperparameters
# NOTE(review): `lr` is the legacy Adam argument name; newer Keras
# releases expect `learning_rate` — confirm the pinned TF version.
optimizer = tf.keras.optimizers.Adam(lr=0.001)
model.compile(optimizer=optimizer,
              loss='sparse_categorical_crossentropy',  # integer class labels
              metrics=['accuracy'])

# Data augmentation: random image-space transforms applied per batch.
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    zoom_range=[0.8, 1.2],
    horizontal_flip=True)
datagen.fit(x_train)

# Batch and training metrics
# NOTE(review): fit_generator is deprecated in TF2 — Model.fit accepts
# generators directly; kept as-is pending a TF version check.
model.fit_generator(datagen.flow(x_train, y_train, batch_size=BATCH_SIZE),
                    steps_per_epoch=x_train.shape[0] // BATCH_SIZE,
                    epochs=NUM_EPOCHS,
                    callbacks=[callback],
                    validation_data=(x_test, y_test))
#!/bin/bash
# Post-build release step: rewrite hub/tag values inside each release
# tarball's helm charts and re-upload the archives (plus sha256) to GCS.
# Expects CB_* variables from the Cloud Build environment file.
set -o errexit
set -o nounset
set -o pipefail
set -x
# shellcheck disable=SC1091
source "/workspace/gcb_env.sh"
# This script updates helm config files and add helm charts to the release tarballs.
# switch to the root of the istio repo
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT"
# fix_values_yaml <tarball_name>
# Download one release archive from GCS, patch hub/tag in the helm
# values.yaml files, bundle the pre-built helm charts, then re-upload
# the archive together with its sha256 checksum.
function fix_values_yaml() {
local tarball_name="$1"
# Pick (un)archive commands by extension: .zip (Windows) vs .tar.gz.
if [[ ${tarball_name} == *.zip ]]; then
local unzip_cmd="unzip -q"
local zip_cmd="zip -q -r"
else
local unzip_cmd="tar -zxf"
local zip_cmd="tar -zcf"
fi
gsutil -q cp "gs://${CB_GCS_BUILD_PATH}/${tarball_name}" .
eval "$unzip_cmd" "${tarball_name}"
rm "${tarball_name}"
# Update version string in yaml files.
sed -i "s|hub: gcr.io/istio-release|hub: ${CB_DOCKER_HUB}|g" ./"istio-${CB_VERSION}"/install/kubernetes/helm/istio*/values.yaml
sed -i "s|tag: .*-latest-daily|tag: ${CB_VERSION}|g" ./"istio-${CB_VERSION}"/install/kubernetes/helm/istio*/values.yaml
# Copy helm charts (build by helm_charts.sh) to be packaged in the tarball.
# NOTE(review): this reads the absolute path /modification-tmp, while the
# top-level code below creates a relative ./modification-tmp — confirm
# both refer to the same location in the build container.
mkdir -vp ./"istio-${CB_VERSION}"/install/kubernetes/helm/charts
cp /modification-tmp/* ./"istio-${CB_VERSION}"/install/kubernetes/helm/charts
# replace prerelease with release location for istio.io repo
if [ "${CB_PIPELINE_TYPE}" = "monthly" ]; then
sed -i.bak "s:istio-prerelease/daily-build.*$:istio-release/releases/${CB_VERSION}/charts:g" ./"istio-${CB_VERSION}"/install/kubernetes/helm/istio/README.md
rm -rf ./"istio-${CB_VERSION}"/install/kubernetes/helm/istio/README.md.bak
echo "Done replacing pre-released charts with released charts for istio.io repo"
fi
# Re-pack and publish the patched archive plus its checksum.
eval "$zip_cmd" "${tarball_name}" "istio-${CB_VERSION}"
sha256sum "${tarball_name}" > "${tarball_name}.sha256"
rm -rf "istio-${CB_VERSION}"
gsutil -q cp "${tarball_name}" "gs://${CB_GCS_BUILD_PATH}/${tarball_name}"
gsutil -q cp "${tarball_name}.sha256" "gs://${CB_GCS_BUILD_PATH}/${tarball_name}.sha256"
echo "DONE fixing gs://${CB_GCS_BUILD_PATH}/${tarball_name} with hub: ${CB_DOCKER_HUB} tag: ${CB_VERSION}"
}
# Work inside a scratch directory, then patch each platform archive.
# NOTE(review): fix_values_yaml copies from the absolute /modification-tmp,
# while this creates a relative ./modification-tmp under $ROOT — confirm
# they are meant to be the same directory.
mkdir -p modification-tmp
cd modification-tmp || exit 2
ls -l
pwd
# Linux
fix_values_yaml "istio-${CB_VERSION}-linux.tar.gz"
# Mac
fix_values_yaml "istio-${CB_VERSION}-osx.tar.gz"
# Windows
fix_values_yaml "istio-${CB_VERSION}-win.zip"
#filename | sha256 hash
#-------- | -----------
#[kubernetes.tar.gz](https://dl.k8s.io/v1.10.6/kubernetes.tar.gz) | `dbb1e757ea8fe5e82796db8604d3fc61f8b79ba189af8e3b618d86fcae93dfd0`
|
<filename>main.go<gh_stars>1-10
package main

import (
	"log"
	"net/http"
)

// port is the TCP address the HTTP server binds to.
const port = ":3000"

// main wires up the chat endpoints, starts the background broadcaster,
// and serves HTTP until the listener fails.
func main() {
	// Fan incoming messages out to connected listeners in the background.
	go broadcastMessages()
	http.HandleFunc("/spam", spam)
	http.HandleFunc("/listen", listen)
	http.HandleFunc("/squawk", squawk)
	http.HandleFunc("/squawker", transmitter)
	log.Println("Server is running on port", port)
	// ListenAndServe only returns on error. FIX: log the error with its
	// typo corrected and exit non-zero (log.Fatal) so supervisors see
	// the failure — the original logged and exited 0.
	if err := http.ListenAndServe(port, nil); err != nil {
		log.Fatal("Unable to start server: ", err)
	}
}
|
#!/bin/sh
# Publish the built jar (with its POM) to the Clojars repository.
mvn deploy:deploy-file \
    -Dfile=target/bide.jar \
    -DpomFile=pom.xml \
    -DrepositoryId=clojars \
    -Durl=https://clojars.org/repo/
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.