text stringlengths 1 1.05M |
|---|
#!/bin/bash
echo $1 | rev | sed -E 's/^0+//' |
<filename>plastic/src/test/java/testsubjects/InjectSubClass.java
package testsubjects;
import testannotations.KindaInject;
import testinterfaces.Logger;
/**
 * Test subject: a subclass carrying its own {@code @KindaInject}-annotated
 * field, used to verify field injection works at every level of a class
 * hierarchy (presumably alongside InjectMidClass — confirm in the test).
 */
public class InjectSubClass extends InjectMidClass
{
    // Injected by the test harness via @KindaInject; not assigned in this class.
    @KindaInject
    private Logger logger;

    /** Returns the logger injected into this subclass's own field. */
    public Logger getSubClassLogger()
    {
        return logger;
    }
}
|
#!/usr/bin/env bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Temporarily increasing the cluster size for serving tests to rule out
# resource/eviction as causes of flakiness. These env vars are consumed
# in the test-infra/scripts/e2e-tests.sh. Use the existing value, if provided
# with the job config.
E2E_MIN_CLUSTER_NODES=${E2E_MIN_CLUSTER_NODES:-4}
E2E_MAX_CLUSTER_NODES=${E2E_MAX_CLUSTER_NODES:-4}
E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-e2-standard-8}

# This script provides helper methods to perform cluster actions.
# Quoted so the sourced path survives spaces in the checkout location.
source "$(dirname "$0")/../vendor/knative.dev/test-infra/scripts/e2e-tests.sh"

CERT_MANAGER_VERSION="0.12.0"
# Since default is istio, make default ingress as istio
INGRESS_CLASS=${INGRESS_CLASS:-istio.ingress.networking.knative.dev}
# Per-ingress version markers; parse_flags() sets exactly one of these and a
# non-empty value selects that ingress during install.
ISTIO_VERSION=""
GLOO_VERSION=""
KOURIER_VERSION=""
AMBASSADOR_VERSION=""
CONTOUR_VERSION=""
CERTIFICATE_CLASS=""
HTTPS=0
MESH=0
INSTALL_MONITORING=0
# List of custom YAMLs to install, if specified (space-separated).
INSTALL_CUSTOM_YAMLS=""
# YAMLs applied during install; torn down in reverse order by knative_teardown().
UNINSTALL_LIST=()
TMP_DIR=$(mktemp -d -t ci-$(date +%Y-%m-%d-%H-%M-%S)-XXXXXXXXXX)
readonly TMP_DIR
readonly KNATIVE_DEFAULT_NAMESPACE="knative-serving"
# This the namespace used to install Knative Serving. Use generated UUID as namespace.
export SYSTEM_NAMESPACE
SYSTEM_NAMESPACE=$(uuidgen | tr 'A-Z' 'a-z')
# Parse our custom flags.
# Parse our custom flags.
# Globals (written): *_VERSION, INGRESS_CLASS, CERTIFICATE_CLASS, MESH, HTTPS,
#   INSTALL_MONITORING, INSTALL_CUSTOM_YAMLS, SYSTEM_NAMESPACE.
# Arguments: $1 - flag name; $2 - flag value (for flags that take one).
# Returns: number of arguments consumed, or 0 if the flag is not recognized.
function parse_flags() {
  case "$1" in
    --istio-version)
      # Accept a full semver or a '<major>.<minor>-latest' alias.
      # (Fixed: the abort message was missing its closing quote.)
      [[ $2 =~ ^[0-9]+\.[0-9]+(\.[0-9]+|\-latest)$ ]] || abort "version format must be '[0-9].[0-9].[0-9]' or '[0-9].[0-9]-latest'"
      readonly ISTIO_VERSION=$2
      readonly INGRESS_CLASS="istio.ingress.networking.knative.dev"
      return 2
      ;;
    --version)
      [[ $2 =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be 'v[0-9].[0-9].[0-9]'"
      LATEST_SERVING_RELEASE_VERSION=$2
      return 2
      ;;
    --cert-manager-version)
      [[ $2 =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be '[0-9].[0-9].[0-9]'"
      readonly CERT_MANAGER_VERSION=$2
      readonly CERTIFICATE_CLASS="cert-manager.certificate.networking.knative.dev"
      return 2
      ;;
    --mesh)
      readonly MESH=1
      return 1
      ;;
    --no-mesh)
      readonly MESH=0
      return 1
      ;;
    --https)
      readonly HTTPS=1
      return 1
      ;;
    --install-monitoring)
      readonly INSTALL_MONITORING=1
      return 1
      ;;
    --custom-yamls)
      [[ -z "$2" ]] && fail_test "Missing argument to --custom-yamls"
      # Expect a list of comma-separated YAMLs.
      INSTALL_CUSTOM_YAMLS="${2//,/ }"
      readonly INSTALL_CUSTOM_YAMLS
      return 2
      ;;
    --gloo-version)
      # currently, the value of --gloo-version is ignored
      # latest version of Gloo pinned in third_party will be installed
      readonly GLOO_VERSION=$2
      readonly INGRESS_CLASS="gloo.ingress.networking.knative.dev"
      return 2
      ;;
    --kourier-version)
      # currently, the value of --kourier-version is ignored
      # latest version of Kourier pinned in third_party will be installed
      readonly KOURIER_VERSION=$2
      readonly INGRESS_CLASS="kourier.ingress.networking.knative.dev"
      return 2
      ;;
    --ambassador-version)
      # currently, the value of --ambassador-version is ignored
      # latest version of Ambassador pinned in third_party will be installed
      readonly AMBASSADOR_VERSION=$2
      readonly INGRESS_CLASS="ambassador.ingress.networking.knative.dev"
      return 2
      ;;
    --contour-version)
      # currently, the value of --contour-version is ignored
      # latest version of Contour pinned in third_party will be installed
      readonly CONTOUR_VERSION=$2
      readonly INGRESS_CLASS="contour.ingress.networking.knative.dev"
      return 2
      ;;
    --system-namespace)
      # Reject a missing value or one that is itself a flag.
      [[ -z "$2" ]] || [[ $2 = --* ]] && fail_test "Missing argument to --system-namespace"
      export SYSTEM_NAMESPACE=$2
      return 2
      ;;
  esac
  return 0
}
# Create all manifests required to install Knative Serving.
# This will build everything from the current source.
# All generated YAMLs will be available and pointed by the corresponding
# environment variables as set in /hack/generate-yamls.sh.
function build_knative_from_source() {
  # Temp file that generate-yamls.sh fills with the list of produced YAMLs.
  local YAML_LIST="$(mktemp)"
  # Generate manifests, capture environment variables pointing to the YAML files.
  # generate-yamls.sh is sourced inside a command substitution (a subshell), so
  # its variables do not leak here; 'set' dumps them as NAME=value lines and
  # only the *_YAML=/path assignments are kept for re-import via eval below.
  local FULL_OUTPUT="$( \
    source $(dirname $0)/../hack/generate-yamls.sh ${REPO_ROOT_DIR} ${YAML_LIST} ; \
    set | grep _YAML=/)"
  # Split plain build-log lines from the VAR=/path assignment lines.
  local LOG_OUTPUT="$(echo "${FULL_OUTPUT}" | grep -v _YAML=/)"
  local ENV_OUTPUT="$(echo "${FULL_OUTPUT}" | grep '^[_0-9A-Z]\+_YAML=/')"
  [[ -z "${LOG_OUTPUT}" || -z "${ENV_OUTPUT}" ]] && fail_test "Error generating manifests"
  # Only import the environment variables pointing to the YAML files.
  echo "${LOG_OUTPUT}"
  echo -e "Generated manifests:\n${ENV_OUTPUT}"
  eval "${ENV_OUTPUT}"
}
# Installs Knative Serving in the current cluster, and waits for it to be ready.
# If no parameters are passed, installs the current source-based build, unless custom
# YAML files were passed using the --custom-yamls flag.
# Parameters: $1 - Knative Serving YAML file
# $2 - Knative Monitoring YAML file (optional)
# Installs Knative Serving in the current cluster, and waits for it to be ready.
# Installs the custom YAMLs passed via --custom-yamls when present; otherwise
# delegates to the standard source/pre-built install.
# Parameters: $1 - Knative Serving YAML file
#             $2 - Knative Monitoring YAML file (optional)
function install_knative_serving() {
  if [[ -n "${INSTALL_CUSTOM_YAMLS}" ]]; then
    echo ">> Installing Knative serving from custom YAMLs"
    echo "Custom YAML files: ${INSTALL_CUSTOM_YAMLS}"
    local yaml
    for yaml in ${INSTALL_CUSTOM_YAMLS}; do
      # Rewrite the default namespace to the generated one before applying.
      local target=${TMP_DIR}/${yaml##*/}
      sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${yaml} > ${target}
      echo "Installing '${target}'"
      kubectl create -f "${target}" || return 1
    done
    return
  fi
  install_knative_serving_standard "$1" "$2"
}
# Installs Istio (CRDs, control plane, and optionally the Knative ingress
# shim) and registers everything applied in UNINSTALL_LIST.
# Parameters: $1 - optional YAML bundle containing the Istio Knative ingress.
function install_istio() {
  # If no gateway was set on command line, assume Istio
  if [[ -z "${ISTIO_VERSION}" ]]; then
    echo ">> No gateway set up on command line, using Istio"
    readonly ISTIO_VERSION="1.4-latest"
  fi
  local istio_base="./third_party/istio-${ISTIO_VERSION}"
  INSTALL_ISTIO_CRD_YAML="${istio_base}/istio-crds.yaml"
  # Mesh mode uses a different pinned manifest (sidecar injection enabled).
  if (( MESH )); then
    INSTALL_ISTIO_YAML="${istio_base}/istio-ci-mesh.yaml"
  else
    INSTALL_ISTIO_YAML="${istio_base}/istio-ci-no-mesh.yaml"
  fi
  echo "Istio CRD YAML: ${INSTALL_ISTIO_CRD_YAML}"
  echo "Istio YAML: ${INSTALL_ISTIO_YAML}"
  echo ">> Bringing up Istio"
  echo ">> Running Istio CRD installer"
  # CRDs are installed via a batch job; wait for it before the main apply.
  kubectl apply -f "${INSTALL_ISTIO_CRD_YAML}" || return 1
  wait_until_batch_job_complete istio-system || return 1
  UNINSTALL_LIST+=( "${INSTALL_ISTIO_CRD_YAML}" )
  echo ">> Running Istio"
  kubectl apply -f "${INSTALL_ISTIO_YAML}" || return 1
  UNINSTALL_LIST+=( "${INSTALL_ISTIO_YAML}" )
  # If the yaml for the Istio Ingress controller is passed, then install it.
  if [[ -n "$1" ]]; then
    echo ">> Installing Istio Ingress"
    echo "Istio Ingress YAML: ${1}"
    # We apply a filter here because when we're installing from a pre-built
    # bundle then the whole bundle it passed here. We use ko because it has
    # better filtering support for CRDs.
    # The default namespace is rewritten to the generated SYSTEM_NAMESPACE first.
    local YAML_NAME=${TMP_DIR}/${1##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${1} > ${YAML_NAME}
    ko apply -f "${YAML_NAME}" --selector=networking.knative.dev/ingress-provider=istio || return 1
    UNINSTALL_LIST+=( "${YAML_NAME}" )
  fi
}
# Installs the Gloo ingress pinned in third_party and registers it for teardown.
function install_gloo() {
  local gloo_yaml="./third_party/gloo-latest/gloo.yaml"
  echo "Gloo YAML: ${gloo_yaml}"
  echo ">> Bringing up Gloo"
  kubectl apply -f "${gloo_yaml}" || return 1
  UNINSTALL_LIST+=( "${gloo_yaml}" )
  echo ">> Patching Gloo"
  # Scale replicas of the Gloo proxies to handle large qps
  local proxy
  for proxy in knative-external-proxy knative-internal-proxy; do
    kubectl scale -n gloo-system deployment "${proxy}" --replicas=6
  done
}
# Installs the Kourier ingress pinned in third_party and registers it for teardown.
function install_kourier() {
  local kourier_yaml="./third_party/kourier-latest/kourier.yaml"
  echo "Kourier YAML: ${kourier_yaml}"
  echo ">> Bringing up Kourier"
  kubectl apply -f "${kourier_yaml}" || return 1
  UNINSTALL_LIST+=( "${kourier_yaml}" )
  echo ">> Patching Kourier"
  # Scale replicas of the Kourier gateways to handle large qps
  kubectl scale -n kourier-system deployment 3scale-kourier-gateway --replicas=6
}
# Installs the Ambassador ingress pinned in third_party into its own namespace
# and registers the manifests for teardown.
function install_ambassador() {
  local manifests="./third_party/ambassador-latest/"
  echo "Ambassador YAML: ${manifests}"
  echo ">> Creating namespace 'ambassador'"
  kubectl create namespace ambassador || return 1
  echo ">> Installing Ambassador"
  kubectl apply -n ambassador -f "${manifests}" || return 1
  UNINSTALL_LIST+=( "${manifests}" )
  # The clusterrolebinding / AMBASSADOR_KNATIVE_SUPPORT patches that used to
  # run here remain disabled.
  echo ">> Patching Ambassador"
  # Scale replicas of the Ambassador gateway to handle large qps
  kubectl scale -n ambassador deployment ambassador --replicas=6
}
# Installs Contour plus the net-contour Knative ingress shim (both pinned in
# third_party) and registers them for teardown.
function install_contour() {
  local contour_yaml="./third_party/contour-latest/contour.yaml"
  local net_contour_yaml="./third_party/contour-latest/net-contour.yaml"
  echo "Contour YAML: ${contour_yaml}"
  echo "Contour KIngress YAML: ${net_contour_yaml}"
  echo ">> Bringing up Contour"
  kubectl apply -f "${contour_yaml}" || return 1
  UNINSTALL_LIST+=( "${contour_yaml}" )
  # net-contour targets the default namespace; rewrite it to SYSTEM_NAMESPACE.
  local patched=${TMP_DIR}/${net_contour_yaml##*/}
  sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${net_contour_yaml} > ${patched}
  echo ">> Bringing up net-contour"
  kubectl apply -f ${patched} || return 1
  UNINSTALL_LIST+=( "${patched}" )
}
# Installs Knative Serving in the current cluster, and waits for it to be ready.
# If no parameters are passed, installs the current source-based build.
# Parameters: $1 - Knative Serving YAML file
# $2 - Knative Monitoring YAML file (optional)
# Installs Knative Serving in the current cluster, and waits for it to be ready.
# Steps: create namespace -> CRDs -> ingress -> cert-manager -> serving core
# (and optional monitoring) -> configmaps -> activator HPA patch. Everything
# applied is appended to UNINSTALL_LIST for knative_teardown().
# Parameters: $1 - Knative Serving YAML file (empty = build from source)
#             $2 - Knative Monitoring YAML file (optional)
function install_knative_serving_standard() {
  echo ">> Creating ${SYSTEM_NAMESPACE} namespace if it does not exist"
  kubectl get ns ${SYSTEM_NAMESPACE} || kubectl create namespace ${SYSTEM_NAMESPACE}
  # Under mesh mode, enable Istio sidecar injection for the serving namespace.
  if (( MESH )); then
    kubectl label namespace ${SYSTEM_NAMESPACE} istio-injection=enabled
  fi
  # Delete the test namespace
  add_trap "kubectl delete namespace ${SYSTEM_NAMESPACE} --ignore-not-found=true" SIGKILL SIGTERM SIGQUIT
  echo ">> Installing Knative CRD"
  if [[ -z "$1" ]]; then
    # If we need to build from source, then kick that off first.
    build_knative_from_source
    echo "CRD YAML: ${SERVING_CRD_YAML}"
    kubectl apply -f "${SERVING_CRD_YAML}" || return 1
    UNINSTALL_LIST+=( "${SERVING_CRD_YAML}" )
  else
    # Pre-built YAML: rewrite the default namespace, then apply only the CRDs
    # (ko's selector filters on the crd-install label).
    local YAML_NAME=${TMP_DIR}/${1##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${1} > ${YAML_NAME}
    echo "Knative YAML: ${YAML_NAME}"
    ko apply -f "${YAML_NAME}" --selector=knative.dev/crd-install=true || return 1
    UNINSTALL_LIST+=( "${YAML_NAME}" )
  fi
  echo ">> Installing Ingress"
  # Exactly one ingress is installed; a non-empty *_VERSION (set by
  # parse_flags) selects it, defaulting to Istio.
  if [[ -n "${GLOO_VERSION}" ]]; then
    install_gloo || return 1
  elif [[ -n "${KOURIER_VERSION}" ]]; then
    install_kourier || return 1
  elif [[ -n "${AMBASSADOR_VERSION}" ]]; then
    install_ambassador || return 1
  elif [[ -n "${CONTOUR_VERSION}" ]]; then
    install_contour || return 1
  else
    install_istio "./third_party/net-istio.yaml" || return 1
  fi
  echo ">> Installing Cert-Manager"
  readonly INSTALL_CERT_MANAGER_YAML="./third_party/cert-manager-${CERT_MANAGER_VERSION}/cert-manager.yaml"
  echo "Cert Manager YAML: ${INSTALL_CERT_MANAGER_YAML}"
  # NOTE(review): validation is disabled for the cert-manager manifest —
  # presumably due to fields unknown to the client schema; confirm.
  kubectl apply -f "${INSTALL_CERT_MANAGER_YAML}" --validate=false || return 1
  UNINSTALL_LIST+=( "${INSTALL_CERT_MANAGER_YAML}" )
  readonly NET_CERTMANAGER_YAML="./third_party/cert-manager-${CERT_MANAGER_VERSION}/net-certmanager.yaml"
  echo "net-certmanager YAML: ${NET_CERTMANAGER_YAML}"
  local CERT_YAML_NAME=${TMP_DIR}/${NET_CERTMANAGER_YAML##*/}
  sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${NET_CERTMANAGER_YAML} > ${CERT_YAML_NAME}
  kubectl apply \
    -f "${CERT_YAML_NAME}" || return 1
  UNINSTALL_LIST+=( "${CERT_YAML_NAME}" )
  echo ">> Installing Knative serving"
  if [[ -z "$1" ]]; then
    # Source build: apply the generated core and HPA manifests with the
    # namespace rewritten to the generated one.
    local CORE_YAML_NAME=${TMP_DIR}/${SERVING_CORE_YAML##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${SERVING_CORE_YAML} > ${CORE_YAML_NAME}
    local HPA_YAML_NAME=${TMP_DIR}/${SERVING_HPA_YAML##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${SERVING_HPA_YAML} > ${HPA_YAML_NAME}
    echo "Knative YAML: ${CORE_YAML_NAME} and ${HPA_YAML_NAME}"
    kubectl apply \
      -f "${CORE_YAML_NAME}" \
      -f "${HPA_YAML_NAME}" || return 1
    UNINSTALL_LIST+=( "${CORE_YAML_NAME}" "${HPA_YAML_NAME}" )
    if (( INSTALL_MONITORING )); then
      echo ">> Installing Monitoring"
      echo "Knative Monitoring YAML: ${MONITORING_YAML}"
      kubectl apply -f "${MONITORING_YAML}" || return 1
      UNINSTALL_LIST+=( "${MONITORING_YAML}" )
    fi
  else
    echo "Knative YAML: ${1}"
    # If we are installing from provided yaml, then only install non-istio bits here,
    # and if we choose to install istio below, then pass the whole file as the rest.
    # We use ko because it has better filtering support for CRDs.
    local YAML_NAME=${TMP_DIR}/${1##*/}
    sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${1} > ${YAML_NAME}
    ko apply -f "${YAML_NAME}" --selector=networking.knative.dev/ingress-provider!=istio || return 1
    UNINSTALL_LIST+=( "${YAML_NAME}" )
    if (( INSTALL_MONITORING )); then
      echo ">> Installing Monitoring"
      echo "Knative Monitoring YAML: ${2}"
      kubectl apply -f "${2}" || return 1
      UNINSTALL_LIST+=( "${2}" )
    fi
  fi
  echo ">> Configuring the default Ingress: ${INGRESS_CLASS}"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-network
  namespace: ${SYSTEM_NAMESPACE}
  labels:
    serving.knative.dev/release: devel
data:
  ingress.class: ${INGRESS_CLASS}
EOF
  echo ">> Turning on profiling.enable"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-observability
  namespace: ${SYSTEM_NAMESPACE}
data:
  profiling.enable: "true"
EOF
  echo ">> Patching activator HPA"
  # We set min replicas to 15 for testing multiple activator pods.
  kubectl -n ${SYSTEM_NAMESPACE} patch hpa activator --patch '{"spec":{"minReplicas":15}}' || return 1
}
# Check if we should use --resolvabledomain. In case the ingress only has
# hostname, we doesn't yet have a way to support resolvable domain in tests.
# Whether tests should pass --resolvabledomain; currently always "false".
function use_resolvable_domain() {
  # Temporarily turning off xip.io tests, as DNS errors aren't always retried.
  printf 'false\n'
}
# Check if we should specify --ingressClass
# Emit the --ingressClass flag for the configured ingress, or nothing.
function ingress_class() {
  if [[ -n "${INGRESS_CLASS}" ]]; then
    echo "--ingressClass=${INGRESS_CLASS}"
  else
    echo ""
  fi
}
# Check if we should specify --certificateClass
# Emit the --certificateClass flag for the configured certificate class, or nothing.
function certificate_class() {
  if [[ -n "${CERTIFICATE_CLASS}" ]]; then
    echo "--certificateClass=${CERTIFICATE_CLASS}"
  else
    echo ""
  fi
}
# Uninstalls Knative Serving from the current cluster.
# Uninstalls Knative Serving from the current cluster.
# Deletes either the custom YAMLs (in order) or everything recorded in
# UNINSTALL_LIST (in reverse installation order).
function knative_teardown() {
  # Note: an array count is used here; expanding "${UNINSTALL_LIST[@]}" inside
  # [[ -z ... ]] is an error when the array has more than one element.
  if [[ -z "${INSTALL_CUSTOM_YAMLS}" ]] && (( ${#UNINSTALL_LIST[@]} == 0 )); then
    echo "install_knative_serving() was not called, nothing to uninstall"
    return 0
  fi
  if [[ -n "${INSTALL_CUSTOM_YAMLS}" ]]; then
    echo ">> Uninstalling Knative serving from custom YAMLs"
    local yaml
    for yaml in ${INSTALL_CUSTOM_YAMLS}; do
      echo "Uninstalling '${yaml}'"
      kubectl delete --ignore-not-found=true -f "${yaml}" || return 1
    done
  else
    echo ">> Uninstalling Knative serving"
    # We uninstall elements in the reverse of the order they were installed.
    # (Fixed: the original indexed a nonexistent 'array' variable and was off
    # by one, so the wrong elements were deleted.)
    local i
    for (( i=${#UNINSTALL_LIST[@]}-1; i>=0; i-- )); do
      local yaml="${UNINSTALL_LIST[$i]}"
      echo ">> Bringing down YAML: ${yaml}"
      kubectl delete --ignore-not-found=true -f "${yaml}" || return 1
    done
  fi
}
# Add function call to trap
# Parameters: $1 - Function to call
# $2...$n - Signals for trap
function add_trap() {
  local cmd=$1
  shift
  # Register $cmd on each requested signal, chaining after any handler that
  # is already installed so existing traps keep running.
  for trap_signal in $@; do
    # 'trap -p SIG' prints: trap -- 'body' SIG ; cut out the single-quoted body.
    local current_trap="$(trap -p $trap_signal | cut -d\' -f2)"
    # Parenthesize the new command so it is a single unit in the chain.
    local new_cmd="($cmd)"
    [[ -n "${current_trap}" ]] && new_cmd="${current_trap};${new_cmd}"
    trap -- "${new_cmd}" $trap_signal
  done
}
# Create test resources and images
function test_setup() {
  echo ">> Replacing ${KNATIVE_DEFAULT_NAMESPACE} with the actual namespace for Knative Serving..."
  # Copy the test fixtures into TMP_DIR and rewrite the namespace in the copies.
  local TEST_DIR=${TMP_DIR}/test
  mkdir -p ${TEST_DIR}
  cp -r test/* ${TEST_DIR}
  find ${TEST_DIR} -type f -name "*.yaml" -exec sed -i "s/${KNATIVE_DEFAULT_NAMESPACE}/${SYSTEM_NAMESPACE}/g" {} +
  echo ">> Setting up logging..."
  # Install kail if needed.
  if ! which kail > /dev/null; then
    bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$GOPATH/bin"
  fi
  # Capture all logs (kail runs in the background for the whole test session).
  kail > ${ARTIFACTS}/k8s.log-$(basename ${E2E_SCRIPT}).txt &
  local kail_pid=$!
  # Clean up kail so it doesn't interfere with job shutting down
  add_trap "kill $kail_pid || true" EXIT
  local TEST_CONFIG_DIR=${TEST_DIR}/config
  echo ">> Creating test resources (${TEST_CONFIG_DIR}/)"
  ko apply ${KO_FLAGS} -f ${TEST_CONFIG_DIR}/ || return 1
  # Mesh mode: enable sidecar injection in the test namespaces and apply the
  # istio-specific security fixtures.
  if (( MESH )); then
    kubectl label namespace serving-tests istio-injection=enabled
    kubectl label namespace serving-tests-alt istio-injection=enabled
    kubectl label namespace serving-tests-security istio-injection=enabled
    ko apply ${KO_FLAGS} -f ${TEST_CONFIG_DIR}/security/ --selector=test.knative.dev/dependency=istio-sidecar || return 1
  fi
  echo ">> Uploading test images..."
  ${REPO_ROOT_DIR}/test/upload-test-images.sh || return 1
  echo ">> Waiting for Serving components to be running..."
  wait_until_pods_running ${SYSTEM_NAMESPACE} || return 1
  echo ">> Waiting for Cert Manager components to be running..."
  wait_until_pods_running cert-manager || return 1
  echo ">> Waiting for Ingress provider to be running..."
  # Only the blocks matching the selected ingress (non-empty *_VERSION) run.
  if [[ -n "${ISTIO_VERSION}" ]]; then
    wait_until_pods_running istio-system || return 1
    wait_until_service_has_external_http_address istio-system istio-ingressgateway
  fi
  if [[ -n "${GLOO_VERSION}" ]]; then
    # we must set these override values to allow the test spoofing client to work with Gloo
    # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37
    export GATEWAY_OVERRIDE=knative-external-proxy
    export GATEWAY_NAMESPACE_OVERRIDE=gloo-system
    wait_until_pods_running gloo-system || return 1
    wait_until_service_has_external_ip gloo-system knative-external-proxy
  fi
  if [[ -n "${KOURIER_VERSION}" ]]; then
    # we must set these override values to allow the test spoofing client to work with Kourier
    # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37
    export GATEWAY_OVERRIDE=kourier
    export GATEWAY_NAMESPACE_OVERRIDE=kourier-system
    wait_until_pods_running kourier-system || return 1
    wait_until_service_has_external_http_address kourier-system kourier
  fi
  if [[ -n "${AMBASSADOR_VERSION}" ]]; then
    # we must set these override values to allow the test spoofing client to work with Ambassador
    # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37
    export GATEWAY_OVERRIDE=ambassador
    export GATEWAY_NAMESPACE_OVERRIDE=ambassador
    wait_until_pods_running ambassador || return 1
    wait_until_service_has_external_http_address ambassador ambassador
  fi
  if [[ -n "${CONTOUR_VERSION}" ]]; then
    # we must set these override values to allow the test spoofing client to work with Contour
    # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37
    export GATEWAY_OVERRIDE=envoy
    export GATEWAY_NAMESPACE_OVERRIDE=contour-external
    wait_until_pods_running contour-external || return 1
    wait_until_pods_running contour-internal || return 1
    wait_until_service_has_external_ip "${GATEWAY_NAMESPACE_OVERRIDE}" "${GATEWAY_OVERRIDE}"
  fi
  if (( INSTALL_MONITORING )); then
    echo ">> Waiting for Monitoring to be running..."
    wait_until_pods_running knative-monitoring || return 1
  fi
}
# Delete test resources
# Delete test resources: the applied config fixtures, then every test
# namespace (objects first, then the namespace itself).
function test_teardown() {
  local TEST_CONFIG_DIR=${TMP_DIR}/test/config
  echo ">> Removing test resources (${TEST_CONFIG_DIR}/)"
  ko delete --ignore-not-found=true --now -f ${TEST_CONFIG_DIR}/
  if (( MESH )); then
    ko delete --ignore-not-found=true --now -f ${TEST_CONFIG_DIR}/security/
  fi
  echo ">> Ensuring test namespaces are clean"
  # Same delete pair for each namespace (previously copy-pasted three times).
  local ns
  for ns in serving-tests serving-tests-alt serving-tests-security; do
    kubectl delete all --all --ignore-not-found --now --timeout 60s -n "${ns}"
    kubectl delete --ignore-not-found --now --timeout 60s namespace "${ns}"
  done
}
# Dump more information when test fails.
# Dump more information when test fails: every Knative Serving resource type,
# one ">>> Header:" section per kind.
function dump_extra_cluster_state() {
  local entry
  for entry in \
    "Routes:routes" \
    "Configurations:configurations" \
    "Revisions:revisions" \
    "PodAutoscalers:podautoscalers" \
    "SKSs:serverlessservices"; do
    echo ">>> ${entry%%:*}:"
    kubectl get "${entry#*:}" -o yaml --all-namespaces
  done
}
# Enable the autoTLS feature flag in the config-network ConfigMap.
function turn_on_auto_tls() {
  local patch='{"data":{"autoTLS":"Enabled"}}'
  kubectl patch configmap config-network -n ${SYSTEM_NAMESPACE} -p "${patch}"
}
# Disable the autoTLS feature flag in the config-network ConfigMap.
function turn_off_auto_tls() {
  local patch='{"data":{"autoTLS":"Disabled"}}'
  kubectl patch configmap config-network -n ${SYSTEM_NAMESPACE} -p "${patch}"
}
|
#!/bin/bash
# Foxtrot container entrypoint: select a config file, optionally sleep,
# run one-time initialization, then start the server.

# Default config shipped inside the image.
FOXTROT_CONFIG_FILE=/config/docker.yml

if [ -z "${CONFIG_PATH}" ]; then
  echo "No CONFIG_PATH defined. We shall be using default config from ${FOXTROT_CONFIG_FILE}"
else
  # Fixed: validate the user-supplied CONFIG_PATH. The original tested the
  # default file instead, so CONFIG_PATH was honored/rejected for the wrong
  # reason.
  if [ -f "${CONFIG_PATH}" ]; then
    FOXTROT_CONFIG_FILE=${CONFIG_PATH}
  else
    echo "Defined CONFIG_PATH (${CONFIG_PATH}) does not look like a proper file. Using default: ${FOXTROT_CONFIG_FILE}"
  fi
fi

# Optional startup delay (e.g. waiting for dependencies to come up).
if [ -z "${INIT_SLEEP}" ]; then
  echo "No initial sleep specified.. Foxtrot will start up immediately"
else
  echo -e "Init sleep of ${INIT_SLEEP} seconds specified. Waiting..."
  sleep "${INIT_SLEEP}"
  echo "done"
fi

# ${VAR-default} keeps any explicitly-set (even empty) override from the env.
INIT_COMMAND="java -jar server.jar initialize ${FOXTROT_CONFIG_FILE}"
EXEC_CMD="java -Dfile.encoding=utf-8 -XX:+${GC_ALGO-UseG1GC} -Xms${JAVA_PROCESS_MIN_HEAP-1g} -Xmx${JAVA_PROCESS_MAX_HEAP-1g} ${JAVA_OPTS} -jar server.jar server ${FOXTROT_CONFIG_FILE}"

if [ -z "${SKIP_INIT}" ]; then
  echo "Executing Init Command: ${INIT_COMMAND}"
  $INIT_COMMAND
else
  echo "Skipping Init as $SKIP_INIT was set"
fi

echo "Starting foxtrot with command line: ${EXEC_CMD}"
$EXEC_CMD
|
<reponame>RVSWeb/CursoProgramacionJavascript<filename>Modulo 6 - AngularJS/modulos/controlador.js
// Controladores
// Registers the "miControlador" controller on the (globally defined) app
// module and exposes a name/surname pair on the scope for the view to bind.
app.controller("miControlador", function ($scope) {
    $scope.nombre = "Mario";
    $scope.apellidos = "Flores";
});
#!/bin/bash -e
# Copyright 2014-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Thomas Beermann, <thomas.beermann@cern.ch>, 2018
# Seed the Rucio instance with test data before running the daemon.
python /setup_data.py
# Run the Automatix data-generation daemon a single time with the bundled config.
/usr/bin/rucio-automatix --run-once --input-file /opt/rucio/etc/automatix.json
|
-- Template query: <columns> is a placeholder to be replaced with the actual
-- column name before executing; matches rows containing 'hello'.
SELECT *
FROM myTable
WHERE <columns> like '%hello%';
<reponame>OhFinance/oh-app
import {
Table,
TableBody,
TableCell,
TableContainer,
TableHead,
TableRow,
} from "@material-ui/core";
import { TableSurface } from "components/TableSurface";
import OhToken from "assets/img/oh-token.svg";
import { Flex } from "@ohfinance/oh-ui";
import { Balance } from "components/Balance";
import { StakeClaimTableRow } from "./StakeClaimTableRow";
import { pools } from "config/constants/pools";
import { useWeb3 } from "hooks/useWeb3";
// Renders the staking-claims table (claim token, amount, USD value, status,
// unlock date) with one StakeClaimTableRow for the first pool of the
// connected chain. NOTE(review): assumes pools[chainId] exists and is
// non-empty for every supported chainId — confirm against config/constants/pools.
export const StakeClaimTable = () => {
  // Chain id of the currently connected web3 provider.
  const { chainId } = useWeb3();
  return (
    <TableSurface>
      <TableContainer>
        <Table>
          <TableHead>
            <TableRow>
              <TableCell padding="checkbox"></TableCell>
              <TableCell>Claim</TableCell>
              <TableCell align="right">Amount</TableCell>
              <TableCell align="right">USD Value</TableCell>
              <TableCell align="right">Status</TableCell>
              <TableCell align="right">Unlock Date</TableCell>
            </TableRow>
          </TableHead>
          <TableBody>
            <StakeClaimTableRow pool={pools[chainId][0]} />
          </TableBody>
        </Table>
      </TableContainer>
    </TableSurface>
  );
};
|
package lookup
import (
"github.com/jensneuse/graphql-go-tools/pkg/document"
"github.com/jensneuse/graphql-go-tools/pkg/parser"
"testing"
)
// putLiteralString interns literal into the parser's storage via a manual AST
// modification and returns the resulting byte-slice reference; panics on error
// (acceptable in this test helper).
func putLiteralString(p *parser.Parser, literal string) document.ByteSliceReference {
	mod := parser.NewManualAstMod(p)
	ref, _, err := mod.PutLiteralString(literal)
	if err != nil {
		panic(err)
	}
	return ref
}
// literalString resolves a byte-slice reference back to its string contents,
// for readable test failure messages.
func literalString(p *parser.Parser, cachedName document.ByteSliceReference) string {
	return string(p.ByteSlice(cachedName))
}
// TestFieldsContainingDirectiveIterator walks the schema in
// FieldsContainingDirectiveIteratorInput and verifies the iterator yields, in
// order, each (field, object, directive) triple whose field carries
// @addArgumentFromContext, then terminates.
func TestFieldsContainingDirectiveIterator(t *testing.T) {
	p := parser.NewParser()
	look := New(p)
	if err := p.ParseTypeSystemDefinition([]byte(FieldsContainingDirectiveIteratorInput)); err != nil {
		panic(err)
	}
	// Intern the identifiers we will compare iterator results against.
	addArgumentFromContext := putLiteralString(p, "addArgumentFromContext")
	documents := putLiteralString(p, "documents")
	Query := putLiteralString(p, "Query")
	adminField := putLiteralString(p, "adminField")
	Document := putLiteralString(p, "Document")
	walk := NewWalker(512, 8)
	walk.SetLookup(look)
	walk.WalkTypeSystemDefinition()
	iter := walk.FieldsContainingDirectiveIterator(addArgumentFromContext)
	// First expected hit: Query.documents.
	if iter.Next() == false {
		t.Errorf("want true")
	}
	field, object, directive := iter.Value()
	directiveName := look.Directive(directive).Name
	if !look.ByteSliceReferenceContentsEquals(directiveName, addArgumentFromContext) {
		t.Errorf("want directive name: %s, got: %s", "addArgumentFromContext", literalString(p, directiveName))
	}
	fieldName := look.FieldDefinition(field).Name
	if !look.ByteSliceReferenceContentsEquals(fieldName, documents) {
		t.Errorf("want field name: %s, got: %s", "documents", literalString(p, fieldName))
	}
	objectName := look.ObjectTypeDefinition(object).Name
	if !look.ByteSliceReferenceContentsEquals(objectName, Query) {
		t.Errorf("want object type definition name: %s. got: %s", "Query", literalString(p, objectName))
	}
	// Second expected hit: Document.adminField.
	if iter.Next() == false {
		t.Errorf("want true")
	}
	field, object, directive = iter.Value()
	directiveName = look.Directive(directive).Name
	if !look.ByteSliceReferenceContentsEquals(directiveName, addArgumentFromContext) {
		t.Errorf("want directive name: %s, got: %s", "addArgumentFromContext", literalString(p, directiveName))
	}
	fieldName = look.FieldDefinition(field).Name
	if !look.ByteSliceReferenceContentsEquals(fieldName, adminField) {
		t.Errorf("want field: %s, got: %s", "adminField", literalString(p, fieldName))
	}
	objectName = look.ObjectTypeDefinition(object).Name
	if !look.ByteSliceReferenceContentsEquals(objectName, Document) {
		t.Errorf("want object type definition: %s, got: %s", "Document", literalString(p, objectName))
	}
	// No further matches expected.
	if iter.Next() {
		t.Errorf("want false")
	}
}
// FieldsContainingDirectiveIteratorInput is the schema under test: the
// @addArgumentFromContext directive appears on exactly two fields,
// Query.documents and Document.adminField. (Raw string contents are data and
// are kept byte-for-byte.)
const FieldsContainingDirectiveIteratorInput = `
directive @addArgumentFromContext(
name: String!
contextKey: String!
) on FIELD_DEFINITION
scalar String
schema {
query: Query
}
type Query {
documents: [Document] @addArgumentFromContext(name: "user",contextKey: "user")
}
type Document implements Node {
owner: String
sensitiveInformation: String
adminField: String @addArgumentFromContext(name: "admin",contextKey: "admin")
}
`
|
#!/bin/bash
# Build Path: /app/.heroku/php/
# Builds the gRPC PHP extension for a Heroku-style PHP app and enables it
# in php.ini.

dep_url=git://github.com/grpc/grpc.git
grpc_root="$(pwd)"

echo "-----> Building Grpc..."

### Grpc
echo "[LOG] Downloading Grpc"
git clone $dep_url -q
# Fixed: grpc_dir was never assigned, so the existence check below always
# failed. 'git clone' creates ./grpc under the current directory.
grpc_dir="${grpc_root}/grpc"
if [ ! -d "$grpc_dir" ]; then
  echo "[ERROR] Failed to find grpc directory $grpc_dir"
  exit 1
fi
# Build the PHP extension from inside the clone (the original cd'd to a
# path relative to grpc_root that does not exist).
cd "$grpc_dir/src/php/ext/grpc"
phpize
GRPC_LIB_SUBDIR=libs/opt ./configure --enable-grpc="${grpc_root}"
#cd $grpc_dir/build
# /app/php/bin/phpize
# ./configure --enable-grpc --with-php-config=$PHP_ROOT/bin/php-config
# make
# make install
BUILD_DIR=$1
ln -s $BUILD_DIR/.heroku /app/.heroku
export PATH=/app/.heroku/php/bin:$PATH
bash ./install
cd
echo "Importing extension grpc into php.ini"
echo "extension=grpc.so" >> /app/.heroku/php/etc/php/php.ini
|
<gh_stars>1-10
#ifndef _SYS__TYPES__INTPTR_T_H_
#define _SYS__TYPES__INTPTR_T_H_

/* intptr_t: signed integer type wide enough to hold an object pointer.
 * Defined only when the compiler advertises __INTPTR_TYPE__; otherwise the
 * type is simply absent from this header. */
#if defined(__INTPTR_TYPE__)
typedef __INTPTR_TYPE__ intptr_t;
#endif

#endif
|
package org.firstinspires.ftc.teamcode;
import com.qualcomm.robotcore.eventloop.opmode.Autonomous;
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
import com.qualcomm.robotcore.eventloop.opmode.OpMode;
import com.qualcomm.robotcore.hardware.HardwareMap;
@Autonomous(name="AutonomousDriveNew")
/**
 * Timing-based autonomous routine for a mecanum-drive robot: delivers the
 * wobble goal, fires three rings at the tower goal, then parks. All movement
 * is open-loop (power + sleep); the durations below were hand-tuned.
 */
public class AutonomousVersionOne extends LinearOpMode {
    MecanumHardware robot = new MecanumHardware();
    //intake rollers for 250 ms
    //then intake rollers for 400 ms
    //then intake rollers for 400 ms
    /*
    Move wobble goal to target zone
    shoot for top goal in tower
    3 seconds for winding up shooter motor
    .25 second for rolling the input
    .4 seconds for rolling the input for the next 2
    */
    @Override
    public void runOpMode()
    {
        robot.init(hardwareMap);
        waitForStart();
        while(opModeIsActive())
        {
            //negative turn is turn left
            // Drive out to the target zone.
            drive(.5);
            sleep(1000); //700 initially
            drive(.5);
            sleep(800);
            drive(0);
            turn(-.5);
            sleep(900); //800 is fine maybe turn for 700
            drive(.1);
            sleep(400);
            drive(0);
            // Lower and release the wobble goal.
            robot.wobble.setPower(-.7);
            sleep(1500);
            robot.wobble.setPower(.3);
            sleep(250);
            drive(0);
            sleep(750);
            // Back away and line up for shooting.
            drive(-.5);
            robot.wobble.setPower(.1);
            sleep(100);
            turn(.5);
            sleep(1100); //placement is fine just wokr
            drive(0);
            robot.wobble.setPower(1);
            sleep(500);
            drive(-.2);
            robot.wobble.setPower(0);
            sleep(300);
            drive(0);
            turn(-.5);
            sleep(150);
            //WIND UP
            // Spin up the shooter wheels, then feed three rings with the
            // intake, pausing between shots to let the wheels recover speed.
            drive(0);
            robot.wobble.setPower(0);
            robot.output.setPower(.85);
            robot.output2.setPower(.85);
            sleep(1500); //change timing here for initial drive
            robot.input.setPower(1);
            sleep(250);
            robot.input.setPower(0);
            robot.output.setPower(.85);
            robot.output2.setPower(.85);
            sleep(1000);
            robot.input.setPower(1);
            sleep(250);
            //33+6
            robot.input.setPower(0);
            robot.output.setPower(.85);
            robot.output2.setPower(.85);
            sleep(500);
            robot.input.setPower(1);
            sleep(1000);
            robot.input.setPower(0);
            sleep(1000);
            // Park, then sleep out the rest of the period so the loop
            // effectively runs once.
            drive(.7);
            sleep(400);
            STOP();
            sleep(200000);
        }
    }

    /** Drives straight: same power on all four mecanum wheels. */
    public void drive(double power) {
        robot.leftFront.setPower(power); //negative should stay because of the direction of the robot
        robot.rightFront.setPower(power);
        robot.rightBack.setPower(power);
        robot.leftBack.setPower(power);
    }

    /** Turns in place: left side reversed. Negative power turns left. */
    public void turn(double power) {
        robot.leftFront.setPower(-power); //This isn't inverted the motor config is just weird...
        robot.rightFront.setPower(power);
        robot.rightBack.setPower(power);
        robot.leftBack.setPower(-power);
    }

    /** Stops all drive and shooter/intake motors. */
    public void STOP(){
        int power = 0;
        robot.leftFront.setPower(power); //negative should stay because of the direction of the robot
        robot.rightFront.setPower(power);
        robot.rightBack.setPower(power);
        robot.leftBack.setPower(power);
        robot.input.setPower(0);
        robot.output.setPower(0);
        robot.output2.setPower(0);
    }
}
|
package com.xstudio.dao.sys;
import com.xstudio.models.sys.SysDict;
import com.xstudio.models.sys.SysDictExample;
import java.util.List;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;
@Repository
public interface SysDictMapper {
    /**
     * Counts the sys_dict rows matching the given example criteria.
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    int countByExample(SysDictExample example);

    /**
     * Deletes all sys_dict rows matching the given example criteria.
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    int deleteByExample(SysDictExample example);

    /**
     * Deletes the sys_dict row with the given primary key.
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    int deleteByPrimaryKey(Integer id);

    /**
     * Inserts a full record (all columns) into sys_dict.
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    int insert(SysDict record);

    /**
     * Inserts a record into sys_dict, writing only the non-null fields.
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    int insertSelective(SysDict record);

    /**
     * Returns all sys_dict rows matching the given example criteria.
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    List<SysDict> selectByExample(SysDictExample example);

    /**
     * Returns the sys_dict row with the given primary key, or null if absent.
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    SysDict selectByPrimaryKey(Integer id);

    /**
     * Updates the non-null fields of matching rows (example criteria).
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    int updateByExampleSelective(@Param("record") SysDict record, @Param("example") SysDictExample example);

    /**
     * Updates all fields of matching rows (example criteria).
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    int updateByExample(@Param("record") SysDict record, @Param("example") SysDictExample example);

    /**
     * Updates the non-null fields of the row with the record's primary key.
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    int updateByPrimaryKeySelective(SysDict record);

    /**
     * Updates all fields of the row with the record's primary key.
     *
     * This method was generated by MyBatis Generator.
     * This method corresponds to the database table sys_dict
     *
     * @mbggenerated Sun May 08 16:11:43 CST 2016
     */
    int updateByPrimaryKey(SysDict record);
}
#!/bin/sh
#
# Script to setup parallel eacrd nodes with separate wallets.
# Useful for testing reorgs by disconnecting nodes, mining individually, then
# reconnecting them.
#
# alpha <------> beta
# listen 19100 19200
# rpclisten 19101 <. .> 19201
# w-alpha | | w-beta
# rpclisten 19102 19202
#
# For simplicity, node "beta" is configured to connect to node "alpha" via
# --connect on the command line, so that you can easily disconnect the nodes
# by stopping beta, removing the --connect, then restarting it.
set -e

SESSION="eacrd-parallel-nodes"
NODES_ROOT=~/eacrdsimnet
RPCUSER="USER"
RPCPASS="PASS"
WALLET01_SEED="1111111111111111111111111111111111111111111111111111111111111111"
WALLET02_SEED="2222222222222222222222222222222222222222222222222222222222222222"
WALLET01_MININGADDR="Ssmn12w4CTF2j6B1jaLxEyKXVeMFkPJmDBs"
WALLET02_MININGADDR="Ssc9exyQoHX3octYgu7SVWYTJQBP7PqJd31"
# WALLET01_MININGADDR="Ssoaqgx4ecmHX54LqrUXgqi6miUFxP9iUvc"
# WALLET02_MININGADDR="SsgkhRgr9JdonELE7MjK8qUkwSPsrTnWcE6"

# Start every run from a clean data directory tree.
if [ -d "${NODES_ROOT}" ] ; then
    rm -R "${NODES_ROOT}"
fi
# Brace expansion is a bashism and this script runs under /bin/sh, so the
# four directories are listed explicitly.
mkdir -p "${NODES_ROOT}/alpha" "${NODES_ROOT}/beta" \
    "${NODES_ROOT}/w-alpha" "${NODES_ROOT}/w-beta"

# Config Files
cat > "${NODES_ROOT}/eacrd.conf" <<EOF
rpcuser=${RPCUSER}
rpcpass=${RPCPASS}
simnet=1
logdir=./log
datadir=./data
notls=1
txindex=1
debuglevel=TXMP=TRACE,MINR=TRACE,CHAN=TRACE
EOF

cat > "${NODES_ROOT}/eacrctl.conf" <<EOF
rpcuser=${RPCUSER}
rpcpass=${RPCPASS}
notls=1
simnet=1
EOF

cat > "${NODES_ROOT}/wallet.conf" <<EOF
username=${RPCUSER}
password=${RPCPASS}
noclienttls=1
simnet=1
logdir=./log
appdata=./data
pass=123
enablevoting=1
enableticketbuyer=1
nogrpc=1
EOF

# Per-node configs: base config plus node-specific listen/rpc ports.
cp "${NODES_ROOT}/eacrd.conf" "${NODES_ROOT}/alpha"
cat >> "${NODES_ROOT}/alpha/eacrd.conf" <<EOF
listen=127.0.0.1:19100
rpclisten=127.0.0.1:19101
miningaddr=${WALLET01_MININGADDR}
EOF

cp "${NODES_ROOT}/eacrctl.conf" "${NODES_ROOT}/alpha"
cat >> "${NODES_ROOT}/alpha/eacrctl.conf" <<EOF
rpcserver=127.0.0.1:19101
EOF

cp "${NODES_ROOT}/eacrctl.conf" "${NODES_ROOT}/w-alpha"
cat >> "${NODES_ROOT}/w-alpha/eacrctl.conf" <<EOF
walletrpcserver=127.0.0.1:19102
EOF

cp "${NODES_ROOT}/eacrd.conf" "${NODES_ROOT}/beta"
cat >> "${NODES_ROOT}/beta/eacrd.conf" <<EOF
listen=127.0.0.1:19200
rpclisten=127.0.0.1:19201
miningaddr=${WALLET02_MININGADDR}
EOF

cp "${NODES_ROOT}/eacrctl.conf" "${NODES_ROOT}/beta"
cat >> "${NODES_ROOT}/beta/eacrctl.conf" <<EOF
rpcserver=127.0.0.1:19201
EOF

cp "${NODES_ROOT}/eacrctl.conf" "${NODES_ROOT}/w-beta"
cat >> "${NODES_ROOT}/w-beta/eacrctl.conf" <<EOF
walletrpcserver=127.0.0.1:19202
EOF

# Node Scripts
# "\$@" (quoted) forwards arguments intact; the original \$* broke
# arguments that contain spaces.
cat > "${NODES_ROOT}/alpha/ctl" <<EOF
#!/bin/sh
eacrctl -C ./eacrctl.conf "\$@"
EOF
chmod +x "${NODES_ROOT}/alpha/ctl"

cat > "${NODES_ROOT}/alpha/mine" <<EOF
#!/bin/sh
NUM=1
case \$1 in
    ''|*[!0-9]*) ;;
    *) NUM=\$1 ;;
esac
for i in \$(seq \$NUM) ; do
    eacrctl -C ./eacrctl.conf generate 1
    sleep 0.3
done
EOF
chmod +x "${NODES_ROOT}/alpha/mine"

# script to mine one block on each node
cat > "${NODES_ROOT}/mine-both" <<EOF
#!/bin/sh
NUM=1
case \$1 in
    ''|*[!0-9]*) ;;
    *) NUM=\$1 ;;
esac
for i in \$(seq \$NUM) ; do
    cd "${NODES_ROOT}/alpha" && ./mine
    cd "${NODES_ROOT}/beta" && ./mine
done
EOF
chmod +x "${NODES_ROOT}/mine-both"

cp "${NODES_ROOT}/alpha/ctl" "${NODES_ROOT}/beta/"
cp "${NODES_ROOT}/alpha/mine" "${NODES_ROOT}/beta/"

# Wallet Scripts
cat > "${NODES_ROOT}/w-alpha/ctl" <<EOF
#!/bin/sh
eacrctl -C ./eacrctl.conf --wallet -c ./data/rpc.cert "\$@"
EOF
chmod +x "${NODES_ROOT}/w-alpha/ctl"

cat > "${NODES_ROOT}/w-alpha/tickets" <<EOF
#!/bin/sh
NUM=1
case \$1 in
    ''|*[!0-9]*) ;;
    *) NUM=\$1 ;;
esac
./ctl purchaseticket default 999999 1 "\$(./ctl getnewaddress)" "\$NUM"
EOF
chmod +x "${NODES_ROOT}/w-alpha/tickets"

cat > "${NODES_ROOT}/w-alpha/xfer" <<EOF
#!/bin/sh
./ctl sendtoaddress "\$(./ctl getnewaddress)" 0.1
EOF
chmod +x "${NODES_ROOT}/w-alpha/xfer"

cp "${NODES_ROOT}/w-alpha/ctl" "${NODES_ROOT}/w-beta"
cp "${NODES_ROOT}/w-alpha/tickets" "${NODES_ROOT}/w-beta"
cp "${NODES_ROOT}/w-alpha/xfer" "${NODES_ROOT}/w-beta"

cd "${NODES_ROOT}" && tmux -2 new-session -d -s "$SESSION"
tmux rename-window -t "$SESSION:0" 'prompt'

# Alpha Node
tmux new-window -t "$SESSION:1" -n 'alpha'
tmux split-window -v
tmux select-pane -t 0
tmux send-keys "cd alpha" C-m
tmux send-keys "eacrd -C ./eacrd.conf" C-m
tmux select-pane -t 1
tmux send-keys "cd alpha" C-m
#sleep 3
#tmux send-keys "./ctl generate 16" C-m

# Beta Node
tmux new-window -t "$SESSION:2" -n 'beta'
tmux split-window -v
tmux select-pane -t 0
tmux send-keys "cd beta" C-m
tmux send-keys "eacrd -C ./eacrd.conf --connect 127.0.0.1:19100" C-m
tmux select-pane -t 1
tmux send-keys "cd beta" C-m
#sleep 3
#tmux send-keys "./ctl generate 16" C-m
sleep 3
tmux send-keys -t "$SESSION:0" "./mine-both 16" C-m

# Wallets
tmux new-window -t "$SESSION:3" -n 'wallets'
tmux split-window -h
tmux split-window -v
tmux select-pane -t 0
tmux split-window -v
tmux select-pane -t 0
tmux send-keys "cd w-alpha" C-m
tmux send-keys "eacrwallet -C ../wallet.conf --create" C-m
sleep 2
tmux send-keys "123" C-m "123" C-m "n" C-m "y" C-m
sleep 1
tmux send-keys "${WALLET01_SEED}" C-m C-m
tmux send-keys "eacrwallet -C ../wallet.conf --rpcconnect 127.0.0.1:19101 \
--rpclisten 127.0.0.1:19102" C-m
tmux select-pane -t 2
tmux send-keys "cd w-alpha" C-m
tmux select-pane -t 1
tmux send-keys "cd w-beta" C-m
tmux send-keys "eacrwallet -C ../wallet.conf --create" C-m
sleep 2
tmux send-keys "123" C-m "123" C-m "n" C-m "y" C-m
sleep 1
tmux send-keys "${WALLET02_SEED}" C-m C-m
tmux send-keys "eacrwallet -C ../wallet.conf --rpcconnect 127.0.0.1:19201 \
--rpclisten 127.0.0.1:19202" C-m
tmux select-pane -t 3
tmux send-keys "cd w-beta" C-m

echo "Attach to simnet nodes/wallets with \"tmux a -t ${SESSION}\"."
# tmux attach-session -t $SESSION
|
set -e -u

# Tail the most recent pm2 stdout/stderr log files for the app named after
# the current directory (pm2 writes <app>-out-N.log / <app>-error-N.log).
name=$(basename "$PWD")
echo "$name"

# 'ls -t' sorts newest first; take only the first match so rotated older
# logs are not tailed too (the original unquoted expansion tailed them all).
logFile=$(ls -t ~/.pm2/logs/"${name}"-out-* | head -n 1)
errorFile=$(ls -t ~/.pm2/logs/"${name}"-error-* | head -n 1)

echo; ls -l "$errorFile"
tail "$errorFile"

echo; ls -l "$logFile"
tail "$logFile" | bunyan -o short
|
package io.github.hotspacode.neeza.base.dto;
import io.github.hotspacode.neeza.base.api.INeezaSerialization;
import java.io.Serializable;
/**
 * Serializable transport object describing how a mocked call should respond:
 * either a serialized payload, a primitive string, null, or void.
 */
public class MockTransport implements Serializable {

    private boolean mocked = false;              // whether mocking is enabled
    private String response;                     // serialized/raw return payload
    private Class methodReturnClass;             // target type for deserialization
    private boolean primitive = false;           // payload is a plain string, no deserialization
    private boolean returnNull = false;          // mocked call should return null
    private boolean returnVoid = false;          // mocked call returns void
    private INeezaSerialization neezaSerialization; // codec used to rebuild the object

    public MockTransport() {
    }

    public MockTransport(boolean mocked) {
        this.mocked = mocked;
    }

    /**
     * Materializes the return value carried by the given transport:
     * null when flagged (or when no payload exists), the raw string for
     * primitives, otherwise the deserialized object.
     */
    public static <T> T getObject(MockTransport transport) {
        if (transport.isReturnNull() || transport.getResponse() == null) {
            return null;
        }
        if (transport.isPrimitive()) {
            return (T) transport.getResponse();
        }
        return (T) transport.getNeezaSerialization()
                .deserialize(transport.getResponse().getBytes(), transport.getMethodReturnClass());
    }

    /**
     * Mocking is effective only when a serialization codec is present and
     * the primitive/returnNull combination is consistent.
     */
    public boolean isMocked() {
        // A primitive that should return null is contradictory; and without
        // a codec nothing can be deserialized — both disable mocking.
        return !(isPrimitive() && returnNull)
                && neezaSerialization != null
                && mocked;
    }

    public void setMocked(boolean mocked) {
        this.mocked = mocked;
    }

    public boolean isReturnNull() {
        return returnNull;
    }

    public void setReturnNull(boolean returnNull) {
        this.returnNull = returnNull;
    }

    public boolean isReturnVoid() {
        return returnVoid;
    }

    public void setReturnVoid(boolean returnVoid) {
        this.returnVoid = returnVoid;
    }

    public boolean isPrimitive() {
        return primitive;
    }

    public void setPrimitive(boolean primitive) {
        this.primitive = primitive;
    }

    public Class getMethodReturnClass() {
        return methodReturnClass;
    }

    public void setMethodReturnClass(Class methodReturnClass) {
        this.methodReturnClass = methodReturnClass;
    }

    public String getResponse() {
        return response;
    }

    public void setResponse(String response) {
        this.response = response;
    }

    public INeezaSerialization getNeezaSerialization() {
        return neezaSerialization;
    }

    public void setNeezaSerialization(INeezaSerialization neezaSerialization) {
        this.neezaSerialization = neezaSerialization;
    }
}
|
#!/bin/bash
set -e

# Deployment driver: waits for the event managers (EMs) to come up, then
# deploys, attests and connects the modules described in descriptor.json
# via reactive-tools, and finally keeps the process alive.

# TODO: find a better way
echo "Waiting until all the EMs are ready.."
sleep 10

# deploy — writes the resulting deployment description to res.json,
# which the attest/connect steps below consume.
echo "Deploying modules.."
reactive-tools --debug --manager deploy descriptor.json --result res.json

# TODO: is it really necessary?
echo "Waiting until all the modules are up and running.."
sleep 2

# attest
echo "Attesting modules.."
reactive-tools --debug --manager attest res.json

# connect
echo "Establishing connections.."
reactive-tools --debug --manager connect res.json

echo "Setup complete"

# Keep the script (e.g. its container) alive for an hour after setup.
sleep 3600
|
numbers = [1, 2, 3, 4, 5, 6]

# Walk the list from the last element to the first.
for x in numbers[::-1]:
    print(x)
import styled from '@emotion/styled';
import { device } from '@theme/index';
// Heading showing the user's total quiz score.
export const Score = styled.h2`
    text-align: left;
    font-size: 40px;
`;

// Wrapping flex container for the per-question result cards.
export const QuestionList = styled.ul`
    display: flex;
    flex-wrap: wrap;
    justify-content: space-between;
`;

// One result card; full width on mobile, two per row on laptop and up.
export const QuestionCard = styled.li`
    flex: 1 1 100%;
    display: flex;
    align-items: center;
    padding: 20px;
    background-color: #ffe9c0;
    border-radius: 10px;
    @media (${device.laptop}) {
        flex: 0 1 calc((100% / 2) - 3.5rem);
    }
`;

// The question text inside a card.
export const Question = styled.h3`
    text-align: left;
    font-size: 25px;
`;

// Flex container listing the answer options of a question.
export const AnswersList = styled.ul`
    display: flex;
    flex-wrap: wrap;
    gap: 10px;
    margin-top: 15px;
`;

// A single answer option; three per row on laptop and up.
export const Answer = styled.li`
    flex: 1 1 100%;
    padding: 10px 0;
    background-color: orange;
    border-radius: 10px;
    margin-bottom: 20px;
    text-align: center;
    @media (${device.laptop}) {
        flex: 0 1 calc((100% / 3) - 3rem);
    }
`;

// Column next to the answers summarizing what the user picked.
export const AnswerInfo = styled.div`
    flex: 1;
    margin-left: 40px;
`;

// The user's answer; styled via the .correct / .uncorrect class names.
export const UserAnswer = styled.h4`
    font-size: 20px;
    margin-bottom: 10px;
    &.correct {
        color: green;
    }
    &.uncorrect {
        color: red;
    }
    span {
        display: block;
    }
`;

// The expected (correct) answer shown for comparison.
export const CorrectAnswer = styled.h4`
    font-size: 20px;
`;
|
def choose_action(key, context, actions):
    """Pick an action suited to the given context key.

    In "combat" the highest-damage action wins; in "exploration" the one
    with the greatest exploration value. Any other key falls back to the
    first action in the list. ``context`` is accepted for interface
    compatibility but is not consulted here.
    """
    # Map each known context to the attribute that should be maximized.
    selectors = {
        "combat": lambda action: action.damage,
        "exploration": lambda action: action.exploration_value,
    }
    selector = selectors.get(key)
    if selector is None:
        # Unknown context: default to the first listed action.
        return actions[0]
    return max(actions, key=selector)
#!/usr/bin/env bash
# Runs the buildpack integration suite twice: once uncached, once cached.
set -euo pipefail

cd "$( dirname "${BASH_SOURCE[0]}" )/.."
source .envrc
./scripts/install_tools.sh

GINKGO_NODES=${GINKGO_NODES:-3}
GINKGO_ATTEMPTS=${GINKGO_ATTEMPTS:-2}
export CF_STACK=${CF_STACK:-cflinuxfs2}

# The glob is intentionally unquoted: it is expected to expand to exactly
# one integration directory under src/.
cd src/*/integration

echo "Run Uncached Buildpack"
# Quote the env-provided values so a stray space cannot split the flags.
ginkgo -r --flakeAttempts="$GINKGO_ATTEMPTS" -nodes "$GINKGO_NODES" --slowSpecThreshold=120 --noisySkippings=false -- --cached=false

echo "Run Cached Buildpack"
ginkgo -r --flakeAttempts="$GINKGO_ATTEMPTS" -nodes "$GINKGO_NODES" --slowSpecThreshold=120 --noisySkippings=false -- --cached
|
package dev.webfx.kit.mapper.peers.javafxgraphics.emul_coupling;
import com.sun.javafx.tk.TKStage;
import javafx.stage.Window;
/**
* @author <NAME>
*/
public interface WindowPeer extends TKStage {

    /** Returns the JavaFX {@link Window} this peer is attached to. */
    Window getWindow();

    //void setScene(ScenePeer scenePeer);

    /** Notifies the peer that the root node of the window's scene has changed. */
    void onSceneRootChanged();

    /** Shows or hides the underlying native window. */
    void setVisible(boolean visible);

    /**
     * Sets the window bounds to the specified values.
     *
     * Gravity values specify how to correct window location if only its size
     * changes (for example when stage decorations are added). User initiated
     * resizing should be ignored and must not influence window location through
     * this mechanism.
     *
     * The corresponding correction formulas are:
     *
     * {@code x -= xGravity * deltaW}
     * {@code y -= yGravity * deltaH}
     *
     * @param x the new window horizontal position, ignored if xSet is set to
     * false
     * @param y the new window vertical position, ignored if ySet is set to
     * false
     * @param xSet indicates whether the x parameter is valid
     * @param ySet indicates whether the y parameter is valid
     * @param w the new window width, ignored if set to -1
     * @param h the new window height, ignored if set to -1
     * @param cw the new window content width, ignored if set to -1
     * @param ch the new window content height, ignored if set to -1
     * @param xGravity the xGravity coefficient
     * @param yGravity the yGravity coefficient
     */
    void setBounds(float x, float y, boolean xSet, boolean ySet,
                   float w, float h, float cw, float ch,
                   float xGravity, float yGravity);
}
|
/*
 * Returns the square of `number`.
 *
 * NOTE(review): the multiplication is done in (signed) int, so the result
 * overflows — undefined behavior — once |number| exceeds sqrt(INT_MAX)
 * (about 46340 for 32-bit int). Confirm callers stay within that range.
 */
int square(int number) {
    return number * number;
}
#!/bin/bash
# constants file, disable SC2034
# shellcheck disable=SC2034
true
# List of semi-standard exit status codes.
# Intended to be sourced by other scripts; 'true' above keeps the file's
# own exit status 0.
# Sources:
# A: http://tldp.org/LDP/abs/html/exitcodes.html#EXITCODESREF
# B: http://www.gnu.org/software/libc/manual/html_node/Exit-Status.html
# C: sysexits.h
EX_OK=0 # successful termination ## source: A
EX_SUCCESS=0 # successful termination ## source: A
EX_ERR=1 # Catchall for general errors ## source: A
# Don't use. Reserved for bash
EX_SHELL=2 # Misuse of shell builtins ## source: A
EX_USAGE=64 # command line usage error ## source: C
EX_DATAERR=65 # data format error ## source: C
EX_NOINPUT=66 # cannot open input ## source: C
EX_NOUSER=67 # addressee unknown ## source: C
EX_NOHOST=68 # host name unknown ## source: C
EX_UNAVAILABLE=69 # service unavailable ## source: C
EX_SOFTWARE=70 # internal software error ## source: C
EX_OSERR=71 # system error (e.g., can't fork) ## source: C
EX_OSFILE=72 # critical OS file missing ## source: C
EX_CANTCREAT=73 # can't create (user) output file ## source: C
EX_IOERR=74 # input/output error ## source: C
EX_TEMPFAIL=75 # temp failure; user is invited to retry ## source: C
EX_PROTOCOL=76 # remote error in protocol ## source: C
EX_NOPERM=77 # permission denied ## source: C
EX_CONFIG=78 # configuration error ## source: C
# Don't use. Reserved for bash
EX_NOEXEC=126 # Command invoked cannot execute ## source: A
EX_NOTFOUND=127 # "command not found" ## source: A
# These two are in direct conflict, don't use
EX_EXIT_ERR=128 # Invalid argument to exit ## source: A
EX_EXEC_FAIL=128 # Failed to execute subprocess ## source: B
EX_SIGTERM=130 # Script terminated by Control-C ## source: A
# Custom homeshick status codes (range: 79-113)
EX_AHEAD=85 # local HEAD is ahead of its upstream branch
EX_BEHIND=86 # local HEAD is behind its upstream branch
EX_TH_EXCEEDED=87 # Time since last repository update is larger than the threshold
EX_MODIFIED=88 # local working directory has modified files
|
<reponame>Coopexx/hendriktreuner.com<filename>src/components/work/Project.js
import classes from './Project.module.scss';
import icons from '../../images/sprite-work.svg';
import Button from './Button';
const SVG = (props) => {
return (
<li className={classes['project__technology']}>
<svg className={classes['project__technology-svg']}>
<use href={`${icons}#${props.tech}`} />
</svg>
</li>
);
};
const Project = (props) => {
return (
<div className={classes.project}>
<h3 className={classes['project__caption']}>{props.name}</h3>
<ul className={classes['project__technologies-container']}>
{props.technology.map((tech) => {
return <SVG tech={tech} />;
})}
</ul>
<p className={classes['project__description']}>
{props.description}
</p>
<div className={classes['button-container']}>
{props.button.map((type, index) => {
return (
<Button type={type} link={props.link[index]}></Button>
);
})}
</div>
</div>
);
};
export default Project;
|
#!/bin/bash

# Bail out early if a network containing "public" already exists;
# otherwise continue on to create it.
if openstack network list | grep -q public; then
    echo 'Existing public network detected...exiting...'
    exit
fi

echo 'Creating public network...'
|
/**
 * Simple mutable value object holding a user's contact details.
 */
public class User {

    private String name;         // full name
    private String email;        // e-mail address
    private String mobileNumber; // phone number kept as text (may have leading zeros)

    /** Creates a user with the given name, e-mail and mobile number. */
    public User(String name, String email, String mobileNumber) {
        this.name = name;
        this.email = email;
        this.mobileNumber = mobileNumber;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getEmail() {
        return email;
    }

    public void setEmail(String email) {
        this.email = email;
    }

    public String getMobileNumber() {
        return mobileNumber;
    }

    public void setMobileNumber(String mobileNumber) {
        this.mobileNumber = mobileNumber;
    }
}
#!/bin/sh
#Author: ReSearchITEng
#Work is based on other scripts on the same topic, from around the internet
##########
#Var list:
##########
dir=/home/osmc
outlist="$dir/hosts.blocked.tmp"
outlistFinal="$dir/hosts.blocked"
tempoutlist="$dir/list.tmp"
whitelist="$dir/whitelist.txt"
echo "Getting ad list files quiet..."
###########
#Functions
##########

# Download every upstream host list, normalize each to "0.0.0.0 <domain>"
# lines and append them (de-duplicated per source) to $tempoutlist.
getter(){
echo "Getter starts"
echo "" > "$tempoutlist"
echo "Getting hosts.eladkarako.com ..."
curl -sL "https://github.com/eladkarako/hosts.eladkarako.com/blob/master/hosts0.txt?raw=true" | sed s/127.0.0.1/0.0.0.0/g | sed $'s/\r$//' | sed 's/ */\ /g' | grep -w ^0.0.0.0 | awk '{print $1 " " $2}' | sort -u >> "$tempoutlist"
echo "Getting winhelp2002.mvps.org ..."
wget -qO- "http://winhelp2002.mvps.org/hosts.txt" | sed s/127.0.0.1/0.0.0.0/g | sed $'s/\r$//' | sed 's/ */\ /g' | grep -w ^0.0.0.0 | awk '{print $1 " " $2}' | sort -u >> "$tempoutlist"
echo "Getting adaway, someonewhocares, pgl.yoyo.org ..."
wget -qO- "http://adaway.org/hosts.txt" | sed s/127.0.0.1/0.0.0.0/g | sed $'s/\r$//' | sed 's/ */\ /g' | grep -w ^0.0.0.0 | awk '{print $1 " " $2}' | sort -u >> "$tempoutlist"
wget -qO- "http://someonewhocares.org/hosts/zero/hosts" | sed s/127.0.0.1/0.0.0.0/g | sed $'s/\r$//' | sed 's/ */\ /g' | grep -w ^0.0.0.0 | awk '{print $1 " " $2}' | sort -u >> "$tempoutlist"
wget -qO- "http://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0&mimetype=plaintext&useip=0.0.0.0" \
| sed s/127.0.0.1/0.0.0.0/g | sed $'s/\r$//' | sed 's/ */\ /g' | grep -w ^0.0.0.0 | awk '{print $1 " " $2}' | sort -u >> "$tempoutlist"
echo "Getting hosts-file.net (ad&hosts) ..."
wget -qO- "http://hosts-file.net/ad_servers.txt" | sed s/127.0.0.1/0.0.0.0/g | sed $'s/\r$//' | sed 's/ */\ /g' | grep -w ^0.0.0.0 | awk '{print $1 " " $2}' | sort -u >> "$tempoutlist"
wget -qO- "http://hosts-file.net/download/hosts.txt"| sed s/127.0.0.1/0.0.0.0/g | sed $'s/\r$//' | sed 's/ */\ /g' | grep -w ^0.0.0.0 | awk '{print $1 " " $2}' | sort -u >> "$tempoutlist"
echo "Getting hostsfile.mine.nu ..."
wget -qO- "http://hostsfile.mine.nu/Hosts" | sed s/127.0.0.1/0.0.0.0/g | sed $'s/\r$//' | sed 's/ */\ /g' | grep -w ^0.0.0.0 | awk '{print $1 " " $2}' | sort -u >> "$tempoutlist"
echo "Getting Mother of All Ad Blocks list..."
wget -qO- "http://adblock.mahakala.is/hosts" --user-agent="Mozilla/5.0 (X11; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0" \
| sed s/127.0.0.1/0.0.0.0/g | sed $'s/\r$//' | sed 's/ */\ /g' | grep -w ^0.0.0.0 | awk '{print $1 " " $2}' | sort -u >> "$tempoutlist"
}

# Remove whitelisted entries, convert line endings, and write the final
# lists: ${outlistFinal}0 (0.0.0.0 form) and ${outlistFinal}127 (127.0.0.1 form).
whitelist_dos_finalizing(){
echo "checking whitelist ($whitelist) as well as final sort uniq & dos2unix... "
if [ -s "$whitelist" ];then
# grep -F is the modern spelling of the deprecated fgrep
sort -u "$tempoutlist" | grep -F -v -f "$whitelist" > "$outlist"
else
echo "WARNING: $whitelist not found, or it's zero size"
sort -u "$tempoutlist" > "$outlist"
fi
dos2unix "$tempoutlist" || true
if [ -s "$tempoutlist" ];then
mv "$outlist" "${outlistFinal}0"
sed s/0.0.0.0/127.0.0.1/ "${outlistFinal}0" > "${outlistFinal}127"
#git_upload
else
echo "something funky has happened and output file is zero; therefore nothing has been done"
fi
}

report(){
# Count how many domains were blocked so it can be displayed to the user.
# BUG FIX: the final list is written to "${outlistFinal}0" (see above);
# "$outlistFinal" itself is never created, so the old count always failed.
numberOfAdsBlocked=$(wc -l < "${outlistFinal}0" | sed 's/^[ \t]*//')
echo "$numberOfAdsBlocked ad domains blocked."
}

cleanup(){
echo "Deleting temp file $tempoutlist ..."
rm "$tempoutlist"
}

restart_dns(){
sleep 1
echo "Restart dnsmasq..."
sudo systemctl restart dnsmasq
#service restart_dnsmasq
}

#############
###### MAIN #
#############
echo "" > "$tempoutlist"
getter
whitelist_dos_finalizing
report
#cleanup
restart_dns
sleep 1
echo "All done !!!"
|
set -e
# Active-learning loop: repeatedly samples new training examples ("active")
# and retrains ("train") until MAX_SIZE samples are reached, repeating the
# whole loop for each sampling seed.

### change these variables if needed
DATA_DIR=data
TASK_NAME=cfever
#MODEL_TYPE=bert
#MODEL_NAME=textattack/bert-base-uncased-MNLI
MODEL_TYPE=roberta
MODEL_NAME=textattack/roberta-base-MNLI
SEED=42
COLDSTART=none
SAMPLING=rand
INCREMENT=10
MAX_SIZE=300
### end

for SAMPLING_SEED in 123 124 125 126 127 128 129 130 131 132; do #sampling seed is only used for rand
METHOD=${COLDSTART}-${SAMPLING}
MODEL_DIR=rand/${SEED}/${TASK_NAME}/${SAMPLING_SEED}
# With no cold-start phase, iteration begins from the base model at size 0;
# otherwise it resumes from the cold-start checkpoint at size INCREMENT.
if [ "$COLDSTART" == "none" ]
then
MODEL0=$MODEL_NAME
START=0
METHOD=${SAMPLING}
else
MODEL0=${MODEL_DIR}/${COLDSTART}_${INCREMENT}
START=$INCREMENT
fi
active (){
# 1=number of samples
# 2=model path
# 3=sampling method
echo -e "\n\nACQUIRING $1 SAMPLES\n\n"
python -m src.active \
--model_type $MODEL_TYPE \
--model_name_or_path $2 \
--task_name $TASK_NAME \
--data_dir $DATA_DIR/$TASK_NAME \
--output_dir ${MODEL_DIR}/${3}_${1} \
--seed $SEED \
--sampling_seed $SAMPLING_SEED \
--query_size $INCREMENT \
--sampling $SAMPLING \
--base_model $MODEL_NAME \
--per_gpu_eval_batch_size 32 \
--max_seq_length 128
}
train (){
# 1 = number of samples
# 2 = output directory
echo -e "\n\nTRAINING WITH $1 SAMPLES\n\n"
python -m src.train \
--model_type $MODEL_TYPE \
--model_name_or_path $MODEL_NAME \
--task_name $TASK_NAME \
--do_train \
--do_eval \
--data_dir $DATA_DIR/$TASK_NAME \
--max_seq_length 128 \
--learning_rate 2e-5 \
--num_train_epochs 3.0 \
--output_dir $2 \
--seed $SEED \
--base_model $MODEL_NAME \
--per_gpu_eval_batch_size 16 \
--per_gpu_train_batch_size 32
}
f=$MODEL0
p=$(( $START + $INCREMENT ))
# Grow the labeled pool by INCREMENT each round: sample with the previous
# model, then train a fresh model on the enlarged pool.
while [ $p -le $MAX_SIZE ]
do
active $p $f $METHOD
# Reclaim disk space: keep only the eval results and the sampled ids of
# intermediate checkpoints (never delete the base model itself).
if [ $f != $MODEL0 ]
then
find $f ! -name 'eval_results.txt' ! -name 'sampled.pt' -type f -exec rm -f {} +
fi
f=${MODEL_DIR}/${METHOD}_$p
train $p $f
p=$(( $p + $INCREMENT ))
done
done
# --num_train_epochs 3.0 \
|
package eon.spectrum;
import eon.network.Link;
/**
* @author vxFury
*
*/
public class ResourceOnLink {
private Link link;
private int startIndex;
private int slots;
private Request request;
public ResourceOnLink(Link link, int startIndex, int slots, Request request) {
setLink(link);
setStartIndex(startIndex);
setSlots(slots);
setRequest(request);
}
public int getStartIndex() {
return startIndex;
}
public void setStartIndex(int startIndex) {
this.startIndex = startIndex;
}
public int getSlots() {
return slots;
}
public void setSlots(int slots) {
this.slots = slots;
}
public Link getLink() {
return link;
}
public void setLink(Link link) {
this.link = link;
}
public Request getRequest() {
return request;
}
public void setRequest(Request request) {
this.request = request;
}
} |
from enum import IntEnum


class StickerFormatType(IntEnum):
    png = 1
    apng = 2
    lottie = 3


# Conversion instruction for each supported sticker format. Because
# StickerFormatType is an IntEnum, plain ints (1, 2, 3) hash and compare
# equal to the members, so raw int inputs resolve too.
_CONVERSIONS = {
    StickerFormatType.png: "Convert to PNG",
    StickerFormatType.apng: "Convert to Animated PNG",
    StickerFormatType.lottie: "Convert to Lottie Animation",
}


def process_sticker(format_type):
    """Return the conversion step for ``format_type``.

    Unknown values yield the invalid-format message instead of raising.
    """
    return _CONVERSIONS.get(format_type, "Invalid sticker format type")


# Demonstration
print(process_sticker(StickerFormatType.png))     # Convert to PNG
print(process_sticker(StickerFormatType.apng))    # Convert to Animated PNG
print(process_sticker(StickerFormatType.lottie))  # Convert to Lottie Animation
print(process_sticker(4))                         # Invalid sticker format type
import assert from "node:assert";
import sinon from "sinon";
import * as scraper from "../../../../src/core/scraper/arte.js";
describe("core/scraper/arte.js", function () {
    describe("extract()", function () {
        it("should return null when it's a unsupported URL", async function () {
            // Guide pages are not video pages, so the scraper must ignore them.
            const url = new URL("https://www.arte.tv/fr/guide/");

            const file = await scraper.extract(url);
            assert.strictEqual(file, null);
        });

        it("should return null when video is unavailable", async function () {
            // The stubbed API answer contains only a variant whose id suffix
            // ("_2") does not match the page language, so no URL is usable.
            const stub = sinon.stub(globalThis, "fetch").resolves(new Response(
                JSON.stringify({
                    videoJsonPlayer: { VSR: { 0: { id: "foo_2" } } },
                }),
            ));

            const url = new URL("https://www.arte.tv/de/videos/bar/baz");

            const file = await scraper.extract(url);
            assert.strictEqual(file, null);

            assert.strictEqual(stub.callCount, 1);
            assert.deepStrictEqual(stub.firstCall.args, [
                "https://api.arte.tv/api/player/v1/config/de/bar",
            ]);
        });

        it("should return french video URL", async function () {
            // Several "foo_1" variants are offered; the scraper should pick
            // the one with the greatest height (400 → https://bar.tv/baz.mp4).
            const stub = sinon.stub(globalThis, "fetch").resolves(new Response(
                JSON.stringify({
                    videoJsonPlayer: {
                        VSR: {
                            0: { id: "foo_1", height: 100 },
                            1: { id: "foo_2" },
                            2: {
                                id: "foo_1",
                                height: 400,
                                url: "https://bar.tv/baz.mp4",
                            },
                            3: {
                                id: "foo_1",
                                height: 200,
                            },
                        },
                    },
                }),
            ));

            const url = new URL("https://www.arte.tv/fr/videos/qux/quux");

            const file = await scraper.extract(url);
            assert.strictEqual(file, "https://bar.tv/baz.mp4");

            assert.strictEqual(stub.callCount, 1);
            assert.deepStrictEqual(stub.firstCall.args, [
                "https://api.arte.tv/api/player/v1/config/fr/qux",
            ]);
        });
    });
});
|
def divide(a, b):
    """Return the floor-division quotient of a by b.

    Rounds toward negative infinity; raises ZeroDivisionError when b == 0.
    """
    quotient = a // b
    return quotient


a, b = 5, 2
number = divide(a, b)
print(number)
<reponame>openarmy/bethelapp
def final_grade(exem, hw):
    """Return the final grade from an exam score and three homework scores.

    If the exam score is above 60 and the homework average is within 30
    points of the exam score, the grade is 80% exam + 20% homework average;
    otherwise it is 80% exam only.

    exem -- numeric exam score
    hw   -- iterable of the three homework scores

    Fixes over the original: the function used undefined globals x/y/z
    instead of hw, `z // 3` bound tighter than the intended (x+y+z)/3, and
    `(0.8 * exem) and (0.2 * hw)` silently dropped the exam term (and
    multiplied a tuple) instead of adding the two components.
    """
    hw1, hw2, hw3 = hw
    hw_avg = (hw1 + hw2 + hw3) / 3
    if exem > 60 and hw_avg <= exem + 30:
        return 0.8 * exem + 0.2 * hw_avg
    return 0.8 * exem


if __name__ == '__main__':
    # input() returns strings: convert before doing arithmetic (the original
    # tuple-unpacked the raw string and compared str to int, which raises).
    x, y, z = (float(s) for s in
               input("Enter three SB scores seperated by a comma: ").split(","))
    exam = float(input("Enter Exam score: "))
    print(final_grade(exam, (x, y, z)))
# Must run as root: the target cert directory may be root-owned.
if [ "$(id -u)" != "0" ]
then
    echo "Please use sudo"
    exit
fi

# Resolve the script's own directory (replaces fragile nested backticks).
SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)"

TARGET_CERT_DIR=/home/kelghar/git/cubietruck_configuration/docker_configs/traefik/certs
WORKING_DIR="$SCRIPT_HOME/working"

# -p: do not fail if a previous aborted run left the directory behind.
mkdir -p "$WORKING_DIR"
# Guard the cd: everything below (including the final rm -r) assumes we
# really are inside the scratch directory.
cd "$WORKING_DIR" || exit 1

echo "Create certificate in $WORKING_DIR"
openssl genrsa -out san_domain_com.key 2048
openssl req -new -out san_domain_com.csr -key san_domain_com.key -config "$SCRIPT_HOME/multi.cfg"
openssl req -text -noout -in san_domain_com.csr
openssl x509 -req -days 365 -in san_domain_com.csr -signkey san_domain_com.key -out san_domain_com.crt -extensions v3_req -extfile "$SCRIPT_HOME/multi.cfg"
openssl req -new -x509 -extensions v3_ca -keyout cacert.key -out cacert.pem -days 365 -config "$SCRIPT_HOME/multi.cfg"

echo "Copy to target directory"
cp san_domain_com.crt "$TARGET_CERT_DIR/server.crt"
cp san_domain_com.key "$TARGET_CERT_DIR/server.key"

rm -r "$WORKING_DIR"
|
<reponame>codeforamerica/open311_java
package org.codeforamerica.open311.internals.network;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.net.MalformedURLException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.codeforamerica.open311.facade.data.Attribute;
import org.codeforamerica.open311.facade.data.MultiValueAttribute;
import org.codeforamerica.open311.facade.data.SingleValueAttribute;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests the {@link URLBuilder} class.
*
* @author <NAME> <<EMAIL>>
*
*/
public class URLBuilderTest {

    private static final String BASE_URL = "https://api.city.gov/dev/v2";
    private static final String FORMAT = "xml";
    private static final String JURISDICTION_ID = "city.gov";

    private URLBuilder builder = new URLBuilder(BASE_URL, JURISDICTION_ID,
            FORMAT);

    @BeforeClass
    public static void testInitialization() {
        System.out.println("[URL BUILDER TEST] Starts");
    }

    @AfterClass
    public static void testFinish() {
        System.out.println("[URL BUILDER TEST] Ends");
    }

    /**
     * GET-style URLs built without optional parameters.
     *
     * NOTE: JUnit's assertEquals takes (expected, actual) — the original
     * passed them reversed, which produced misleading failure messages.
     */
    @Test
    public void urlBuilderWithoutOptionalArgumentsTest()
            throws MalformedURLException {
        assertEquals("https://api.city.gov/dev/v2/services.xml?jurisdiction_id="
                + JURISDICTION_ID,
                builder.buildGetServiceListUrl().toString());
        assertEquals("https://api.city.gov/dev/v2/services/033.xml?jurisdiction_id="
                + JURISDICTION_ID,
                builder.buildGetServiceDefinitionUrl("033").toString());
        assertEquals("https://api.city.gov/dev/v2/tokens/123456.xml?jurisdiction_id="
                + JURISDICTION_ID,
                builder.buildGetServiceRequestIdFromATokenUrl("123456").toString());
        assertEquals("https://api.city.gov/dev/v2/requests/123456.xml?jurisdiction_id="
                + JURISDICTION_ID,
                builder.buildGetServiceRequest("123456").toString());
    }

    /**
     * POST body construction and GET URLs built with optional parameters.
     */
    @Test
    public void urlBuilderWithOptionalArgumentsTest()
            throws MalformedURLException {
        Map<String, String> arguments = new HashMap<String, String>();
        arguments.put("lat", "8.12");
        arguments.put("long", "4.12");
        arguments.put("account_id", "1");
        arguments.put("api_key", "2");
        List<Attribute> attributes = new LinkedList<Attribute>();
        attributes.add(new SingleValueAttribute("code", "value1"));
        attributes.add(new SingleValueAttribute("code2", "value2"));
        attributes.add(new MultiValueAttribute("code3", "value1", "value2"));
        Map<String, String> bodyParameters = builder
                .buildPostServiceRequestBody(arguments, attributes);
        assertEquals("2", bodyParameters.get("api_key"));
        assertEquals("8.12", bodyParameters.get("lat"));
        assertEquals("4.12", bodyParameters.get("long"));
        assertEquals("1", bodyParameters.get("account_id"));
        assertEquals("value1", bodyParameters.get("attribute[code]"));
        assertEquals("value2", bodyParameters.get("attribute[code2]"));
        assertEquals("value1", bodyParameters.get("attribute[code3][0]"));
        assertEquals("value2", bodyParameters.get("attribute[code3][1]"));

        String url = builder.buildPostServiceRequestUrl().toString();
        assertEquals("https://api.city.gov/dev/v2/requests.xml", url);

        // Query-string parameter order is not guaranteed, so check each
        // fragment individually instead of the whole URL.
        arguments = new HashMap<String, String>();
        arguments.put("start_date", "2010-05-24T00:00:00Z");
        arguments.put("end_date", "2010-06-24T00:00:00Z");
        arguments.put("status", "open");
        url = builder.buildGetServiceRequests(arguments).toString();
        assertTrue(url.contains("https://api.city.gov/dev/v2/requests.xml?"));
        assertTrue(url.contains("start_date=2010-05-24T00:00:00Z"));
        assertTrue(url.contains("end_date=2010-06-24T00:00:00Z"));
        assertTrue(url.contains("status=open"));
        assertTrue(url.contains("jurisdiction_id=city.gov"));

        // TODO(review): these fixtures were never asserted against anything
        // in the original — presumably for an unwritten POST-update test.
        arguments = new HashMap<String, String>();
        arguments.put("service_request_id", "2");
        arguments.put("status_notes", "test");
        arguments.put("status", "open");
    }
}
|
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: <NAME>
*/
package com.tzavellas.coeus.mvc.view.helper
import javax.servlet.ServletContext
import org.junit.Test
import org.junit.Assert._
import org.mockito.Mockito._
import com.tzavellas.coeus.test.TestHelpers
/** Unit tests for AssetsHelper URL generation (scripts, stylesheets, images). */
class AssetsHelperTest {
  import AssetsHelperTest._

  @Test
  def without_asset_hosts() {
    // No asset hosts configured: URLs are rooted at the servlet context path
    // ("/test") and carry the default version query string "?0".
    assertEquals("""<script type="text/javascript" src="/test/assets/jquery.js?0"></script>""", helper1.script("jquery").toString)
    assertEquals("""<link rel="stylesheet" type="text/css" media="print" href="/test/assets/yui.css?0"/>""", helper1.stylesheet("yui", "print").toString)
    assertEquals("""<img src="/test/assets/logo.png?0" alt="The logo"/>""", helper1.image("logo.png", "The logo").toString)
    assertEquals("""<img src="/test/assets/logo.png?0" alt="The logo" width="10" height="20"/>""", helper1.image("logo.png", "The logo", 10, 20).toString)
  }

  @Test
  def with_asset_host() {
    // With an asset host configured, URLs are absolute against that host
    // instead of the servlet context path.
    assertEquals("""<script type="text/javascript" src="http://assets.example.com/assets/jquery.js?0"></script>""", helper2.script("jquery").toString)
    assertEquals("""<link rel="stylesheet" type="text/css" media="print" href="http://assets.example.com/assets/yui.css?0"/>""", helper2.stylesheet("yui", "print").toString)
    assertEquals("""<img src="http://assets.example.com/assets/logo.png?0" alt="The logo"/>""", helper2.image("logo.png", "The logo").toString)
    assertEquals("""<img src="http://assets.example.com/assets/logo.png?0" alt="The logo" width="10" height="20"/>""", helper2.image("logo.png", "The logo", 10, 20).toString)
  }

  @Test
  def custom_version() {
    // An overridden version replaces the default "?0" cache-busting suffix.
    assertEquals("""<script type="text/javascript" src="/test/assets/jquery.js?42"></script>""", helper3.script("jquery").toString)
  }
}
/** Companion object holding the shared test fixtures. */
object AssetsHelperTest extends TestHelpers {
  val host = "http://assets.example.com"

  // Mocked servlet context with a fixed context path of "/test".
  val context = mock[ServletContext]
  when(context.getContextPath).thenReturn("/test")

  // Default configuration: no asset hosts, default version.
  object helper1 extends AssetsHelper {
    val servletContext = context
  }

  // Serves assets from a single external asset host.
  object helper2 extends AssetsHelper {
    val servletContext = context
    override val assetHosts = List(host)
  }

  // Uses an explicit asset version in generated URLs.
  object helper3 extends AssetsHelper {
    val servletContext = context
    override val version = Some("42")
  }
}
|
package com.hewentian.hadoop.mr;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
* <p>
* <b>TempJob</b> 是 统计温度的Job
* 需求:
* 1. 计算2016-2018年,每年温度最高的时间;
* 2. 计算2016-2018年,每年温度最高的前10天。
*
* 思路:
* 1. 按年份升序排序,同时每一年的温度降序排序;
* 2. 按年份分组,每一年对应一个reduce任务;
* 3. mapper输出的key为一个封装对象;
* 4. 学习自定义排序、分区、分组。
* </p>
* <p>
* 数据如下:
* 年-月-日 时:分:秒 温度
* 2016-10-01 12:21:05 34
* 2016-10-02 14:01:03 36
* 2017-01-01 10:31:12 32
* 2017-10-01 12:21:02 37
* 2018-12-01 12:21:02 23
* 2017-10-02 12:21:02 41
* 2017-10-03 12:21:02 27
* 2018-01-01 12:21:02 45
* 2018-07-02 12:21:02 46
* </p>
* <p>
* 执行mapReduce后的输出结果如下:
* $ ./bin/hdfs dfs -ls /output/temp/
* Found 4 items
* -rw-r--r-- 3 hadoop supergroup 0 2018-12-31 06:58 /output/temp/_SUCCESS
* -rw-r--r-- 3 hadoop supergroup 110 2018-12-31 06:58 /output/temp/part-00000
* -rw-r--r-- 3 hadoop supergroup 220 2018-12-31 06:58 /output/temp/part-00001
* -rw-r--r-- 3 hadoop supergroup 165 2018-12-31 06:58 /output/temp/part-00002
* $
* $ ./bin/hdfs dfs -cat /output/temp/*
* TempKeyPair{year=2016, temp=36} 2016-10-02 14:01:03 36
* TempKeyPair{year=2016, temp=34} 2016-10-01 12:21:05 34
* TempKeyPair{year=2017, temp=41} 2017-10-02 12:21:02 41
* TempKeyPair{year=2017, temp=37} 2017-10-01 12:21:02 37
* TempKeyPair{year=2017, temp=32} 2017-01-01 10:31:12 32
* TempKeyPair{year=2017, temp=27} 2017-10-03 12:21:02 27
* TempKeyPair{year=2018, temp=46} 2018-07-02 12:21:02 46
* TempKeyPair{year=2018, temp=45} 2018-01-01 12:21:02 45
* TempKeyPair{year=2018, temp=23} 2018-12-01 12:21:02 23
* $
* $ ./bin/hdfs dfs -cat /output/temp/part-00000
* TempKeyPair{year=2016, temp=36} 2016-10-02 14:01:03 36
 * TempKeyPair{year=2016, temp=34} 2016-10-01 12:21:05 34
* </p>
*
* @author <a href="mailto:<EMAIL>">hewentian</a>
* @date 2019-01-03 22:22:32
* @since JDK 1.8
*/
public class TempJob {

    /** Mapper: parses "yyyy-MM-dd HH:mm:ss temp" lines into (TempKeyPair, raw line). */
    static class TempMapper extends Mapper<LongWritable, Text, TempKeyPair, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            if (StringUtils.isNotBlank(line)) {
                // Expected format: date, time, temperature -> exactly 3 space-separated fields.
                String[] ss = line.split(" ");
                if (ss.length == 3) {
                    String date = ss[0];
                    TempKeyPair tempKeyPair = new TempKeyPair();
                    tempKeyPair.setYear(Integer.parseInt(date.split("-")[0]));
                    tempKeyPair.setTemp(Integer.parseInt(ss[2]));
                    context.write(tempKeyPair, value);
                }
            }
        }
    }

    /** Reducer: emits every record of a group; sorting/grouping is configured on the job. */
    static class TempReducer extends Reducer<TempKeyPair, Text, TempKeyPair, Text> {
        @Override
        protected void reduce(TempKeyPair key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            for (Text value : values) {
                context.write(key, value);
            }
        }
    }

    /**
     * Entry point: args[0] = input file, args[1] = output directory.
     * Exits non-zero on bad usage or job failure.
     */
    public static void main(String[] args) {
        if (args.length != 2) {
            System.out.println("need: input file and output dir.");
            System.out.println("eg: {HADOOP_HOME}/bin/hadoop jar /home/hadoop/tempStat.jar com.hewentian.hadoop.mr.TempJob /temp.txt /output/temp/");
            System.exit(1);
        }
        try {
            Job job = Job.getInstance();
            job.setJobName("temperature stat demo");
            job.setJarByClass(TempJob.class);
            job.setMapperClass(TempMapper.class);
            job.setReducerClass(TempReducer.class);
            job.setOutputKeyClass(TempKeyPair.class);
            job.setOutputValueClass(Text.class);
            job.setNumReduceTasks(3); // only 3 years in the data: 2016, 2017, 2018
            job.setPartitionerClass(TempYearPartition.class);
            job.setSortComparatorClass(TempSort.class);
            job.setGroupingComparatorClass(TempGroup.class);
            FileInputFormat.addInputPath(job, new Path(args[0]));
            FileOutputFormat.setOutputPath(job, new Path(args[1]));
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            // Bug fix: previously the stack trace was printed and the JVM exited
            // with status 0, signalling success to the caller. Fail explicitly.
            e.printStackTrace();
            System.exit(1);
        }
    }
}
|
#!/bin/bash
# Sweep read/write mixes for the distributed "random" test against proxy
# dh2020pc05 with 8 threads: writes go 0..10000 in steps of 1000 while reads
# go 10000..0, so reads + writes == 10000 for every run.
# runDistributedTests args: random proxy numThread numReads numWrites
for (( writes = 0; writes <= 10000; writes += 1000 )); do
  ./runDistributedTests random dh2020pc05 8 "$(( 10000 - writes ))" "$writes"
done
package ma.ensias.ticket_me.response;
import com.google.gson.annotations.SerializedName;
import java.util.HashMap;
import java.util.List;
/** Wrapper for the "categories" field of the categories API response. */
public class ResponseCategories {

    // Each category is a flat key/value map, as deserialized by Gson.
    @SerializedName("categories")
    private List<HashMap<String,String>> categories;

    public ResponseCategories(List<HashMap<String,String>> categories) {
        this.categories = categories;
    }

    /** A response is considered valid when the categories list was present. */
    public boolean getValid() {
        // Simplified from the redundant "(categories == null) ? false : true".
        return categories != null;
    }

    public List<HashMap<String,String>> getCategories()
    {
        return categories;
    }
}
|
def is_prime(n):
    """Return True if n is prime, using 6k +/- 1 trial division.

    Handles n <= 1 (not prime) and 2, 3 (prime) directly, eliminates
    multiples of 2 and 3, then tests candidate divisors 5, 7, 11, 13, ...
    up to sqrt(n).
    """
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        # Every prime > 3 has the form 6k +/- 1, so only i and i + 2 matter.
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True
import java.util.stream.Stream;
/**
* @author Zexho
* @date 2021/9/6 9:48 上午
*/
public class StreamSample {
public static void main(String[] args) {
parallelStreamTest();
// parallelStreamTest2();
}
public static void parallelStreamTest() {
long count = Stream.of(1, 2, 3, 4, 5).parallel().filter(integer -> integer > 2).count();
assert count == 5;
long count2 = Stream.of(1, 2, 3, 4, 5).parallel().reduce(10, Integer::sum);
assert count2 == 65;
}
public static void parallelStreamTest2() {
long count = Stream.of(1, 2, 3, 4, 5).parallel().reduce((x, y) -> x + y + 10).get();
assert count == 55;
}
}
|
<reponame>LiuFang07/bk-cmdb<filename>src/framework/core/monitor/metric/metric.go
/*
* Tencent is pleased to support the open source community by making 蓝鲸 available.
* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package metric
import (
"configcenter/src/common/metric"
"configcenter/src/framework/core/httpserver"
"configcenter/src/framework/core/option"
"github.com/emicklei/go-restful"
)
// Manager adapts the common metric actions to the framework HTTP server.
type Manager struct {
	// ms holds the metric HTTP actions created by metric.NewMetricController.
	ms []metric.Action
}
var _ Metric = &Manager{}
// NewManager builds a Metric implementation whose actions expose the
// health/metric endpoints for the application described by opt.
func NewManager(opt *option.Options) Metric {
	cfg := metric.Config{
		ModuleName:    opt.AppName,
		ServerAddress: opt.Addrport,
	}
	return &Manager{
		ms: metric.NewMetricController(cfg, healthMetric),
	}
}
// Actions returns metricActions
// Actions returns the metric actions wrapped as httpserver.Action handlers.
func (m *Manager) Actions() []httpserver.Action {
	var httpactions []httpserver.Action
	for _, a := range m.ms {
		// Bug fix: capture a fresh copy of the loop variable. Without this,
		// every registered handler closure shared the same `a` and dispatched
		// to the final element of m.ms.
		a := a
		httpactions = append(httpactions, httpserver.Action{Method: a.Method, Path: a.Path, Handler: func(req *restful.Request, resp *restful.Response) {
			a.HandlerFunc(resp.ResponseWriter, req.Request)
		}})
	}
	return httpactions
}
// HealthMetric check netservice is health
// healthMetric is the health-check callback passed to
// metric.NewMetricController; it always reports the service as healthy.
func healthMetric() metric.HealthMeta {
	return metric.HealthMeta{IsHealthy: true}
}
|
<gh_stars>0
package com.ineunet.knife.qlmap.criteria;
import org.junit.Test;
/** Ad-hoc tests for building query strings with {@code Criteria}. */
public class CriteriaTests {

    // Shared criteria targeting the "bill" entity. NOTE(review): both test
    // methods mutate this shared static state, so results depend on order.
    private static final ICriteria c = new Criteria("bill");

    @Test
    public void testAdd() {
        // An eq restriction with a null value plus an explicit isNull restriction.
        c.addRestrictor(Restrictors.eq("code", null));
        c.addRestrictor(Restrictors.isNull("money"));
        System.out.println(c);
    }

    // @Test
    // NOTE(review): this test is disabled (the @Test annotation is commented
    // out); it only prints generated query strings for manual inspection.
    public void testOrderBy() {
        c.orderBy(" paymentId ASC");
        System.out.println(c.getQueryString());
        c.orderBy(" paymentId DESC");
        System.out.println(c.getQueryString());
        c.addRestrictor(Restrictors.in("id", "(select id from shop where shop_name like '%ads%')"));
        System.out.println(c.getQueryString());
    }
}
|
#!/bin/bash -e
# Optionally mirror extra boot files into the image's /boot directory.
# Bug fix: the variable must be quoted — when EXTRA_BOOT was unset, the old
# unquoted form expanded to `[ -n ]`, which is always true, so wget ran with
# an empty URL on every build.
if [ -n "${EXTRA_BOOT}" ]; then
	wget -r -nH --cut-dirs=5 -np -R "index.html*" "-l${EXTRA_BOOT_DIR_DEPTH}" "${EXTRA_BOOT}" -P "${STAGE_WORK_DIR}/rootfs/boot"
fi
|
#!/bin/bash
#SBATCH --account=def-jsunday
#SBATCH --mem-per-cpu=1G
#SBATCH --time=00:20:00
#SBATCH --mail-type=ALL
#SBATCH --mail-user='nicole.moore@mail.mcgill.ca'

module load java/13.0.2

# SECURITY: the ESGF password used to be hard-coded in this file. It now comes
# from the ESGF_PASSWORD environment variable so the credential is not stored
# in source control (the previously committed password should be rotated).
# Export ESGF_PASSWORD before submitting this job.
: "${ESGF_PASSWORD:?set ESGF_PASSWORD before running this script}"

wget --user='https://esgf-node.llnl.gov/esgf-idp/openid/nicole_a_moore' --password="${ESGF_PASSWORD}" 'http://esgf-data1.diasjp.net/thredds/fileServer/esg_dataroot/cmip5/output1/MIROC/MIROC-ESM/historical/day/atmos/day/r3i1p1/v20120710/tas/tas_day_MIROC-ESM_historical_r3i1p1_18500101-20051231.nc'
def findElement(array, element):
    """Return True if ``element`` occurs in ``array``, else False.

    Returns as soon as a match is found; the original version kept
    scanning the rest of the list after setting the flag.
    """
    for item in array:
        if item == element:
            return True
    return False
package gocb
import "time"
// AnalyticsQuery represents a pending Analytics query.
type AnalyticsQuery struct {
	// options accumulates the raw request parameters (statement, timeout,
	// client_context_id, ...) sent with the query.
	options map[string]interface{}
}
// NewAnalyticsQuery creates a new AnalyticsQuery object from a query string.
func NewAnalyticsQuery(statement string) *AnalyticsQuery {
	opts := make(map[string]interface{})
	opts["statement"] = statement
	return &AnalyticsQuery{options: opts}
}
// ServerSideTimeout indicates the maximum time to wait for this query to complete.
func (q *AnalyticsQuery) ServerSideTimeout(timeout time.Duration) *AnalyticsQuery {
	// The service expects the timeout as a duration string (e.g. "75s").
	q.options["timeout"] = timeout.String()
	return q
}
// Pretty indicates whether the response should be nicely formatted.
func (q *AnalyticsQuery) Pretty(pretty bool) *AnalyticsQuery {
	q.options["pretty"] = pretty
	return q
}
// ContextId sets the client context id for the request, for use with tracing.
func (q *AnalyticsQuery) ContextId(clientContextId string) *AnalyticsQuery {
	q.options["client_context_id"] = clientContextId
	return q
}
// RawParam allows specifying custom query options.
func (q *AnalyticsQuery) RawParam(name string, value interface{}) *AnalyticsQuery {
	q.options[name] = value
	return q
}
// Priority sets whether or not the query should be run with priority status.
func (q *AnalyticsQuery) Priority(priority bool) *AnalyticsQuery {
	if !priority {
		// Absence of the option means normal priority.
		delete(q.options, "priority")
		return q
	}
	q.options["priority"] = -1
	return q
}
// Deferred sets whether or not the query should be run as a deferred query.
//
// Experimental: This API is subject to change at any time.
func (q *AnalyticsQuery) Deferred(deferred bool) *AnalyticsQuery {
	if !deferred {
		// Absence of the option means synchronous execution.
		delete(q.options, "mode")
		return q
	}
	q.options["mode"] = "async"
	return q
}
|
package com.corsair.sparrow.pirate.zuul.domain.properties;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
/**
 * Binds the {@code spring.redis.*} configuration keys; Lombok's {@code @Data}
 * generates the getters/setters. Invalid and unknown fields are ignored.
 *
 * @author jack
 */
@Data
@ConfigurationProperties(prefix = "spring.redis",ignoreInvalidFields = true,ignoreUnknownFields = true)
public class RedisConfigProperties {
    // Redis server host name or IP.
    private String host;
    // Redis server port.
    private Integer port;
    // Redis database index to select.
    private Integer database;
    // Password used for AUTH, if any.
    private String password;
}
|
package cc.sfclub.events;
import cc.sfclub.core.Core;
import org.greenrobot.eventbus.EventBus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Event base class and static helpers around the process-wide EventBus:
 * registering/unregistering listeners, posting events, and cancelling delivery.
 */
public abstract class Event {
    private static final Logger logger = LoggerFactory.getLogger(Event.class);

    // Shared bus: suppress "no subscriber" events and log spam; let
    // subscriber exceptions propagate so broken listeners fail loudly.
    private static EventBus eventBus = EventBus.builder()
            .sendNoSubscriberEvent(false)
            .logNoSubscriberMessages(false)
            .throwSubscriberException(true)
            .build();

    public static void registerListeners(Object... object) {
        for (Object o : object) {
            eventBus.register(o);
        }
    }

    public static void unregisterListeners(Object... object) {
        // Bug fix: previously the varargs array itself was passed to
        // EventBus.unregister(Object), so the individual listeners were never
        // unregistered. Unregister each listener, mirroring registerListeners.
        for (Object o : object) {
            eventBus.unregister(o);
        }
    }

    /** Rebuilds the bus with the same configuration as the initial one. */
    private static void initEventBus() {
        eventBus = EventBus.builder()
                .sendNoSubscriberEvent(false)
                .throwSubscriberException(true)
                .logNoSubscriberMessages(false)
                .build();
    }

    public static void unregisterAllListeners() {
        // Dropping the old bus instance discards all registrations at once.
        eventBus = null;
        initEventBus();
    }

    public static void postEvent(Event event) {
        eventBus.post(event);
    }

    public static void postEventSticky(Event event) {
        eventBus.postSticky(event);
    }

    /** Cancels further delivery of the event; see EventBus.cancelEventDelivery. */
    public static void setCancelled(Event event) {
        eventBus.cancelEventDelivery(event);
    }

    public static void broadcastMessage(MessageEvent event, long time) {
        Core.get().getPolarSec().postMessage(event, time);
    }
}
|
#!/bin/bash

# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# We should not assume that any of the steps have been executed.
# This makes it possible for a user to cleanup at any point.

source ./env.sh

./scripts/vtgate-down.sh

# Stop every tablet (and its mysqld) that the example may have started.
for tablet in 200 300 400 500; do
	if vtctlclient -server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then
		echo "Shutting down tablet zone1-$tablet"
		CELL=zone1 TABLET_UID=$tablet ./scripts/vttablet-down.sh
		echo "Shutting down mysql zone1-$tablet"
		CELL=zone1 TABLET_UID=$tablet ./scripts/mysqlctl-down.sh
	fi
done

./scripts/vtctld-down.sh

# Tear down whichever topology server was selected via $TOPO.
if [ "${TOPO}" = "zk2" ]; then
	CELL=zone1 ./scripts/zk-down.sh
elif [ "${TOPO}" = "k8s" ]; then
	CELL=zone1 ./scripts/k3s-down.sh
else
	CELL=zone1 ./scripts/etcd-down.sh
fi

# pedantic check: grep for any remaining processes
# Idiom fix: [ -n ... ] instead of [ ! -z ... ].
if [ -n "$VTDATAROOT" ]; then
	if pgrep -f -l "$VTDATAROOT" >/dev/null; then
		echo "ERROR: Stale processes detected! It is recommended to manually kill them:"
		pgrep -f -l "$VTDATAROOT"
	else
		echo "All good! It looks like every process has shut down"
	fi

	# ${VTDATAROOT:?} aborts if the variable is somehow empty, so this can
	# never expand to "rm -r /*". Quoting the expansion (with the glob outside
	# the quotes) keeps paths containing spaces safe.
	rm -r "${VTDATAROOT:?}"/*
fi

disown -a
/* eslint prefer-arrow-callback:0 */
import fetchJsonp from 'fetch-jsonp';
import {replaceEnvLink, onSprintr, onCloud, mxEnv} from 'Resources/helpers';
import {microflows, links} from 'Resources/mendix.json';
const profileUrl = replaceEnvLink(links.profile) + `?q=${Number(new Date())}`;
// Returns the profile-menu microflow for the environments that lack AppBar 2,
// or null for any other environment.
const getFallbackMicroflow = env => {
    switch (env) {
        case 'heimdal':
            return microflows.heimdal.profileMenu;
        case 'brokkr':
            return microflows.brokkr.profileMenu;
        case 'privatecloud':
            return microflows.privatecloud.profileMenu;
        default:
            return null;
    }
};
// Fallback profile lookup for apps without AppBar 2 (heimdal, brokkr,
// privatecloud): fetches the profile via a Mendix microflow using the client
// API, then commits it to the store. When the fallback does not apply or
// fails, the (logged-out) profile is committed unchanged.
const fallbackProfileCall = (commit, profile) => {
    const env = mxEnv();
    const appsWithoutAppbar2 = ['heimdal', 'brokkr', 'privatecloud'];
    const requiresFallback = env && appsWithoutAppbar2.indexOf(env) !== -1;
    // The Mendix client API is not guaranteed to be present on every page.
    const isClientApiLoaded = window.mx && window.mx.data && window.mx.data.action;
    if (requiresFallback && isClientApiLoaded) {
        const microflow = getFallbackMicroflow(env);
        window.mx.data.action({
            params: {
                actionname: microflow
            },
            callback: obj => {
                if (obj.length) {
                    // Copy every attribute of the returned Mendix object into a
                    // plain object, then project it onto the profile shape.
                    const mxObj = obj[0];
                    const readObject = {};
                    mxObj.getAttributes().forEach(attr => {
                        readObject[attr] = mxObj.get(attr);
                    });
                    profile.loggedIn = true;
                    profile.avatarUrl = readObject.AvatarUrl;
                    profile.displayName = readObject.DisplayName;
                    profile.userName = readObject.EmailAddress;
                    profile.logoutUrl = true; // doesn't matter
                }
                commit('profile', profile);
            },
            error: err => {
                // On microflow failure, still commit so the UI stops waiting.
                console.warn(err);
                commit('profile', profile);
            }
        });
    } else {
        commit('profile', profile);
    }
};
export default {
    // Loads the user profile via JSONP. Falls back to a Mendix microflow for
    // environments without AppBar 2, and always flips `loaded` so the UI can
    // stop waiting regardless of outcome.
    getProfile({commit}) {
        fetchJsonp(profileUrl, {
            jsonpCallbackFunction: 'getProfile'
        })
        .then(function (response) {
            return response.json();
        }).then(function (json) {
            commit('loaded', true);
            if (json && json.length === 1) {
                const profile = json[0];
                if (profile.loggedIn) {
                    commit('profile', profile);
                } else {
                    fallbackProfileCall(commit, profile);
                }
                // NOTE(review): both branches commit messageStatus 1 — the
                // partner-status dispatch that distinguished them is commented
                // out; confirm this is intentional.
                if (typeof profile.openId === 'undefined' || !process.env.OPTIONS.banner) {
                    commit('messageStatus', 1);
                } else {
                    commit('messageStatus', 1);
                    // dispatch('getPartnerStatus', profile.openId);
                }
            } else {
                commit('messageStatus', 1);
                console.log(`Failed to find profile, got response: `, json);
            }
        }).catch(function (ex) {
            commit('loaded', true);
            commit('messageStatus', 1);
            console.log(`Failed to get profile: `, ex);
        });
    },
    // getPartnerStatus({commit}, openID) {
    //   const url = links.isPartner + escape(openID);
    //   fetchJsonp(url, {
    //     jsonpCallbackFunction: 'partnerstatus'
    //   })
    //     .then(response => response.json())
    //     .then(json => {
    //       commit('messageStatus', typeof json.isPartner === 'undefined' ? 1 : json.isPartner ? 3 : 2);
    //     })
    //     .catch(ex => {
    //       commit('messageStatus', false);
    //       console.log(`Failed to check profile status: `, ex);
    //     });
    // },
    // Fetches admin attributes through the profile-menu microflow when running
    // on Sprintr or the cloud portal and the Mendix client API is available.
    getAdminAttributes({commit}) {
        if (window.mx && window.mx.data && window.mx.data.action) {
            let MF = false;
            if (onSprintr()) {
                MF = microflows.sprintr.profileMenu;
            } else if (onCloud()) {
                MF = microflows.cloudportal.profileMenu;
            }
            if (MF) {
                window.mx.data.action({
                    params: {
                        actionname: MF
                    },
                    callback: obj => {
                        if (obj.length) {
                            // Copy attributes off the Mendix object into a plain object.
                            const mxObj = obj[0];
                            const returnObj = {};
                            mxObj.getAttributes().forEach(attr => {
                                returnObj[attr] = mxObj.get(attr);
                            });
                            commit('adminDetails', returnObj);
                        }
                    },
                    error: () => {
                        // Errors are intentionally swallowed here.
                        // console.error(err);
                    }
                });
            }
        }
    }
};
|
import AccessPolicyTools from '../../tools/AccessPolicyTools';
import Module from '../Module';
import ModuleTable from '../ModuleTable';
import ModuleTableField from '../ModuleTableField';
import BGThreadVO from './vos/BGThreadVO';
// Declares the BGThread module: registers the BGThreadVO table so background
// thread state (name + last run time) can be persisted.
export default class ModuleBGThread extends Module {

    public static MODULE_NAME: string = "BGThread";

    // Access-policy identifiers derived from the module name.
    public static POLICY_GROUP = AccessPolicyTools.POLICY_GROUP_UID_PREFIX + ModuleBGThread.MODULE_NAME;
    public static POLICY_BO_ACCESS = AccessPolicyTools.POLICY_UID_PREFIX + ModuleBGThread.MODULE_NAME + ".BO_ACCESS";

    // Lazily created singleton accessor.
    public static getInstance(): ModuleBGThread {
        if (!ModuleBGThread.instance) {
            ModuleBGThread.instance = new ModuleBGThread();
        }
        return ModuleBGThread.instance;
    }

    private static instance: ModuleBGThread = null;

    private constructor() {
        super("bgthread", ModuleBGThread.MODULE_NAME);
        // This module is mandatory: activate it as soon as it is installed.
        this.forceActivationOnInstallation();
    }

    public initialize() {
        this.fields = [];
        this.datatables = [];

        // 'name' is the required human-readable label field.
        let label_field = new ModuleTableField('name', ModuleTableField.FIELD_TYPE_string, 'Nom', true);
        let datatable_fields = [
            label_field,
            // Timestamp of the thread's last execution (optional).
            new ModuleTableField('last_up_date', ModuleTableField.FIELD_TYPE_tstz, 'Dernière exécution', false)
        ];
        this.datatables.push(new ModuleTable(this, BGThreadVO.API_TYPE_ID, () => new BGThreadVO(), datatable_fields, label_field, "BGThreads"));
    }
}
package com.senjacreative.kiwarichat.Utils;
import android.app.Activity;
import android.app.Dialog;
import android.content.Intent;
import android.net.Uri;
import android.view.View;
import android.view.Window;
import android.widget.Button;
import android.widget.TextView;
import androidx.cardview.widget.CardView;
import com.bumptech.glide.Glide;
import com.senjacreative.kiwarichat.R;
import de.hdodenhof.circleimageview.CircleImageView;
/** Helper for showing the profile and "about" dialogs. */
public class ShowDialog {

    /** Shows the profile dialog populated from the given session (name, email, avatar). */
    public void show(Activity activity, Session session){
        final Dialog dialog = new Dialog(activity);
        dialog.requestWindowFeature(Window.FEATURE_NO_TITLE);
        dialog.setContentView(R.layout.dialog_profile);

        TextView name = (TextView) dialog.findViewById(R.id.name);
        name.setText(session.getName());
        TextView email = (TextView) dialog.findViewById(R.id.email);
        email.setText(session.getEmail());

        CircleImageView avatar = (CircleImageView) dialog.findViewById(R.id.avatar);
        // Show the primary color as a placeholder while the avatar loads.
        Glide.with(activity)
                .load(session.getAvatar())
                .placeholder(activity.getResources().getDrawable(R.color.colorPrimary))
                .into(avatar);

        dialog.show();
    }

    /** Shows the "about" dialog; tapping the credit line opens the author's GitHub page. */
    public void show(final Activity activity){
        final Dialog dialog = new Dialog(activity);
        dialog.requestWindowFeature(Window.FEATURE_NO_TITLE);
        dialog.setContentView(R.layout.dialog_about);

        TextView made = (TextView) dialog.findViewById(R.id.made);
        made.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                activity.startActivity(new Intent(Intent.ACTION_VIEW).setData(Uri.parse("https://github.com/MakesMeInspire")));
            }
        });

        dialog.show();
    }
}
|
def searchValue(arr, search_value):
    """Return the index of the first occurrence of search_value in arr, or -1."""
    for index, value in enumerate(arr):
        if value == search_value:
            return index
    return -1


result = searchValue([1, 3, 5, 7, 9], 5)
print(result)
<reponame>tt-p/public-transportation-system
package Model;
/** A fare entry: a card type paired with its ticket price. */
public class TransportPrice {

    private String cardType;
    private double price;

    public TransportPrice(String cardType, double price) {
        this.cardType = cardType;
        this.price = price;
    }

    /** @return the card type this price applies to */
    public String getCardType() {
        return cardType;
    }

    /** @return the fare for this card type */
    public double getPrice() {
        return price;
    }
}
#!/bin/bash -e
# Build the ftgo-application Docker image.
# Usage: build.sh IMAGE_NAME TAG
if [ $# -ne 2 ]; then
  echo "usage: $0 IMAGE_NAME TAG" >&2
  exit 1
fi
IMAGE_NAME=$1
TAG=$2

./gradlew clean
./gradlew assemble
# Quote the tag so empty/whitespace values fail in docker, not silently here.
docker build -t "${IMAGE_NAME}:${TAG}" ftgo-application/
#!/bin/bash
# Arithmetic demo: the right way...
echo $((10+10))
# ...and the wrong way (echo just prints the literal text "10+10").
echo 10+10

read -p "nombre 1 : " nb1
read -p "nb 2 : " nb2
# Use POSIX arithmetic expansion instead of the legacy `let` builtin,
# whose unquoted argument is subject to word splitting and globbing.
result=$((nb1 * nb2))
echo "$result"
<reponame>laungcisin/bdi<filename>models/sdtBdiModel.go
package models
import (
"fmt"
"github.com/astaxie/beego/orm"
"log"
"strconv"
"strings"
"time"
)
// SdtBdi maps the sdt_bdi table: one indicator definition plus the
// denormalized fields used by the datagrid listing.
type SdtBdi struct {
	Id         int       `form:"bdiId"`      // primary key
	BdiTypeId  int       `form:"bdiTypeId"`  // indicator type id
	BdiName    string    `form:"bdiName"`    // indicator name
	Remarks    string    `form:"remarks"`    // remarks
	UserCode   string    `form:"userCode"`   // creator id
	CreateTime time.Time `form:"createTime"` // creation time
	EditTime   time.Time `form:"editTime"`   // last-modified time
	// Used only when creating a new record.
	BdiSetIds string `form:"bdiSetIds"` // comma-separated ids of owning indicator sets
	// The fields below are populated for datagrid display only.
	TypeName    string // name of the indicator type
	BdiSetNames string // names of the owning indicator sets
}
// TableName tells the ORM which database table this model is stored in.
func (u *SdtBdi) TableName() string {
	return "sdt_bdi"
}
// GetAllSdtBdi returns one page of indicators (page is 1-based, rows per page)
// together with the total number of matching rows.
func (this *SdtBdi) GetAllSdtBdi(rows int, page int) ([]SdtBdi, int, error) {
	o := orm.NewOrm()
	var sdtBdiSlice []SdtBdi = make([]SdtBdi, 0)
	// Keep the selected columns in the same order as the model fields so the
	// raw rows scan straight into SdtBdi.
	var querySql = "select " +
		" sb.id, " +
		" sb.bdi_name, " +
		" sb.bdi_type_id, " +
		" sb.remarks, " +
		" sbt.type_name, " +
		" group_concat(sbset.id) as bdi_set_ids, " +
		" group_concat(sbset.bdi_set_name) as bdi_set_names " +
		"from " +
		" sdt_bdi sb, " +
		" sdt_bdi_set_bdi rel, " +
		" sdt_bdi_set sbset, " +
		" sdt_bdi_type sbt " +
		"where " +
		" sb.id = rel.bdi_id " +
		"and rel.set_id = sbset.id " +
		"and sb.bdi_type_id = sbt.id " +
		"group by sb.id limit ?, ? "
	// Bug fix: MySQL "limit ?, ?" takes (offset, row_count). The second
	// argument used to be page*rows, which returned up to page*rows rows per
	// page instead of rows.
	num, err := o.Raw(querySql, (page-1)*rows, rows).QueryRows(&sdtBdiSlice)
	if err != nil {
		// Bug fix: log.Fatal exited the process here, which made the error
		// return below unreachable; log and propagate instead.
		log.Println("查询表:"+this.TableName()+"出错!", err)
		return nil, 0, err
	}
	var countSql = "select " +
		" count(*) as counts " +
		"from " +
		" sdt_bdi sb, " +
		" sdt_bdi_set_bdi rel, " +
		" sdt_bdi_set sbset, " +
		" sdt_bdi_type sbt " +
		"where " +
		" sb.id = rel.bdi_id " +
		"and rel.set_id = sbset.id " +
		"and sb.bdi_type_id = sbt.id"
	// Overwrites num with the total matching row count.
	err = o.Raw(countSql).QueryRow(&num)
	if err != nil {
		log.Println("查询表:"+this.TableName()+"出错!", err)
		return nil, 0, err
	}
	return sdtBdiSlice, int(num), nil
}
// GetSdtBdiById loads the indicator identified by this.Id into the receiver
// (the previous comment, copied from GetAllSdtBdi, was wrong).
func (this *SdtBdi) GetSdtBdiById() error {
	o := orm.NewOrm()
	// Keep the selected columns in the same order as the model fields so the
	// raw row scans straight into the receiver.
	var querySql = "select " +
		" sb.id, " +
		" sb.bdi_name, " +
		" sb.bdi_type_id, " +
		" sb.remarks, " +
		" sbt.type_name, " +
		" group_concat(sbset.id) as bdi_set_ids, " +
		" group_concat(sbset.bdi_set_name) as bdi_set_names " +
		"from " +
		" sdt_bdi sb, " +
		" sdt_bdi_set_bdi rel, " +
		" sdt_bdi_set sbset, " +
		" sdt_bdi_type sbt " +
		"where " +
		" sb.id = ? " +
		" and sb.id = rel.bdi_id " +
		" and rel.set_id = sbset.id " +
		" and sb.bdi_type_id = sbt.id "
	err := o.Raw(querySql, this.Id).QueryRow(this)
	if err != nil {
		fmt.Println(err)
		// Bug fix: log.Fatal exited the process, so the error was never
		// returned to the caller; log and propagate instead.
		log.Println("查询表:" + this.TableName() + "出错!")
		return err
	}
	return nil
}
// Add inserts the indicator and then one sdt_bdi_set_bdi link row per id in
// BdiSetIds (comma-separated), all inside a single transaction; any failure
// rolls everything back.
func (this *SdtBdi) Add() error {
	o := orm.NewOrm()
	// NOTE(review): the error returned by Begin() is ignored — confirm that
	// is acceptable for this ORM/driver.
	o.Begin()
	var insertSdtBdiSql = " insert into sdt_bdi(bdi_name, bdi_type_id, remarks, user_code, create_time) values (?, ?, ?, ?, ?)"
	var insertSdtBdiSetRelBdiSql = " insert into sdt_bdi_set_bdi(set_id, bdi_id) values (?, ?) "
	// NOTE(review): user_code is hard-coded to 0 rather than this.UserCode —
	// confirm this is intentional.
	res, err := o.Raw(insertSdtBdiSql, this.BdiName, this.BdiTypeId, this.Remarks, 0, time.Now()).Exec()
	if err != nil {
		o.Rollback()
		return err
	}
	// The generated id is needed for the link rows below.
	sdtBidId, err := res.LastInsertId()
	if err != nil {
		o.Rollback()
		return err
	}
	bdiSetIds := strings.Split(this.BdiSetIds, ",")
	for _, v := range bdiSetIds {
		bdiSetId, err := strconv.Atoi(v)
		if err != nil {
			o.Rollback()
			return err
		}
		_, err = o.Raw(insertSdtBdiSetRelBdiSql, bdiSetId, sdtBidId).Exec()
		if err != nil {
			o.Rollback()
			return err
		}
	}
	o.Commit()
	return nil
}
// Update rewrites the indicator row and replaces all of its set-membership
// rows (delete + re-insert) inside a single transaction; any failure rolls
// everything back.
func (this *SdtBdi) Update() error {
	o := orm.NewOrm()
	// NOTE(review): the error returned by Begin() is ignored — confirm that
	// is acceptable for this ORM/driver.
	o.Begin()
	var deleteSdtBdiSetRelBdiSql = " delete from sdt_bdi_set_bdi where bdi_id = ? "
	var updateSdtBdiSql = "update sdt_bdi " +
		"set " +
		" bdi_name = ?, " +
		" bdi_type_id = ?, " +
		" remarks = ?, " +
		" edit_time = ? " +
		"where id = ?"
	var insertSdtBdiSetRelBdiSql = " insert into sdt_bdi_set_bdi(set_id, bdi_id) values (?, ?) "
	_, err := o.Raw(updateSdtBdiSql, this.BdiName, this.BdiTypeId, this.Remarks, time.Now(), this.Id).Exec()
	if err != nil {
		o.Rollback()
		return err
	}
	// Drop all existing links, then recreate them from BdiSetIds.
	_, err = o.Raw(deleteSdtBdiSetRelBdiSql, this.Id).Exec()
	if err != nil {
		o.Rollback()
		return err
	}
	bdiSetIds := strings.Split(this.BdiSetIds, ",")
	for _, v := range bdiSetIds {
		bdiSetId, err := strconv.Atoi(v)
		if err != nil {
			o.Rollback()
			return err
		}
		_, err = o.Raw(insertSdtBdiSetRelBdiSql, bdiSetId, this.Id).Exec()
		if err != nil {
			o.Rollback()
			return err
		}
	}
	o.Commit()
	return nil
}
|
package stores
import (
"github.com/bradpurchase/grocerytime-backend/internal/pkg/db"
"github.com/bradpurchase/grocerytime-backend/internal/pkg/db/models"
uuid "github.com/satori/go.uuid"
)
// UpdateStoreUserPrefs updates store user preferences and returns the saved
// record. Recognized args keys: "defaultStore" (bool) and "notifications" (bool).
func UpdateStoreUserPrefs(storeUserID uuid.UUID, args map[string]interface{}) (sup models.StoreUserPreference, err error) {
	query := db.Manager.
		Where("store_user_id = ?", storeUserID).
		First(&sup).
		Error
	if err := query; err != nil {
		return sup, err
	}
	// Robustness fix: use comma-ok type assertions so an unexpected value
	// type is ignored instead of panicking the whole request.
	if v, ok := args["defaultStore"].(bool); ok {
		sup.DefaultStore = v
	}
	if v, ok := args["notifications"].(bool); ok {
		sup.Notifications = v
	}
	if err := db.Manager.Save(&sup).Error; err != nil {
		return sup, err
	}
	return sup, nil
}
|
#!/bin/bash -e

# get the directory of this script
# snippet from https://stackoverflow.com/a/246128/10102404
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

# load the utils
# shellcheck source=/dev/null
source "$SCRIPT_DIR/utils.sh"

# Start from a clean slate in case a previous run left the snap installed.
snap_remove

DEFAULT_TEST_CHANNEL=${DEFAULT_TEST_CHANNEL:-beta}

# install the snap to make sure it installs
# If a specific revision is under test, install that one; otherwise install
# edgexfoundry from the default channel.
if [ -n "$REVISION_TO_TEST" ]; then
    snap_install "$REVISION_TO_TEST" "$REVISION_TO_TEST_CHANNEL" "$REVISION_TO_TEST_CONFINEMENT"
else
    snap_install edgexfoundry "$DEFAULT_TEST_CHANNEL"
fi

# Verify the service on port 48080 binds only to localhost: a wildcard
# (0.0.0.0) listener would expose it to the network, which is a failure.
if lsof -i -P -n | grep "TCP \*:48080 (LISTEN)" ; then
    echo "fail - listening on 0.0.0.0"
    exit 1
elif lsof -i -P -n | grep "TCP 127.0.0.1:48080 (LISTEN)" ; then
    echo "pass - listening on 127.0.0.1"
else
    echo "fail - did not find service on port 48080 - is edgexfoundry running?"
    exit 1
fi

# remove the snap to run again
snap_remove
<gh_stars>0
// Wires up behaviour for the custom "D" widgets once the DOM is ready.
function initialize()
{
	/*
		DCheckbox
	*/
	// Clicking a checkbox shows the check image in its box element.
	$( ".dcheckbox" ).click( function()
	{
		$( this ).find( ".dcheckbox-box" ).css( 'background-image', 'url( "images/dcheckbox-check.png" )' );
	} )
	/*
		DCategoryList
	*/
	// NOTE(review): .ready() fires on document readiness regardless of the
	// selector, and inside its handler `this` is the document — so this
	// collapses ALL .dcategorylist-body elements, not one per list. That may
	// be the intent, but confirm; the same applies to the .dcombobox .ready()
	// below.
	$( ".dcategorylist" ).ready( function()
	{
		$( this ).find( ".dcategorylist-body" ).slideToggle( 0 );
	} )
	// Clicking a category bar toggles its own body with a 200ms animation.
	$( ".dcategorylist-bar" ).click( function()
	{
		$( this ).closest( ".dcategorylist" ).find( ".dcategorylist-body" ).slideToggle( 200 );
	} )
	/*
		DComboBox
	*/
	$( ".dcombobox" ).ready( function()
	{
		$( this ).find( ".dcombobox-menu" ).slideToggle( 0 );
	} )
	// Clicking a combobox instantly toggles its menu.
	$( ".dcombobox" ).click( function()
	{
		$( this ).find( ".dcombobox-menu" ).slideToggle( 0 );
	} )
}

// Run the wiring once the initial document has been parsed.
document.addEventListener( 'DOMContentLoaded', initialize, false );
<reponame>macintoshhelper/react-figma
import * as React from 'react';
import { $updatedYogaCoords, updateYogaNode } from '../yoga/yogaStream';
import { filter, map } from 'rxjs/operators';
// Hook that subscribes a node to the yoga layout stream and returns the
// computed layout props for that node.
export const useYogaLayout = props => {
    const { nodeRef } = props;
    const [yogaProps, setYogaProps] = React.useState<any>({});

    // Subscribe once on mount: keep only messages addressed to this node
    // instance and store their props as state. Unsubscribe on unmount.
    React.useEffect(() => {
        const instance = nodeRef.current;
        const subject = $updatedYogaCoords.pipe(
            filter((message: any) => message.instance === instance),
            map((message: any) => message.props)
        );
        const subscription = subject.subscribe(setYogaProps);
        return () => subscription.unsubscribe();
    }, []);

    const didMountRef = React.useRef(false);

    // Trigger a yoga recompute whenever a layout-affecting prop changes, but
    // skip the very first render — presumably the initial layout is produced
    // by the mount path rather than this effect (TODO confirm).
    React.useEffect(() => {
        if (!didMountRef.current) {
            didMountRef.current = true;
            return;
        }
        const instance = nodeRef.current;
        updateYogaNode(instance);
    }, [props.children, props.width, props.height, props.style, props.characters, props.fontSize]);

    return yogaProps;
};
|
// Returns a pseudo-random integer in the inclusive range [1, 10].
function getRandomNumber() {
  const scaled = Math.random() * 10; // [0, 10)
  return 1 + Math.floor(scaled);     // [1, 10]
}

console.log(getRandomNumber());
package com.threathunter.bordercollie.slot.compute;
import com.threathunter.bordercollie.slot.compute.graph.DimensionVariableGraphManager;
import com.threathunter.bordercollie.slot.compute.graph.VariableGraph;
import com.threathunter.bordercollie.slot.compute.graph.VariableGraphManager;
import com.threathunter.bordercollie.slot.compute.graph.extension.incident.IncidentComputeWorker;
import com.threathunter.bordercollie.slot.compute.graph.extension.incident.IncidentNode;
import com.threathunter.bordercollie.slot.compute.graph.extension.incident.IncidentVariableGraphManager;
import com.threathunter.bordercollie.slot.compute.graph.extension.incident.IncidentVariableMetaRegister;
import com.threathunter.bordercollie.slot.compute.graph.node.CacheNode;
import com.threathunter.bordercollie.slot.compute.graph.node.NodePrimaryData;
import com.threathunter.bordercollie.slot.compute.graph.node.TopVariableNode;
import com.threathunter.bordercollie.slot.compute.graph.node.VariableNode;
import com.threathunter.bordercollie.slot.compute.graph.query.VariableQuery;
import com.threathunter.bordercollie.slot.util.MetaUtil;
import com.threathunter.bordercollie.slot.util.ResultFormatter;
import com.threathunter.bordercollie.slot.util.VariableQueryUtil;
import com.threathunter.common.Identifier;
import com.threathunter.model.VariableMeta;
import com.threathunter.variable.DimensionType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
*
*/
// Read-side query facade over the slot computation engine: answers queries
// against previous (closed) slot windows and the currently-open window.
// NOTE(review): documentation below is inferred from this file only; the
// semantics of the graph managers and cache nodes live elsewhere — confirm.
public class SlotQuery implements SlotQueryable {
    private static Logger logger = LoggerFactory.getLogger(SlotQuery.class);

    // Engine owning the slot windows and the current graph managers.
    private final SlotComputable engine;

    public SlotQuery(SlotComputable engine) {
        this.engine = engine;
    }

    /**
     * Queries the value of {@code identifier} for the given keys across all
     * previous slot windows.
     *
     * NOTE(review): every window/shard stores its result under the same
     * {@code keys} collection object, so later writes overwrite earlier ones
     * in {@code results} — confirm this aggregation is intended.
     */
    @Override
    public Object queryPrevious(Identifier identifier, Collection<String> keys) {
        Map<Long, SlotWindow> allSlots = engine.getAllSlots();
        Map<Collection<String>, Map<String, Object>> results = new HashMap<>();
        allSlots.forEach((k, v) -> {
            SlotWindow window = v;
            Identifier id = identifier;
            VariableMeta meta = null;
            VariableGraphManager manager = null;
            Map<DimensionType, DimensionVariableGraphManager> dimensionedGraphManagers = window.getDimensionedGraphManagers();
            Map<DimensionType, IncidentVariableGraphManager> dimensionedIncidentVariableGraphManager = window.getDimensionedIncidentVariableGraphManager();
            List<VariableMeta> metas = window.getMetas();
            // Resolve the variable meta: prefer the window's own metas,
            // falling back to the incident variable registry.
            if (MetaUtil.getMetas(metas, id) != null) {
                meta = MetaUtil.getMetas(metas, id);
                manager = dimensionedGraphManagers.get(DimensionType.valueOf(meta.getDimension().toUpperCase()));
            } else {
                meta = IncidentVariableMetaRegister.getMeta(id);
                // NOTE(review): if the registry also returns null, the next
                // line NPEs before the null check below — confirm callers
                // only pass registered identifiers.
                manager = dimensionedIncidentVariableGraphManager.get(DimensionType.valueOf(meta.getDimension().toUpperCase()));
            }
            if (meta == null) {
                logger.error("variable does not exist");
                return;
            }
            if (manager instanceof DimensionVariableGraphManager) {
                // Common (dimension) variable: fan out the keys to their
                // shards and read each shard's variable node.
                DimensionVariableGraphManager dimensionManager = (DimensionVariableGraphManager) manager;
                DimensionVariableGraphManager.VariableGraphProcessor[] shardGraphsProcessors = dimensionManager.getShardGraphsProcessors();
                //TODO YY
                /*int hash = HashType.getMurMurHash(dimensionManager.getHashType(), DimensionHelper.getDimensionKey(DimensionType.valueOf(meta.getDimension().toUpperCase())));
                int shard = hash < 0 ? (hash * -1 % dimensionManager.getShardGraphsProcessors().length) : hash % shardGraphsProcessors.length;*/
                Map<Integer, Collection<String>> shardKeys = manager.groupShardKeys(keys);
                shardKeys.forEach((shard, sks) -> {
                    DimensionVariableGraphManager.VariableGraphProcessor shardGraphsProcessor = shardGraphsProcessors[shard];
                    VariableGraph variableGraph = shardGraphsProcessor.getVariableGraph();
                    Map<Identifier, VariableNode> variableMap = variableGraph.getVariableMap();
                    VariableNode variableNode = variableMap.get(identifier);
                    if (variableNode instanceof TopVariableNode) {
                        Object data = ((TopVariableNode) variableNode).getData();
                        results.put(keys, ResultFormatter.parse(k, data));
                    } else if (variableNode instanceof CacheNode) {
                        Object data = ((CacheNode) variableNode).getData(sks.toArray(new String[]{}));
                        results.put(keys, ResultFormatter.parse(k, data));
                    } else
                        results.put(keys, ResultFormatter.parse(k, ""));
                });
            } else {
                // Incident variable: same per-shard pattern via workers.
                IncidentVariableGraphManager incidentManager = (IncidentVariableGraphManager) manager;
                IncidentComputeWorker[] workers = incidentManager.getWorkers();
                Map<Integer, Collection<String>> shardKeys = incidentManager.groupShardKeys(keys);
                shardKeys.forEach((shard, sks) -> {
                    IncidentComputeWorker shardGraphsProcessor = workers[shard];
                    Map<Identifier, IncidentNode> incidentNodeMap = shardGraphsProcessor.getIncidentNodeMap();
                    // NOTE(review): unconditional cast to CacheNode before the
                    // TopVariableNode instanceof check — throws
                    // ClassCastException if the node is not a CacheNode; confirm
                    // incident nodes are always CacheNode subtypes.
                    CacheNode variableNode = (CacheNode) incidentNodeMap.get(identifier);
                    if (variableNode instanceof TopVariableNode) {
                        Object data = ((TopVariableNode) variableNode).getData();
                        results.put(keys, ResultFormatter.parse(k, data));
                    } else if (variableNode instanceof CacheNode) {
                        Object data = ((CacheNode) variableNode).getData(sks.toArray(new String[]{}));
                        results.put(keys, ResultFormatter.parse(k, data));
                    } else
                        results.put(keys, ResultFormatter.parse(k, ""));
                });
            }
        });
        logger.info("query previous slot return, results = {}", results);
        return results;
    }

    /**
     * Collects the CacheNodes for {@code identifier} across all previous slot
     * windows and merges them into a single primary-data result.
     *
     * @return the merged result, or null when keys is null or nothing merged
     */
    @Override
    public Object mergePrevious(Identifier identifier, Collection<String> keys) {
        if (keys == null) {
            logger.warn("query merge previous, keys = NULL");
            return null;
        } else {
            logger.info("query merge previous, identifier = {}, keys = {} ", identifier, keys.toArray(new String[]{}));
        }
        Map<Long, SlotWindow> allSlots = engine.getAllSlots();
        Map<Collection<String>, Map<String, Object>> results = new HashMap<>();
        // Accumulates one CacheNode per matching window/shard; merged below.
        List<CacheNode> toMergeList = new ArrayList<>();
        allSlots.forEach((k, v) -> {
            SlotWindow window = v;
            Identifier id = identifier;
            VariableMeta meta = null;
            VariableGraphManager manager = null;
            Map<DimensionType, DimensionVariableGraphManager> dimensionedGraphManagers = window.getDimensionedGraphManagers();
            Map<DimensionType, IncidentVariableGraphManager> dimensionedIncidentVariableGraphManager = window.getDimensionedIncidentVariableGraphManager();
            List<VariableMeta> metas = window.getMetas();
            // Same meta/manager resolution as queryPrevious.
            if (MetaUtil.getMetas(metas, id) != null) {
                meta = MetaUtil.getMetas(metas, id);
                manager = dimensionedGraphManagers.get(DimensionType.valueOf(meta.getDimension().toUpperCase()));
            } else {
                meta = IncidentVariableMetaRegister.getMeta(id);
                manager = dimensionedIncidentVariableGraphManager.get(DimensionType.valueOf(meta.getDimension().toUpperCase()));
            }
            if (meta == null) {
                logger.error("variable does not exist");
                return;
            }
            if (manager instanceof DimensionVariableGraphManager) {
                DimensionVariableGraphManager dimensionManager = (DimensionVariableGraphManager) manager;
                DimensionVariableGraphManager.VariableGraphProcessor[] shardGraphsProcessors = dimensionManager.getShardGraphsProcessors();
                Map<Integer, Collection<String>> shardKeys = manager.groupShardKeys(keys);
                shardKeys.forEach((shard, sks) -> {
                    DimensionVariableGraphManager.VariableGraphProcessor shardGraphsProcessor = shardGraphsProcessors[shard];
                    VariableGraph variableGraph = shardGraphsProcessor.getVariableGraph();
                    Map<Identifier, VariableNode> variableMap = variableGraph.getVariableMap();
                    VariableNode variableNode = variableMap.get(identifier);
                    if (variableNode instanceof CacheNode) {
                        toMergeList.add((CacheNode) variableNode);
                    }
                });
            } else {
                IncidentVariableGraphManager incidentManager = (IncidentVariableGraphManager) manager;
                IncidentComputeWorker[] workers = incidentManager.getWorkers();
                Map<Integer, Collection<String>> shardKeys = incidentManager.groupShardKeys(keys);
                shardKeys.forEach((shard, sks) -> {
                    IncidentComputeWorker shardGraphsProcessor = workers[shard];
                    Map<Identifier, IncidentNode> incidentNodeMap = shardGraphsProcessor.getIncidentNodeMap();
                    // NOTE(review): may add null when the identifier is absent
                    // from this worker's map — merge() would then NPE; confirm.
                    CacheNode variableNode = (CacheNode) incidentNodeMap.get(identifier);
                    toMergeList.add(variableNode);
                });
            }
        });
        NodePrimaryData data = merge(toMergeList, keys.toArray(new String[]{}));
        if (data == null) {
            return null;
        }
        logger.info("merge previous slot primary data = {}", data);
        logger.info("query merge previous slot return, result = {}", data.getResult());
        return data.getResult();
    }

    /**
     * Single-key convenience overload; wraps the key in a list and delegates.
     */
    @Override
    public Object mergePrevious(Identifier identifier, String key) {
        if (key == null) {
            return null;
        }
        List<String> keys = new ArrayList<>();
        keys.add(key);
        return mergePrevious(identifier, keys);
    }

    // Folds the node list left-to-right via CacheNode.merge, seeded with null.
    // Note: the trailing null-check is redundant — equivalent to `return pre`.
    private NodePrimaryData merge(List<CacheNode> toMergeList, String... keys) {
        NodePrimaryData pre = null;
        for (CacheNode node : toMergeList) {
            pre = node.merge(pre, keys);
        }
        if (pre != null) {
            return pre;
        }
        return null;
    }

    /**
     * Queries the currently-open slot window for {@code identifier}, sending
     * either a keyed or broadcast query depending on whether the variable has
     * group keys, and waiting up to 2 seconds for the result.
     */
    public Object queryCurrent(Identifier identifier, Collection<String> keys) {
        VariableMeta meta;
        VariableGraphManager manager;
        Identifier id = identifier;
        Map<DimensionType, DimensionVariableGraphManager> dimensionedGraphManagers = engine.getCurrentCommonManagers();
        Map<DimensionType, IncidentVariableGraphManager> dimensionedIncidentVariableGraphManager = engine.getCurrentIncidentManagers();
        List<VariableMeta> metas = engine.getMetas();
        if (MetaUtil.getMetas(metas, id) != null) {
            meta = MetaUtil.getMetas(metas, id);
            manager = dimensionedGraphManagers.get(DimensionType.valueOf(meta.getDimension().toUpperCase()));
        } else {
            meta = IncidentVariableMetaRegister.getMeta(id);
            manager = dimensionedIncidentVariableGraphManager.get(DimensionType.valueOf(meta.getDimension().toUpperCase()));
        }
        if (meta == null) {
            logger.error("variable does not exist");
            logger.warn(">>>>>>query current slot return, results = NULL");
            return null;
        }
        VariableQuery query;
        if (meta.getType().equals("top")) {
            // "top" variables: limited to the top 20 entries.
            if (meta.getGroupKeys() != null && meta.getGroupKeys().size() > 0) {
                // Grouped variable requires explicit keys.
                if (keys == null || keys.size() <= 0) {
                    return null;
                }
                query = VariableQueryUtil.sendKeyTopQuery(manager, id, keys, 20);
            } else {
                query = VariableQueryUtil.broadcastTopQuery(manager, id, 20);
            }
        } else {
            if (meta.getGroupKeys() != null && meta.getGroupKeys().size() > 0) {
                if (keys == null || keys.size() <= 0) {
                    return null;
                }
                query = VariableQueryUtil.sendKeyQuery(manager, id, keys);
            } else {
                query = VariableQueryUtil.broadcastQuery(manager, id);
            }
        }
        // Bounded wait; returns null on timeout.
        Object obj = query.waitQueryResult(2, TimeUnit.SECONDS);
        if (obj != null) {
            logger.info(">>>>>>query current slot return, results = {}", obj);
        }
        return obj;
    }
}
|
#!/bin/bash
# Usage:
#   Go into cmd loop:  sudo ./cleos.sh
#   Run single cmd:    sudo ./cleos.sh <cleos parameters>
#
# Wraps the dockerized `cleos` binary, pointing it at the local nodeos HTTP
# endpoint and the keos wallet service.

# Kept as an array so every word is passed as a distinct argument without
# relying on unquoted word-splitting of a flat string.
PREFIX=(docker-compose exec nodeos cleos -u http://localhost:8013 --wallet-url http://keos:8020)

if [ -z "$1" ]; then
  # Interactive mode: prompt for commands until interrupted.
  while :
  do
    read -r -e -p "cleos " cmd
    history -s "$cmd"
    # $cmd is intentionally unquoted: the typed line must word-split into
    # separate cleos arguments.
    "${PREFIX[@]}" $cmd
  done
else
  "${PREFIX[@]}" "$@"
fi
|
#ifndef INCLUDED_NETWORK_CLIENT_DATAS_H
#define INCLUDED_NETWORK_CLIENT_DATAS_H
#include "network/message.h"
#include "network/message_handler_sub_system.h"
#include "network/message_sender_system.h"
#include "single_message_sender.h"
#include "platform/export.h"
namespace network {
// Network message carrying a snapshot of the per-client data container
// (core::ProgramState::ClientDatas_t), transported via boost::serialization.
class ClientDatasMessage: public Message
{
    friend class ::boost::serialization::access;
public:
    DEFINE_MESSAGE_BASE( ClientDatasMessage )
    // Payload: the client data snapshot to transmit.
    core::ProgramState::ClientDatas_t mClientDatas;
    template<class Archive>
    void serialize( Archive& ar, const unsigned int version );
};
template<class Archive>
void ClientDatasMessage::serialize( Archive& ar, const unsigned int version )
{
    // Serialize the Message base part first, then the payload.
    ar& boost::serialization::base_object<Message>( *this );
    ar& mClientDatas;
}
// Handler executed on the receiving side for each ClientDatasMessage.
class ClientDatasMessageHandlerSubSystem : public MessageHandlerSubSystem
{
public:
    DEFINE_SUB_SYSTEM_BASE( ClientDatasMessageHandlerSubSystem )
    ClientDatasMessageHandlerSubSystem();
    virtual void Init();
    virtual void Execute( Message const& message );
};
// Sender system producing ClientDatasMessage instances; driven per-frame
// through Update().
class ClientDatasMessageSenderSystem : public MessageSenderSystem
{
public:
    DEFINE_SYSTEM_BASE( ClientDatasMessageSenderSystem )
    ClientDatasMessageSenderSystem();
    virtual void Init();
    virtual void Update( double DeltaTime );
};
} // namespace network
// Registers the message type with the serialization layer under the
// "client_datas" tag.
REAPING2_CLASS_EXPORT_KEY2( network__ClientDatasMessage, network::ClientDatasMessage, "client_datas" );
#endif//INCLUDED_NETWORK_CLIENT_DATAS_H
// TODO: to main.cpp:
// Eng.AddSystem(AutoId("client_datas_message_sender_system"));
|
#ifndef GAME_H
#define GAME_H

// Standard headers used directly by this declaration; previously this header
// relied on them arriving transitively, which is fragile.
#include <memory>   // std::shared_ptr
#include <string>   // std::string

#include "model.h"
#include "game_object_3D.h"
#include "camera.h"
#include "window.h"
#include "state.h"
#include "finite_state_machine.h"

// Top-level game object: owns the window, camera, resource managers, the
// scene objects and the finite state machine that drives game states.
class Game
{
public:
   Game();
   ~Game();

   // Non-copyable and non-movable: the game owns unique runtime resources.
   Game(const Game&) = delete;
   Game& operator=(const Game&) = delete;
   Game(Game&&) = delete;
   Game& operator=(Game&&) = delete;

   // Creates the window/context and loads resources.
   // Returns false when initialization fails.
   bool initialize(const std::string& title);

   // Runs the main loop until the game is asked to quit.
   void executeGameLoop();

private:
   std::shared_ptr<FiniteStateMachine> mFSM;
   std::shared_ptr<Window> mWindow;
   std::shared_ptr<Camera> mCamera;
   // NOTE(review): ResourceManager, Texture and Shader are assumed to be
   // declared by the project headers above — confirm.
   ResourceManager<Model> mModelManager;
   ResourceManager<Texture> mTextureManager;
   ResourceManager<Shader> mShaderManager;
   std::shared_ptr<GameObject3D> mTable;
   std::shared_ptr<GameObject3D> mTeapot;
};
#endif
|
<reponame>akokhanovskyi/kaa<filename>server/appenders/mongo-appender/src/test/java/org/kaaproject/kaa/server/appenders/mongo/appender/LogEventTest.java<gh_stars>0
/*
* Copyright 2014-2016 CyberVision, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kaaproject.kaa.server.appenders.mongo.appender;
import com.mongodb.DBObject;
import org.junit.Assert;
import org.junit.Test;
import org.kaaproject.kaa.common.dto.logs.LogEventDto;
public class LogEventTest {
    /** Key shared by the header and event JSON documents. */
    private static final String KEY = "key";
    private static final String HEADER_VALUE = "value";
    private static final Integer EVENT_VALUE = 5;
    /** Header JSON: {"key":"value"} (quoted string value). */
    private static final String HEADER = "{\"" + KEY + "\":\"" + HEADER_VALUE + "\"}";
    /** Event JSON: {key:5} (unquoted key, numeric value). */
    private static final String EVENT = "{" + KEY + ":" + EVENT_VALUE + "}";

    @Test
    public void basicLogEventTest() {
        // Build the LogEvent directly from an inline DTO and verify both
        // JSON payloads round-trip into queryable DBObjects.
        LogEvent parsed = new LogEvent(new LogEventDto(HEADER, EVENT), null, null);

        DBObject headerDoc = parsed.getHeader();
        DBObject eventDoc = parsed.getEvent();

        Assert.assertEquals(HEADER_VALUE, headerDoc.get(KEY));
        Assert.assertEquals(EVENT_VALUE, eventDoc.get(KEY));
    }
}
|
#!/usr/bin/env node
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const prompts_1 = __importDefault(require("prompts"));
const chalk_1 = __importDefault(require("chalk"));
const numberkong_1 = require("numberkong");
const TableExt_1 = __importDefault(require("./TableExt"));
const Palette_1 = require("./Palette");
const TableWidget_1 = __importDefault(require("./TableWidget"));
const configuration_1 = require("./configuration");
const readline_1 = __importDefault(require("readline"));
const ansi_escape_sequences_1 = __importDefault(require("ansi-escape-sequences"));
// Parses a comma-separated move string (e.g. "a1,b2") into Move objects.
// Falsy input (null/undefined/"") yields an empty array.
function parseMoves(str) {
    if (!str) {
        return [];
    }
    return str.split(',').map((token) => numberkong_1.Move.fromString(token));
}
// Renders a move list as a space-separated string. The move at
// selectedIndex (when provided) is shown in bold red, all others in blue.
function formatMoves(moves, selectedIndex) {
    const pieces = moves.map((move, index) => {
        const isSelected = typeof selectedIndex !== "undefined" && index === selectedIndex;
        const paint = isSelected ? chalk_1.default.red.bold : chalk_1.default.blue;
        return paint(move.toString()) + " ";
    });
    return pieces.join("");
}
// Writes the formatted move list to stdout. When erasePrevious is truthy,
// the current terminal line is cleared and the cursor reset to column 0 first.
function printMoves(moves, selectedIndex, erasePrevious) {
    if (erasePrevious) {
        const esc = ansi_escape_sequences_1.default;
        process.stdout.write(esc.erase.inLine(2));
        process.stdout.write(esc.cursor.horizontalAbsolute(0));
    }
    process.stdout.write(formatMoves(moves, selectedIndex));
}
// Entry point: prompts for configuration, wires up the table's message
// stream (messages are '!'-separated "header:body" pairs), and drives an
// interactive keyboard loop — left/right selects a move, return plays it,
// 'a' appends, ctrl-c exits.
async function run() {
    const config = await prompts_1.default(configuration_1.questions);
    let table = new TableExt_1.default();
    let moves = [];
    let selectedMoveIndex = 0;
    table.stream.on('readable', () => {
        // NOTE(review): stream.read() can return null on 'readable', which
        // would make .toString() throw — confirm the peer always has data.
        let msgs = table.stream.read().toString().split('!');
        for (let msg of msgs) {
            let [header, body] = msg.split(':');
            if (header === "show") {
                // Board render received; immediately request the move list.
                console.log(body);
                table.stream.write("moves");
            }
            else if (header === "moves") {
                moves = parseMoves(body);
                printMoves(moves, selectedMoveIndex);
                // Park the cursor back on the previous line so the next
                // render overwrites the move list in place.
                process.stdout.write(ansi_escape_sequences_1.default.cursor.previousLine(1));
            }
            else {
                // Unknown header: pass the raw message through.
                console.log(msg);
            }
        }
    });
    table.init(config.columns);
    let printer = new TableWidget_1.default(table, configuration_1.defaultPalette);
    table.renderer = (table) => {
        process.stdout.write(ansi_escape_sequences_1.default.erase.display(2));
        return printer.format();
    };
    // Palette rule: paint the two board positions of the currently selected
    // move in red whenever playable moves exist.
    configuration_1.defaultPalette.rules.push(new Palette_1.PaletteCellRule((table, cell) => {
        let name = cell.toString();
        let moves = table.playableMoves;
        return moves.length > 0 &&
            (name === moves[selectedMoveIndex]?.positions[0].toString()
                || name === moves[selectedMoveIndex]?.positions[1].toString());
    }, chalk_1.default.red, -1));
    configuration_1.defaultPalette.updateRules();
    table.stream.write("show");
    // Raw keyboard mode: react to single keypresses without waiting for Enter.
    readline_1.default.emitKeypressEvents(process.stdin);
    process.stdin.setRawMode(true);
    process.stdin.resume();
    process.stdin.on('keypress', (str, key) => {
        if (key.ctrl && key.name === 'c') {
            process.stdout.write("\n");
            process.exit();
        }
        else if (key.name === 'a') {
            table.stream.write("append");
            selectedMoveIndex = 0;
        }
        else if (moves.length > 0) {
            if (key.name === 'right') {
                // Cycle forward through the move list (wraps via modulo).
                ++selectedMoveIndex;
                selectedMoveIndex %= moves.length;
                process.stdout.write(ansi_escape_sequences_1.default.erase.display(2));
                printer.print();
                printMoves(moves, selectedMoveIndex);
            }
            else if (key.name === 'left') {
                // Cycle backward, wrapping to the last move below zero.
                --selectedMoveIndex;
                if (selectedMoveIndex < 0) {
                    selectedMoveIndex = moves.length - 1;
                    ;
                }
                process.stdout.write(ansi_escape_sequences_1.default.erase.display(2));
                printer.print();
                printMoves(moves, selectedMoveIndex);
            }
            else if (key.name === "return") {
                // Play the selected move by sending its textual form.
                table.stream.write(moves[selectedMoveIndex].toString());
                selectedMoveIndex = 0;
            }
        }
    });
}
run();
//# sourceMappingURL=index.js.map
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/textproto"
"google.golang.org/api/googleapi"
)
const sniffBuffSize = 512
// NewContentSniffer wraps r in a ContentSniffer.
func NewContentSniffer(r io.Reader) *ContentSniffer {
	sniffer := &ContentSniffer{r: r}
	return sniffer
}
// ContentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
// Sniffing happens lazily on the first Read or ContentType call.
type ContentSniffer struct {
	r       io.Reader
	start   []byte // buffer for the sniffed bytes.
	err     error  // set to any error encountered while reading bytes to be sniffed.
	ctype   string // set on first sniff.
	sniffed bool   // set to true on first sniff.
}
// Read first drains any bytes buffered during sniffing, then any sniff-time
// error, and only then delegates to the wrapped reader — so callers see the
// exact byte stream of the underlying Reader.
func (sct *ContentSniffer) Read(p []byte) (n int, err error) {
	// Ensure that the content type is sniffed before any data is consumed from Reader.
	_, _ = sct.ContentType()
	if len(sct.start) > 0 {
		n := copy(p, sct.start)
		sct.start = sct.start[n:]
		return n, nil
	}
	// We may have read some bytes into start while sniffing, even if the read ended in an error.
	// We should first return those bytes, then the error.
	if sct.err != nil {
		return 0, sct.err
	}
	// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
	return sct.r.Read(p)
}
// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
// The sniff runs at most once; subsequent calls return the cached result.
func (sct *ContentSniffer) ContentType() (string, bool) {
	if sct.sniffed {
		return sct.ctype, sct.ctype != ""
	}
	sct.sniffed = true
	// If ReadAll hits EOF, it returns err==nil.
	sct.start, sct.err = ioutil.ReadAll(io.LimitReader(sct.r, sniffBuffSize))
	// Don't try to detect the content type based on possibly incomplete data.
	if sct.err != nil {
		return "", false
	}
	sct.ctype = http.DetectContentType(sct.start)
	return sct.ctype, true
}
// IncludeMedia combines an existing HTTP body with media content to create a multipart/related HTTP body.
//
// bodyp is an in/out parameter. It should initially point to the
// reader of the application/json (or whatever) payload to send in the
// API request. It's updated to point to the multipart body reader.
//
// ctypep is an in/out parameter. It should initially point to the
// content type of the bodyp, usually "application/json". It's updated
// to the "multipart/related" content type, with random boundary.
//
// The return value is a function that can be used to close the bodyp Reader with an error.
func IncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) func() {
	var mediaType string
	media, mediaType = getMediaType(media)
	body, bodyType := *bodyp, *ctypep
	// The multipart body is produced on the fly through a pipe: the reader
	// end replaces *bodyp, the writer end is fed by the goroutine below.
	pr, pw := io.Pipe()
	mpw := multipart.NewWriter(pw)
	*bodyp = pr
	*ctypep = "multipart/related; boundary=" + mpw.Boundary()
	go func() {
		// Part 1: the original request body (e.g. application/json).
		w, err := mpw.CreatePart(typeHeader(bodyType))
		if err != nil {
			mpw.Close()
			pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err))
			return
		}
		_, err = io.Copy(w, body)
		if err != nil {
			mpw.Close()
			pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err))
			return
		}
		// Part 2: the media payload.
		w, err = mpw.CreatePart(typeHeader(mediaType))
		if err != nil {
			mpw.Close()
			pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err))
			return
		}
		_, err = io.Copy(w, media)
		if err != nil {
			mpw.Close()
			pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err))
			return
		}
		// Closing the multipart writer emits the final boundary; closing
		// the pipe signals EOF to the consumer.
		mpw.Close()
		pw.Close()
	}()
	return func() { pw.CloseWithError(errAborted) }
}
// errAborted is delivered through the pipe when the caller cancels an
// upload via the function returned by IncludeMedia.
var errAborted = errors.New("googleapi: upload aborted")

// getMediaType returns media (possibly wrapped in a ContentSniffer) and its
// content type. A googleapi.ContentTyper is trusted directly; otherwise the
// type is sniffed, defaulting to application/octet-stream on failure.
func getMediaType(media io.Reader) (io.Reader, string) {
	if typer, ok := media.(googleapi.ContentTyper); ok {
		return media, typer.ContentType()
	}
	sniffer := NewContentSniffer(media)
	typ, ok := sniffer.ContentType()
	if !ok {
		// TODO(mcgreevy): Remove this default. It maintains the semantics of the existing code,
		// but should not be relied on.
		typ = "application/octet-stream"
	}
	return sniffer, typ
}
// DetectMediaType detects and returns the content type of the provided media.
// If the type can not be determined, "application/octet-stream" is returned.
// A googleapi.ContentTyper is trusted directly; otherwise up to 1024 bytes
// are read from offset 0 and passed to http.DetectContentType.
func DetectMediaType(media io.ReaderAt) string {
	if typer, ok := media.(googleapi.ContentTyper); ok {
		return typer.ContentType()
	}
	probe := make([]byte, 1024)
	n, err := media.ReadAt(probe, 0)
	if err != nil && err != io.EOF {
		return "application/octet-stream"
	}
	return http.DetectContentType(probe[:n])
}
// typeHeader builds a MIME header carrying only the given Content-Type.
func typeHeader(contentType string) textproto.MIMEHeader {
	return textproto.MIMEHeader{
		"Content-Type": {contentType},
	}
}
|
#!/bin/bash
# Regression harness: converts every GEDCOM 5 sample under tests/5 with
# ./ged5to7 and compares the output against the expected GEDCOM 7 file
# under tests/7. Pass any argument to see full diffs instead of silent cmp.

make --quiet

# Choose the comparison command once, outside the loop. $cmp is expanded
# unquoted below on purpose so "cmp -s" splits into command + flag.
if [ "$#" -gt 0 ]; then
  cmp="diff"
else
  cmp="cmp -s"
fi

find tests/5 -type f -name '*.ged' | sort -V | while read -r v5
do
  v7="tests/7${v5#*5}"
  ./ged5to7 "$v5" 2>/dev/null | $cmp "$v7" - && echo " OK" "${v5#*5/}" || echo FAILED "${v5#*5/}"
done
|
"use strict";
// Auto-generated icon definition module (CommonJS output of a TS build).
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.u1F17F = void 0;
// SVG description for the U+1F17F glyph (negative squared Latin letter P,
// the "parking" sign): one path inside a 2600 x 2760.837 viewBox.
var u1F17F = {
"viewBox": "0 0 2600 2760.837",
"children": [{
"name": "path",
"attribs": {
"d": "M2222 393q45 0 78 32.5t33 79.5v1845q0 47-33 79t-78 32H377q-45 0-78-32t-33-79V505q0-47 33-79.5t78-32.5h1845zm-799 259H763q-14 0-23.5 9.5T730 684v1490q0 13 9.5 23t23.5 10h322q14 0 24-10t10-23v-474h304q219 0 375-155t156-370q0-214-156-368.5T1423 652zm-41 306q84 0 146.5 64.5T1591 1175t-61.5 152-147.5 64h-257q-2 0-6-2.5t-4-5.5V967q0-2 4-5.5t6-3.5h257z"
},
"children": []
}]
};
exports.u1F17F = u1F17F;
/**
 * Computes n! iteratively.
 * @param {number} n - the input; any value below 2 yields 1, matching the
 *   original behavior (no validation of negatives or non-integers).
 * @returns {number} the factorial of n.
 */
function factorial(n)
{
    let result = 1;
    // Block-scoped loop index (the original `var` leaked to function scope).
    for (let i = 2; i <= n; i++) {
        result *= i;
    }
    return result;
}
console.log(factorial(6)); //720
#!/usr/bin/env bash
# Installs the Python packages needed to build ports on Windows.

# fail on error
set -e
set -o pipefail

# install packages
declare -ar pippkgs=(
  # enough to run dev to build ports on windows
  'dataclassy'
  'networkx'
  'region-profiler'
  'typer'              # was listed twice; duplicate removed
  'dynawheel'
  'python-gitlab'
  'python-on-whales'
  'pyyaml'
  'twine'
  # build sys
  'pypreprocessor' # cpp build
)

# crank on the packages
# Quoted expansion keeps each package name a single argument.
pip install --pre "${pippkgs[@]}"

# setup of for precommit
# ${ASI}/bin/intstall-pre-commit
|
import React from 'react';

// Controlled <select> listing fixed choices (Name/Age/Gender) plus a
// placeholder entry. `name`, `value` and `onChange` are forwarded directly
// to the underlying element; the parent owns the selection state.
const Dropdown = (props) => {
  return (
    <select
      name={props.name}
      value={props.value}
      onChange={props.onChange}>
      <option value="">Please select</option>
      <option value="Name">Name</option>
      <option value="Age">Age</option>
      <option value="Gender">Gender</option>
    </select>
  )
}

export default Dropdown;
// State struct representing the current value of the counter
struct State {
    var counter: Int
}

// Enum defining the actions that can be dispatched
enum Action {
    case increment
    case decrement
}

// Pure state-transition function: maps (state, action) to the next state.
// Decrement is clamped so the counter never drops below zero.
func reducer(state: State, action: Action) -> State {
    switch action {
    case .increment:
        return State(counter: state.counter + 1)
    case .decrement:
        return State(counter: max(0, state.counter - 1))
    }
}
// Unit tests for the reducer function
// NOTE(review): no `import XCTest` is visible in this chunk — confirm it
// exists earlier in the file, otherwise this class will not compile.
class ReducerTests: XCTestCase {
    // Fresh zeroed state rebuilt before every test by setUp().
    var initialState: State!
    override func setUp() {
        super.setUp()
        initialState = State(counter: 0)
    }
    func testInitialState() {
        // NOTE(review): despite its name this duplicates
        // testIncrementAction; asserting initialState.counter == 0 would
        // better match the name — confirm intent.
        let newState = reducer(state: initialState, action: .increment)
        XCTAssertEqual(newState.counter, 1)
    }
    func testIncrementAction() {
        let newState = reducer(state: initialState, action: .increment)
        XCTAssertEqual(newState.counter, 1)
    }
    func testDecrementAction() {
        // Decrementing from zero stays at zero (clamped).
        let newState = reducer(state: initialState, action: .decrement)
        XCTAssertEqual(newState.counter, 0)
    }
    func testDecrementBelowZero() {
        // NOTE(review): name says "below zero" but this decrements 3 -> 2;
        // the at-zero clamp is actually covered by testDecrementAtZero.
        let stateWithNonZeroCounter = State(counter: 3)
        let newState = reducer(state: stateWithNonZeroCounter, action: .decrement)
        XCTAssertEqual(newState.counter, 2)
    }
    func testDecrementAtZero() {
        let newState = reducer(state: initialState, action: .decrement)
        XCTAssertEqual(newState.counter, 0)
    }
}
module Easymarklet
  # Rails generator that creates a bare-bones bookmarklet JS asset.
  # Usage: rails generate easymarklet:bare NAME
  class BareGenerator < Rails::Generators::NamedBase
    source_root File.expand_path('../templates', __FILE__)

    # Copies the bare template to app/assets/javascripts/<name>_bookmarklet.js.
    def copy_js_file
      template "bare_bookmarklet.js", "app/assets/javascripts/#{file_name}_bookmarklet.js"
    end

    # Prints a hint showing how to link to the generated bookmarklet.
    def display_msg
      puts ""
      puts "You can link to your new bookmarklet with this :"
      puts ""
      puts "<%= link_to '#{file_name.titleize}', easymarklet_js('#{file_name}_bookmarklet.js') %>"
      puts ""
    end
  end
end
def is_palindrome(word):
    """Return True if `word` reads the same forwards and backwards.

    The comparison is case-sensitive and includes every character
    (spaces and punctuation count). The empty string is a palindrome.

    Args:
        word: The string to test.

    Returns:
        bool: True when `word` equals its reverse, False otherwise.
    """
    # Direct boolean expression replaces the if/else-returning-True/False
    # anti-pattern of the original.
    return word == word[::-1]


result = is_palindrome("noon")
print(result)
// Copyright 2021 The Rode Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"flag"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
)
var _ = Describe("client flags", func() {
var (
flagSet *flag.FlagSet
actualConfig *ClientConfig
expectedClientId = fake.LetterN(10)
expectedClientSecret = fake.UUID()
expectedTokenUrl = fake.URL()
expectedScopes = strings.Join([]string{fake.LetterN(10), fake.LetterN(10)}, " ")
expectedUsername = fake.LetterN(10)
expectedPassword = fake.LetterN(10)
expectedRodeHost = fake.LetterN(10)
)
BeforeEach(func() {
flagSet = flag.NewFlagSet("rode-client", flag.ContinueOnError)
actualConfig = SetupRodeClientFlags(flagSet)
})
DescribeTable("flag parsing",
func(flags []string, expectedConfig *ClientConfig) {
err := flagSet.Parse(flags)
Expect(err).ToNot(HaveOccurred())
Expect(actualConfig).To(Equal(expectedConfig))
},
Entry("defaults", []string{}, &ClientConfig{
Rode: &RodeClientConfig{
Host: "rode:50051",
},
OIDCAuth: &OIDCAuthConfig{},
BasicAuth: &BasicAuthConfig{},
}),
Entry("rode config", []string{
"--rode-host=" + expectedRodeHost,
"--rode-insecure-disable-transport-security",
}, &ClientConfig{
Rode: &RodeClientConfig{
Host: expectedRodeHost,
DisableTransportSecurity: true,
},
OIDCAuth: &OIDCAuthConfig{},
BasicAuth: &BasicAuthConfig{},
}),
Entry("oidc auth", []string{
"--oidc-client-id=" + expectedClientId,
"--oidc-client-secret=" + expectedClientSecret,
"--oidc-token-url=" + expectedTokenUrl,
"--oidc-tls-insecure-skip-verify",
"--oidc-scopes=" + expectedScopes,
}, &ClientConfig{
Rode: &RodeClientConfig{
Host: "rode:50051",
},
OIDCAuth: &OIDCAuthConfig{
ClientID: expectedClientId,
ClientSecret: expectedClientSecret,
Scopes: expectedScopes,
TokenURL: expectedTokenUrl,
TlsInsecureSkipVerify: true,
},
BasicAuth: &BasicAuthConfig{},
}),
Entry("basic auth", []string{
"--basic-auth-username=" + expectedUsername,
"--basic-auth-password=" + expectedPassword,
}, &ClientConfig{
Rode: &RodeClientConfig{
Host: "rode:50051",
},
OIDCAuth: &OIDCAuthConfig{},
BasicAuth: &BasicAuthConfig{
Username: expectedUsername,
Password: <PASSWORD>,
},
}),
Entry("proxy auth", []string{
"--proxy-auth",
}, &ClientConfig{
Rode: &RodeClientConfig{
Host: "rode:50051",
},
OIDCAuth: &OIDCAuthConfig{},
BasicAuth: &BasicAuthConfig{},
ProxyAuth: true,
}),
)
})
|
/*
* Copyright [2020-2030] [https://www.stylefeng.cn]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Guns采用APACHE LICENSE 2.0开源协议,您在使用过程中,需要注意以下几点:
*
* 1.请不要删除和修改根目录下的LICENSE文件。
* 2.请不要删除和修改Guns源码头部的版权声明。
* 3.请保留源码和相关描述文件的项目出处,作者声明等。
* 4.分发源码时候,请注明软件出处 https://gitee.com/stylefeng/guns
* 5.在修改包名,模块名称,项目代码等时,请注明软件出处 https://gitee.com/stylefeng/guns
* 6.若您的项目无法满足以上几点,可申请商业授权
*/
package cn.stylefeng.roses.kernel.timer.modular.param;
import cn.stylefeng.roses.kernel.rule.annotation.ChineseDescription;
import cn.stylefeng.roses.kernel.rule.pojo.request.BaseRequest;
import lombok.Data;
import lombok.EqualsAndHashCode;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;
/**
 * Timer (scheduled task) request parameters.
 *
 * NOTE(review): validation groups add/edit/detail/delete are presumably
 * declared on BaseRequest — confirm.
 *
 * @author stylefeng
 * @date 2020/6/30 18:26
 */
@EqualsAndHashCode(callSuper = true)
@Data
public class SysTimersParam extends BaseRequest {
    /**
     * Timer id (primary key). Required for edit/detail/delete/start/stop.
     */
    @NotNull(message = "主键timerId不能为空", groups = {edit.class, detail.class, delete.class, startTimer.class, stopTimer.class})
    @ChineseDescription("定时器id")
    private Long timerId;
    /**
     * Task name. Required for add/edit.
     */
    @NotBlank(message = "任务名称不能为空", groups = {add.class, edit.class})
    @ChineseDescription("任务名称")
    private String timerName;
    /**
     * Fully-qualified class name of the task to execute
     * (a class implementing the TimerTaskRunner interface).
     */
    @NotBlank(message = "任务的class的类名不能为空", groups = {add.class, edit.class})
    @ChineseDescription("执行任务的class的类名")
    private String actionClass;
    /**
     * Cron expression for the schedule. Required for add/edit.
     */
    @NotBlank(message = "定时任务表达式不能为空", groups = {add.class, edit.class})
    @ChineseDescription("定时任务表达式")
    private String cron;
    /**
     * Job status (dictionary: 1 = running, 2 = stopped).
     */
    @NotNull(message = "任务状态不能为空", groups = {edit.class})
    @ChineseDescription("状态(字典 1运行 2停止)")
    private Integer jobStatus;
    /**
     * Task parameters.
     */
    @ChineseDescription("参数")
    private String params;
    /**
     * Remark / notes.
     */
    @ChineseDescription("备注信息")
    private String remark;
    /**
     * Soft-delete flag.
     */
    @ChineseDescription("是否删除标记")
    private String delFlag;
    /**
     * Validation group marker: start a timer.
     */
    public @interface startTimer {
    }
    /**
     * Validation group marker: stop a timer.
     */
    public @interface stopTimer {
    }
}
|
class CompressedDataStructure():
    """Minimal key-value store backed by a dict.

    NOTE(review): despite the name, no compression is performed — values
    are stored as-is. Confirm whether compression was intended.
    """

    def __init__(self):
        # Backing store mapping keys to values.
        self.data = {}

    def insert(self, key, value):
        """Insert or overwrite the value stored under ``key``."""
        self.data[key] = value

    def search(self, key):
        """Return the value stored under ``key``.

        Raises:
            KeyError: if ``key`` is absent. The key is included in the
                exception (the original raised a bare KeyError with no
                context), which is backward compatible for callers
                catching KeyError.
        """
        if key in self.data:
            return self.data[key]
        raise KeyError(key)

    def delete(self, key):
        """Remove ``key`` from the store.

        Raises:
            KeyError: if ``key`` is absent (standard dict semantics).
        """
        del self.data[key]
<filename>src/routing/spline/bundling/SdBoneEdge.ts
// import {Point} from '../../../math/geometry/point'
// import {Assert} from '../../../utils/assert'
// import {CdtEdge} from '../../ConstrainedDelaunayTriangulation/CdtEdge'
// import {VisibilityEdge} from '../../visibility/VisibilityEdge'
// import {SdVertex} from './SdVertex'
// // [DebuggerDisplay("({SourcePoint.x},{SourcePoint.y})->({TargetPoint.x},{TargetPoint.y})")]
// export class SdBoneEdge {
// VisibilityEdge: VisibilityEdge
// Source: SdVertex
// Target: SdVertex
// numberOfPassedPaths: number
// constructor(
// visibilityEdge: VisibilityEdge,
// source: SdVertex,
// target: SdVertex,
// ) {
// this.VisibilityEdge = visibilityEdge
// this.Source = source
// this.Target = target
// }
// get TargetPoint(): Point {
// return this.Target.point
// }
// get SourcePoint(): Point {
// return this.Source.point
// }
// get IsOccupied(): boolean {
// return this.numberOfPassedPaths > 0
// }
// CrossedCdtEdges: Set<CdtEdge>
// get IsPassable(): boolean {
// return (
// this.Target.IsTargetOfRouting ||
// this.Source.IsSourceOfRouting ||
// this.VisibilityEdge.IsPassable == null ||
// this.VisibilityEdge.IsPassable()
// )
// }
// AddOccupiedEdge() {
// this.numberOfPassedPaths++
// }
// RemoveOccupiedEdge() {
// this.numberOfPassedPaths--
// /*Assert.assert(this.numberOfPassedPaths >= 0)*/
// }
// }
|
#!/bin/sh
# Initialize the current directory as a git repository, commit everything and
# push it to https://<git_host>/<git_user_id>/<git_repo_id>.git (branch master).
#
# Usage: git_push.sh [git_user_id] [git_repo_id] [release_note] [git_host]
# If $GIT_TOKEN is set, it is used for HTTPS authentication.
git_user_id=$1
git_repo_id=$2
release_note=$3
git_host=$4
# Fall back to sensible defaults for any argument that was not supplied.
if [ -z "$git_host" ]; then
    git_host="github.com"
    echo "[INFO] No command line input provided. Set \$git_host to $git_host"
fi
if [ -z "$git_user_id" ]; then
    git_user_id="ionos-cloud"
    echo "[INFO] No command line input provided. Set \$git_user_id to $git_user_id"
fi
if [ -z "$git_repo_id" ]; then
    git_repo_id="sdk-python"
    echo "[INFO] No command line input provided. Set \$git_repo_id to $git_repo_id"
fi
if [ -z "$release_note" ]; then
    release_note="Minor update"
    echo "[INFO] No command line input provided. Set \$release_note to $release_note"
fi
# Initialize the local directory as a Git repository (no-op if already one).
git init
# Adds the files in the local repository and stages them for commit.
git add .
# Commits the tracked changes and prepares them to be pushed to a remote repository.
git commit -m "$release_note"
# Set the remote unless one is already configured.
git_remote=$(git remote)
if [ -z "$git_remote" ]; then
    if [ -z "$GIT_TOKEN" ]; then
        echo "[INFO] \$GIT_TOKEN (environment variable) is not set. Using the git credential in your environment."
        git remote add origin "https://${git_host}/${git_user_id}/${git_repo_id}.git"
    else
        # NOTE: embedding the token in the remote URL persists it in .git/config.
        git remote add origin "https://${git_user_id}:${GIT_TOKEN}@${git_host}/${git_user_id}/${git_repo_id}.git"
    fi
fi
git pull origin master
# Push the changes up to the remote repository; the grep hides the remote URL
# (which may contain $GIT_TOKEN) from the output.
echo "Git pushing to https://${git_host}/${git_user_id}/${git_repo_id}.git"
git push origin master 2>&1 | grep -v 'To https'
/*
 * gzip_constants.h - constants for the gzip wrapper format
 *
 * Field layout and values follow RFC 1952 (GZIP file format specification).
 */
#ifndef LIB_GZIP_CONSTANTS_H
#define LIB_GZIP_CONSTANTS_H
/* Fixed header is 10 bytes; optional fields (FEXTRA/FNAME/...) may follow. */
#define GZIP_MIN_HEADER_SIZE 10
/* Footer: CRC32 (4 bytes) + ISIZE, the uncompressed size mod 2^32 (4 bytes). */
#define GZIP_FOOTER_SIZE 8
#define GZIP_MIN_OVERHEAD (GZIP_MIN_HEADER_SIZE + GZIP_FOOTER_SIZE)
/* Magic bytes identifying a gzip stream.
 * NOTE(review): byte(...) / uint8_t(...) are function-style casts, so this
 * header appears intended for C++ (with a `byte` type defined elsewhere) —
 * confirm, since these expressions are not valid in plain C. */
#define GZIP_ID1 byte(0x1F)
#define GZIP_ID2 byte(0x8B)
/* CM field: 8 = DEFLATE, the only compression method in common use. */
#define GZIP_CM_DEFLATE byte(8)
/* FLG bit flags (RFC 1952, section 2.3.1). */
#define GZIP_FTEXT uint8_t(0x01)
#define GZIP_FHCRC uint8_t(0x02)
#define GZIP_FEXTRA uint8_t(0x04)
#define GZIP_FNAME uint8_t(0x08)
#define GZIP_FCOMMENT uint8_t(0x10)
/* Upper three FLG bits are reserved and must be zero. */
#define GZIP_FRESERVED uint8_t(0xE0)
#define GZIP_MTIME_UNAVAILABLE 0
/* XFL hints for DEFLATE.
 * NOTE(review): "COMRESSION" is a typo for "COMPRESSION"; left unchanged
 * because renaming the macros would break existing users of this header. */
#define GZIP_XFL_SLOWEST_COMRESSION 0x02
#define GZIP_XFL_FASTEST_COMRESSION 0x04
/* OS field values (RFC 1952, section 2.3.1). */
#define GZIP_OS_FAT 0
#define GZIP_OS_AMIGA 1
#define GZIP_OS_VMS 2
#define GZIP_OS_UNIX 3
#define GZIP_OS_VM_CMS 4
#define GZIP_OS_ATARI_TOS 5
#define GZIP_OS_HPFS 6
#define GZIP_OS_MACINTOSH 7
#define GZIP_OS_Z_SYSTEM 8
#define GZIP_OS_CP_M 9
#define GZIP_OS_TOPS_20 10
#define GZIP_OS_NTFS 11
#define GZIP_OS_QDOS 12
#define GZIP_OS_RISCOS 13
#define GZIP_OS_UNKNOWN 255
#endif /* LIB_GZIP_CONSTANTS_H */
|
/* © 2017
* @author <NAME>
*/
/* eslint-disable react/no-array-index-key */
import React, { Component } from 'react';
import { withTracker } from 'meteor/react-meteor-data';
import PropTypes from 'proptypes';
import { Songs } from '../collections';
import SongList from './SongList';
const queryLimit = 20;
// "History" tab: lists the songs the signed-in user previously added in the
// current room. The first page arrives reactively via the withTracker
// container; further pages are fetched imperatively by _loadMoreSong.
class TabHistory extends Component {
  static propTypes = {
    songs: PropTypes.arrayOf(PropTypes.object),
    totalSongs: PropTypes.number,
    currentRoom: PropTypes.shape(),
  };
  static defaultProps = {
    songs: [],
    totalSongs: 0,
    currentRoom: {},
  };
  // songList stays empty until "Load more" is first used; render() falls
  // back to this.props.songs in that case. querySkip is the offset of the
  // next page (the container already supplies the first queryLimit songs).
  state = {
    songList: [],
    querySkip: queryLimit,
    currentSongsAmount: this.props.songs.length,
  };
  // Fetch the next queryLimit songs (newest first) and append them to the
  // accumulated list. NOTE(review): `Meteor` is referenced without an import
  // in this file — presumably provided globally by the Meteor build; confirm.
  _loadMoreSong = () => {
    const querySongs = Songs.find(
      {
        author: Meteor.userId(),
        roomId: this.props.currentRoom ? this.props.currentRoom._id : null,
      },
      {
        skip: this.state.querySkip,
        limit: queryLimit,
        sort: { timeAdded: -1 },
      }
    ).fetch();
    let songs = [];
    if (this.state.songList.length === 0) {
      songs = this.props.songs.slice();
    } else {
      songs = this.state.songList.slice();
    }
    songs = songs.concat(querySongs);
    this.setState({
      songList: songs,
      querySkip: this.state.querySkip + queryLimit,
      currentSongsAmount: songs.length,
    });
  };
  // The "Load more" button is rendered only while there are songs left to
  // fetch (totalSongs comes from the reactive container).
  render() {
    const { totalSongs, songs } = this.props;
    const { songList, currentSongsAmount } = this.state;
    return (
      <div>
        <SongList historyTab songs={songList.length > 0 ? songList : songs} />
        <div className="container">
          {totalSongs !== 0 && totalSongs !== currentSongsAmount ? (
            <div className="songs__list--center songs__list__control">
              <button onClick={this._loadMoreSong}>Load more</button>
            </div>
          ) : (
            ''
          )}
        </div>
      </div>
    );
  }
}
// Reactive container: supplies the first page (20 newest) of the user's songs
// in the current room plus the total count, which render() uses to decide
// whether the "Load more" button should appear.
// NOTE(review): the hard-coded limit 20 mirrors `queryLimit` above — keep
// them in sync if the page size ever changes.
export default withTracker(({ currentRoom }) => ({
  songs: Songs.find(
    {
      author: Meteor.userId(),
      roomId: currentRoom ? currentRoom._id : null,
    },
    {
      limit: 20,
      sort: { timeAdded: -1 },
    }
  ).fetch(),
  totalSongs: Songs.find({
    author: Meteor.userId(),
    roomId: currentRoom ? currentRoom._id : null,
  }).count(),
}))(TabHistory);
|
#!/bin/bash
# Conda post-link script: download the FlowSorted.DLPFC.450k data package from
# one of several mirrors, verify its md5sum, and install it into the
# environment's R library.
FN="FlowSorted.DLPFC.450k_1.28.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.13/data/experiment/src/contrib/FlowSorted.DLPFC.450k_1.28.0.tar.gz"
  "https://bioarchive.galaxyproject.org/FlowSorted.DLPFC.450k_1.28.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-flowsorted.dlpfc.450k/bioconductor-flowsorted.dlpfc.450k_1.28.0_src_all.tar.gz"
)
MD5="27988a648d68cdb7a2472d9135cf869e"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"
SUCCESS=0
for URL in "${URLS[@]}"; do
  # -f: fail on HTTP errors instead of saving the error page (which would
  # previously be written to $TARBALL and only caught by the md5 check);
  # -L: follow redirects, which some mirrors use.
  curl -fL "$URL" > "$TARBALL" || continue
  # Platform-specific md5 checks: md5sum on Linux, md5 on macOS.
  case "$(uname -s)" in
    Linux)
      # md5sum -c expects "<hash>  <file>" (two spaces before the name).
      if md5sum -c <<<"$MD5  $TARBALL"; then
        SUCCESS=1
        break
      fi
      ;;
    Darwin)
      # `md5 file` prints "MD5 (file) = <hash>"; the hash is field 4.
      if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
        SUCCESS=1
        break
      fi
      ;;
  esac
done
if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi
# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
#!/usr/bin/env bash
# Build the provisioning images in order and push each one to the private
# registry. Must run with root privileges (apt-get + docker).
if [[ $EUID -ne 0 ]]; then
  echo "This script must be run as sudo ./build_and_push_images.sh"
  exit 1
fi
# Tooling required to build the debootstrap-based base image.
apt-get -qq --yes --no-install-recommends install whois debootstrap apt-cacher-ng 2>/dev/null
registry='192.168.1.1'
proxy_url='http://192.168.1.1:3142'
linux='debian'
release='bullseye'
variant='slim'
version='latest'
origin="$linux:$release-$variant"
base="$release/base:$version"
# The base image is built straight from the upstream Debian image, routing
# package downloads through the apt-cacher-ng proxy.
docker build -t "$registry/$release/base:$version" 00-base/ --build-arg IMAGE="$origin" --build-arg http_proxy="$proxy_url" --build-arg https_proxy="$proxy_url"
docker push "$registry/$release/base:$version"
# Every remaining image derives from the base image just pushed; they all use
# the same build/push invocation, so iterate over "<dir> <image-name>" pairs.
for entry in \
  '01-disk-wipe disk-wipe' \
  '02-disk-partition disk-partition' \
  '03-install-root-fs install-root-fs' \
  '04-cloud-init cloud-init'; do
  set -- $entry
  build_dir=$1
  image_name=$2
  docker build -t "$registry/$release/$image_name:$version" "$build_dir/" --build-arg REGISTRY="$registry" --build-arg IMAGE="$base"
  docker push "$registry/$release/$image_name:$version"
done
|
def compare_strings(str_1, str_2):
    """Return ``str_1`` when both strings match; otherwise a readable report.

    The report lists the expected and actual strings followed by the output
    of :func:`diff_strings`.
    """
    if str_1 == str_2:
        return str_1
    report = [
        'The outputs differ.',
        'Expected:',
        str_1,
        'Actual:',
        str_2,
        diff_strings(str_1, str_2),
    ]
    return '\n'.join(report)
def diff_strings(str_1, str_2):
    """Return a human-readable description of the line differences.

    Implements the previously stubbed-out diff using :mod:`difflib.ndiff`:
    lines present only in ``str_1`` are prefixed with ``-``, lines present
    only in ``str_2`` with ``+``, and ``?`` lines point at intra-line edits.

    Returns:
        ``"Differences: none"`` when the strings are line-for-line equal,
        otherwise ``"Differences:"`` followed by the changed diff lines.
    """
    import difflib

    delta = difflib.ndiff(str_1.splitlines(), str_2.splitlines())
    # Keep only the lines that describe a change; '  ' lines are context.
    changed = [line for line in delta if line.startswith(('-', '+', '?'))]
    if not changed:
        return "Differences: none"
    return "Differences:\n" + "\n".join(changed)
#!/usr/bin/env bash
#
# Copyright (c) 2018 The Emircoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Check for assertions with obvious side effects.
export LC_ALL=C
# PRE31-C (SEI CERT C Coding Standard):
# "Assertions should not contain assignments, increment, or decrement operators."
VIOLATIONS=$(git grep -E '[^_]assert\(.*(\+\+|\-\-|[^=!<>]=[^=!<>]).*\);' -- "*.cpp" "*.h")
if [[ -n ${VIOLATIONS} ]]; then
    echo "Assertions should not have side effects:"
    echo
    echo "${VIOLATIONS}"
    exit 1
fi
exit 0
|
-- List every employee's name and salary, ordered alphabetically by name.
SELECT Name, Salary
FROM employees
ORDER BY Name ASC;
#!/bin/bash
# Kiosk launcher: open Chromium on the local pomodoro hub page, then start the
# do-not-disturb server.
# NOTE(review): chromium-browser is not backgrounded, so the server only
# starts after the browser exits — confirm this ordering is intended.
chromium-browser --disable-session-crashed-bubble --disable-infobars http://0.0.0.0:5000/hub
sudo python3 /home/pi/Desktop/pomoDoNotDisturb/server.py
#!/bin/bash
# Incremental USB backup of the NAS shares via rsync-time-backup.
# For every source directory, a matching target directory is created on the
# USB volume, a backup.marker file is placed there (required by
# rsync-time-backup), and the backup script is invoked.
#
# Refactored from 24 copy-pasted mkdir/touch/sh triples into one helper so
# new shares only need a single line.

SRC_ROOT=/volume1/NAS
DST_ROOT=/volumeUSB1/usbshare
BACKUP_SCRIPT=/volume1/NAS/rsync-time-backup.sh

# backup <relative-path> [exclude-list]
#   $1 - directory path relative to both $SRC_ROOT and $DST_ROOT
#   $2 - optional exclude-list file passed through to rsync-time-backup
backup() {
    local rel=$1
    local exclude=$2
    mkdir -p -- "$DST_ROOT/$rel"
    touch "$DST_ROOT/$rel/backup.marker"
    if [ -n "$exclude" ]; then
        sh "$BACKUP_SCRIPT" "$SRC_ROOT/$rel" "$DST_ROOT/$rel" "$exclude"
    else
        sh "$BACKUP_SCRIPT" "$SRC_ROOT/$rel" "$DST_ROOT/$rel"
    fi
}

# Dokumente
backup dokumente/_data
backup dokumente/Apps
backup dokumente/Belege
backup dokumente/Bewerbung_Promotion
backup dokumente/FH
backup dokumente/v_latexvorlage
backup dokumente/v_ppvorlage
backup dokumente/v_wordvorlage
backup dokumente/v_pmvorlage
backup dokumente/Zerts
# E-Books
backup e_books
# Git-Auth
backup Git_auth
# Pics
backup Pics
backup Pictures
backup Kamera-Uploads
# Promotion
backup Promotion
# Workspace (the only share that uses an exclude list)
backup workspace /volume1/NAS/exclude-list.txt
# Masterthesis
backup Masterthesis
# Bachelor
backup dokumente/BA/data
# Termux build recipe metadata for the Node.js package; the termux-packages
# build system sources this file and reads the TERMUX_PKG_* variables.
TERMUX_PKG_HOMEPAGE=https://nodejs.org/
TERMUX_PKG_DESCRIPTION="Open Source, cross-platform JavaScript runtime environment"
TERMUX_PKG_LICENSE="MIT"
TERMUX_PKG_MAINTAINER="Yaksh Bariya <yakshbari4@gmail.com>"
TERMUX_PKG_VERSION=17.3.0
TERMUX_PKG_REVISION=1
TERMUX_PKG_SRCURL=https://nodejs.org/dist/v${TERMUX_PKG_VERSION}/node-v${TERMUX_PKG_VERSION}.tar.xz
TERMUX_PKG_SHA256=e4e4c4e64854698f2590144a177bcc7e7c0befb52020288fdae5c0da0d015d03
# Note that we do not use a shared libuv to avoid an issue with the Android
# linker, which does not use symbols of linked shared libraries when resolving
# symbols on dlopen(). See https://github.com/termux/termux-packages/issues/462.
TERMUX_PKG_DEPENDS="libc++, openssl, c-ares, libicu, zlib"
TERMUX_PKG_CONFLICTS="nodejs-lts, nodejs-current"
TERMUX_PKG_BREAKS="nodejs-dev"
TERMUX_PKG_REPLACES="nodejs-current, nodejs-dev"
TERMUX_PKG_SUGGESTS="clang, make, pkg-config, python"
TERMUX_PKG_RM_AFTER_INSTALL="lib/node_modules/npm/html lib/node_modules/npm/make.bat share/systemtap lib/dtrace"
# Node's build must run inside the source tree, and parts of its toolchain
# (mksnapshot, torque) must be built for the build host first.
TERMUX_PKG_BUILD_IN_SRC=true
TERMUX_PKG_HOSTBUILD=true
# Build-system hook that runs right after the source tarball is unpacked.
termux_step_post_get_source() {
	# Prevent caching of host build:
	rm -Rf $TERMUX_PKG_HOSTBUILD_DIR
}
# Build ICU for the *build host*. Node's host tools (mksnapshot, torque, …)
# are linked against these libraries during cross-compilation; see the perl
# substitution in termux_step_configure below.
termux_step_host_build() {
	local ICU_VERSION=70.1
	local ICU_TAR=icu4c-${ICU_VERSION//./_}-src.tgz
	local ICU_DOWNLOAD=https://github.com/unicode-org/icu/releases/download/release-${ICU_VERSION//./-}/$ICU_TAR
	# termux_download verifies the sha256 given as the third argument.
	termux_download \
		$ICU_DOWNLOAD\
		$TERMUX_PKG_CACHEDIR/$ICU_TAR \
		8d205428c17bf13bb535300669ed28b338a157b1c01ae66d31d0d3e2d47c3fd5
	tar xf $TERMUX_PKG_CACHEDIR/$ICU_TAR
	cd icu/source
	# On a 32-bit target the host ICU must match the target word size.
	if [ "$TERMUX_ARCH_BITS" = 32 ]; then
		./configure --prefix $TERMUX_PKG_HOSTBUILD_DIR/icu-installed \
			--disable-samples \
			--disable-tests \
			--build=i686-pc-linux-gnu "CFLAGS=-m32" "CXXFLAGS=-m32" "LDFLAGS=-m32"
	else
		./configure --prefix $TERMUX_PKG_HOSTBUILD_DIR/icu-installed \
			--disable-samples \
			--disable-tests
	fi
	make -j $TERMUX_MAKE_PROCESSES install
}
# Configure Node for cross-compilation to the Android target, then patch the
# generated host-tool makefiles to link against the host ICU built above.
termux_step_configure() {
	local DEST_CPU
	# Map Termux arch names onto Node/V8 --dest-cpu values.
	if [ $TERMUX_ARCH = "arm" ]; then
		DEST_CPU="arm"
	elif [ $TERMUX_ARCH = "i686" ]; then
		DEST_CPU="ia32"
	elif [ $TERMUX_ARCH = "aarch64" ]; then
		DEST_CPU="arm64"
	elif [ $TERMUX_ARCH = "x86_64" ]; then
		DEST_CPU="x64"
	else
		termux_error_exit "Unsupported arch '$TERMUX_ARCH'"
	fi
	# Host-side tools are compiled with the build machine's gcc toolchain.
	export GYP_DEFINES="host_os=linux"
	export CC_host=gcc
	export CXX_host=g++
	export LINK_host=g++
	LDFLAGS+=" -ldl"
	# See note above TERMUX_PKG_DEPENDS why we do not use a shared libuv.
	./configure \
		--prefix=$TERMUX_PREFIX \
		--dest-cpu=$DEST_CPU \
		--dest-os=android \
		--shared-cares \
		--shared-openssl \
		--shared-zlib \
		--with-intl=system-icu \
		--cross-compiling
	export LD_LIBRARY_PATH=$TERMUX_PKG_HOSTBUILD_DIR/icu-installed/lib
	# Rewrite the LIBS line of each generated host-tool makefile so the host
	# binaries link against the host-built ICU instead of the target one.
	perl -p -i -e "s@LIBS := \\$\\(LIBS\\)@LIBS := -L$TERMUX_PKG_HOSTBUILD_DIR/icu-installed/lib -lpthread -licui18n -licuuc -licudata -ldl -lz@" \
		$TERMUX_PKG_SRCDIR/out/tools/v8_gypfiles/mksnapshot.host.mk \
		$TERMUX_PKG_SRCDIR/out/tools/v8_gypfiles/torque.host.mk \
		$TERMUX_PKG_SRCDIR/out/tools/v8_gypfiles/bytecode_builtins_list_generator.host.mk \
		$TERMUX_PKG_SRCDIR/out/tools/v8_gypfiles/v8_libbase.host.mk \
		$TERMUX_PKG_SRCDIR/out/tools/v8_gypfiles/gen-regexp-special-case.host.mk
}
# Debian maintainer scripts: after installation, enable verbose npm lifecycle
# script output. The <<- here-doc strips the leading tabs.
termux_step_create_debscripts() {
	cat <<- EOF > ./postinst
	#!$TERMUX_PREFIX/bin/sh
	npm config set foreground-scripts true
	EOF
}
|
#!/bin/bash
# Copyright 2020 The Kubernetes Authors.
# SPDX-License-Identifier: Apache-2.0
#
# Regenerate kubernetesapi/<version>/swagger.go from the checked-in
# swagger.json using go-bindata. The Go package name is the version with
# the dots stripped (e.g. "v1212").
#
# Usage: $0 <kubernetes-api-version>

set -euo pipefail

MYGOBIN=$(go env GOPATH)/bin
# Fail fast with a usage message when the version argument is missing
# (previously an empty $1 silently produced "kubernetesapi//swagger.go").
VERSION=${1:?usage: $0 <kubernetes-api-version>}

"$MYGOBIN"/go-bindata \
  --pkg "${VERSION//.}" \
  -o "kubernetesapi/${VERSION//.}/swagger.go" \
  "kubernetesapi/${VERSION//.}/swagger.json"
|
#!/usr/bin/env bash
# Deploy the "notebook" Spring Boot application as a systemd service:
# creates a dedicated system user, seeds a default production config on the
# first run, installs the jar under /opt and (re)starts the unit.
USERNAME=notebook
APPNAME=notebook
# System account the service runs as (no home directory, no login shell).
sudo adduser --system --group --no-create-home ${USERNAME}
sudo mkdir -p /opt/${APPNAME}/config
sudo mkdir -p /opt/${APPNAME}/log
# Seed a default production config only if none exists yet, so manual edits
# survive redeployments.
if [[ ! -f /opt/${APPNAME}/config/application-production.yaml ]]; then
cat <<EOT > /tmp/${APPNAME}.yaml
cn:
  spark2fire:
    auth:
      jwt:
        secretKey: "ThisIsMyJWTSecretKeyAtLeast256bit!"
spring:
  datasource:
    username: ${USERNAME}
    password: password
logging:
  file:
    name: /opt/${APPNAME}/log/${APPNAME}.log
EOT
sudo cp /tmp/${APPNAME}.yaml /opt/${APPNAME}/config/application-production.yaml
fi
sudo chown -R ${USERNAME}:${USERNAME} /opt/${APPNAME}/
# systemd unit; SuccessExitStatus=143 treats SIGTERM shutdown (128+15) of the
# JVM as a clean exit.
cat <<EOT > /tmp/${APPNAME}.service
[Unit]
Description=${APPNAME} API
After=syslog.target
[Service]
User=${USERNAME}
WorkingDirectory=/opt/${APPNAME}
ExecStart=/opt/${APPNAME}/${APPNAME} --spring.profiles.active=production
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
EOT
# Install the unit file only once; then roll out the new jar and restart.
[[ -f /etc/systemd/system/${APPNAME}.service ]] || sudo cp /tmp/${APPNAME}.service /etc/systemd/system/
sudo systemctl stop ${APPNAME}.service
sudo cp ${APPNAME}.jar /opt/${APPNAME}/${APPNAME}
sudo chown ${USERNAME}:${USERNAME} /opt/${APPNAME}/${APPNAME}
sudo chmod a+x /opt/${APPNAME}/${APPNAME}
sudo systemctl enable ${APPNAME}.service
sudo systemctl restart ${APPNAME}.service
# Give the service a moment to come up before reporting its status.
sleep 3
sudo systemctl status ${APPNAME}.service
#!/bin/bash
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Trace commands (-x) and abort on the first failing command (-e).
set -xe
# Print command-line usage for this sample-test runner.
usage()
{
    echo "usage: run_kubeflow_test.sh
    [--results-gcs-dir GCS directory for the test results]
    [--dataflow-tft-image image path to the dataflow tft]
    [--dataflow-predict-image image path to the dataflow predict]
    [--dataflow-tfma-image image path to the dataflow tfma]
    [--dataflow-tfdv-image image path to the dataflow tfdv]
    [--dataproc-create-cluster-image image path to the dataproc create cluster]
    [--dataproc-delete-cluster-image image path to the dataproc delete cluster]
    [--dataproc-analyze-image image path to the dataproc analyze]
    [--dataproc-transform-image image path to the dataproc transform]
    [--dataproc-train-image image path to the dataproc train]
    [--dataproc-predict-image image path to the dataproc predict]
    [--kubeflow-dnntrainer-image image path to the kubeflow dnntrainer]
    [--kubeflow-deployer-image image path to the kubeflow deployer]
    [--local-confusionmatrix-image image path to the confusion matrix]
    [--local-roc-image image path to the roc]
    [--namespace namespace for the deployed pipeline system]
    [--test-name test name: tf-training, xgboost]
    [-h help]"
}
# Parse the long-form CLI options into their corresponding variables.
while [ "$1" != "" ]; do
    case $1 in
        --results-gcs-dir ) shift
            RESULTS_GCS_DIR=$1
            ;;
        --dataflow-tft-image ) shift
            DATAFLOW_TFT_IMAGE=$1
            ;;
        --dataflow-predict-image ) shift
            DATAFLOW_PREDICT_IMAGE=$1
            ;;
        --dataflow-tfma-image ) shift
            DATAFLOW_TFMA_IMAGE=$1
            ;;
        --dataflow-tfdv-image ) shift
            DATAFLOW_TFDV_IMAGE=$1
            ;;
        --dataproc-create-cluster-image ) shift
            DATAPROC_CREATE_CLUSTER_IMAGE=$1
            ;;
        --dataproc-delete-cluster-image ) shift
            DATAPROC_DELETE_CLUSTER_IMAGE=$1
            ;;
        --dataproc-analyze-image ) shift
            DATAPROC_ANALYZE_IMAGE=$1
            ;;
        --dataproc-transform-image ) shift
            DATAPROC_TRANSFORM_IMAGE=$1
            ;;
        --dataproc-train-image ) shift
            DATAPROC_TRAIN_IMAGE=$1
            ;;
        --dataproc-predict-image ) shift
            DATAPROC_PREDICT_IMAGE=$1
            ;;
        --kubeflow-dnntrainer-image ) shift
            KUBEFLOW_DNNTRAINER_IMAGE=$1
            ;;
        --kubeflow-deployer-image ) shift
            KUBEFLOW_DEPLOYER_IMAGE=$1
            ;;
        --local-confusionmatrix-image ) shift
            LOCAL_CONFUSIONMATRIX_IMAGE=$1
            ;;
        --local-roc-image ) shift
            LOCAL_ROC_IMAGE=$1
            ;;
        --namespace ) shift
            NAMESPACE=$1
            ;;
        --test-name ) shift
            TEST_NAME=$1
            ;;
        -h | --help ) usage
            exit
            ;;
        * ) usage
            exit 1
    esac
    shift
done
# --results-gcs-dir is the only mandatory option.
if [ -z "$RESULTS_GCS_DIR" ]; then
    usage
    exit 1
fi
# Authenticate to GCP when a service-account key file is provided.
if [[ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ]]; then
    gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
fi
GITHUB_REPO=kubeflow/pipelines
BASE_DIR=/python/src/github.com/${GITHUB_REPO}
TEST_DIR=${BASE_DIR}/test/sample-test
cd ${BASE_DIR}
# Install argo
echo "install argo"
ARGO_VERSION=v2.2.0
mkdir -p ~/bin/
export PATH=~/bin/:$PATH
curl -sSL -o ~/bin/argo https://github.com/argoproj/argo/releases/download/$ARGO_VERSION/argo-linux-amd64
chmod +x ~/bin/argo
echo "Run the sample tests..."
# Generate Python package
cd ${BASE_DIR}/sdk/python
./build.sh /tmp/kfp.tar.gz
# Install python client, including DSL compiler.
pip3 install /tmp/kfp.tar.gz
# Run the tests
# ---------------------------------------------------------------------------
# Test dispatch. The five "basic" samples (sequential, condition, exithandler,
# immediatevalue, paralleljoin) shared five byte-identical compile/run/upload
# stanzas; that flow is factored into run_basic_sample below.
# ---------------------------------------------------------------------------

# run_basic_sample <sample.py> <testname> <JunitName>
#   $1 - sample file under samples/basic
#   $2 - test name passed to run_basic_test.py --testname
#   $3 - CamelCase fragment used in the junit result file name
run_basic_sample() {
  local sample_py=$1
  local testname=$2
  local result_file=junit_Sample$3Output.xml
  local tarball=${sample_py%.py}.tar.gz
  # Compile the sample pipeline with the DSL compiler.
  cd ${BASE_DIR}/samples/basic
  dsl-compile --py "${sample_py}" --output "${tarball}"
  # Run it and upload the junit result to GCS.
  cd "${TEST_DIR}"
  python3 run_basic_test.py --input ${BASE_DIR}/samples/basic/"${tarball}" --result "${result_file}" --output "${RESULTS_GCS_DIR}" --testname "${testname}" --namespace ${NAMESPACE}
  echo "Copy the test results to GCS ${RESULTS_GCS_DIR}/"
  gsutil cp "${result_file}" "${RESULTS_GCS_DIR}/${result_file}"
}

if [ "$TEST_NAME" == 'tf-training' ]; then
  SAMPLE_KUBEFLOW_TEST_RESULT=junit_SampleKubeflowOutput.xml
  SAMPLE_KUBEFLOW_TEST_OUTPUT=${RESULTS_GCS_DIR}
  #TODO: convert the sed commands to sed -e 's|gcr.io/ml-pipeline/|gcr.io/ml-pipeline-test/' and tag replacement.
  # Point the sample at the images built for this test run, then compile it.
  cd ${BASE_DIR}/samples/kubeflow-tf
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:\([a-zA-Z0-9_.-]\)\+|${DATAFLOW_TFT_IMAGE}|g" kubeflow-training-classification.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:\([a-zA-Z0-9_.-]\)\+|${KUBEFLOW_DNNTRAINER_IMAGE}|g" kubeflow-training-classification.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:\([a-zA-Z0-9_.-]\)\+|${DATAFLOW_PREDICT_IMAGE}|g" kubeflow-training-classification.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:\([a-zA-Z0-9_.-]\)\+|${LOCAL_CONFUSIONMATRIX_IMAGE}|g" kubeflow-training-classification.py
  dsl-compile --py kubeflow-training-classification.py --output kubeflow-training-classification.tar.gz
  cd "${TEST_DIR}"
  python3 run_kubeflow_test.py --input ${BASE_DIR}/samples/kubeflow-tf/kubeflow-training-classification.tar.gz --result $SAMPLE_KUBEFLOW_TEST_RESULT --output $SAMPLE_KUBEFLOW_TEST_OUTPUT --namespace ${NAMESPACE}
  echo "Copy the test results to GCS ${RESULTS_GCS_DIR}/"
  gsutil cp ${SAMPLE_KUBEFLOW_TEST_RESULT} ${RESULTS_GCS_DIR}/${SAMPLE_KUBEFLOW_TEST_RESULT}
elif [ "$TEST_NAME" == "tfx" ]; then
  SAMPLE_TFX_TEST_RESULT=junit_SampleTFXOutput.xml
  SAMPLE_TFX_TEST_OUTPUT=${RESULTS_GCS_DIR}
  # Point the sample at the images built for this test run, then compile it.
  cd ${BASE_DIR}/samples/tfx
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:\([a-zA-Z0-9_.-]\)\+|${DATAFLOW_TFT_IMAGE}|g" taxi-cab-classification-pipeline.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:\([a-zA-Z0-9_.-]\)\+|${DATAFLOW_PREDICT_IMAGE}|g" taxi-cab-classification-pipeline.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:\([a-zA-Z0-9_.-]\)\+|${DATAFLOW_TFDV_IMAGE}|g" taxi-cab-classification-pipeline.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:\([a-zA-Z0-9_.-]\)\+|${DATAFLOW_TFMA_IMAGE}|g" taxi-cab-classification-pipeline.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:\([a-zA-Z0-9_.-]\)\+|${KUBEFLOW_DNNTRAINER_IMAGE}|g" taxi-cab-classification-pipeline.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:\([a-zA-Z0-9_.-]\)\+|${KUBEFLOW_DEPLOYER_IMAGE}|g" taxi-cab-classification-pipeline.py
  dsl-compile --py taxi-cab-classification-pipeline.py --output taxi-cab-classification-pipeline.tar.gz
  cd "${TEST_DIR}"
  python3 run_tfx_test.py --input ${BASE_DIR}/samples/tfx/taxi-cab-classification-pipeline.tar.gz --result $SAMPLE_TFX_TEST_RESULT --output $SAMPLE_TFX_TEST_OUTPUT --namespace ${NAMESPACE}
  echo "Copy the test results to GCS ${RESULTS_GCS_DIR}/"
  gsutil cp ${SAMPLE_TFX_TEST_RESULT} ${RESULTS_GCS_DIR}/${SAMPLE_TFX_TEST_RESULT}
elif [ "$TEST_NAME" == "sequential" ]; then
  run_basic_sample sequential.py sequential Sequential
elif [ "$TEST_NAME" == "condition" ]; then
  run_basic_sample condition.py condition Condition
elif [ "$TEST_NAME" == "exithandler" ]; then
  run_basic_sample exit_handler.py exithandler ExitHandler
elif [ "$TEST_NAME" == "immediatevalue" ]; then
  run_basic_sample immediate_value.py immediatevalue ImmediateValue
elif [ "$TEST_NAME" == "paralleljoin" ]; then
  run_basic_sample parallel_join.py paralleljoin ParallelJoin
elif [ "$TEST_NAME" == "xgboost" ]; then
  SAMPLE_XGBOOST_TEST_RESULT=junit_SampleXGBoostOutput.xml
  SAMPLE_XGBOOST_TEST_OUTPUT=${RESULTS_GCS_DIR}
  # Point the sample at the images built for this test run, then compile it.
  cd ${BASE_DIR}/samples/xgboost-spark
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster:\([a-zA-Z0-9_.-]\)\+|${DATAPROC_CREATE_CLUSTER_IMAGE}|g" xgboost-training-cm.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataproc-delete-cluster:\([a-zA-Z0-9_.-]\)\+|${DATAPROC_DELETE_CLUSTER_IMAGE}|g" xgboost-training-cm.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataproc-analyze:\([a-zA-Z0-9_.-]\)\+|${DATAPROC_ANALYZE_IMAGE}|g" xgboost-training-cm.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataproc-transform:\([a-zA-Z0-9_.-]\)\+|${DATAPROC_TRANSFORM_IMAGE}|g" xgboost-training-cm.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataproc-train:\([a-zA-Z0-9_.-]\)\+|${DATAPROC_TRAIN_IMAGE}|g" xgboost-training-cm.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-dataproc-predict:\([a-zA-Z0-9_.-]\)\+|${DATAPROC_PREDICT_IMAGE}|g" xgboost-training-cm.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-local-roc:\([a-zA-Z0-9_.-]\)\+|${LOCAL_ROC_IMAGE}|g" xgboost-training-cm.py
  sed -i -e "s|gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:\([a-zA-Z0-9_.-]\)\+|${LOCAL_CONFUSIONMATRIX_IMAGE}|g" xgboost-training-cm.py
  dsl-compile --py xgboost-training-cm.py --output xgboost-training-cm.tar.gz
  cd "${TEST_DIR}"
  python3 run_xgboost_test.py --input ${BASE_DIR}/samples/xgboost-spark/xgboost-training-cm.tar.gz --result $SAMPLE_XGBOOST_TEST_RESULT --output $SAMPLE_XGBOOST_TEST_OUTPUT --namespace ${NAMESPACE}
  echo "Copy the test results to GCS ${RESULTS_GCS_DIR}/"
  gsutil cp ${SAMPLE_XGBOOST_TEST_RESULT} ${RESULTS_GCS_DIR}/${SAMPLE_XGBOOST_TEST_RESULT}
fi
|
<gh_stars>1-10
package org.brapi.test.BrAPITestServer.model.entity.core;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.ManyToOne;
import javax.persistence.Table;
import org.brapi.test.BrAPITestServer.model.entity.BrAPIBaseEntity;
/**
 * JPA entity for a data link attached to a study (table "study_data_link").
 * Pure data holder: every field maps to a column and the accessors contain
 * no logic.
 */
@Entity
@Table(name = "study_data_link")
public class DataLinkEntity extends BrAPIBaseEntity {
	@Column
	private String dataFormat;
	@Column
	private String description;
	@Column
	private String fileFormat;
	@Column
	private String name;
	@Column
	private String provenance;
	@Column
	private String scientificType;
	@Column
	private String url;
	@Column
	private String version;
	// Owning study; many data links may reference the same study.
	@ManyToOne
	private StudyEntity study;
	public String getDataFormat() {
		return dataFormat;
	}
	public void setDataFormat(String dataFormat) {
		this.dataFormat = dataFormat;
	}
	public String getDescription() {
		return description;
	}
	public void setDescription(String description) {
		this.description = description;
	}
	public String getFileFormat() {
		return fileFormat;
	}
	public void setFileFormat(String fileFormat) {
		this.fileFormat = fileFormat;
	}
	public String getName() {
		return name;
	}
	public void setName(String name) {
		this.name = name;
	}
	public String getProvenance() {
		return provenance;
	}
	public void setProvenance(String provenance) {
		this.provenance = provenance;
	}
	public String getScientificType() {
		return scientificType;
	}
	public void setScientificType(String scientificType) {
		this.scientificType = scientificType;
	}
	public String getUrl() {
		return url;
	}
	public void setUrl(String url) {
		this.url = url;
	}
	public String getVersion() {
		return version;
	}
	public void setVersion(String version) {
		this.version = version;
	}
}
|
// Ask the visitor for their name and greet them on the console.
const nombre = prompt("¿Cual es tu nombre?")
console.log(`Hola ${nombre}!`)
<filename>Final/finalFunctions.cpp<gh_stars>0
/*
Final Functions
Study guide with contain all the codes made at Algorythms subject
*/
#include <algorithm>
#include <iomanip>
#include <iostream>
#include <limits.h>
#include <queue>
#include <string>
#include <utility>
#include <vector>
using namespace std;
#define INF INT_MAX
// No trailing semicolon: the original `#define size 50;` made every use of
// the macro expand to `50;`, so declarations such as `int D[size][size]`
// (see audiophobia below) did not compile.
// NOTE: a lowercase `size` macro also collides with std::size (C++17) under
// `using namespace std;` — the name is kept for compatibility with existing
// uses in this file.
#define size 50
// Floyd-Warshall variant (UVa 10048 "Audiophobia"): instead of summing edge
// weights, D[i][j] is relaxed to the minimax value — the smallest possible
// maximum edge weight over all paths between i and j. c = number of nodes;
// unreachable pairs are marked INT_MAX and skipped to avoid overflow.
// NOTE(review): the parameter type `int D[size][size]` depends on the `size`
// macro defined above; with its original trailing semicolon this signature
// does not compile — the macro must expand to a plain `50`.
void audiophobia (int D[size][size], int c) {
    for (int k=0; k<c; k++) {
        for (int i=0; i<c; i++) {
            for (int j=0; j<c; j++) {
                if(D[i][k] != INT_MAX && D[k][j] != INT_MAX && D[i][j] > max(D[i][k],D[k][j])){
                    D[i][j] = max(D[i][k],D[k][j]);
                }
            }
        }
    }
}
int cuttingSticks(int ci[], int n) {
int Matrix[50][50];
for (int i=1; i<=n; i++) {
for (int j=i-1; j>=0; j--) {
if (j+1 == i)
Matrix[j][i] = 0;
else {
Matrix[j][i] = INT_MAX;
for (int k=j+1; k<i; k++) {
if (Matrix[j][i] > Matrix[j][k]+Matrix[k][i])
Matrix[j][i] = Matrix[j][k]+Matrix[k][i];
}
Matrix[j][i] += ci[i] - ci[j];
}
}
}
//Regresamos valor
return Matrix[0][n];
}
// Single-source shortest paths from vertex 0 (Dijkstra).
//
// N:    number of vertices.
// Dist: output; Dist[v] becomes the shortest distance 0→v (INF if unreachable).
// G:    adjacency list; G[u] holds (neighbor, edge-weight) pairs.
//
// Fix over the original: the queue stored (vertex, distance) pairs, so the
// min-heap ordered entries by vertex id rather than by distance — the loop
// still terminated with correct distances (it always relaxes from the live
// Dist[] values) but was not actually Dijkstra. Entries are now
// (distance, vertex), and stale entries are skipped on pop.
void rockytheBeagle(int N, vector<int> &Dist, vector<vector<pair<int, int> > > &G) {
    // Min-heap of (distance-so-far, vertex).
    priority_queue<pair<int, int>,
                   vector<pair<int, int> >,
                   greater<pair<int, int> > > Q;
    Dist.assign(N, INF);
    Dist[0] = 0;
    Q.push(make_pair(0, 0));
    while (!Q.empty()) {
        int d = Q.top().first;
        int u = Q.top().second;
        Q.pop();
        if (d > Dist[u])
            continue; // stale entry: a shorter path to u was found meanwhile
        for (int i = 0; i < (int)G[u].size(); i++) {
            int v = G[u][i].first;
            int w = G[u][i].second;
            if (Dist[v] > Dist[u] + w) {
                Dist[v] = Dist[u] + w;
                Q.push(make_pair(Dist[v], v));
            }
        }
    }
}
// Number of distinct binary search trees over n ordered keys — the n-th
// Catalan number — via the standard O(n^2) DP. Returned as double because
// the counts overflow 64-bit integers for modest n.
double howManyTrees(int n) {
    // catalan[i] = number of trees with i nodes; the empty tree counts as 1.
    std::vector<double> catalan(n + 1, 0.0);
    catalan[0] = 1.0;
    for (int nodes = 1; nodes <= n; nodes++) {
        // Pick the root: left subtree gets (pick - 1) nodes, right the rest.
        for (int pick = 1; pick <= nodes; pick++) {
            catalan[nodes] += catalan[nodes - pick] * catalan[pick - 1];
        }
    }
    return catalan[n];
}
// Length of the longest common subsequence of a and b, via the classic
// O(|a| * |b|) dynamic program.
int LCS(string a, string b) {
    int rows = a.length();
    int cols = b.length();
    // dp[i][j] = LCS length of prefixes a[0..i) and b[0..j); row/column 0
    // represent the empty prefix, so they stay 0.
    vector<vector<int> > dp(rows + 1, vector<int>(cols + 1, 0));
    for (int i = 1; i <= rows; i++) {
        for (int j = 1; j <= cols; j++) {
            if (a[i - 1] == b[j - 1])
                dp[i][j] = dp[i - 1][j - 1] + 1;            // extend the match
            else
                dp[i][j] = max(dp[i][j - 1], dp[i - 1][j]); // drop one char
        }
    }
    return dp[rows][cols];
}
// Median of the union of two sorted arrays of equal length n, using the
// classic O(log n) divide-and-conquer (compare the two partial medians and
// discard half of each array). Both arrays must be sorted ascending.
//
// Fixes over the original:
//  * `!n%2` parses as `(!n) % 2`, which is always 0 for n > 0, so the
//    even-length recursion branch was unreachable; replaced with
//    `n % 2 == 0`.
//  * the per-array medians were stored in `int`, truncating the average of
//    the two middle doubles for even n; they are now `double`.
double median(double arr1[], double arr2[], int n) {
    if (n <= 0)
        return 0;
    if (n == 1)
        return (arr1[0] + arr2[0]) / 2;
    if (n == 2)
        return (min(arr1[1], arr2[1]) + max(arr1[0], arr2[0])) / 2;

    // Median of each array on its own.
    double m1 = (n % 2 == 0) ? (arr1[n / 2] + arr1[n / 2 - 1]) / 2 : arr1[n / 2];
    double m2 = (n % 2 == 0) ? (arr2[n / 2] + arr2[n / 2 - 1]) / 2 : arr2[n / 2];

    if (m1 == m2)
        return m1;
    if (m1 < m2) {
        // Overall median lies in arr1's upper half and arr2's lower half.
        return (n % 2 == 0)
            ? median(arr1 + n / 2 - 1, arr2, n - (n / 2) + 1)
            : median(arr1 + n / 2, arr2, n - (n / 2));
    }
    // m1 > m2: symmetric case.
    return (n % 2 == 0)
        ? median(arr2 + n / 2 - 1, arr1, n - (n / 2) + 1)
        : median(arr2 + n / 2, arr1, n - (n / 2));
}
// Decides whether the car sequence `order` can be produced by interleaving
// train1 and train2 while preserving each train's internal order (the
// two-sequence shuffle / merge DP).
//
// NOTE(review): the indexing is 1-based throughout this DP
// (train1[1..N1], train2[1..N2], order[1..N1+N2]); element 0 of each
// array is unused. Callers must allocate accordingly — confirm against
// the call sites.
//
// Fix over the original: the loop counters i and j were used without ever
// being declared, which does not compile; they are now proper loop locals.
bool railRoad(int N1, int N2, int train1[], int train2[], int order[]) {
    // flag[i][j]: can the first i+j cars of `order` be formed from the
    // first i cars of train1 and the first j cars of train2?
    bool flag[N1 + 1][N2 + 1];
    flag[0][0] = true;
    // First column: cars taken only from train1.
    for (int i = 0; i < N1; i++) {
        if (flag[i][0])
            flag[i + 1][0] = (order[i + 1] == train1[i + 1]) ? true : false;
        else
            flag[i + 1][0] = false;
    }
    // First row: cars taken only from train2.
    for (int i = 0; i < N2; i++) {
        if (flag[0][i])
            flag[0][i + 1] = (order[i + 1] == train2[i + 1]) ? true : false;
        else
            flag[0][i + 1] = false;
    }
    // General cell: the (i+j)-th output car must extend one of the two fronts.
    for (int i = 1; i <= N1; i++) {
        for (int j = 1; j <= N2; j++) {
            if (order[i + j] == train1[i] && flag[i - 1][j])
                flag[i][j] = true;
            else if (order[i + j] == train2[j] && flag[i][j - 1])
                flag[i][j] = true;
            else
                flag[i][j] = false;
        }
    }
    return flag[N1][N2];
}
// Applies an attack of strength `rest` to a line of soldiers starting at
// `index`, recursively carrying leftover damage onto the next soldier, and
// prints the number of soldiers still alive to stdout.
//
// rest:     remaining damage to apply (updated in place).
// soldiers: hit points per soldier; soldiers[index] is the current target.
// index:    first soldier still standing (advanced in place as they fall).
// alive:    count of living soldiers (decremented in place).
//
// NOTE(review): the three branches behave differently on purpose-looking
// but unverified lines — an exact kill (hit points reach 0) advances index,
// decrements alive and prints only if someone remains; an overkill recurses
// with the leftover damage; a survivor prints without changing anything.
// Confirm against the problem statement before restructuring.
void delSoldiers(int &rest, int soldiers[200000], int &index, int &alive) {
if (alive > 0) {
soldiers[index] -= rest;
if (soldiers[index] < 0) {
// Overkill: this soldier dies and the surplus damage hits the next one.
alive--;
rest = abs(soldiers[index]);
index++;
delSoldiers(rest, soldiers, index, alive);
}
else if (soldiers[index] == 0) {
// Exact kill: no damage left to carry forward.
alive--;
index++;
if (alive > 0) {
cout << alive << endl;
}
}
else {
// Soldier survives the hit; report how many are still standing.
cout << alive << endl;
}
}
}
// Maximum total damage when at most k consecutive presses of the same
// button may be kept: within each run of identical buttons, keep the k
// largest damage values and sum them across all runs.
//
// n:       number of hits.
// k:       cap on kept presses per run of equal buttons.
// damage:  damage dealt by each hit.
// buttons: button pressed for each hit; equal adjacent chars form one run.
int kombat(int n, int k, int damage[], string buttons) {
    int total = 0;
    int runLength = 0;
    char current = '@'; // sentinel that matches no real button
    // Min-heap holding the (up to k) best damages of the current run.
    priority_queue<int, vector<int>, greater<int> > best;
    for (int i = 0; i < n; i++) {
        if (buttons[i] != current) {
            // New run begins: bank everything kept from the previous run.
            current = buttons[i];
            runLength = 1;
            while (!best.empty()) {
                total += best.top();
                best.pop();
            }
            best.push(damage[i]);
        } else if (runLength < k) {
            // Under the per-run cap: keep this hit unconditionally.
            best.push(damage[i]);
            runLength++;
        } else {
            // At the cap: keep this hit only if it beats the weakest kept one.
            if (best.top() < damage[i]) {
                best.pop();
                best.push(damage[i]);
            }
            runLength++;
        }
    }
    // Bank the final run.
    while (!best.empty()) {
        total += best.top();
        best.pop();
    }
    return total;
}
// Entry point. Intentionally empty: this file is a study-guide collection
// of algorithm implementations, not a runnable program.
int main() {
return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.