blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
6284ae27cbc30d2c2a15ab1b154f41dea49d7877
|
Shell
|
bookgh/docker-cloudboot
|
/docker-install.sh
|
UTF-8
| 3,403
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Install or uninstall a pinned Docker release from static binaries.
# Usage: docker-install.sh {install|uninstall}
VERSION=18.09.4
BIN_DIR=/usr/local/docker/bin
# Require exactly one argument.  BUG FIX: the original printed the usage
# message but kept executing; now we stop immediately on a bad arg count.
[ $# -ne 1 ] && { echo "Usage $0 {install|uninstall}"; exit 1; }
# Retry a command until it succeeds (used to survive flaky downloads).
# Arguments: the command and its arguments.
repeat() {
    while true; do
        # BUG FIX: quote "$@" so arguments containing spaces are not re-split.
        "$@" && return
        # Brief backoff instead of hot-spinning on a persistently failing command.
        sleep 1
    done
}
# Install docker ${VERSION} from static binaries on a CentOS 7 host:
# bootstrap mirrors/wget, sync time, disable selinux+firewalld, unpack the
# binaries into $BIN_DIR, write daemon.json and a systemd unit, flush
# iptables, start docker and install docker-compose.  Requires root.
install(){
# Install wget (bootstrapping Aliyun mirror repos first if wget is absent)
which wget > /dev/null 2>&1 || { rm -f /etc/yum.repos.d/*.repo; \
curl -so /etc/yum.repos.d/epel-7.repo http://mirrors.aliyun.com/repo/epel-7.repo; \
curl -so /etc/yum.repos.d/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo; \
sed -i '/aliyuncs.com/d' /etc/yum.repos.d/Centos-7.repo /etc/yum.repos.d/epel-7.repo; \
yum install -y wget; }
# Time synchronisation: sync once now, write to the hardware clock, then keep
# syncing every 20 minutes via cron (uniq keeps repeated installs from
# stacking duplicate crontab entries)
yum install -y ntpdate
ntpdate ntp1.aliyun.com
hwclock -w
crontab -l > /tmp/crontab.tmp
echo "*/20 * * * * /usr/sbin/ntpdate ntp1.aliyun.com > /dev/null 2>&1 && /usr/sbin/hwclock -w" >> /tmp/crontab.tmp
cat /tmp/crontab.tmp | uniq > /tmp/crontab
crontab /tmp/crontab
rm -f /tmp/crontab.tmp /tmp/crontab
# Disable selinux and firewalld (dockerd manages its own iptables rules)
setenforce 0
sed -i 's#SELINUX=.*#SELINUX=disabled#' /etc/selinux/config
systemctl stop firewalld
systemctl disable firewalld
# Download and unpack the docker static binary tarball into $BIN_DIR
[ -d $BIN_DIR ] || mkdir -p $BIN_DIR
repeat wget -c http://kaifa.hc-yun.com:30040/docker-install-bin/docker-${VERSION}.tgz
tar xvf docker-${VERSION}.tgz -C $BIN_DIR --strip-components 1
rm -f docker-${VERSION}.tgz
# Append $BIN_DIR to PATH in ~/.bashrc exactly once (guarded by the marker comment)
[ $(grep "# docker path" ~/.bashrc | wc -l) -eq 0 ] && echo -e "\n# docker path\nPATH=$BIN_DIR:\$PATH" >> ~/.bashrc
# Configure registry mirrors and log rotation for the docker daemon
[ -d /etc/docker ] || mkdir /etc/docker
cat > /etc/docker/daemon.json <<'EOF'
{
"registry-mirrors": ["http://3272dd08.m.daocloud.io", "https://docker.mirrors.ustc.edu.cn"],
"max-concurrent-downloads": 10,
"log-driver": "json-file",
"log-level": "warn",
"log-opts": {
"max-size": "10m",
"max-file": "3"
},
"data-root": "/var/lib/docker"
}
EOF
# Create the systemd service unit.
# NOTE(review): this heredoc is unquoted so $MAINPID is expanded at install
# time (likely to an empty string) rather than left for systemd; it probably
# should be \$MAINPID — confirm before relying on ExecReload.
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
After=keepalived.service glusterd.service autofs.service
[Service]
Type=idle
Environment="PATH=$BIN_DIR:/bin:/sbin:/usr/bin:/usr/sbin"
ExecStart=$BIN_DIR/dockerd
ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
ExecReload=/bin/kill -s HUP $MAINPID
Restart=on-failure
RestartSec=5
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Flush every iptables table so dockerd can rebuild its chains from scratch
iptables -P INPUT ACCEPT
iptables -F
iptables -X
iptables -F -t nat
iptables -X -t nat
iptables -F -t raw
iptables -X -t raw
iptables -F -t mangle
iptables -X -t mangle
systemctl daemon-reload
systemctl start docker
systemctl enable docker
systemctl restart docker
# Install docker-compose from the same internal mirror
repeat wget -c -O /usr/local/bin/docker-compose http://kaifa.hc-yun.com:30040/docker-install-bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# Pick up the PATH change and print versions as a smoke test
source ~/.bashrc
docker -v
docker-compose -v
echo -e '\nsource ~/.bashrc\n'
}
# Tear down everything the install step created: stop and disable the
# service, then delete the binaries, daemon config, unit file and compose.
uninstall(){
    systemctl stop docker
    systemctl disable docker
    rm -rf "$BIN_DIR"
    rm -f /etc/docker/daemon.json \
          /etc/systemd/system/docker.service \
          /usr/local/bin/docker-compose
}
# Dispatch on the requested action.
if [ "$1" = "install" ]; then
    install
elif [ "$1" = "uninstall" ]; then
    uninstall
else
    echo "Usage $0 {install|uninstall}"
fi
| true
|
04adf6e63d006acebac047a42b376ee0f32f0869
|
Shell
|
romika/dotfiles
|
/.toggletouchpad.sh
|
UTF-8
| 242
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Toggle the xinput "Device Enabled" property of the touchpad.
# Optional $1: xinput device id (defaults to 11, the original hard-coded id,
# so existing invocations keep working).
device="${1:-11}"
# Current state: xinput prints "Device Enabled (N): 0|1"; take the value field.
touchpadEnabled=$(xinput list-props "$device" | grep "Device Enabled" | awk -F ":" '{print $2}')
# Quote the variable so an empty result fails the test instead of crashing [.
if [ "$touchpadEnabled" -eq 1 ]
then
    xinput --set-prop "$device" "Device Enabled" 0
else
    xinput --set-prop "$device" "Device Enabled" 1
fi
| true
|
06737b74a35af573c1bebf7f3d089737411ede83
|
Shell
|
areebimtar/hive
|
/QA/tests-shi/bin/hive/restart-manager
|
UTF-8
| 1,089
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Restart manager server
# Usage: restart-manager [--kill]; -h/--help prints usage and exits.
[[ "$1" != "-h" && "$1" != "--help" ]] || { echo "Usage: `basename $0` <cci_build_no>"; exit 1; }
# Resolve this script's real directory and source the shared QA helper
# library (provides read_configs, check_variables, kill_process, restart_process).
set -e; s_dir=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")"); . "$s_dir/../../lib/functions.sh" "$s_dir"
# Which virtual instance to act on (defaults to 0).
export QA_INSTANCE_VIRT=${QA_INSTANCE_VIRT:-0}
read_configs virt
check_variables QA_PGUSER QA_PGPASSWORD QA_PGDATABASE_HIVE QA_MANAGER_PORT QA_MANAGER_PORT_API QA_NODE_BINARY QA_RABBIT_URI
# Entry point of the manager inside the current build of this instance.
product_dir="$INSTANCES_DIR/$QA_INSTANCE/virt/$QA_INSTANCE_VIRT/builds/current/product"
param="$product_dir/dist/manager/server.js"
if [ "$1" = "--kill" ]; then
kill_process "$QA_NODE_BINARY" "$param"
else
# Log to a per-instance file when QA_LOG_DIR is set, otherwise no log file.
[ -n "$QA_LOG_DIR" ] && log_file="$QA_LOG_DIR/manager-$QA_INSTANCE_VIRT.log" || log_file=''
# Environment consumed by the manager process: database, ports, queue.
export DB_USER=$QA_PGUSER
export DB_PASSWORD=$QA_PGPASSWORD
export DB_NAME=$QA_PGDATABASE_HIVE
export HIVE_MANAGER_PORT=$QA_MANAGER_PORT
export HIVE_MANAGER_API_PORT=$QA_MANAGER_PORT_API
export RABBIT_URI=$QA_RABBIT_URI
restart_process "$QA_NODE_BINARY" "$product_dir" "$log_file" "$param"
fi
RESULT_MESSAGE="$PROG: finished successfully"
| true
|
e64f4cce89cdba57d0a5604c9006810592639f9d
|
Shell
|
sariya/CUMC_taub
|
/split_per_chromosome/launch_job_split.sh
|
UTF-8
| 657
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
source ~/.bashrc
# Print usage information and exit.
# BUG FIX: the old text misspelled "chromosome", described -o as "-i <input
# direct>", and omitted the -x option that getopts actually accepts.
usage() {
cat <<UsageDisplay
launch_job_split.sh -o <output_dir> -i <input_prefix> -c <chromosome> -x <output_prefix>
Options:
-i <input_prefix>   plink --bfile input prefix
-c <chromosome>     chromosome to extract
-o <output_dir>     directory for output files
-x <output_prefix>  prefix for the per-chromosome output files
UsageDisplay
exit;
}
# Show usage and exit if no arguments were given.
if [ $# -eq 0 ]
then
    usage;
fi
# Parse options: -c chromosome, -x output prefix, -o output dir, -i input prefix.
while getopts "x:c:i:o:h" args
do
    case $args in
    c)
        chr_name=$OPTARG;;
    x)
        prefix_out=$OPTARG;;   # output filename prefix
    o)
        out_dir=$OPTARG;;      # directory for output
    i)
        infile_base=$OPTARG;;
    h)
        usage;;                # BUG FIX: -h was accepted but previously ignored
    esac
done
# Run plink from the output directory; quote paths so spaces survive.
cd "$out_dir"
plink --nonfounders --allow-no-sex --bfile "$infile_base" --chr "$chr_name" --make-bed --out "$prefix_out""_CHR""$chr_name"
| true
|
7ad4c0011f664bf8913fa6d1c37672e808be3f3d
|
Shell
|
mosn/istio
|
/prow/lib.sh
|
UTF-8
| 15,929
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2018 Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Cluster names for multicluster configurations.
export CLUSTER1_NAME=${CLUSTER1_NAME:-"cluster1"}
export CLUSTER2_NAME=${CLUSTER2_NAME:-"cluster2"}
export CLUSTER3_NAME=${CLUSTER3_NAME:-"cluster3"}
export CLUSTER_NAMES=("${CLUSTER1_NAME}" "${CLUSTER2_NAME}" "${CLUSTER3_NAME}")
# Non-overlapping pod/service CIDRs, one entry per cluster, so inter-cluster
# routes can be installed on the kind nodes without address collisions.
export CLUSTER_POD_SUBNETS=(10.10.0.0/16 10.20.0.0/16 10.30.0.0/16)
export CLUSTER_SVC_SUBNETS=(10.255.10.0/24 10.255.20.0/24 10.255.30.0/24)
# Configure docker to authenticate against Google Container Registry,
# preferring the full gcloud CLI and falling back to the standalone
# docker-credential-gcr helper; warn when neither is installed.
function setup_gcloud_credentials() {
  if command -v gcloud > /dev/null; then
    gcloud auth configure-docker -q
  elif command -v docker-credential-gcr > /dev/null; then
    docker-credential-gcr configure-docker
  else
    echo "No credential helpers found, push to docker may not function properly"
  fi
}
# Export GIT_SHA and GIT_BRANCH for the commit under test, then configure
# docker registry credentials.  In CI, prefer the PR head SHA
# (PULL_PULL_SHA), then the PR base SHA, then HEAD; outside CI also export a
# default ARTIFACTS directory.
function setup_and_export_git_sha() {
if [[ -n "${CI:-}" ]]; then
if [ -z "${PULL_PULL_SHA:-}" ]; then
if [ -z "${PULL_BASE_SHA:-}" ]; then
GIT_SHA="$(git rev-parse --verify HEAD)"
export GIT_SHA
else
export GIT_SHA="${PULL_BASE_SHA}"
fi
else
export GIT_SHA="${PULL_PULL_SHA}"
fi
else
# Use the current commit.
GIT_SHA="$(git rev-parse --verify HEAD)"
export GIT_SHA
export ARTIFACTS="${ARTIFACTS:-$(mktemp -d)}"
fi
GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
export GIT_BRANCH
setup_gcloud_credentials
}
# Download and unpack istio release artifacts.
# Fetch an Istio release tarball and unpack it in place.
# $1 - base URL, $2 - release tag, $3 - destination directory (default ".").
function download_untar_istio_release() {
  local base_url=${1}
  local release_tag=${2}
  local dest=${3:-.}
  # Keep LINUX_DIST_URL global: other scripts sourcing this file may read it.
  LINUX_DIST_URL="${base_url}/istio-${release_tag}-linux.tar.gz"
  wget -q "${LINUX_DIST_URL}" -P "${dest}"
  tar -xzf "${dest}/istio-${release_tag}-linux.tar.gz" -C "${dest}"
}
# Build only the docker images the integration tests consume; the image
# variant can be selected via $VARIANT (defaults to "default").
function build_images() {
  targets="docker.pilot docker.proxyv2 docker.app docker.app_sidecar docker.test_policybackend docker.mixer docker.operator "
  DOCKER_BUILD_VARIANTS="${VARIANT:-default}" DOCKER_TARGETS="${targets}" make dockerx
}
# Load locally-built docker images (hub ${HUB}, tag ${TAG}) into the docker
# daemon of the KinD cluster named $1 (default "istio-testing"), retrying up
# to 3 times because kind image loading is occasionally flaky.
function kind_load_images() {
NAME="${1:-istio-testing}"
# If HUB starts with "docker.io/" removes that part so that filtering and loading below works
local hub=${HUB#"docker.io/"}
for i in {1..3}; do
# Archived local images and load it into KinD's docker daemon
# Kubernetes in KinD can only access local images from its docker daemon.
docker images "${hub}/*:${TAG}" --format '{{.Repository}}:{{.Tag}}' | xargs -n1 kind -v9 --name "${NAME}" load docker-image && break
echo "Attempt ${i} to load images failed, retrying in 1s..."
sleep 1
done
# If a variant is specified, load those images as well.
# We should still load non-variant images as well for things like `app` which do not use variants
if [[ "${VARIANT:-}" != "" ]]; then
for i in {1..3}; do
docker images "${hub}/*:${TAG}-${VARIANT}" --format '{{.Repository}}:{{.Tag}}' | xargs -n1 kind -v9 --name "${NAME}" load docker-image && break
echo "Attempt ${i} to load images failed, retrying in 1s..."
sleep 1
done
fi
}
# Loads images into all clusters.
# Load images into every cluster in CLUSTER_NAMES in parallel; returns
# non-zero if any of the background loads fails.
function kind_load_images_on_clusters() {
declare -a LOAD_IMAGE_JOBS
for c in "${CLUSTER_NAMES[@]}"; do
kind_load_images "${c}" &
LOAD_IMAGE_JOBS+=("${!}")
done
# Wait on each recorded pid so a single failure propagates out.
for pid in "${LOAD_IMAGE_JOBS[@]}"; do
wait "${pid}" || return 1
done
}
# Clone the istio CNI repo (branch ${GIT_BRANCH}) as a sibling of the istio
# checkout so CNI artifacts can be built; no-op unless running from the
# canonical GOPATH layout.
function clone_cni() {
# Clone the CNI repo so the CNI artifacts can be built.
if [[ "$PWD" == "${GOPATH}/src/istio.io/istio" ]]; then
TMP_DIR=$PWD
cd ../ || return
git clone -b "${GIT_BRANCH}" "https://github.com/istio/cni.git"
cd "${TMP_DIR}" || return
fi
}
# Export logs from the KinD cluster named $1 into ${ARTIFACTS}/kind, then
# delete the cluster unless SKIP_CLEANUP is set.  Intended to run from an
# EXIT trap, hence the best-effort "|| true" on the kind commands.
function cleanup_kind_cluster() {
NAME="${1}"
echo "Test exited with exit code $?."
kind export logs --name "${NAME}" "${ARTIFACTS}/kind" -v9 || true
if [[ -z "${SKIP_CLEANUP:-}" ]]; then
echo "Cleaning up kind cluster"
kind delete cluster --name "${NAME}" -v9 || true
fi
}
# Cleans up the clusters created by setup_kind_clusters
# Tear down every cluster created by setup_kind_clusters.
function cleanup_kind_clusters() {
  local cluster
  for cluster in "${CLUSTER_NAMES[@]}"; do
    cleanup_kind_cluster "${cluster}"
  done
}
# Create (or recreate) a single KinD cluster and apply the metrics config.
# $1 - IP family "ipv4"/"ipv6" (default ipv4)
# $2 - kind node image (default kindest/node:v1.18.2)
# $3 - cluster name (default istio-testing)
# $4 - kind config file; when empty a default matching the k8s version is used
# Registers an EXIT trap that cleans up this cluster.
function setup_kind_cluster() {
IP_FAMILY="${1:-ipv4}"
IMAGE="${2:-kindest/node:v1.18.2}"
NAME="${3:-istio-testing}"
CONFIG="${4:-}"
# Delete any previous KinD cluster
echo "Deleting previous KinD cluster with name=${NAME}"
if ! (kind delete cluster --name="${NAME}" -v9) > /dev/null; then
echo "No existing kind cluster with name ${NAME}. Continue..."
fi
# explicitly disable shellcheck since we actually want $NAME to expand now
# shellcheck disable=SC2064
trap "cleanup_kind_cluster ${NAME}" EXIT
# If config not explicitly set, then use defaults
if [[ -z "${CONFIG}" ]]; then
# Different Kubernetes versions need different patches.
# NOTE(review): "<" here is a lexicographic string compare on the version tag.
K8S_VERSION=$(cut -d ":" -f 2 <<< "${IMAGE}")
if [[ -n "${IMAGE}" && "${K8S_VERSION}" < "v1.13" ]]; then
# Kubernetes 1.12
CONFIG=./prow/config/trustworthy-jwt-12.yaml
elif [[ -n "${IMAGE}" && "${K8S_VERSION}" < "v1.15" ]]; then
# Kubernetes 1.13, 1.14
CONFIG=./prow/config/trustworthy-jwt-13-14.yaml
else
# Kubernetes 1.15+
CONFIG=./prow/config/trustworthy-jwt.yaml
fi
# Configure the cluster IP Family only for default configs
if [ "${IP_FAMILY}" = "ipv6" ]; then
cat <<EOF >> "${CONFIG}"
networking:
ipFamily: ipv6
EOF
fi
fi
# Create KinD cluster
if ! (kind create cluster --name="${NAME}" --config "${CONFIG}" -v9 --retain --image "${IMAGE}" --wait=60s); then
echo "Could not setup KinD environment. Something wrong with KinD setup. Exporting logs."
exit 1
fi
# Install the metrics-server manifests used by the tests.
kubectl apply -f ./prow/config/metrics
}
# Sets up 3 kind clusters. Clusters 1 and 2 are configured for direct pod-to-pod traffic across
# clusters, while cluster 3 is left on a separate network.
# Create the three KinD clusters in parallel, install MetalLB in each, and
# export CLUSTERn_KUBECONFIG for all of them.
# $1 - topology; anything other than "SINGLE_CLUSTER" also wires up
#      cross-cluster routing (1<->2 full, 3 reachable via MetalLB only)
# $2 - kind node image
function setup_kind_clusters() {
TOPOLOGY="${1}"
IMAGE="${2}"
KUBECONFIG_DIR="$(mktemp -d)"
# The kind tool will error when trying to create clusters in paralell unless we create the network first
docker network inspect kind > /dev/null 2>&1 || docker network create kind
# Trap replaces any previous trap's, so we need to explicitly cleanup both clusters here
trap cleanup_kind_clusters EXIT
# Create cluster #IDX with its per-cluster pod/service subnets and write a
# kubeconfig that works both inside and outside the docker network.
function deploy_kind() {
IDX="${1}"
CLUSTER_NAME="${CLUSTER_NAMES[$IDX]}"
CLUSTER_POD_SUBNET="${CLUSTER_POD_SUBNETS[$IDX]}"
CLUSTER_SVC_SUBNET="${CLUSTER_SVC_SUBNETS[$IDX]}"
CLUSTER_YAML="${ARTIFACTS}/config-${CLUSTER_NAME}.yaml"
cat <<EOF > "${CLUSTER_YAML}"
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
networking:
podSubnet: ${CLUSTER_POD_SUBNET}
serviceSubnet: ${CLUSTER_SVC_SUBNET}
EOF
CLUSTER_KUBECONFIG="${KUBECONFIG_DIR}/${CLUSTER_NAME}"
# Create the clusters.
# TODO: add IPv6
KUBECONFIG="${CLUSTER_KUBECONFIG}" setup_kind_cluster "ipv4" "${IMAGE}" "${CLUSTER_NAME}" "${CLUSTER_YAML}"
# Kind currently supports getting a kubeconfig for internal or external usage. To simplify our tests,
# its much simpler if we have a single kubeconfig that can be used internally and externally.
# To do this, we can replace the server with the IP address of the docker container
# https://github.com/kubernetes-sigs/kind/issues/1558 tracks this upstream
CONTAINER_IP=$(docker inspect "${CLUSTER_NAME}-control-plane" --format "{{ .NetworkSettings.Networks.kind.IPAddress }}")
kind get kubeconfig --name "${CLUSTER_NAME}" --internal | \
sed "s/${CLUSTER_NAME}-control-plane/${CONTAINER_IP}/g" > "${CLUSTER_KUBECONFIG}"
}
# Fan out one deploy per cluster, then wait on each pid so failures propagate.
declare -a DEPLOY_KIND_JOBS
for i in "${!CLUSTER_NAMES[@]}"; do
deploy_kind "${i}" & DEPLOY_KIND_JOBS+=("${!}")
done
for pid in "${DEPLOY_KIND_JOBS[@]}"; do
wait "${pid}" || exit 1
done
# Install MetalLB for LoadBalancer support. Must be done synchronously since METALLB_IPS is shared.
for CLUSTER_NAME in "${CLUSTER_NAMES[@]}"; do
install_metallb "${KUBECONFIG_DIR}/${CLUSTER_NAME}"
done
# Export variables for the kube configs for the clusters.
export CLUSTER1_KUBECONFIG="${KUBECONFIG_DIR}/${CLUSTER1_NAME}"
export CLUSTER2_KUBECONFIG="${KUBECONFIG_DIR}/${CLUSTER2_NAME}"
export CLUSTER3_KUBECONFIG="${KUBECONFIG_DIR}/${CLUSTER3_NAME}"
if [[ "${TOPOLOGY}" != "SINGLE_CLUSTER" ]]; then
# Clusters 1 and 2 are on the same network
connect_kind_clusters "${CLUSTER1_NAME}" "${CLUSTER1_KUBECONFIG}" "${CLUSTER2_NAME}" "${CLUSTER2_KUBECONFIG}" 1
# Cluster 3 is on a different network but we still need to set up routing for MetalLB addresses
connect_kind_clusters "${CLUSTER1_NAME}" "${CLUSTER1_KUBECONFIG}" "${CLUSTER3_NAME}" "${CLUSTER3_KUBECONFIG}" 0
connect_kind_clusters "${CLUSTER2_NAME}" "${CLUSTER2_KUBECONFIG}" "${CLUSTER3_NAME}" "${CLUSTER3_KUBECONFIG}" 0
fi
}
# Wire two KinD clusters together at the network level.
# $1/$2 - cluster 1 name and kubeconfig; $3/$4 - cluster 2 name and kubeconfig
# $5 - 1 to also route the pod & service CIDRs between the clusters,
#      0 to connect only the MetalLB LoadBalancer ranges
function connect_kind_clusters() {
C1="${1}"
C1_KUBECONFIG="${2}"
C2="${3}"
C2_KUBECONFIG="${4}"
POD_TO_POD_AND_SERVICE_CONNECTIVITY="${5}"
C1_NODE="${C1}-control-plane"
C2_NODE="${C2}-control-plane"
C1_DOCKER_IP=$(docker inspect -f "{{ .NetworkSettings.Networks.kind.IPAddress }}" "${C1_NODE}")
C2_DOCKER_IP=$(docker inspect -f "{{ .NetworkSettings.Networks.kind.IPAddress }}" "${C2_NODE}")
if [ "${POD_TO_POD_AND_SERVICE_CONNECTIVITY}" -eq 1 ]; then
# Set up routing rules for inter-cluster direct pod to pod & service communication
C1_POD_CIDR=$(KUBECONFIG="${C1_KUBECONFIG}" kubectl get node -ojsonpath='{.items[0].spec.podCIDR}')
C2_POD_CIDR=$(KUBECONFIG="${C2_KUBECONFIG}" kubectl get node -ojsonpath='{.items[0].spec.podCIDR}')
# The service CIDR is scraped from the apiserver flags in cluster-info dump.
C1_SVC_CIDR=$(KUBECONFIG="${C1_KUBECONFIG}" kubectl cluster-info dump | sed -n 's/^.*--service-cluster-ip-range=\([^"]*\).*$/\1/p' | head -n 1)
C2_SVC_CIDR=$(KUBECONFIG="${C2_KUBECONFIG}" kubectl cluster-info dump | sed -n 's/^.*--service-cluster-ip-range=\([^"]*\).*$/\1/p' | head -n 1)
# Install symmetric routes on each node, via the other node's docker IP.
docker exec "${C1_NODE}" ip route add "${C2_POD_CIDR}" via "${C2_DOCKER_IP}"
docker exec "${C1_NODE}" ip route add "${C2_SVC_CIDR}" via "${C2_DOCKER_IP}"
docker exec "${C2_NODE}" ip route add "${C1_POD_CIDR}" via "${C1_DOCKER_IP}"
docker exec "${C2_NODE}" ip route add "${C1_SVC_CIDR}" via "${C1_DOCKER_IP}"
fi
# Set up routing rules for inter-cluster pod to MetalLB LoadBalancer communication
connect_metallb "$C1_NODE" "$C2_KUBECONFIG" "$C2_DOCKER_IP"
connect_metallb "$C2_NODE" "$C1_KUBECONFIG" "$C1_DOCKER_IP"
}
# Install MetalLB v0.9.3 into the cluster behind kubeconfig $1 and assign it
# a slice of 10 addresses from the shared METALLB_IPS pool (lazily seeded
# from the tail of the docker "kind" network's subnet).  Because METALLB_IPS
# is consumed destructively, callers must invoke this serially per cluster.
function install_metallb() {
KUBECONFIG="${1}"
kubectl apply --kubeconfig="$KUBECONFIG" -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/namespace.yaml
kubectl apply --kubeconfig="$KUBECONFIG" -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/metallb.yaml
kubectl create --kubeconfig="$KUBECONFIG" secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
if [ -z "${METALLB_IPS[*]}" ]; then
# Take IPs from the end of the docker kind network subnet to use for MetalLB IPs
DOCKER_KIND_SUBNET="$(docker inspect kind | jq .[0].IPAM.Config[0].Subnet -r)"
METALLB_IPS=()
while read -r ip; do
METALLB_IPS+=("$ip")
done < <(cidr_to_ips "$DOCKER_KIND_SUBNET" | tail -n 100)
fi
# Give this cluster of those IPs
RANGE="${METALLB_IPS[0]}-${METALLB_IPS[9]}"
# Drop the 10 addresses just handed out from the shared pool.
METALLB_IPS=("${METALLB_IPS[@]:10}")
echo 'apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- '"$RANGE" | kubectl apply --kubeconfig="$KUBECONFIG" -f -
}
# Route one cluster's MetalLB LoadBalancer range via another cluster's node.
# $1 - remote node container name to add routes on
# $2 - kubeconfig of the cluster running MetalLB
# $3 - docker IP of the MetalLB cluster's node (route next-hop)
function connect_metallb() {
REMOTE_NODE=$1
METALLB_KUBECONFIG=$2
METALLB_DOCKER_IP=$3
# Parse the "start-end" address range from the MetalLB config map; the
# regex captures start in BASH_REMATCH[1] and end in BASH_REMATCH[3]
# (BASH_REMATCH[2]/[4] are the inner repeated groups).
IP_REGEX='(([0-9]{1,3}\.?){4})'
LB_CONFIG="$(kubectl --kubeconfig="${METALLB_KUBECONFIG}" -n metallb-system get cm config -o jsonpath="{.data.config}")"
if [[ "$LB_CONFIG" =~ $IP_REGEX-$IP_REGEX ]]; then
while read -r lb_cidr; do
docker exec "${REMOTE_NODE}" ip route add "${lb_cidr}" via "${METALLB_DOCKER_IP}"
done < <(ips_to_cidrs "${BASH_REMATCH[1]}" "${BASH_REMATCH[3]}")
fi
}
# Print every usable host address inside the given CIDR, one per line.
# $1 - an IPv4 network in CIDR notation, e.g. 10.0.0.0/24
function cidr_to_ips() {
  # Pass the CIDR via argv instead of interpolating it into the program text.
  python3 - "$1" <<'EOF'
import sys
from ipaddress import IPv4Network

for host in IPv4Network(sys.argv[1]).hosts():
    print(host)
EOF
}
# Summarize an inclusive IPv4 address range into CIDR blocks, one per line.
# $1 - first address of the range, $2 - last address of the range
function ips_to_cidrs() {
  IP_RANGE_START="$1"
  IP_RANGE_END="$2"
  # Pass the endpoints via argv instead of interpolating into the program.
  python3 - "$IP_RANGE_START" "$IP_RANGE_END" <<'EOF'
import sys
from ipaddress import IPv4Address, summarize_address_range

first, last = (IPv4Address(arg) for arg in sys.argv[1:3])
for network in summarize_address_range(first, last):
    print(network.compressed)
EOF
}
# Render the istio-cni helm chart (hub/tag from ISTIO_CNI_HUB/ISTIO_CNI_TAG,
# defaulting to the istio-testing daily builds) and apply the resulting
# daemonset manifest into kube-system.
function cni_run_daemon_kind() {
echo 'Run the CNI daemon set'
ISTIO_CNI_HUB=${ISTIO_CNI_HUB:-gcr.io/istio-testing}
ISTIO_CNI_TAG=${ISTIO_CNI_TAG:-latest}
# TODO: this should not be pulling from external charts, instead the tests should checkout the CNI repo
chartdir=$(mktemp -d)
helm init --client-only
helm repo add istio.io https://gcsweb.istio.io/gcs/istio-prerelease/daily-build/master-latest-daily/charts/
helm fetch --devel --untar --untardir "${chartdir}" istio.io/istio-cni
helm template --values "${chartdir}"/istio-cni/values.yaml --name=istio-cni --namespace=kube-system --set "excludeNamespaces={}" \
--set-string hub="${ISTIO_CNI_HUB}" --set-string tag="${ISTIO_CNI_TAG}" --set-string pullPolicy=IfNotPresent --set logLevel="${CNI_LOGLVL:-debug}" "${chartdir}"/istio-cni > "${chartdir}"/istio-cni_install.yaml
kubectl apply -f "${chartdir}"/istio-cni_install.yaml
}
# setup_cluster_reg is used to set up a cluster registry for multicluster testing
# setup_cluster_reg is used to set up a cluster registry for multicluster testing
# For every kubeconfig in ${CLUSTERREG_DIR}: recreate the istio-system-multi
# namespace, a cluster-admin service account "istio-multi-test", and a
# kubeconfig (written over the context file) that authenticates as that SA.
# Leaves KUBECONFIG pointing at the first context processed.
function setup_cluster_reg () {
MAIN_CONFIG=""
for context in "${CLUSTERREG_DIR}"/*; do
if [[ -z "${MAIN_CONFIG}" ]]; then
MAIN_CONFIG="${context}"
fi
export KUBECONFIG="${context}"
# Start from a clean slate: drop leftovers from previous runs.
kubectl delete ns istio-system-multi --ignore-not-found
kubectl delete clusterrolebinding istio-multi-test --ignore-not-found
kubectl create ns istio-system-multi
kubectl create sa istio-multi-test -n istio-system-multi
kubectl create clusterrolebinding istio-multi-test --clusterrole=cluster-admin --serviceaccount=istio-system-multi:istio-multi-test
CLUSTER_NAME=$(kubectl config view --minify=true -o "jsonpath={.clusters[].name}")
gen_kubeconf_from_sa istio-multi-test "${context}"
done
export KUBECONFIG="${MAIN_CONFIG}"
}
# Generate a kubeconfig that authenticates via a service account's token.
# Reads the current KUBECONFIG context for the server URL and CLUSTER_NAME
# (set by setup_cluster_reg).
# $1 - service account name (in namespace istio-system-multi)
# $2 - output file path for the generated kubeconfig
function gen_kubeconf_from_sa () {
    local service_account=$1
    local filename=$2
    SERVER=$(kubectl config view --minify=true -o "jsonpath={.clusters[].cluster.server}")
    SECRET_NAME=$(kubectl get sa "${service_account}" -n istio-system-multi -o jsonpath='{.secrets[].name}')
    CA_DATA=$(kubectl get secret "${SECRET_NAME}" -n istio-system-multi -o "jsonpath={.data['ca\\.crt']}")
    TOKEN=$(kubectl get secret "${SECRET_NAME}" -n istio-system-multi -o "jsonpath={.data['token']}" | base64 --decode)
    # BUG FIX: the output redirection previously went to "$(unknown)" (a
    # corrupted substitution), so the kubeconfig was never written to the
    # caller-supplied path.  Write to "${filename}" as intended.
    cat <<EOF > "${filename}"
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: ${CA_DATA}
server: ${SERVER}
name: ${CLUSTER_NAME}
contexts:
- context:
cluster: ${CLUSTER_NAME}
user: ${CLUSTER_NAME}
name: ${CLUSTER_NAME}
current-context: ${CLUSTER_NAME}
kind: Config
preferences: {}
users:
- name: ${CLUSTER_NAME}
user:
token: ${TOKEN}
EOF
}
| true
|
9464e9987c24abe3018a4fec0b073b46a071e6f2
|
Shell
|
fdaugs/graphql-docs
|
/scripts/build.sh
|
UTF-8
| 513
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build the project: webpack bundles for development and production, then a
# babel library build, then copy the introspection query alongside it.
# Must be run from the project root with node_modules installed.
set -eu

[ -f package.json ] || {
  echo 'This must be run from the source root of the project'
  exit 1
}

[ -d node_modules ] || {
  echo 'The NPM package dependencies must have been installed'
  exit 1
}

# Start from clean output directories.
rm -rf ./dist ./lib

for env in development production; do
  echo "Compiling with environment '$env'"
  NODE_ENV=$env ./node_modules/.bin/webpack
done

./node_modules/.bin/babel -d lib src --ignore '*.flow.js'
cp src/introspectionQuery.txt lib/introspectionQuery.txt
| true
|
4f3dcb1ecde813918ca52ee6b940a8050bcdb241
|
Shell
|
viethien/misc
|
/intro_to_redirection.sh
|
UTF-8
| 493
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Redirecting input and output — worked examples for a shell tutorial.
grep -E "from [0-9]{1,3}-" datasets_and_figures/yeast_chr1_orfs.fa
# -E specifies to use (extended) regular expressions
# To append standard output to a file use the >> operator instead of the
# > operator (which truncates the file first)
# Standard input: data that is provided as text to a unix program
# Example: feed the file to tr on stdin, translating 'A' to 'a'
tr 'A' 'a' < datasets_and_figures/yeast_chr1_orfs.fa
# Redirecting standard input and standard output at the same time
tr 'A' 'a' < datasets_and_figures/yeast_chr1_orfs.fa > processed_dna.fa
| true
|
e4205426077416ebffc92bd5327392324a2c03d5
|
Shell
|
Albager/adminstuff
|
/sethostname.sh
|
UTF-8
| 3,226
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactively reconcile this machine's hostname (/etc/hostname) and FQDN
# (the 127.0.1.1 entry in /etc/hosts) with DNS reverse lookup, then reboot.
# Requires sudo; the network interface is hard-coded to enp0s25.
echo
echo "This script will change the hostname in /etc/hostname and the \"FQDN\" in /etc/hosts files."

#Colors
txtrst=$(tput sgr0)     # Text reset
txtbld=$(tput bold)     # Bold text
txtylw=$(tput setaf 3)  # Yellow
txtgrn=$(tput setaf 2)  # Green

#Retrieve IPv4 assigned by static dhcp to $ip4
ip4=$(/sbin/ip -o -4 addr list enp0s25 | awk '{print $4}' | cut -d/ -f1)
#Retrieve hardware address to $mac
mac=$(ip link show enp0s25 | awk '/ether/ {print $2}')
#Retrieve FQDN from dns reverse lookup (sed strips the trailing dot)
fqdn=$(dig -x "$ip4" +short | sed 's/.\{1\}$//')
#Assign existing (possibly wrong!) hostname to $hostn
hostn=$(cat /etc/hostname)
#Assign existing (possibly wrong!) fqdn to $oldfqdn
oldfqdn=$(grep -Po '(?<=127.0.1.1\s).*' /etc/hosts)
#Correct hostname = first label of the fqdn
newhostn=$(echo "$fqdn" | sed 's/\..*$//')

echo
# BUG FIX: messages below contain \t escapes, so they need echo -e to render
# tabs instead of printing a literal backslash-t.
echo -e "The IPv4 address for your device \t${txtbld}$mac${txtrst} is ${txtbld}$ip4${txtrst}"
echo
echo -e "Existing (possibly wrong!) FQDN is: \t${txtbld}$oldfqdn${txtrst}"
echo -e "Correct FQDN (from DNS) should be: \t${txtbld}${txtylw}$fqdn${txtrst}"
echo
echo "Existing (possibly wrong!) hostname is: ${txtbld}$hostn${txtrst}"
echo "Correct hostname (from FQDN) should be: ${txtbld}${txtylw}$newhostn${txtrst}"
echo

while true; do
  read -p "${txtbld}${txtylw}Change hostname and FQDN accordingly and reboot (y/n)?${txtrst} " choice
  case "$choice" in
    y|Y )
      echo "yes"
      echo
      #Change fqdn
      sudo sed -i "s/$oldfqdn/$fqdn/g" /etc/hosts
      #Check fqdn
      fqdncheck=$(grep -Po '(?<=127.0.1.1\s).*' /etc/hosts)
      #Change hostname
      sudo sed -i "s/$hostn/$newhostn/g" /etc/hostname
      #Change hostname - for our purposes, hostname=fqdn!
      #sudo sed -i "s/$hostn/$fqdn/g" /etc/hostname
      #Check hostname
      hostncheck=$(cat /etc/hostname)
      #Display new hostname and FQDN
      echo -e "Your new hostname is \t${txtbld}${txtgrn}$hostncheck${txtrst}"
      echo -e "Your new FQDN is \t${txtbld}${txtgrn}$fqdncheck${txtrst}"
      break
      ;;
    n|N )
      echo "no"
      echo
      # Offer manual entry of hostname and FQDN instead of the DNS values.
      while true; do
        read -p "${txtbld}${txtylw}Do you want to enter the correct settings and reboot (y/n)?${txtrst} " choice1
        case "$choice1" in
          y|Y ) echo "yes"
            echo
            #Ask to type new hostname
            echo "Enter new hostname: "
            read newhost
            #Change hostname
            sudo sed -i "s/$hostn/$newhost/g" /etc/hostname
            #Check hostname
            hostncheck2=$(cat /etc/hostname)
            echo -e "Your new hostname is now set to: \t${txtbld}${txtgrn}$hostncheck2${txtrst}"
            echo
            #Ask to type new FQDN
            echo "Enter new FQDN: "
            read newfqdn
            #Change FQDN
            sudo sed -i "s/$oldfqdn/$newfqdn/g" /etc/hosts
            #Check FQDN
            fqdncheck2=$(grep -Po '(?<=127.0.1.1\s).*' /etc/hosts)
            echo -e "Your new FQDN is now set to: \t\t${txtbld}${txtgrn}$fqdncheck2${txtrst}"
            break 2
            ;;
          n|N ) echo "no - quit"
            # BUG FIX: this was 'return', which is invalid outside a function
            # and fell through to re-prompting; exit the script (skipping the
            # reboot below) instead.
            exit 0
            ;;
          *)
            echo "Invalid input... Type 'y' or 'n': "
            ;;
        esac
      done
      ;;
    *)
      echo "Invalid input... Type 'y' or 'n': "
      ;;
  esac
done

echo
echo "Rebooting now... Bye!"
sleep 1
#Reboot
sudo reboot
| true
|
2033de62638676a2a234c4ce48ae26faa855c108
|
Shell
|
sophea/spring-boot-docker-mysql
|
/src/main/docker/run_docker_compose_localdb.sh
|
UTF-8
| 328
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Export the host's first non-loopback IPv4 address as HOSTIP, then bring up
# the local-db docker-compose stack (which interpolates HOSTIP).
##HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1`
# BUG FIX: the command substitution below was missing its closing ")",
# which made the whole script a syntax error.
export HOSTIP=$(ifconfig | grep -E "([0-9]{1,3}\.){3}[0-9]{1,3}" | grep -v 127.0.0.1 | awk '{ print $2 }' | cut -f2 -d: | head -n1)
echo "IP address from docker ${HOSTIP}"
docker-compose -f docker-compose-localdb.yml up
| true
|
5a87f644bc4370b8453ea6fdf7ac302292d5e0cb
|
Shell
|
court-jus/dotfiles
|
/bin/manageVM
|
UTF-8
| 352
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Manage a VM described by the config file $1 (colon-separated KEY:VALUE
# lines).  $2 selects the action:
#   show    - open a VNC viewer on the VM's VNCPORT
#   web     - open Chrome on the VM's IP
#   restart - reset the VM named NOM via kvmctl on host "lost"
case "$2" in
  show)
    xvnc4viewer -passwd ${HOME}/.vnc/passwd 10.31.254.41:$(awk -F: '$1 ~ /VNCPORT/ {print $2}' "$1")
    ;;
  web)
    google-chrome http://$(awk -F: '$1 ~ /IP/ {print $2}' "$1")
    ;;
  restart)
    sleep 2 ; ssh root@lost kvmctl reset $(awk -F: '$1 ~ /NOM/ {print $2}' "$1")
    ;;
esac
| true
|
c16747fcc402669f104e1d5b12f238f8d5fe2513
|
Shell
|
sanyakarol/manjaro
|
/polybar/launch1.sh
|
UTF-8
| 421
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Restart polybar: kill running instances, wait for them to exit, then
# launch the three "work" bars, each with its own config file.

# Terminate already running bar instances
killall -q polybar

# BUG FIX: $UID is a bash-ism and is undefined under plain POSIX sh
# (this script's shebang); derive the uid portably instead.
uid=$(id -u)

# Wait until the processes have been shut down
while pgrep -u "$uid" -x polybar >/dev/null; do sleep 1; done

# Launch polybar (paths quoted so an unusual $HOME cannot split or glob)
polybar work1 -c "$HOME/.config/polybar/configg.ini" &
polybar work2 -c "$HOME/.config/polybar/configgg.ini" &
polybar work -c "$HOME/.config/polybar/configggg.ini" &
#polybar back -c $HOME/.config/polybar/configg.ini &
| true
|
671c93602b1b6354da9ef25954e2bca769fd2cf8
|
Shell
|
ronin13/Scripts
|
/loadutil
|
UTF-8
| 1,331
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/zsh
# dmenu-driven firefox launcher (zsh).  $1 selects the mode:
#   sess  (default) - pick a saved dactyl session file and open its URLs
#   ist             - pick a URL from dactyl history
#   speed           - pick from the speed-dial list
#   lark            - pick from bookmarks ("larks")
# Relies on $DMENU / $YDMENU holding a dmenu-style chooser command line.
type=${1:="sess"}
if [[ $type == sess ]];then
export yprofile="dactyl-sess"
pushd ~/.dactyls/sessions
# *(om[1,-1]): zsh glob qualifier — all files ordered by modification time.
file=$(print -l *(om[1,-1]) | ${=DMENU})
[[ -z $file ]] && exit
if [[ -n $file && $file == *: ]];then
# Session names ending in ':' — keep picking URLs from the file until cancelled.
while :;do
url=$($=DMENU < $file)
[[ -z $url ]] && break
# $url[(w)1]: first word of the selected line (the URL itself).
firefox -new-tab $url[(w)1]
done
else
# Otherwise open every line of the session file at once.
for line in "${(f)$(<$file)}";do
firefox -new-tab $line[(w)1]
done
fi
popd
elif [[ $type == ist ]];then
export yprofile="dactyl-hist"
pushd ~/.dactyls/history/
#url=$(tac history | grep -v "about:blank" | uniq | ${(e)=YDMENU})
# Most recent history entries first, skipping internal about: pages.
url=$(tac history | grep -v "about:" | uniq | $=DMENU)
[[ -n $url ]] && firefox -new-tab $url[(w)1]
popd
elif [[ $type == speed ]];then
export yprofile="speed"
# Number the entries for the menu, then strip the number off the selection.
while :;do
url=$(awk '{ print NR" "$0 }' ~/.dactyls/speedy/speed | ${(e)=YDMENU} | awk '{ print $2 }')
[[ -z $url ]] && exit
firefox -new-tab "$url"
done
elif [[ $type == lark ]];then
export yprofile="dactyl-lark"
while :;do
url=$(awk '{ print NR" "$0 }' ~/.dactyls/bmark/larks | ${(e)=YDMENU} | awk '{ print $2 }')
[[ -z $url ]] && exit
firefox -new-tab "$url"
done
fi
| true
|
e4f0a04067a9eba8b5642f0b463a5204782bbca5
|
Shell
|
EricZBL/ClusterBuildScripts
|
/install/installCompent.sh
|
UTF-8
| 2,246
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
################################################################################
## Copyright: HZGOSUN Tech. Co, BigData
## Filename: installAll.sh
## Description: installs every cluster component (local or distributed mode).
## Version: 2.0
## Author: zhangbaolin
## Created: 2018-6-26
################################################################################
#set -x
#set -e
cd `dirname $0`
## BIN dir: the directory this script lives in
BIN_DIR=`pwd`
cd ..
## Root directory of the installation package
ROOT_HOME=`pwd`
## Configuration directory
CONF_DIR=${ROOT_HOME}/conf
# File recording web UI addresses; create it (and its directory) if absent.
WebUI_Dir=$(grep WebUI_Dir ${CONF_DIR}/cluster_conf.properties|cut -d '=' -f2)
WebUI_File=${WebUI_Dir}/WebUI_Address
if [[ -d ${WebUI_Dir} ]]; then
touch ${WebUI_File}
else
mkdir -p ${WebUI_Dir}
touch ${WebUI_File}
fi
## Local (single-node) mode directory
LOCAL_DIR=${ROOT_HOME}/local
# ISLOCAL=yes selects the local-mode installer; otherwise install the full
# distributed stack, component by component.
ISLOCAL=$(grep "ISLOCAL" ${CONF_DIR}/cluster_conf.properties | cut -d "=" -f2)
if [[ "${ISLOCAL}" == "yes" ]]; then
sh ${LOCAL_DIR}/bin/componentInstall_local.sh
echo "安装本地模式集群完成"
else
cd ${BIN_DIR}
# Check selinux status; abort if the checker reports a problem.
sh selinuxStatus.sh
flag=$?
if [[ $flag == 1 ]]; then
exit 1
fi
## Install mysql
sh mysqlInstall.sh
## Install jdk
sh jdkInstall.sh
## Install zookeeper
sh zookeeperInstall.sh
## Install hadoop
sh hadoopInstall.sh
## Install hbase
sh hbaseInstall.sh
## Install phoenix
sh phoenixInstall.sh
## Install hive
sh hiveInstall.sh
## Install scala
sh scalaInstall.sh
## Install kafka
sh kafkaInstall.sh
## Install spark
sh sparkInstall.sh
## Install rocketmq
sh rocketmqInstall.sh
## Install haproxy
sh haproxyInstall.sh
## Install elastic
sh elasticInstall.sh
## Install kibana
sh kibanaInstall.sh
## Install azkaban
sh azkabanInstall.sh
# Configure global environment variables
sh create-global-env.sh
# Configure component log directories
sh logconfig.sh
## Adjust yarn parameters according to the cluster type (mini or full)
ISMINICLUSTER=$(grep "ISMINICLUSTER" ${CONF_DIR}/cluster_conf.properties | cut -d '=' -f2)
if [ "x${ISMINICLUSTER}" == "xno" ]; then
sh config-yarn-CPU-RAM.sh
else
sh config-mini-yarn.sh
fi
echo "安装分布式集群完成"
fi
| true
|
a8d43b61425a4603e57c911bb5bfcba2194befc9
|
Shell
|
slotsma123/serpentine2
|
/scripts/readDepth.sh
|
UTF-8
| 1,273
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
## calculate read depth for a given bed file or bam file
## Usage: readDepth.sh <bamFile or bedFile> <bedFile> [outDir]
## Writes <base>.basecoverage, .depth, .hist and .depth_per_base into outDir,
## and plots the histogram via a site-local R script.
if [ $# -lt 2 ]
then
echo "Usage: `basename $0` <bamFile or bedFile> <bedFile> [outDir]"
exit 65
fi
# Output directory defaults to the current directory.
if [ $# -lt 3 ]
then
outDir='./'
else
outDir=$3
fi
# Pick the coverageBed input flag from the input file extension.
if [[ $1 = *.bam ]]; then
a='-abam'
elif [[ $1 = *.bed ]]; then
a='-a'
fi
outBase=`basename $1`
# Per-interval coverage against columns 1-4 (chr/start/end/gene) of the bed.
echo $'chr\tstart\tend\tgene\tbaits\tbases_covered\ttotal_bases\tfraction_covered' > ${outDir}/${outBase}.basecoverage
coverageBed $a ${1} -b <(cut -f1-4 ${2}) >> ${outDir}/${outBase}.basecoverage
# Depth histogram: coverageBed -hist emits "all" summary rows plus
# per-interval rows; split them into .hist and .depth respectively.
echo $'chr\tstart\tend\tgene\tdepth\tbases_covered\ttotal_bases\tfraction_covered' > ${outDir}/${outBase}.depth.tmp
coverageBed $a ${1} -b <(cut -f1-4 ${2}) -hist >> ${outDir}/${outBase}.depth.tmp
grep "^all" ${outDir}/${outBase}.depth.tmp | cut -f 2-3 > ${outDir}/${outBase}.hist
grep -v "^all" ${outDir}/${outBase}.depth.tmp > ${outDir}/${outBase}.depth
rm -f ${outDir}/${outBase}.depth.tmp
# Plot the histogram (site-local helper; path is hard-coded to this install).
/data/Clinomics/Tools/serpentine2/scripts/do_hist.R -f ${outDir}/${outBase}.hist -o ${outDir}
# Per-base depth across each interval.
echo $'chr\tstart\tend\tgene\tposition\tdepth' > ${outDir}/${outBase}.depth_per_base
coverageBed $a ${1} -b <(cut -f1-4 ${2}) -d >> ${outDir}/${outBase}.depth_per_base
#########################################################################
#########################################################################
| true
|
e9f8adb31635608ca78d823d36205d7452949390
|
Shell
|
soarpenguin/bash-scripts
|
/controls/start_marathon.sh
|
UTF-8
| 562
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#set -x
# Launch the Marathon framework in the background, registered with the mesos
# and marathon zookeeper ensembles, advertising this host's internal 10.x IP.
export PS4='+ [`basename ${BASH_SOURCE[0]}`:$LINENO ${FUNCNAME[0]} \D{%F %T} $$ ] '
MYNAME="${0##*/}"
CURDIR=$(cd "$(dirname "$0")"; pwd)
chmod +x ${CURDIR}/bin/start
# First local IPv4 starting with 10. — NOTE(review): assumes the legacy
# "inet addr:" ifconfig output format; verify on the target distro.
MYIP=`ifconfig | grep "inet addr:10\." | grep -oP "((\d+\.)){3}(\d+)" | head -n 1`
# Fully detach from the terminal; all output is discarded.
# NOTE(review): credentials are hard-coded here — consider moving them to a
# protected file or environment variable.
nohup ${CURDIR}/bin/start \
--master zk://10.10.11.3:2181,10.10.11.4:2181,10.10.11.5:2181/mesos \
--zk zk://10.10.11.3:2181,10.10.11.4:2181,10.10.11.5:2181/marathon \
--hostname "${MYIP}" --http_credentials "1verge:8bio8cwa" \
</dev/null >/dev/null 2>&1 &
| true
|
ef7497886b3e6ef8dfca4559ffe239802634e4a6
|
Shell
|
JwowSquared/holbertonschool-higher_level_programming
|
/0x10-python-network_0/3-methods.sh
|
UTF-8
| 97
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# curls the address, returns available methods
# Fix: quote the URL argument so URLs containing shell metacharacters or
# spaces survive word splitting, and single-quote the awk program so the
# shell cannot expand anything inside it.
curl -sI "$1" | awk 'NR==4' | cut -c 8-
| true
|
c78ae28666b1768e7616ac4eee95d4a800806364
|
Shell
|
daamien/temboard-agent
|
/packaging/deb/mkdeb.sh
|
UTF-8
| 2,251
| 3.8125
| 4
|
[
"PostgreSQL"
] |
permissive
|
#!/bin/bash -eux
# Build a Debian package of temboard-agent with fpm, install it, smoke-test
# it, and drop the resulting .deb in ../../dist.
# Note: options in the shebang are lost if run as 'bash mkdeb.sh'.
UID_GID=$(stat -c %u:%g $0)
cd $(readlink -m $0/..)
WORKDIR=$(readlink -m build)
DESTDIR=$WORKDIR/destdir
DISTDIR=$(readlink -m ${PWD}/../../dist)
# Remove build artifacts and any previously installed package.
# Setting CLEAN=0 in the environment skips the cleanup.
teardown () {
set +x
if [ "0" = "${CLEAN-1}" ] ; then
return
fi
rm -rf $WORKDIR
if hash temboard-agent ; then
echo "Cleaning previous installation." >&2
apt-get -qq purge -y temboard-agent
fi
set -x
}
trap teardown EXIT INT TERM
teardown
mkdir -p $DESTDIR
# V E R S I O N S
# pep440deb prints the PyPI (PEP 440) version and its Debian translation.
versions=($(pep440deb --echo --pypi temboard-agent))
pep440v=${versions[0]}
debianv=${versions[1]}
codename=$(lsb_release --codename --short)
release=0dlb1${codename}1
# Should match the interpreter used by scripts shebang. We should pin python
# version used.
python=/usr/bin/python
pythonv=$($python --version |& grep -Po 'Python \K([23]\..)')
# I N S T A L L
pip$pythonv install --pre --root $DESTDIR --prefix /usr --no-deps temboard-agent==$pep440v
# Fake --install-layout=deb, when using wheel.
mv $DESTDIR/usr/lib/python${pythonv}/{site,dist}-packages/
# B U I L D
fpm_args=()
if ! [ -f /usr/bin/systemctl ] ; then
fpm_args+=(--deb-init temboard-agent.init)
fi
fpm --verbose \
--force \
--debug-workspace \
--chdir $DESTDIR \
--input-type dir \
--output-type deb \
--name temboard-agent \
--version $debianv \
--iteration $release \
--architecture all \
--description "PostgreSQL Remote Control Agent" \
--category database \
--maintainer "${DEBFULLNAME} <${DEBEMAIL}>" \
--license PostgreSQL \
--url http://temboard.io/ \
--depends python-pkg-resources \
--depends ssl-cert \
--depends python-psycopg2 \
--depends python${pythonv} \
--after-install ../../share/restart-all.sh \
"${fpm_args[@]}" \
"$@" \
./=/
# T E S T
# Inspect the package, install it and check it imports/runs.
deb=$(ls temboard-agent_*-${release}_all.deb)
dpkg-deb -I $deb
dpkg-deb -c $deb
apt-get update --quiet
apt-get install --yes ./$deb
(
cd /
temboard-agent --version
python -c 'import temboardagent.toolkit'
)
# S A V E
mkdir -p ${DISTDIR}/
mv -fv $deb ${DISTDIR}/
# Point deb as latest build for changes generation.
ln -fs $(basename $deb) ${DISTDIR}/last_build.deb
chown -R ${UID_GID} ${DISTDIR}/
| true
|
8b27453342eb302e874aaf4e10bf28f542b0d08f
|
Shell
|
shanki84/terraform_challange
|
/templates/userdata.sh
|
UTF-8
| 2,052
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# EC2 user-data script: log the instance metadata at boot.
# Check the logs
exec > >(tee /var/log/user-data-output.log|logger -t user-data -s 2>/dev/console) 2>&1

# Base URL of the EC2 instance metadata service.
META_URL="http://169.254.169.254/latest"

# fetch <label> <path> — print "<label>: <value>" for a metadata path.
# BUG FIX: the original lines passed the wget command as literal words to
# echo ('echo "label:" wget -q ...'), so nothing was ever fetched; the wget
# must run inside a command substitution.
fetch() {
echo "$1:" "$(wget -q -O - "${META_URL}/$2")"
}

# fetch the metadata of the instance on JSON format
wget -q -O - "${META_URL}/dynamic/instance-identity/document"
fetch "instance_id" "meta-data/instance-id"
fetch "ami-id" "meta-data/ami-id"
fetch "ami-launch-index" "meta-data/ami-launch-index"
fetch "ami-manifest-path" "meta-data/ami-manifest-path"
fetch "block-device-mapping" "meta-data/block-device-mapping/"
fetch "hostname" "meta-data/hostname"
fetch "instance-action" "meta-data/instance-action"
fetch "instance-type" "meta-data/instance-type"
fetch "kernel-id" "meta-data/kernel-id"
fetch "local-hostname" "meta-data/local-hostname"
fetch "local-ipv4" "meta-data/local-ipv4"
fetch "mac" "meta-data/mac"
fetch "metrics" "meta-data/metrics/"
fetch "network" "meta-data/network/"
fetch "placement" "meta-data/placement/"
fetch "profile" "meta-data/profile"
fetch "public-hostname" "meta-data/public-hostname"
fetch "public-ipv4" "meta-data/public-ipv4"
fetch "public-keys" "meta-data/public-keys/"
fetch "reservation-id" "meta-data/reservation-id"
fetch "security-groups" "meta-data/security-groups"
| true
|
3b45027e52092895b33db4bc04e0bbbe56544772
|
Shell
|
ethiery/HPCassignments
|
/6_funWithPthreadsOMPandMPI/jobs/mergeBenches.sh
|
UTF-8
| 521
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Merge the per-runner benchmark files data/bench<RUNNER>.dat into one
# table data/benchALL.dat: one row per process count, one column per
# (runner, problem size) pair.
runners="SEQ OMP PTHREADS MPI_SEQ_SYNC MPI_SEQ_ASYNC"
# Build the header line: one column name per runner/size combination.
line="# nb_procs"
for runner in ${runners}
do
for size in 4096 8192 16384 32768
do
line="${line} ${runner}_${size}"
done
done
echo "${line}" > data/benchALL.dat
# For each process count, pull the matching row out of every runner file
# and concatenate the measurement columns.
for nbProcs in 1 2 4 8 16
do
line="${nbProcs} "
for runner in ${runners}
do
# grep the file directly (no 'cat | grep') and use $(...) over backticks.
subline=$(grep "^${nbProcs} " "data/bench${runner}.dat" | cut -d " " -f 2-)
line="${line} ${subline}"
done
echo "${line}" >> data/benchALL.dat
done
| true
|
3f1ddf51fca71dbe3846e343e9957d38bb8b81d9
|
Shell
|
unshorn-forks/STAC
|
/Engagement_Challenges/Engagement_2/subspace/challenge_program/data/init
|
UTF-8
| 4,828
| 3.984375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Utility functions
# pecho: portable echo — print all arguments joined by spaces, newline-terminated.
pecho() {
printf '%s\n' "$*"
}
# log: write a message to stdout.
log() {
pecho "$@"
}
# error: write a message to stderr with an ERROR: prefix.
error() {
log "ERROR: $@" >&2
}
# fatal: report an error and abort the script with status 1.
fatal() {
error "$@"
exit 1
}
# try: run a command, aborting the script if it fails.
try() {
"$@" || fatal "'$@' failed"
}
# Run a command as another user.
# Usage: run_as <user> <command> [<args>...]
# Relies on fatal() defined above; runuser requires root privileges.
run_as() {
run_as_user="$1"
shift || fatal "missing run-as user"
runuser -u "$run_as_user" -- "$@"
}
# Run a job in the background.
# Usage: add_job <name> <user> <command> [<args>...]
#   <name> is the name of the job, one word
#   <user> is the username to run the job as, or "-" for the current user
# Output (stdout+stderr) goes to ${INIT_LOG_DIR}/<name>.log.
add_job() {
add_job_name="$1"
shift || fatal "missing job name"
add_job_user="$1"
shift || fatal "missing job user"
add_job_log="${INIT_LOG_DIR}/${add_job_name}.log"
case "$add_job_user" in
-)
"$@" > "${add_job_log}" 2>&1 &
;;
*)
# Detach stdin too so runuser does not hold the terminal.
runuser -u "$add_job_user" -- "$@" \
> "${add_job_log}" 2>&1 < /dev/null &
;;
esac
}
# Start basic system components.
try rsyslogd
try dbus-uuidgen --ensure
try mkdir /var/run/dbus
try dbus-daemon --system
# Start the mail system.
try /usr/libexec/postfix/aliasesdb
try /usr/libexec/postfix/chroot-update
try postfix start
# Start the app itself.
log "starting subspace app"
umask 006
add_job app "${APP_USER}" \
java -cp "${APP_JAR_DIR}/*" "${APP_MAIN_CLASS}" "${APP_CONFIG_FILE}"
try sleep 1
# Start stunnel to give us HTTPS.
log "starting stunnel"
run_as "${APP_USER}" stunnel "${STUNNEL_CONFIG_DIR}"/stunnel.conf
# Pre-configure the app:
# Create list of users.
# Each line: username password email latitude longitude.
log "pre-configuring subspace app"
app_user_info="`mktemp`" || fatal "could not create temporary file"
for line in \
"test1 password stac+test1@localhost -89.99999 179.99999" \
"test2 password stac+test2@localhost 0.00001 -0.00001" \
"test3 password stac+test3@localhost -89.99999 -0.00001" \
"test4 password stac+test4@localhost 0.00001 179.99999" \
;
do
pecho "$line" >> "$app_user_info" \
|| fatal "could not append to $app_user_info"
done
# Register the users.
# '+' in the email must be url-encoded as %2B when sent as a query parameter.
while read -r username password email latitude longitude; do
uri="https://localhost:8443/register"
uri="${uri}?username=${username}"
uri="${uri}&password=${password}"
uri="${uri}&email=$(pecho "${email}" | sed 's/+/%2B/g')"
try curl --insecure "${uri}" > /dev/null 2>&1
done < "$app_user_info"
sleep 1
# Confirm the registrations.
# The confirmation links are scraped out of the local mail spool.
for uri in \
$(grep '^https://localhost:8443/confirm?token=[a-z0-9-]\+$' "/var/mail/${FRONTEND_USER}") \
;
do
try curl --insecure "${uri}" > /dev/null 2>&1
done
sleep 1
# Set the user locations.
while read -r username password email latitude longitude; do
uri="https://localhost:8443/update-location"
uri="${uri}?username=${username}"
uri="${uri}&password=${password}"
uri="${uri}&lat=${latitude}"
uri="${uri}&lon=${longitude}"
try curl --insecure "${uri}" > /dev/null 2>&1
done < "$app_user_info"
sleep 1
# Start the frontend.
# umask 022 # back to normal
# run_as "${FRONTEND_USER}" vncserver \
# "${VNC_DISPLAY}" \
# -rfbport "${VNC_PORT}" \
# -rfbauth "${VNC_PASS_FILE}" \
# -geometry "${VNC_RESOLUTION}" \
# || fatal "could not start vncserver"
# Help the user get started.
log "================================================================"
#log "For testing purposes, Subspace is pre-configured with the"
#log "following users:"
#log
#log " username password email-address initial-location"
#log " ------------------------------------------------"
#while read -r username password email latitude longitude; do
# log " ${username} ${password} ${email} @${latitude},${longitude}"
#done < "$app_user_info"
#log
log "To start, check your mail using the 'mailx' or 'mutt' program."
log "Initially, you'll see all the emails from the creation of the "
log "above users. Read one of the \"Welcome\"" "messages to learn how "
log "to use Subspace."
log
log "If you want to create more users, the system is set up so you"
log "receive mail for all email addresses of the form"
log "user+foo@localhost. Remember that '+' needs to be url-encoded as"
log "'%2B' when passed as a parameter over HTTPS. For example, you"
log "could visit the following URL to register a new user:"
log
log "  https://localhost:8443/register?username=kirk&password=kirkisawesome&email=user%2Bkirk@localhost"
log
log "Then check your mail again, and follow the instructions."
log
log "Most operations can be run more conveniently using the scripts "
log "provided in the examples/ directory."
log "================================================================"
# Give the user a shell.
login_user=$(cat /usr/local/etc/login-user)
cd /home/"$login_user" && runuser -u "$login_user" -- /bin/bash -l
| true
|
1dedf802e66466c862e504faa2d94dc5a16402b8
|
Shell
|
zggl/wax-ml
|
/build/has_changed.sh
|
UTF-8
| 280
| 3.421875
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
# see https://stackoverflow.com/questions/5143795/how-can-i-check-in-a-bash-script-if-my-local-git-repository-has-changes
# Succeed (exit 0) when 'git status --porcelain' reports no entries;
# otherwise warn and exit 1 so callers can gate on a clean working tree.
dirty_entries=$(git status --porcelain | wc -l)
if [ "$dirty_entries" -eq 0 ]; then
echo "  🟢 Git repo is clean."
else
echo "  🔴 Git repo dirty. Quit."
exit 1
fi
| true
|
c3b05812072247b5e7e87c6852b0321bcb1575f9
|
Shell
|
timonier/dumb-entrypoint
|
/src/dockerize/installer
|
UTF-8
| 856
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install or uninstall the jwilder/dockerize binary.
# Usage: installer [install|uninstall]
# INSTALL_DIRECTORY may be overridden in the environment (default /usr/sbin).
set -e -u -x
# Check environment
_fail() {
echo 1>&2 "$1"
echo 1>&2 "Usage: $(basename "$0") [install|uninstall]"
exit 255
}
if [ "$#" -lt 1 ]; then
_fail "Invalid number of arguments."
fi
# Run installer
INSTALL_DIRECTORY="${INSTALL_DIRECTORY:-/usr/sbin}"
case "$1" in
install)
mkdir -p "${INSTALL_DIRECTORY}"
# The version-lister feed yields KEY=VALUE lines; export them all,
# including DOCKERIZE_RELEASE used on the next line.
export $(curl --location "https://gitlab.com/timonier/version-lister/raw/generated/jwilder/dockerize/latest" | xargs)
curl --location "${DOCKERIZE_RELEASE}" | tar --directory "${TMPDIR:-/tmp}" --extract --gzip
mv ${TMPDIR:-/tmp}/dockerize "${INSTALL_DIRECTORY}"/dockerize
chmod +x "${INSTALL_DIRECTORY}"/dockerize
;;
uninstall)
rm -f "${INSTALL_DIRECTORY}"/dockerize
;;
*)
_fail "Argument \"$1\" is invalid."
;;
esac
| true
|
ea73826741ccd41d10c8817ebdf1f758cdbbcfb9
|
Shell
|
jjzhang166/Kinect1473
|
/kinectExample1473/misc/kinect_fetch_fw
|
UTF-8
| 2,447
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# kinect_fetch_fw - Fetch and install the Microsoft Kinect UAC firmware
#
# Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it>
#
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.
#
# wget and 7z from p7zip-full are needed, they can be installed with
# sudo aptitude install wget p7zip-full
#
# NOTE: p7zip-full >= 9.20 is required in order to extract .msi files
# correctly
set -e
# SDK download location and expected checksum; SDK_URL may be overridden
# in the environment.
SDK_URL=${SDK_URL:-"http://download.microsoft.com/download/F/9/9/F99791F2-D5BE-478A-B77A-830AD14950C3/KinectSDK-v1.0-beta2-x86.msi"}
SDK_MD5="40764fe9e00911bda5095e5be777e311"
[ $# -lt 1 ] && { echo "usage: $(basename "$0") <firmware destdir> [<path of kinect_upload_fw binary>]" 1>&2; exit 1; }
FW_DESTDIR=$(readlink -f $1)
LOADER_PATH=${2:-"/usr/local/sbin/kinect_upload_fw"}
command -v wget >/dev/null 2>&1 || { echo "$(basename "$0"): command 'wget' is needed." 1>&2 ; exit 1; }
command -v 7z >/dev/null 2>&1 || { echo "$(basename "$0"): command '7z' is needed." 1>&2; exit 1; }
# Work in a temp dir removed on exit; exit 2 on HUP/INT/QUIT/TERM.
TEMPDIR=$(mktemp -d)
trap 'rm -rf "$TEMPDIR" >/dev/null 2>&1' 0
trap "exit 2" 1 2 3 15
cat << EOM
This script is going to download the UAC Firmware for the Microsoft
Kinect Sensor device from the Microsoft Kinect for Windows SDK:
http://kinectforwindows.org/
The full license of the SDK can be found at:
http://www.kinectforwindows.org/download/EULA.htm
EOM
cd "$TEMPDIR"
ARCHIVE_NAME=$(basename "$SDK_URL")
rm -f "$ARCHIVE_NAME" && wget "$SDK_URL" -O "$ARCHIVE_NAME"
# Verify the download against the pinned MD5 before extracting anything.
ARCHIVE_MD5=$(md5sum "$ARCHIVE_NAME" | grep --only-matching -m 1 '^[0-9a-f]*')
if [ "$ARCHIVE_MD5" != "$SDK_MD5" ];
then
echo "$(basename "$0"): Invalid hash for file '$ARCHIVE_NAME'." 1>&2
exit 1
fi
echo -n "Extracting the UAC firmware..."
7z e -y -r "$ARCHIVE_NAME" "UACFirmware.*" > /dev/null
echo " done."
FW_FILE=$(ls UACFirmware.* | cut -d ' ' -f 1)
# DESTDIR supports staged installs (packaging); empty for a direct install.
install -d "${DESTDIR}${FW_DESTDIR}"
install -m 644 "$FW_FILE" "${DESTDIR}${FW_DESTDIR}"
FIRMWARE_PATH=$FW_DESTDIR/$(basename "$FW_FILE")
# Substitute loader/firmware paths into the udev rule, if present.
if [ -f "${DESTDIR}/lib/udev/rules.d/55-kinect_audio.rules" ];
then
sed -e "s|@LOADER_PATH@|$LOADER_PATH|g" \
-e "s|@FIRMWARE_PATH@|$FIRMWARE_PATH|g" \
-i "${DESTDIR}/lib/udev/rules.d/55-kinect_audio.rules"
fi
| true
|
09c858ca3110e3cbd61d5d8868fc296eb11bb0cd
|
Shell
|
langenhagen/scripts
|
/_archive/raspi-scripts/backup_owncloud_server.sh
|
UTF-8
| 989
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# makes a backup of the owncloud server on the raspberry pi
#
# taken from
# https://doc.owncloud.org/server/9.1/admin_manual/maintenance/backup.html
#
# author: langenhagen
# Interactive confirmation before doing anything.
echo "Really?"
echo "<ctrl+c> to escape or enter to proceed"
read THROW_AWAY_VAR
echo "`date` Starting Owncloud backup"
OWNCLOUD_BACKUPDIR="/home/pi/stuff/owncloud-backup"
mkdir -p $OWNCLOUD_BACKUPDIR
cd /var/www/owncloud/
# Archive-mode sync of the config/ and data/ directories (preserves ACLs/xattrs).
sudo rsync -Aax config data $OWNCLOUD_BACKUPDIR
echo "`date` Sync of config/ and data/ complete"
# One dump file per day; an existing same-day dump is replaced.
DB_BACKUP_FILE_NAME="$OWNCLOUD_BACKUPDIR/owncloud-db-backup_`date +"%Y%m%d"`.bak"
sudo rm -f $DB_BACKUP_FILE_NAME
echo "`date` Removal of old database dump complete"
# mariadb backup (!sic -p for password there is no space between -p and the password!)
# NOTE(review): the DB password is embedded in the script and visible in 'ps'
# while mysqldump runs — consider a ~/.my.cnf credentials file instead.
sudo mysqldump --single-transaction -h localhost -u root -pca3MARIADB7c65 owncloud > $DB_BACKUP_FILE_NAME
# sqlite backup
# sqlite3 data/owncloud.db .dump > $DB_BACKUP_FILE_NAME
echo "`date` Database dump complete"
| true
|
ad1dcf35ccb2ca620e873fdcb0d0fcdbb73ce6f6
|
Shell
|
Gorian/Bash-Framework
|
/libinfo.sh
|
UTF-8
| 450
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Diagnostic for the bash framework: load the "standard" library and print
# the currently loaded modules, classes and functions.
# Depends on /scripts/lib/main.lib providing librequire, std.msg and the
# LIB_*_LIST arrays.
. /scripts/lib/main.lib
librequire "standard";
std.msg "\nLoading Modules...\n" "default" "ok";
std.msg "Collecting information...\n" "default" "ok";
std.msg "Displaying information...\n" "default" "ok";
std.msg "\nLoaded Modules:\n" "blue,bold";
std.msg "${LIB_MODULE_LIST[*]}";
std.msg "\nclass list:\n" "blue,bold";
std.msg "${LIB_CLASS_LIST[*]}";
std.msg "\nfunction list:\n" "blue,bold";
std.msg "${LIB_FUNCTION_LIST[*]}\n";
| true
|
b360891c955ee906672882bd73db3fb48623fed4
|
Shell
|
yinxusen/dqn-zork
|
/bin/run.sh
|
UTF-8
| 198
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launch a Python program from this repository with python/ on PYTHONPATH.
# Usage: run.sh <script.py> [args...]
FWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
WORK_DIR="$FWDIR/.."
export PYTHONPATH="$WORK_DIR/python/:$PYTHONPATH"
executable=$1
# Quote the script path so paths containing spaces survive word splitting;
# remaining arguments are forwarded untouched.
python "$executable" "${@:2}"
| true
|
7916cc0a604c8c18a1beebbb92a075fda2707ce6
|
Shell
|
Sethuraman/useful-docker-files
|
/upload-keys-concourse-to-s3/script.sh
|
UTF-8
| 843
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate the SSH key material Concourse needs and upload it to S3 together
# with the Concourse binary.
# Required environment: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
# AWS_DEFAULT_REGION, BUCKET_NAME, CONCOURSE_DOWNLOAD_URL.

# Print a message and abort.
exitWithMessage() {
echo "$1"
exit 1
}

# Abort early with a clear message when a required variable is missing.
# (Variables are quoted so empty values cannot break the tests.)
validateEnvironmentVariables() {
if [ -z "$AWS_ACCESS_KEY_ID" ]; then
exitWithMessage "AWS_ACCESS_KEY_ID missing"
fi
if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
exitWithMessage "AWS_SECRET_ACCESS_KEY missing"
fi
if [ -z "$AWS_DEFAULT_REGION" ]; then
exitWithMessage "AWS_DEFAULT_REGION missing"
fi
if [ -z "$BUCKET_NAME" ]; then
exitWithMessage "BUCKET_NAME missing"
fi
if [ -z "$CONCOURSE_DOWNLOAD_URL" ]; then
exitWithMessage "CONCOURSE_DOWNLOAD_URL missing"
fi
}

# BUG FIX: the validation function was defined but never invoked, so the
# script previously ran (and failed late) with missing configuration.
validateEnvironmentVariables

mkdir keys
ssh-keygen -t rsa -f keys/tsa_host_key -N ''
ssh-keygen -t rsa -f keys/worker_key -N ''
ssh-keygen -t rsa -f keys/session_signing_key -N ''
aws s3 sync keys "s3://$BUCKET_NAME/keys"
wget -O concourse "$CONCOURSE_DOWNLOAD_URL"
aws s3 cp concourse "s3://$BUCKET_NAME/binaries/concourse"
| true
|
41311195fb7b5ea9b61dd9f8c328866767c603c1
|
Shell
|
snow1313113/go_leetcode
|
/build.sh
|
UTF-8
| 202
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build/install one sub-package of this GOPATH-style repository.
# Usage: build.sh <sub_dir>
if [ $# -lt 1 ]; then
echo "USAGE: $0 sub_dir"
echo "e.g.: $0 test"
# 'exit -1' is not portable (exit status must be 0-255); use 1.
exit 1
fi
# $(...) over backticks; abort if either cd fails so 'go install' never
# runs in the wrong directory.
cd "$(dirname "$0")" || exit 1
CURRENT_DIR=$(pwd)
export GOPATH=${CURRENT_DIR}
cd "src/$1" || exit 1
#go build .
go install .
| true
|
d03471253f7645a624b56626f5134dab239228f6
|
Shell
|
woocashb/shutils
|
/nas_mountall.sh
|
UTF-8
| 896
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Mount (or, with CLEAN=true, unmount) every NFS share exported by a NAS
# under MOUNT_ROOT, one subdirectory per share.
# Usage: nas_mountall.sh IP MOUNT_DIR [CLEAN={false,true}]
NAS_IP=$1
# Strip a single trailing slash from the mount root.
MOUNT_ROOT=${2%/}
CLEAN=${3:-'false'}
ARGC=$#
usage(){
SCRIPT=$(basename $0)
printf "$SCRIPT IP MOUNT_DIR [CLEAN={false,true}]\n"
}
if [[ $ARGC -lt 2 ]];then
usage
exit 1;
fi
# Share paths as listed by showmount (first column, header dropped).
shares=$(showmount -e $NAS_IP | tail -n +2 | awk '{print $1}')
# Create a per-share directory under MOUNT_ROOT and NFS-mount the share into it.
shares_mount(){
for share in ${shares[@]};do
share_leaf_dir=$(basename $share)
( [ ! -d ${MOUNT_ROOT}/${share_leaf_dir} ] && mkdir ${MOUNT_ROOT}/${share_leaf_dir}) && mount -t nfs ${NAS_IP}:${share} ${MOUNT_ROOT}/${share_leaf_dir};
done
}
# Unmount each share and remove its (now empty) mount directory.
shares_umount(){
for share in ${shares[@]};do
share_leaf_dir=$(basename $share)
umount ${MOUNT_ROOT}/${share_leaf_dir}
[ -d ${MOUNT_ROOT}/${share_leaf_dir} ] && rmdir ${MOUNT_ROOT}/${share_leaf_dir}
done
}
# 2 args = mount; 3 args with CLEAN=true = unmount; anything else = usage.
case $ARGC in
2) shares_mount
;;
3) if [ $CLEAN == 'true' ];then
shares_umount;
else
usage
fi
;;
*) usage
;;
esac
| true
|
65cb90723915a1a51ddb462ebac2731fa875b0ef
|
Shell
|
couchbaselabs/mobile-training-todo
|
/docker/sg/config/run.sh
|
UTF-8
| 834
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/sh
# Start Sync Gateway once the Couchbase Server bucket is reachable.
# Usage: run.sh <Sync-Gateway-Config-File> <Log-Directory>
if [ "$#" -ne 2 ]; then
echo "Usage: run.sh <Sync-Gateway-Config-File> <Log-Directory>"
exit 1
fi
# wait_for_uri <expected-http-code> <uri> [curl-args...]
# Poll the URI every 5 seconds until curl reports the expected status code.
wait_for_uri() {
expected=$1
shift
uri=$1
echo "Waiting for $uri to be available..."
while true; do
status=$(curl -s -w "%{http_code}" -o /dev/null $*)
if [ "x$status" = "x$expected" ]; then
break
fi
echo "$uri not up yet, waiting 5 seconds..."
sleep 5
done
echo "$uri ready, continuing"
}
# Stop sync_gateway service:
echo "Stop running sync_gateway service ..."
systemctl stop sync_gateway
wait_for_uri 200 http://cb-server:8091/pools/default/buckets/todo -u admin:password
echo "Sleeping for 10 seconds to give server time to settle..."
sleep 10
# Start sync_gateway:
# Run in the foreground, teeing its output into the given log directory.
echo "Start sync_gateway ..."
/opt/couchbase-sync-gateway/bin/sync_gateway "$1" 2>&1 | tee "$2/sg.log"
| true
|
c1dc5d3001b192fca202a0021da0134270a9beed
|
Shell
|
gejiawen/my-shell-toolbox
|
/docker/work.sh
|
UTF-8
| 3,133
| 3.9375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# ------------------------------------
# @description
# Docker image maintenance/work script.
#
# @usage
# ./work.sh [action]
#
# @params
# action - the task to run; currently one of 'build', 'push', 'run', 'clean', 'demo'
#
# @author
# gejiawen<806717031@qq.com>
# ------------------------------------
# Registry coordinates and credentials; replace the <...> placeholders before use.
docker_registry="<YOUR_DOCKER_REGISTRY>"
docker_registry_path="YOUR_DOCKER_REGISTRY_PATH"
docker_image_name="<YOUR_DOCKER_IMAGE_NAME>"
docker_image_version="<YOUR_DOCKER_IMAGE_VERSION>"
# Fully-qualified image reference (registry/path/name).
docker_image_path="${docker_registry}/${docker_registry_path}/${docker_image_name}"
docker_container_name="<YOUR_CONTAINER_NAME>"
docker_username="<YOUR_USERNAME>"
docker_pwd="<YOUR_PASSWORD>"
# Print a blank line and the command about to run, then execute it via eval.
# Returns the executed command's exit status.
echo_and_exe() {
local joined="$*"
echo
echo "cmd: ${joined}"
eval ${joined}
return $?
}
# Build the Docker image and tag it with the full registry path.
# Exits 0 on success, 1 if 'docker build' fails.
function build () {
echo "------------------------------------"
echo "docker image name: ${docker_image_name}:${docker_image_version}"
echo "docker image path: ${docker_image_path}:${docker_image_version}"
cmd="docker build -t ${docker_image_path}:${docker_image_version} ."
echo_and_exe $cmd
if [[ $? -ne 0 ]]; then
echo "error at: ${cmd}"
exit 1
else
echo "build ${docker_image_path}:${docker_image_version} success!"
echo "------------------------------------"
fi
exit 0
}
# Run a throwaway demo container from the image, replacing any existing
# container of the same name first.
function demo () {
echo "------------------------------------"
count=`docker ps -a | grep ${docker_container_name} | wc -l`
if [[ ${count} -lt 1 ]];then
docker run -itd --name ${docker_container_name} ${docker_image_path}:${docker_image_version}
else
# A container with this name already exists — remove it (and its volumes).
docker rm -fv ${docker_container_name}
docker run -itd --name ${docker_container_name} ${docker_image_path}:${docker_image_version}
fi
echo "------------------------------------"
exit 0
}
# Log in to the registry and push the image.
# NOTE(review): --password on the command line is visible in 'ps' and shell
# history — consider 'docker login --password-stdin'.
function push () {
echo "------------------------------------"
echo "login ${docker_registry}"
docker login --username=${docker_username} ${docker_registry} --password=${docker_pwd}
docker push ${docker_image_path}:${docker_image_version}
echo "------------------------------------"
exit 0
}
# Placeholder for the project-specific 'run' action.
function run () {
echo "------------------------------------"
# your business logic
echo "------------------------------------"
exit 0
}
# Remove the demo container (and its volumes) and the local image.
function clean () {
echo "------------------------------------"
echo "will remove docker container: ${docker_container_name}"
echo "will remove docker image: ${docker_image_path}:${docker_image_version}"
echo "------------------------------------"
docker rm -fv ${docker_container_name}
docker rmi ${docker_image_path}:${docker_image_version}
echo "------------------------------------"
exit 0
}
# Dispatch on the requested action; missing or unknown actions abort with
# an error message and exit status 1.
action=$1
case "$action" in
"")
echo "[ERROR] action is missing."
exit 1
;;
build) build ;;
demo) demo ;;
push) push ;;
run) run ;;
clean) clean ;;
*)
echo "[ERROR] unsupported action type."
exit 1
;;
esac
| true
|
89e4da19e94b9eba9849d21b253e32e77cc3d1fd
|
Shell
|
TheWrightonLab/metagenome_annotation
|
/ANNOTATE_PROTEIN_FASTA.sh
|
UTF-8
| 1,507
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# ANNOTATE_PROTEIN_FASTA.sh
#
# written by Richard Wolfe
#
# $1 = protein sequence fasta file to annotate
# $2 = IPER or NO_IPER or PFAM
#
# makes a directory annotate_IPER or annotate_NO_IPER or annotate_PFAM with the results
#make sure 1 attribute
#check if varialble 2 is empty (quoted so an empty/absent value cannot break the test)
if [ -z "$2" ]
then
echo "You did not provide 2 attribute"
exit 1
fi
#check if valid string for $2
if [ "$2" == "IPER" ]
then
echo "iperscan option selected"
elif [ "$2" == "PFAM" ]
then
echo "PFAM scripts will be run"
elif [ "$2" == "NO_IPER" ]
then
echo "iperscan will not be performed"
else
echo "Variable 2 must be either IPER or NO_IPER"
exit 1
fi
#check if file exists (path quoted so filenames with spaces work)
if [ ! -f "$1" ]
then
echo "Error .. file $1 does not exist. Exiting script"
exit 1
fi
mkdir "annotate_$2"
# Abort if we cannot enter the work directory rather than running the
# pipeline in the wrong place.
cd "annotate_$2" || exit 1
#we need to rename all the headers so they are
# scaffold_0_0 ... scaffold_0_N-1
python /ORG-Data/scripts/bin/convert_velvet_to_ida_output.py -i "../$1" -o "renamed_$1"
#need to add the scaffold 0 part
sed -i "s/>scaffold_/>scaffold_0_/g" "renamed_$1"
#make the All_ContigstoPull.txt txt file so all the genes are pulled
#1 line because only 1 scaffold (scaffold_0)
echo 'ORIG_scaffold_0' > All_contigstopull.txt
#run pipeline
#we dont have .fna file so we will use NONEXIST.fna in the paramater
/ORG-Data/scripts/bin/Phylogeny_Protpipe/ANNOTATION_PIPELINE_IPER_OPTION.sh "renamed_$1" NONEXIST.fna All_contigstopull.txt ORIG "$2"
echo " "
echo "Script finished"
echo " "
| true
|
dd7abb7e5e1327cbf2eb3f522ab4563642a529b0
|
Shell
|
caninen/pyjnius
|
/.github/actions/scripts/manylinux_entrypoint.sh
|
UTF-8
| 361
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# manylinux build entrypoint: build pyjnius wheels for every interpreter in
# /opt/python, repair them with auditwheel, and leave manylinux wheels in dist/.
yum install -y java-1.7.0-openjdk-devel
for target in $(ls /opt/python/); do
python=/opt/python/$target/bin/python
$python -m pip install -U setuptools cython
$python setup.py bdist_wheel
# Drop generated build artifacts so the next interpreter builds cleanly.
git clean -dfx jnius/
done
# Rewrite the linux_* wheels into manylinux wheels (output goes to wheelhouse/).
for whl in dist/*.whl; do
auditwheel repair $whl
done
# Keep only the repaired manylinux wheels in dist/.
rm dist/*-linux_*.whl
mv wheelhouse/*.whl dist/
| true
|
1fd1fc27c279dfcff467ce94230290e50f034a55
|
Shell
|
erig0/pkgsrc-wip
|
/py-octoprint/files/octoprint.sh
|
UTF-8
| 279
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/sh
# rc.d service script (pkgsrc) for the OctoPrint daemon.
# PROVIDE: octoprint
# REQUIRE: DAEMON
$_rc_subr_loaded . /etc/rc.subr
name="octoprint"
rcvar=$name
command="@PREFIX@/bin/octoprint-3.7"
pidfile="/tmp/$name.pid"
# rc.subr matches the running process by the python interpreter that
# actually executes octoprint, not the wrapper script.
procname="@PREFIX@/bin/python3.7"
command_args="daemon start"
load_rc_config $name
run_rc_command "$1"
| true
|
873e9d8da27e4f61d6a3e1fd29fee2db421f39c4
|
Shell
|
xchwarze/AmazonProductAPI
|
/.env.example
|
UTF-8
| 356
| 2.53125
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Review "Usage" section on page:
# https://github.com/josegonzalez/php-dotenv/blob/master/README.markdown
# unquoted values containing [null, true, false] are turned into
# their PHP equivalents
# when quoted, they are simply string values
AWS_API_KEY="YOUR-AWS-KEY"
AWS_API_SECRET_KEY="YOUR-AWS-SECRET-KEY"
AWS_ASSOCIATE_TAG="YOUR-AMAZON-ASSOCIATE-ID"
| true
|
97ed5b570ad6e14a1445fa32853d1926f5306438
|
Shell
|
Okipa/laravel-utils-dotfiles
|
/server/localesInstall.sh
|
UTF-8
| 1,877
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install the project-related server locales (sources a project-custom
# localesInstall.sh), then refresh them with update-locale.
# Pass --force to skip the interactive confirmation.
serverLocalesInstallScriptDirectory=$(dirname "$(readlink -f ${BASH_SOURCE[0]})")
source $(realpath ${serverLocalesInstallScriptDirectory}/../helpers/loadScriptingColors.sh)
source $(realpath ${serverLocalesInstallScriptDirectory}/../helpers/requiresSudoRights.sh)
echo -e "${gray}=================================================${reset}\n"
[[ $1 = "--force" ]] && FORCE=true || FORCE=false
if [ "$FORCE" == false ]; then
read -p "Would you like to install the project-related server locales ? [${green}y${reset}/${red}N${reset}]" -n 1 -r
echo
fi
if [ "$FORCE" == true ] || [[ "$REPLY" =~ ^[Yy]$ ]]; then
# Run the project's own locale-install script, then regenerate locales.
localesInstallScriptPath=${serverLocalesInstallScriptDirectory}/../../.utils.custom/server/localesInstall.sh
source $(realpath ${serverLocalesInstallScriptDirectory}/../helpers/checkFileExists.sh) ${localesInstallScriptPath}
source ${localesInstallScriptPath}
echo -e "${gray}=================================================${reset}\n"
echo "${purple}▶${reset} Updating server locales ..."
echo "${purple}→ update-locale ${reset}"
update-locale
echo -e "${green}✔${reset} Server locales updated\n"
else
echo "${red}✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗${reset}"
echo "${purple}▶${reset} Locales installation aborted."
echo -e "${red}✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗✗${reset}\n"
fi
| true
|
de2bb284a604e40207ec55ca1818a03049e3f407
|
Shell
|
NagaBhushanSharma/-ProgramingConstructions
|
/RepetitionsAndFunctions/Repetitions/RepetitionsWithForLoop/PrimeRange.sh
|
UTF-8
| 642
| 4.375
| 4
|
[] |
no_license
|
#!/bin/bash
# Check if a number is prime
# check_prime N - print N if it is prime, print nothing otherwise.
# Fixes over the original: 0 and 1 were reported as prime (the trial-division
# loop never ran for them), and 'expr' spawned a process per iteration — use
# shell arithmetic instead; trial division only needs to go up to sqrt(N).
check_prime() {
current_number=$1
# 0 and 1 are not prime by definition.
if [ "$current_number" -lt 2 ]; then
return 1
fi
flag=0
i=2
while [ $(( i * i )) -le "$current_number" ]; do
if [ $(( current_number % i )) -eq 0 ]; then
# Found a divisor — not prime; no need to keep scanning.
flag=1
break
fi
i=$(( i + 1 ))
done
if [ "$flag" -eq 0 ]; then
echo "$current_number"
fi
}
# Assign From and to number
# Prompt for an inclusive range and print every prime inside it.
echo "Enter the range"
read -p "Enter Digit Which you want prim number From:" from_number
read -p "To:" to_number
# check_prime prints the number only when it is prime.
for (( number=$from_number; number<=$to_number; number++ ))
do
check_prime $number
done
| true
|
55aebc2b807f7ccb006019f79593968f6235eaac
|
Shell
|
alexey7776723/epa_tren
|
/Homework4_BashScriptin_Task5.sh
|
UTF-8
| 451
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Number-guessing game: pick a random number in [0, 99] and prompt the user
# until they guess it, hinting 'less' or 'greater' after each attempt.
# Fixes over the original: removed the unused 'win=$[]' (deprecated $[...]
# arithmetic with an empty expression), flattened the nested if into elif,
# and quoted the variables in the tests.
magicNumber=$(( RANDOM % 100 ))
## write your code here ##
read number
while :
do
if [ "$number" -lt "$magicNumber" ]; then
echo "$number"
echo 'less'
elif [ "$number" -gt "$magicNumber" ]; then
echo "$number"
echo 'greater'
else
# number == magicNumber
echo "$number"
echo 'You win!'
break
fi
read number
done
| true
|
53da450516d96a9442f5a16ac36c2cf8f5f410f5
|
Shell
|
BeeeOn/server
|
/dist/rpm/pre_uninstall.sh
|
UTF-8
| 257
| 3.125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#! /bin/sh
# RPM pre-uninstall scriptlet for beeeon-server.
# $1 is the count of package instances remaining after this transaction:
# 0 = full uninstall, 1 = upgrade (service left running).
case "$1" in
0) # uninstall
echo "stopping service beeeon-server..."
/usr/bin/systemctl stop beeeon-server.service \
&& echo "service beeeon-server has been stopped" \
|| echo "failed to stop beeeon-server service"
;;
1) # upgrade
;;
esac
| true
|
6a7016311175dac0d6d81129aabf450761f58b70
|
Shell
|
smartcontractkit/wirelogd
|
/DEBIAN/postinst
|
UTF-8
| 540
| 3.28125
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/sh
# Debian postinst for wirelogd: on fresh install ($2 empty), create the
# system user, grant it read access to /etc/wireguard, and enable+start the
# service. On upgrade ($2 = previous version) only daemon-reload runs.
set -e
case "$1" in
configure)
systemctl daemon-reload
if [ -z "$2" ]; then
if ! getent passwd wirelogd >>/dev/null 2>&1 ; then
useradd --home-dir /var/run/wirelogd --shell /usr/sbin/nologin --system --user-group wirelogd
# setfacl may be absent; skip the ACL grant in that case.
if command -v setfacl >>/dev/null 2>&1 ; then
setfacl -m u:wirelogd:rX,g:wirelogd:rX /etc/wireguard
fi
fi
systemctl enable --now wirelogd.service
fi
;;
*)
;;
esac
| true
|
a0450ae97339b8e47c2c2802c149c27aeadb9163
|
Shell
|
exceptionless/Exceptionless
|
/build/update-config.sh
|
UTF-8
| 1,977
| 3.296875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/bin/bash
# Generate the Exceptionless UI's Angular app.config.<checksum>.js from
# EX_* environment variables and point index.html at the new file.
ApiUrl="${EX_ApiUrl:-}"
Html5Mode="${EX_Html5Mode:-false}"
EnableSsl="${EX_EnableSsl:-false}"
EnableAccountCreation="${EX_EnableAccountCreation:-true}"
OAuth="${EX_ConnectionStrings__OAuth:-}"
# OAuth is a semicolon-separated list of Key=Value pairs; split and map the
# known provider keys onto *_AppId variables.
IFS=';' read -a oauthParts <<< "$OAuth"
for part in ${oauthParts[@]}
do
key="$( cut -d '=' -f 1 <<< $part )"; echo "key: $key"
value="$( cut -d '=' -f 2- <<< $part )"; echo "value: $value"
if [ "$key" == "FacebookId" ]; then
FacebookAppId=$value
fi
if [ "$key" == "GitHubId" ]; then
GitHubAppId=$value
fi
if [ "$key" == "GoogleId" ]; then
GoogleAppId=$value
fi
if [ "$key" == "IntercomId" ]; then
IntercomAppId=$value
fi
if [ "$key" == "MicrosoftId" ]; then
MicrosoftAppId=$value
fi
if [ "$key" == "SlackId" ]; then
SlackAppId=$value
fi
done
config_header="(function () {
'use strict';
angular.module('app.config', [])"
config="
.constant('BASE_URL', '$ApiUrl' || window.location.origin)
.constant('EXCEPTIONLESS_API_KEY', '$EX_ExceptionlessApiKey')
.constant('EXCEPTIONLESS_SERVER_URL', '$EX_ExceptionlessServerUrl')
.constant('FACEBOOK_APPID', '$FacebookAppId')
.constant('GITHUB_APPID', '$GitHubAppId')
.constant('GOOGLE_APPID', '$GoogleAppId')
.constant('INTERCOM_APPID', '$IntercomAppId')
.constant('LIVE_APPID', '$MicrosoftAppId')
.constant('SLACK_APPID', '$SlackAppId')
.constant('STRIPE_PUBLISHABLE_KEY', '$EX_StripePublishableApiKey')
.constant('SYSTEM_NOTIFICATION_MESSAGE', '$EX_NotificationMessage')
.constant('USE_HTML5_MODE', $Html5Mode)
.constant('USE_SSL', $EnableSsl)
.constant('ENABLE_ACCOUNT_CREATION', $EnableAccountCreation);"
config_footer="
}());"
echo "Exceptionless UI Config"
echo "$config"
# Cache-busting checksum over the generated constants block.
checksum=`echo -n $config | md5sum | cut -c 1-32`
echo "$config_header$config$config_footer" > "app.config.$checksum.js"
# Rewrite the script reference in index.html to the new checksummed filename.
CONTENT=$(cat index.html)
echo "$CONTENT" | sed -E "s/app\.config\..+\.js/app.config.$checksum.js/" > index.html
| true
|
f3575e717e21e6b1b058a9d6456cd2d64969ced5
|
Shell
|
oslet/shell
|
/fruit-cmdline.sh
|
UTF-8
| 76
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print an "I really like <fruit>s" line for every fruit given on the
# command line.
# Fixes: use "$@" so multi-word arguments stay intact (unquoted $* word-splits
# them), and correct the 'readlly' typo in the output message.
for fruit in "$@"
do
echo "I really like ${fruit}s"
done
| true
|
032a5ce0f681d4c44a286f842b3475a55bd40095
|
Shell
|
laritakr/dotzshrc
|
/configs/aliases.zsh
|
UTF-8
| 2,675
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
# For bare-metal emacs
#
# There are three context's that I consider for using my text editor
# of choice:
#
# * EDITOR - uh? I forget. I think it's less used for my cases. (I
# use VISUAL and GIT_EDITOR more often.)
# * VISUAL - this is for visual paging of things like man-pages or
# grep results
# * GIT_EDITOR - this is used for editing your commit messages
export EDITOR='editor'
export GIT_EDITOR='git_editor'
# Prompt for confirmation
alias e=$EDITOR
alias dr="dired.sh"
alias edaemon='editor-daemon'
alias org="$EDITOR ~/git/org/agenda.org"
alias tor-sync="$EDITOR --eval \"(tor-sync)\""
alias rm='rm -i'
alias file-count="find . -type f -print | wc -l"
# Git / GitHub shortcuts
alias bx="bundle exec"
alias gl='git lg'
alias gd='git diff'
alias gst='git st'
alias gb='git branch -vv'
alias gwc='git whatchanged -p --abbrev-commit --pretty=medium'
alias hb="hub browse"
alias psx="ps ax | ag $1"
alias rss="$EDITOR --eval \"(rss)\""
# For pandoc on Apple Silicon chips
alias pand="arch -x86_64 pandoc"
alias ssh-tor="ssh takeonrules_takeonrules@ssh.phx.nearlyfreespeech.net"
alias ledger-balance="bean-report ~/git/org/projects/jeremy-friesen-consulting/ledger.beancount balances"
# Hyrax aliases
# NOTE(review): these start fcrepo/solr/redis as background jobs in one line;
# hyrax-devdown tears the wrappers back down.
alias hyrax-devup='cp $HOME/git/dotzshrc/hyrax/solr_wrapper_dev.yml $HOME/git/samvera/hyrax/.internal_test_app/config/ ; cd $HOME/git/samvera/hyrax/.internal_test_app ; rm -rf tmp/solr-development ; fcrepo_wrapper & solr_wrapper --config config/solr_wrapper_dev.yml & redis-server &'
alias hyrax-testup='cd $HOME/git/samvera/hyrax/.internal_test_app ; rm -rf tmp/solr-valkyrie-test/server/solr/hyrax-valkyrie-test ; rm -rf tmp/solr-test/server/solr/hydra-test ; fcrepo_wrapper --config config/fcrepo_wrapper_test.yml & solr_wrapper --config config/solr_wrapper_test.yml & solr_wrapper --config config/solr_wrapper_valkyrie_test.yml & redis-server &'
alias hyrax-old-testup='cd $HOME/git/samvera/hyrax/.internal_test_app ; rm -rf tmp/solr-valkyrie-test/server/solr/hyrax-valkyrie-test ; rm -rf tmp/solr-test/server/solr/hydra-test ; fcrepo_wrapper --config config/fcrepo_wrapper_test.yml & solr_wrapper --config config/solr_wrapper_test.yml & redis-server &'
alias hyrax-devdown='pkill -f solr_wrapper & pkill -f fcrepo_wrapper & redis-cli shutdown'
alias sqlite-browser="/Applications/DB\ Browser\ for\ SQLite.app/Contents/MacOS/DB\ Browser\ for\ SQLite"
# SSH Tunnel:
# ssh libvirt6.library.nd.edu -L 8080:localhost:8080
# macOS DNS cache flush and network diagnostics
alias dns-flush="sudo dscacheutil -flushcache; sudo killall -HUP mDNSResponder"
alias net_traffic="lsof -r -i"
# `awsassumerole testlibnd-superAdmin` (see ~/.aws/config for profile)
# Assume an AWS role via aws-vault and export the resulting AWS_* credential
# variables into the current shell.
#   $1 - profile name declared in ~/.aws/config
awsassumerole(){
unset AWS_VAULT
# "$1" is quoted (profile names could contain odd characters); the $(...)
# expansion is deliberately left unquoted so each AWS_FOO=bar line from env
# becomes its own argument to `export`.
export $(aws-vault exec "$1" --assume-role-ttl 1h -- env | grep AWS)
}
| true
|
e76beb619d4f9c25b66fbc6b4a7ad2d06016d89f
|
Shell
|
hypervised/K8S
|
/provisioner/EFS/EFS_Provisioner.sh
|
UTF-8
| 1,896
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash -e
#Attach EFS to K8S / microk8s
#run syntax is sudo /bin/bash EFS_Provisioner.sh <EFS DNS Name> <EFS File System ID> <region>
#example sudo sh EFS_Provisioner.sh fs-12345678.efs.us-east-1.amazonaws.com fs-12345678 us-east-1
#dont forget to run chmod 555 EFS_Provisioner.sh
#$1=DNS name of EFS, example fs-12345678.efs.us-east-1.amazonaws.com
#$2= The file system id for efs, example fs-12345678
#$3= the region the EFS is located in
#install nfs tools and mount the file system
sudo apt install -yqq nfs-common
# -p: do not fail (script runs with -e) when the directory already exists
sudo mkdir -p efs
sudo mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 "$1":/ efs
#get manifests
# wget already saves each file under its URL basename. The old
# `wget URL > file.yaml` truncated that name first via the shell redirect,
# so wget actually wrote file.yaml.1 and left the redirect target empty.
wget https://raw.githubusercontent.com/hypervised/K8S/master/provisioner/EFS/claim.yaml
wget https://raw.githubusercontent.com/hypervised/K8S/master/provisioner/EFS/class.yaml
wget https://raw.githubusercontent.com/hypervised/K8S/master/provisioner/EFS/configmap.yaml
wget https://raw.githubusercontent.com/hypervised/K8S/master/provisioner/EFS/deployment.yaml
wget https://raw.githubusercontent.com/hypervised/K8S/master/provisioner/EFS/rbac.yaml
wget https://raw.githubusercontent.com/hypervised/K8S/master/provisioner/EFS/test-pod.yaml
#update the yaml files with your EFS info
# Double quotes let $1/$2/$3 expand (single quotes passed the literal text
# "$1" into the files); '|' as the sed delimiter keeps values with '/' safe.
sudo sed -i "/dns.name:/s|REPLACEDNAME|$1|g" configmap.yaml
sudo sed -i "/file.system.id:/s|REPLACEFSID|$2|g" configmap.yaml
sudo sed -i "/dns.name:/s|REPLACEREGION|$3|g" configmap.yaml
sudo sed -i "/server:/s|REPLACESERVER|$1|g" deployment.yaml
#deploy the manifests
sudo microk8s.kubectl apply -f class.yaml
sudo microk8s.kubectl apply -f configmap.yaml
sudo microk8s.kubectl apply -f rbac.yaml
sudo microk8s.kubectl apply -f deployment.yaml
sudo microk8s.kubectl apply -f claim.yaml
sudo microk8s.kubectl apply -f test-pod.yaml
| true
|
2651ad176d72c946e27c8bbee3b87298bff0d37c
|
Shell
|
mkutr/Avister
|
/auto_commit_push_config.sh
|
UTF-8
| 1,119
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# Auto-commit the current working tree onto a "backup" branch and push it,
# restoring the original branch afterwards.
timestamp() {
date +"at %H:%M:%S on %d/%m/%Y"
}
# Fix: was `temp=$(echo git branch --show-current)`, which stored the literal
# command text instead of running git.
temp=$(git branch --show-current)
echo "$temp"
# Refuse to run while already on the backup branch.
if [ "$temp" = "backup" ]
then
echo "error"
exit 1
fi
echo "mowsi"
# `git show-ref --verify --quiet` exits 0 when refs/heads/backup exists.
# (The old code tested `[$? == 0]` — a syntax error that always took the
# else branch — and had the EXISTS / NOT EXISTS messages swapped.)
if git show-ref --verify --quiet refs/heads/backup
then
printf "EXISTS\n"
# Carry local changes over to the existing backup branch.
git stash
git checkout backup
git stash pop
else
printf "NOT EXISTS\n"
# Create the backup branch from a stash, commit, push, and return.
git stash save "backup_stash"
git stash branch temp-backup-branch
git commit -am "Regular auto-commit $(timestamp)"
# -b: the backup branch does not exist yet on this path, so create it.
git checkout -b backup
git merge temp-backup-branch
git branch -D temp-backup-branch
git push --set-upstream origin backup
git checkout "$temp"
# `git stash pop` takes a stash ref (stash@{n}), not a save message;
# pop the most recent entry.
git stash pop
fi
| true
|
9f9ead40b2bf41c86f8af8498b1f57c9d7080063
|
Shell
|
gigayak/buildsystem
|
/pkgspecs/dart-sdk.make.sh
|
UTF-8
| 2,519
| 3.15625
| 3
|
[
"OPUBL-1.0",
"curl",
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the Dart SDK from source via Chromium's depot_tools / gclient.
# Expects $YAK_WORKSPACE (build workspace directory) to be set by the caller.
set -Eeo pipefail
version=1.12.2
echo "$version" > "$YAK_WORKSPACE/version"
cd "$YAK_WORKSPACE"
# TODO: Break out depot_tools into its own package
git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
export PATH="$PATH:$YAK_WORKSPACE/depot_tools"
# Point depot_tools at python2.7 :[
# (This is an AWFUL way to do it!)
ln -sfv python2.7 /usr/bin/python
# The following would be better if I ever got it working:
#grep -lRE '^#!.*python' "$YAK_WORKSPACE/depot_tools" \
# | xargs -I{} -- \
# sed -r \
# -e 's@^(#!.*python)(.*)$@\12.7\2@g' \
# -i {}
#grep -LRE '^#!.*python' "$YAK_WORKSPACE/depot_tools" \
# | xargs -I{} -- \
# sed -r \
# -e 's@python([^2])@python2.7\1@g' \
# -e 's@python$@python2.7@g'
# Prevent "cd *-*/" from slurping up "depot_tools/"
mkdir dart-sdk
cd dart-sdk
gclient.py config https://github.com/dart-lang/sdk.git
# TODO: Maybe use an API here if GitHub has one that doesn't require special
# API keys?
# The VERSION endpoint returns the revision to sync; stored verbatim in $hash.
archive="http://gsdview.appspot.com/dart-archive/channels/stable/release"
hash="$(curl "$archive/$version/VERSION")"
#hash="$(curl "https://github.com/dart-lang/sdk/releases/tag/$version" \
# | sed -nre 's@^.*href="[^"]+/commit/([0-9a-f]+)".*$@\1@gp')"
# --jobs=1 is an attempt to rate limit some of the downloading, as
# this command was failing nondeterministically without much in the form
# of error messages...
gclient.py sync --revision="sdk@$hash" --jobs=1
cd sdk/
# TODO: Fix this issue upstream. Addresses:
#   runtime/bin/builtin_natives.cc: In function
#   'void dart::bin::Builtin_Builtin_PrintString(Dart_NativeArguments)':
#   runtime/bin/builtin_natives.cc:95:35: error: ignoring return value of
#   'size_t fwrite(const void*, size_t, size_t, FILE*)', declared with
#   attribute warn_unused_result [-Werror=unused-result]
#   fwrite(chars, 1, length, stdout);
#   ^
#   cc1plus: all warnings being treated as errors
#
# This sed line prevents this warning from being treated as an error.
# It does not silence the warning, though, in the hopes that it will be
# fixed one day.
#sed \
# -re 's@^(\s*)(-Wextra)(\s*\\)$@\1\2\3\n\1-Wno-error=unused-result\3@g' \
# -i runtime/dart_bootstrap.host.mk
# Inject -Wno-error=unused-result into both the gyp and GN build definitions.
sed \
-re 's@^(.*-Wnon-virtual-dtor.*)$@\1\n'"'"'-Wno-error=unused-result'"'"',@g' \
-i runtime/bin/bin.gypi
sed \
-re 's@^(.*-Wnon-virtual-dtor.*)$@\1\n"-Wno-error=unused-result",@g' \
-i runtime/BUILD.gn
tools/build.py \
--mode=release \
--arch=x64 #ia32
| true
|
49999611b8b29354516b6371da6944c1ca2d24d7
|
Shell
|
guilhermedelima/study
|
/linux-commands/Scripts/tomcat
|
UTF-8
| 698
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
#Script para prover Tomcat como servico (SysVinit)
CATALINA_HOME=/opt/apache-tomcat-7.0.39
TOMCAT_DAEMON=bin/startup.sh
TOMCAT_STOP=bin/shutdown.sh
# Print the PID(s) of the running Tomcat JVM on stdout (empty when stopped).
get_pid(){
ps ax | grep tomcat-7.0.39 | grep -v grep | awk '{print $1}'
}
# Start Tomcat unless it is already running.
# $PID must be quoted: unquoted, an empty value or multiple PIDs (one per
# line) breaks the [ -z ] test.
start(){
if [ -z "$PID" ] ; then
"$CATALINA_HOME/$TOMCAT_DAEMON"
else
echo "Tomcat is already running"
fi
}
# Shut Tomcat down if a PID was found.
stop(){
if [ -z "$PID" ] ; then
echo "Tomcat is not running yet"
else
"$CATALINA_HOME/$TOMCAT_STOP"
fi
}
# Report the current PID(s), or a friendly message when stopped.
print_pid(){
if [ -z "$PID" ] ; then
echo "Tomcat is not running"
else
echo "$PID"
fi
}
PID=$(get_pid)
case "$1" in
start)
start
;;
stop)
stop
;;
pid)
print_pid
;;
*)
echo "Tomcat service - start|stop|pid"
;;
esac
| true
|
7840ee1d66c584addcea70c884a80d715b626b42
|
Shell
|
ringly/ios
|
/Tools/appicons.sh
|
UTF-8
| 1,387
| 3.3125
| 3
|
[] |
no_license
|
WORKSPACE_DIRECTORY="$(dirname "$0")/.."
# Fix: was `echo WORKSPACE_DIRECTORY`, which printed the literal variable
# name instead of its value.
echo "$WORKSPACE_DIRECTORY"
# set the build version if specified
if [ -z "${BUILD_NUMBER+null}" ]; then
echo "No build number set. This is okay - will build without modifying “Info.plist” or badging icon."
else
/usr/libexec/PlistBuddy -c "Set CFBundleVersion ${BUILD_NUMBER}" "${WORKSPACE_DIRECTORY}/Ringly/Ringly/Ringly-Info.plist"
# badge the icon files with information
HASH=$(/usr/bin/git rev-parse --short HEAD)
VERSION=$(/usr/libexec/PlistBuddy -c "Print :CFBundleShortVersionString" "${WORKSPACE_DIRECTORY}/Ringly/Ringly/Ringly-Info.plist")
echo "${WORKSPACE_DIRECTORY}"
# Iterate NUL-delimited so icon paths containing spaces survive
# (the old `for line in $(find …)` word-split each path).
find ../Ringly/Ringly/Images.xcassets/AppIcon.appiconset -iname "*.png" -print0 |
while IFS= read -r -d '' line; do
WIDTH=$(/usr/local/bin/identify -format %w "${line}")
HEIGHT=$(/usr/local/bin/identify -format %h "${line}")
# Banner occupies the bottom 2/5 of the icon; text scaled to icon height.
START_Y=$(( HEIGHT - HEIGHT * 2 / 5 ))
POINTSIZE=$(( HEIGHT / 6 ))
/usr/local/bin/mogrify \
-fill 'rgba(0,0,0,0.5)' \
-draw "rectangle 0,${START_Y} ${WIDTH},${HEIGHT}" \
-fill 'rgb(255, 255, 255)' \
-font '/Library/Fonts/Arial.ttf' \
-pointsize ${POINTSIZE} \
-antialias \
-gravity South \
-draw "text 0,0 '${HASH}'" \
-gravity North \
-draw "text 0,${START_Y} '${VERSION} • #${BUILD_NUMBER}'" \
"${line}"
done
fi
| true
|
c12d0e6814b505afe0297562f39bd9a7fcfff304
|
Shell
|
Yaroslav-95/swayrice
|
/dotfiles/.local/bin/ftv
|
UTF-8
| 337
| 3.703125
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/sh
# Script to start a new textfile from template
#   $1 - template filename (relative to $templdir)
#   $2 - new file to create
templdir="$HOME/docs/templates/"
if [ -z "$2" ]; then
# Usage errors report failure (the old script exited 0) and go to stderr.
echo "Specify template filename and new filename" >&2
exit 1
fi
template="$templdir$1"
file=$(readlink -f "$2")
if [ ! -f "$template" ]; then
echo "Template $1 does not exist" >&2
exit 1
fi
# Quote both paths and use -- so names with spaces or leading '-' work.
cp -- "$template" "$file"
nvim "$file"
| true
|
01c946b91c90b5ec9067276fe85abd482a7584e5
|
Shell
|
tuyen81/self_learning
|
/shell/running_package/testcases/coreutils/touch.sh
|
UTF-8
| 1,074
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
#==============================================================================
# DESCRIPTION: Test script is test to 'touch' command.
# In test script 'touch' command used to check timestamps of a sample.txt file
# The sample.txt file is created with size is 0 byte
#
# Expects the harness to provide: ${data_dir}, ${log_file}, assert_passed.
#==============================================================================
check=0
# Create a directory and a sample.txt file with size is 0 byte for testing
mkdir -p "${data_dir}"
touch "${data_dir}/sample.txt"
# Check timestamps of sample.txt file.
# `if ! cmd` replaces the fragile `cmd; if [ $? != "0" ]` pattern.
if ! ls -l "${data_dir}/sample.txt" > "${log_file}" 2>&1; then
check=1
fi
# Wait for 2 seconds, update timestamps of sample.txt file
sleep 2
if ! touch "${data_dir}/sample.txt" >> "${log_file}" 2>&1; then
check=1
fi
# Check timestamps of sample.txt file
if ! ls -l "${data_dir}/sample.txt" >> "${log_file}" 2>&1; then
check=1
fi
# Check result of command touch with expected output
assert_passed $check 0
# Remove directory and a sample.txt file for testing
rm -rf "${data_dir}"
| true
|
591cb81e9c391700ad1969ec4cb877e074a470b0
|
Shell
|
datou0412/qyp-portal
|
/bin/nodejsctl
|
UTF-8
| 1,929
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# SysVinit-style control script for the Node.js portal app.
# Usage: nodejsctl {start|stop|restart}
export PATH=/opt/taobao/install/node.js/bin:/bin:/usr/bin:$PATH
cd "$(dirname "$0")"/..
BIN_HOME=$(pwd)
echo "${BIN_HOME}"
BASE_HOME=${BIN_HOME}
PROJECT_NAME=$(basename "${BASE_HOME}")
PROJECT_ROOT=${BASE_HOME}
PROJECT_START=bin/app.js
NODEJS_BIN=node
STDOUT_LOG=${BASE_HOME}/../log/nodejs.log
# Allow core dumps for post-mortem debugging.
ulimit -c unlimited
export ENABLE_NODE_LOG=YES
export NODE_LOG_DIR=${BASE_HOME}/logs/
if [[ -d ${BASE_HOME}/target/nodejs ]]; then
NODEJS_APP_DIR=${BASE_HOME}/target/nodejs
else
NODEJS_APP_DIR=${BASE_HOME}/target/${PROJECT_NAME}
fi
# Prefer a node binary bundled with the app over the system one.
CUSTOM_NODEJS_BIN=${NODEJS_APP_DIR}/node_modules/node/bin/node
if [[ -f ${CUSTOM_NODEJS_BIN} ]]; then
NODEJS_BIN=${CUSTOM_NODEJS_BIN}
chmod a+x "${NODEJS_BIN}" # In case it's not executable
fi
# NODE_ENV comes from bin/.env when present, otherwise "production".
if [[ -f ${NODEJS_APP_DIR}/bin/.env ]]; then
export NODE_ENV=$(cat "${NODEJS_APP_DIR}/bin/.env")
else
export NODE_ENV="production"
fi
PROG_NAME=$0
ACTION=$1
usage() {
echo "Usage: $PROG_NAME {start|stop|restart}"
exit 1;
}
if [[ $# -lt 1 ]]; then
usage
fi
# Stores the PID(s) of the running app in $PID (empty when stopped).
function get_pid {
PID=$(ps ax | grep "${NODEJS_BIN}" | grep -v grep | grep "${PROJECT_START}" | awk '{print $1}')
}
function prepare {
get_pid
}
#start nodejs
start(){
prepare
if [[ -z $PID ]]; then
echo "Using node/iojs@$("${NODEJS_BIN}" -v)(${NODEJS_BIN})."
cd "${PROJECT_ROOT}"
# Launch via ${PROJECT_START} — was a hard-coded duplicate "bin/app.js"
# that could silently diverge from the variable used by get_pid.
nohup "${NODEJS_BIN}" --harmony ${PROJECT_START} >> "$STDOUT_LOG" 2>&1 &
sleep 5
get_pid
echo "Start nodejs success. PID=$PID"
else
echo "${PROJECT_NAME} is already running, PID=$PID"
fi
}
stop(){
prepare
if [[ ! -z "$PID" ]]; then
echo "Waiting ${PROJECT_NAME} stop for 5s ..."
kill -15 $PID
sleep 5
else
echo "${PROJECT_NAME} is not running"
fi
}
case "$ACTION" in
start)
start
;;
stop)
stop
;;
restart)
stop
start
;;
*)
usage
;;
esac
| true
|
9383613e0e686973ef3b1a562b72eb7a7443b487
|
Shell
|
aither64/build-vpsfree-templates
|
/include/opensuse.sh
|
UTF-8
| 1,105
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# openSUSE template bootstrap for vpsFree container images.
# Relies on common.sh for $DISTNAME, $RELVER, $INSTALL and configure-append.
. $BASEDIR/include/common.sh
# Leap and rolling/older releases publish repos under different URL layouts.
if [ $DISTNAME == "openSUSE-leap" ]; then
REPOSITORY=http://download.opensuse.org/distribution/leap/$RELVER/repo/oss/
UPDATES=http://download.opensuse.org/update/leap/$RELVER/oss/
else
REPOSITORY=http://download.opensuse.org/distribution/$RELVER/repo/oss/
UPDATES=http://download.opensuse.org/update/$RELVER/
fi
EXTRAPKGS='vim'
REMOVEPKGS='apache2-utils apache2-prefork apache2 postfix'
# All zypper calls operate on the chroot at $INSTALL, non-interactively.
ZYPPER="zypper -v --root=$INSTALL --non-interactive --no-gpg-checks "
# Install the base system plus extras into the target root.
function bootstrap {
$ZYPPER addrepo --refresh $REPOSITORY openSUSE-oss
$ZYPPER addrepo --refresh $UPDATES openSUSE-updates
$ZYPPER install openSUSE-release
$ZYPPER install -t pattern base sw_management
$ZYPPER install $EXTRAPKGS
$ZYPPER rm $REMOVEPKGS
}
# Queue container-friendly tweaks: classic networking instead of
# NetworkManager, locked root password, sshd enabled.
function configure-opensuse {
configure-append <<EOF
systemctl disable NetworkManager.service
systemctl disable NetworkManager-wait-online.service
systemctl disable NetworkManager-dispatcher.service
systemctl enable network.service
usermod -L root
systemctl enable sshd
EOF
}
| true
|
d19d1b573b1a5080df97100c1e215399997f76bc
|
Shell
|
tomyuk/zkit
|
/setup.d/default.sh
|
UTF-8
| 612
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
# -*- bash-script -*-
#
# default of common settings
#
# Fill in FULLNAME from the passwd GECOS field when the caller has not set it.
# Match the username field exactly: the old `fgrep $USER /etc/passwd` matched
# the name anywhere on any line (e.g. inside another user's home path).
if [[ -z $FULLNAME ]]; then
FULLNAME=$(awk -F: -v u="$USER" '$1 == u { print $5; exit }' /etc/passwd)
fi
# Default e-mail address: user@host.
if [[ -z $EMAIL ]]; then
EMAIL="${USER}@${HOSTNAME}"
fi
# Pick a GPG signing key: prefer gpg2, fall back to gpg, then take the
# default-key from gpg.conf or the first public key listed.
if [[ -z $GPG_SIGNER ]]; then
gpg=  # reset so a stale value inherited from the environment cannot leak in
if __zkit_have gpg2; then
gpg=gpg2
elif __zkit_have gpg; then
gpg=gpg
fi
if [[ -n $gpg ]]; then
if [[ -r ${HOME}/.gnupg/gpg.conf ]]; then
GPG_SIGNER=$(awk '/^default-key/ { print $2 }' ${HOME}/.gnupg/gpg.conf)
fi
if [[ -z $GPG_SIGNER ]]; then
GPG_SIGNER=$( $gpg -k 2>/dev/null |
awk '/^pub/ { split($2,x,"/"); print x[2]; exit }' )
fi
fi
fi
| true
|
4538039aba93893a5371fdfe02e58c44574bb57b
|
Shell
|
BirdmanAtHome/knfsd-cache-utils
|
/deployment/terraform-module-knfsd/resources/monitoring/export-root.sh
|
UTF-8
| 3,103
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: the original first line was "# !/bin/bash" — the space made it a
# plain comment, not a shebang.
# Export one /proc/slabinfo statistic to a file CollectD can read.
#   $1 - slab name to match (nfs_inode | dentry)
#   $2 - slabinfo column (2 = active objects, 4 = object size)
#   $3 - output file name under /statsexport
# The ext4_fc_dentry_update exclusion keeps the "dentry" pattern from
# matching a second slab line (the old code applied this filter only in
# the refresh loop, not to the initial values).
export_slab_stat() {
  local value
  value=$(grep "$1" /proc/slabinfo | grep -v ext4_fc_dentry_update | awk -v col="$2" '{print $col}')
  echo "$value" > "/statsexport/$3"
}
# Make a directory for stats export and pre-create the stat files so
# CollectD (running as nobody) can read them immediately.
mkdir -p /statsexport
for f in nfs_inode_cache_active_objects nfs_inode_cache_objsize \
         dentry_cache_active_objects dentry_cache_objsize; do
  touch "/statsexport/$f"
  chown "nobody" "/statsexport/$f"
done
# Populate initial values.
export_slab_stat nfs_inode 2 nfs_inode_cache_active_objects
export_slab_stat nfs_inode 4 nfs_inode_cache_objsize
export_slab_stat dentry 2 dentry_cache_active_objects
export_slab_stat dentry 4 dentry_cache_objsize
# Loop that runs and updates the files with updated values every 60 seconds.
while sleep 60
do
  export_slab_stat nfs_inode 2 nfs_inode_cache_active_objects
  export_slab_stat nfs_inode 4 nfs_inode_cache_objsize
  export_slab_stat dentry 2 dentry_cache_active_objects
  export_slab_stat dentry 4 dentry_cache_objsize
done
| true
|
23537322795b492db999362d3075f959d4817217
|
Shell
|
serhaninci/MicroservicesNode
|
/_docker_setup/reset.sh
|
UTF-8
| 503
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Remove the swarm services, then delete the service images on every node.
eval `docker-machine env manager1`
docker service rm movies-service notification-service cinema-catalog-service payment-service booking-service
for server in manager1 worker1 worker2
do
# Point the docker client at this node.
eval `docker-machine env $server`
for image in crizstian/movies-service crizstian/cinema-catalog-service crizstian/booking-service crizstian/payment-service crizstian/notification-service
do
IMAGE=$(docker images "$image" -q)
# Skip images that are not present on this node: `docker rmi -f ""`
# would error out. $IMAGE is left unquoted on purpose — it may hold
# several IDs (one per line) that must be passed as separate args.
[ -n "$IMAGE" ] && docker rmi -f $IMAGE
done
done
| true
|
a382abf7c9fa971ae4588a4e7fda35cc8595f020
|
Shell
|
toy/rbenv-use
|
/bin/rbenv-sh-use
|
UTF-8
| 1,080
| 3.9375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Usage: rbenv use NAME [--global|--shell|--local]
#
# Switches Ruby versions without having to keep patchlevels in mind.
set -e
[ -n "$RBENV_DEBUG" ] && set -x
# Resolving fuzzy names (e.g. "2.7" -> "2.7.8") is delegated to the
# rbenv-whatis plugin; bail out with install instructions if it is missing.
if ! type rbenv-whatis &>/dev/null; then
echo "rbenv-whatis plugin not installed, please run:" >&2
echo -e "\n\tgit clone https://github.com/toy/rbenv-whatis.git $RBENV_ROOT/plugins/whatis\n" >&2
exit 1
fi
# Provide rbenv completions
if [ "$1" = "--complete" ]; then
echo system
exec rbenv-versions --bare
fi
# Parse arguments: one arg -> shell scope ("default" unsets the shell
# version); two args -> the second selects global/shell/local scope.
case $# in
1)
if [ "$1" = default ]; then
_ruby=--unset
else
_ruby=$1
fi
_command=shell;;
2)
_ruby=$1
case $2 in
--global|-g|--default|-d) _command=global;;
--shell|-s) _command=shell;;
--local|-l) _command=local;;
esac;;
*)
echo "usage: rbenv use NAME [OPTION]" >&2
exit 1;;
esac
# Normalize the requested name to an installed version.
_ruby=$(command rbenv whatis --installed "$_ruby")
# shell scope is handled directly; other scopes emit a command for the
# calling shell wrapper to eval (unset shell version, then set scope).
if [ "$_command" = shell ]; then
rbenv sh-shell "$_ruby"
else
echo "eval $(rbenv sh-shell --unset) && command rbenv $_command $_ruby"
fi
| true
|
7078788d6a111b36212e03e6421c451b935a94d9
|
Shell
|
jerbowes/saltstack
|
/salt/satools/files/root/bin/ucheck
|
UTF-8
| 418
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
#-----------------------------------------------------------------
# Scan ps every 5 minutes to see if u16sentry is running,
# Restart if not
# Chmod 600 to all u16sentry logs in /tmp
#-----------------------------------------------------------------
# $(...) replaces the legacy backticks; `grep -v grep` keeps the pipeline
# from matching its own grep process.
if test -z "$(ps -ef | grep u16sentry | grep -v grep)"
then
/root/bin/u16sentry &
sleep 1
chmod 600 /tmp/u16sentry*.*
chmod 600 /var/log/u16sentry.log
fi
| true
|
6f3339b08652165bbcfc3ee3dc1c5f0b7d7afe22
|
Shell
|
TileDB-Inc/TileDB-Presto
|
/docker/entrypoint.sh
|
UTF-8
| 458
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: make sure the Presto server is up, wait until it
# answers queries, then drop into the Presto CLI on the tiledb catalog.
# Start the launcher only if it is not already running.
if ! ${PRESTO_HOME}/bin/launcher status; then
${PRESTO_HOME}/bin/launcher start;
sleep 2;
fi
printf "Waiting for presto to initialize.."
# Poll with a trivial query until the coordinator responds.
until ${PRESTO_HOME}/bin/presto-cli-${PRESTO_VERSION}-executable.jar --execute 'SELECT * FROM system.runtime.nodes' &> /dev/null ;
do
printf ".";
sleep 1;
printf ".";
sleep 1;
done
printf "\n"
# Hand any container arguments straight to the CLI.
${PRESTO_HOME}/bin/presto-cli-${PRESTO_VERSION}-executable.jar --schema tiledb --catalog tiledb "$@"
| true
|
7eb045f462a64221e2beb89151c7dd4e5e6ffdb2
|
Shell
|
SUNILBM123/Infa_10.1.1_BDMSolution
|
/Infa_10.1.1_BDMSolution/hdireconfigure.sh
|
UTF-8
| 10,203
| 3.203125
| 3
|
[] |
no_license
|
# Positional-parameter bindings for the HDInsight reconfiguration run.
HDIClusterName=${1}
HDIClusterLoginUsername=${2}
HDIClusterLoginPassword=${3}
HDIClusterSSHHostname=${4}
HDIClusterSSHUsername=${5}
HDIClusterSSHPassword=${6}
blazeworkingdir=${7}
SPARK_HDFS_STAGING_DIR=${8}
SPARK_EVENTLOG_DIR=${9}
osUserName=${10}
osPwd=${11}
domainHost=${12}
domainusername=${13}
domainpassword=${14}
infahome=${15}
# Fix: was `domainname={16}`, which stored the literal string "{16}"
# instead of the sixteenth argument.
domainname=${16}
debianlocation=/opt/Informatica/Archive/debian/InformaticaHadoop-10.1.1U2-Deb
# Drop the existing Informatica cluster connections so BDMUtil can
# recreate them against the new HDInsight cluster.
removeconnection()
{
cd $infahome/isp/bin
sh infacmd.sh removeConnection -dn $domainname -un $domainusername -pd $domainpassword -cn HADOOP
sh infacmd.sh removeConnection -dn $domainname -un $domainusername -pd $domainpassword -cn HBASE
sh infacmd.sh removeConnection -dn $domainname -un $domainusername -pd $domainpassword -cn HDFS
sh infacmd.sh removeConnection -dn $domainname -un $domainusername -pd $domainpassword -cn HIVE
}
# Re-run the BDM utility silently; the two piped "Y" answers accept its
# interactive confirmation prompts.
rerunbdmutil()
{
echo "running BDM UTILITY"
cd $infahome/tools/BDMUtil
echo Y Y | sh BDMSilentConfig.sh
echo "BDM util configuration complete"
}
# Query Ambari for the cluster host list, resolve the head node and worker
# nodes, and register the head node in /etc/hosts as "headnodehost".
# Sets: headnode0, headnode0ip, resulthost, workernodes, wnArr.
getclusterdetails()
{
echo "Getting list of hosts from ambari"
hostsJson=$(curl -u $HDIClusterLoginUsername:$HDIClusterLoginPassword -X GET https://$HDIClusterName.azurehdinsight.net/api/v1/clusters/$HDIClusterName/hosts)
echo "Parsing list of hosts"
# NOTE(review): JSON is parsed with sed/awk text munging rather than jq —
# fragile if Ambari ever changes its response formatting.
hosts=$(echo $hostsJson | sed 's/\\\\\//\//g' | sed 's/[{}]//g' | awk -v k="text" '{n=split($0,a,","); for (i=1; i<=n; i++) print a[i]}' | sed 's/\"\:\"/\|/g' | sed 's/[\,]/ /g' | sed 's/\"//g' | grep -w 'host_name')
echo $hosts
#additional configurations required
echo "Extracting headnode0"
headnode0=$(echo $hosts | grep -Eo '\bhn0-([^[:space:]]*)\b')
echo $headnode0
echo "Extracting headnode0 IP addresses"
headnode0ip=$(dig +short $headnode0)
echo "headnode0 IP: $headnode0ip"
# Short hostname as reported by the head node itself (uname field 2).
resulthost=$(sshpass -p $HDIClusterSSHPassword ssh -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "uname -a | cut -d ' ' -f 2")
echo "resulthost name is:"$resulthost
#Add a new line to the end of hosts file
echo "">>/etc/hosts
echo "Adding headnode IP addresses"
echo "$headnode0ip headnodehost $resulthost $headnode0">>/etc/hosts
echo "Extracting workernode"
workernodes=$(echo $hosts | grep -Eo '\bwn([^[:space:]]*)\b')
echo "Extracting workernodes IP addresses"
echo "workernodes : $workernodes"
# NOTE(review): `tr "\n" "\n"` is a no-op; wnArr is just $workernodes.
wnArr=$(echo $workernodes | tr "\n" "\n")
}
# Purge stale SSH host-key entries for the head node and every worker node
# (by both IP and hostname) so later ssh/scp calls do not fail on key changes.
removeknownhostentries()
{
sudo ssh-keygen -f /root/.ssh/known_hosts -R $headnode0ip
sudo ssh-keygen -f /root/.ssh/known_hosts -R $headnode0
for workernode in $wnArr
do
echo "[$workernode]"
workernodeip=$(dig +short $workernode)
echo "workernode $workernodeip"
sudo ssh-keygen -f /root/.ssh/known_hosts -R $workernodeip
sudo ssh-keygen -f /root/.ssh/known_hosts -R $workernode
done
}
# Remove any previous "headnodehost" entry from /etc/hosts so a fresh one
# can be added for the current cluster.
removeetchost()
{
matchstring="headnodehost"
# grep -q tests presence directly (replaces `[ -n "$(grep ...)" ]`).
if grep -q "$matchstring" /etc/hosts
then
echo "removing previous headnode entry"
sed -i "/$matchstring/d" /etc/hosts
else
echo "nothing to remove"
fi
}
# Create the Blaze/Spark staging and event-log HDFS directories on the head
# node and open their permissions; also point /bin/sh at bash there.
createstagingdir()
{
echo "creating staging directories"
sshpass -p $HDIClusterSSHPassword ssh -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "sudo ln -f -s /bin/bash /bin/sh"
sshpass -p $HDIClusterSSHPassword ssh -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "hadoop fs -mkdir -p" $blazeworkingdir
sshpass -p $HDIClusterSSHPassword ssh -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "hadoop fs -chmod 777" $blazeworkingdir
sshpass -p $HDIClusterSSHPassword ssh -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "hadoop fs -mkdir -p" $SPARK_HDFS_STAGING_DIR
sshpass -p $HDIClusterSSHPassword ssh -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "hadoop fs -chmod 777" $SPARK_HDFS_STAGING_DIR
sshpass -p $HDIClusterSSHPassword ssh -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "hadoop fs -mkdir -p" $SPARK_EVENTLOG_DIR
sshpass -p $HDIClusterSSHPassword ssh -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "hadoop fs -chmod 777" $SPARK_EVENTLOG_DIR
}
# Generate test.sh, a per-worker installer that copies the Informatica
# Debian package to a worker node and installs it via dpkg.
# The \$ escapes are deliberate: those variables must expand when test.sh
# runs, not while it is being generated here.
createshellscript()
{
shelltowrite="test.sh"
echo "#!/bin/sh" > $shelltowrite
echo "workernodeip=\$1">>$shelltowrite
echo "HDIClusterSSHUsername=\$2">>$shelltowrite
echo "HDIClusterSSHPassword=\$3">>$shelltowrite
echo "sshpass -p \$HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no \$HDIClusterSSHUsername@\$workernodeip \"sudo mkdir ~/rpmtemp\"">>$shelltowrite
echo "sshpass -p \$HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no \$HDIClusterSSHUsername@\$workernodeip \"sudo chmod 777 ~/rpmtemp\"">>$shelltowrite
echo "echo \"copying Binaries to\" \$workernodeip">>$shelltowrite
echo "sshpass -p \$HDIClusterSSHPassword scp -q -o StrictHostKeyChecking=no $debianlocation/informatica_10.1.1U2-1.deb \$HDIClusterSSHUsername@\$workernodeip:\"~/rpmtemp/\"">>$shelltowrite
echo "echo \"Installing Debian in\" \$workernodeip">>$shelltowrite
echo "sshpass -p \$HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no \$HDIClusterSSHUsername@\$workernodeip \"sudo chmod -R 777 ~/rpmtemp\"">>$shelltowrite
echo "sshpass -p \$HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no \$HDIClusterSSHUsername@\$workernodeip \"sudo dpkg --force-all -i ~/rpmtemp/informatica_10.1.1U2-1.deb\"">>$shelltowrite
echo "sshpass -p \$HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no \$HDIClusterSSHUsername@\$workernodeip \"sudo rm -rf ~/rpmtemp\"">>$shelltowrite
echo "sshpass -p \$HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no \$HDIClusterSSHUsername@\$workernodeip \"sudo ln -f -s /bin/bash /bin/sh\"">>$shelltowrite
echo "echo \"Debian Installation completed\"">>$shelltowrite
chmod -R 777 $shelltowrite
}
# Run the generated test.sh installer on every worker node in parallel,
# logging each node's output to <ip>.txt, then wait for all to finish.
installdebian()
{
echo "Installing debian"
for workernode in $wnArr
do
echo "[$workernode]"
workernodeip=$(dig +short $workernode)
echo "workernode $workernodeip"
sudo sh $shelltowrite $workernodeip $HDIClusterSSHUsername $HDIClusterSSHPassword >$workernodeip.txt &
done
wait
echo "out of wait"
echo "Debian installation successful"
}
# Locate decrypt.sh and key_decryption_cert.prv on the head node, pull them
# back to this machine (via scp initiated FROM the cluster), and recreate
# each file under the same absolute path locally.
copyhelperfilesfromcluster()
{
#remove already existing authentication id of vm if any
remote_knownhostsfile="/home/"$HDIClusterSSHUsername"/.ssh/known_hosts"
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip ""sudo ssh-keygen -f "$remote_knownhostsfile" -R " $domainHost"
echo "Installing sshpass on cluster"
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "sudo apt install sshpass "
echo "searching for file in remote cluster"
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "sudo find / -name decrypt.sh >oneclicksnap.txt"
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "sudo find / -name key_decryption_cert.prv >>oneclicksnap.txt"
sleep 5
echo "downloading oneclicksnap.txt"
# First line only echoes the command for logging; the second executes it.
echo sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip ""sshpass -p" $osPwd "scp -q -o StrictHostKeyChecking=no oneclicksnap.txt "$osUserName"@"$domainHost":""/home/"$osUserName"
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip ""sshpass -p" $osPwd "scp -q -o StrictHostKeyChecking=no oneclicksnap.txt "$osUserName"@"$domainHost":""/home/"$osUserName"
echo "downloading done"
sleep 20
#code to iterate snap.txt and download the file and copy to it to local directory
counter=0
skipcount=2
filename="/home/"$osUserName"/oneclicksnap.txt"
echo "displaying the content of downloaded file"
cat $filename
echo "parsing and processing the file contents"
# One absolute remote path per array element.
IFS=$'\n' read -d '' -r -a totalfiles < "$filename"
for line in "${totalfiles[@]}"
do
name="$line"
echo "downloading file:"$name
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip ""sshpass -p" $osPwd "scp -q -o StrictHostKeyChecking=no "$name $osUserName"@"$domainHost":""~""
# Rebuild the remote file's parent directory path from its components
# (skipcount drops the filename and the empty leading component).
IFS='/' read -ra NAMES <<< "$name"
counter=${#NAMES[@]}
((chckcounter=$counter - $skipcount))
#$basechkcounter=$chckcounter
intermediatestring=""
while [ $chckcounter -gt 0 ]
do
#echo ${NAMES[$chckcounter]}
intermediatestring=${NAMES[$chckcounter]}/$intermediatestring
((chckcounter=$chckcounter - 1))
done
intermediatestring=/$intermediatestring
#echo $intermediatestring
#echo ${NAMES[(counter-1)]}
echo "creating directory:"$intermediatestring
mkdir -p $intermediatestring
sleep 5
echo "moving file:"${NAMES[(counter-1)]}
mv /home/$osUserName/${NAMES[(counter-1)]} $intermediatestring
chmod -R 777 $intermediatestring
done
echo "Removing sshpass installation on cluster"
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$headnode0ip "sudo apt-get --purge remove sshpass --assume-yes"
}
# Workaround for BDM-7342: symlink the Hadoop client config files into
# /home/helper on every worker node so they are found at a fixed path.
fixforBDM7342()
{
workernodehelperdir="/home/helper"
for workernode in $wnArr
do
#statements
workernodeip=$(dig +short $workernode)
echo "creating directory in :"$workernode
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$workernodeip ""sudo mkdir "$workernodehelperdir"
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$workernodeip ""sudo ln -sf /etc/hadoop/conf/hdfs-site.xml "$workernodehelperdir"
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$workernodeip ""sudo ln -sf /etc/hadoop/conf/core-site.xml "$workernodehelperdir"
sshpass -p $HDIClusterSSHPassword ssh -q -o StrictHostKeyChecking=no $HDIClusterSSHUsername@$workernodeip ""sudo ln -sf /etc/hadoop/conf/mapred-site.xml "$workernodehelperdir"
done
}
# ---- Main flow ----
# Clean up local host/SSH state, gather cluster details, re-run the BDM
# utility, stage and install the helper artifacts on the cluster, copy the
# helper files back locally, then apply the BDM-7342 workaround.
# All called functions are presumably defined earlier in this (long) script;
# only fixforBDM7342 is visible in this chunk.
echo "Inside Main"
removeetchost
getclusterdetails
removeknownhostentries
removeconnection
rerunbdmutil
createstagingdir
createshellscript
installdebian
copyhelperfilesfromcluster
fixforBDM7342
| true
|
ade1671e568e88b8c1f229c102cbda65ead80f56
|
Shell
|
matt-mulligan/calithumpian
|
/scripts/stop_app.sh
|
UTF-8
| 716
| 3.875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# ##################################################
# Stop_App Script
# Stops the running calithumpian flask application by killing the process
# whose command line matches the app entry point.
#
version="1.0.0" # Sets version variable
# HISTORY:
#
# * 2020-04-13 - v1.0.0 - Initial creation
#
# ##################################################

# Provide a variable with the location of this script.
scriptPath="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

application="calithumpian"
entryPoint="calithumpian.py"

# send start message to console
echo "Beginning stop_app.sh helper script"

# Kill the app process. pkill -f matches against the full command line,
# replacing the ps|grep|grep -v grep|awk|xargs pipeline — and, unlike that
# pipeline, does not error out with "kill: missing argument" when the app
# is not running. '|| true' keeps the script's exit status clean when no
# matching process exists.
pkill -f -- "$entryPoint" || true

# Send end message to console
echo "$application stopped!"
| true
|
f84715ac71e58290bdcc4a2f7ea43b864f3034e5
|
Shell
|
andrewcooke-isti/JiraTestResultReporter
|
/src/main/bash/deploy-plugin.sh
|
UTF-8
| 515
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Deploy the JiraTestResultReporter Jenkins plugin into a local Jenkins
# instance: build it, stop Jenkins, install the fresh .hpi, pin it, and
# restart Jenkins.
# https://wiki.jenkins-ci.org/display/JENKINS/Plugin+tutorial

# Abort on any failed step — without this, a failed build still stopped
# Jenkins and copied a stale artifact.
set -e

if [ "$(basename "$PWD")" != "JiraTestResultReporter" ]; then
    echo "This script must be run from the JiraTestResultReporter directory" 1>&2
    exit 1
fi

./src/main/bash/compile-plugin.sh

sudo service jenkins stop
sudo cp target/JiraTestResultReporter.hpi /var/lib/jenkins/plugins/
# Remove the previously exploded plugin directory so Jenkins unpacks the
# fresh .hpi on startup.
sudo rm -fr /var/lib/jenkins/plugins/JiraTestResultReporter
# The .pinned marker prevents Jenkins from replacing this manually
# installed version with a bundled one.
sudo touch /var/lib/jenkins/plugins/JiraTestResultReporter.hpi.pinned
sudo service jenkins start
| true
|
5824a64b99cedab74557277db35d4e7a02dc69ba
|
Shell
|
sk77github/moniter
|
/logstash_install.sh
|
UTF-8
| 13,959
| 2.890625
| 3
|
[] |
no_license
|
#安装 logstash 版本 2.1.1
#自动化安装脚本
#!/bin/bash
cd /data/ && mkdir logstash
wget https://download.elastic.co/logstash/logstash/packages/centos/logstash-2.1.1-1.noarch.rpm
rpm -ivh logstash-2.1.1-1.noarch.rpm
chown -R logstash:logstash /data/logstash/ #服务安装后修改目录权限
chown -R logstash:logstash /var/log/logstash/
这里的权限问题要注意
logstash需要java环境 需要确保/usr/bin/java存在可执行
cat > /etc/sysconfig/logstash <<EOF
###############################
# Default settings for logstash
###############################
# Override Java location
#JAVACMD=/usr/bin/java
# Set a home directory
#LS_HOME=/var/lib/logstash
# Arguments to pass to logstash agent
#LS_OPTS=""
# Arguments to pass to java
LS_HEAP_SIZE="2000m"
# pidfiles aren't used for upstart; this is for sysv users.
#LS_PIDFILE=/var/run/logstash.pid
# user id to be invoked as; for upstart: edit /etc/init/logstash.conf
#LS_USER=logstash
# logstash logging
#LS_LOG_FILE=/var/log/logstash/logstash.log
#LS_USE_GC_LOGGING="true"
# logstash configuration directory
#LS_CONF_DIR=/etc/logstash/conf.d
# Open file limit; cannot be overridden in upstart
#LS_OPEN_FILES=16384
# Nice level
#LS_NICE=19
# If this is set to 1, then when stop is called, if the process has
# not exited within a reasonable time, SIGKILL will be sent next.
# The default behavior is to simply log a message program stop failed; still running
KILL_ON_STOP_TIMEOUT=0
EOF
#安装测试成功与否测试命令
service logstash configtest
#配置文件目录 /etc/logstash/conf.d/xxxxx.conf
#配置服务的目录
######/etc/init.d/logstash#####
#LS_HOME=/data/logstash
#LS_LOG_DIR=/data/logstash
#配置文件参考文档网址 https://www.elastic.co/guide/en/logstash/current/index.html
--------------------------------------------------------------------------------------------------------------
logstash管理:
#脚本配置成功后测试命令
service logstash configtest
#logstash 启动命令
service logstash start
#logstash 停止命令
service logstash stop
非服务方式时的关闭和启动
关闭命令
kill -15 pid
测试配置命令
/opt/logstash/bin/logstash -f /data/logstash/conf/logstash-indexer-enterprise.conf --configtest
启动命令
/opt/logstash/bin/logstash agent -f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}
重启命令
先关闭再启动
--------------------------------------------------------------------------------------------------------------------------------
logstash配置:
//shiper 配置 start
input {
file {
type => "testType"
path => [ "/data/testLog/test.log*" ]
exclude => [ "*.gz" ]
}
}
output {
if [type] == "testType" {
kafka {
codec => plain {
format => "%{message}"
}
topic_id => "testLog"
bootstrap_servers => "100.106.15.1:9092, 100.106.15.2:9092, 100.106.15.3:9092"
metadata_max_age_ms => 6000
}
}
}
//shiper 配置 end
//indexer 配置 start
input {
kafka {
topic_id => "testLog"
zk_connect => "100.106.15.1:2181,100.106.15.2:2181,100.106.15.3:2181"
group_id =>"testLogGroup"
consumer_threads => 7
codec => "plain"
}
}
output {
statsd {
host => "100.106.15.9"
port => 8125
namespace => "logstash"
increment => "laxin.exception"
}
}
//indexer 配置 end
graphite 访问:
curl 100.106.15.7:8085/render?target=stats.logstash.host.laxin.exception\&from=-15min\&format=json
logstash添加字段:
filter{
if [type] == "php-activity-exception" {
grok {
match => {"message" => "(?<timestamp>%{YEAR}-%{MONTHNUM}-%{MONTHDAY}%{SPACE}%{HOUR}:?%{MINUTE}(?::?%{SECOND}))%{SPACE}\[(%{SYSLOGHOST:ip}|-)\]\[\-\]\[\-\]\[%{LOGLEVEL:log_level}\]\[(?<classname>.*?)\]%{SPACE}(?<content>(.|\r|\n)*)"}
}
date {
match => [ "timestamp" , "yyyy-MM-dd HH:mm:ss" ]
}
}
}
添加了message字段和timestamp字段,以及ip字段,log_level字段,classname字段,content字段
filter {
if [type] == "op-trade" or [type] == "op-payment" {
if [status] == "0" {
mutate {
add_field => ["human_status", "ok"]
}
}
else {
mutate {
add_field => ["human_status", "fail"]
}
}
}
}
添加了human_status字段
重要的可查询的概念:field,type,tag
input:
大多数input插件都会配置的configuration options:type
Value type is string
There is no default value for this setting.
Add a type field to all events handled by this input.
Types are used mainly for filter activation.
The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer)
then a new input will not override the existing type. A type set at the shipper stays with that event for its life even
when sent to another Logstash server.
插件列表
file:
By default, each event is assumed to be one line. If you would like to join multiple log lines into one event,
you’ll want to use the multiline codec or filter.
例如:
codec => multiline {
pattern => "^\["
negate => true
what => "previous"
}
把当前行的数据添加到前面一行后面,,直到新进的当前行匹配 ^\[ 正则为止。
The plugin keeps track of the current position in each file by recording it in a separate file named sincedb.
file input 例子:
input {
file {
type => "testType"
path => [ "/data/testLog/test.log*" ]
}
}
file input 配置项:
1,start_positionedit
Value can be any of: beginning, end
Default value is "end"
Choose where Logstash starts initially reading files: at the beginning or at the end.
The default behavior treats files like live streams and thus starts at the end.
If you have old data you want to import, set this to beginning.
This option only modifies "first contact" situations where a file is new and not seen before,
i.e. files that don’t have a current position recorded in a sincedb file read by Logstash.
If a file has already been seen before, this option has no effect and the position recorded in the sincedb file will be used.
2,tags
Value type is array
There is no default value for this setting.
Add any number of arbitrary tags to your event.
This can help with processing later.
kafka:
This input will read events from a Kafka topic. It uses the high level consumer API provided by Kafka to read messages from the broker.
It also maintains the state of what has been consumed using Zookeeper. The default input codec is json
Ideally you should have as many threads as the number of partitions for a perfect balance
more threads than partitions means that some threads will be idle
kafka {
topic_id => "nginxlog"
zk_connect => "100.xxx.xxx.xxx:2181,100.xxx.xxx.xxx:2181,100.xxx.xxx.xxx:2181"
group_id =>"nginxlogGroup"
consumer_threads => 3
codec => "plain"
type => ""
}
filter:
grok
Logstash ships with about 120 patterns by default. You can find them here:
https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns.
You can add your own trivially. (See the patterns_dir setting)
配置选项
match
Value type is hash
Default value is {}
A hash of matches of field ⇒ value
For example:
filter {
grok { match => { "message" => "Duration: %{NUMBER:duration}" } }
}
If you need to match multiple patterns against a single field, the value can be an array of patterns
filter {
grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } }
}
patterns_dir
Value type is array
Default value is []
Logstash ships by default with a bunch of patterns, so you don’t necessarily need to define this yourself unless
you are adding additional patterns. You can point to multiple pattern directories using this setting Note that Grok
will read all files in the directory and assume its a pattern file (including any tilde backup files)
patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"]
Pattern files are plain text with format:
NAME PATTERN
For example:
NUMBER \d+
tag_on_failure
Value type is array
Default value is ["_grokparsefailure"]
Append values to the tags field when there has been no successful match
使用说明:
一,预定义匹配模式
55.3.244.1 GET /index.html 15824 0.043
%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
正则模式:字段描述符
二,自定义匹配模式,正则命名捕获
(?<field_name>the pattern here)
For example, postfix logs have a queue id that is an 10 or 11-character hexadecimal value. I can capture that easily like this:
(?<queue_id>[0-9A-F]{10,11})
自定义模式文件:
patterns
相当于把第一步里的正则表达式用名称代替,比如用WORD代替\w+(所有的字母出现一次或多次)
USERNAME [a-zA-Z0-9._-]+
USER %{USERNAME}
EMAILLOCALPART [a-zA-Z][a-zA-Z0-9_.+-=:]+
EMAILADDRESS %{EMAILLOCALPART}@%{HOSTNAME}
HTTPDUSER %{EMAILADDRESS}|%{USER}
INT (?:[+-]?(?:[0-9]+))
BASE10NUM (?<![0-9.+-])(?>[+-]?(?:(?:[0-9]+(?:\.[0-9]+)?)|(?:\.[0-9]+)))
NUMBER (?:%{BASE10NUM})
BASE16NUM (?<![0-9A-Fa-f])(?:[+-]?(?:0x)?(?:[0-9A-Fa-f]+))
BASE16FLOAT \b(?<![0-9A-Fa-f.])(?:[+-]?(?:0x)?(?:(?:[0-9A-Fa-f]+(?:\.[0-9A-Fa-f]*)?)|(?:\.[0-9A-Fa-f]+)))\b
POSINT \b(?:[1-9][0-9]*)\b
NONNEGINT \b(?:[0-9]+)\b
WORD \b\w+\b
NOTSPACE \S+
SPACE \s*
DATA .*?
GREEDYDATA .*
UUID [A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12}
date
The date filter is used for parsing dates from fields, and then using that date or timestamp as the logstash timestamp for the event.
配置例子
date {
match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
}
output:
kafka:
Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on the broker.
The only required configuration is the topic name. The default codec is json, so events will be persisted on the broker in json format.
If you select a codec of plain, Logstash will encode your messages with not only the message but also with a timestamp and hostname.
If you do not want anything but your message passing through, you should make the output configuration something like:
output {
kafka {
codec => plain {
format => "%{message}"
}
bootstrap_servers => "xxx.xxx.xxx.1:9092, xxx.xxx.xxx.2:9092, xxx.xxx.xxx.3:9092"
topic_id => "log4jlog"
}
}
For more information see http://kafka.apache.org/documentation.html#theproducer
Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs
需要先在kafka上创建好topic,命令如下
bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic enterprise-ng-log --partitions 3 --replication-factor 2
elasticsearch:
document_type
Value type is string
There is no default value for this setting.
The document type to write events to. Generally you should try to write only similar events to the same type.
String expansion %{foo} works here. Unless you set document_type, the event type will be used if it exists otherwise the document type
will be assigned the value of logs
hosts
Value type is array
Default value is ["127.0.0.1"]
Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in
the hosts parameter. Remember the http protocol uses the http address (eg. 9200, not 9300).
["127.0.0.1:9200","127.0.0.2:9200"]
index
Value type is string
Default value is "logstash-%{+YYYY.MM.dd}"
The index to write events to. This can be dynamic using the %{foo} syntax. The default value will partition your indices by day
so you can more easily delete old data or only search specific date ranges. Indexes may not contain uppercase characters.
For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}
配置例子:
elasticsearch {
hosts => ["100.106.15.1:9200","100.106.15.2:9200","100.106.15.3:9200","100.106.15.6:9200","100.106.15.7:9200","100.106.15.9:9200"]
}
statsd:
The default final metric sent to statsd would look like this:
`namespace.sender.metric`
With regards to this plugin,
the default namespace is "logstash",
the default sender is the ${host} field,
and the metric name depends on what is set as the metric name in the increment, decrement, timing, count, `set or gauge variable.
increment 对应statsd的打点格式:<metricname>:1|c
decrement 对应statsd的打点格式:<metricname>:-1|c
count 对应statsd的打点格式:<metricname>:大于1的数|c
timing 对应statsd的打点格式:<metricname>:<value>|ms
gauge 对应statsd的打点格式:<metricname>:<value>|g
set 对应statsd的打点格式:<metricname>:<value>|s
file
当以文件做为output时,需要注意文件所在目录,logstash需要具有写权限
--------------------------------------------------------------------------------------------------------
Codec:
plain:
The "plain" codec is for plain text with no delimiting between events.
This is mainly useful on inputs and outputs that already have a defined framing in their transport protocol (such as
zeromq, rabbitmq, redis, etc)
multiline:
The original goal of this codec was to allow joining of multiline messages from files into a single event. For example,
joining Java exception and stacktrace messages into a single event.
The config looks like this:
input {
stdin {
codec => multiline {
pattern => "pattern, a regexp"
negate => "true" or "false"
what => "previous" or "next"
}
}
}
| true
|
ec392b755c25beb9196f38692d3620c19d927779
|
Shell
|
AppliedLogicSystems/ALSProlog
|
/unix/superclean.sh
|
UTF-8
| 395
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
set -eu

# Remove packaged archives and the per-platform build directory.
echo "Doing superclean"

# Map the kernel name/release reported by uname onto the platform
# directory name used by the build tree.
system=$(uname -rs)
case $system in
	"SunOS 4"*)
		ARCH=sunos ;;
	"SunOS 5"*)
		ARCH=solaris ;;
	Linux*)
		ARCH=linux ;;
	"HP-UX"*)
		ARCH=hpux ;;
	"IRIX"*)
		ARCH=irix ;;
	*"_NT"*)
		ARCH=win32 ;;
	"Darwin"*)
		ARCH=darwin ;;
	*)
		echo "Unknown machine type..."
		exit 1 ;;
esac

# Delete build artifacts for this platform plus the foreign SDK staging dir.
rm -rf *.tgz *.zip "$ARCH"
rm -rf ../foreign_sdk/*/ALS_Prolog_Foreign_SDK
| true
|
bccdd063e258272e350496c088ef6d46f66a8f75
|
Shell
|
bom-d-van/me
|
/refresh.sh
|
UTF-8
| 431
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Regenerate thoughts_creation_file.txt: one "path://creation-date" line per
# entry under the thoughts/ tree, with the absolute repo prefix stripped so
# entries are repo-relative. macOS-only (GetFileInfo, BSD sed -i "").
set -eu

me=/Users/bom_d_van/Code/go/workspace/src/github.com/bom-d-van/me

# Keep the previous index as a backup before rebuilding, if one exists.
if [ -e "$me"/thoughts_creation_file.txt ]; then
    mv -f "$me"/thoughts_creation_file.txt{,.backup}
fi

# NUL-delimited find + read loop is safe for paths containing whitespace;
# the original `for f in $(find ...)` word-split on spaces.
while IFS= read -r -d '' f; do
    # GetFileInfo -d prints the file's creation date (macOS).
    echo "$f://$(GetFileInfo -d "$f")" >> "$me"/thoughts_creation_file.txt
done < <(find "$me"/thoughts -print0)

# Strip the absolute repo prefix. BSD sed: -i "" edits in place with no
# backup suffix.
sed -i "" "s/\/Users\/bom_d_van\/Code\/go\/workspace\/src\/github.com\/bom-d-van\/me\/thoughts//g" "$me"/thoughts_creation_file.txt
| true
|
88f180c9550687ad26da54c4c9490b3ba96d9551
|
Shell
|
OhmyZakka/MySQL
|
/replication/galera/mariadb-galera-cluster/deploy/Sysinit.sh
|
UTF-8
| 1,731
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Description: set kernel parameters and resource limits as a MySQL
#              (Galera) pre-install step.
# Notice: this script targets CentOS 7.
# Date   02/05/2017 2nd release
# Author Jamie Sun

# BUGFIX(review): $sec_limitfile was never defined in the original script,
# so every grep/sed fell back to reading stdin (hanging the script), and
# every `sed -i "s/..."` was missing its file operand. Both are fixed by
# defining the targets here and passing them explicitly.
sec_limitfile=/etc/security/limits.conf
bashrc=/root/.bashrc

echo "=====stop iptables;selinux====="
/etc/init.d/iptables stop
sed -i 's/SELINUX.*$/SELINUX=disabled/g' /etc/selinux/config

echo "=======set IO Scheduler,noop======"
echo noop > /sys/block/sda/queue/scheduler

# ensure_limit <domain> <type> <item> <value>
# Update the matching limits.conf entry in place if present, append it
# otherwise. (The original code grepped "mysql hard nofile" but edited the
# gemfire entry — the domain is now consistent throughout.)
ensure_limit()
{
    local domain=$1 type=$2 item=$3 value=$4
    if grep -q "$domain $type $item" "$sec_limitfile"; then
        sed -i "s/$domain $type $item.*$/$domain $type $item $value/g" "$sec_limitfile"
    else
        echo "$domain $type $item $value" >> "$sec_limitfile"
    fi
}

echo "================set gemfire file describle=========="
ensure_limit gemfire hard nofile 65535
ensure_limit gemfire soft nofile 65535
ensure_limit gemfire hard nproc 65535
ensure_limit gemfire soft nproc 65535

#echo "BINDIP="`hostname -i`";export BINDIP" >> .bashrc

echo "===============set file describle================="
# ensure_ulimit <flag>
# Keep "ulimit <flag> 65535" present in root's .bashrc. (The original
# grepped $sec_limitfile but appended to /root/.bashrc — both operations
# now target the same file.)
ensure_ulimit()
{
    local flag=$1
    if grep -q "ulimit $flag" "$bashrc"; then
        sed -i "s/ulimit $flag.*$/ulimit $flag 65535/g" "$bashrc"
    else
        echo "ulimit $flag 65535" >> "$bashrc"
    fi
}
ensure_ulimit -u
ensure_ulimit -n

echo "=========reload /etc/sysctl.conf=============="
/sbin/sysctl -p
echo "$(hostname) is done"
| true
|
f0289d47046a911b40afbdbf5c4801734b90102e
|
Shell
|
perfsonar/pscheduler
|
/pscheduler-server/pscheduler-server/daemons/threads
|
UTF-8
| 524
| 3.640625
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh -e
#
# Dump the process tree of each pScheduler daemon.
#
# Usage: threads [service ...]    (defaults to all four daemons)
#

# When stdout is a terminal keep pstree output short; otherwise add -l
# (long lines) so redirected output is not truncated.
[ -t 1 ] && LONG= || LONG=l

# Default to all daemons when no services are named on the command line.
# Using the positional parameters directly (instead of the original
# LIST=$@ string re-split) avoids an extra round of word splitting.
[ $# -eq 0 ] && set -- scheduler runner archiver ticker

for SERVICE in "$@"
do
    # Recover the daemon's --pid-file argument from its command line.
    PID_FILE=$(ps -ef \
        | fgrep "/daemons/${SERVICE}" \
        | fgrep -- '--pid-file' \
        | head -1 \
        | sed -e 's/^.*--pid-file\s\+//; s/\s\+.*$//')

    if [ ! -r "${PID_FILE}" ]
    then
        echo "Can't find ${SERVICE} PID file ${PID_FILE}" 1>&2
        exit 1
    fi

    pstree -ap${LONG} $(cat "${PID_FILE}")
    echo
done
| true
|
904bc6d502524a187e8b2fbc622b858d2c9c5947
|
Shell
|
ralongi/tools
|
/scripts/check_perf_ci_cx6.sh
|
UTF-8
| 48,519
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Check perf-CI results of a Beaker job against per-card thresholds and
# report pass/fail per test case.
# $dbg_flag expands an externally-set debug option (e.g. "set -x");
# it is a no-op when the variable is unset — TODO confirm with the caller.
$dbg_flag
job_id=$1
card_type=$2
if [[ $# -lt 2 ]]; then
	echo "Usage: $0 <Job ID> <Card Type>"
	echo "Example: $0 6863212 cx6"
	exit 0
fi
# Normalize the card type to upper case and source the matching threshold
# variables (fr*_*_threshold) from the tools repo checkout.
card_type=$(echo "$card_type" | awk '{print toupper($0)}')
if [[ $card_type == "CX5" ]]; then
	source ~/github/tools/scripts/cx5_perf_ci_threshold.txt
elif [[ $card_type == "CX6" ]]; then
	source ~/github/tools/scripts/cx6_perf_ci_threshold.txt
fi
# Compute the absolute and percentage difference between a measured result
# and its threshold, rounding the percentage to the nearest integer.
# Arguments:
#   $1 - measured result (integer, packets/sec)
#   $2 - threshold (integer; must be non-zero or the awk division fails)
# Outputs (globals, read by the pass/fail reporting code below):
#   delta - result minus threshold
#   pct   - 100*delta/threshold, rounded
# NOTE(review): the int()+0.5 rounding rounds negative percentages toward
# zero rather than to nearest (e.g. -3.7 -> -3) — confirm this is the
# intended behavior for below-threshold results.
get_delta_values()
{
$dbg_flag
result=$1
threshold=$2
delta=$(($result - $threshold))
pct=$(awk "BEGIN { pc=100*${delta}/${threshold}; i=int(pc); print (pc-i<0.5)?i:i+1 }")
}
log=$(bkr job-results J:$job_id --prettyxml | grep -A40 '"/kernel/networking/openvswitch/perf" role="CLIENTS"' | grep taskout.log | awk -F '"' '{print $2}')
html_result_file=$(bkr job-results J:$job_id --prettyxml | grep -A40 '"/kernel/networking/openvswitch/perf" role="CLIENTS"' | grep 'mlx5_100_cx6.html' | awk '{print $2}' | awk -F "=" '{print $2}' | sed 's/"//g')
log=${log=:-""}
pushd ~/temp
result_file=$(basename $log)
rm -f $result_file
rm -f pass_fail.txt
wget --quiet -O $result_file $log
# frame size=64, flows=1024, loss-rate=0
frame_size=64
flows=1024
loss_rate=0
fr64_fl1024_123_vno_vlan11_threshold=3410182
fr64_fl1024_143_vno_vlan11_threshold=7046358
fr64_fl1024_245_vno_vlan11_threshold=6827023
fr64_fl1024_489_vno_vlan11_threshold=13716274
fr64_fl1024_123_vyes_vlan0_threshold=3624251
fr64_fl1024_143_vyes_vlan0_threshold=7548083
fr64_fl1024_245_vyes_vlan0_threshold=7299298
fr64_fl1024_489_vyes_vlan0_threshold=14330525
fr64_fl1024_sriov_13_vyes_vlan0_threshold=39798738
fr64_fl1024_testpmd_vno_vlan0_threshold=4030497
# ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11
fr64_fl1024_123_vno_vlan11_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $1}' | awk -F '"' '{print $2}')
fr64_fl1024_123_vno_vlan11_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $2}' | awk -F "." '{print $1}')
fr64_fl1024_143_vno_vlan11_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $3}' | awk -F '"' '{print $2}')
fr64_fl1024_143_vno_vlan11_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $4}' | awk -F "." '{print $1}')
fr64_fl1024_245_vno_vlan11_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $5}' | awk -F '"' '{print $2}')
fr64_fl1024_245_vno_vlan11_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $6}' | awk -F "." '{print $1}')
r64_fl1024_489_vno_vlan11_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $7}' | awk -F '"' '{print $2}')
fr64_fl1024_489_vno_vlan11_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $8}' | awk -F "." '{print $1}')
# ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0
fr64_fl1024_123_vyes_vlan0_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $9}' | awk -F '"' '{print $2}')
fr64_fl1024_123_vyes_vlan0_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $10}' | awk -F "." '{print $1}')
fr64_fl1024_143_vyes_vlan0_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $11}' | awk -F '"' '{print $2}')
fr64_fl1024_143_vyes_vlan0_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $12}' | awk -F "." '{print $1}')
fr64_fl1024_245_vyes_vlan0_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $13}' | awk -F '"' '{print $2}')
fr64_fl1024_245_vyes_vlan0_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $14}' | awk -F "." '{print $1}')
fr64_fl1024_489_vyes_vlan0_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $15}' | awk -F '"' '{print $2}')
fr64_fl1024_489_vyes_vlan0_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $16}' | awk -F "." '{print $1}')
# sriov_pvp
fr64_fl1024_sriov_13_vyes_vlan0_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $17}' | awk -F '"' '{print $2}')
fr64_fl1024_sriov_13_vyes_vlan0_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $18}' | awk -F "." '{print $1}')
# testpmd as switch
fr64_fl1024_testpmd_vno_vlan0_testname=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $19}' | awk -F '"' '{print $2}')
fr64_fl1024_testpmd_vno_vlan0_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $20}' | awk -F "." '{print $1}')
# Report Results
if [[ $fr64_fl1024_123_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=64 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_123_vno_vlan11_result $fr64_fl1024_123_vno_vlan11_threshold
if [[ $fr64_fl1024_123_vno_vlan11_result -ge $fr64_fl1024_123_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_123_vno_vlan11_threshold, Result: $fr64_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_123_vno_vlan11_threshold, Result: $fr64_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr64_fl1024_143_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=64 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_143_vno_vlan11_result $fr64_fl1024_143_vno_vlan11_threshold
if [[ $fr64_fl1024_143_vno_vlan11_result -ge $fr64_fl1024_143_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_143_vno_vlan11_threshold, Result: $fr64_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_143_vno_vlan11_threshold, Result: $fr64_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr64_fl1024_245_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=64 queues=2 pmds=4 vcpus=5" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_245_vno_vlan11_result $fr64_fl1024_245_vno_vlan11_threshold
if [[ $fr64_fl1024_245_vno_vlan11_result -ge $fr64_fl1024_245_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_245_vno_vlan11_threshold, Result: $fr64_fl1024_245_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_245_vno_vlan11_threshold, Result: $fr64_fl1024_245_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr64_fl1024_489_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=64 queues=4 pmds=8 vcpus=9" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_489_vno_vlan11_result $fr64_fl1024_489_vno_vlan11_threshold
if [[ $fr64_fl1024_489_vno_vlan11_result -ge $fr64_fl1024_489_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_489_vno_vlan11_threshold, Result: $fr64_fl1024_489_vno_vlan11_result" | tee -a pass_fail.txt | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_489_vno_vlan11_threshold, Result: $fr64_fl1024_489_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr64_fl1024_123_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=64 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_123_vyes_vlan0_result $fr64_fl1024_123_vyes_vlan0_threshold
if [[ $fr64_fl1024_123_vyes_vlan0_result -ge $fr64_fl1024_123_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_123_vyes_vlan0_threshold, Result: $fr64_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_123_vyes_vlan0_threshold, Result: $fr64_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr64_fl1024_143_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=64 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_143_vyes_vlan0_result $fr64_fl1024_143_vyes_vlan0_threshold
if [[ $fr64_fl1024_143_vyes_vlan0_result -ge $fr64_fl1024_143_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_143_vyes_vlan0_threshold, Result: $fr64_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_143_vyes_vlan0_threshold, Result: $fr64_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr64_fl1024_245_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=64 queues=2 pmds=4 vcpus=5" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_245_vyes_vlan0_result $fr64_fl1024_245_vyes_vlan0_threshold
if [[ $fr64_fl1024_245_vyes_vlan0_result -ge $fr64_fl1024_245_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_245_vyes_vlan0_threshold, Result: $fr64_fl1024_245_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_245_vyes_vlan0_threshold, Result: $fr64_fl1024_245_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr64_fl1024_489_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=64 queues=4 pmds=8 vcpus=9" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_489_vyes_vlan0_result $fr64_fl1024_489_vyes_vlan0_threshold
if [[ $fr64_fl1024_489_vyes_vlan0_result -ge $fr64_fl1024_489_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_489_vyes_vlan0_threshold, Result: $fr64_fl1024_489_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_489_vyes_vlan0_threshold, Result: $fr64_fl1024_489_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr64_fl1024_sriov_13_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: sriov_pvp vIOMMU=yes vlan=0 frame=64 queues=1 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_sriov_13_vyes_vlan0_result $fr64_fl1024_sriov_13_vyes_vlan0_threshold
if [[ $fr64_fl1024_sriov_13_vyes_vlan0_result -ge $fr64_fl1024_sriov_13_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_sriov_13_vyes_vlan0_threshold, Result: $fr64_fl1024_sriov_13_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_sriov_13_vyes_vlan0_threshold, Result: $fr64_fl1024_sriov_13_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr64_fl1024_testpmd_vno_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: testpmd_as_switch vIOMMU=no vlan=0 frame=64 queues=1" | tee -a pass_fail.txt
get_delta_values $fr64_fl1024_testpmd_vno_vlan0_result $fr64_fl1024_testpmd_vno_vlan0_threshold
if [[ $fr64_fl1024_testpmd_vno_vlan0_result -ge $fr64_fl1024_testpmd_vno_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_testpmd_vno_vlan0_threshold, Result: $fr64_fl1024_testpmd_vno_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_testpmd_vno_vlan0_threshold, Result: $fr64_fl1024_testpmd_vno_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
# frame size=64, flows=1024 ovs_kernel, loss-rate=0.002
frame_size=64
flows=1024
loss_rate=0.002
# ovs_kernel_pvp
echo "Tests: ovs_kernel_pvp" | tee -a pass_fail.txt
fr64_fl1024_kernel_13_vno_vlan0_threshold=564824
# Extract the measured throughput: take the block following the matching jq
# invocation in $result_file, keep the "result=" line for vlan0, read the 2nd
# comma-separated field, and strip the fractional part.  tail -n1 keeps only
# the last matching line -- presumably earlier vlan0 matches belong to other
# test types; verify against the result-file layout.
fr64_fl1024_kernel_13_vno_vlan0_result=$(grep -A8 'jq --arg sz 64 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $2}' | awk -F "." '{print $1}' | tail -n1)
# Report only when a result was actually extracted (non-empty string).
if [[ $fr64_fl1024_kernel_13_vno_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_kernel_pvp vIOMMU=no vlan=0 frame=64 queues=1 vcpus=3" | tee -a pass_fail.txt
# get_delta_values (defined elsewhere) presumably sets $delta and $pct -- verify.
get_delta_values $fr64_fl1024_kernel_13_vno_vlan0_result $fr64_fl1024_kernel_13_vno_vlan0_threshold
if [[ $fr64_fl1024_kernel_13_vno_vlan0_result -ge $fr64_fl1024_kernel_13_vno_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr64_fl1024_kernel_13_vno_vlan0_threshold, Result: $fr64_fl1024_kernel_13_vno_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr64_fl1024_kernel_13_vno_vlan0_threshold, Result: $fr64_fl1024_kernel_13_vno_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
# frame size=128, flows=1024, loss-rate=0
# Variable naming: fr<frame>_fl<flows>_<queues><pmds><vcpus>_v<iommu>_vlan<id>,
# e.g. _123_ = 1 queue / 2 PMDs / 3 vCPUs (matches the "Test:" labels printed
# in the check blocks below).
fr128_fl1024_123_vno_vlan11_threshold=3285538
fr128_fl1024_143_vno_vlan11_threshold=6201016
fr128_fl1024_245_vno_vlan11_threshold=6347283
fr128_fl1024_489_vno_vlan11_threshold=12687276
fr128_fl1024_123_vyes_vlan0_threshold=3480967
fr128_fl1024_143_vyes_vlan0_threshold=6577647
fr128_fl1024_245_vyes_vlan0_threshold=6820219
fr128_fl1024_489_vyes_vlan0_threshold=13229015
# ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=128
# Even comma-separated fields of the "result=" line hold the throughput
# values: $2/$4/$6/$8 for the vlan11 configs, $10..$16 for the vlan0 configs
# -- presumably fixed by the jq output format in $result_file; verify there.
fr128_fl1024_123_vno_vlan11_result=$(grep -A8 'jq --arg sz 128 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $2}' | awk -F "." '{print $1}')
fr128_fl1024_143_vno_vlan11_result=$(grep -A8 'jq --arg sz 128 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $4}' | awk -F "." '{print $1}')
fr128_fl1024_245_vno_vlan11_result=$(grep -A8 'jq --arg sz 128 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $6}' | awk -F "." '{print $1}')
fr128_fl1024_489_vno_vlan11_result=$(grep -A8 'jq --arg sz 128 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $8}' | awk -F "." '{print $1}')
# ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=128
fr128_fl1024_123_vyes_vlan0_result=$(grep -A8 'jq --arg sz 128 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $10}' | awk -F "." '{print $1}')
fr128_fl1024_143_vyes_vlan0_result=$(grep -A8 'jq --arg sz 128 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $12}' | awk -F "." '{print $1}')
fr128_fl1024_245_vyes_vlan0_result=$(grep -A8 'jq --arg sz 128 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $14}' | awk -F "." '{print $1}')
fr128_fl1024_489_vyes_vlan0_result=$(grep -A8 'jq --arg sz 128 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $16}' | awk -F "." '{print $1}')
# frame=128 check blocks: each prints the test label, computes delta/pct via
# get_delta_values (defined elsewhere), and emits PASS when the measured
# result >= its threshold, FAIL otherwise.  A block is skipped entirely when
# no result was extracted for that configuration.
if [[ $fr128_fl1024_123_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=128 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr128_fl1024_123_vno_vlan11_result $fr128_fl1024_123_vno_vlan11_threshold
if [[ $fr128_fl1024_123_vno_vlan11_result -ge $fr128_fl1024_123_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr128_fl1024_123_vno_vlan11_threshold, Result: $fr128_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr128_fl1024_123_vno_vlan11_threshold, Result: $fr128_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr128_fl1024_143_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=128 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr128_fl1024_143_vno_vlan11_result $fr128_fl1024_143_vno_vlan11_threshold
if [[ $fr128_fl1024_143_vno_vlan11_result -ge $fr128_fl1024_143_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr128_fl1024_143_vno_vlan11_threshold, Result: $fr128_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr128_fl1024_143_vno_vlan11_threshold, Result: $fr128_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr128_fl1024_245_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=128 queues=2 pmds=4 vcpus=5" | tee -a pass_fail.txt
get_delta_values $fr128_fl1024_245_vno_vlan11_result $fr128_fl1024_245_vno_vlan11_threshold
if [[ $fr128_fl1024_245_vno_vlan11_result -ge $fr128_fl1024_245_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr128_fl1024_245_vno_vlan11_threshold, Result: $fr128_fl1024_245_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr128_fl1024_245_vno_vlan11_threshold, Result: $fr128_fl1024_245_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr128_fl1024_489_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=128 queues=4 pmds=8 vcpus=9" | tee -a pass_fail.txt
get_delta_values $fr128_fl1024_489_vno_vlan11_result $fr128_fl1024_489_vno_vlan11_threshold
if [[ $fr128_fl1024_489_vno_vlan11_result -ge $fr128_fl1024_489_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr128_fl1024_489_vno_vlan11_threshold, Result: $fr128_fl1024_489_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr128_fl1024_489_vno_vlan11_threshold, Result: $fr128_fl1024_489_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr128_fl1024_123_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=128 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr128_fl1024_123_vyes_vlan0_result $fr128_fl1024_123_vyes_vlan0_threshold
if [[ $fr128_fl1024_123_vyes_vlan0_result -ge $fr128_fl1024_123_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr128_fl1024_123_vyes_vlan0_threshold, Result: $fr128_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr128_fl1024_123_vyes_vlan0_threshold, Result: $fr128_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr128_fl1024_143_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=128 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr128_fl1024_143_vyes_vlan0_result $fr128_fl1024_143_vyes_vlan0_threshold
if [[ $fr128_fl1024_143_vyes_vlan0_result -ge $fr128_fl1024_143_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr128_fl1024_143_vyes_vlan0_threshold, Result: $fr128_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr128_fl1024_143_vyes_vlan0_threshold, Result: $fr128_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr128_fl1024_245_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=128 queues=2 pmds=4 vcpus=5" | tee -a pass_fail.txt
get_delta_values $fr128_fl1024_245_vyes_vlan0_result $fr128_fl1024_245_vyes_vlan0_threshold
if [[ $fr128_fl1024_245_vyes_vlan0_result -ge $fr128_fl1024_245_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr128_fl1024_245_vyes_vlan0_threshold, Result: $fr128_fl1024_245_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr128_fl1024_245_vyes_vlan0_threshold, Result: $fr128_fl1024_245_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr128_fl1024_489_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=128 queues=4 pmds=8 vcpus=9" | tee -a pass_fail.txt
get_delta_values $fr128_fl1024_489_vyes_vlan0_result $fr128_fl1024_489_vyes_vlan0_threshold
if [[ $fr128_fl1024_489_vyes_vlan0_result -ge $fr128_fl1024_489_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr128_fl1024_489_vyes_vlan0_threshold, Result: $fr128_fl1024_489_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr128_fl1024_489_vyes_vlan0_threshold, Result: $fr128_fl1024_489_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
# frame size=256, flows=1024, loss-rate=0
# Naming: fr<frame>_fl<flows>_<queues><pmds><vcpus>_v<iommu>_vlan<id>.
fr256_fl1024_123_vno_vlan11_threshold=3108010
fr256_fl1024_143_vno_vlan11_threshold=5817463
fr256_fl1024_245_vno_vlan11_threshold=5905521
fr256_fl1024_489_vno_vlan11_threshold=11490597
fr256_fl1024_123_vyes_vlan0_threshold=3299385
fr256_fl1024_143_vyes_vlan0_threshold=6182041
fr256_fl1024_245_vyes_vlan0_threshold=6364886
fr256_fl1024_489_vyes_vlan0_threshold=11985156
# ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=256
fr256_fl1024_123_vno_vlan11_result=$(grep -A8 'jq --arg sz 256 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $2}' | awk -F "." '{print $1}')
fr256_fl1024_143_vno_vlan11_result=$(grep -A8 'jq --arg sz 256 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $4}' | awk -F "." '{print $1}')
fr256_fl1024_245_vno_vlan11_result=$(grep -A8 'jq --arg sz 256 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $6}' | awk -F "." '{print $1}')
fr256_fl1024_489_vno_vlan11_result=$(grep -A8 'jq --arg sz 256 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $8}' | awk -F "." '{print $1}')
# ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=256
# (section comment moved above the first vyes assignment; it previously sat
# between the $10 and $12 extractions, mislabelling the group boundary)
fr256_fl1024_123_vyes_vlan0_result=$(grep -A8 'jq --arg sz 256 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $10}' | awk -F "." '{print $1}')
fr256_fl1024_143_vyes_vlan0_result=$(grep -A8 'jq --arg sz 256 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $12}' | awk -F "." '{print $1}')
fr256_fl1024_245_vyes_vlan0_result=$(grep -A8 'jq --arg sz 256 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $14}' | awk -F "." '{print $1}')
fr256_fl1024_489_vyes_vlan0_result=$(grep -A8 'jq --arg sz 256 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $16}' | awk -F "." '{print $1}')
# frame=256 check blocks: same pattern as frame=128 -- print label, compute
# delta/pct via get_delta_values, emit PASS when result >= threshold.
if [[ $fr256_fl1024_123_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=256 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr256_fl1024_123_vno_vlan11_result $fr256_fl1024_123_vno_vlan11_threshold
if [[ $fr256_fl1024_123_vno_vlan11_result -ge $fr256_fl1024_123_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr256_fl1024_123_vno_vlan11_threshold, Result: $fr256_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr256_fl1024_123_vno_vlan11_threshold, Result: $fr256_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr256_fl1024_143_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=256 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr256_fl1024_143_vno_vlan11_result $fr256_fl1024_143_vno_vlan11_threshold
if [[ $fr256_fl1024_143_vno_vlan11_result -ge $fr256_fl1024_143_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr256_fl1024_143_vno_vlan11_threshold, Result: $fr256_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr256_fl1024_143_vno_vlan11_threshold, Result: $fr256_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr256_fl1024_245_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=256 queues=2 pmds=4 vcpus=5" | tee -a pass_fail.txt
get_delta_values $fr256_fl1024_245_vno_vlan11_result $fr256_fl1024_245_vno_vlan11_threshold
if [[ $fr256_fl1024_245_vno_vlan11_result -ge $fr256_fl1024_245_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr256_fl1024_245_vno_vlan11_threshold, Result: $fr256_fl1024_245_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr256_fl1024_245_vno_vlan11_threshold, Result: $fr256_fl1024_245_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr256_fl1024_489_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=256 queues=4 pmds=8 vcpus=9" | tee -a pass_fail.txt
get_delta_values $fr256_fl1024_489_vno_vlan11_result $fr256_fl1024_489_vno_vlan11_threshold
if [[ $fr256_fl1024_489_vno_vlan11_result -ge $fr256_fl1024_489_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr256_fl1024_489_vno_vlan11_threshold, Result: $fr256_fl1024_489_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr256_fl1024_489_vno_vlan11_threshold, Result: $fr256_fl1024_489_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr256_fl1024_123_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=256 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr256_fl1024_123_vyes_vlan0_result $fr256_fl1024_123_vyes_vlan0_threshold
if [[ $fr256_fl1024_123_vyes_vlan0_result -ge $fr256_fl1024_123_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr256_fl1024_123_vyes_vlan0_threshold, Result: $fr256_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr256_fl1024_123_vyes_vlan0_threshold, Result: $fr256_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr256_fl1024_143_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=256 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr256_fl1024_143_vyes_vlan0_result $fr256_fl1024_143_vyes_vlan0_threshold
if [[ $fr256_fl1024_143_vyes_vlan0_result -ge $fr256_fl1024_143_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr256_fl1024_143_vyes_vlan0_threshold, Result: $fr256_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr256_fl1024_143_vyes_vlan0_threshold, Result: $fr256_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr256_fl1024_245_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=256 queues=2 pmds=4 vcpus=5" | tee -a pass_fail.txt
get_delta_values $fr256_fl1024_245_vyes_vlan0_result $fr256_fl1024_245_vyes_vlan0_threshold
if [[ $fr256_fl1024_245_vyes_vlan0_result -ge $fr256_fl1024_245_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr256_fl1024_245_vyes_vlan0_threshold, Result: $fr256_fl1024_245_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr256_fl1024_245_vyes_vlan0_threshold, Result: $fr256_fl1024_245_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr256_fl1024_489_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=256 queues=4 pmds=8 vcpus=9" | tee -a pass_fail.txt
get_delta_values $fr256_fl1024_489_vyes_vlan0_result $fr256_fl1024_489_vyes_vlan0_threshold
if [[ $fr256_fl1024_489_vyes_vlan0_result -ge $fr256_fl1024_489_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr256_fl1024_489_vyes_vlan0_threshold, Result: $fr256_fl1024_489_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr256_fl1024_489_vyes_vlan0_threshold, Result: $fr256_fl1024_489_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
# frame size=1500, flows=1024, loss-rate=0
# Naming: fr<frame>_fl<flows>_<queues><pmds><vcpus>_v<iommu>_vlan<id>.
fr1500_fl1024_123_vno_vlan11_threshold=1641723
fr1500_fl1024_143_vno_vlan11_threshold=2318051
fr1500_fl1024_245_vno_vlan11_threshold=2615223
fr1500_fl1024_489_vno_vlan11_threshold=3713150
fr1500_fl1024_123_vyes_vlan0_threshold=1875955
fr1500_fl1024_143_vyes_vlan0_threshold=3284639
fr1500_fl1024_245_vyes_vlan0_threshold=3146886
fr1500_fl1024_489_vyes_vlan0_threshold=4336302
# ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=1500
fr1500_fl1024_123_vno_vlan11_result=$(grep -A8 'jq --arg sz 1500 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $2}' | awk -F "." '{print $1}')
fr1500_fl1024_143_vno_vlan11_result=$(grep -A8 'jq --arg sz 1500 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $4}' | awk -F "." '{print $1}')
fr1500_fl1024_245_vno_vlan11_result=$(grep -A8 'jq --arg sz 1500 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $6}' | awk -F "." '{print $1}')
fr1500_fl1024_489_vno_vlan11_result=$(grep -A8 'jq --arg sz 1500 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $8}' | awk -F "." '{print $1}')
# ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=1500
fr1500_fl1024_123_vyes_vlan0_result=$(grep -A8 'jq --arg sz 1500 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $10}' | awk -F "." '{print $1}')
fr1500_fl1024_143_vyes_vlan0_result=$(grep -A8 'jq --arg sz 1500 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $12}' | awk -F "." '{print $1}')
fr1500_fl1024_245_vyes_vlan0_result=$(grep -A8 'jq --arg sz 1500 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $14}' | awk -F "." '{print $1}')
fr1500_fl1024_489_vyes_vlan0_result=$(grep -A8 'jq --arg sz 1500 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $16}' | awk -F "." '{print $1}')
# frame=1500 queues=1 pmds=2 vcpus=3: report only when a result was extracted.
if [[ $fr1500_fl1024_123_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=1500 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
# BUG FIX: this call previously passed the fr256_fl1024_245_vyes_vlan0 pair,
# so the delta/percentage printed below belonged to a different test entirely.
# Pass this test's own result/threshold, matching every sibling block.
get_delta_values $fr1500_fl1024_123_vno_vlan11_result $fr1500_fl1024_123_vno_vlan11_threshold
if [[ $fr1500_fl1024_123_vno_vlan11_result -ge $fr1500_fl1024_123_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr1500_fl1024_123_vno_vlan11_threshold, Result: $fr1500_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr1500_fl1024_123_vno_vlan11_threshold, Result: $fr1500_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
# Remaining frame=1500 check blocks: print label, compute delta/pct via
# get_delta_values, emit PASS when result >= threshold.
if [[ $fr1500_fl1024_143_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=1500 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr1500_fl1024_143_vno_vlan11_result $fr1500_fl1024_143_vno_vlan11_threshold
if [[ $fr1500_fl1024_143_vno_vlan11_result -ge $fr1500_fl1024_143_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr1500_fl1024_143_vno_vlan11_threshold, Result: $fr1500_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr1500_fl1024_143_vno_vlan11_threshold, Result: $fr1500_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr1500_fl1024_245_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=1500 queues=2 pmds=4 vcpus=5" | tee -a pass_fail.txt
get_delta_values $fr1500_fl1024_245_vno_vlan11_result $fr1500_fl1024_245_vno_vlan11_threshold
if [[ $fr1500_fl1024_245_vno_vlan11_result -ge $fr1500_fl1024_245_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr1500_fl1024_245_vno_vlan11_threshold, Result: $fr1500_fl1024_245_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr1500_fl1024_245_vno_vlan11_threshold, Result: $fr1500_fl1024_245_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr1500_fl1024_489_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=1500 queues=4 pmds=8 vcpus=9" | tee -a pass_fail.txt
get_delta_values $fr1500_fl1024_489_vno_vlan11_result $fr1500_fl1024_489_vno_vlan11_threshold
if [[ $fr1500_fl1024_489_vno_vlan11_result -ge $fr1500_fl1024_489_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr1500_fl1024_489_vno_vlan11_threshold, Result: $fr1500_fl1024_489_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr1500_fl1024_489_vno_vlan11_threshold, Result: $fr1500_fl1024_489_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr1500_fl1024_123_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=1500 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr1500_fl1024_123_vyes_vlan0_result $fr1500_fl1024_123_vyes_vlan0_threshold
if [[ $fr1500_fl1024_123_vyes_vlan0_result -ge $fr1500_fl1024_123_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr1500_fl1024_123_vyes_vlan0_threshold, Result: $fr1500_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr1500_fl1024_123_vyes_vlan0_threshold, Result: $fr1500_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr1500_fl1024_143_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=1500 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr1500_fl1024_143_vyes_vlan0_result $fr1500_fl1024_143_vyes_vlan0_threshold
if [[ $fr1500_fl1024_143_vyes_vlan0_result -ge $fr1500_fl1024_143_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr1500_fl1024_143_vyes_vlan0_threshold, Result: $fr1500_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr1500_fl1024_143_vyes_vlan0_threshold, Result: $fr1500_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr1500_fl1024_245_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=1500 queues=2 pmds=4 vcpus=5" | tee -a pass_fail.txt
get_delta_values $fr1500_fl1024_245_vyes_vlan0_result $fr1500_fl1024_245_vyes_vlan0_threshold
if [[ $fr1500_fl1024_245_vyes_vlan0_result -ge $fr1500_fl1024_245_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr1500_fl1024_245_vyes_vlan0_threshold, Result: $fr1500_fl1024_245_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr1500_fl1024_245_vyes_vlan0_threshold, Result: $fr1500_fl1024_245_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr1500_fl1024_489_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=1500 queues=4 pmds=8 vcpus=9" | tee -a pass_fail.txt
get_delta_values $fr1500_fl1024_489_vyes_vlan0_result $fr1500_fl1024_489_vyes_vlan0_threshold
if [[ $fr1500_fl1024_489_vyes_vlan0_result -ge $fr1500_fl1024_489_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr1500_fl1024_489_vyes_vlan0_threshold, Result: $fr1500_fl1024_489_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr1500_fl1024_489_vyes_vlan0_threshold, Result: $fr1500_fl1024_489_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
# sriov_pvp
# Frame=1500
fr1500_fl1024_sriov_13_vyes_vlan0_threshold=8151274
# SR-IOV result lives in comma-separated field $18 of the vlan0 "result="
# line -- presumably fixed by the result-file layout; verify there.
fr1500_fl1024_sriov_13_vyes_vlan0_result=$(grep -A8 'jq --arg sz 1500 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $18}' | awk -F "." '{print $1}')
# Report only when a result was extracted.
if [[ $fr1500_fl1024_sriov_13_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: sriov_pvp vIOMMU=yes vlan=0 frame=1500 queues=1 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr1500_fl1024_sriov_13_vyes_vlan0_result $fr1500_fl1024_sriov_13_vyes_vlan0_threshold
if [[ $fr1500_fl1024_sriov_13_vyes_vlan0_result -ge $fr1500_fl1024_sriov_13_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr1500_fl1024_sriov_13_vyes_vlan0_threshold, Result: $fr1500_fl1024_sriov_13_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr1500_fl1024_sriov_13_vyes_vlan0_threshold, Result: $fr1500_fl1024_sriov_13_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
# frame size=2000, flows=1024, loss-rate=0
# Only the 1-queue configurations (_123_ and _143_) are checked at this frame
# size; vlan0 results use fields $6/$8 rather than $10/$12.
fr2000_fl1024_123_vno_vlan11_threshold=1415964
fr2000_fl1024_143_vno_vlan11_threshold=2044585
fr2000_fl1024_123_vyes_vlan0_threshold=1549223
fr2000_fl1024_143_vyes_vlan0_threshold=2764836
# ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=2000
fr2000_fl1024_123_vno_vlan11_result=$(grep -A8 'jq --arg sz 2000 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $2}' | awk -F "." '{print $1}')
fr2000_fl1024_143_vno_vlan11_result=$(grep -A8 'jq --arg sz 2000 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $4}' | awk -F "." '{print $1}')
# ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=2000
fr2000_fl1024_123_vyes_vlan0_result=$(grep -A8 'jq --arg sz 2000 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $6}' | awk -F "." '{print $1}')
fr2000_fl1024_143_vyes_vlan0_result=$(grep -A8 'jq --arg sz 2000 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $8}' | awk -F "." '{print $1}')
# frame=2000 check blocks: print label, compute delta/pct via
# get_delta_values, emit PASS when result >= threshold.
if [[ $fr2000_fl1024_123_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=2000 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr2000_fl1024_123_vno_vlan11_result $fr2000_fl1024_123_vno_vlan11_threshold
if [[ $fr2000_fl1024_123_vno_vlan11_result -ge $fr2000_fl1024_123_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr2000_fl1024_123_vno_vlan11_threshold, Result: $fr2000_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr2000_fl1024_123_vno_vlan11_threshold, Result: $fr2000_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr2000_fl1024_143_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=2000 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr2000_fl1024_143_vno_vlan11_result $fr2000_fl1024_143_vno_vlan11_threshold
if [[ $fr2000_fl1024_143_vno_vlan11_result -ge $fr2000_fl1024_143_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr2000_fl1024_143_vno_vlan11_threshold, Result: $fr2000_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr2000_fl1024_143_vno_vlan11_threshold, Result: $fr2000_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr2000_fl1024_123_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=2000 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr2000_fl1024_123_vyes_vlan0_result $fr2000_fl1024_123_vyes_vlan0_threshold
if [[ $fr2000_fl1024_123_vyes_vlan0_result -ge $fr2000_fl1024_123_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr2000_fl1024_123_vyes_vlan0_threshold, Result: $fr2000_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr2000_fl1024_123_vyes_vlan0_threshold, Result: $fr2000_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr2000_fl1024_143_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=2000 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr2000_fl1024_143_vyes_vlan0_result $fr2000_fl1024_143_vyes_vlan0_threshold
if [[ $fr2000_fl1024_143_vyes_vlan0_result -ge $fr2000_fl1024_143_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr2000_fl1024_143_vyes_vlan0_threshold, Result: $fr2000_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr2000_fl1024_143_vyes_vlan0_threshold, Result: $fr2000_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
# frame size=9200, flows=1024, loss-rate=0
# Jumbo-frame configurations: only 1-queue variants (_123_ and _143_) are
# checked; vlan0 results use fields $6/$8, same layout as frame=2000.
fr9200_fl1024_123_vno_vlan11_threshold=392024
fr9200_fl1024_143_vno_vlan11_threshold=616128
fr9200_fl1024_123_vyes_vlan0_threshold=400165
fr9200_fl1024_143_vyes_vlan0_threshold=712203
# ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=9200
fr9200_fl1024_123_vno_vlan11_result=$(grep -A8 'jq --arg sz 9200 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $2}' | awk -F "." '{print $1}')
fr9200_fl1024_143_vno_vlan11_result=$(grep -A8 'jq --arg sz 9200 --arg fl 1024' $result_file | grep 'result=' | grep vlan11 | awk -F "," '{print $4}' | awk -F "." '{print $1}')
# ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=9200
fr9200_fl1024_123_vyes_vlan0_result=$(grep -A8 'jq --arg sz 9200 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $6}' | awk -F "." '{print $1}')
fr9200_fl1024_143_vyes_vlan0_result=$(grep -A8 'jq --arg sz 9200 --arg fl 1024' $result_file | grep 'result=' | grep vlan0 | awk -F "," '{print $8}' | awk -F "." '{print $1}')
# frame=9200 check blocks: print label, compute delta/pct via
# get_delta_values, emit PASS when result >= threshold.
if [[ $fr9200_fl1024_123_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=9200 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr9200_fl1024_123_vno_vlan11_result $fr9200_fl1024_123_vno_vlan11_threshold
if [[ $fr9200_fl1024_123_vno_vlan11_result -ge $fr9200_fl1024_123_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr9200_fl1024_123_vno_vlan11_threshold, Result: $fr9200_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr9200_fl1024_123_vno_vlan11_threshold, Result: $fr9200_fl1024_123_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr9200_fl1024_143_vno_vlan11_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=no vlan=11 frame=9200 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr9200_fl1024_143_vno_vlan11_result $fr9200_fl1024_143_vno_vlan11_threshold
if [[ $fr9200_fl1024_143_vno_vlan11_result -ge $fr9200_fl1024_143_vno_vlan11_threshold ]]; then echo "Result: PASS Threshold: $fr9200_fl1024_143_vno_vlan11_threshold, Result: $fr9200_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr9200_fl1024_143_vno_vlan11_threshold, Result: $fr9200_fl1024_143_vno_vlan11_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr9200_fl1024_123_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=9200 queues=1 pmds=2 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr9200_fl1024_123_vyes_vlan0_result $fr9200_fl1024_123_vyes_vlan0_threshold
if [[ $fr9200_fl1024_123_vyes_vlan0_result -ge $fr9200_fl1024_123_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr9200_fl1024_123_vyes_vlan0_threshold, Result: $fr9200_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr9200_fl1024_123_vyes_vlan0_threshold, Result: $fr9200_fl1024_123_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
if [[ $fr9200_fl1024_143_vyes_vlan0_result ]]; then
echo "" | tee -a pass_fail.txt
echo "Test: ovs_dpdk_vhostuser_pvp vIOMMU=yes vlan=0 frame=9200 queues=1 pmds=4 vcpus=3" | tee -a pass_fail.txt
get_delta_values $fr9200_fl1024_143_vyes_vlan0_result $fr9200_fl1024_143_vyes_vlan0_threshold
if [[ $fr9200_fl1024_143_vyes_vlan0_result -ge $fr9200_fl1024_143_vyes_vlan0_threshold ]]; then echo "Result: PASS Threshold: $fr9200_fl1024_143_vyes_vlan0_threshold, Result: $fr9200_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; else echo "Result: FAIL Threshold: $fr9200_fl1024_143_vyes_vlan0_threshold, Result: $fr9200_fl1024_143_vyes_vlan0_result" | tee -a pass_fail.txt; fi
echo "Difference between actual result and threshold: $delta ($pct%)" | tee -a pass_fail.txt
fi
total_tests=$(grep 'Result:' pass_fail.txt | wc -l)
total_failed_tests=$(grep 'Result: FAIL' pass_fail.txt | wc -l)
echo "" | tee -a pass_fail.txt
if [[ $(grep -i fail pass_fail.txt) ]]; then
echo "Overall Result: $total_failed_tests of $total_tests tests FAILED"
echo "" | tee -a pass_fail.txt
echo "FAILED tests:"
echo "" | tee -a pass_fail.txt
grep -B1 -A1 'Result: FAIL' pass_fail.txt
else
echo "Overall Result: All tests PASSED"
fi
if [[ $(grep -i fail pass_fail.txt) ]]; then
echo "" | tee -a pass_fail.txt
echo "Overall Result: $total_failed_tests of $total_tests tests FAILED"
fi
echo "" | tee -a pass_fail.txt
echo "Beaker Job: https://beaker.engineering.redhat.com/jobs/$job_id"
echo "Results: $html_result_file"
echo "" | tee -a pass_fail.txt
popd
| true
|
b8aa1c1e84003279bc633586ac41a562dd7a75b2
|
Shell
|
tuyen81/self_learning
|
/shell/running_package/testcases/coreutils/basename.sh
|
UTF-8
| 872
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
#==============================================================================
# DESCRIPTION: Testing for basename command.
# The basename command is used to strip directory and suffix from
# include/stdio.h filename.
#
# Expects the caller environment to provide:
#   OUTPUT_DIR, package_name - where the temporary log is written
#   log_file                 - accumulated test log
#   assert_passed            - helper that records pass/fail
#==============================================================================
# All path expansions are quoted: the original unquoted ${OUTPUT_DIR}/... broke
# when any of these variables contained spaces.
tmp_log="${OUTPUT_DIR}/${package_name}/tmp.log"

# Case 1: strip the directory part only.
basename /usr/bin/toshiba > "${tmp_log}" 2>&1
cp "${tmp_log}" "${log_file}" 2>/dev/null
check=0
s="toshiba"
if [ "$s" != "$(cat "${tmp_log}")" ] ;then
check=1
fi

# Case 2: strip the directory part and the ".h" suffix.
ss="stdio"
basename include/stdio.h .h > "${tmp_log}" 2>&1
cat "${tmp_log}" >> "${log_file}"
if [ "$ss" != "$(cat "${tmp_log}")" ] ;then
check=1
fi

# check == 0 means both sub-checks matched the expected basename output.
assert_passed $check 0
# Plain -f is enough (and safer): tmp.log is a single regular file.
rm -f -- "${tmp_log}"
| true
|
1c07e4f5eb60657a830acc1fbd4da5a537655b06
|
Shell
|
Mortuie/derusselifier
|
/test.sh
|
UTF-8
| 169
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download lecture slides 10 through 16 for CM30078.
printf 'Bash version.... %s...\n' "${BASH_VERSION}"
base_url="https://people.bath.ac.uk/masrjb/CourseNotes/Notes.bpo/CM30078"
for num in {10..16}; do
    wget "${base_url}/slides${num}.pdf"
done
| true
|
d0f5a1707ed48942b421fc6e2a4b0febc2f1920c
|
Shell
|
NovasomIndustries/Utils-2019.07
|
/rock/external/io/rk_make.sh
|
UTF-8
| 558
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
# Cross-compile the "io" tool and install it into the buildroot target tree.
# Picks the first available cross compiler (aarch64 preferred, then arm) and
# aborts with an error if neither is on PATH -- previously $gcc was left empty
# and the build line below was executed as garbage.
TOP_DIR=$(pwd)
BUILDROOT_TARGET_PATH=$(pwd)/../../buildroot/output/target/
aarch64_version=$(aarch64-linux-gcc --version 2>/dev/null)
arm_version=$(arm-linux-gcc --version 2>/dev/null)
if [ ! "$aarch64_version" = "" ] ;then
    gcc=aarch64-linux-gcc
    echo "gcc is aarch64-linux-gcc"
elif [ ! "$arm_version" = "" ] ;then
    gcc=arm-linux-gcc
    echo "gcc is arm-linux-gcc"
else
    echo "Error: neither aarch64-linux-gcc nor arm-linux-gcc found in PATH" >&2
    exit 1
fi
# -funwind-tables / -rdynamic keep backtraces usable; -O0 -g for debugging.
$gcc -rdynamic -g -funwind-tables -O0 -D_GNU_SOURCE -o io io.c -I"$(pwd)" || exit 1
cp "$TOP_DIR/io" "$BUILDROOT_TARGET_PATH/usr/bin/" || exit 1
echo "io is ready on buildroot/output/target/usr/bin/"
| true
|
8dd1210d6d68df242195aefff5f87ce59c3bb5d7
|
Shell
|
landryp/eos-inf
|
/bin/combine-obs
|
UTF-8
| 1,457
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Combine per-event posterior-sample files into one collated EOS sample set.
#
# Usage: combine-obs <obslist> <outpath>
#   obslist - CSV file, one "likelihood-path,observation-type" pair per line
#   outpath - destination handed to collate-samples / calc-total-weight
obslist=$1
outpath=$2
samplespaths=()
# Build one "--samples ... --column-map ..." argument group per event.
while IFS=, read -r likepath obstype
do
# Event name is whatever follows the "PosteriorSamples_" prefix in the path;
# the trailing ".csv" is stripped again when forming the column names.
eventname=${likepath##*PosteriorSamples_}
samplespaths+=("--samples ${likepath%.csv}_eos.csv --column-map ${likepath%.csv}_eos.csv logmargweight logweight_${eventname%.csv} --column-map ${likepath%.csv}_eos.csv logvarmargweight logvar_${eventname%.csv} --column-map ${likepath%.csv}_eos.csv num_elements num_${eventname%.csv}")
done < $obslist
# NOTE(review): ${samples} is expanded unquoted on purpose so each
# option/value in the accumulated string becomes a separate argument;
# paths containing spaces would break here -- confirm inputs never do.
samples=$(printf "%s " "${samplespaths[@]}")
collate-samples -V eos ${outpath} ${samples}
calc-total-weight ${outpath} -v
#echo "eos,logmargweight,logvarmargweight,num_elements" > "${outpath}.tmp"
#header="eos"
#counter=0
#while IFS=, read -r likepath obstype
#do
# posteospath="${likepath%.*}_eos.csv"
# name=$(basename "${likepath%.*}")
# tail -n +2 $posteospath >> "${outpath}.tmp"
# counter=$(($counter+1))
# header="${header},${name}"
#done < $obslist
#marginalize-samples "${outpath}.tmp" eos -o $outpath --weight-column logmargweight --weight-column-is-log logmargweight -v
#rm "${outpath}.tmp"
#echo "eos,logmargweight,logvarmargweight,num_elements" > "${outpath}.tmp"
#while IFS=, read -r eos weight var num
#do
# num=$(echo print $num | python)
# if (( $(echo "$num == $counter" |bc -l) )); then
# echo "$eos,$weight,$var,$num" >> "${outpath}.tmp"
# fi
#done < <(tail -n +2 "${outpath}")
#mv "${outpath}.tmp" $outpath
#concatenate-eos-posts $obslist $outpath -v -o "${outpath%.*}_all.csv"
| true
|
7900f982842a49465af0bc3e32114d5f1f3a1cb6
|
Shell
|
Brainiarc7/devtools
|
/commitpkg.in
|
UTF-8
| 4,944
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
m4_include(lib/common.sh)
getpkgfile() {
	# Validate that exactly one package file matched the glob expansion and
	# print its name.
	#
	# Arguments: the expanded glob matches.
	# Outputs:   the single canonical package file name on stdout.
	# Returns:   0 on success, 1 when zero or multiple packages were found.
	if (( $# == 0 )); then
		error 'No canonical package found!'
		return 1
	elif (( $# > 1 )); then
		# The old 'case $#' pattern ([!1]) only matched single-digit counts,
		# so 10+ matches slipped through and the first file was silently
		# echoed; an explicit numeric comparison covers every count.
		error 'Failed to canonicalize package name -- multiple packages found:'
		msg2 '%s' "$@"
		return 1
	fi
	echo "$1"
}
# Source makepkg.conf; fail if it is not found
if [[ -r '/etc/makepkg.conf' ]]; then
source '/etc/makepkg.conf'
else
die '/etc/makepkg.conf not found!'
fi
# Source user-specific makepkg.conf overrides
if [[ -r ~/.makepkg.conf ]]; then
. ~/.makepkg.conf
fi
cmd=${0##*/}
if [[ ! -f PKGBUILD ]]; then
die 'No PKGBUILD file'
fi
. PKGBUILD
pkgbase=${pkgbase:-$pkgname}
case "$cmd" in
commitpkg)
if (( $# == 0 )); then
die 'usage: commitpkg <reponame> [-f] [-s server] [-l limit] [-a arch] [commit message]'
fi
repo="$1"
shift
;;
*pkg)
repo="${cmd%pkg}"
;;
*)
die 'usage: commitpkg <reponame> [-f] [-s server] [-l limit] [-a arch] [commit message]'
;;
esac
# check if all local source files are under version control
for s in "${source[@]}"; do
if [[ $s != *://* ]] && ! svn status -v "$s" | grep -q '^[ AMRX~]'; then
die "$s is not under version control"
fi
done
# check if changelog and install files are under version control
for i in 'changelog' 'install'; do
while read -r file; do
# evaluate any bash variables used
eval file=\"$(sed 's/^\(['\''"]\)\(.*\)\1$/\2/' <<< "$file")\"
if ! svn status -v "${file}" | grep -q '^[ AMRX~]'; then
die "${file} is not under version control"
fi
done < <(sed -n "s/^[[:space:]]*$i=//p" PKGBUILD)
done
rsyncopts=(-e ssh -p --chmod=ug=rw,o=r -c -h -L --progress --partial -y)
archreleaseopts=()
while getopts ':l:a:s:f' flag; do
case $flag in
f) archreleaseopts+=('-f') ;;
s) server=$OPTARG ;;
l) rsyncopts+=("--bwlimit=$OPTARG") ;;
a) commit_arch=$OPTARG ;;
:) die "Option requires an argument -- '$OPTARG'" ;;
\?) die "Invalid option -- '$OPTARG'" ;;
esac
done
shift $(( OPTIND - 1 ))
if [[ -z $server ]]; then
case "$repo" in
core|extra|testing|staging|kde-unstable|gnome-unstable)
server='gerolde.archlinux.org' ;;
community*|multilib*)
server='aur.archlinux.org' ;;
*)
server='gerolde.archlinux.org'
msg "Non-standard repository $repo in use, defaulting to server $server" ;;
esac
fi
if [[ -n $(svn status -q) ]]; then
msgtemplate="upgpkg: $pkgbase $(get_full_version)"$'\n\n'
if [[ -n $1 ]]; then
stat_busy 'Committing changes to trunk'
svn commit -q -m "${msgtemplate}${1}" || die
stat_done
else
msgfile="$(mktemp)"
echo "$msgtemplate" > "$msgfile"
if [[ -n $SVN_EDITOR ]]; then
$SVN_EDITOR "$msgfile"
elif [[ -n $VISUAL ]]; then
$VISUAL "$msgfile"
elif [[ -n $EDITOR ]]; then
$EDITOR "$msgfile"
else
vi "$msgfile"
fi
[[ -s $msgfile ]] || die
stat_busy 'Committing changes to trunk'
svn commit -q -F "$msgfile" || die
unlink "$msgfile"
stat_done
fi
fi
declare -a uploads
declare -a commit_arches
declare -a skip_arches
for _arch in ${arch[@]}; do
if [[ -n $commit_arch && ${_arch} != "$commit_arch" ]]; then
skip_arches+=($_arch)
continue
fi
for _pkgname in ${pkgname[@]}; do
fullver=$(get_full_version $_pkgname)
if ! pkgfile=$(shopt -s nullglob;
getpkgfile "${PKGDEST+$PKGDEST/}$_pkgname-$fullver-${_arch}".pkg.tar.?z); then
warning "Skipping $_pkgname-$fullver-$_arch: failed to locate package file"
skip_arches+=($_arch)
continue 2
fi
uploads+=("$pkgfile")
sigfile="${pkgfile}.sig"
if [[ ! -f $sigfile ]]; then
msg "Signing package ${pkgfile}..."
if [[ -n $GPGKEY ]]; then
SIGNWITHKEY="-u ${GPGKEY}"
fi
gpg --detach-sign --use-agent ${SIGNWITHKEY} "${pkgfile}" || die
fi
if ! gpg --verify "$sigfile" >/dev/null 2>&1; then
die "Signature ${pkgfile}.sig is incorrect!"
fi
uploads+=("$sigfile")
done
done
for _arch in ${arch[@]}; do
if ! in_array $_arch ${skip_arches[@]}; then
commit_arches+=($_arch)
fi
done
archrelease "${archreleaseopts[@]}" "${commit_arches[@]/#/$repo-}" || die
new_uploads=()
# convert to absolute paths so rsync can work with colons (epoch)
while read -r -d '' upload; do
new_uploads+=("$upload")
done < <(realpath -z "${uploads[@]}")
uploads=("${new_uploads[@]}")
unset new_uploads
if [[ ${#uploads[*]} -gt 0 ]]; then
msg 'Uploading all package and signature files'
rsync "${rsyncopts[@]}" "${uploads[@]}" "$server:staging/$repo/" || die
fi
if [[ "${arch[*]}" == 'any' ]]; then
if [[ -d ../repos/$repo-i686 && -d ../repos/$repo-x86_64 ]]; then
pushd ../repos/ >/dev/null
stat_busy "Removing $repo-i686 and $repo-x86_64"
svn rm -q $repo-i686
svn rm -q $repo-x86_64
svn commit -q -m "Removed $repo-i686 and $repo-x86_64 for $pkgname"
stat_done
popd >/dev/null
fi
else
if [[ -d ../repos/$repo-any ]]; then
pushd ../repos/ >/dev/null
stat_busy "Removing $repo-any"
svn rm -q $repo-any
svn commit -q -m "Removed $repo-any for $pkgname"
stat_done
popd >/dev/null
fi
fi
| true
|
6791fce8ea64a36024e2afc906d1f1feeb08fb45
|
Shell
|
foxhatleo/leos-profiles
|
/oh-my-zsh.zsh
|
UTF-8
| 762
| 2.828125
| 3
|
[] |
no_license
|
# Load oh-my-zsh with this profile's theme and plugin selection.
# Set OMZ_DISABLED to any non-empty value to skip loading entirely.
export ZSH="$HOME/.oh-my-zsh"
if [ -d $ZSH ]; then
if [ -z "$OMZ_DISABLED" ]; then
# Fall back to the agnoster theme unless the caller already picked one.
[ -z "$ZSH_THEME" ] && ZSH_THEME="agnoster"
DEFAULT_USER="leoliang"
plugins=(
1password
brew
bundler
capistrano
command-not-found
common-aliases
debian
dnf
dotenv
gem
git
github
gradle
heroku
node
npm
macos
pip
pyenv
python
postgres
rails
rake
rbenv
ruby
sudo
systemd
ubuntu
ufw
vscode
yarn
zsh-syntax-highlighting
zsh-autosuggestions
zsh-completions
)
# NOTE(review): SSH_CLIENT is unset before loading -- presumably so the
# prompt theme does not render the remote-session context; confirm nothing
# else in the session relies on it.
unset SSH_CLIENT
source $ZSH/oh-my-zsh.sh
fi
else
# puts-err is an error-printing helper defined elsewhere in these profiles.
puts-err "Oh my zsh is not installed!"
fi
| true
|
927899cd56f99664d2b011a8d065bf300705717c
|
Shell
|
jhabboubi/learning_shell_scriping
|
/llss/if.sh
|
UTF-8
| 365
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Demo of if/elif/else: checks a colour argument and plays a fixed-number
# guessing game.
#
# Usage: if.sh <color> <guess>
#   color - compared against "blue"
#   guess - integer compared against the fixed value 50

# Print whether the given colour is blue. Quoting inside [[ ]] makes an
# empty or space-containing argument safe (the original unquoted
# '[ $COLOR = "blue" ]' errored out when $1 was missing).
check_color() {
    local color=$1
    if [[ "$color" == "blue" ]]; then
        echo "yes, color is $color and the path to this file $(pwd) "
    else
        echo "The color is not blue"
    fi
}

# Compare the user's guess with the computer's fixed number (50).
check_guess() {
    local guess=$1
    local computer=50
    if [[ "$guess" -lt "$computer" ]]; then
        echo "you are too low"
    elif [[ "$guess" -gt "$computer" ]]; then
        echo "you're too high"
    else
        echo "you guessed it!"
    fi
}

check_color "$1"
check_guess "$2"
| true
|
ec32a28601d436e26f94509c3e7eab63a1ba2a26
|
Shell
|
IM-TechieScientist/amogos_installer
|
/install.sh
|
UTF-8
| 1,539
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# AmogOS installer (x86_64) for Debian/Debian-based systems: installs LXDE
# plus the AmogOS look-and-feel packages (theme, icons, cursor, panel).
# NOTE: the script relies on bash features (read -n, echo -e, (( )) ), so the
# shebang above is required -- the original file had none.

# Terminal colour escape codes used for the banner text.
RED='\033[0;31m'
PURPLE='\033[0;35m'
BLUE='\033[0;34m'
GREEN='\033[0;32m'
AQUA='\033[1;34m'
NC='\033[0m'
#Clearing Terminal Screen
clear
#Printing Credits
echo -e "${GREEN}Welcome to the AmogOS Installer (x86_64)"
echo -e "${GREEN}This will install AmogOS on your Debian or Debian-based system"
echo ""
read -n 1 -s -r -p "Press any key to continue"
echo ""

# Print an error message in red and abort the installer.
error() {
  echo -e "\\e[91m$1\\e[39m"
  exit 1
}

echo "Installing required packages..."
echo " "
sudo apt update || error "Failed to update apt packages"
# Wait for any other apt/dpkg instance to release its locks, showing a
# small text spinner while we wait.
i=0
while sudo fuser /var/{lib/{dpkg,apt/lists},cache/apt/archives}/lock > /dev/null 2>&1
do
  case $(($i % 4)) in
    0) j="-";;
    1) j="\\";;
    2) j="|";;
    3) j="/";;
  esac
  printf "\r[$j] Waiting for other APT instances to finish..."
  sleep 0.5
  ((i+=1))
done
[[ $i -gt 0 ]] && printf "Done.\n"
sudo apt install -y figlet lolcat

# Install each desktop component with a banner and a short pause in between.
figlet "Installing LXDE" | lolcat
sudo apt install -y lxde
echo "sleeping 5 seconds"
sleep 5
figlet "Installing arc-theme" | lolcat
sudo apt install -y arc-theme
echo "sleeping 5 seconds"
sleep 5
figlet "Installing papirus-icons" | lolcat
sudo apt install -y papirus-icon-theme
echo "sleeping 5 seconds"
sleep 5
figlet "Installing cursor" | lolcat
sudo apt install -y breeze-cursor-theme
echo "sleeping 5 seconds"
sleep 5
figlet "Installing xfce4-panel" | lolcat
sudo apt install -y xfce4-panel
echo "sleeping 5 seconds"
sleep 5
figlet "Installation has completed!" | lolcat
echo "Please reboot now and do installation step two"
| true
|
e7c611ff5402ce9151b8db8f56f16da8e3367305
|
Shell
|
brettinternet/homelab
|
/containers/snapraid/start.sh
|
UTF-8
| 1,600
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Entry point for the SnapRAID container: validates configuration, links it
# into place, then runs snapraid-runner with user-defined pre/post hooks.
#
# NOTE: the script uses bash-only features (the 'function' keyword, process
# substitution '< <(...)' and 'set -o pipefail'), so the original "#!/bin/sh"
# shebang broke on systems where /bin/sh is dash; it now invokes bash.

### Validation and Setup
# remove default configuration (regular file only; keep an existing symlink)
if [ ! -L /etc/snapraid.conf ] && [ -f /etc/snapraid.conf ]; then
  rm /etc/snapraid.conf
fi
# Verify user config present
if [ ! -f /config/snapraid.conf ]; then
  echo "No config found. You must configure SnapRAID before running this container."
  exit 1
fi
# Verify user runner config present
if [ ! -f /config/snapraid-runner.conf ]; then
  echo "No config found. You must configure snapraid-runner before running this container"
  exit 1
fi
# Link user config to expected snapraid config location
if [ ! -L /etc/snapraid.conf ]; then
  ln -s /config/snapraid.conf /etc/snapraid.conf
fi

### Declarations
# Run each newline-separated command from $1, echoing it before evaluation.
run_commands() {
  local commands=$1
  while IFS= read -r cmd; do echo "$cmd" && eval "$cmd" ; done < <(printf '%s\n' "$commands")
}

# Best-effort exit hook: disable error aborts so a failing POST_COMMANDS_EXIT
# cannot mask the script's real exit status.
run_exit_commands() {
  set +e
  set +o pipefail
  run_commands "${POST_COMMANDS_EXIT:-}"
}

### Runtime
trap run_exit_commands EXIT
run_commands "${PRE_COMMANDS:-}"
start=$(date +%s)
echo Starting SnapRAID runner at $(date +"%Y-%m-%d %H:%M:%S")
set +e
/usr/bin/python3 /app/snapraid-runner/snapraid-runner.py -c /config/snapraid-runner.conf
RC=$?
set -e
if [ $RC -ne 0 ]; then
  # Exit code 3 means the run was incomplete; it gets its own hook if defined.
  if [ $RC -eq 3 ] && [ -n "${POST_COMMANDS_INCOMPLETE:-}" ]; then
    run_commands "${POST_COMMANDS_INCOMPLETE:-}"
  else
    run_commands "${POST_COMMANDS_FAILURE:-}"
  fi
fi
# NOTE(review): this line prints even when RC != 0 -- kept for compatibility
# with the original log output.
echo Runner successful
end=$(date +%s)
echo Finished SnapRAID runner at $(date +"%Y-%m-%d %H:%M:%S") after $((end-start)) seconds
# Propagate the runner's failure code after the duration has been logged.
[ $RC -ne 0 ] && exit $RC
run_commands "${POST_COMMANDS_SUCCESS:-}"
| true
|
5d10a21468cd0460d574a336d3293f46dcae85af
|
Shell
|
masonke/iptables
|
/iptables_config.sh
|
UTF-8
| 5,642
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# https://www.netfilter.org/documentation/HOWTO/packet-filtering-HOWTO-7.html
# Assumptions:
# It is assumed that the URG flag is not being used legitimately. All segments with URG set will be dropped. You can change this behavior by commenting out the line with URG URG and uncomment the following lines.
# There is only one interface on the host, not counting the loopback.
# Notes:
# PREROUTING Chain: Tests immediately after being received by an interface and is much faster.
# PREROUTING is used with the raw, mangle and nat tables
#
# INPUT Chain: Tests right before being handed to a local process.
# INPUT OUTPUT are used with the default filter table.
#
# UDP as a client is handled with the RELATED clause at the end
#
# The order of rules is important, they are executed from top to bottom. The rules to check for
# bad or illegal packets need to be before allowing services.
# Also, lines need to be ordered to minimize the traversal. For example, dhcp is used only on boot up or to update the lease. Therefore, it is at the end so the services do not have to pass through the rules.
#
# Change the INPUT policy to ACCEPT, or you can lock yourself out. This needs to be the first rule, we will change it at the end to DENY
iptables -P INPUT ACCEPT
# Flush all current rules from iptables.
iptables -F
# Allow access for local host on the loopback. This might be overkill, but it is safer.
# Locally created packets do not pass via the PREROUTING chain, so we need to use the INPUT and OUTPUT chains
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
# Don't forward traffic
iptables -P FORWARD DROP
# Drop packets that the connection tracker classifies as INVALID.
# FIX: the original line used a Unicode en dash ("–state") instead of the
# ASCII "--state" option prefix, so iptables rejected the rule entirely.
iptables -A INPUT -m state --state INVALID -j DROP
# Drop all fragments. This may cause problems for VPNs that are not configured correctly. If it does, then comment out the lines.
iptables -t raw -A PREROUTING -f -j LOG --log-level 7 --log-prefix "FRAG-DROP: "
iptables -t raw -A PREROUTING -f -j DROP
# Protect against common tcp attacks
# Block tcp packets that have no tcp flags set.
iptables -t raw -A PREROUTING -p tcp --tcp-flags ALL NONE -j DROP
# Block tcp packets that have all tcp flags set.
iptables -t raw -A PREROUTING -p tcp --tcp-flags ALL ALL -j DROP
# Drop all packets with the URG flag set. This flag is seldom used in modern applications.
#If this causes a problem, comment out this line and uncomment the lines after
iptables -t raw -A PREROUTING -p tcp --tcp-flags ALL URG -j LOG --log-prefix "URG-DROP:"
iptables -t raw -A PREROUTING -p tcp --tcp-flags ALL URG -j DROP
# Uncomment these rules if you need URG flag support. These lines will block illegal combinations
# Drop SYN,URG
#iptables -t raw -A PREROUTING -p tcp --tcp-flags SYN,URG SYN,URG -j DROP
# Block tcp packets with FIN and URG. This will catch the traditional XMAS.
#iptables -t raw -A PREROUTING -p tcp --tcp-flags ALL FIN,URG -j DROP
# Block illegal tcp flags combinations
# Block tcp packets with SYN and FIN
iptables -t raw -A PREROUTING -p tcp --tcp-flags SYN,FIN SYN,FIN -j DROP
# Block tcp packets with SYN and RST
iptables -t raw -A PREROUTING -p tcp --tcp-flags SYN,RST SYN,RST -j DROP
# Allow SYN alone
iptables -t raw -A PREROUTING -p tcp --tcp-flags SYN SYN -j ACCEPT
# Make sure NEW incoming tcp connections are SYN packets; otherwise we need to drop them
iptables -A INPUT -p tcp ! --syn -m state --state NEW -j DROP
# Drop excessive RST packets to avoid RST attacks, by given the next real data packet in the sequence a better chance to arrive first.
# This is a global limit, adjust as needed.
iptables -A INPUT -p tcp -m tcp --tcp-flags RST RST -m limit --limit 2/second --limit-burst 2 -j ACCEPT
# Allow HTTP connections on tcp port 80 from anywhere. Uncomment if needed
#iptables -t raw -A PREROUTING -p tcp --dport 80 -j ACCEPT
# Allow HTTPS connections on tcp port 443 from anywhere. Uncomment if needed
#iptables -t raw -A PREROUTING -p tcp --dport 443 -j ACCEPT
# Allow SSH connections on tcp port 22 from devices in 10/8
# This is essential when working on remote servers via SSH to prevent locking yourself out of the system
iptables -t raw -A PREROUTING -p tcp -s 10.0.0.0/8 --dport 22 -j ACCEPT
# Accept packets belonging to established and related connections.
# This needs to be one of the last access rules
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Put ICMP at the end to avoid TCP and UDP from hitting these rules.
# Limit the incoming icmp ping request to 1/sec. See the README for details on limits. Adjust the rate as needed:
iptables -A INPUT -p icmp -m limit --limit 1/s --limit-burst 1 -j ACCEPT
iptables -A INPUT -p icmp -m limit --limit 1/s --limit-burst 1 -j LOG --log-level 7 --log-prefix "PING-DROP: "
iptables -A INPUT -p icmp -j DROP
# Allow other icmp
iptables -t raw -A PREROUTING -p icmp --icmp-type any -j ACCEPT
# Enable dhcp
iptables -t raw -A PREROUTING -p udp --dport 67:68 --sport 67:68 -j ACCEPT
# Change the INPUT to default Drop
# Do not put any rules below this line!
iptables -A INPUT -j LOG --log-level 7 --log-prefix "DEFAULT-DROP: "
iptables -A INPUT -j DROP
# Set default policies for INPUT, FORWARD and OUTPUT chains
# Make sure OUTPUT is ACCEPT or things will break quickly
# In this case, the INPUT DROP is redundant, enable if you want belts and suspenders. Remember, -F does not flush the policy.
#iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT ACCEPT
# Save settings
/sbin/service iptables save
# List rules and stats for the tables
iptables -nvL --line-numbers
iptables -t raw -nvL --line-numbers
#iptables -t nat -nvL --line-numbers
| true
|
d05d06e5260a02c48d55436defed7b31aec371c3
|
Shell
|
animetauren/azure-quickstart-templates
|
/lamp-app/install_lamp_centOS.sh
|
UTF-8
| 578
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install and start the LAMP stack (Apache, MySQL, PHP) on CentOS.
#
# Usage: install_lamp_centOS.sh <mysql-root-password>
yum update -y
clear
echo 'Going to install the LAMP stack on your machine, here we go...'
echo '------------------------'
# set up a silent install of MySQL: root password comes from the first
# script argument; refuse to continue without one.
dbpass=$1
if [ -z "$dbpass" ]; then
    echo 'Usage: install_lamp_centOS.sh <mysql-root-password>' >&2
    exit 1
fi
# install the LAMP stack
yum install -y httpd php php-mysql mysql mysql-server
chkconfig httpd on
# The init service installed by mysql-server is named "mysqld" (see the
# /etc/init.d/mysqld restart below); the original "chkconfig mysql-server on"
# referenced a non-existent service name.
chkconfig mysqld on
/etc/init.d/mysqld restart
# Quoted so passwords containing spaces or globs survive intact.
/usr/bin/mysqladmin -u root password "$dbpass"
# write some PHP
echo -e "<?php phpinfo(); ?>" > /var/www/html/index.php
service httpd restart
clear
echo 'Okay.... apache, php and mysql is installed, running and set to your desired password'
| true
|
ed705ca3b90777be1be2e7c195ef0a1ee68c2131
|
Shell
|
seaflow-uw/seaflog
|
/build.sh
|
UTF-8
| 397
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the seaflog command-line tool for 64-bit MacOS and Linux, then gzip
# each resulting binary.
VERSION=$(git describe --long --dirty --tags)
targets=(darwin linux)
# Build phase: abort immediately if either cross-compile fails.
for os in "${targets[@]}"; do
    GOOS=$os GOARCH=amd64 go build -o "seaflog-${VERSION}-${os}-amd64" cmd/seaflog/main.go || exit 1
done
# Compress phase: runs only after both builds succeeded.
for os in "${targets[@]}"; do
    gzip "seaflog-${VERSION}-${os}-amd64"
done
| true
|
effeedabbff6dd69ade4ff52720e9b3484f2ff70
|
Shell
|
DaoWen/Cook
|
/scheduler/travis/setup.sh
|
UTF-8
| 244
| 2.546875
| 3
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Travis setup: fetch scheduler test dependencies and install the local
# jobclient build so the scheduler can depend on it.
set -e

# Remember where we started so we can return after building the jobclient.
# $( ) replaces the legacy backticks; paths are quoted in case they contain
# spaces.
PROJECT_DIR=$(pwd)
export PROJECT_DIR

# Install lein dependencies
lein with-profiles +test deps

# Install the current version of the jobclient
cd "${TRAVIS_BUILD_DIR}/jobclient"
lein do clean, compile, install
cd "${PROJECT_DIR}"
| true
|
00c625b36c216ac8921480c8f1eb10b00f3fd86d
|
Shell
|
tml3nr/moode-1
|
/command/player_wdog.sh
|
UTF-8
| 1,503
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# PlayerUI Copyright (C) 2013 Andrea Coiutti & Simone De Gregori
# Tsunamp Team
# http://www.tsunamp.com
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RaspyFi; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
# Rewrite by Tim Curtis and Andreas Goetz
#
#####################################
# watchdog for php5-fpm and daemon.php execution
# by Orion
#####################################
# Runs forever: every 10 seconds it (a) restarts php5-fpm if more than 15
# worker processes have piled up, and (b) relaunches daemon.php if the PID
# recorded in /run/player_wrk.pid is no longer alive.
numproc=`pgrep -c php5-fpm`
WRKPIDFILE='/run/player_wrk.pid'
# check player_worker exec
if [[ !(-x "/var/www/command/daemon.php") ]]
then
chmod a+x /var/www/command/daemon.php
fi
while true
do
# Too many php5-fpm workers: kill the daemon, drop its pid file and
# restart the php5-fpm service.
if (($numproc > 15)); then
killall daemon.php
rm $WRKPIDFILE > /dev/null 2>&1
service php5-fpm restart > /dev/null 2>&1
fi
# kill -0 only probes the PID; failure means the worker died (or the pid
# file is missing), so remove the stale file and relaunch daemon.php.
if ! kill -0 `cat $WRKPIDFILE` > /dev/null 2>&1; then
rm $WRKPIDFILE > /dev/null 2>&1
# On boot ("startup" argument) give the system time to settle first.
if [ "$1" == "startup" ]; then
sleep 15
fi
/var/www/command/daemon.php > /dev/null 2>&1
fi
sleep 10
numproc=`pgrep -c php5-fpm`
done
| true
|
913353e8fe37e31241605f05951d00d5fc0cb1c6
|
Shell
|
haoxi911/imagenet_utils
|
/synsets/convert.sh
|
UTF-8
| 328
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Convert every .xlsx workbook under the current directory to CSV with
# in2csv, mirroring the parent folder name under ./outputs.
if [ ! -d "./outputs" ]; then
    mkdir "./outputs"
fi
# read -r and quoted "$line" keep paths with spaces/backslashes intact;
# the original unquoted $line split such paths into multiple arguments.
find "./" -type f -name "*.xlsx" | while read -r line; do
    foldername=$(basename "$(dirname "$line")")
    filename=$(basename "$line" .xlsx)
    if [ ! -d "./outputs/$foldername" ]; then
        mkdir "./outputs/$foldername"
    fi
    # NOTE(review): output keeps the bare name (no .csv extension), matching
    # the original behavior.
    in2csv "$line" > "./outputs/$foldername/$filename"
done
| true
|
2698d3201b3dda38e77530b45dbc0c00bbde5aaf
|
Shell
|
cardio503/RealHandcuffs
|
/scripts/find_tools.sh
|
UTF-8
| 4,062
| 4.375
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
# this script looks for the required tools:
# - TOOL_7ZIP points to 7z.exe
# - DIR_FALLOUT4 points to the Fallout 4 directory
# - DIR_FALLOUT4CREATIONKIT points to the Fallout 4 Creation Kit directory (usually the same as DIR_FALLOUT4)
# if the -g argument is passed, it will output the above variables in format KEY="VALUE"
# if not, it will just check for presence of the tools and print human-readable messages
# make sure we clean up on exit
ORIGINAL_DIR=$(pwd)
function clean_up {
cd "$ORIGINAL_DIR"
}
trap clean_up EXIT
set -e
# check arguments
GENERATE=0
for var in "$@"
do
case "$var" in
"-g" )
GENERATE=1;;
"--generate" )
GENERATE=1;;
* )
if [[ "$var" != "-h" && "$var" != "--help" ]]
then
echo "Invalid argument: $var"
fi
echo "Usage: $(basename "$0") [-g|--generate]"
exit -1;;
esac
done
# switch to base directory of repo
SCRIPTS_DIR=$(dirname "$(realpath "${BASH_SOURCE[0]}")")
BASE_DIR=$(realpath "$SCRIPTS_DIR/..")
cd "$BASE_DIR"
# search for 7zip
# if 7z.exe (or symlink to it) is in tools directory, this is used
# otherwise try to find the install location in the registry
if [[ -f "tools/7z.exe" ]]
then
[[ $GENERATE == 0 ]] && echo "7-Zip: In tools directory."
TOOL_7ZIP="$(realpath "$BASE_DIR/tools/7z.exe")"
if [[ "$TOOL_7ZIP" != "$BASE_DIR/tools/7z.exe" ]]
then
[[ $GENERATE == 0 ]] && echo " Resolved to: $TOOL_7ZIP"
fi
else
REGISTRY=$(reg query "HKLM\SOFTWARE\7-Zip") || { >&2 echo "ERROR: Unable to find 7-Zip registry key."; exit 1; }
PATH_7ZIP=$(echo "$REGISTRY" | sed -rn "s/\s*Path64\s+REG_SZ\s+(.*)/\1/p" | sed 's/\\/\//g' | sed 's/://')
PATH_7ZIP="/${PATH_7ZIP%/}"
if [[ -f "$PATH_7ZIP/7z.exe" ]]
then
TOOL_7ZIP="$PATH_7ZIP/7z.exe"
[[ $GENERATE == 0 ]] && echo "7-Zip: $TOOL_7ZIP"
fi
fi
if [[ -z "$TOOL_7ZIP" ]]
then
>&2 echo "ERROR: Unable to find 7-Zip."
exit 1
fi
# search for Fallout 4
# if "Fallout 4" folder is in tools directory (probably symlink), this is used
# otherwise try to find the install location in the registry
if [[ -d "tools/Fallout 4" ]]
then
[[ $GENERATE == 0 ]] && echo "Fallout 4: In tools directory."
DIR_FALLOUT4="$(realpath "$BASE_DIR/tools/Fallout 4")"
if [[ "$DIR_FALLOUT4" != "$BASE_DIR/tools/Fallout 4" ]]
then
[[ $GENERATE == 0 ]] && echo " Resolved to: $DIR_FALLOUT4"
fi
else
REGISTRY=$(reg query "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\Steam App 377160") || { >&2 echo "ERROR: Unable to find Fallout 4 registry key."; exit 2; }
PATH_FALLOUT4=$(echo "$REGISTRY" | sed -rn "s/\s*InstallLocation\s+REG_SZ\s+(.*)/\1/p" | sed -e 's/\\/\//g' -e 's/://')
PATH_FALLOUT4="/${PATH_FALLOUT4%/}"
if [[ -d "$PATH_FALLOUT4" ]]
then
DIR_FALLOUT4="$PATH_FALLOUT4"
[[ $GENERATE == 0 ]] && echo "Fallout 4: $DIR_FALLOUT4"
fi
fi
if [[ -z "$DIR_FALLOUT4" ]]
then
>&2 echo "ERROR: Unable to find Fallout 4."
exit 2
fi
# search for Fallout 4 Creation Kit
# if "Fallout 4 Creation Kit" folder is in tools directory (probably symlink), this is used
# otherwise try to find it in the same folder as Fallout 4
if [[ -d "tools/Fallout 4 Creation Kit" ]]
then
[[ $GENERATE == 0 ]] && echo "Fallout 4 Creation Kit: In tools directory."
DIR_FALLOUT4CREATIONKIT="$(realpath "$BASE_DIR/tools/Fallout 4 Creation Kit")"
if [[ "$DIR_FALLOUT4CREATIONKIT" != "$BASE_DIR/tools/Fallout 4 Creation Kit" ]]
then
[[ $GENERATE == 0 ]] && echo " Resolved to: $DIR_FALLOUT4CREATIONKIT"
fi
else
if [[ -f "$DIR_FALLOUT4/CreationKit.exe" ]]
then
DIR_FALLOUT4CREATIONKIT="$DIR_FALLOUT4"
[[ $GENERATE == 0 ]] && echo "Fallout 4 Creation Kit: In Fallout 4 directory."
fi
fi
if [[ -z "$DIR_FALLOUT4CREATIONKIT" ]]
then
>&2 echo "ERROR: Unable to find Fallout 4 Creation Kit."
exit 3
fi
# done, echo commands to set environment variables if requested to do so
if [[ $GENERATE == 1 ]]
then
echo TOOL_7ZIP=\"$TOOL_7ZIP\"
echo DIR_FALLOUT4=\"$DIR_FALLOUT4\"
echo DIR_FALLOUT4CREATIONKIT=\"$DIR_FALLOUT4CREATIONKIT\"
fi
| true
|
fe8de1a855b5a8046cc035d3ca2c2912225eb14d
|
Shell
|
drdelaney/jmdgentoooverlay
|
/Other/shell-scripts/mythtv/pxcut2.sh
|
UTF-8
| 10,491
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh -e
# Copyright (C) 2010 John Pilkington
# Largely based on scripts posted by Tino Keitel and Kees Cook in the Mythtv lists.
# Usage: ./mythcutprojectx <recording>
# <recording> is an mpeg2 file recorded by MythTV with a valid DB entry.
# This script is essentially a terminal-based replacement for the 'lossless' mpeg2 mythtranscode.
# It will pass the recording and the MythTV cutlist to ProjectX.
# If the cutlist is empty the entire recording will be processed.
# It uses ffmpeg to report what streams are present, chooses the first video and audio streams listed,
# and gives the user TIMEOUT seconds to accept that choice or quit and make another.
# It uses ProjectX to demux, and mplex (from mjpegtools) to remux.
# Output format is DVD compliant without nav packets.
# It then clears the cutlist, updates the filesize in the database and rebuilds the seek table.
# The result is apparently acceptable as a recording within MythTV and as input to MythArchive.
# The ProjectX log file and ffmpeg stream analysis are kept. Other tempfiles are deleted.
# The variable INVERT controls the sense in which the cutlist is applied.
# The script needs to be edited to define some local variables.
####################
# Variables RECDIR1, RECDIR2, TEMPDIR1, TEMPDIR2, PROJECTX, PASSWD, INVERT need to be customised.
# At present (July 2010) MythTV trunk and fixes apparently apply the cutlist in opposite senses.
# TESTRUN is initially set to true so that the polarity of the cutlist that will be passed to Project-X can be checked
# RECDIR1 and TEMPDIR1 should if possible be on different drive spindles. Likewise RECDIR2 and TEMPDIR2.
RECDIR1=/mnt/mythtv/jmd0_vg_2t3_0/videos
TEMPDIR1=/var/tmp/mythtv/tmp
RECDIR2=/mnt/mythtv/jmd0_vg_2t2_0/videos
TEMPDIR2=/var/tmp/mythtv/tmp
#PROJECTX=/path/to/ProjectX.jar (or to a link to it)
PROJECTX=$(equery files projectx | grep jar)
#PASSWD=`grep "^DBPassword" ~/.mythtv/mysql.txt | cut -d '=' -f 2-`
PASSWD=mythtv
# INVERT=true # old MythTV setting, used in "fixes"
INVERT=false # setting for use in trunk
TIMEOUT=20 # Longest 'thinking time' in seconds allowed before adopting the automatically selected audio stream.
#TESTRUN=true # cutlists will be shown but the recording will be unchanged
TESTRUN=false # the recording will be processed
#################
if [ "$1" = "-h" ] || [ "$1" = "--help" ] ; then
echo "Usage: "$0" <recording>"
echo "<recording> is an mpeg2 file recorded by MythTV with a valid DB entry."
echo "e.g. 1234_20100405123400.mpg in one of the defined RECDIRs"
echo "The output file replaces the input file which is renamed to <recording>.old"
exit 0
fi
# exit if .old file exists
if [ -f ${RECDIR1}/"$1".old ] ; then
echo " ${RECDIR1}/"$1".old exists: giving up." ; exit 1
fi
if [ -f ${RECDIR2}/"$1".old ] ; then
echo " ${RECDIR2}/"$1".old exists: giving up." ; exit 1
fi
# Customize with paths to alternative recording and temp folders
cd $RECDIR1
TEMP=$TEMPDIR1
if [ ! -f "$1" ] ; then
cd $RECDIR2
TEMP=$TEMPDIR2
if [ ! -f "$1" ] ; then
echo " "$1" not found. Giving up"
cd ~
exit 1
fi
fi
if [ $# -lt 3 ]
then
echo "Error: needs three arguments. Running ffmpeg -i "$1" 2>&1 | grep -C 4 Video "
echo
ffmpeg -i "$1" 2>&1 | grep -C 4 Video | tee temp$$.txt
echo
# Thanks to Christopher Meredith for the basic parsing magic here.
VPID=`grep Video temp$$.txt | head -n1 | cut -f 1,1 -d']' | sed 's+.*\[++g'`
# It has to be tweaked for multiple audio streams. This (with head -n1 ) selects the first listed by ffmpeg.
# You may alternatively wish to select for language, format, etc. May be channel, programme, user dependent.
APID=`grep Audio temp$$.txt | head -n1 | cut -f 1,1 -d']' | sed 's+.*\[++g'`
echo -e "Choosing the first audio track listed by \" ffmpeg -i \". It may not be the one you want."
echo -e "\nThe selected values would be "$VPID" and "$APID". The track info for these is \n"
grep "$VPID" temp$$.txt
grep "$APID" temp$$.txt
echo -e "\nTo accept these values press \"a\", or wait....\n"
echo "If you want to select other values, or quit to think about it, press another key within $TIMEOUT seconds."
read -t $TIMEOUT -n 1 RESP
if [ $? -gt 128 ] ; then
RESP="a"
fi
if [ "$RESP" != "a" ] ; then
echo -e "Quitting: if you want to select the PIDs from the command line its expected form is \n"
echo " "$0" 1234_20070927190000.mpg 0xvvv 0xaaa "
echo -e " filename_in_DB vPID aPID \n"
cd ~
exit 1
fi
echo -e "Going on: processing with suggested values $VPID $APID \n"
grep "$VPID" temp$$.txt
grep "$APID" temp$$.txt
echo
else
VPID="$2"
APID="$3"
fi
#Now do the actual processing
# chanid and starttime identify the recording in the DB
chanid=`echo "select chanid from recorded where basename=\"$1\";" |
mysql -N -u mythtv -p$PASSWD mythconverg `
starttime=`echo "select starttime from recorded where basename=\"$1\";" |
mysql -N -u mythtv -p$PASSWD mythconverg `
# In 0.24 an initial zero is apparently treated as a normal cut-in point.
# list0 shows cut-in points and eof, but in 0.23 never includes zero
list0=`echo "select mark from recordedmarkup
where chanid=$chanid and starttime='$starttime' and type=0 order by mark;" |
mysql -N -u mythtv -p$PASSWD mythconverg `
#list1 shows cut-out points. In 0.23 an initial 0 here is a cut-in.
list1=`echo "select mark from recordedmarkup
where chanid=$chanid and starttime='$starttime' and type=1 order by mark;" |
mysql -N -u mythtv -p$PASSWD mythconverg `
echo "CollectionPanel.CutMode=0" > cutlist$$ ;
if ! $INVERT ; then
FIRSTCUT=`echo "select mark from recordedmarkup
where chanid=$chanid and starttime='$starttime' and type=1 order by mark limit 1;" |
mysql -N -u mythtv -p$PASSWD mythconverg `
FIRSTEDIT=`echo "select mark from recordedmarkup
where chanid=$chanid and starttime='$starttime' and type in (0,1) order by mark limit 1;" |
mysql -N -u mythtv -p$PASSWD mythconverg `
if [ ${FIRSTCUT} = ${FIRSTEDIT} ] ; then
# echo "that was a cut-out point and we need to insert an earlier cut-in point"
echo "0" >> cutlist$$
fi
list=`echo "select mark from recordedmarkup
where chanid=$chanid and starttime='$starttime' and type in (0,1) order by mark;" |
mysql -N -u mythtv -p$PASSWD mythconverg `
else
for i in $list1 ;
do
if [ $i = "0" ]
then
list=`echo "select mark from recordedmarkup
where chanid=$chanid and starttime='$starttime' and type in (0,1) order by mark;" |
mysql -N -u mythtv -p$PASSWD mythconverg | tail -n +2 `
# tail -n +2 drops the initial zero.
else
echo "0" >> cutlist$$
# That isn't quite the same as inserting a leading zero in list. Does it matter?
list=`echo "select mark from recordedmarkup
where chanid=$chanid and starttime='$starttime' and type in (0,1) order by mark;" |
mysql -N -u mythtv -p$PASSWD mythconverg `
fi
# use only the first element of list1, as a switch.
break
done
fi
# find the key frame (mark type 9) right before each cut mark,
# extract the byte offset, write it into the ProjectX cutlist
for i in $list ;
do echo "select offset from recordedseek
where chanid=$chanid and starttime='$starttime' and type=9 and mark >= $i and mark < ($i + 100)
order by offset;" |
mysql -N -u mythtv -p$PASSWD mythconverg | head -n 1
# for each cycle, head -n 1 yields the first line only.
done >> cutlist$$
echo "list0"
echo $list0
echo
echo "list1"
echo $list1
echo
echo "list"
echo $list
echo
echo -e "\"list\" is MythTV's frame-count cutlist that is used to create the byte-count cutlist used here by Project-X."
echo "At the time of writing (July 2010) the internal cutlists used by fixes and trunk appear to work in opposite senses."
echo "The cut can be inverted by adding or removing a leading zero. "
echo -e "That is what the INVERT variable does. For fixes it should be set to \"true\" \n"
echo "This is the byte-count cutlist for Project-X. The first value is a cut-in point."
echo -e "Cut-out and cut-in then follow in sequence to EOF. \n"
cat cutlist$$
echo -e "\nThis is a test exit point for you to check that INVERT is correctly set."
echo -e "Its value now is \"${INVERT}\". When it's OK, edit the script to set TESTRUN=false.\n"
if $TESTRUN ; then
rm -f cutlist$$
rm -f temp$$.txt
cd ~
exit 0
fi
mv "$1" "$1".old
# use ProjectX to de-multiplex selected streams with the created cutlist
#ionice -c3 java -jar "$PROJECTX" -name tempcut$$ -id ${VPID},${APID} -out $TEMP -cut cutlist$$ "$1".old || :
ionice -c3 projectx -name tempcut$$ -id ${VPID},${APID} -out $TEMP -cut cutlist$$ "$1".old || :
# and pipe for re-multiplexing to mplex. -f 9 is dvd format without navpacks
DEMUXPREF=$TEMP/tempcut${$}
if [ -f $DEMUXPREF.mp2 ] ; then
DEMUXAUDIO=$DEMUXPREF.mp2
else
DEMUXAUDIO=$DEMUXPREF.ac3
fi
ionice -c3 mplex -o "$1" -V -f 9 $DEMUXPREF.m2v $DEMUXAUDIO
# tell mythDB about new filesize and clear myth cutlist
FILESIZE=`du -b "$1" | cut -f 1`
if [ "${FILESIZE}" -gt 1000000 ]; then
echo "Running: update recorded set filesize=${FILESIZE} where basename=\"$1\";"
echo "update recorded set filesize=${FILESIZE} where basename=\"$1\";" | mysql -u mythtv -p$PASSWD mythconverg
echo "Filesize has been reset"
echo "Running: ionice -c3 mythcommflag -f "$1" --clearcutlist"
ionice -c3 mythcommflag -f "$1" --clearcutlist
echo "Cutlist has been cleared"
fi
#rebuild seek table
echo "Running: ionice -c3 mythtranscode --mpeg2 --buildindex --showprogress --chanid "$chanid" --starttime "$starttime""
ionice -c3 mythtranscode --mpeg2 --buildindex --showprogress --chanid "$chanid" --starttime "$starttime"
echo -e "Seek table has been rebuilt.\n"
echo -e "Output file is $1. INVERT is set to \"${INVERT}\". PID streams $VPID and $APID were copied.\n"
if [ -f temp$$.txt ] ; then
echo -e "Their original parameters were \n"
grep "$VPID" temp$$.txt
grep "$APID" temp$$.txt
cat temp$$.txt >> "$DEMUXPREF"_log.txt
echo
fi
rm -f "$1".png
#rm -f $TEMP/tempcut${$}*
mv ${DEMUXPREF}_log.txt ${TEMP}/"$1"_pxlog.txt
rm -f $DEMUXPREF.m2v
rm -f $DEMUXAUDIO
rm -f cutlist$$
rm -f temp$$.txt
cd ~
exit 0
| true
|
7752cdc80a8e1bfaca6fc78c50fc86b50caf68fd
|
Shell
|
georgeroman/ilp-iroha-settlement
|
/examples/scripts/iroha-iroha-payment/iroha-iroha-payment.sh
|
UTF-8
| 2,663
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# End-to-end demo of an ILP payment across two Iroha chains:
# Alice (on Iroha0) pays Charlie (on Iroha1) through connector Bob,
# who holds accounts on both chains. Balances are printed before and
# after so the transfer can be verified by eye.

# Seed both chains with test funds.
printf "Sending transaction for depositing assets into Alice's Iroha0 account...\n"
../helpers/iroha-add-asset-quantity.py "localhost:50051" "alice@test" "../../iroha0-data/alice@test.priv" "coin0#test" "1000"

printf "Sending transaction for depositing assets into Bob's Iroha1 account...\n"
../helpers/iroha-add-asset-quantity.py "localhost:50052" "bob@test" "../../iroha1-data/bob@test.priv" "coin1#test" "1000"

# Pre-payment balances on both chains.
printf "Checking Alice's Iroha0 balances...\n"
../helpers/iroha-check-balances.py "localhost:50051" "alice@test" "../../iroha0-data/alice@test.priv" "alice@test"

printf "Checking Bob's Iroha0 balances...\n"
../helpers/iroha-check-balances.py "localhost:50051" "alice@test" "../../iroha0-data/alice@test.priv" "bob@test"

printf "Checking Bob's Iroha1 balances...\n"
../helpers/iroha-check-balances.py "localhost:50052" "bob@test" "../../iroha1-data/bob@test.priv" "bob@test"

printf "Checking Charlie's Iroha1 balances...\n"
../helpers/iroha-check-balances.py "localhost:50052" "bob@test" "../../iroha1-data/bob@test.priv" "charlie@test"

# Set up all accounts
./setup-accounts.sh

# All connectors must be aware of the exchange rate of the assets being exchanged.
# The three PUTs below hit the Alice (7770), Bob (8770) and Charlie (9770) nodes.
printf "Informing connectors about the exchange rates...\n"
curl --silent --output /dev/null --show-error \
    -X PUT -H 'Authorization: Bearer alice_auth_token' \
    -d '{"COIN0#TEST": 1, "COIN1#TEST": 1}' \
    http://localhost:7770/rates
curl --silent --output /dev/null --show-error \
    -X PUT -H 'Authorization: Bearer bob_auth_token' \
    -d '{"COIN0#TEST": 1, "COIN1#TEST": 1}' \
    http://localhost:8770/rates
curl --silent --output /dev/null --show-error \
    -X PUT -H 'Authorization: Bearer charlie_auth_token' \
    -d '{"COIN0#TEST": 1, "COIN1#TEST": 1}' \
    http://localhost:9770/rates

# Fire the actual ILP payment from inside the docker network.
printf "Sending a payment from Alice to Charlie...\n"
docker run --rm --network examples_ilp-network interledgerrs/ilp-cli:latest \
    --node http://alice-node:7770 pay alice \
    --auth in_alice \
    --amount 500 \
    --to http://charlie-node:9770/accounts/charlie/spsp

# Give settlement a moment to land before re-reading balances.
sleep 10

printf "Checking Alice's Iroha0 balances...\n"
../helpers/iroha-check-balances.py "localhost:50051" "alice@test" "../../iroha0-data/alice@test.priv" "alice@test"

printf "Checking Bob's Iroha0 balances...\n"
../helpers/iroha-check-balances.py "localhost:50051" "alice@test" "../../iroha0-data/alice@test.priv" "bob@test"

printf "Checking Bob's Iroha1 balances...\n"
../helpers/iroha-check-balances.py "localhost:50052" "bob@test" "../../iroha1-data/bob@test.priv" "bob@test"

printf "Checking Charlie's Iroha1 balances...\n"
../helpers/iroha-check-balances.py "localhost:50052" "bob@test" "../../iroha1-data/bob@test.priv" "charlie@test"
| true
|
01b4a04a7eba90fcf71919f8328ec2f47d163716
|
Shell
|
LeviBenjaminGualterio/olger
|
/scripts/masiveolger.sh
|
UTF-8
| 488
| 3.21875
| 3
|
[] |
no_license
|
# Walk $basepath/data/$1/ two levels deep and, for every file whose name
# ends in ".xml" (an nmap XML report), convert it to JSON and feed the
# result to nmaptograph.
#
# Fixes vs. the original:
#   - "baseàth" (mojibake) renamed to basepath, which is the variable
#     actually read below; previously basepath expanded empty.
#   - the second-level glob was quoted ("$f/*"), so it only worked by
#     accident via a later unquoted re-expansion; it is now expanded
#     directly and every other expansion is quoted.
basepath="/"
for f in "$basepath"/data/"$1"/*; do
	if [ -d "$f" ]; then
		echo ""
		for f2 in "$f"/*; do
			# Directories never end in ".xml", so the extension check
			# below also filters them out.
			ext="${f2: -4}"
			if [[ ".xml" == $ext ]]; then
				python3 "$basepath"/CVES/CVE-Scan/bin/converter.py "$f2" "$f2".json
				python "$basepath"/CVES/CVE-Scan/nmaptograph.py "$f2".json "$1"
			fi
		done
	fi
done
| true
|
94188cee3773a92df15d151f51faff6dacdb77ba
|
Shell
|
kragebein/plexbot
|
/bot/functions.sh
|
UTF-8
| 5,607
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash -
# There is no reason what so ever to edit this file unless you totally know what you are doing. In that case, have fun.
# Shared bootstrap: pull in the bot configuration, then set the logging
# context used by log() below.
source /drive/drive/.rtorrent/scripts/v3/bot/config.cfg
#source "${pb%/*}/lang/$language.lang" #TODO
_script="functions.sh"
# NOTE(review): dtg is computed once when this file is sourced, not per
# log call, so long-running scripts log a stale timestamp — confirm
# whether that is intentional.
dtg="[$(date +%d-%m-%y) $(date +%H:%M:%S)]"
# Unified logger. $1 selects the level (n/not, w/wrn, e/err, s/say —
# anything else is logged as "undef"); the remaining args are the
# message. Every level appends to the per-script log
# ($log_path/<script>.log) and the combined plexbot.log, and echoes to
# stderr. Level "s" additionally relays to chat via say(); level "e" is
# fatal: it relays via say() and exits 1.
log() {
	# Log v3
#	if ! [ -w "$log_path/$_script" ]; then
#		echo "ERROR, incorrect permissions in log_path ($log_path/$_script), cannot write to it!!"
#		echo "Check permissions or edit config.cfg"
#		exit 1
#	fi
	# Strip the leading level word from the argument list.
	# NOTE(review): "${*##log $1}" only removes a literal "log <level>"
	# prefix; the awk split on "^$1 " is what actually drops the level —
	# confirm both are needed.
	message=$(echo "${*##log $1}" | awk -F "^$1 " '{print $2}')
	case $1 in
		'NOT'|'not'|'n')
			echo "$dtg [notification]: $message" >> "$log_path/${_script%%.*}.log"
			echo "$dtg [notification/${_script%%.*}]: $message" >> "$log_path/plexbot.log"
			echo "$message" >&2
		;;
		'WRN'|'wrn'|'w')
			echo "$dtg [warning]: $message" >> "$log_path/${_script%%.*}.log"
			echo "$dtg [warning/${_script%%.*}]: $message" >> "$log_path/plexbot.log"
			echo "[warning] $message" >&2
			#syslog "[$0 - warning] - $message"
		;;
		'err'|'ERR'|'e')
			# Fatal path: log, announce on chat, then terminate the script.
			echo "$dtg [error]: $message" >> "$log_path/${_script%%.*}.log"
			echo "$dtg [error/${_script%%.*}]: $message" >> "$log_path/plexbot.log"
			echo "[ERROR] $message" >&2
			# syslog "[$0 - error] $message"
			echo "Unrecoverable, exiting." >&2
			say "#log :$_script: $message"
			exit 1
		;;
		'say'|'SAY'|'s')
			echo "$dtg [saying]: $message" >> "$log_path/${_script%%.*}.log"
			echo "$dtg [saying/${_script%%.*}]: $message" >> "$log_path/plexbot.log"
			echo "[saying] $message" >&2
			# syslog "[$0 - warning] $message"
			say "#log :$message"
		;;
		*) message="$*"
			# Unknown level: log the whole argument list verbatim.
			echo "$dtg [undef]: $message" >> "$log_path/${_script%%.*}.log"
			echo "$dtg [undef/${_script%%.*}]: $message" >> "$log_path/plexbot.log"
			echo "[undef]: $message" >&2
	esac
}
# Emit all arguments as a single line on stderr (quick debug tracer).
debug() {
	local text="$*"
	echo "$text" >&2
}
# Source a helper file into the current shell; on failure, retag the
# logging context and abort via the fatal "e" log level.
load() { #note to self, log before load.
	if ! source "$1" ; then
		_script="functions_loader"
		log e "Unable to load $1, see error log."
	fi
}
# Pull in the chat-protocol driver and database layer at source time.
load "${pb%/*}/proto/$proto.sh" #load protocol
load "${pb%/*}/db/sql.sh" #load database
# User access control: set authorized="y" if $who_orig appears in the
# auth_user[] array; otherwise tell the requester on chat, log the
# attempt, and exit the calling script.
uac() {
	# user access control
	for i in "${!auth_user[@]}"; do if [ "$who_orig" == "${auth_user[$i]}" ]; then authorized="y";fi;done
	if [ -z "$authorized" ]; then say "$who :Beklager, du e ikke autorisert.";log s "uac: $who_orig prøvd å bruk plexbota.";exit;fi
}
# Admin access control: set bot_admin="y" if $who_orig appears in the
# admin_user[] array; otherwise announce the refusal on chat, log it,
# and exit the calling script. Mirrors uac() above.
#
# Fix: the original iterated `$(seq "${!admin_user[@]}")`. seq over the
# index list is wrong — with a single admin, `seq 0` prints nothing, and
# with four or more admins seq rejects the extra operands; either way
# the loop never matched and admins were locked out.
req_admin() {
	# admin access control
	for i in "${!admin_user[@]}"; do if [ "$who_orig" == "${admin_user[$i]}" ]; then bot_admin="y"; fi; done
	if [ -z "$bot_admin" ]; then say "$who :Den her handlinga krev høgere tilgang. Uautorisert.";log s "$who_orig prøvd å bruk en adminkommando";exit;fi
}
# Rebuild $pb/plugins/.loaded: an executable snippet declaring an
# associative array `plug` that maps each plugin script path to the
# value of its `regex=` line. Plugins without a regex line are skipped.
# Announces the plugin count on chat when done.
reload_plugins() {
	p=0
	echo "declare -A plug" > "$pb/plugins/.loaded"
	echo "plug=(" >> "$pb/plugins/.loaded"
	for i in $pb/plugins/*.sh; do
		# Grab the plugin's trigger pattern, e.g. regex="^!foo".
		lol="$(egrep "^regex=" "$i")"
		if [ ! -z "$lol" ]; then
			# Strip the quotes and the regex= prefix, keeping the bare pattern.
			lol="${lol//\"/}"
			lol="${lol//regex=/}"
			echo "[$i]=\"$lol\"" >> "$pb/plugins/.loaded"
			let p=p+1
		fi
	done
	echo ")" >> "$pb/plugins/.loaded"
	chmod +x "$pb/plugins/.loaded"
	say "$who :Konfigurasjon lasta om, $p plugins e klar."
}
# Percent-encode the characters listed below for use in URLs, writing
# the result to stdout (and leaving it in the global $out, as before).
# '%' is encoded first so percent signs introduced by later
# replacements are not re-escaped.
html_ascii () {
	out="$*"
	local -a raw=( '%' '&' '$' ' ' "'" '"' '/' '(' ')' '<' '>' '?' '!' '=' '\' ',' ':' ';' '[' ']' '{' '}' )
	local -a hex=( 25 26 24 20 27 22 2F 28 29 3C 3E 3F 21 3D 5C 2C 3A 3B 5B 5D 7B 7D )
	local idx
	for idx in "${!raw[@]}"; do
		out=${out//"${raw[idx]}"/%${hex[idx]}}
	done
	echo "$out"
}
# Print the id most recently stored by put.last() (kept in /tmp).
read.last() {
	cat /tmp/.lastadd
}
# Persist the current $imdbid so later invocations can read it back
# via read.last().
put.last() {
	echo "$imdbid" > /tmp/.lastadd
}
# Translate between IMDb ids and TheTVDB series ids via the TVDB v2 API.
# An argument that looks like an IMDb id (tt...) yields/export-s
# $rating_key; anything else is treated as a TVDB series id and
# yields/export-s $imdbid. The result is also echoed to stdout.
# Handles expired tokens by refreshing (or re-logging-in) and rewriting
# the token into config.cfg.
ttdb() { # if you input imdbid it will create $rating_key, if you input rating_key it will create $imdbid
	_script="ttdb.log"
	# Persist the freshly obtained token ($key) into config.cfg by
	# rewriting the ttdb_token= line in place.
	rewrite() {
		buffer=$(mktemp)
		sed "s/ttdb_token=\"$ttdb_token\"/ttdb_token=\"$key\"/g" "$pb/config.cfg" > "$buffer"
		mv -f "$buffer" "$pb/config.cfg";chmod +x "$pb/config.cfg"
		log n "rewrote token to config"
	}
	# Refresh the bearer token; if refresh fails (jq yields "null"),
	# fall back to a full login, and give up if that fails too.
	get_key() {
		key="$(curl -s -X GET --header 'Accept: application/json' --header "Authorization: Bearer $ttdb_token" 'https://api.thetvdb.com/refresh_token' |jq -r '.token')"
		if [ "$key" = "null" ]; then
			key="$(curl -s -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' -d '{ "apikey": "'"$ttdb_api"'", "userkey": "'"$ttdb_ukey"'", "username": "'"$ttdb_user"'" }' 'https://api.thetvdb.com/login' |jq -r '.token')"
			if [ "$key" = "null" ]; then
				log s "Could not refresh/get new token! ttdb token failiure";exit
			fi
		fi
		rewrite "$key"
		ttdb_token="$key"
	}
	# Inspect the API response in $json for an .Error field and react:
	# re-auth on "Not authorized", silently exit on "Resource not found",
	# echo and exit on anything else.
	check() {
		json="${json//\\n/}"
		json="${json//\\r/}"
		_test="$(echo "$json" |jq -r '.Error')"
		if [ "$_test" != "null" ]; then
			case $_test in
				'Not authorized')
					get_key;;
				'Resource not found')
					exit;; # ttdbid not found
				*)
					echo "$_test"
					exit
				;;
			esac
		fi
	}
	input="$1"
	# NOTE(review): the pattern ^.t.{0,9} is a loose IMDb-id check — it
	# matches any "?t..." string, not just tt-prefixed ids; confirm that
	# callers only ever pass tt ids or numeric TVDB ids.
	if [[ "$input" =~ ^.t.{0,9} ]]; then
		json="$(curl -s -X GET --header 'Accept: application/json' --header "Authorization: Bearer $ttdb_token" "https://api.thetvdb.com/search/series?imdbId=$input")"
		check "$json"
		rating_key="$(echo -ne "$json" |jq '.data[0].id')"
		export rating_key
		echo "$rating_key"
	else
		json="$(curl -s -X GET --header 'Accept: application/json' --header "Authorization: Bearer $ttdb_token" "https://api.thetvdb.com/series/$input")"
		check "$json"
		imdbid="$(echo -ne "$json" |jq -r '.data.imdbId')"
		export imdbid
		echo "$imdbid"
	fi
}
| true
|
ed84a0eb4478546f2471691f7e8efa3c986f5454
|
Shell
|
liis/tth_tests
|
/transfers/hadd_and_save_step2.sh
|
UTF-8
| 2,757
| 3.1875
| 3
|
[] |
no_license
|
#! /bin/sh
###############################
# hadd the root-files, produced by VHbb submitStep2.py to have one flat tree per dataset
# Specify INDIR -- directory of root files to hadd
# OUTDIR -- directory of merged root files
# Optionally set CLEAN_UP=1 to remove input files to hadd on the go
# COPY_TO_STORAGE=1 to copy trees from /home/ directory to storage element -- note that copying is slow.
###############################
# ROOT must be sourced for hadd to be available.
if [ -z $ROOTSYS ]; then
    echo "ROOTSYS is not defined: source ROOT, or hadd won't work!"
    exit
fi

CLEAN_UP=0
COPY_TO_STORAGE=1
OVERWRITE_FILES_AT_STORAGE=0 # set different from 0, if you want to overwrite existing files at storage element

INDIR="Ntuples_new"
OUTDIR_LOCAL="Ntuples_new/Ntuples_merged"
OUTDIR_STORAGE="/hdfs/cms/store/user/liis/TTH_Ntuples_v3/"
SRMPATH="srm://ganymede.hep.kbfi.ee:8888/srm/v2/server?SFN="

BASE_STR="DiJetPt_"
DATASETS=("WZ_TuneZ2star_8TeV_pythia6_tauola" "ZZ_TuneZ2star_8TeV_pythia6_tauola" "WW_TuneZ2star_8TeV_pythia6_tauola" "WJetsToLNu_TuneZ2Star_8TeV-madgraph-tarball" "Tbar_tW-channel-DR_TuneZ2star_8TeV-powheg-tauola" "Tbar_s-channel_TuneZ2star_8TeV-powheg-tauola" "Tbar_t-channel_TuneZ2star_8TeV-powheg-tauola" "T_tW-channel-DR_TuneZ2star_8TeV-powheg-tauola" "T_s-channel_TuneZ2star_8TeV-powheg-tauola" "T_t-channel_TuneZ2star_8TeV-powheg-tauola" "TTJets_FullLeptMGDecays_8TeV-madgraph" "TTJets_HadronicMGDecays_8TeV-madgraph" "TTJets_MassiveBinDECAY_8TeV-madgraph" "TTJets_SemiLeptMGDecays_8TeV-madgraph" "DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph" "SingleMuRun2012AAug06" "SingleMuRun2012AJul13" "SingleMuRun2012BJul13" "SingleMuRun2012CAug24Rereco" "SingleMuRun2012C-EcalRecover_11Dec2012-v1_v2" "SingleMuRun2012CPromptv2" "SingleMuRun2012CPromptV2TopUp" "SingleMuRun2012D-PromptReco-v1")

for DATASET in ${DATASETS[@]}
do
    NR_ROOTFILES=`ls $INDIR"/"*$DATASET*.root 2> /dev/null | wc -l`
    echo Processing dataset $DATASET with $NR_ROOTFILES input files
    if [ $NR_ROOTFILES != 0 ]; then
	# NOTE(review): the actual merge below is commented out, yet the
	# CLEAN_UP branch would still delete the inputs — confirm this is
	# intentional before enabling CLEAN_UP=1.
	# hadd -f $OUTDIR_LOCAL"/"$BASE_STR$DATASET.root $INDIR"/"*$DATASET*.root
	if [ $CLEAN_UP == 1 ]; then # clean up the rootfiles
	    echo "Removing initial root files... "
	    rm $INDIR"/"*$DATASET*.root
	fi
    fi

    # Copy the merged file to the storage element unless a file already
    # exists there and overwriting is disabled.
    if [ $COPY_TO_STORAGE == 1 ] && [ -e $OUTDIR_LOCAL"/"$BASE_STR$DATASET.root ]; then
	echo copying $OUTDIR_LOCAL"/"$BASE_STR$DATASET.root to storage: $OUTDIR_STORAGE
	if [ -e $OUTDIR_STORAGE$BASE_STR$DATASET.root ] && [ $OVERWRITE_FILES_AT_STORAGE == 0 ] ; then
	    echo WARNING! Dataset already exists at destination -- file not copied!
	else
	    srmcp -2 "file:///./"$OUTDIR_LOCAL"/"$BASE_STR$DATASET.root $SRMPATH$OUTDIR_STORAGE$BASE_STR$DATASET.root
	    echo ...done
	fi
    fi # end if copy to storage
done
| true
|
ad713720bba2c2a5fe177e1972f9c11b577d1053
|
Shell
|
jinnerbichler/neural-politician-ai
|
/intelligence/start.sh
|
UTF-8
| 5,746
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Lifecycle driver for the "neural-politician" GCE GPU VM.
# Dispatches on $1: create | delete | restart | upload-data |
# upload-models | init | init-remote | deploy | reset | stop |
# download-models | download-data | logs.

INSTANCE_NAME="neural-politician"

set -e # Exits immediately if a command exits with a non-zero status.

if [ "$1" == "create" ]; then

    # create: provision a preemptible K80 VM from the prebuilt "gpu-image",
    # then block until SSH answers.
    # https://cloud.google.com/compute/pricing
    gcloud compute instances create ${INSTANCE_NAME} \
        --machine-type n1-standard-4 --zone us-east1-d \
        --accelerator type=nvidia-tesla-k80,count=1 \
        --boot-disk-size=100GB --image gpu-image \
        --maintenance-policy TERMINATE --restart-on-failure \
        --preemptible

    while [ -n "$(gcloud compute ssh ${INSTANCE_NAME} --command "echo ok" --zone us-east1-d 2>&1 > /dev/null)" ]; do
        echo "Waiting for VM to be available"
        sleep 1.0
    done

elif [ "$1" == "delete" ]; then
    gcloud compute instances delete --zone us-east1-d ${INSTANCE_NAME} --quiet

elif [ "$1" == "restart" ]; then
    gcloud compute instances start ${INSTANCE_NAME}

elif [ "$1" == "upload-data" ]; then
    # Push the local ./data directory to the VM's home directory.
    gcloud compute scp ./data/ ${INSTANCE_NAME}:~/ --recurse --zone us-east1-d
#    gcloud compute ssh ${INSTANCE_NAME} --command="sudo chmod -R 777 /data" --zone us-east1-d

elif [ "$1" == "upload-models" ]; then
#    gcloud compute ssh ${INSTANCE_NAME} --command="sudo chmod 777 ./models" --zone us-east1-d
    gcloud compute scp ./models/ ${INSTANCE_NAME}:~/ --recurse --zone us-east1-d

elif [ "$1" == "init" ]; then
    # init: provision from a stock Ubuntu image, wait for SSH, then run
    # the init-remote branch of this same script on the VM.
    # https://cloud.google.com/compute/pricing
    gcloud compute instances create ${INSTANCE_NAME} \
        --machine-type n1-standard-4 --zone us-east1-d \
        --accelerator type=nvidia-tesla-k80,count=1 \
        --boot-disk-size=100GB \
        --image-family ubuntu-1604-lts --image-project ubuntu-os-cloud \
        --maintenance-policy TERMINATE --restart-on-failure \
        --preemptible

    while [ -n "$(gcloud compute ssh ${INSTANCE_NAME} --command "echo ok" --zone us-east1-d 2>&1 > /dev/null)" ]; do
        echo "Waiting for VM to be available"
        sleep 1.0
    done

    # Sleep to be sure
    sleep 1.0

    gcloud compute scp ./start.sh ./daemon.json ${INSTANCE_NAME}:~/ --zone us-east1-d
    gcloud compute ssh ${INSTANCE_NAME} --command="~/start.sh init-remote" --zone us-east1-d

elif [ "$1" == "init-remote" ]; then
    # init-remote: runs ON the VM — installs CUDA, Docker, docker-compose
    # and nvidia-docker, then pre-pulls the training image.
    echo "Checking for CUDA and installing."
    # Check for CUDA and try to install.
    if ! dpkg-query -W cuda-9-1; then
      curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
      sudo dpkg -i ./cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
      sudo apt-get update
      sudo apt-get install cuda-9-1 -y --allow-unauthenticated
    fi

    sudo nvidia-smi -pm 0
    sudo nvidia-smi -ac 2505,875

    # On instances with NVIDIA® Tesla® K80 GPU: disable autoboost
    sudo nvidia-smi --auto-boost-default=DISABLED

    nvidia-smi

    # Installing Docker
    echo "Installing Docker and Docker Compose"
    sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get update
    sudo apt-get -y install docker-ce=17.12.1~ce-0~ubuntu
    docker --version
    sudo curl -L https://github.com/docker/compose/releases/download/1.19.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    docker-compose --version

    # Installing Nvidia Docker
    curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | \
      sudo apt-key add -
    distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
    curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | \
      sudo tee /etc/apt/sources.list.d/nvidia-docker.list
    sudo apt-get update
    sudo apt-get install -y nvidia-docker2
    sudo pkill -SIGHUP dockerd

    # https://github.com/NVIDIA/nvidia-docker/issues/262
    sudo nvidia-modprobe -u -c=0

    # Set default runtime
    sudo mv ~/daemon.json /etc/docker/daemon.json
    sudo service docker restart

    # Test nvidia-smi with the latest official CUDA image
    sudo docker run --rm nvidia/cuda nvidia-smi

    # Fetch and cache base image
    sudo docker pull jinnerbichler/neural-politician:latest

elif [ "$1" == "deploy" ]; then
    # deploy: upload the training code/compose files, (re)build the
    # stack on the VM and tail its logs.
    gcloud compute scp \
        ./docker-compose.yml \
        ./Dockerfile \
        ./env \
        word_rnn.py \
        speech_data.py \
        ${INSTANCE_NAME}:~/ --zone us-east1-d
    gcloud compute ssh ${INSTANCE_NAME} --command="sudo docker-compose -f ~/docker-compose.yml up -d --build --force-recreate" --zone us-east1-d
    gcloud compute ssh ${INSTANCE_NAME} --command="mkdir -p models" --zone us-east1-d
    gcloud compute ssh ${INSTANCE_NAME} --command="sudo docker-compose -f ~/docker-compose.yml logs -f" --zone us-east1-d

elif [ "$1" == "reset" ]; then
    # reset: wipe trained models and compose volumes on the VM.
    echo "Deleting models in ./models/"
    gcloud compute ssh ${INSTANCE_NAME} --command="sudo rm -rf ./models/" --zone us-east1-d

    echo "Deleting volumes"
    gcloud compute ssh ${INSTANCE_NAME} --command="sudo docker-compose -f ~/docker-compose.yml down -v" --zone us-east1-d

elif [ "$1" == "stop" ]; then
    gcloud compute ssh ${INSTANCE_NAME} --command="sudo docker-compose -f ~/docker-compose.yml stop" --zone us-east1-d

elif [ "$1" == "download-models" ]; then
    gcloud compute scp ${INSTANCE_NAME}:./models/* ./models/ --recurse --zone us-east1-d

elif [ "$1" == "download-data" ]; then
    gcloud compute scp ${INSTANCE_NAME}:./data/* ./data/ --recurse --zone us-east1-d

elif [ "$1" == "logs" ]; then
    gcloud compute ssh ${INSTANCE_NAME} --command="sudo docker-compose -f ~/docker-compose.yml logs -f" --zone us-east1-d
fi
| true
|
3da62be794d9ea4e482b7f56c23f408969101545
|
Shell
|
jimmyjayp/regression-testing
|
/bin/rt-setup
|
UTF-8
| 3,992
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
#desc# setup a new test
# Print CLI help and exit (status 0).
function usage {
	echo "usage:

	$0 -t testName [-H] -i {pd|qa|local|hostName} [-p alt-port] [-o [-u testUser]] [-U url]
	$0 -I

	testName     eg. jj+test1, jj+mailtest, ...   (CSRT_TEST_NAME)
	-i   initialize a new test (CSRT_API)
	-I   to initialize a new test when CSRT_API & CSRT_TEST_NAME are defined
	-H   use http scheme
	-o   open vscode workspaces for test user(s) (a, b and/or c)
	-u   a, b or c (select only one for vscode workspace)
	-U   full url of api

Env Vars
	CSRT_ROOT      $HOME/csrt-tests

Examples
	Setup test 'jj+test50' for users a, b & c for onprem pre-release
	bin/rt-setup -t jj+test50 -i oppr
"
	exit
}
# Create the working tree for a new test: for each of the three test
# users (a, b, c), clone the test repos and render the vscode workspace
# template, substituting the {{...}} placeholders with the current
# environment. Runs with cwd = $CSRT_ROOT/$CSRT_TEST_NAME.
function initialize_test {
	# echo "TEMPLATE=$template, CSRT_API=$CSRT_API"
	for user in a b c; do
		local testUserEmail="$DT_USER+${CSRT_TEST_NAME}$user@codestream.com"
		mkdir -p ${user}
		(cd ${user} && git clone git@github.com:jimmyjayp/regression-testing.git)
		# (cd ${user} && git clone git@gitlab.com:jimmyjayp/regression-testing.git)
		(cd ${user} && git clone git@github.com:jimmyjayp/gore.git)
		echo "TEMPLATE=$template"
		echo "testUserEmail=$testUserEmail"
		# Render the workspace file; '|' delimiters are used where the
		# substituted value may itself contain slashes.
		cat $repoRoot/templates/$template \
			| sed -e "s|{{CSRT_ROOT}}|$CSRT_ROOT|g" \
			| sed -e "s/{{CSRT_TEST_NAME}}/$CSRT_TEST_NAME/g" \
			| sed -e "s/{{CSRT_PORT}}/$altPort/g" \
			| sed -e "s/{{CSRT_API}}/$CSRT_API/g" \
			| sed -e "s/{{TEST_USER_EMAIL}}/$testUserEmail/g" \
			| sed -e "s/{{TEST_USER}}/$user/g" \
			| sed -e "s|{{CSRT_URL}}|$CSRT_URL|g" \
			| sed -e "s/{{CSRT_SCHEME}}/$CSRT_SCHEME/g" >$user/code.code-workspace
	done
}
# Tear down a test: remove the local test tree, then delete the team and
# its throwaway users from the API database — directly for a local
# environment, via dt-ssh otherwise.
#
# Fix: the original condition was `[ "$CSRT_ENV" == "local"]` (no space
# before ']'), which is a test syntax error; it always failed, so the
# local branch was unreachable and local cleanups went down the
# remote-ssh path.
function cleanup_test {
	cd "$CSRT_ROOT" || exit 1
	/bin/rm -rf "$CSRT_ROOT/$CSRT_TEST_NAME"
	if [ "$CSRT_ENV" == "local" ]; then
		/bin/bash --login -c "dt_load_playground api; node bin/cs_delete.js --team $CSRT_TEST_NAME --dtu"
	else
		echo "dt-ssh ${CSRT_ENV}*api web \". ~/.bash_profile; dt_load_playground api; node bin/cs_delete.js --team $CSRT_TEST_NAME --dtu\""
		dt-ssh ${CSRT_ENV}*api web ". ~/.bash_profile; dt_load_playground api; node bin/cs_delete.js --team $CSRT_TEST_NAME --dtu"
	fi
}
# Return 0 iff $1 consists of exactly four dot-separated runs of digits.
function looks_like_ip {
	printf '%s\n' "$1" | grep -Eq '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
}
# ---- main: parse flags, derive CSRT_API/scheme, then (re)create the test tree.
[ -z "$CSRT_ROOT" ] && export CSRT_ROOT=$HOME/csrt-tests
[ -z "$1" ] && usage

binDir=`dirname $0`
repoRoot=`(cd $binDir/.. && pwd)`
testAction=""
testUser=""
altPort=""
openInCode=0
CSRT_SCHEME=https
useHttp=0
url=""
template=user.vscode-workspace
while getopts "t:i:u:Ip:oHU:" arg
do
	case $arg in
	I) testAction=init;;
	U) export CSRT_URL=$OPTARG;;
	H) useHttp=1;;
	t) export CSRT_TEST_NAME=$OPTARG;; # jj+test1
	i) testAction=init; export CSRT_ENV=$OPTARG;;
	p) altPort=":$OPTARG";;
	u) testUser=$OPTARG;;
	o) openInCode=1;;
	*) usage;;
	esac
done
shift `expr $OPTIND - 1`

if [ -n "$CSRT_URL" ]; then
	# -U given: take the API url verbatim and use the url-flavored template.
	[ -z "$CSRT_ENV" ] && export CSRT_ENV=local
	[ -z "$CSRT_TEST_NAME" ] && echo "-t <testName> required" && exit 1
	template=user+url.vscode-workspace
	testAction=init
	echo "CSRT_ENV=$CSRT_ENV"
else
	# Derive CSRT_API from the environment name (or raw IP).
	case "$CSRT_ENV" in
	pd|qa|qa2) export CSRT_API=$CSRT_ENV-api.codestream.us;;
	local) export CSRT_API=localhost.codestream.us:12079;;
	localhostx) export CSRT_API=localhost;;
	*) looks_like_ip $CSRT_ENV && export CSRT_API=$CSRT_ENV || export CSRT_API=$CSRT_ENV.codestream.us;;
	esac
	[ "$altPort" == ":80" ] && CSRT_SCHEME=http && altPort=""
	[ $useHttp -eq 1 ] && CSRT_SCHEME=http
	[ \( -z "$CSRT_API" -a "$testAction" != ide \) -o -z "$CSRT_TEST_NAME" -o -z "$testAction" ] && usage
fi

# clear out pre-existing test
[ $testAction == init -a -d $CSRT_ROOT/$CSRT_TEST_NAME ] && echo "rm -rf $CSRT_ROOT/$CSRT_TEST_NAME" && rm -rf $CSRT_ROOT/$CSRT_TEST_NAME
[ ! -d $CSRT_ROOT/$CSRT_TEST_NAME ] && { echo mkdir -p $CSRT_ROOT/$CSRT_TEST_NAME && mkdir -p $CSRT_ROOT/$CSRT_TEST_NAME || exit 1; }
cd $CSRT_ROOT/$CSRT_TEST_NAME
# Build the tree, show the rendered workspaces, and optionally open one in vscode.
[ $testAction == init ] && initialize_test && ls -1 $CSRT_ROOT/$CSRT_TEST_NAME/?/code.* && [ $openInCode -eq 1 ] && code $CSRT_ROOT/$CSRT_TEST_NAME/$testUser/code.*
| true
|
a248a4e466f2812fc55dc637db47c4a8739debb0
|
Shell
|
conceptslearningmachine-FEIN-85-1759293/dlinput-tf
|
/requirements.sh
|
UTF-8
| 629
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Probe for an installed msgpack-c by compiling a tiny test program;
# if the probe fails, fetch and build msgpack-c from source.
#
# Fixes vs. the original:
#   - "tmpfile=$(mktemp $(unknown))" invoked a nonexistent command
#     "unknown"; the resulting tmpfile was never used, so it is dropped.
#   - the probe source/binary now live in a private mktemp directory
#     instead of fixed, predictable /tmp names, and are cleaned up on
#     every exit path via a trap (the original leaked the .cc file).
tmpdir="$(mktemp -d)" || exit 1
trap 'rm -rf -- "$tmpdir"' EXIT
filename="$tmpdir/test_msgpack.cc"
execfile="$tmpdir/test_msgpack"

content="
#include <msgpack.hpp>
#include <sstream>

int main(void)
{
    msgpack::type::tuple<int, bool> src(1, true);
    std::stringstream buffer;
    msgpack::pack(buffer, src);
    return 0;
}
"
echo "$content" > "$filename"

if g++ -o "$execfile" "$filename"; then
    echo "msgpack installed";
else
    git clone https://github.com/msgpack/msgpack-c.git
    cd msgpack-c
    cmake .
    make
#    sudo make install
    cd ..
    rm -rf msgpack-c
fi
| true
|
cdd3abb0b123f2db653e7aa2ec1a6a4ec62e4c7e
|
Shell
|
IACETH/cesm_convert_SST
|
/create_SST_forcing_example.sh
|
UTF-8
| 865
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash

########################################################
######### Create SST and Sea Ice forcing files #########
########################################################

# AFTER
# http://www.cesm.ucar.edu/models/cesm1.2/cesm/doc/usersguide/x2306.html
# and the script from Jan Sedlacek

# I would name this script:
# create_SST_forcing_b.e122.B_RCP8.5_CAM5_CN.f19_g16.io144.580.sh

# ----------------------------------------------------------------------
# Required USER INPUT

casename=b.e122.B_RCP8.5_CAM5_CN.f19_g16.io144.580

# folder with SST data
root=/net/meso/climphys/cesm122/${casename}/archive/
# destination folder
dest=/net/exo/landclim/${USER}/SST_forcing/data/

first=2006
last=2099

# END USER INPUT
# ----------------------------------------------------------------------

# source the create_SST_forcing.sh
# (sourced, not executed, so the variables defined above are visible to it)
. ./generate_SST_forcing.sh
| true
|
d902ab22a0cc058673873c5fdfb1a49c35983da5
|
Shell
|
neilrobertson/BICRCode
|
/Tools/vstep/vstepregion2wiggle.sh
|
UTF-8
| 536
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/sh
# Cut a region out of a vstep file and present it as a wiggle track:
# emit a wiggle_0 track header, then one line per record on <chrom>
# whose position lies in [start, end], as
# chrom<TAB>pos<TAB>pos+width<TAB>value.
#
# Fixes: expansions of $1 and $name are now quoted (paths with spaces
# previously word-split), `--` guards basename against dash-prefixed
# names, and $() replaces backticks.
if [ $# -ne 5 ]
then
	echo "usage: vstepregion2wiggle.sh <vstepfile> <width> <chrom> <start> <end>"
	exit 1
fi

name="$(basename -- "$1") - wiggle portion"

echo "track type=wiggle_0 name=\"$name\" visibility=\"full\" description=\"$name\" alwaysZero=\"on\""

#awk -v achr="$3" ' $1==achr { print $0 } ' $1
awk -v awidth="$2" -v achr="$3" -v astart="$4" -v aend="$5" '$1==achr && $2 >= astart && $2 <= aend { print $1"\t"$2"\t"$2+awidth"\t"$3 }' "$1"
| true
|
c02e4d1221e10cdf7af93370761e6f290a27a98a
|
Shell
|
kokizzu/usql
|
/drivers/testdata/gen-golden.sh
|
UTF-8
| 2,016
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Regenerate the golden output files (pgsql.*.golden.txt and
# mysql.*.golden.txt) by running each meta-query against live Postgres
# and MySQL containers, either directly on a published port or from a
# sibling docker container.

pgsql_in_docker=false
pgsql_container=usql-pgsql
if [ "$pgsql_in_docker" != true ]; then
  # Talk to the container's published port from the host.
  PGHOST="${PGHOST:-127.0.0.1}"
  port=$(docker port "$pgsql_container" 5432/tcp)
  PGPORT=${port##*:}
else
  PGHOST="${PGHOST:-$pgsql_container}"
  PGPORT=5432
fi
PGUSER="${PGUSER:-postgres}"
PGPASSWORD="${PGPASSWORD:-pw}"
export PGHOST PGPORT PGUSER PGPASSWORD

# Map golden-file stem -> psql backslash command to capture.
declare -A queries
queries=(
  [descTable]="\d+ film*"
  [listTables]="\dtvmsE+ film*"
  [listFuncs]="\df+"
  [listIndexes]="\di+"
  [listSchemas]="\dn+"
  [listDbs]="\l+"
)
for q in "${!queries[@]}"; do
  query="${queries[$q]}"
  cmd=(psql --no-psqlrc --command "$query")
  if [ "$pgsql_in_docker" == true ]; then
    docker run -it --rm -e PGHOST -e PGPORT -e PGUSER -e PGPASSWORD --link "$pgsql_container" postgres:13 "${cmd[@]}" >"pgsql.$q.golden.txt"
  else
    "${cmd[@]}" -o "pgsql.$q.golden.txt"
  fi
done

mysql_in_docker=true
mysql_container=usql-mysql
if [ "$mysql_in_docker" != true ]; then
  MYHOST="${MYHOST:-127.0.0.1}"
  port=$(docker port "$mysql_container" 3306/tcp)
  MYPORT=${port##*:}
else
  MYHOST="${MYHOST:-$mysql_container}"
  MYPORT=3306
fi
MYUSER="${MYUSER:-root}"
MYPASSWORD="${MYPASSWORD:-pw}"

# Same idea for MySQL; re-assigning queries=() replaces all entries.
declare -A queries
queries=(
  [descTable]="DESC film; SHOW INDEX FROM film; DESC film_actor; SHOW INDEX FROM film_actor; DESC film_category; SHOW INDEX FROM film_category; DESC film_list; SHOW INDEX FROM film_list; DESC film_text; SHOW INDEX FROM film_text;"
  [listTables]="SHOW TABLES LIKE 'film%'"
  [listSchemas]="SHOW DATABASES"
)
for q in "${!queries[@]}"; do
  query="${queries[$q]}"
  cmd=(mysql -h "$MYHOST" -P "$MYPORT" -u "$MYUSER" --password="$MYPASSWORD" --no-auto-rehash --database sakila --execute "$query")
  if [ "$mysql_in_docker" == true ]; then
    docker run -it --rm --link "$mysql_container" mysql:8 "${cmd[@]}" 2>/dev/null >"mysql.$q.golden.txt"
  else
    "${cmd[@]}" 2>/dev/null >"mysql.$q.golden.txt"
  fi
done
| true
|
bd7c758dc5677b425c135efaa55be0f5f475e541
|
Shell
|
dthpulse/vagrant-for-testing-ceph
|
/scripts/SES6/ses6_disk_fault_injection.sh
|
UTF-8
| 1,935
| 3.25
| 3
|
[] |
no_license
|
# Inject a disk fault on one storage node via the kernel's fail_make_request
# debugfs facility, verify Ceph notices the failing OSD device, then revert.
set -ex
echo "
############################################
######  ses6_disk_fault_injection.sh  ######
############################################
"
. /tmp/config.conf
# calculating PG and PGP number
num_of_osd=$(ceph osd ls | wc -l)
num_of_existing_pools=$(ceph osd pool ls | wc -l)
num_of_pools=1
# Round $1 to the nearest power of two (log2 via bc, +0.5 then truncate).
function power2() { echo "x=l($1)/l(2); scale=0; 2^((x+0.5)/1)" | bc -l; }
size=$(ceph-conf -c /dev/null -D | grep "osd_pool_default_size" | cut -d = -f 2 | sed 's/\ //g')
osd_num=$(ceph osd ls | wc -l)
recommended_pg_per_osd=100
# Standard PG sizing formula: (OSDs * 100 / replica size) / pool count.
pg_num=$(power2 $(echo "(($osd_num*$recommended_pg_per_osd) / $size) / ($num_of_existing_pools + $num_of_pools)" | bc))
pgp_num=$pg_num
# Block until the cluster reports HEALTH_OK (polls every 30 s, no timeout).
function health_ok() {
until [ "$(ceph health)" == "HEALTH_OK" ]
do
sleep 30
done
}
# get storage minion
storage_minion=$(salt-run select.minions roles=storage | head -1 | awk '{print $2}')
# get storage device name and partition
# e.g. partition "vdb1" -> device "vdb" (digits stripped).
storage_device_partition=$(ssh $storage_minion "pvdisplay | grep -B 1 \"VG Name .* ceph\" | head -1 | cut -d / -f 3")
storage_device_name=$(echo $storage_device_partition | tr -d [:digit:])
# Enable fault injection on the whole device: every request fails with
# probability 100% after a 10 s interval, indefinitely (times=-1).
ssh $storage_minion -tt << EOT
mkdir /debug
mount debugfs /debug -t debugfs
cd /debug/fail_make_request
echo 10 > interval
echo 100 > probability
echo -1 > times
echo 1 > /sys/block/$storage_device_name/make-it-fail
systemctl restart ceph-osd.target
exit
EOT
# Create I/O load so the fault is actually exercised.
ceph osd pool create diskfaultinjection $pg_num $pgp_num
sleep 30
ceph health | grep "HEALTH_OK"
ceph osd tree
sleep 30
# Under `set -e`, if neither branch succeeds the `exit 1` subshell fails
# the command list and aborts the script.
(ceph -s | grep ".* osds down" && echo "Failed device recognized by Ceph") || (echo "NOT recognized by Ceph" && exit 1)
health_ok
# bring device back to healthy state
# NOTE(review): injection toggles /sys/block/<dev>/make-it-fail but the
# revert writes /sys/block/<dev>/<partition>/make-it-fail — asymmetric
# paths; confirm this actually clears the device-level flag.
ssh $storage_minion -tt << EOT
umount /debug
echo 0 > /sys/block/$storage_device_name/$storage_device_partition/make-it-fail
systemctl restart ceph-osd.target
exit
EOT
health_ok
ceph osd pool rm diskfaultinjection diskfaultinjection --yes-i-really-really-mean-it
| true
|
76e4c5ef045e567d571f2ccf6fe71d10954945ab
|
Shell
|
taeram/ubuntu-pranks
|
/utils.sh
|
UTF-8
| 1,508
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
############################
# Configuration
############################
##
# Get the id of the currently running X windows process. e.g. ":0"
##
# NOTE(review): assumes exactly one X server; if several match, DISPLAY
# will contain multiple lines — verify on multi-seat machines.
export DISPLAY=$( ps aux | grep \/usr\/bin\/X | grep -v grep | sed -e 's/^.*\/X \(:[0-9]*\).*$/\1/' )
##
# Get the id of the mouse, according to xinput
##
# NOTE(review): the capture group \([0-9]\)* keeps only one digit of a
# multi-digit id, and multiple "mouse" rows yield multiple lines — the
# sed pattern likely needs \([0-9]*\); confirm against `xinput list`.
export MOUSE_ID=$( xinput list | grep -i mouse | sed -e 's/^.*id=\([0-9]\)*.*$/\1/' )
##
# Print coloured text
#
# @param string $1 The text type. Must be one of: success, info, warning, danger
# @param string $2 The string to echo
##
function echoc {
    # Colours are local so calling echoc no longer leaks
    # BLUE/RED/GREEN/YELLOW/NORMAL into the caller's scope
    # (the previous if-chain assigned them as globals).
    local NORMAL='\e[0m'
    local color
    case "$1" in
        success) color='\e[0;32m' ;;  # green
        info)    color='\e[0;34m' ;;  # blue
        warning) color='\e[1;33m' ;;  # yellow
        danger)  color='\e[0;31m' ;;  # red
        *)       return 0 ;;          # unknown type: print nothing, as before
    esac
    echo -e "$color$2$NORMAL"
}
############################
# Ask a question and return the response
#
# @param string $1 The question
# @param integer $2 The number of characters to accept as input
#
# @return string (written to stdout)
############################
function ask {
    if [ -z "$1" ]; then
        echoc "danger" "ask() requires a question to ask"
        exit 1
    fi

    # Build read's options as an array instead of an unquoted string so
    # we never depend on word-splitting (and an empty list expands cleanly).
    local READ_OPTIONS=()
    if [ -n "$2" ]; then
        READ_OPTIONS=(-n "$2")
    fi

    # Local colours: do not leak YELLOW/NORMAL into the caller's scope.
    local YELLOW='\e[1;33m'
    local NORMAL='\e[0m'
    local ANSWER

    # -r keeps backslashes in the user's answer literal.
    read -r "${READ_OPTIONS[@]}" -p "$(echo -en "$YELLOW>>> $1 $NORMAL")" ANSWER

    # printf instead of unquoted echo: the answer is returned verbatim even
    # if it contains globs, multiple spaces, or looks like an echo option.
    printf '%s\n' "$ANSWER"
}
| true
|
f5d1f3017ea866c4457dacac1016eec5f6aab8b1
|
Shell
|
yongjiangbuaa/march3
|
/WebProject/GameStats/stats/cron/stats_test.sh
|
UTF-8
| 4,330
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Backfill driver for the infobright stats jobs. The blocks below the
# first `exit` are historical month-by-month backfill runs, kept so a
# re-run can be triggered by moving the `exit` line — only the code
# above the first `exit` is live.
SIDLIST=`/usr/local/bin/php /data/htdocs/stats/scripts/get_sid_list.php`
# Reverse the SID list by prepending each element.
RSORTSIDLIST=$SIDLIST
SIDLIST=""
for a in $RSORTSIDLIST
do
SIDLIST=${a}" "${SIDLIST}
done
# NOTE(review): $j is never assigned before this loop, so fixdate=$j
# expands to "fixdate=" here — presumably the PHP script then defaults
# to the current date; confirm before relying on it.
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/test_referrer.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/test_referrer.php sid=$i fixdate=$j
done
exit
# ---- everything below is unreachable history -------------------------
# NOTE(review): {YYYYMMDD..YYYYMMDD} brace ranges are a bash feature but
# the shebang is /bin/sh — works only where sh is bash; confirm if any of
# these blocks is ever reactivated.
for j in {20150831..20150814};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
exit
for j in {20150831..20150831};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
exit
for j in {20150731..20150701};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
for j in {20150630..20150601};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
for j in {20150531..20150501};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
for j in {20150430..20150401};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
exit
for j in {20150331..20150301};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
for j in {20150228..20150201};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
for j in {20150131..20150101};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
exit
for j in {20141231..20141201};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
for j in {20141130..20141101};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
for j in {20141031..20141001};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
for j in {20140930..20140901};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
exit
for j in {20140831..20140801};do
for i in $SIDLIST;do
dt=`date "+%Y-%m-%d %T"`
echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
done
done
#for j in {20140731..20140701};do
#for i in $SIDLIST;do
#dt=`date "+%Y-%m-%d %T"`
#echo "[$dt] run /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j"
#/usr/local/bin/php /data/htdocs/stats/infobright/stats_test.php sid=$i fixdate=$j
#done
#done
| true
|
136997b343543833ba1e9c1753df3cf59ef03e75
|
Shell
|
hulaba/Deeplearning
|
/InstanceSeg/Semantic/2Image2Tile/clip_train_label.sh
|
UTF-8
| 2,238
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Tile a GeoTIFF training label into size x size PNG tiles, with three
# additional offset grids (shifted by `step` in x, y, and both) to get
# overlapping tiles for data augmentation.
if [ $# != 5 ];then
echo "fname,folder,size,repeat,oname"
exit
fi
name=$1
# Strip the extension: "img.tif" -> "img".
fname=`echo $name|cut -d'.' -f1`
folder=$2
size=$3
repeat=$4
oname=$5
echo $repeat,$size
# step = size - size*repeat, i.e. the shift between overlapping grids.
# NOTE(review): $size/$repeat are interpolated directly into the awk
# program text — works for plain numbers but breaks (or worse) on any
# other input; prefer awk -v if this is ever hardened.
step=`echo | awk "{print $size-$size*$repeat}"`
# Truncate the decimal part so `let` arithmetic below gets an integer.
step=`echo $step | cut -d. -f1`
echo 'step and size:'$step,$size
# Raster dimensions from the "Size is W, H" line of gdalinfo.
wi=`gdalinfo ${fname}.tif|grep ^Size|cut -d' ' -f3-4|sed -e 's/ //g'|cut -d',' -f1`
hi=`gdalinfo ${fname}.tif|grep ^Size|cut -d' ' -f3-4|sed -e 's/ //g'|cut -d',' -f2`
# Number of whole tiles in each direction (integer division).
let x=${wi}/$size
let y=${hi}/$size
echo $x,$y
for i in `seq 0 ${x}`;
do
echo $i
for j in `seq 0 ${y}`;
do
echo $j
# Base grid tile: [ox,oy] .. [ox+size,oy+size].
let ox=$size*$i;
let oy=$size*$j;
let xmax=$ox+$size;
let ymax=$oy+$size;
# Diagonal-shifted tile (offset by step in both axes).
let ox1=$size*$i+$step;
let oy1=$size*$j+$step;
let xmax1=$ox1+$size;
let ymax1=$oy1+$size;
# X-shifted tile.
let ox2=$size*$i+$step;
let oy2=$size*$j;
let xmax2=$ox2+$size;
let ymax2=$oy2+$size;
# Y-shifted tile.
let ox3=$size*$i;
let oy3=$size*$j+$step;
let xmax3=$ox3+$size;
let ymax3=$oy3+$size;
# echo $ox $oy $xmax $ymax $ox1 $oy1 $xmax1 $ymax1
# Cut each window in pixel coordinates (NO_GEOTRANSFORM), then convert
# the temporary tif to PNG.
gdalwarp -overwrite -to SRC_METHOD=NO_GEOTRANSFORM -to DST_METHOD=NO_GEOTRANSFORM -te $ox $oy $xmax $ymax ${fname}.tif ${folder}/r_${oname}_${ox}_${oy}.tif
gdal_translate -of PNG ${folder}/r_${oname}_${ox}_${oy}.tif ${folder}/r_${oname}_${ox}_${oy}.png
# # image Flip
gdalwarp -to SRC_METHOD=NO_GEOTRANSFORM -te $ox1 $oy1 $xmax1 $ymax1 ${fname}.tif ${folder}/r_${oname}_${ox1}_${oy1}.tif
gdal_translate -of PNG ${folder}/r_${oname}_${ox1}_${oy1}.tif ${folder}/r_${oname}_${ox1}_${oy1}.png
# repect enhance
gdalwarp -overwrite -to SRC_METHOD=NO_GEOTRANSFORM -to DST_METHOD=NO_GEOTRANSFORM -te $ox2 $oy2 $xmax2 $ymax2 ${fname}.tif ${folder}/r_${oname}_${ox2}_${oy2}.tif
gdal_translate -of PNG ${folder}/r_${oname}_${ox2}_${oy2}.tif ${folder}/r_${oname}_${ox2}_${oy2}.png
# repect enhance
gdalwarp -to SRC_METHOD=NO_GEOTRANSFORM -te $ox3 $oy3 $xmax3 $ymax3 ${fname}.tif ${folder}/r_${oname}_${ox3}_${oy3}.tif
gdal_translate -of PNG ${folder}/r_${oname}_${ox3}_${oy3}.tif ${folder}/r_${oname}_${ox3}_${oy3}.png
done
done
# Drop the intermediate tifs and gdal's PNG sidecar files; only the PNG
# tiles remain. NOTE(review): this removes ALL *.tif in $folder, not just
# the ones created above — confirm the folder is dedicated to this run.
rm ${folder}/*.tif
rm ${folder}/*.png.aux.xml
| true
|
70d0162aa8b8eab9629fd5ee07f4d2760d1f9bf6
|
Shell
|
beredim/virtuoso-docker-images
|
/docker-entrypoint.sh
|
UTF-8
| 2,424
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Virtuoso container entrypoint. On the very first run (no virtuoso.db
# yet) it appends one-shot SQL to autoexec.isql — securing the SPARQL
# endpoints and optionally setting dba/dav passwords — then schedules a
# background cleanup of that file and finally starts virtuoso in the
# foreground.
if [ ! -f /opt/virtuoso-opensource/database/virtuoso.db ]; then
echo "====================================================================="
echo "virtuoso.db doesn't seem to exist. This appears to be the first run."
echo "Therefore we are now going to secure the sparql endpoints"
cat /secure_sparql.isql >> /opt/virtuoso-opensource/database/autoexec.isql
rm /secure_sparql.isql
echo "====================================================================="
if [[ -n "$DBA_PASS" ]]; then
echo "====================================================================="
echo "Password for user 'dba' provided on first run."
echo "Changing password for user 'dba'"
echo "--" >> /opt/virtuoso-opensource/database/autoexec.isql
echo "user_set_password ('dba', '$DBA_PASS');" >> /opt/virtuoso-opensource/database/autoexec.isql
echo "CHECKPOINT;" >> /opt/virtuoso-opensource/database/autoexec.isql
echo "--" >> /opt/virtuoso-opensource/database/autoexec.isql
echo "====================================================================="
else
echo "====================================================================="
echo "Default password for dba user (dba:dba)."
echo "====================================================================="
fi
if [[ -n "$DAV_PASS" ]]; then
echo "====================================================================="
echo "Password for user 'dav' provided on first run."
echo "Changing password for user 'dav'"
echo "--" >> /opt/virtuoso-opensource/database/autoexec.isql
echo "USER_CHANGE_PASSWORD ('dav', 'dav', '$DAV_PASS');" >> /opt/virtuoso-opensource/database/autoexec.isql
echo "CHECKPOINT;" >> /opt/virtuoso-opensource/database/autoexec.isql
echo "--" >> /opt/virtuoso-opensource/database/autoexec.isql
echo "====================================================================="
else
echo "====================================================================="
echo "Default password for dav user (dav:dav)."
echo "====================================================================="
fi
# Remove autoexec.isql (which now contains plaintext passwords) shortly
# after virtuoso has had a chance to execute it. Runs as a background
# group so the server can start immediately below.
# NOTE(review): the 15 s window is a race against server startup time —
# confirm virtuoso always consumes autoexec.isql within that window.
echo "Cleaning up autoexec.isql after 15 seconds"
{
sleep 15
echo "====================================================================="
echo -n "Cleaning up autoexec.isql..."
rm /opt/virtuoso-opensource/database/autoexec.isql
echo "Finished"
} &
fi
# Foreground mode keeps the container alive; note virtuoso is not exec'd,
# so PID 1 stays bash and signal forwarding depends on bash's behaviour.
/opt/virtuoso-opensource/bin/virtuoso-t -f
| true
|
7a90ea3ec149cb2b15d3ed3f17dfac8b481592e5
|
Shell
|
acidburn0zzz/svntogit-community
|
/mephisto.lv2/trunk/PKGBUILD
|
UTF-8
| 1,418
| 2.609375
| 3
|
[] |
no_license
|
# Maintainer: David Runge <dvzrv@archlinux.org>
# Arch Linux PKGBUILD for mephisto.lv2 (meson-based build).
pkgname=mephisto.lv2
pkgver=0.18.2
pkgrel=1
pkgdesc="A Just-in-Time FAUST compiler embedded in an LV2 plugin"
arch=(x86_64)
url="https://open-music-kontrollers.ch/lv2/mephisto/"
license=(Artistic2.0)
groups=(lv2-plugins pro-audio)
depends=(glibc libglvnd libvterm libx11 lv2-host ttf-fira-code ttf-fira-sans)
makedepends=(faust fontconfig glew glu lv2 meson)
checkdepends=(lv2lint)
options=(debug)
# Tarball plus detached signature; signature checksums are SKIP by design.
source=(https://git.open-music-kontrollers.ch/lv2/$pkgname/snapshot/$pkgname-$pkgver.tar.xz{,.asc})
sha512sums=('6136dcc32c41bd494f156e84be0fb417efbcb42fbddfaff7f54d3e11dc4bba29957cf919b25ada27b8d0e796abbfbe3c7bd0e0fba3698d4871b166b38ba5fa2d'
            'SKIP')
b2sums=('5ef07088b8ba006dcfc511c7b34a4fabb5482b4a3bd7287ecafbdb0414a10d7f7058a21ea66e94e78c35c3b32dc0e9911e40b920f1dddcd8f377e5596b521c7d'
        'SKIP')
validpgpkeys=('5AE26CEBBF49705C19FB273A30258F99258CB76A') # Hanspeter Portner <dev@open-music-kontrollers.ch>
build() {
  arch-meson build $pkgname-$pkgver
  ninja -C build
}
check() {
  ninja -C build test
}
package() {
  # Shared-library deps are appended here (not in the global depends array)
  # per Arch convention for library sonames resolved at package() time.
  depends+=(libGLEW.so libfaust.so libfontconfig.so)
  DESTDIR="$pkgdir" ninja -C build install
  # devendor ttf-fira-code
  rm -vf "$pkgdir/usr/lib/lv2/$pkgname/"*.ttf
  for font_type in {Bold,Light,Medium,Regular}; do
    ln -svf /usr/share/fonts/TTF/FiraCode-$font_type.ttf "$pkgdir/usr/lib/lv2/$pkgname/"
  done
  install -vDm 644 $pkgname-$pkgver/{ChangeLog,README.md} -t "$pkgdir/usr/share/doc/$pkgname/"
}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.