blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
734ae09b0edb65816db332f15a32d5df6a7908bf | Shell | vlandham/dotfiles_old | /bashrc | UTF-8 | 2,127 | 2.828125 | 3 | [] | no_license | export CLICOLOR=1;export LSCOLORS="cxfxexexDxexexDxDxcxcx";
#osx color terminal
#export CLICOLOR=1
# don't put duplicate lines in the history. See bash(1) for more options
# don't overwrite GNU Midnight Commander's setting of `ignorespace'.
HISTCONTROL=$HISTCONTROL${HISTCONTROL+,}ignoredups
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
#PS1='\[\033[01;31m\]\w\[\033[00m\]\n${debian_chroot:+($debian_chroot)}\[\033[01;34m\]\u\[\033[01;32m\]@\[\033[01;34m\]\h\[\033[00m\]\$ '
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
export PERL5LIB=/n/site/inst/shared/sys/lib/perl5/5.8.8:/n/site/inst/shared/sys/lib/perl5/site_perl/5.8.8/x86_64-linux-thread-multi:/n/site/inst/shared/sys/lib/perl5/site_perl/5.8.8:/n/site/inst/shared/sys/lib/perl5/site_perl:$PERL5LIB
#export JAVA_HOME=/usr/lib/jvm/java-1.6.0
#export ANT_HOME=~/tools/ant
alias vim='mvim'
#export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/1.6/Home
export BB_INSTALL="$HOME/tools"
export BB_PATH="$HOME/tools/bio.brew"
export NODE_PATH="/usr/local/lib/node"
PATH="/usr/local/bin:/usr/local/sbin:/usr/local/mysql/bin:/usr/local/share/npm/bin:$PATH"
PATH="$BB_INSTALL/bin:$BB_PATH/bin:$PATH"
export PATH
[[ -s "$BB_INSTALL/bio.brew/bb_load_env" ]] && source "$BB_INSTALL/bio.brew/bb_load_env"
[[ -s "$HOME/.rvm/scripts/rvm" ]] && . "$HOME/.rvm/scripts/rvm" # Load RVM function
| true |
59e902a354f3d492e2afb00b244b8036d32cd305 | Shell | loick111/docker-bungeecord | /entrypoint.sh | UTF-8 | 1,658 | 3.625 | 4 | [] | no_license | #!/usr/bin/env bash
error() {
echo "Error: $*" >&2
}
init_srv_config() {
servers=""
priorities=""
forced_hosts=""
LIST=$(env | grep -Eo '^SRV[0-9]+' | sort -u)
for srv in ${LIST}
do
name="${srv}_NAME"
address="${srv}_ADDRESS"
motd="${srv}_MOTD"
restricted="${srv}_RESTRICTED"
servers="${servers} ${!name:-${srv}}:
address: ${!address}
motd: '${!motd:-${srv}}'
restricted: ${!restricted:-false}
"
priorities="${priorities} - ${!name:-${srv}}
"
forced_hosts="${forced_hosts} ${!name:-${srv}}.${FORCED_HOSTS_DOMAIN}: ${!name:-${srv}}
"
done
export CONFIG_SERVERS=${servers}
export CONFIG_PRIORITIES=${priorities}
export CONFIG_FORCED_HOSTS=${forced_hosts}
}
init_groups_config() {
groups=""
LIST=$(env | grep -Eo '^ADMIN[0-9]+' | sort -u)
for admin in ${LIST}
do
groups="${groups} ${!admin}:
- admin
"
done
export CONFIG_GROUPS=${groups}
}
BUNGEE_JAR=${BUNGEE_HOME}/BungeeCord.jar
BUNGEE_JAR_URL=${BUNGEE_BASE_URL}/${BUNGEE_JOB_ID:-lastStableBuild}/artifact/bootstrap/target/BungeeCord.jar
if [[ ! -e ${BUNGEE_JAR} ]]
then
echo "Downloading ${BUNGEE_JAR_URL}"
if ! curl -o ${BUNGEE_JAR} -fsSL ${BUNGEE_JAR_URL}
then
error "failed to download ${BUNGEE_JAR_URL}"
exit 1
fi
fi
echo "Generating config..."
init_srv_config
init_groups_config
envsubst > /server/config.yml < /config.yml.tmpl
cat /server/config.yml
echo "Starting..."
JVM_OPTS="-Xms${INIT_MEMORY:-${MEMORY}} -Xmx${MAX_MEMORY:-${MEMORY}} ${JVM_OPTS}"
exec java $JVM_OPTS -jar $BUNGEE_JAR "$@" | true |
1a52d007c2aa4e5f7f07e31428c9af825b7e0f25 | Shell | jschoormans/R4D | /draft scripts/shellscripts/ReconGrasp1.sh | UTF-8 | 3,424 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# new script - grasp recon with pics (GPU)
#-(test whitening)
printf "Shellscript for BART GRASP reconstruction --- running. All output in logfile\n\n"
printf "To do \n-logfile \n-automate reading of philips listfile \n-3D support \n-etc...\n\n"
# parameters
export ITERS=30
export REG=0.01
export CALIB=100
export SCALE=0.5
export SPOKES=40
export PHASES=20
#define BART version to use
printf "\nBART version:\n"
export bart=/home/jschoormans/bart-bld/bart-LAPACKE-bld/bart
#create logfile
touch logfile
# output version of BART used (as a check)
$bart version -V
# make tmp directory for files
tempdir=`mktemp -d`
trap 'rm -rf "$tempdir"' EXIT #remove files and exit upon ctrl-C
rootdir=$PWD
imdir="$PWD/ims"
mkdir -p $imdir
cd $imdir && rm * 2>/dev/null
cd $tempdir
# copy data to temp folder
printf "\nCopying data to tmp folder...\n"
cp -n $rootdir/data/data_allcoils.cfl $tempdir/grasp.cfl
cp -n $rootdir/data/data_allcoils.hdr $tempdir/grasp.hdr
cp -n $rootdir/data/noise_allcoils.hdr $tempdir/noise.hdr
cp -n $rootdir/data/noise_allcoils.cfl $tempdir/noise.cfl
printf "Extracting scan parameters...\n"
export READ=$($bart show -d0 grasp)
export COILS=$($bart show -d3 grasp)
export NSPOKES=$($bart show -d1 grasp)
echo $READ $COILS $NSPOKES
# noise whitening
# reshape noisedata
#$bart show -m $tempdir/noise_allcoils
#$bart reshape $(bart bitmask 0 1 2 3 4 5 6 7) 19936 1 1 24 1 1 1 1 $tempdir/noise_allcoils $tempdir/noisedata
#$bart show -m $tempdir/noisedata
# whitening
#$bart whiten -n $tempdir/data2 $tempdir/noisedata $tempdir/data3 $tempdir/optmatout $tempdir/covarout
#echo "optmatout"
#$bart show $tempdir/optmatout
#echo "covarout"
#$bart show $tempdir/covarout
# calculate trajectory for calibration
printf "Calculating trajectories...\n"
$bart traj -r -s4 -x$READ -y$CALIB t
$bart scale $SCALE t trajcalib
# create trajectory with 2064 spokes and 2x oversampling
$bart traj -G -s4 -x$READ -y$(($SPOKES * $PHASES)) t
$bart scale $SCALE t t2
# split off time dimension into index 10
$bart reshape $(bart bitmask 2 10) $SPOKES $PHASES t2 trajfull
calib_slice()
{
printf "Reshaping raw data...\n"
$bart reshape $(bart bitmask 0 1 2) 1 $READ $NSPOKES grasp data1
# extract first $CALIB spokes
$bart extract 2 0 $CALIB data1 data3
# apply inverse nufft to first $CALIB spokes
$bart nufft -i -t trajcalib data3 imgnufft
# transform back to k-space
$bart fft -u $(bart bitmask 0 1 2) imgnufft ksp
# find sensitivity map
$bart ecalib -S -c0.8 -m1 -r20 ksp sens2
}
recon_slice()
{
# extract spokes and split-off time dim
printf "extract"
$bart extract 1 0 $(($SPOKES * $PHASES)) grasp grasp2
$bart show -m grasp
$bart show -m grasp2
printf "reshape"
$bart reshape $(bart bitmask 1 2) $SPOKES $PHASES grasp2 grasp1
# move time dimensions to dim 10 and reshape
$bart transpose 2 10 grasp1 grasp2
$bart reshape $(bart bitmask 0 1 2) 1 $READ $SPOKES grasp2 grasp1
# reconstruction with tv penality along dimension 10
$bart pics -G -S -u10 -RT:$(bart bitmask 10):0:$REG -i$ITERS -t trajfull grasp1 sens2 impics
}
calib_slice
recon_slice
# pics recon
#printf "CS RECON...\n"
#$bart pics -RT:0:0:$REG -i$ITER -t trajrad sensemaps data2 r
# visualize recon
printf "\tVisualize..."
$bart toimg imgnufft $imdir/recon
$bart toimg sens2 $imdir/sens2
$bart toimg impics $imdir/impics
#rm $tempdir/*.cfl $tempdir/*.hdr
printf "End of script. \n"
| true |
3272a64c8c87a5c3228f2461d983c5c81aa2b40c | Shell | mrkafk/shed_scripts | /jupyter_control.sh | UTF-8 | 2,136 | 4.40625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
function activate_ve () {
if [ -f ".jupyter_params" ]; then
VE=$(cat .jupyter_params | grep -i 'virtualenv:' | awk '{print $2;}')
if [ -f "$VE/bin/activate" ]; then
echo "Activating $VE/bin/activate"
source "$VE/bin/activate"
else
echo "Virtualenv $VE/bin/activate could not be activated. Abort."
exit 1
fi
else
echo "File .jupyter_params not found."
exit 1
fi
}
function get_port () {
if [ -f ".jupyter_params" ]; then
PORT=$(cat .jupyter_params | grep -i 'port:' | awk '{print $2;}')
echo "$PORT"
else
echo "File .jupyter_params not found."
exit 1
fi
}
function jupyter_stop () {
if [ -f ".jupyter_params" ]; then
PORT=$(cat .jupyter_params | grep -i 'port:' | awk '{print $2;}')
VE=$(cat .jupyter_params | grep -i 'virtualenv:' | awk '{print $2;}')
PID=$(ps auxwww | grep "$VE/bin/jupyter-notebook" | grep "port $PORT" | grep -v grep | awk '{print $2};')
set -x
kill "$PID"
set +x
else
echo "File .jupyter_params not found."
exit 1
fi
}
case "$1" in
"start")
activate_ve
PORT=$(get_port)
echo "Starting jupyter (port $PORT), log ./.jupyter_log"
nohup jupyter notebook --port "$PORT" &> .jupyter_log &
sleep 1
cat .jupyter_log
;;
"stop")
jupyter_stop
;;
"template")
if [ ! -f '.jupyter_params' ]; then
echo "Writing template .jupyter_params"
cat <<EOF >.jupyter_params
virtualenv: ve
port: 8010
EOF
else
echo "File .jupyter_port present in current dir."
exit 1
fi
;;
*)
echo "Unknown command $1"
echo "Usage:"
echo " start"
echo " stop"
echo " template"
;;
esac
| true |
47607db41c40dea7ddbac5bca584ce2f8cc3b40a | Shell | alip/sydbox-1 | /tests/t3013-fs-truncate.sh | UTF-8 | 2,055 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# vim: set sw=4 et ts=4 sts=4 tw=80 :
# Copyright 2010, 2012, 2013 Ali Polatel <alip@exherbo.org>
# Released under the terms of the 3-clause BSD license
test_description='sandbox truncate(2)'
. ./test-lib.sh
SYDBOX_TEST_OPTIONS="
$SYDBOX_TEST_OPTIONS
-mcore/violation/raise_fail:1
-mcore/violation/raise_safe:1
"
test_expect_failure 'deny truncate(NULL) with EFAULT' '
sydbox -- emily truncate -e EFAULT
'
test_expect_failure 'deny truncate()' '
f="$(unique_file)" &&
: > "$f" &&
test_must_violate sydbox \
-m core/sandbox/write:deny \
-- emily truncate -e EPERM "$f" &&
test_path_is_non_empty "$f"
'
test_expect_failure 'deny truncate() for non-existant file' '
f="no-$(unique_file)" &&
test_must_violate sydbox \
-m core/sandbox/write:deny \
-- emily truncate -e EPERM "$f"
'
test_expect_failure SYMLINKS 'deny truncate() for symbolic link' '
f="$(unique_file)" &&
l="$(unique_link)" &&
echo hey syd > "$f" &&
ln -sf "$l" "$f" &&
test_must_violate sydbox \
-m core/sandbox/write:deny \
-- emily truncate -e EPERM "$l" &&
test_path_is_non_empty "$f"
'
test_expect_failure SYMLINKS 'deny truncate() for dangling symbolic link' '
f="no-$(unique_file)" &&
l="$(unique_link)" &&
ln -sf no"$l" "$f" &&
test_must_violate sydbox \
-m core/sandbox/write:deny \
-- emily truncate no"$l"
'
test_expect_failure 'whitelist truncate()' '
f="$(unique_file)" &&
echo hello syd > "$f" &&
sydbox \
-m core/sandbox/write:deny \
-m "whitelist/write+$HOME_RESOLVED/**" \
-- emily truncate "$f" &&
test_path_is_empty "$f"
'
test_expect_failure SYMLINKS 'whitelist truncate() for symbolic link' '
f="$(unique_file)" &&
l="$(unique_link)" &&
echo hello syd > "$f" &&
ln -sf "$l" "$f" &&
sydbox \
-m core/sandbox/write:deny \
-m "whitelist/write+$HOME_RESOLVED/**" \
-- emily truncate -e ERRNO_0 "$l" &&
test_path_is_empty "$f"
'
test_done
| true |
3ccc42d4f46fd928dbe0fa8f6cb7616864e35a82 | Shell | TieDyedDevil/XS | /generators/buildinfo.sh | UTF-8 | 275 | 3.15625 | 3 | [] | no_license | #! /usr/bin/env sh
[ -n "$MESON_SOURCE_ROOT" ] && cd $MESON_SOURCE_ROOT
FILE=$(basename $0 .sh).hxx
COMPILER=$(${CC:-cc} --version|head -1)
DATA="#define BUILDINFO \"$USER @ `hostname`; `date --rfc-3339=s`; $COMPILER\""
echo "$DATA" | cmp -s - $FILE || echo "$DATA" > $FILE
| true |
d06466ad1b7a377502dcafe0c0b22d6714cbeab8 | Shell | qshan/ubuntu-live-remaster | /bin/remaster | UTF-8 | 1,406 | 4 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
set -E
function help() {
echo "Usage: $(basename $0) -i iso -w work -c customize -l label -d device"
}
function error() {
echo $1
help
exit 1
}
scripts=$(dirname $0)
ISO=""
WORK=""
LABEL=""
DEVICE=""
CUSTOMIZE=""
POSITIONAL=()
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
-i|--iso)
ISO="$2"
shift # past argument
shift # past value
;;
-w|--work)
WORK="$2"
shift # past argument
shift # past value
;;
-c|--customize)
CUSTOMIZE="$2"
shift # past argument
shift # past value
;;
-l|--label)
LABEL="$2"
shift # past argument
shift # past value
;;
-d|--device)
DEVICE="$2"
shift # past argument
shift # past value
;;
*) # unknown option
POSITIONAL+=("$1") # save it in an array for later
shift # past argument
;;
esac
done
[[ -z $ISO ]] && error "--iso option is not set"
[[ -z $WORK ]] && error "--work option is not set"
[[ -z $LABEL ]] && error "--label option is not set"
[[ -z $DEVICE ]] && error "--device option is not set"
$scripts/remaster.extract $ISO $WORK
$scripts/remaster.customize $WORK/squashfs-root $CUSTOMIZE
$scripts/remaster.casper $WORK
bytes="$(expr $(du -bxs $WORK/iso | cut -f1) / 1024 + 1024 \* 100)K"
$scripts/remaster.partition $DEVICE $LABEL $bytes
$scripts/remaster.install $WORK $DEVICE $LABEL
| true |
a1acf5df0a019182f53512ded02b736f5a7dc183 | Shell | unixengineer/edge2ai_demo | /setup.sh | UTF-8 | 17,692 | 3.59375 | 4 | [] | no_license | #! /bin/bash
echo "-- Commencing SingleNodeCluster Setup Script"
set -e
set -u
if [ "$USER" != "root" ]; then
echo "ERROR: This script ($0) must be executed by root"
exit 1
fi
CLOUD_PROVIDER=${1:-aws}
TEMPLATE=${2:-}
DOCKERDEVICE=${3:-}
NOPROMPT=${4:-}
SSH_USER=${5:-}
SSH_PWD=${6:-}
NAMESPACE=${7:-}
export NAMESPACE
BASE_DIR=$(cd $(dirname $0); pwd -P)
KEY_FILE=${BASE_DIR}/myRSAkey
if [ -e $BASE_DIR/stack.$NAMESPACE.sh ]; then
source $BASE_DIR/stack.${NAMESPACE}.sh
else
source $BASE_DIR/stack.sh
fi
TEMPLATE=${TEMPLATE}.${CDH_MAJOR_VERSION}
# Often yum connection to Cloudera repo fails and causes the instance create to fail.
# yum timeout and retries options don't see to help in this type of failure.
# We explicitly retry a few times to make sure the build continues when these timeouts happen.
function yum_install() {
local packages=$@
local retries=10
while true; do
set +e
yum install -d1 -y ${packages}
RET=$?
set -e
if [[ ${RET} == 0 ]]; then
break
fi
retries=$((retries - 1))
if [[ ${retries} -lt 0 ]]; then
echo 'YUM install failed!'
exit 1
else
echo 'Retrying YUM...'
fi
done
}
function get_homedir() {
local username=$1
getent passwd $username | cut -d: -f6
}
######### Start Packer Installation
echo "-- Testing if this is a pre-packed image by looking for existing Cloudera Manager repo"
CM_REPO_FILE=/etc/yum.repos.d/cloudera-manager.repo
if [[ ! -f $CM_REPO_FILE ]]; then
echo "-- Cloudera Manager repo not found, assuming not prepacked"
echo "-- Installing base dependencies"
yum_install ${JAVA_PACKAGE_NAME} vim wget curl git bind-utils epel-release
yum_install python-pip npm gcc-c++ make
echo "-- Install CM yum repo"
wget --progress=dot:giga ${CM_REPO_FILE_URL} -O $CM_REPO_FILE
sed -i -E "s#https?://[^/]*#${CM_BASE_URL}#g" $CM_REPO_FILE
echo "-- Install MariaDB yum repo"
cat - >/etc/yum.repos.d/MariaDB.repo <<EOF
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.1/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1
EOF
echo "-- Running remaining binary preinstalls"
yum clean all
rm -rf /var/cache/yum/
yum repolist
yum_install cloudera-manager-daemons cloudera-manager-agent cloudera-manager-server \
MariaDB-server MariaDB-client shellinabox mosquitto jq transmission-cli
npm install --quiet forever -g
pip install --quiet --upgrade pip
pip install --progress-bar off cm_client paho-mqtt
systemctl disable mariadb
echo "-- Install Maven"
curl http://mirrors.sonic.net/apache/maven/maven-3/3.6.2/binaries/apache-maven-3.6.2-bin.tar.gz > /tmp/apache-maven-3.6.2-bin.tar.gz
tar -C $(get_homedir $SSH_USER) -zxvf /tmp/apache-maven-3.6.2-bin.tar.gz
rm -f /tmp/apache-maven-3.6.2-bin.tar.gz
echo "export PATH=\$PATH:$(get_homedir $SSH_USER)/apache-maven-3.6.2/bin" >> $(get_homedir $SSH_USER)/.bash_profile
echo "-- Get and extract CEM tarball to /opt/cloudera/cem"
mkdir -p /opt/cloudera/cem
wget --progress=dot:giga ${CEM_URL} -P /opt/cloudera/cem
tar -zxf /opt/cloudera/cem/CEM-${CEM_VERSION}-centos7-tars-tarball.tar.gz -C /opt/cloudera/cem
rm -f /opt/cloudera/cem/CEM-${CEM_VERSION}-centos7-tars-tarball.tar.gz
echo "-- Install and configure EFM"
EFM_TARBALL=$(find /opt/cloudera/cem/ -path "*/centos7/*" -name "efm-*-bin.tar.gz")
EFM_BASE_NAME=$(basename $EFM_TARBALL | sed 's/-bin.tar.gz//')
tar -zxf ${EFM_TARBALL} -C /opt/cloudera/cem
ln -s /opt/cloudera/cem/${EFM_BASE_NAME} /opt/cloudera/cem/efm
ln -s /opt/cloudera/cem/efm/bin/efm.sh /etc/init.d/efm
chown -R root:root /opt/cloudera/cem/${EFM_BASE_NAME}
rm -f /opt/cloudera/cem/efm/conf/efm.properties
rm -f /opt/cloudera/cem/efm/conf/efm.conf
cp $BASE_DIR/efm.properties /opt/cloudera/cem/efm/conf
cp $BASE_DIR/efm.conf /opt/cloudera/cem/efm/conf
echo "-- Install and configure MiNiFi"
MINIFI_TARBALL=$(find /opt/cloudera/cem/ -path "*/centos7/*" -name "minifi-[0-9]*-bin.tar.gz")
MINIFITK_TARBALL=$(find /opt/cloudera/cem/ -path "*/centos7/*" -name "minifi-toolkit-*-bin.tar.gz")
MINIFI_BASE_NAME=$(basename $MINIFI_TARBALL | sed 's/-bin.tar.gz//')
MINIFITK_BASE_NAME=$(basename $MINIFITK_TARBALL | sed 's/-bin.tar.gz//')
tar -zxf ${MINIFI_TARBALL} -C /opt/cloudera/cem
tar -zxf ${MINIFITK_TARBALL} -C /opt/cloudera/cem
ln -s /opt/cloudera/cem/${MINIFI_BASE_NAME} /opt/cloudera/cem/minifi
chown -R root:root /opt/cloudera/cem/${MINIFI_BASE_NAME}
chown -R root:root /opt/cloudera/cem/${MINIFITK_BASE_NAME}
rm -f /opt/cloudera/cem/minifi/conf/bootstrap.conf
cp $BASE_DIR/bootstrap.conf /opt/cloudera/cem/minifi/conf
/opt/cloudera/cem/minifi/bin/minifi.sh install
echo "-- Disable services here for packer images - will reenable later"
systemctl disable cloudera-scm-server
systemctl disable cloudera-scm-agent
systemctl disable minifi
echo "-- Download and install MQTT Processor NAR file"
wget http://central.maven.org/maven2/org/apache/nifi/nifi-mqtt-nar/1.8.0/nifi-mqtt-nar-1.8.0.nar -P /opt/cloudera/cem/minifi/lib
chown root:root /opt/cloudera/cem/minifi/lib/nifi-mqtt-nar-1.8.0.nar
chmod 660 /opt/cloudera/cem/minifi/lib/nifi-mqtt-nar-1.8.0.nar
echo "-- Preloading large Parcels to /opt/cloudera/parcel-repo"
mkdir -p /opt/cloudera/parcel-repo
if [ "${#PARCEL_URLS[@]}" -gt 0 ]; then
set -- "${PARCEL_URLS[@]}"
while [ $# -gt 0 ]; do
component=$1
version=$2
url=$3
shift 3
echo ">>> $component - $version - $url"
curl --silent "${url%%/}/manifest.json" > /tmp/manifest.json
parcel_name=$(jq -r '.parcels[] | select(.parcelName | contains("'"$version"'-el7.parcel")) | select(.components[] | .name == "'"$component"'").parcelName' /tmp/manifest.json)
hash=$(jq -r '.parcels[] | select(.parcelName | contains("'"$version"'-el7.parcel")) | select(.components[] | .name == "'"$component"'").hash' /tmp/manifest.json)
wget --no-clobber --progress=dot:giga "${url%%/}/${parcel_name}" -O "/opt/cloudera/parcel-repo/${parcel_name}"
echo "$hash" > "/opt/cloudera/parcel-repo/${parcel_name}.sha"
transmission-create -s 512 -o "/opt/cloudera/parcel-repo/${parcel_name}.torrent" "/opt/cloudera/parcel-repo/${parcel_name}"
done
fi
echo "-- Configure and optimize the OS"
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
echo "echo never > /sys/kernel/mm/transparent_hugepage/enabled" >> /etc/rc.d/rc.local
echo "echo never > /sys/kernel/mm/transparent_hugepage/defrag" >> /etc/rc.d/rc.local
# add tuned optimization https://www.cloudera.com/documentation/enterprise/latest/topics/cdh_admin_performance.html
echo "vm.swappiness = 1" >> /etc/sysctl.conf
sysctl vm.swappiness=1
timedatectl set-timezone UTC
echo "-- Handle cases for cloud provider customisations"
case "${CLOUD_PROVIDER}" in
aws)
echo "server 169.254.169.123 prefer iburst minpoll 4 maxpoll 4" >> /etc/chrony.conf
systemctl restart chronyd
;;
azure)
umount /mnt/resource
mount /dev/sdb1 /opt
;;
gcp)
;;
*)
echo $"Usage: $0 {aws|azure|gcp} template-file [docker-device]"
echo $"example: ./setup.sh azure default_template.json"
echo $"example: ./setup.sh aws cluster_template.json /dev/xvdb"
exit 1
esac
iptables-save > $BASE_DIR/firewall.rules
FWD_STATUS=$(systemctl is-active firewalld || true)
if [[ "${FWD_STATUS}" != "unknown" ]]; then
systemctl disable firewalld
systemctl stop firewalld
fi
#setenforce 0
if [[ -f /etc/selinux/config ]]; then
sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
fi
echo "-- Install JDBC connector"
wget --progress=dot:giga ${JDBC_CONNECTOR_URL} -P ${BASE_DIR}/
TAR_FILE=$(basename ${JDBC_CONNECTOR_URL})
BASE_NAME=${TAR_FILE%.tar.gz}
tar zxf ${BASE_DIR}/${TAR_FILE} -C ${BASE_DIR}/
mkdir -p /usr/share/java/
cp ${BASE_DIR}/${BASE_NAME}/${BASE_NAME}-bin.jar /usr/share/java/mysql-connector-java.jar
echo "-- Install CSDs"
for url in "${CSD_URLS[@]}"; do
echo "---- Downloading $url"
wget --progress=dot:giga ${url} -P /opt/cloudera/csd/
done
echo "-- Enable password authentication"
sed -i 's/PasswordAuthentication *no/PasswordAuthentication yes/' /etc/ssh/sshd_config
echo "-- Reset SSH user password"
echo "$SSH_PWD" | sudo passwd --stdin "$SSH_USER"
echo "-- Finished image preinstall"
else
echo "-- Cloudera Manager repo already present, assuming this is a prewarmed image"
fi
####### Finish packer build
echo "-- Checking if executing packer build"
if [[ ! -z ${PACKER_BUILD:+x} ]]; then
echo "-- Packer build detected, exiting with success"
sleep 2
exit 0
else
echo "-- Packer build not detected, continuing with installation"
sleep 2
fi
##### Start install
# Prewarm parcel directory
for parcel_file in $(find /opt/cloudera/parcel-repo -type f); do
dd if=$parcel_file of=/dev/null bs=10M &
done
PUBLIC_IP=$(curl https://api.ipify.org/ 2>/dev/null || curl https://ifconfig.me 2> /dev/null)
PUBLIC_DNS=$(dig -x ${PUBLIC_IP} +short)
echo "-- Set /etc/hosts"
echo "$(hostname -I) $(hostname -f) edge2ai-1.dim.local" >> /etc/hosts
echo "-- Configure networking"
hostnamectl set-hostname $(hostname -f)
if [[ -f /etc/sysconfig/network ]]; then
sed -i "/HOSTNAME=/ d" /etc/sysconfig/network
fi
echo "HOSTNAME=$(hostname -f)" >> /etc/sysconfig/network
echo "-- Generate self-signed certificate for ShellInABox with the needed SAN entries"
# Generate self-signed certificate for ShellInABox with the needed SAN entries
openssl req \
-x509 \
-nodes \
-newkey 2048 \
-keyout key.pem \
-out cert.pem \
-days 365 \
-subj "/C=US/ST=California/L=San Francisco/O=Cloudera/OU=Data in Motion/CN=$(hostname -f)" \
-extensions 'v3_user_req' \
-config <( cat <<EOF
[ req ]
default_bits = 2048
default_md = sha256
distinguished_name = req_distinguished_name
req_extensions = v3_user_req
string_mask = utf8only
[ req_distinguished_name ]
countryName_default = XX
countryName_min = 2
countryName_max = 2
localityName_default = Default City
0.organizationName_default = Default Company Ltd
commonName_max = 64
emailAddress_max = 64
[ v3_user_req ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = DNS:$(hostname -f),DNS:${PUBLIC_DNS},IP:$(hostname -I),IP:${PUBLIC_IP}
EOF
)
cat key.pem cert.pem > /var/lib/shellinabox/certificate.pem
# Enable and start ShelInABox
systemctl enable shellinaboxd
systemctl start shellinaboxd
if [[ -n "${CDSW_VERSION}" ]]; then
echo "CDSW_VERSION is set to '${CDSW_VERSION}'"
# CDSW requires Centos 7.5, so we trick it to believe it is...
echo "CentOS Linux release 7.5.1810 (Core)" > /etc/redhat-release
# If user doesn't specify a device, tries to detect a free one to use
# Device must be unmounted and have at least 200G of space
if [[ "${DOCKERDEVICE}" == "" ]]; then
echo "Docker device was not specified in the command line. Will try to detect a free device to use"
TMP_FILE=${BASE_DIR}/.device.list
# Find devices that are not mounted and have size greater than or equal to 200G
lsblk -o NAME,MOUNTPOINT,SIZE -s -p -n | awk '/^\// && NF == 2 && $TEMPLATE ~ /([2-9]|[0-9][0-9])[0-9][0-9]G/' > "${TMP_FILE}"
if [[ $(cat $TMP_FILE | wc -l) == 0 ]]; then
echo "ERROR: Could not find any candidate devices."
exit 1
elif [[ $(cat ${TMP_FILE} | wc -l) -gt 1 ]]; then
echo "ERROR: Found more than 1 possible devices to use:"
cat ${TMP_FILE}
exit 1
else
DOCKERDEVICE=$(awk '{print $1}' ${TMP_FILE})
fi
rm -f ${TMP_FILE}
fi
echo "Docker device: ${DOCKERDEVICE}"
else
echo "CDSW_VERSION is unset, skipping CDSW installation";
fi
echo "--Configure and start MariaDB"
echo "-- Configure MariaDB"
cat ${BASE_DIR}/mariadb.config > /etc/my.cnf
systemctl enable mariadb
systemctl start mariadb
#echo "-- Create DBs required by CM"
#mysql -u root < ${BASE_DIR}/create_db.sql
#echo "-- Secure MariaDB"
#mysql -u root < ${BASE_DIR}/secure_mariadb.sql
echo "-- Prepare CM database 'scm'"
/opt/cloudera/cm/schema/scm_prepare_database.sh mysql scm scm cloudera
echo "-- Install additional CSDs"
for csd in $(find $BASE_DIR/csds -name "*.jar"); do
echo "---- Copying $csd"
cp $csd /opt/cloudera/csd/
done
echo "-- Install additional parcels"
for parcel in $(find $BASE_DIR/parcels -name "*.parcel"); do
echo "---- Copying ${parcel}"
cp ${parcel} /opt/cloudera/parcel-repo/
echo "---- Copying ${parcel}.sha"
cp ${parcel}.sha /opt/cloudera/parcel-repo/
done
echo "-- Set CSDs and parcel repo permissions"
chown -R cloudera-scm:cloudera-scm /opt/cloudera/csd /opt/cloudera/parcel-repo
chmod 644 $(find /opt/cloudera/csd /opt/cloudera/parcel-repo -type f)
echo "-- Start CM, it takes about 2 minutes to be ready"
systemctl enable cloudera-scm-server
systemctl enable cloudera-scm-agent
systemctl start cloudera-scm-server
echo "-- Enable passwordless root login via rsa key"
ssh-keygen -f $KEY_FILE -t rsa -N ""
mkdir -p ~/.ssh
chmod 700 ~/.ssh
cat $KEY_FILE.pub >> ~/.ssh/authorized_keys
chmod 400 ~/.ssh/authorized_keys
ssh-keyscan -H $(hostname) >> ~/.ssh/known_hosts
sed -i 's/.*PermitRootLogin.*/PermitRootLogin without-password/' /etc/ssh/sshd_config
systemctl restart sshd
echo "-- Automate cluster creation using the CM API"
sed -i "\
s/YourHostname/$(hostname -f)/g;\
s/YourCDSWDomain/cdsw.${PUBLIC_IP}.nip.io/g;\
s/YourPrivateIP/$(hostname -I | tr -d '[:space:]')/g;\
s/YourPublicDns/$PUBLIC_DNS/g;\
s#YourDockerDevice#$DOCKERDEVICE#g;\
s#ANACONDA_PARCEL_REPO#$ANACONDA_PARCEL_REPO#g;\
s#ANACONDA_VERSION#$ANACONDA_VERSION#g;\
s#CDH_PARCEL_REPO#$CDH_PARCEL_REPO#g;\
s#CDH_BUILD#$CDH_BUILD#g;\
s#CDH_VERSION#$CDH_VERSION#g;\
s#CDSW_PARCEL_REPO#$CDSW_PARCEL_REPO#g;\
s#CDSW_BUILD#$CDSW_BUILD#g;\
s#CFM_PARCEL_REPO#$CFM_PARCEL_REPO#g;\
s#CFM_VERSION#$CFM_VERSION#g;\
s#CM_VERSION#$CM_VERSION#g;\
s#SCHEMAREGISTRY_BUILD#$SCHEMAREGISTRY_BUILD#g;\
s#STREAMS_MESSAGING_MANAGER_BUILD#$STREAMS_MESSAGING_MANAGER_BUILD#g;\
s#CSA_PARCEL_REPO#$CSA_PARCEL_REPO#g;\
s#FLINK_BUILD#$FLINK_BUILD#g;\
" $TEMPLATE
echo "-- Check for additional parcels"
chmod +x ${BASE_DIR}/check-for-parcels.sh
ALL_PARCELS=$(${BASE_DIR}/check-for-parcels.sh ${NOPROMPT})
if [[ "$ALL_PARCELS" == "OK" ]]; then
sed -i "s/^CSPOPTION//" $TEMPLATE
else
sed -i "/^CSPOPTION/ d" $TEMPLATE
fi
if [[ "$CSP_PARCEL_REPO" == "" ]]; then
sed -i "/CSPREPO/ d" $TEMPLATE
else
sed -i "s#CSPREPO#,"\""$CSP_PARCEL_REPO"\""#" $TEMPLATE
fi
if [[ "$CDH_MAJOR_VERSION" == "7" ]]; then
sed -i "/^CDH7OPTION/ d" $TEMPLATE
else
sed -i "s/^CDH7OPTION//" $TEMPLATE
fi
if [[ -n "$CDSW_BUILD" && "$CDH_MAJOR_VERSION" == "6" ]]; then # TODO: Change this when CDSW is available for CDP-DC
sed -i "s/^CDSWOPTION//" $TEMPLATE
sed -i "s#CDSWREPO#,"\""$CDSW_PARCEL_REPO"\""#" $TEMPLATE
else
sed -i "/^CDSWOPTION/ d" $TEMPLATE
sed -i "/CDSWREPO/ d" $TEMPLATE
fi
echo "-- Wait for CM to be ready before proceeding"
until $(curl --output /dev/null --silent --head --fail -u "admin:admin" http://localhost:7180/api/version); do
echo "waiting 10s for CM to come up.."
sleep 10
done
echo "-- CM has finished starting"
CM_REPO_URL=$(grep baseurl $CM_REPO_FILE | sed 's/.*=//;s/ //g')
python $BASE_DIR/create_cluster.py $(hostname -f) $TEMPLATE $KEY_FILE $CM_REPO_URL
echo "-- Configure and start EFM"
retries=0
while true; do
mysql -u efm -pcloudera < <( echo -e "drop database efm;\ncreate database efm;" )
nohup service efm start &
sleep 10
set +e
ps -ef | grep efm.jar | grep -v grep
cnt=$(ps -ef | grep efm.jar | grep -v grep | wc -l)
set -e
if [ "$cnt" -gt 0 ]; then
break
fi
if [ "$retries" == "5" ]; then
break
fi
retries=$((retries + 1))
echo "Retrying to start EFM ($retries)"
done
echo "-- Enable and start MQTT broker"
systemctl enable mosquitto
systemctl start mosquitto
echo "-- Copy demo files to a public directory"
mkdir -p /opt/demo
cp -f $BASE_DIR/simulate.py /opt/demo/
cp -f $BASE_DIR/spark.iot.py /opt/demo/
chmod -R 775 /opt/demo
echo "-- Start MiNiFi"
systemctl enable minifi
systemctl start minifi
# TODO: Implement Ranger DB and Setup in template
# TODO: Fix kafka topic creation once Ranger security is setup
echo "-- Create Kafka topic (iot)"
kafka-topics --zookeeper edge2ai-1.dim.local:2181/kafka --create --topic iot --partitions 10 --replication-factor 1
kafka-topics --zookeeper edge2ai-1.dim.local:2181/kafka --describe --topic iot
if [[ -n "$FLINK_BUILD" && "$CDH_MAJOR_VERSION" == "6" ]]; then # TODO: Change this when Flink is available for CDP-DC
echo "-- Flink: extra workaround due to CSA-116"
sudo -u hdfs hdfs dfs -chown flink:flink /user/flink
sudo -u hdfs hdfs dfs -mkdir /user/${SSH_USER}
sudo -u hdfs hdfs dfs -chown ${SSH_USER}:${SSH_USER} /user/${SSH_USER}
echo "-- Runs a quick Flink WordCount to ensure everything is ok"
echo "foo bar" > echo.txt
export HADOOP_USER_NAME=flink
hdfs dfs -put echo.txt
flink run -sae -m yarn-cluster -p 2 /opt/cloudera/parcels/FLINK/lib/flink/examples/streaming/WordCount.jar --input hdfs:///user/$HADOOP_USER_NAME/echo.txt --output hdfs:///user/$HADOOP_USER_NAME/output
hdfs dfs -cat hdfs:///user/$HADOOP_USER_NAME/output/*
unset HADOOP_USER_NAME
fi
echo "-- At this point you can login into Cloudera Manager host on port 7180 and follow the deployment of the cluster"
# Finish install
| true |
37c2470183d70d7b484af7150690978789dd8fda | Shell | pengdan01/spider | /crawler/log_analysis/userlog/url_query/script/batch_url_query.sh | UTF-8 | 898 | 3.203125 | 3 | [] | no_license | #!/bin/bash
set -u
today=`date +%Y%m%d`
# src=/data/userlog/upload_query
# dst=/app/user_data/md5_query/
#
# for date in `$HADOOP_HOME/bin/hadoop fs -ls $src | awk 'NF > 5' | awk -F'/' '{print $NF;}'`;do
# echo $date
# [[ "$date" < "20120322" ]] && continue
# if [ "$date" != "$today" ]; then
# bash -x gen_md5_map.sh -o $dst -i $src --date $date --query_or_url 0 --reducer 50
# [[ $? -ne 0 ]] && exit 1
# fi
# done
src=/app/user_data/pv_log
dst=/app/user_data/url_query
#$HADOOP_HOME/bin/hadoop fs -rmr ${dst}/*
for date in `$HADOOP_HOME/bin/hadoop fs -ls $src | awk 'NF > 5' | awk -F'/' '{print $NF;}'`;do
date=`echo $date | awk -F'_' '{print $1;}'`
echo $date
[[ $date < "20120617" ]] && continue
# [[ $date == "20120401" ]] && break
if [ "$date" != "$today" ]; then
bash -x gen_url_query.sh -o $dst -i $src --date $date
[[ $? -ne 0 ]] && exit 1
fi
done
| true |
ed9f9a1d1ff799e2198091bfc075b071b3c7b5a6 | Shell | hogepodge/locistack | /deploy/scripts/nova/initialize-nova-database.sh | UTF-8 | 1,354 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -x
/scripts/common/wait-for-it.sh --host=mariadb --port=3306 -t 60
# because we can't actually trust MariaDB to be ready
sleep 5
cat > /tmp/create_database.sql <<-EOF
CREATE DATABASE IF NOT EXISTS nova CHARACTER SET utf8;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
IDENTIFIED BY '$MYSQL_ROOT_PASSWORD';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
IDENTIFIED BY '$MYSQL_ROOT_PASSWORD';
CREATE DATABASE IF NOT EXISTS nova_api CHARACTER SET utf8;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
IDENTIFIED BY '$MYSQL_ROOT_PASSWORD';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
IDENTIFIED BY '$MYSQL_ROOT_PASSWORD';
CREATE DATABASE IF NOT EXISTS nova_cell0 CHARACTER SET utf8;
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
IDENTIFIED BY '$MYSQL_ROOT_PASSWORD';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
IDENTIFIED BY '$MYSQL_ROOT_PASSWORD';
EOF
mysql -u root -p$MYSQL_ROOT_PASSWORD -h ${CONTROL_HOST_IP} < /tmp/create_database.sql
# Assumes that /scripts/nova/generate-configs.sh has been run
nova-manage api_db sync
nova-manage cell_v2 map_cell0 --database_connection "mysql+pymysql://nova:$MYSQL_ROOT_PASSWORD@${CONTROL_HOST_IP}/nova_cell0?charset=utf8"
nova-manage cell_v2 create_cell --name=cell1 --verbose
nova-manage db sync
nova-manage cell_v2 list_cells
| true |
d8fd482ae9e660b94266a48d0c25b090961acd65 | Shell | malus-security/ioracle | /idaScripts/idaBatchAnalysis.sh | UTF-8 | 1,946 | 4.375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
if [ "$1" == "-h" ] || [ "$#" -ne 3 ]; then
echo "Error: Invalid Arguments"
echo "This script takes 3 arguments and its purpose is to run an ida analysis on several executables saving the results in databases named after hashed file paths."
echo "Usage: $0 pathToExtractediOSFileSystem filePathsToExecutables directoryForResults"
exit 0
fi
pathToExtractediOSFileSystem=$1
filePathsToExecutables=$2
directoryForResults=$3
mappingFile="$directoryForResults/hashedPathToFilePathMapping.csv"
#this helps us process the list of inputs by separating each line for the for loop
IFS=$'\n'
filelines=`cat $filePathsToExecutables`
rm $mappingFile
for line in $filelines ;
do
#the path to the extracted file system is concatenated with the path that existed on the iOS device
#for example assume an executable was stored at /myPrograms/coolProgram on the iPhone.
#The root of the filesystem extracted from iOS is stored at /myFiles/iOSXFileSystem on a PC
#Then we would want the filePath variable to be /myFiles/iOSXFileSystem/myPrograms/coolProgram to find the executable on the PC
filePath="$pathToExtractediOSFileSystem$line"
hashedPath=`echo $line | md5sum | sed 's/\ .*//g'`
#it's probably safer to separate by comma since the file paths might include spaces
echo "$hashedPath,$line" >> $mappingFile
#the third argument is the location to move the executables to and store the ida databases in
cp $filePath $directoryForResults/$hashedPath
#this is the line that actually runs IDA
#the -B option is for batch analysis, so IDA will run its default analysis and close itself when finished.
#the -o option lets us specify the file path of the database produced by analysis
#In this case, $directoryForResults/$hashedPath is the path to the executable we want to run analysis on.
idal64 -B -o$directoryForResults/$hashedPath.i64 $directoryForResults/$hashedPath
done
| true |
a240a8b3376d08acdf615c05f160742d4d2a5d72 | Shell | surajssd/scripts | /shell/k8s-install-single-node/install.sh | UTF-8 | 1,576 | 2.734375 | 3 | [] | no_license | #!/bin/bash
set -x
# enable fastmirror
curl https://raw.githubusercontent.com/surajssd/scripts/master/shell/post_machine_install/fastmirror.sh | sh
# install and start docker
yum install -y docker
systemctl enable docker && systemctl start docker
# install kubelet and start it
echo "
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
" | tee /etc/yum.repos.d/kubernetes.repo
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable kubelet && systemctl start kubelet
# set SELinux context kubernetes files
mkdir -p /etc/kubernetes/
chcon -R -t svirt_sandbox_file_t /etc/kubernetes/
# set SELinux context for etcd files
mkdir -p /var/lib/etcd
chcon -R -t svirt_sandbox_file_t /var/lib/etcd
# start kubeadm
kubeadm config images pull
kubeadm config images pull
kubeadm init
# set the kubectl context
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# install network
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
# make master usable
kubectl taint nodes --all node-role.kubernetes.io/master-
# list nodes available
kubectl get nodes
# https://kubernetes.io/docs/setup/independent/install-kubeadm/
# https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
| true |
76071ed38b8673b69ae8594f2a451fe7fdb993ef | Shell | blissGen/sourceCode | /Bash/lolRandom | UTF-8 | 422 | 3.265625 | 3 | [] | no_license | #!/bin/bash
lolRandom () {
randGen | lolcat
}
randGen () {
lengthOfLine=22
for i in {1..500}
do
declare -a randomNum
for j in $( seq 0 $lengthOfLine)
do
randomNum[j]=$(( $RANDOM % 1000))
done
for k in $( seq 0 $lengthOfLine)
do
echo -n ${randomNum[k]} "" | awk '{printf "%03d ", $0;}'
sleep 0.05
done
echo
#sleep 0.1
done
}
lolRandom
exit
| true |
a8112f50adf6e45e1a24a65096ded6bb4bb91d54 | Shell | ajw1980/my-check_mk-checks | /local_checks/check_certs.sh | UTF-8 | 1,504 | 3.828125 | 4 | [] | no_license | #!/bin/bash
threshold=1209600 # warn if Certificate will expire within 1209600 seconds -> 14 days
now=$(date +%s)
function check_cert () {
if [ -f $1 ]
then
expire=$(date -d "$(openssl x509 -in $1 -enddate -noout | cut -f2 -d'=')" +%s)
cn=$(openssl x509 -in $1 -subject -noout | sed -n '/^subject/s/^.*CN=//p')
lh=$(hostname | tr '[:upper:]' '[:lower:]')
if [ "$cn" == "$lh/emailAddress=root@$lh" ]
then
echo "0 certs_$1 - Certificate for $cn is self signed and will be ignored"
elif [ $now -ge $expire ]
then
echo "2 certs_$1 - Certificate for $cn expired"
elif [ $(expr $now + $threshold) -ge $expire ]
then
h=$(expr $(expr $expire - $now) / 3600)
echo "1 certs_$1 - Certificate for $cn will expire in $h hours"
else
echo "0 certs_$1 - Certificate for $cn ok"
fi
fi
}
for userdefinedcerts in $(cat /etc/check_mk/certcheck.d/* 2> /dev/null)
do
check_cert $userdefinedcerts
done
for apachecerts in $(egrep -h ".(pem|crt)" /etc/apache2/sites-enabled/* 2> /dev/null | awk '{ print $2 }' | sort -u)
do
check_cert $apachecerts
done
for apachecerts in $(egrep -h ".(pem|crt)" /etc/httpd/conf.d/* 2> /dev/null | awk '{ print $2 }' | sort -u)
do
check_cert $apachecerts
done
for nginxcerts in $(egrep -h ".(pem|crt)" /etc/nginx/conf.d/* 2> /dev/null | awk '{ print $2 }' | sed 's/;$//g' | sort -u)
do
check_cert $nginxcerts
done
| true |
5dca246edd4bfd9b9ac0a3501ee9fada9cb0ce30 | Shell | Calmskyy/FEUP-Projects | /SDIS/SDIS-2/peer.sh | UTF-8 | 833 | 3.375 | 3 | [] | no_license | #! /usr/bin/bash
# Script for running a peer
# To be run in src/build directory
# Check number input arguments
argc=$#
if (( argc != 4 ))
then
echo "Usage: $0 <peer_id> <svc_access_point> <server_addr> <server_port>"
exit 1
fi
# Assign input arguments to nicely named variables
peer_id=$1
svc_access_point=$2
server_addr=$3
server_port=$4
# Execute the program
java "-Djavax.net.ssl.keyStore=../keystore.jks" "-Djavax.net.ssl.keyStorePassword=password" "-Djavax.net.ssl.trustStore=../truststore.jks" "-Djavax.net.ssl.trustStorePassword=password" App ${peer_id} ${svc_access_point} ${server_addr} ${server_port}
# java "-Djavax.net.ssl.keyStore=keystore.jks" "-Djavax.net.ssl.keyStorePassword=password" "-Djavax.net.ssl.trustStore=truststore.jks" "-Djavax.net.ssl.trustStorePassword=password" App 1 1 127.0.0.1 9669
| true |
7462eec979b8edfb25d9de2f5f3381e138b27528 | Shell | humanfactors/michael-dotfiles | /bin/post-install-debian.bash | UTF-8 | 1,647 | 2.6875 | 3 | [] | no_license | #! /bin/bash
# Michael's Post Debian Install Script
mkdir ~/tmp/
mkdir ~/Code/
sudo apt-get -y update && apt-get -y upgrade
# Essentials
sudo apt-get -y install git
sudo apt-get -y install curl wget
sudo apt-get -y install emacs vim terminator
sudo apt-get -y install apt install linux-headers-$(uname -r | sed 's/[^-}*-[^-]*-//')
sudo apt-get -y chromium
# Scientific Computing
sudo apt-get -y install texlive
sudo apt-get -y install pandoc pandoc-citeproc pandoc-data
sudo apt-get -y install python2 python-pip
sudo apt-get -y install python3 python3-pip
sudo apt-get -y install r-base
sudo apt-get -y install golang
# Fonts
sudo apt-get -y install ttf-mscorefonts-installer
git clone git clone https://github.com/powerline/fonts.git
cd fonts/
./install.sh
cd ..
rm -rf fonts/
fc-cache -f -v
# Development
sudo apt-get -y install build-essential automake autoconf gnu-standards
sudo apt-get -y install pwgen htop
# Gnome Specific Settings
apt-get -y install arc-theme
sudo apt install papirus-icon-theme
# Frameworks
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# Zsh Configurations
zsh
git clone https://github.com/zsh-users/zsh-autosuggestions $ZSH_CUSTOM/plugins/zsh-autosuggestions
git clone https://github.com/denysdovhan/spaceship-prompt.git "$ZSH_CUSTOM/themes/spaceship-prompt"
git clone https://github.com/syl20bnr/spacemacs ~/.emacs.d
# Firefox Install
# Download latest firefox
# sudo apt remove firefox-esr
# tar xf firefox-58.0.tar.bz2
# sudo mv firefox /opt/firefox
# sudo ln -s /opt/firefox/firefox /usr/bin/firefox
# sudo gedit /usr/share/applications/firefox.desktop | true |
8d5f28ecae51d0cdef4a072ebeb2895055d4e615 | Shell | h4ck3rm1k3/awips2 | /tools/clusterAutoDeploy/auto-install-rcm-stop.sh | UTF-8 | 1,251 | 3.65625 | 4 | [] | no_license | #!/bin/sh
DATE=`date`
echo "**************************************************************************************"
echo "Auto Radar Server Stop & Uninstall Started - $DATE"
echo "**************************************************************************************"
echo "Getting variables from env.txt"
. env.txt
echo "----Checking for running RadarServer:----"
COUNT=`ps aux | grep -c "RadarServer"`
if [ $COUNT -gt 1 ]
then
echo "Found running RadarServer...stopping"
echo "/etc/init.d/edex_rcm stop"
/etc/init.d/edex_rcm stop
else
echo "No RadarServer Running"
fi
if [ -d ${RCMBAKFOLDER} ]
then
echo "Removing ${RCMBAKFOLDER}"
rm -rf ${RCMBAKFOLDER}
else
echo "No previous ${RCMBAKFOLDER} to cleanup"
fi
if [ -d ${RCMINSTALLFOLDER} ]
then
echo "Backingup ${RCMINSTALLFOLDER} to ${RCMBAKFOLDER}"
mv ${RCMINSTALLFOLDER} ${RCMBAKFOLDER}
else
echo "**** Warning: No existing ${RCMINSTALLFOLDER} to backup, no config files to copy after installing ****"
fi
DATE=`date`
echo "--------------------------------------------------------------------------------------"
echo "Auto Radar Server Stop & Uninstall Completed At $DATE"
echo "--------------------------------------------------------------------------------------"
echo ""
exit
| true |
bcaea698c6da2a8e63a2cf769a7213292711ddf5 | Shell | RedSkiesIO/cryptos | /scripts/mkimg.standard.sh | UTF-8 | 601 | 2.5625 | 3 | [] | no_license | profile_standard() {
title="Standard"
desc="CryptOS as it was intended.
Just enough to get you started.
Network connection is required."
profile_base
image_ext="iso"
apks="$apks syslinux accore"
arch="x86 x86_64 ppc64le"
output_format="iso"
kernel_cmdline="nomodeset"
apkovl="genapkovl-dhcp.sh"
kernel_addons="xtables-addons"
}
profile_virt() {
profile_standard
title="Virtual"
desc="Similar to standard.
Slimmed down kernel.
Optimized for virtual systems."
kernel_addons=
kernel_flavors="virt"
kernel_cmdline="console=tty0 console=ttyS0,115200"
syslinux_serial="0 115200"
}
| true |
bcbb56205600d376ada883b312b870ff6adfccd0 | Shell | petronny/aur3-mirror | /lua-lunit/PKGBUILD | UTF-8 | 791 | 2.625 | 3 | [
"MIT"
] | permissive | # Maintainer: Gustavo Alvarez <sl1pkn07@gmail.com>
pkgname=lua-lunit
_pkgname=lunit
pkgver=0.5
pkgrel=1
pkgdesc='Unit test for Lua'
arch=('any')
url="http://www.mroth.net/lunit"
license=('MIT')
depends=('lua' 'bash')
source=("http://www.mroth.net/lunit/${_pkgname}-${pkgver}.tar.gz"
'LICENSE')
sha1sums=('7a7cfddab10836f9ed16574be6a450a63a5d4be7'
'e471a511997528dd080d2997580499c37d06111a')
prepare() {
rm -fr "${pkgname}"
cp -R "${_pkgname}-${pkgver}" "${pkgname}"
}
package() {
cd "${pkgname}"
install -Dm644 lunit-console.lua "${pkgdir}/usr/share/lua/5.2/lunit-console.lua"
install -Dm644 lunit.lua "${pkgdir}/usr/share/lua/5.2/lunit.lua"
install -Dm755 lunit "${pkgdir}/usr/bin/lunit"
install -Dm644 "${srcdir}/LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true |
9fed49e31ee665d078f30832fbda9055d0c6ce1f | Shell | namickey/home-camera | /line-push.sh | UTF-8 | 786 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# 最新の写真1枚を取得
filename=`ls -t image/all | head -n1`
s3url=$line_home_s3/$filename
# 過去送信で使用していない、ユニークなIDを取得
uuid=$(< /proc/sys/kernel/random/uuid)
# 送信(コマンド内で使用する変数については、事前に環境変数に設定しておく)
curl -v -X POST https://api.line.me/v2/bot/message/push \
-H 'Content-Type: application/json' \
-H "Authorization: Bearer $line_home_Bearer" \
-H "X-Line-Retry-Key: $uuid" \
-d '{
"to": "'$line_home_userid'",
"messages":[
{
"type":"text",
"text":"home image"
},
{
"type": "image",
"originalContentUrl": "'$s3url'",
"previewImageUrl": "'$s3url'"
}
]
}' | true |
a689ecfbb6e6d1f0f8b099c7e99faa803cedf260 | Shell | ashneo76/placepop | /places/mapplacetofield.sh | UTF-8 | 141 | 3.0625 | 3 | [] | no_license | #!/bin/bash
infile=$1
srcfile=$2
while read line; do
grep -e "$line" $srcfile | awk -F"|" '{print $1"\t"$2}' | sort | uniq
done < $infile
| true |
00eaef65191032df6dd477fe718596cf28e628d5 | Shell | kwlee0220/marmot.server.dist | /misc/marmot-server | UTF-8 | 629 | 3.3125 | 3 | [] | no_license | #!/bin/sh
export MARMOT_HOST=localhost
export MARMOT_PORT=12985
export MARMOT_HOME=/opt/marmot.server.dist
export MARMOT_CLIENT_HOME=/opt/marmot-server
RETVAL=0
case $1 in
start)
nohup $MARMOT_HOME/bin/marmot_server > /dev/null 2>&1 &
# $MARMOT_HOME/bin/marmot_server
;;
stop)
nohup $MARMOT_HOME/bin/shutdown_marmot > /dev/null 2>&1 &
# $MARMOT_HOME/bin/shutdown_marmot
;;
restart)
nohup $MARMOT_HOME/bin/marmot_server > /dev/null 2>&1 &
sleep 1
nohup $MARMOT_HOME/bin/shutdown_marmot > /dev/null 2>&1 &
;;
status)
;;
*)
echo "Usage: $0 {star|stop|restart|status}"
RETVAL=2
;;
esac
exit $RETVAL
| true |
4c188829f67ca2b39e8ee896a83d1fe9b3805e67 | Shell | miquelraynal/conf | /bin/test-nfc | UTF-8 | 1,004 | 3.421875 | 3 | [] | no_license | #!/bin/sh
if [ ! "$1" ]; then
echo "usage: $(basename $0) <mtd-device> [<iterations>]"
echo "example: $(basename $0) /dev/mtd2 (default 10 iterations)"
exit 1
fi
DEV=$1
if [ "$2" ]; then
ITERATIONS=$2
else
ITERATIONS=10
fi
display() {
echo
echo
echo "#####===============> $* <==============#####"
echo
}
display "NAND BIT ERRS"
nandbiterrs -i $DEV
display "NAND PAGE TEST"
nandpagetest -c $ITERATIONS $DEV
display "NAND SUBPAGE TEST"
nandsubpagetest -c $ITERATIONS $DEV
#display "NAND TEST"
#nandtest -p $ITERATIONS $DEV
display "FLASH READ TEST"
flash_readtest -c $ITERATIONS $DEV
display "FLASH SPEED"
flash_speed -d -c $ITERATIONS $DEV
display "FLASH STRESS"
flash_stress -c $ITERATIONS $DEV
#display "FLASH TORTURE"
#flash_torture -c $ITERATIONS $DEV
display "UBIFORMAT"
ubiformat $DEV
display "UBIATTACH"
ubiattach -p $DEV
display "UBIMKVOL"
ubimkvol /dev/ubi0 -N test_fe -s 64MiB
display "MOUNT"
mount -t ubifs ubi0:test_fe ./ubifs-volume/
| true |
2be296b12c2213104bdc0eaf8c68c13e0e844253 | Shell | sboosali/sboosali.github.io | /bash/get-patches | UTF-8 | 505 | 2.65625 | 3 | [] | no_license | #!/bin/bash
set -u
##################################################
export HaskellDirectory=~/haskell/
export WebDirectory=~/www/
export PatchesDirectory="${WebDirectory}"/sboosali.github.io/patches/
##################################################
(cd "${HaskellDirectory}/polyparse" && git diff v1.12 v1.30 > "${PatchesDirectory}/polyparse.diff")
(cd "${HaskellDirectory}/HaXml2" && git diff v1.26 v1.30 > "${PatchesDirectory}/haxml.diff")
################################################## | true |
ff0a8925f13f40bed3c10a798962e1bebedadc08 | Shell | JeremyFyke/JG_BG_simulation_workflow | /currently_unused_scripts/concatenate_daily_histaux_to_months.sh | UTF-8 | 458 | 2.953125 | 3 | [] | no_license | #!/bin/bash
CaseName=BG_iteration_1_switch_to_CAM54_midstream
SD=/glade/scratch/jfyke/archive/$CaseName/cpl/hist
for yr in `seq -f '%04g' 1 1`; do
for m in `seq -f '%02g' 1 12`; do
for ftype in ha2x1hi ha2x1h ha2x3h ha2x1d; do
fname_out=$SD/$CaseName.cpl.$ftype.$yr-$m.nc
echo $fname_out
if [ -f $fname_out ]; then
rm $fname_out
fi
ncrcat -O $SD/$CaseName.cpl.$ftype.$yr-$m-*.nc $fname_out
done
done
done
| true |
8425ad10aeb80c879f65299bbb5a13c84843ca0a | Shell | greenplum-db/gpdb | /src/test/gpdb_pitr/test_gpdb_pitr_cleanup.sh | UTF-8 | 1,428 | 3.625 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"PostgreSQL",
"OpenSSL",
"LicenseRef-scancode-stream-benchmark",
"ISC",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-ssleay-windows",
"BSD-2-Clause",
"Python-2.0"
] | permissive | #!/usr/bin/env bash
## ==================================================================
## Run this script to clean up after the GPDB PITR test.
##
## Note: Assumes PITR cluster is up and running, and that the gpdemo
## cluster settings are sourced.
## ==================================================================
# Store gpdemo master and primary segment data directories.
# This assumes default settings for the data directories.
DATADIR="${COORDINATOR_DATA_DIRECTORY%*/*/*}"
MASTER=${DATADIR}/qddir/demoDataDir-1
PRIMARY1=${DATADIR}/dbfast1/demoDataDir0
PRIMARY2=${DATADIR}/dbfast2/demoDataDir1
PRIMARY3=${DATADIR}/dbfast3/demoDataDir2
TEMP_DIR=$PWD/temp_test
# Stop the PITR cluster.
echo "Stopping the PITR cluster..."
REPLICA_MASTER=$TEMP_DIR/replica_m
COORDINATOR_DATA_DIRECTORY=$REPLICA_MASTER gpstop -ai -q
# Remove the temp_test directory.
echo "Removing the temporary test directory..."
rm -rf $TEMP_DIR
# Reset the wal_level on the gpdemo master and primary segments.
echo "Undoing WAL Archive settings on gpdemo cluster..."
for datadir in $MASTER $PRIMARY1 $PRIMARY2 $PRIMARY3; do
sed -i'' -e "/wal_level = replica/d" $datadir/postgresql.conf
sed -i'' -e "/archive_mode = on/d" $datadir/postgresql.conf
sed -i'' -e "/archive_command = 'cp %p/d" $datadir/postgresql.conf
done
# Start back up the gpdemo cluster.
echo "Starting back up the gpdemo cluster..."
gpstart -a
dropdb gpdb_pitr_database
| true |
f25c7d6587d2c053a166e93acaa935b079a10a22 | Shell | dancier/code-dancer | /iac/core/db-dump_chat_dancer.sh | UTF-8 | 236 | 2.65625 | 3 | [] | no_license | cd /run-env/db-dump
NOW=$(date +"%Y-%m-%d-%H-%M-%S")
FILENAME=db_dump_chat_dancer_${NOW}.sql.gz
echo "Dumping into ${FILENAME}"
docker exec chat-dancer-db pg_dump -h localhost -U chat-dancer chat-dancer | gzip -9 > $FILENAME
echo "Done" | true |
65b0365355bd2bd962afbed72d5371e3f9210a17 | Shell | yozot/utils | /smime | UTF-8 | 510 | 3.640625 | 4 | [] | no_license | #!/bin/sh
## $Id: smime,v 1.1 2002/12/05 17:04:15 yozo Exp $
## smime processing
## TODO: smime message composition
PATH=/usr/bin
OPENSSL=/usr/sbin/openssl
progname=`basename $0`
usage(){
echo "usage: ${progname} < message" 1>&2
echo "${progname} verifies a S/MIME message using openssl." 1>&2
}
if [ $# -eq 1 ]; then
case "$1" in
-h*)
usage
exit 0
;;
*)
echo "unknown option ($1)" 1>&2
exit 10
;;
esac
fi
${OPENSSL} smime -verify -noverify > /dev/null
| true |
e9259919a24983d22071759c1351e30b7e22aad1 | Shell | divyankgupta123000/1BM18CS030_UnixLab | /Lab-4/powofnum.sh | UTF-8 | 168 | 3.328125 | 3 | [] | no_license | #/!bin/sh
echo "Enter number and power: -"
read num pow
temp=$num
while [ $pow -gt 1 ]
do
num=`expr $num \* $temp`
pow=`expr $pow - 1`
done
echo "Answer = "$num
| true |
2a63d1ae50692eafdd17242dc4ee818660cf871a | Shell | petronny/aur3-mirror | /xlockblank/PKGBUILD | UTF-8 | 1,081 | 2.65625 | 3 | [
"LicenseRef-scancode-mit-old-style"
] | permissive | pkgname=xlockblank
pkgver=5.41
pkgrel=1
pkgdesc="screen saver / locker for the X Window System - only includes the blank mode"
arch=(i686 x86_64)
license=('BSD')
depends=( pam libxmu)
makedepends=()
conflicts=(xlockmore)
url="http://www.tux.org/~bagleyd/xlockmore.html"
options=('!makeflags')
source=(http://www.tux.org/~bagleyd/xlock/xlockmore-$pkgver/xlockmore-$pkgver.tar.bz2
LICENSE)
md5sums=('a9af1cc72f0fd096ba4bba9097f9291c'
'a64afab4283f53972a6702c2e59850d7')
build() {
cd $srcdir/xlockmore-$pkgver
./configure --prefix=/usr --disable-setuid \
--enable-appdefaultdir=/usr/share/X11/app-defaults \
--enable-pam --without-gtk2 --enable-blank-only --without-magick --without-mesa --without-opengl --without-xpm --without-motif --without-freetype --without-esound --without-ftgl --without-xinerama
make
make xapploaddir=$pkgdir/usr/share/X11/app-defaults \
mandir=$pkgdir/usr/man/man1 \
prefix=$pkgdir/usr install
install -D -m644 ../LICENSE $pkgdir/usr/share/licenses/$pkgname/LICENSE
mv $pkgdir/usr/man $pkgdir/usr/share/
}
| true |
1fcac3a576746a263abc12d53cee33db5028573a | Shell | teemukin65/tarinapeli_s | /src/main/docker/db/init-user-db.sh | UTF-8 | 645 | 2.984375 | 3 | [] | no_license | #!/bin/bash
set -e
echo " docker init-user:postgres , story user: $POSTGRES_STORY_USER"
echo " w/ pwd: $POSTGRES_STORY_PASSWORD"
POSTGRES_STORY_TEST_DB=${POSTGRES_STORY_DB}_test
echo "creating story db: ${POSTGRES_STORY_DB}"
echo "and creating story test db: ${POSTGRES_STORY_TEST_DB}"
echo
psql -v ON_ERROR_STOP=1 --username "postgres" <<-EOSQL
CREATE USER "$POSTGRES_STORY_USER" LOGIN NOSUPERUSER NOCREATEDB ;
ALTER USER "$POSTGRES_STORY_USER" WITH PASSWORD '$POSTGRES_STORY_PASSWORD';
CREATE DATABASE $POSTGRES_STORY_DB;
GRANT ALL PRIVILEGES ON DATABASE $POSTGRES_STORY_DB TO $POSTGRES_STORY_USER;
CREATE DATABASE $POSTGRES_STORY_TEST_DB;
EOSQL
| true |
897a3f18f1d03915f2c7768b681e263c51f444d0 | Shell | teja624/home | /.zsh/modules/aws/lib/sh/api/cognito-idp/up_sign.sh | UTF-8 | 231 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | aws_cognito-idp_up_sign() {
local client_id="$1"
local username="$2"
local password="$3"
shift 3
cond_log_and_run aws cognito-idp sign-up --client-id $client_id --username $username --password $password "$@"
}
| true |
b55d2173f589b29162e7c9dc461dbe86dc6dc431 | Shell | glyn/garden-linux | /old/linux_backend/skeleton/lib/hook-parent-before-clone.sh | UTF-8 | 206 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
[ -n "$DEBUG" ] && set -o xtrace
set -o nounset
set -o errexit
shopt -s nullglob
cd $(dirname $0)/../
source ./etc/config
cp bin/wshd $rootfs_path/sbin/wshd
chmod 700 $rootfs_path/sbin/wshd
| true |
448efc978c30f4fd6c83e044e45919fb844d2e15 | Shell | 13768324554/dev-with-docker-on-ubuntu | /extras.sh | UTF-8 | 1,080 | 3 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo "** INSTALLING support software for development"
sudo add-apt-repository -y ppa:jonathonf/vim
sudo apt-get update -y
sudo apt-get install -y zsh vim xauth xclip \
mysql-client libmysqlclient-dev \
postgresql-client libpq-dev \
tar wget curl libevent-dev libncurses-dev
echo "** INSTALLING tmux 2.6 from source"
sudo apt-get -y remove tmux
VERSION=2.6 && mkdir ~/tmux-src && \
wget -qO- https://github.com/tmux/tmux/releases/download/${VERSION}/tmux-${VERSION}.tar.gz | tar xvz -C ~/tmux-src && \
cd ~/tmux-src/tmux* && \
./configure && make -j"$(nproc)" && \
sudo make install && \
cd ~ && rm -rf ~/tmux-src
echo "** INSTALLING common programming packages"
sudo apt-get install -y nodejs-legacy ruby golang-go python-dev python-pip
pip install --upgrade pip
[[ -f ~/localextras.sh ]] && bash ~/localextras.sh
echo "** Consider installing MySQL, PostgreSQL, and Redis"
# sudo apt-get install -y postgresql libpq-dev redis-server mysql-server
# If there are problems with font configuration this may help
#sudo dpkg-reconfigure console-setup
| true |
bc4bec328ed348c28b78b9970cf52201d62a12f1 | Shell | geosconsulting/bash_psql | /shp_2_gwis_2.sh | UTF-8 | 279 | 2.859375 | 3 | [] | no_license | #!/bin/bash
ftp_dir="data/"
filetype="*.shp"
#psql service=S1
for nomefile in $ftp_dir$filetype
do
fname=$(basename $nomefile)
echo $fname
echo $nomefile
# psql service=$1
shp2pgsql -a -s 4326 -g geom $nomefile temp_ba | psql -U postgres -d test_egeos
done
| true |
93b80185844dd88a996a5ff9d12486eaf70294ec | Shell | smck1337/ts3-adminbot | /bot.sh | UTF-8 | 1,014 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Colors
ESC_SEQ="\x1b["
COL_RESET=$ESC_SEQ"39;49;00m"
COL_RED=$ESC_SEQ"31;01m"
COL_GREEN=$ESC_SEQ"32;01m"
COL_YELLOW=$ESC_SEQ"33;01m"
echo -e "$COL_RED
------------------------------------------------------------------------
| |
|
| ======================================== |
| | $COL_YELLOW endless AdminBot v1 $COL_RED | |
| ======================================== |
| |
------------------------------------------------------------------------ $COL_RESET"
if [ $1 = 'stop' ]
then
pkill -f AdminsBot
echo -e "AdminsBot: $COL_GREEN A BOT sikeresen leallitva! $COL_RESET"
fi
if [ $1 = 'start' ]
then
screen -dmS AdminsBot php adminsbot.php -i 1
echo -e "AdminsBot: $COL_GREEN A BOT sikeresen elinditva! $COL_RESET"
fi
| true |
07549aeaab66cbc2260ec06b260bd6428404169c | Shell | mkarg75/vz-helpers | /nodevert_scale.sh | UTF-8 | 1,806 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Edit these
export PODS_PER_NODE=200
export NODEVERTICAL_NODE_COUNT=15
#export NODEVERTICAL_NODE_COUNT=117
export NODEVERTICAL_STEPSIZE=50
export NODEVERTICAL_PAUSE=30
export NODEVERTICAL_TS_TIMEOUT=600
# Presets and calculated values
export NODEVERTICAL_TEST_PREFIX=nodevertical_${NODEVERTICAL_NODE_COUNT}
export NODEVERTICAL_BASENAME=nodevertical-rerun-1
export NODEVERTICAL_CLEANUP=true
export NODEVERTICAL_POD_IMAGE="gcr.io/google_containers/pause-amd64:3.0"
# Max pods calculated from pod density and node count
export NODEVERTICAL_MAXPODS=$(echo "$PODS_PER_NODE * $NODEVERTICAL_NODE_COUNT" | bc)
# 50 pods take up to 3 minutes to get to running state, which
# comes to about 3.6 seconds per pod, so rounding up to 4 seconds
export EXPECTED_NODEVERTICAL_DURATION=$(echo "$NODEVERTICAL_MAXPODS * 4" | bc)
# There is roughly a 10 second delay between polls, so the
# number of polls should be the duration / 10
export JOB_COMPLETION_POLL_ATTEMPTS=$(printf %.0f $(echo "$EXPECTED_NODEVERTICAL_DURATION / 10" | bc))
starttime=$(date +%s%N | cut -b1-13)
time ansible-playbook -i inventory workloads/nodevertical.yml
endtime=$(date +%s%N | cut -b1-13)
echo Start $starttime >> /tmp/nodevert_${NODEVERTICAL_NODE_COUNT}.log
echo Endtime $enddtime >> /tmp/nodevert_${NODEVERTICAL_NODE_COUNT}.log
# create annotations in all the dasbhoards
for dashid in 1 2 3 4; do
curl -H "Content-Type: application/json" -X POST -d "{\"dashboardId\":${dashid},\"time\":${starttime},\"isRegion\":\"true\",\"timeEnd\":${endtime},\"tags\":[\"${NODEVERTICAL_TEST_PREFIX}\"],\"text\":\"${NODEVERTICAL_NODE_COUNT} nodes - Nodevertical test - ${NODEVERTICAL_MAXPODS} maxpods\"}" http://admin:admin@dittybopper-dittybopper.apps.$(oc cluster-info | head -1 | awk -F. '{print $2}').myocp4.com/api/annotations
done
| true |
6e753ca146fe817ce2969e2ad6038b684f783ce7 | Shell | white-mns/lo_parse | /_update_chack.sh | UTF-8 | 883 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
CURENT=`pwd` #実行ディレクトリの保存
cd `dirname $0` #解析コードのあるディレクトリで作業をする
RESULT_NO=$1
GENERATE_NO=$2
while :
do
wget http://ykamiya.ciao.jp/index.html
if [ $GENERATE_NO -eq 0 ]; then
grep "<FONT color=\"#ff3333\">${RESULT_NO}</FONT> 更新結果公開中!" index.html
# "
elif [ $GENERATE_NO -eq 1 ]; then
grep "<FONT color=\"#ff3333\">${RESULT_NO}</FONT> 再更新結果公開中!" index.html
# "
elif [ $GENERATE_NO -eq 2 ]; then
grep "<FONT color=\"#ff3333\">${RESULT_NO}</FONT> 再々更新結果公開中!" index.html
# "
fi
IS_UPDATE=$?
rm index.html
echo $IS_UPDATE
if [ $IS_UPDATE -eq 0 ]; then
echo "update!!"
./execute.sh ${RESULT_NO} 0
exit
fi
echo "no..."
sleep 1800
done
| true |
ac789e9c1853fbac1ac5812e6b44ddde5324b76c | Shell | wangc31/rest-quick-launcher | /in-one/entrypoint.sh | UTF-8 | 280 | 2.609375 | 3 | [] | no_license | #!/bin/sh
mkdir -p ${DCTM_REST_HOME}
if [ ! -f ${DCTM_REST_HOME}/WEB-INF/classes/dfc.properties ]; then
unzip ${REST_TMP}/dctm-rest.war -d ${CATALINA_HOME}/webapps/dctm-rest
fi
cp ${REST_TMP}/*.properties ${DCTM_REST_HOME}/WEB-INF/classes/
${CATALINA_HOME}/bin/catalina.sh run | true |
9663c12bdedb542576604d11a6c9b9653bfc7309 | Shell | spring-cloud/spring-cloud-dataflow | /src/deploy/carvel/configure-prometheus-proxy.sh | UTF-8 | 1,069 | 3.296875 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | #!/usr/bin/env bash
function set_properties() {
PREFIX=$1
yq "${PREFIX}.management.metrics.export.prometheus.rsocket.host=\"$HOST\"" -i ./scdf-values.yml
yq "${PREFIX}.management.metrics.export.prometheus.pushgateway.base-url=\"http://$HOST:$PORT\"" -i ./scdf-values.yml
yq "${PREFIX}.management.metrics.export.prometheus.pushgateway.enabled=true" -i ./scdf-values.yml
yq "${PREFIX}.management.metrics.export.prometheus.pushgateway.shutdown-operation=\"PUSH\"" -i ./scdf-values.yml
yq "${PREFIX}.management.metrics.export.prometheus.step=\"$STEP\"" -i ./scdf-values.yml
}
if [ "$2" = "" ]; then
echo "Usage is: <host> <port> [step]"
echo "Where <step> is the frequency of published metrics. Default is 10s"
exit 1
fi
HOST=$1
PORT=$2
if [ "$3" != "" ]; then
STEP=$3
else
STEP=10s
fi
set_properties ".scdf.server.config"
set_properties ".scdf.server.config.spring.cloud.dataflow.task"
set_properties ".scdf.skipper.config"
set_properties ".scdf.skipper.config.spring.cloud.skipper.server.platform.kubernetes.accounts.default"
| true |
6f85ae80a88800bc4681ef0fa35c4f931a93e246 | Shell | huongbn/opnfv-vnf-vyos-blueprint | /FIL-work/3-router-dpi-ids-loadbalancer/scripts/web/install-apache.sh | UTF-8 | 158 | 2.703125 | 3 | [] | no_license | #!/bin/bash
set -e
function install_apache2 {
sudo apt-get update && sudo apt-get dist-upgrade -y
sudo apt-get install -y apache2
}
install_apache2 | true |
b0c4eee6e96a364105a00b545a192ca5d4ae09ef | Shell | JosiahKennedy/openedx-branded | /edx/app/rabbitmq/log-rabbitmq-queues.sh | UTF-8 | 639 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env bash
set -x
vpc_name=default_env-default_deployment
log_directory=/edx/var/log/rabbitmq
OLD_IFS=$IFS
IFS=$'\n'
vhosts=`/usr/sbin/rabbitmqctl list_vhosts | grep "^/"`
for vhost in $vhosts; do
queues=`/usr/sbin/rabbitmqctl list_queues -p $vhost | awk 'NF==2{ print }'`
mkdir -p ${log_directory}/${vhost}
for queue in $queues; do
queue_name=`echo $queue | awk '{ print $1 }'`
echo $queue | sed 's/\s*/ /' | awk -v date="$(date)" -v vhost="$vhost" '{ print "date=\x27"date"\x27","vhost=\x27"vhost"\x27","queue=\x27"$1"\x27","length="$2}' >> ${log_directory}/${vhost}/${queue_name}.log
done
done
IFS=$OLD_IFS
| true |
17a41681633fbfaefeadbbc33bfba588985e44f1 | Shell | luotao717/arsdk | /build/scripts/ap96-small/makeowl_dev.sh | UTF-8 | 315 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Build the ap96-small board image. $1 is the SDK root; the build tree is
# expected at $1/build.
# source the variables needed for build
TOP="$1/build"
# Quoting protects paths containing spaces; bail out if the directory is
# missing so 'make' can never run in whatever directory we were invoked from.
cd "$TOP" || exit 1
#
#echo "---------------------"
#echo "Resetting permissions"
#echo "---------------------"
#find . -name \* -user root -exec sudo chown build {} \; -print
#find . -name \.config -exec chmod 777 {} \; -print
#
make BOARD_TYPE=ap96-small
6e676952d929dd7cd22bfcdb4ff93890eae6aef8 | Shell | compulab-yokneam/Documentation | /etc/honister | UTF-8 | 645 | 2.75 | 3 | [] | no_license | #!/bin/bash -xv
# Migrate the listed Yocto layers to the "honister" release: the old
# underscore override syntax (_append/_prepend/_remove/_${PN}) is rewritten
# to the new colon syntax (:append/:prepend/:remove/:${PN}), and "honister"
# is added to each layer's LAYERSERIES_COMPAT list.
# NOTE(review): edits files in place with sed -i — run only on a clean tree.
for D in meta-bsp-imx8mp meta-compulab meta-compulab-bsp;do
cd ${D}
for pattern in append prepend remove;do
# grep -r lists matching files; the awk filter drops .patch files (the
# regex '.patch' is unanchored, so any file whose path contains "patch"
# preceded by any character is skipped) and prints only the filename column.
for f in $(grep -r "${pattern}" . | awk -F":" '(!/.patch/)&&($0=$1)');do
	# First rewrite the double-sided form (_append_ -> :append:), then the
	# trailing form (_append -> :append); order matters.
	sed -i "s/_${pattern}_/\:${pattern}\:/g" ${f};
	sed -i "s/_${pattern}/\:${pattern}/g" ${f};
done
done
for pattern in LAYERSERIES_COMPAT;do
for f in $(grep -r "${pattern}" . | awk -F":" '(!/.patch/)&&($0=$1)');do
	# Append " honister" just before the closing quote of the compat list.
	sed -i 's/\(LAYERSERIES_COMPAT.*\)\"/\1 honister\"/g' ${f};
done
done
# Package-name overrides: _${PN} -> :${PN}.
for f in $(grep -r '_${PN}' . | awk -F":" '(!/.patch/)&&($0=$1)');do
	sed -i 's/_${PN}/\:${PN}/g' ${f};
done
cd -
done
3fda467fc5dd5ee62e4699004812ff9562a84c3c | Shell | delkyd/alfheim_linux-PKGBUILDS | /xwinmosaic-git/PKGBUILD | UTF-8 | 652 | 2.5625 | 3 | [] | no_license | # Maintainer: Anton S. Lobashev <soulthreads@yandex.ru>
# Arch Linux PKGBUILD for the git snapshot of xwinmosaic.
pkgname=xwinmosaic-git
_gitname=xwinmosaic
pkgver=v0.4.2
pkgrel=1
pkgdesc="X11 window switcher with fancy look"
url="http://github.com/soulthreads/xwinmosaic"
arch=('i686' 'x86_64')
license=('BSD')
depends=('gtk2')
makedepends=('cmake' 'git')
options=(!libtool strip)
source=('git://github.com/soulthreads/xwinmosaic.git')
# 'SKIP' because the source is a moving git HEAD, not a fixed tarball.
md5sums=('SKIP')

# Derive the package version from the most recent git tag/commit;
# hyphens are mapped to dots because pacman versions may not contain '-'.
pkgver() {
  cd $_gitname
  git describe --always | sed 's|-|.|g'
}

# Out-of-tree-ish cmake configure + build inside the cloned checkout.
build() {
  cd $_gitname
  cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DWITH_SCRIPTS=ON "$srcdir/$_gitname"
  make || return 1
}

# Stage the build into $pkgdir for packaging.
package() {
  cd $_gitname
  make DESTDIR="$pkgdir" install
}
0ab059e43e73693565e60dc92156b4e0fe6507d2 | Shell | HaririLab/pipeline2.0_DNS | /gPPI_faces_splitRuns.sh | UTF-8 | 8,121 | 3.046875 | 3 | [] | no_license | # Following instructions here: https://afni.nimh.nih.gov/CD-CorrAna
# SGE array-task script: runs a generalized PPI (gPPI) analysis of the
# "faces" task for one subject per task index, one GLM per face block,
# then relabels outputs by emotional expression and copies them back from
# scratch ($TMPDIR) to the subject's directory.
# --- BEGIN GLOBAL DIRECTIVE --
#$ -o $HOME/$JOB_NAME.$JOB_ID.$TASK_ID.out
#$ -e $HOME/$JOB_NAME.$JOB_ID.$TASK_ID.out
# -- END GLOBAL DIRECTIVE --

BASEDIR=$(findexp DNS.01)
index=${SGE_TASK_ID}
# Subject ID is picked by line number from the roster file (task index N
# selects line N).
ID=`head -$index $BASEDIR/Analysis/All_Imaging/DNSids_faces1263.txt | tail -1` # $1
ROI=$BASEDIR/Analysis/SPM/ROI/NEW_PIPELINE/Amygdala/Tyszka_L_BL.nii # $2
OUTNAME=Tyszka_L_BL_splitRuns #$3
PROCDIR=$BASEDIR/Analysis/All_Imaging/$ID/faces/
OUTDIR=$TMPDIR
OUTDIRFINAL=$BASEDIR/Analysis/All_Imaging/$ID/faces/gPPI/$OUTNAME
# Column 12 of DataLocations.csv encodes the block-order counterbalancing
# (1-4), consumed by the case statement at the bottom.
FACESORDER=`grep "$ID" $BASEDIR/Analysis/All_Imaging/DataLocations.csv | cut -d, -f12`;
nTRs=43 # total number of TRs that will be used for each block (25 for faces + 9 for each adjacent shapes)
blockStarts=(10 54 98 142)
blockEnds=(52 96 140 184)

echo "----JOB [$JOB_NAME.$JOB_ID] SUBJ $ID START [`date`] on HOST [$HOSTNAME]----"
echo "***** Faces order is $FACESORDER *****"

mkdir -p $OUTDIRFINAL

# create files for conditions
## (tutorial gives instructions for how to do this with non-TR sync'd)
# NOTE(review): the rm calls error harmlessly on a fresh $TMPDIR; the >>
# appends below are why stale files must be removed first.
rm $OUTDIR/Shapes.1D; rm $OUTDIR/Faces.1D;
for i in `seq 1 19`; do echo 1 >> $OUTDIR/Shapes.1D; echo 0 >> $OUTDIR/Faces.1D; done
for i in `seq 1 25`; do echo 0 >> $OUTDIR/Shapes.1D; echo 1 >> $OUTDIR/Faces.1D; done
for i in `seq 1 19`; do echo 1 >> $OUTDIR/Shapes.1D; echo 0 >> $OUTDIR/Faces.1D; done

# create other stim data
waver -dt 2 -GAM -inline 1@1 > $OUTDIR/GammaHR.1D

# create shapes regressor by calculating the convolved regressor for the full block then trimming to adjancent half
# (note that the xmat file has a blank line at the end)
3dDeconvolve -nodata 63 2 -xout -num_stimts 1 -stim_times 1 '1D: 0 88' 'SPMG1(38)' -x1D_stop -x1D $OUTDIR/ShapesRegressor_extended
grep -v "#" $OUTDIR/ShapesRegressor_extended.xmat.1D | awk '{print $3}' | head -53 | tail -43 > $OUTDIR/ShapesRegressor.1D

cd $OUTDIR

# loop through blocks
for i in `seq 0 3`; do

# extract the average time series of the ROI
# we will do this for the entire adjacent shapes block, and then trim it to just the closest adjacent half after all convolution is done
## andy (2013) uses 3dSynthesize to remove drift & motion effects and extract the seed ts from teh resulting cleaned data (doesn't use the cleaned data after that)
## > however the afni instructions don't mention this and just suggest using the preprocessed ts from afni_proc, then uses 3dDetrend (which Andy skips)
# The sub-brick range pads the block by 10 TRs on each side.
3dmaskave -mask $ROI -quiet $PROCDIR/epiWarped_blur6mm.nii.gz"[$((${blockStarts[$i]}-10))..$((${blockEnds[$i]}+10))]" > $OUTDIR/Seed_block$((i+1)).1D

# detrend seed time series and transpose to column
# Use \' to transpose Seed.1D
3dDetrend -polort 1 -prefix $OUTDIR/SeedR_block$((i+1)) $OUTDIR/Seed_block$((i+1)).1D\'; 1dtranspose $OUTDIR/SeedR_block$((i+1)).1D $OUTDIR/Seed_ts_block$((i+1)).1D; rm $OUTDIR/SeedR_block$((i+1)).1D

## Run 1dUpsample here for non-TR synchronized onsets?

# generate impulse response function
## change dt if non-TR sync'd

# deconvolve seed time series
# -FALTUNG fset fpre pen fac
3dTfitter -RHS $OUTDIR/Seed_ts_block$((i+1)).1D -FALTUNG $OUTDIR/GammaHR.1D $OUTDIR/Seed_Neur_block$((i+1)) 012 0

for cond in Shapes Faces; do
# create the interaction regressors, starting with the interaction at the neuronal level, then reconvolving using waver
### see instructions for additional steps if not sync'ed to TR grids
1deval -a $OUTDIR/Seed_Neur_block$((i+1)).1D\' -b $OUTDIR/${cond}.1D -expr 'a*b' > $OUTDIR/Interaction_Neur${cond}_block$((i+1)).1D
waver -GAM -peak 1 -TR 2 -input $OUTDIR/Interaction_Neur${cond}_block$((i+1)).1D -numout $((nTRs+20)) > $OUTDIR/Interaction_${cond}_block$((i+1)).1D
# now trim the regressors to only include the adjacent half of the shapes blocks
head -53 $OUTDIR/Interaction_${cond}_block$((i+1)).1D | tail -43 > $OUTDIR/Interaction_${cond}_trimmed_block$((i+1)).1D
### ARK: scale Interaction regressor to have a peak of 1! Testing this out to see if it changes results
# there is at least one case (DNS1388/Tyszka_R_ALL Anger) where all values in Interaction_${cond}.1D are negative; in this case, scale to a min of -1 instead of a max of 1
max=$(awk -v max=-999 '{if($1>max){max=$1}}END{print max}' $OUTDIR/Interaction_${cond}_trimmed_block$((i+1)).1D )
if [[ $max -eq 0 ]]; then
min=$(awk -v min=999 '{if($1<min){min=$1}}END{print min}' $OUTDIR/Interaction_${cond}_trimmed_block$((i+1)).1D )
1deval -a $OUTDIR/Interaction_${cond}_trimmed_block$((i+1)).1D -expr "-a/$min" > $OUTDIR/Interaction_${cond}_trimmed_scaled_block$((i+1)).1D
else
1deval -a $OUTDIR/Interaction_${cond}_trimmed_block$((i+1)).1D -expr "a/$max" > $OUTDIR/Interaction_${cond}_trimmed_scaled_block$((i+1)).1D
fi
done

# create the linear model
maskfile=$BASEDIR/Analysis/Max/templates/DNS500/DNS500template_MNI_BrainExtractionMask_2mm.nii.gz
# arguments to stim_times are in seconds!
# glt arg should always be 1
# using polort 3 here per recommendation in afni_proc.py help documentation
## wasn't sure if censor should be any different/included here, but seems fine since they do here and gang doesn't call them out on it https://afni.nimh.nih.gov/afni/community/board/read.php?1,155395,155395#msg-155395
outname=glm_output_$((i+1))
# Design matrix only (-x1D_stop); the actual fit is done by 3dREMLfit below.
3dDeconvolve -input $PROCDIR/epiWarped_blur6mm.nii.gz"[${blockStarts[$i]}..${blockEnds[$i]}]" -xout -mask $maskfile -num_stimts 4 \
-stim_file 1 ShapesRegressor.1D -stim_label 1 ShapesPair \
-stim_times 2 '1D: 18' 'SPMG1(50)' -stim_label 2 Faces1 \
-stim_file 3 Interaction_Shapes_trimmed_scaled_block$((i+1)).1D -stim_label 3 Interaction_Shapes_scaled_block$((i+1)) \
-stim_file 4 Interaction_Faces_trimmed_scaled_block$((i+1)).1D -stim_label 4 Interaction_Faces_scaled_block$((i+1)) \
-censor $PROCDIR/glm_AFNI_splitRuns_noShapes/outliers_block$((i+1)).1D \
-x1D Decon_$((i+1)) -x1D_stop

3dREMLfit -input $PROCDIR/epiWarped_blur6mm.nii.gz"[${blockStarts[$i]}..${blockEnds[$i]}]" -matrix Decon_$((i+1)).xmat.1D -mask $maskfile \
-Rbuck ${outname}.nii.gz \
-noFDR -tout

#3dTcat -prefix ${outname}_coefs.nii.gz ${outname}.nii.gz'[5]' ${outname}.nii.gz'[7]'
#rm ${outname}.nii.gz

done # loop through blocks

# now relabel the files according to expression
# Map block number -> expression according to the counterbalanced order.
case $FACESORDER in
1) fear=1; neut=2; ange=3; surp=4; ;; # FNAS
2) fear=2; neut=1; ange=4; surp=3; ;; # NFSA
3) fear=3; neut=4; ange=1; surp=2; ;; # ASFN
4) fear=4; neut=3; ange=2; surp=1; ;; # SANF
*) echo "Invalid faces order $FACESORDER!!! Exiting."
exit; ;;
esac
# Sub-brick [5] = shapes interaction beta, [7] = faces interaction beta.
3dcalc -prefix fear_gr_shapes.nii.gz -a glm_output_${fear}.nii.gz'[5]' -b glm_output_${fear}.nii.gz'[7]' -expr '(b-a)'
3dcalc -prefix neutral_gr_shapes.nii.gz -a glm_output_${neut}.nii.gz'[5]' -b glm_output_${neut}.nii.gz'[7]' -expr '(b-a)'
3dcalc -prefix anger_gr_shapes.nii.gz -a glm_output_${ange}.nii.gz'[5]' -b glm_output_${ange}.nii.gz'[7]' -expr '(b-a)'
3dcalc -prefix surprise_gr_shapes.nii.gz -a glm_output_${surp}.nii.gz'[5]' -b glm_output_${surp}.nii.gz'[7]' -expr '(b-a)'
3dcalc -prefix faces_gr_shapes_avg.nii.gz -a fear_gr_shapes.nii.gz -b neutral_gr_shapes.nii.gz -c anger_gr_shapes.nii.gz -d surprise_gr_shapes.nii.gz -expr '(a+b+c+d)/4'
3dcalc -prefix anger_gr_neutral.nii.gz -a glm_output_${ange}.nii.gz'[7]' -b glm_output_${neut}.nii.gz'[7]' -expr '(a-b)'
3dcalc -prefix fear_gr_neutral.nii.gz -a glm_output_${fear}.nii.gz'[7]' -b glm_output_${neut}.nii.gz'[7]' -expr '(a-b)'
3dcalc -prefix anger+fear_gr_neutral.nii.gz -a glm_output_${ange}.nii.gz'[7]' -b glm_output_${fear}.nii.gz'[7]' -c glm_output_${neut}.nii.gz'[7]' -expr '((a+b)/2-c)'
3dcalc -prefix habit_1g2g3g4.nii.gz -a glm_output_1.nii.gz'[7]' -b glm_output_2.nii.gz'[7]' -c glm_output_3.nii.gz'[7]' -d glm_output_4.nii.gz'[7]' -expr '(0.75*a+0.25*b-0.25*c-0.75*d)'
rm glm_output*.nii.gz
rm Interaction*1D
rm Seed*1D
gunzip *nii.gz

# Copy results from scratch space back to permanent storage.
cp -r $OUTDIR/* $OUTDIRFINAL

# -- BEGIN POST-USER --
echo "----JOB [$JOB_NAME.$JOB_ID] STOP [`date`]----"
mv $HOME/$JOB_NAME.$JOB_ID.${SGE_TASK_ID}.out $OUTDIRFINAL/$JOB_NAME.$JOB_ID.${SGE_TASK_ID}.out
# -- END POST-USER --
46980955893d9b2744d79332952c88e4b127889a | Shell | ucwlabs/iot-monitoring-ttn | /run.sh | UTF-8 | 279 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Prepare local storage for InfluxDB/Grafana, then build and start the stack.
DOCKER_COMPOSE_CMD=docker-compose
DATA_DIR=${PWD}/storage
# One mkdir -p creates the root and both service sub-directories; quoting
# keeps the script working when the current directory contains spaces.
mkdir -p "$DATA_DIR/influxdb" "$DATA_DIR/grafana"
# World-writable so the containers' unprivileged users can write their data.
# NOTE(review): 777 is very permissive — consider chown to the container UIDs.
chmod -R 777 "$DATA_DIR"
env DATA_DIR="$DATA_DIR" $DOCKER_COMPOSE_CMD build ttn-bridge
env DATA_DIR="$DATA_DIR" $DOCKER_COMPOSE_CMD up -d
49c590b63a318ea737dbf2d59376cee05429ec38 | Shell | sybila/NewBioDiVinE | /tool/model_manipulation/combine | UTF-8 | 4,359 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Written by Jiri Barnat
# Usage: combine -h
#
# Combines a DVE/probDVE model without an LTL property with each LTL
# property from an .ltl/.mltl file, producing one product file (or stdout
# dump with -o) per formula. @BINPREFIX@ is substituted at install time.
MYNAME=`echo $0|sed 's!.*/!!'`

# do_it FORMULA OUTFILE — append the Buchi automaton for FORMULA (built by
# ltl2ba; -p adds (semi)determinization when -d was given) to OUTFILE.
do_it()
{
 # BUG FIX: the formula used to be sent with an unquoted 'echo $1', which
 # word-split it and glob-expanded bracket patterns such as "[]p"; printf
 # with a quoted argument passes it through verbatim.
 if [ $DETERM == "yes" ]
   then printf '%s\n' "$1" |`dirname $0`/@BINPREFIX@ltl2ba -g -O6 -t -p >>"$2"
   else printf '%s\n' "$1" |`dirname $0`/@BINPREFIX@ltl2ba -g -O6 -t >>"$2"
 fi
}

version()
{
  echo "$MYNAME version 1.5"
}

usage()
{
  echo "-----------------------------------------------------------------"
  echo "DiVinE Tool Set"
  echo "-----------------------------------------------------------------"
  version
  echo "-----------------------------------------------------------------"
  echo "This script is used to make product .dve or .probdve files. It combines "
  echo "a dve/probdve model file without LTL property with LTL properties given "
  echo "in an .ltl file. Each combination is written into a separate .dve or "
  echo ".probdve file."
  echo
  echo "Usage: "
  echo "$MYNAME [switches] file.[m,prob,mprob]dve file.[m]ltl [preprocessor defs]"
  echo
  echo "switches:"
  echo " -p x process only x-th property from file file.ltl or file.mltl"
  echo -n " -o do not create product file, "
  echo "but print its contents to stdout "
  echo -n " (-o can be used only if either there is only one formula "
  echo "in file.ltl"
  echo "     or there is a formula specified with the switch -p,"
  echo "     -o implies switch -q)"
  echo " -q do not print names of produced files (quiet mode)"
  echo " -h show this small help"
  echo " -v show $MYNAME version"
  echo " -d determinize/semideterminize property automaton"
}

# Defaults for the command-line switches.
EXTENSION=bio
DETERM=no
SINGLEPROP=no
OUT=no
NMBR=0
QUIET=no

while getopts dqhop:v VOLBA
do
  case $VOLBA in
  o) OUT=yes; QUIET="yes";;
  p) SINGLEPROP=yes; NMBR=$OPTARG;;
  q) QUIET=yes;;
  v) version;exit 1;;
  d) DETERM=yes;;
  \?|h) usage
        exit 1;;
  esac
done

shift $(($OPTIND-1))

# Exactly two positional arguments (model file, ltl file) are required;
# anything after them is forwarded to the preprocessor.
if [ $# -lt 2 ];
then
  usage
  exit 1
fi

if [ ! -e $1 ];
 then echo File $1 does not exist. >/dev/stderr ;
 exit 1
fi;
if [ ! -e $2 ];
 then echo File $2 does not exist. >/dev/stderr ;
 exit 1
fi;

FILE=$1
LTL=$2
shift;shift;

if [ -n "`echo -n $FILE|grep probdve`" ];
then EXTENSION=probdve
fi

# A .mltl property file is run through the preprocessor first.
if [ -n "`echo -n $LTL|grep .mltl`" ];
then @BINPREFIX@preprocessor $* $LTL >___TemPLTL
     LTL=___TemPLTL
fi

# MAX = number of '#property' lines found in the LTL file.
# NOTE(review): if the file contains no properties, MAX is empty and the
# comparisons below will misbehave.
MAX=`grep ^'[ ]*#property' $LTL | nl | tail -n 1 | cut -f1 | sed 's/ //g'`

if [ $SINGLEPROP == "yes" ] && ( (($MAX < $NMBR)) || (($NMBR < "1")) );
then echo "Invalid formula specification. Use number between 1 and $MAX."
     exit 1
fi

if [ $MAX != "1" ] && [ $SINGLEPROP == "no" ] && [ $OUT == "yes" ] ;
then echo "Cannot print more than one file to standard output."
    exit 1
fi

# deal with .dve and .probdve
NAME=`echo $FILE|sed 's/\.dve$//' | sed 's/\.probdve$//'`

# deal with .mdve: preprocess the model and encode the -D parameters into
# the output name as "name[PARAMS]".
if [ -n "`echo -n $FILE|grep mdve$`" ];
then @BINPREFIX@preprocessor $* $FILE >___TemPFilE2
     PARAMS=`echo $*|sed 's/-D/[/'|sed 's/ //'`
     if [ -n "`echo -n $PARAMS`" ];
      then PARAMS="$PARAMS]"
     fi
     NAME=`echo $FILE|sed 's/\.mdve$//'`$PARAMS
     FILE=___TemPFilE2;
fi

# deal with .mprobdve (same treatment as .mdve)
if [ -n "`echo -n $FILE|grep mprobdve$`" ];
then @BINPREFIX@preprocessor $* $FILE >___TemPFilE2
     PARAMS=`echo $*|sed 's/-D/[/'|sed 's/ //'`
     if [ -n "`echo -n $PARAMS`" ];
      then PARAMS="$PARAMS]"
     fi
     NAME=`echo $FILE|sed 's/\.mprobdve$//'`$PARAMS
     FILE=___TemPFilE2;
fi

# For each '#property' line: strip the model's system declaration, emit the
# property automaton, and append the proper system declaration.
N=0
for i in `grep ^'[ ]*#property' $LTL|sed 's/ /%%%/g'`;
do
  # Spaces were protected as %%% to survive the for-loop word splitting.
  i=`echo $i | sed 's/%%%/ /g' | sed 's/[ ]*#property[ ]*//'`
  N=`echo $(($N + 1))`
  if [ $SINGLEPROP == "yes" ] && (($NMBR != $N)); then continue ; fi
  grep -v "system async" $FILE | grep -v "system sync" >___TemPFilE
  grep ^'[ ]*#define' $LTL >___TemPFilE1
  do_it "$i" ___TemPFilE1
  # Expand the #defines with cpp, dropping preprocessor markers/blank lines.
  cpp -E ___TemPFilE1|grep -v ^'#'|grep -v ^$ >>___TemPFilE
  echo >>___TemPFilE
  if [ -n "`grep 'system async' $FILE`" ]
  then echo "system async property LTL_property;" >>___TemPFilE
  else echo "system sync property LTL_property;" >>___TemPFilE
  fi
  if [ $OUT == "yes" ]
  then cat ___TemPFilE | uniq
  else if [ $QUIET != "yes" ]; then echo $NAME.prop$N.$EXTENSION"  "$i ; fi
       cat ___TemPFilE | uniq >$NAME.prop$N.$EXTENSION
  fi
done

rm -f ___TemPFilE ___TemPFilE1 ___TemPFilE2 ___TemPLTL
exit
f6290b0e0d9d961191fdfffd38f695a781c2838f | Shell | petronny/aur3-mirror | /hnefatafl/PKGBUILD | UTF-8 | 758 | 2.671875 | 3 | [] | no_license | # Maintainer: Reuben Castelino <projectdelphai[at]gmail[dot]com>
# Arch Linux PKGBUILD for the hnefatafl viking board game.
pkgname=hnefatafl
pkgver=0.1.3
pkgrel=1
pkgdesc="A viking game"
# NOTE(review): arch 'any' is normally reserved for architecture-independent
# packages, yet build() compiles a binary — confirm this is intended.
arch=('any')
url="https://github.com/projectdelphai/hnefatafl"
license=('GPL')
depends=('qt5-base')
source=(https://github.com/projectdelphai/$pkgname/archive/$pkgver.tar.gz)
noextract=()
md5sums=('42378149a9e8b5906c2098f0ec70fef7')

build() {
  cd $pkgname-$pkgver
  # The project's Makefile expects a tmp/ scratch directory to exist.
  mkdir -p tmp
  make
}

package() {
  cd $pkgname-$pkgver
  mkdir -p $pkgdir/usr/bin
  install -m755 release/$pkgname $pkgdir/usr/bin/$pkgname
  mkdir -p $pkgdir/usr/lib
  install -m755 release/libjson_linux-gcc-4.8.2_libmt.so $pkgdir/usr/lib/libjson_linux-gcc-4.8.2_libmt.so
  # NOTE(review): installing under $pkgdir/$HOME bakes the *builder's* home
  # path into the package — almost certainly not what end users need;
  # game data normally belongs in /usr/share/$pkgname. Verify upstream intent.
  mkdir -p $pkgdir/$HOME
  install -m755 release/hnefatafl-data $pkgdir$HOME/.hnefatafl-data
}
a6d24eff750cc9bdf72794e2b962ac1889ce4f75 | Shell | cytopia/dotfiles | /bash/config/bashrc.d/alias-cfg.bash | UTF-8 | 3,917 | 2.953125 | 3 | [] | no_license | ################################################################################
###
### System configuration
###
################################################################################

###
### /etc/fstab
###
# Open /etc/fstab in $EDITOR (vim fallback); non-root users go through sudo.
cfg-fstab() {
	if [ "$(id -u)" == "0" ]; then
		${EDITOR:-vim} /etc/fstab
	else
		sudo "${EDITOR:-vim}" /etc/fstab;
	fi
}

###
### /etc/hosts
###
# Open /etc/hosts in $EDITOR (vim fallback); non-root users go through sudo.
cfg-hosts() {
	if [ "$(id -u)" == "0" ]; then
		${EDITOR:-vim} /etc/hosts
	else
		sudo "${EDITOR:-vim}" /etc/hosts
	fi
}

# Edit the XDG user-dirs configuration, if present.
cfg-xdg-dirs() {
	if [ -f ${XDG_CONFIG_HOME}/user-dirs.dirs ]; then
		${EDITOR:-vim} ${XDG_CONFIG_HOME}/user-dirs.dirs
	fi
}

################################################################################
###
### Bash configuration
###
################################################################################

###
### ~/.bashrc
###
if [ -f "${HOME}/.bashrc" ]; then
	cfg-bashrc() { ${EDITOR:-vim} ~/.bashrc; };
fi

###
### ~/.bash_profile
###
if [ -f "${HOME}/.bash_profile" ]; then
	cfg-bash-profile() { ${EDITOR:-vim} ~/.bash_profile; };
fi

###
### ~/.config/bash/bashrc.d/*.bash
###
# Generate one cfg-bash-<name> helper per drop-in file. The eval expands
# ${EDITOR:-vim} and the file path at *definition* time.
if [ -d "${XDG_CONFIG_HOME}/bash/bashrc.d/" ]; then
	for _f in ${XDG_CONFIG_HOME}/bash/bashrc.d/*.bash ; do
		_suffix="$( basename "${_f}" )"
		_suffix="${_suffix%.bash}"
		eval "cfg-bash-${_suffix}() { ${EDITOR:-vim} \"${_f}\"; }"
		unset _f
		unset _suffix
	done
fi

###
### ~/.config/bash/bash_profile/*.bash
###
if [ -d "${XDG_CONFIG_HOME}/bash/bash_profile/" ]; then
	for _f in ${XDG_CONFIG_HOME}/bash/bash_profile/*.bash ; do
		_suffix="$( basename "${_f}" )"
		_suffix="${_suffix%.bash}"
		eval "cfg-bash-${_suffix}() { ${EDITOR:-vim} \"${_f}\"; }"
		unset _f
		unset _suffix
	done
fi

################################################################################
###
### Ranger configuration
###
################################################################################

# BUG FIX: the guards previously tested "rc,conf", "rifle,conf" and
# "commands,py" (comma instead of dot), files that never exist, so these
# helpers were never defined even though ranger was configured.
if [ -f "${HOME}/.config/ranger/rc.conf" ]; then
	cfg-ranger() { ${EDITOR:-vim} ~/.config/ranger/rc.conf; }
fi
if [ -f "${HOME}/.config/ranger/rifle.conf" ]; then
	cfg-ranger-rifle() { ${EDITOR:-vim} ~/.config/ranger/rifle.conf; } # edit open_with extensions
fi
if [ -f "${HOME}/.config/ranger/commands.py" ]; then
	cfg-ranger-commands() { ${EDITOR:-vim} ~/.config/ranger/commands.py; } # scripts
fi

################################################################################
###
### Misc configuration
###
################################################################################

###
### Single dotfiles
###
if [ -f "${HOME}/.inputrc" ]; then
	cfg-inputrc() { ${EDITOR:-vim} ~/.inputrc; };
fi
if [ -f "${HOME}/.gitconfig" ]; then
	cfg-git() { ${EDITOR:-vim} ~/.gitconfig; };
fi
if [ -f "${HOME}/.ctags" ]; then
	cfg-ctags() { ${EDITOR:-vim} ~/.ctags; };
fi
if [ -f "${HOME}/.muttrc" ]; then
	cfg-mutt() { ${EDITOR:-vim} ~/.muttrc; };
fi
if [ -f "${HOME}/.tmux.conf" ]; then
	cfg-tmux() { ${EDITOR:-vim} ~/.tmux.conf; };
fi
if [ -f "${HOME}/.vimrc" ]; then
	cfg-vim() { ${EDITOR:-vim} ~/.vimrc; };
fi
if [ -f "${HOME}/.config/nvim/vim.init" ]; then
	cfg-nvim() { ${EDITOR:-vim} ~/.config/nvim/vim.init; };
fi
if [ -f "${HOME}/.Xdefaults" ]; then
	cfg-xdefaults() { ${EDITOR:-vim} ~/.Xdefaults; };
fi
if [ -f "${HOME}/.xinitrc" ]; then
	cfg-xinitrc() { ${EDITOR:-vim} ~/.xinitrc; };
fi
if [ -f "${HOME}/.Xmodmap" ]; then
	cfg-xmodmap() { ${EDITOR:-vim} ~/.Xmodmap; };
fi
if [ -f "${HOME}/.Xresources" ]; then
	cfg-xresources() { ${EDITOR:-vim} ~/.Xresources; };
fi
if [ -f "${HOME}/.xsession" ]; then
	cfg-xsession() { ${EDITOR:-vim} ~/.xsession; };
fi

###
### ~/.config/Xresources.d/*.xresources
###
# Generate one cfg-xresources-<name> helper per Xresources drop-in file.
if [ -d "${XDG_CONFIG_HOME}/Xresources.d/" ]; then
	for _f in ${XDG_CONFIG_HOME}/Xresources.d/*.xresources ; do
		_suffix="$( basename "${_f}" )"
		_suffix="${_suffix%.xresources}"
		eval "cfg-xresources-${_suffix}() { ${EDITOR:-vim} \"${_f}\"; }"
		unset _f
		unset _suffix
	done
fi
266a322f870ef57a6d6446e1aaea966a1d401d49 | Shell | delkyd/alfheim_linux-PKGBUILDS | /worker/PKGBUILD | UTF-8 | 1,606 | 2.671875 | 3 | [] | no_license | # Maintainer: Aaron Fischer <mail@aaron-fischer.net>
# Contributor: J. W. Birdsong <jwbirdsong AT gmail DOT com>
# Contributor: Kevin Piche <kevin@archlinux.org>

# Arch Linux PKGBUILD for the Worker file manager (GPG-verified tarball).
pkgname=worker
pkgver=3.12.0
pkgrel=1
pkgdesc="A file manager for the X Window System"
arch=('i686' 'x86_64')
license=('GPL')
url="http://www.boomerangsworld.de/worker"
depends=('avfs' 'gcc-libs' 'libx11' 'file')
makedepends=('libxt')
source=(http://www.boomerangsworld.de/cms/${pkgname}/downloads/${pkgname}-${pkgver}.tar.bz2
        http://www.boomerangsworld.de/cms/${pkgname}/downloads/${pkgname}-${pkgver}.tar.bz2.asc)
sha256sums=('b9ac437683dc92a266ce492a7ce825acfdb82aec209ce5fffeaa094a7d8e9da1'
            'd577253c47bb9fde63e339ddf0f58f77e4bad290d2849641fb86ba468171b3be')
validpgpkeys=('F9299EE90A729029E71AF26B667132D0FBC52B37') # Ralf Hoffmann

build() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  # We need to disable the optimizations because of a bug in gcc
  # (see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63140 for details)
  # Ralf added a gcc constraint check to avoid compiling bad optimized code.
  # Because we run a recent version of gcc, we disable the check, and
  # disable also the compiler optimizations completely. Because the code
  # uses some C++11 features, we need to set this explicitly.
  #
  # To bypass the pthread bug, the LDFLAGS is set here explicit until
  # the bug is solved. See here for more detail and progress:
  # https://sourceforge.net/p/workerfm/mailman/message/34136485/
  CPPFLAGS="-O0 -std=gnu++11" LDFLAGS=-pthread ./configure --enable-xft --prefix=/usr --disable-cxx-check
  make
}

package() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  make DESTDIR="${pkgdir}" install
}
e66919ccb5495702f7e13654ad0320557d63182e | Shell | istio/istio.io | /scripts/lint_site.sh | UTF-8 | 8,345 | 3.828125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lints the istio.io site: spelling/markdown style per language, plus HTML
# and link checks over the generated ./public tree. Any failure sets
# FAILED=1 and the script exits non-zero at the very end.
set -e

FAILED=0

# Languages to lint come from the command line, defaulting to en + zh.
if [[ "$#" -ne 0 ]]; then
    LANGS="$*"
else
    LANGS="en zh"
fi

red='\e[0;31m'
clr='\e[0m'

# error MESSAGE... — print MESSAGE to stdout in red (escape codes via -e).
error() {
  echo -e "${red}$*${clr}"
}

# This performs spell checking and style checking over markdown files in a content
# directory. It transforms the shortcode sequences we use to annotate code blocks
# into classic markdown ``` code blocks, so that the linters aren't confused
# by the code blocks
# Arguments: $1 - content directory, $2 - mdspell dialect flag (e.g. --en-us)
# Side effects: may set the global FAILED=1.
check_content() {
    DIR=$1
    LANG=$2
    TMP=$(mktemp -d)

    # check for use of ```
    if grep -nr -e "\`\`\`" --include "*.md" "${DIR}"; then
        error "Ensure markdown content uses {{< text >}} for code blocks rather than \`\`\`. Please see https://istio.io/about/contribute/code-blocks/"
        FAILED=1
    fi

    # make the tmp dir
    mkdir -p "${TMP}"

    # create a throwaway copy of the content
    cp -R "${DIR}" "${TMP}"
    cp .spelling "${TMP}"
    cp mdl.rb "${TMP}"

    # replace the {{< text >}} shortcodes with ```plain
    find "${TMP}" -type f -name \*.md -exec sed -E -i "s/\\{\\{< text .*>\}\}/\`\`\`plain/g" {} ";"

    # replace the {{< mermaid >}} shortcodes with ```mermaid
    find "${TMP}" -type f -name \*.md -exec sed -E -i "s/\\{\\{< mermaid .*>\}\}/\`\`\`mermaid/g" {} ";"

    # replace the {{< /text >}} shortcodes with ```
    find "${TMP}" -type f -name \*.md -exec sed -E -i "s/\\{\\{< \/text .*>\}\}/\`\`\`/g" {} ";"

    # replace the {{< /mermaid >}} shortcodes with ```
    find "${TMP}" -type f -name \*.md -exec sed -E -i "s/\\{\\{< \/mermaid .*>\}\}/\`\`\`/g" {} ";"

    # elide url="*"
    find "${TMP}" -type f -name \*.md -exec sed -E -i "s/url=\".*\"/URL/g" {} ";"

    # elide link="*"
    find "${TMP}" -type f -name \*.md -exec sed -E -i "s/link=\".*\"/LINK/g" {} ";"

    # remove any heading anchors
    find "${TMP}" -type f -name \*.md -exec sed -E -i "s/(^#.*\S) *\{#.*\} */\1/g" {} ";"

    # switch to the temp dir
    pushd "${TMP}" >/dev/null

    if ! find . -type f -name '*.md' -print0 | xargs -0 -r mdspell "${LANG}" --ignore-acronyms --ignore-numbers --no-suggestions --report; then
        error "To learn how to address spelling errors, please see https://istio.io/about/contribute/build/#test-your-changes"
        FAILED=1
    fi

    if ! mdl --ignore-front-matter --style mdl.rb .; then
        FAILED=1
    fi

    # Absolute istio.io links are disallowed unless they point to an archived
    # or versioned copy of the site (negative lookahead needs grep -P).
    if grep -nrP --include "*.md" -e "\(https://istio.io/(?!v[0-9]\.[0-9]/|archive/)" .; then
        error "Ensure markdown content uses relative references to istio.io"
        FAILED=1
    fi

    if grep -nr --include "*.md" -e "(https://preliminary.istio.io" .; then
        error "Ensure markdown content doesn't contain references to preliminary.istio.io"
        FAILED=1
    fi

    if grep -nr --include "*.md" -e https://github.com/istio/istio/blob/ .; then
        error "Ensure markdown content uses {{< github_blob >}}"
        FAILED=1
    fi

    if grep -nr --include "*.md" -e https://github.com/istio/istio/tree/ .; then
        error "Ensure markdown content uses {{< github_tree >}}"
        FAILED=1
    fi

    if grep -nr --include "*.md" -e https://raw.githubusercontent.com/istio/istio/ .; then
        error "Ensure markdown content uses {{< github_file >}}"
        FAILED=1
    fi

    # go back whence we came
    popd >/dev/null

    # cleanup
    rm -fr "${TMP}"
}

# Languages NOT being linted this run; each requested language is removed
# from this list below, and the remainder is excluded from the ./public scan.
SKIP_LANGS=( en zh pt-br )
for lang in $LANGS; do
    for i in "${!SKIP_LANGS[@]}"; do
        if [[ "${SKIP_LANGS[$i]}" = "${lang}" ]]; then
            unset 'SKIP_LANGS[${i}]'
        fi
    done
    # Re-pack the array so the indices are contiguous after unset.
    SKIP_LANGS=( "${SKIP_LANGS[@]}" )

    if [[ "$lang" == "en" ]]; then
        # English docs must declare test: and owner: front-matter attributes.
        list=$(find ./content/en/docs -name '*.md' -not -exec grep -q '^test: ' {} \; -print)
        if [[ -n $list ]]; then
            echo "$list"
            error "Ensure every document *.md file includes a test: attribute in its metadata"
            FAILED=1
        fi
        list=$(find ./content/en/docs -name 'index.md' -not -exec grep -q '^owner: ' {} \; -print)
        if [[ -n $list ]]; then
            echo "$list"
            error "Ensure every document index.md file includes an owner: attribute in its metadata"
            FAILED=1
        fi

        check_content "content/$lang" --en-us

        while IFS= read -r -d '' f; do
            if grep -H -n -e '“' "${f}"; then
                # shellcheck disable=SC1111
                error "Ensure content only uses standard quotation marks and not “"
                FAILED=1
            fi
        done < <(find ./content/en -type f \( -name '*.html' -o -name '*.md' \) -print0)
    elif [[ "$lang" == "zh" ]]; then
        # only check English words in Chinese docs
        check_content "content/$lang" --en-us

        while IFS= read -r -d '' f; do
            if grep -H -n -E -e "- (/docs|/about|/blog|/faq|/news)" "${f}"; then
                error "Ensure translated content doesn't include aliases for English content"
                FAILED=1
            fi
            if grep -H -n -E -e '"(/docs|/about|/blog|/faq|/news)' "${f}"; then
                error "Ensure translated content doesn't include references to English content"
                FAILED=1
            fi
            if grep -H -n -E -e '\((/docs|/about|/blog|/faq|/news)' "${f}"; then
                error "Ensure translated content doesn't include references to English content"
                FAILED=1
            fi
        done < <(find ./content/zh -type f \( -name '*.html' -o -name '*.md' \) -print0)
    elif [[ "$lang" == "pt-br" ]]; then
        # only check English words in Portuguese Brazil docs
        check_content "content/$lang" --en-us
    fi
done

if [ -d ./public ]; then
    # Build "-name LANG -prune -o" clauses so skipped languages are not
    # scanned in the generated site.
    if [[ ${#SKIP_LANGS[@]} -ne 0 ]]; then
        printf -v find_exclude " -name %s -prune -o" "${SKIP_LANGS[@]}"; read -r -a find_exclude <<< "$find_exclude"
    fi

    while IFS= read -r -d '' f; do
        if grep -H -n -i -e blockquote "${f}"; then
            error "Ensure content only uses {{< tip >}}, {{< warning >}}, {{< idea >}}, and {{< quote >}} instead of block quotes"
            FAILED=1
        fi

        #if grep -H -n -e "\"https://github.*#L[0-9]*\"" "${f}"; then
        #    error "Ensure content doesn't use links to specific lines in GitHub files as those are too brittle"
        #    FAILED=1
        #fi
    done < <(find ./public "${find_exclude[@]}" -type f -name '*.html' -print0)

    # NOTE(review): ignore_files is referenced here but only assigned inside
    # the linkinator branch below — at this point it is empty/unset. Confirm
    # whether htmlproofer was meant to get the same per-language ignore list.
    if ! htmlproofer ./public --file-ignore "${ignore_files}" --assume-extension --http-status-ignore "0,429" --check-html --check-external-hash --check-opengraph --checks-to-ignore "LinkCheck"; then
        FAILED=1
    fi

    if [[ ${SKIP_LINK_CHECK:-} != "true" ]]; then
        if [[ ${#SKIP_LANGS[@]} -ne 0 ]]; then
            printf -v ignore_files "/^.\/public\/%s/," "${SKIP_LANGS[@]}"; ignore_files="${ignore_files%,}"
        fi
        echo "Running linkinator..."
        if [[ ${CHECK_EXTERNAL_LINKS:-} == "true" ]]; then
            if ! linkinator public/ -r -s 'github.com localhost:3000 localhost:5601 localhost:8001 localhost:9080 localhost:9081 en.wikipedia.org my-istio-logs-database.io' --silent; then
                FAILED=1
            fi
        else
            #TODO: Remove .../workload-selector/ from ignored links. PRs take a long time to get through istio/api, and a link is broken from there. Once this PR is complete, remove it: https://github.com/istio/api/pull/1405
            if ! linkinator public/ -r -s 'github.com localhost:3000 localhost:5601 localhost:8001 localhost:9080 localhost:9081 en.wikipedia.org my-istio-logs-database.io ^((?!localhost).)*$ /docs/reference/config/type/v1beta1/workload-selector/' --silent; then
                FAILED=1
            fi
        fi
    fi
fi

if [[ ${FAILED} -eq 1 ]]; then
    error "LINTING FAILED"
    exit 1
fi
c9ab4e9087ae7b774e46a74976e274babd933734 | Shell | maruccccccc/dotfiles | /.bashrc | UTF-8 | 1,688 | 3.140625 | 3 | [] | no_license | PECO_VERSION='v0.5.2'
# Pick the peco release artifact matching the current OS.
case "${OSTYPE}" in
    darwin*)
        OS_TYPE="darwin"
        ARCH_TYPE="zip"
        ;;
    linux*)
        OS_TYPE="linux"
        ARCH_TYPE="tar.gz"
        ;;
esac

# Install peco on first use (requires PECO_VERSION, network and sudo).
if [ ! -f /usr/local/bin/peco ]; then
    wget -O - "https://github.com/peco/peco/releases/download/${PECO_VERSION}/peco_${OS_TYPE}_amd64.${ARCH_TYPE}" | tar --directory /tmp -zxvf -
    sudo mv /tmp/peco_${OS_TYPE}_amd64/peco /usr/local/bin
    rm -rf /tmp/peco_${OS_TYPE}_amd64/
fi

# Interactive history search: pick a deduplicated history entry with peco,
# run it, and push it back onto the history list.
function hs() {
    cmd=$(history | sort -r | awk '{for(i=2;i<NF;i++){printf("%s%s",$i,OFS=" ")}print $NF;}' | awk '!a[$0]++' | sed -e '1,2d' | sed '/^hs/d' | peco --query $1)
    eval ${cmd};
    history -s $cmd
}

# Like hs, but copy the selected command to the clipboard instead of running it.
function hsc() {
    cmd=$(history | sort -r | awk '{for(i=2;i<NF;i++){printf("%s%s",$i,OFS=" ")}print $NF;}' | awk '!a[$0]++' | sed -e '1,2d' | sed '/^hs/d' | peco --query $1)
    echo -n $cmd | pbcopy
}

# Merge history across open shells: append this shell's new entries, then
# reload the combined history file.
function share_history() {
    history -a
    history -c
    history -r
}

# Create a directory and cd into it.
# BUG FIX: arguments were unquoted ('mkdir $1; cd $1'), which broke on names
# containing spaces, and cd ran even when mkdir failed. '-p' keeps the old
# "cd into an already existing directory" behaviour, '--' protects names
# starting with '-'.
function mkcd() {
  mkdir -p -- "$1" && cd -- "$1"
}

# Start an ssh-agent and load the GitHub key into it.
function ak() {
  eval `ssh-agent`
  ssh-add ~/.ssh/github_id_rsa
}

shopt -u histappend
PROMPT_COMMAND='share_history'

alias ls='ls -G'
alias sl='ls -G'
alias ks='ls -G'
alias ll='ls -lG'
alias la='ls -laG'
alias lls='ls -G'
alias ivm='vim'
alias vmi='vim'
alias VIM='vim'
alias r='fc -s'
alias gba='git branch -vv'
alias gis='git status'
alias gich='git checkout'
alias gia='git add .'
alias gic='git commit'
alias gip="git push origin \$(git branch --contains | awk '{print \$2}')"
alias ps='ps --sort=start_time'

if [ "$TERM" = xterm ]; then TERM=xterm-256color; fi
export PS1="\[\e[36m\][\u:\h \w ] \[\e[33m\]$ \[\e[0m\]"
# NOTE(review): GREP_OPTIONS is deprecated by GNU grep, and --color=always
# injects escape codes into pipelines; --color=auto is usually intended.
export GREP_OPTIONS='--color=always'
export EDITOR='/usr/bin/vim'
export HISTSIZE=100000
151cc74afe5a600ba2a03aaf8a709ce49a3e582b | Shell | HaoLiSky/EON | /tools/mpmd/run.sh | UTF-8 | 896 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# Launch an EON aKMC run in MPMD mode: VASP potential ranks and EON client
# ranks share a single mpirun invocation. With '-n' print the number of
# cores to reserve (rounded up to whole nodes); with '--dry-run' print the
# mpirun command and rank totals without running anything.
set -e

export EON_NUMBER_OF_CLIENTS=3
cores_per_pot=4
cores_per_node=8

eon_path=../eon
pot_path=vasp_mpmd

export EON_SERVER_PATH=${eon_path}/akmc.py
client_path=${eon_path}/client/client_mpi

# One extra client rank serves as the coordinating/server side.
client_ranks=$((EON_NUMBER_OF_CLIENTS + 1))
pot_ranks=$((cores_per_pot * EON_NUMBER_OF_CLIENTS))

command="mpirun -n $pot_ranks -wdir vasp $pot_path : -n $client_ranks $client_path"

#if no arguments
if [ -z "$1" ]; then
    ${eon_path}/tools/mkvasp.py $EON_NUMBER_OF_CLIENTS vasp
    $command
else
    total_ranks=$((pot_ranks + client_ranks))
    if [ "$1" = "-n" ]; then
        # Core count rounded up to a whole number of nodes.
        echo $((cores_per_node * ((total_ranks - 1) / cores_per_node) + cores_per_node))
    fi
    if [ "$1" = "--dry-run" ]; then
        echo mpirun command: $command
        echo total nodes: $((total_ranks / cores_per_node))
        echo total ranks: $total_ranks
    fi
fi
94654b35146b6bde6cc0617facf479bafab4de6c | Shell | jonullberg/prog_check | /progcheck.sh | UTF-8 | 1,437 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Prog Check Dev Startup
# Creates (or re-attaches to) a tmux session with three windows:
#   0: Sublime Text + a console running the initial gulp build
#   1: mongod daemon | mongo shell
#   2: nodemon dev server | gulp watch
SESSION_NAME="pc-dev"

cd ~/code/projects/prog_check

tmux has-session -t ${SESSION_NAME}

if [ $? != 0 ]
then
	# Create the session (detached).
	tmux new-session -s ${SESSION_NAME} -n subl -d

	# First window (0) -- subl and console
	tmux send-keys -t ${SESSION_NAME} 'subl .' Enter

	# Dev-Server (1): mongod on the left, mongo shell on the right.
	tmux new-window -n db -t ${SESSION_NAME}
	tmux send-keys -t ${SESSION_NAME}:1 'mongod --dbpath=./db --smallfiles' Enter
	tmux split-window -h
	tmux send-keys -t ${SESSION_NAME}:1 'mongo progcheck_dev' Enter

	# Testing (2): nodemon on the left, gulp watch on the right.
	# Bug fix: the original sent the literal key name "Enger" (a typo for
	# "Enter"), so the nodemon command was typed but never executed; the
	# 'gulp watch' line was missing its Enter entirely (the commented-out
	# reference version below includes it).
	tmux new-window -n dev -t ${SESSION_NAME}
	tmux send-keys -t ${SESSION_NAME}:2 'nodemon server/server -w ./server' Enter
	tmux split-window -h
	tmux send-keys -t ${SESSION_NAME}:2 'gulp watch' Enter

	# Go back to window 0 and kick off the initial build.
	tmux select-window -t ${SESSION_NAME}:0
	tmux send-keys -t ${SESSION_NAME}:0 'gulp build' Enter
fi

tmux attach -t ${SESSION_NAME}

# tmux new -sprogcheck \; \
# send-keys -t 0 'subl .' Enter \; \
# new-window -n mongo \; \
# new-window -n dev \; \
# select-window -t 1 \; \
# send-keys -t 1 'mongod --dbpath=./db --smallfiles' Enter \; \
# split-window -h \; \
# send-keys -t 1 'mongo progcheck_dev' Enter \; \
# select-window -t 2 \; \
# send-keys -t 2 'gulp build' Enter \; \
# split-window -h \; \
# send-keys -t 2 'nodemon server/server -w ./server' Enter \; \
# split-window -v \; \
# send-keys -t 2 'gulp watch' Enter
| true |
fc6298c24fd89af18bf8e71645e0112572cabf86 | Shell | huangyingw/bashrc | /fav.sh | UTF-8 | 290 | 3.140625 | 3 | [] | no_license | #! /bin/bash
# Extract the favourites directory path from ~/.passwd (third field of
# the line containing "fav").
# NOTE(review): if ~/.passwd is missing, FAV stays unset and the find
# below runs `find ""` -- confirm that is acceptable.
if [ -f $HOME/.passwd ]; then
	FAV=`cat $HOME/.passwd |grep fav|awk '{print $3}'`
fi
# Rebuild the symlink farm from scratch.
rm /media/volgrp/fav/*
# Pick up to 100 recent (<100 days), large (>700M) files and symlink
# each into /media/volgrp/fav under its basename.
# NOTE(review): `ls -rt` only orders files within each -exec batch, so
# the global ordering is approximate for large result sets.
find "$FAV" -type f -mtime -100 -size +700M -exec ls -rt {} \+|tail -n 100|while read ss
do
	ftemp=`basename "$ss"`
	ln -s "$ss" /media/volgrp/fav/"$ftemp"
done
| true |
95b1c0ebd7e6a058d9584b8cd64a457c8fe87d54 | Shell | Russ76/jetson_easy | /include/modules.sh | UTF-8 | 12,637 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Copyright (C) 2018, Raffaello Bonghi <raffaello@rnext.it>
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ':'-separated list of "name|mode" entries (mode: DIS/STOP/RUN/AUTO).
MODULES_LIST=""
# Directory that holds one sub-folder per installable module.
MODULES_FOLDER="modules"
# Default name setup jetson_easy
MODULES_CONFIG_NAME="setup.txt"
MODULES_CONFIG_FOLDER="config"
# Absolute paths of the active configuration (filled by modules_load_config).
MODULES_CONFIG_PROJECT=""
MODULES_CONFIG_PATH=""
MODULES_CONFIG_FILE=""
# Marker file whose presence keeps the sudo keep-alive loop running.
MODULES_SUDO_ME_FILE="jetson_easy.sudo"
# Default configuration this code start not in remote mode
MODULE_REMOTE=0
# --------------------------------
# LOAD_MODULES
# --------------------------------
# Sort the ':'-separated entries of MODULES_LIST alphabetically.
# Bug fix: the original assigned IFS at top level inside the function and
# never restored it, leaking a modified IFS (':' then $'\n') into the
# calling shell and corrupting word splitting everywhere else.
modules_sort()
{
    local -a entries
    local joined
    # Split on ':'; the IFS prefix applies only to this read command.
    IFS=':' read -r -a entries <<< "$MODULES_LIST"
    # Sort one entry per line; a function-local newline IFS keeps entries
    # with embedded spaces intact when re-collecting the sorted output.
    local IFS=$'\n'
    entries=($(printf '%s\n' "${entries[@]}" | sort))
    # Re-join with ':' inside a subshell so the local IFS stays untouched.
    joined=$(IFS=':'; printf '%s' "${entries[*]}")
    MODULES_LIST=$joined
}
# MODULES_DEFAULT options:
# - DIS - Disable module
# - STOP - NO Install
# - RUN - Default install
# - AUTO - Automatic mode
# Scan $MODULES_FOLDER and populate MODULES_LIST with each module's
# default run mode. A module is a sub-folder containing a script named
# after the folder; modules whose default mode is "DIS" are skipped.
modules_load_default()
{
    # Read modules
    for folder in $MODULES_FOLDER/* ; do
        if [ -d "$folder" ] ; then
            # Check if exist the same file with the name of the folder
            local FILE_NAME=$(echo $folder | cut -f2 -d "/")
            local FILE="$folder"/$FILE_NAME.sh
            if [ -f $FILE ] ; then
                # Drop leftovers from the previously sourced module.
                unset -f script_load_default
                unset MODULE_DEFAULT
                # Load source
                source "$FILE"
                # If the module does not declare MODULE_DEFAULT, fall back
                # to "STOP". Bug fix: the original wrote
                #   $MODULE_DEFAULT = "STOP"
                # which tried to *execute* the (empty) expansion as a
                # command instead of assigning to the variable.
                if [ -z ${MODULE_DEFAULT+x} ] ; then
                    MODULE_DEFAULT="STOP"
                fi
                # Register every module that is not disabled ("DIS").
                # Quoted so the test is well-formed even if empty.
                if [ "$MODULE_DEFAULT" != "DIS" ] ; then
                    MODULES_LIST+="$FILE_NAME|$MODULE_DEFAULT:"
                fi
                # Give the module a chance to initialise its variables.
                if type script_load_default &>/dev/null ; then
                    script_load_default
                    # echo "Load Default variable for: $MODULE_NAME"
                fi
            fi
        fi
    done
    # Sort all modules
    modules_sort
}
# Load configuration return status:
# 0 - Load file or folder
# 1 - Load default
# Resolve the configuration location from $1 (file, directory, or empty
# for the default config/setup.txt) and fill MODULES_CONFIG_PATH /
# MODULES_CONFIG_FILE / MODULES_CONFIG_PROJECT.
# Returns 0 when an existing file/folder was found, 1 when falling back
# to the defaults.
# NOTE(review): relies on USER_PWD being exported by the entry script --
# it is not defined in this file.
modules_load_config()
{
    #Default load setup config name folder
    local MODULES_CONFIG=$MODULES_CONFIG_FOLDER/$MODULES_CONFIG_NAME
    if [ ! -z $1 ] ; then
        MODULES_CONFIG=$1
    fi
    local config_path=""
    # Load config path
    if [[ "$MODULES_CONFIG" = /* ]]; then
        # Save absolute path
        config_path="$MODULES_CONFIG"
    else
        # Get absolute path from local path
        config_path="$USER_PWD/$MODULES_CONFIG"
    fi
    # Check configuration file
    if [[ -d $config_path ]]; then
        # If is a directory check if exist file MODULES_CONFIG_NAME (standard name is: config/setup.txt)
        local setup_file=$config_path/$MODULES_CONFIG_NAME
        # Check if exist config file
        if [[ -f $setup_file ]]; then
            # echo "$setup_file is a jetson easy folder"
            # Set variables
            MODULES_CONFIG_PATH=$(realpath $config_path)
            MODULES_CONFIG_FILE=$(realpath $setup_file)
            MODULES_CONFIG_PROJECT=$(basename $MODULES_CONFIG_PATH)
            return 0
        #else
            #echo "$setup_file is not a jetson easy folder"
        fi
    elif [[ -f $config_path ]]; then
        #echo "$config_path is a jetson easy file"
        # Set variables
        MODULES_CONFIG_PATH=$(realpath $USER_PWD)
        MODULES_CONFIG_FILE=$(realpath $config_path)
        MODULES_CONFIG_PROJECT=$(basename $MODULES_CONFIG_PATH)
        return 0
    #else
        #echo "$config_path is not valid"
    fi
    # Neither a valid folder nor a file: fall back to the defaults.
    MODULES_CONFIG_PATH="$USER_PWD/$MODULES_CONFIG_FOLDER"
    MODULES_CONFIG_FILE="$MODULES_CONFIG_PATH/$MODULES_CONFIG_NAME"
    MODULES_CONFIG_PROJECT=$(basename $MODULES_CONFIG_PATH)
    #echo "MODULES_CONFIG_PATH=$MODULES_CONFIG_PATH"
    #echo "MODULES_CONFIG_FILE=$MODULES_CONFIG_FILE"
    return 1
}
# Load the module list: defaults first, then (when the config file
# exists) overrides sourced from $MODULES_CONFIG_FILE.
modules_load()
{
    if [ -f $MODULES_CONFIG_FILE ] ; then
        # echo "Setup \"$MODULES_CONFIG_FILE\" found!"
        # Load all default values
        modules_load_default
        # Load and overwrite with setup file
        source $MODULES_CONFIG_FILE
        # Sort all modules
        modules_sort
    else
        # echo "Setup \"$MODULES_CONFIG_FILE\" NOT found! Load default"
        modules_load_default
    fi
}
# Write the current configuration (remote credentials, MODULES_LIST, and
# each module's own variables via its script_save hook) to
# $MODULES_CONFIG_FILE, creating the folder when needed.
# NOTE(review): MODULE_PASSWORD is written to the file in plain text.
modules_save()
{
    # Check if exist folder and file otherwise create it
    if [ ! -d "$MODULES_CONFIG_PATH" ] ; then
        mkdir -p $MODULES_CONFIG_PATH
    fi
    # First write truncates the file; subsequent writes append.
    echo "# Configuration Biddibi boddibi Boo" > $MODULES_CONFIG_FILE
    echo "# Author: Raffaello Bonghi" >> $MODULES_CONFIG_FILE
    echo "# Email: raffaello@rnext.it" >> $MODULES_CONFIG_FILE
    echo "" >> $MODULES_CONFIG_FILE
    # Add remote information
    if [ ! -z $MODULE_REMOTE_USER ] || [ ! -z $MODULE_REMOTE_HOST ]; then
        echo "# Remote information" >> $MODULES_CONFIG_FILE
        echo "MODULE_REMOTE_USER=\"$MODULE_REMOTE_USER\"" >> $MODULES_CONFIG_FILE
        echo "MODULE_PASSWORD=\"$MODULE_PASSWORD\"" >> $MODULES_CONFIG_FILE
        echo "MODULE_REMOTE_HOST=\"$MODULE_REMOTE_HOST\"" >> $MODULES_CONFIG_FILE
        echo "" >> $MODULES_CONFIG_FILE
    fi
    echo "# List of availables modules" >> $MODULES_CONFIG_FILE
    echo "MODULES_LIST=\"$MODULES_LIST\"" >> $MODULES_CONFIG_FILE
    echo "" >> $MODULES_CONFIG_FILE
    echo "# ----------------------------- " >> $MODULES_CONFIG_FILE
    echo "# - Modules variables - " >> $MODULES_CONFIG_FILE
    echo "# ----------------------------- " >> $MODULES_CONFIG_FILE
    echo "" >> $MODULES_CONFIG_FILE
    # Let every module append its own variables via script_save.
    for folder in $MODULES_FOLDER/* ; do
        if [ -d "$folder" ] ; then
            # Check if exist the same file with the name of the folder
            local FILE_NAME=$(echo $folder | cut -f2 -d "/")
            local FILE="$folder"/$FILE_NAME.sh
            if [ -f $FILE ] ; then
                # Drop the previous module's hook before sourcing.
                unset -f script_save
                # Load source
                source "$FILE"
                # Check if exist the function
                if type script_save &>/dev/null
                then
                    # Write name module
                    echo "# Variables for: $MODULE_NAME" >> $MODULES_CONFIG_FILE
                    # Save script
                    script_save $MODULES_CONFIG_FILE
                    # Add space
                    echo "" >> $MODULES_CONFIG_FILE
                fi
            fi
        fi
    done
    # echo "Save in $MODULES_CONFIG_FILE"
}
# modules_isInList NAME: print the run mode ("RUN", "AUTO", ...) recorded
# for module NAME in MODULES_LIST, or "STOP" when the module is absent.
modules_isInList()
{
    local entry
    local -a entry_list
    # Split the ':'-separated list; the IFS prefix is local to this read.
    IFS=':' read -r -a entry_list <<< "$MODULES_LIST"
    for entry in "${entry_list[@]}"; do
        # Each entry has the form "name|mode".
        if [ "$(echo $entry | cut -d "|" -f 1)" == "$1" ] ; then
            echo $(echo $entry | cut -d "|" -f 2)
            return 0
        fi
    done
    # Module not present: report the default mode.
    echo "STOP"
}
# modules_update NAME MODE: set module NAME's run mode to MODE in
# MODULES_LIST, appending the entry if it is not listed yet, then re-sort.
modules_update()
{
    IFS=':' read -ra MODULE <<< "$MODULES_LIST"
    local new_list
    local mod
    local check=0
    for mod in "${MODULE[@]}"; do
        # Take name
        local name=$(echo $mod | cut -d "|" -f 1)
        # Check if the name is the same
        if [ $name == $1 ] ; then
            new_list+=":$1|$2"
            check=1
        else
            # Add same module in the list
            new_list+=":$mod"
        fi
    done
    # If this module is not in list add in tail
    if [ $check == 0 ] ; then
        new_list+=":$1|$2"
    fi
    #Update MODULES_LIST
    # NOTE(review): new_list starts with a leading ':'; the empty field
    # it creates is discarded by modules_sort's word splitting, but the
    # intermediate value is not a clean "a|X:b|Y" list.
    MODULES_LIST=$new_list
    # Sort all modules
    modules_sort
}
# Modules check sudo
# https://serverfault.com/questions/266039/temporarily-increasing-sudos-timeout-for-the-duration-of-an-install-script
# Keep the sudo credential cache warm for the duration of a long install:
# create a marker file, then refresh the sudo timestamp (sudo -v) every
# 10 seconds in a background loop until the marker is removed by
# modules_sudo_me_stop.
modules_sudo_me_start()
{
    # write sudo me file
    touch $MODULES_SUDO_ME_FILE
    # Background refresh loop; exits once the marker file disappears.
    while [ -f $MODULES_SUDO_ME_FILE ]; do
        #echo "checking $$ ...$(date)"
        sudo -v
        sleep 10
    done &
}
# Terminate the sudo keep-alive: deleting the marker file makes the
# background loop started by modules_sudo_me_start fall out of its
# while condition and exit.
modules_sudo_me_stop()
{
    rm "$MODULES_SUDO_ME_FILE"
}
# Invoke the currently sourced module's script_run hook from inside the
# module's own folder, then return to the previous directory.
# NOTE: relies on the caller (modules_run) having set the FOLDER and
# LOCAL_FOLDER variables and having sourced the module script.
modules_run_script()
{
    # Check if exist the function
    if type script_run &>/dev/null ; then
        # Move to same folder
        cd $FOLDER
        # run script
        # exesudo script_run
        script_run $LOCAL_FOLDER
        # Restore previuous folder
        cd $LOCAL_FOLDER
    fi
}
# Execute every module listed in MODULES_LIST according to its mode:
#   RUN  - always run the module's script_run hook
#   AUTO - run only when the module's script_check hook returns 1
#   anything else - skip.
# A sudo keep-alive loop is held open for the whole run.
modules_run()
{
    # Load exesudo
    #source include/exesudo.sh
    if [ ! -z $MODULES_LIST ] ; then
        # Start sudo_me
        modules_sudo_me_start
        echo "Start install script..."
        IFS=':' read -ra MODULE <<< "$MODULES_LIST"
        for mod in "${MODULE[@]}"; do
            # Take name and option
            local name=$(echo $mod | cut -d "|" -f 1)
            local option=$(echo $mod | cut -d "|" -f 2)
            # Check if exist the same file with the name of the folder
            local FOLDER="$MODULES_FOLDER/$name"
            local FILE="$FOLDER/$name.sh"
            # Project-local modules named "X-<project>" live in the
            # configuration folder instead of the modules folder.
            if [ $name == "X-$MODULES_CONFIG_PROJECT" ] ; then
                FOLDER="$MODULES_CONFIG_PATH"
                FILE="$FOLDER/X-$MODULES_CONFIG_PROJECT.sh"
            fi
            if [ -f $FILE ] ; then
                # Drop the previous module's hooks before sourcing.
                unset -f script_run
                unset -f script_check
                unset MODULE_DEFAULT
                # Remember where we are; modules_run_script cds around.
                local LOCAL_FOLDER=$(pwd)
                case $option in
                    "AUTO") # Load source
                            source "$FILE"
                            echo "Running module - $MODULE_NAME in mode AUTO mode"
                            # Only run when script_check says an update is needed.
                            if type script_check &>/dev/null ; then
                                # Move to same folder
                                cd $FOLDER
                                # Run script check function
                                script_check $LOCAL_FOLDER
                                local RET=$?
                                # Restore previuous folder
                                cd $LOCAL_FOLDER
                                if [ $RET == 1 ] ; then
                                    # run Script
                                    modules_run_script
                                else
                                    echo "Not require other updates"
                                fi
                            else
                                echo "Any check function are installed, please check module $MODULE_NAME"
                            fi ;;
                    "RUN") # Load source
                           source "$FILE"
                           echo "Running module - $MODULE_NAME"
                           # run Script
                           modules_run_script ;;
                    *) ;;
                esac
            fi
        done
        # Stop sudo_me
        modules_sudo_me_stop
        echo "... Done"
    else
        echo "No modules"
    fi
}
# Print "1" when a reboot is pending -- either the distribution dropped
# /var/run/reboot-required or a module set MODULES_REQUIRE_REBOOT --
# and "0" otherwise.
modules_require_reboot()
{
    if [ -f /var/run/reboot-required ] || [ -n "${MODULES_REQUIRE_REBOOT+x}" ] ; then
        echo "1"
    else
        echo "0"
    fi
}
| true |
4b7e2cefd9e2dbc8b6e80804823b206279945410 | Shell | lcnight/lcnight | /tools/database/create_dbtable | UTF-8 | 951 | 3.453125 | 3 | [] | no_license | #! /bin/bash
# Provision 100 databases (db_map_0..99), each with 100 tables
# (t_map_0..99), on a remote MySQL server.
# NOTE(review): the password is hard-coded and passed on the mysql
# command line (visible in `ps`); consider a ~/.my.cnf or --login-path.
host=10.1.1.60
name=root
password=ta0mee
# NOTE(review): $# is never negative, so this usage branch is dead code;
# presumably the intent was a different threshold -- confirm.
if [ $# -lt 0 ] ; then
    echo "to be execute sql script in each database table";
    exit;
fi;
# Emit the CREATE DATABASE statement for shard $1.
get_db()
{
    echo "create database db_map_$1 DEFAULT CHARACTER SET utf8;"
}
# Emit the CREATE TABLE statement for table $2 inside database $1.
get_table()
{
    echo "CREATE TABLE db_map_$1.t_map_$2 ( id int(11) NOT NULL auto_increment, md5sum char(32) NOT NULL, string varchar(4096) NOT NULL, PRIMARY KEY (id), UNIQUE KEY md5sum (md5sum)) ENGINE=InnoDB AUTO_INCREMENT=57842 DEFAULT CHARSET=utf8;";
}
db_num=100
tb_num=100
echo "db host:$host, name:$name, password:$password";
for((i=0;i<$db_num;i++)); do
    echo;
    echo "target is db id $i";
    mysql -h$host -u$name -p$password -e "$(get_db $i)";
    for((j=0;j<$tb_num;j++)); do
        #echo "mysql -h$host -u$name -p$password --default-character-set=utf8 -e $(get_table $i $j)";
        echo -e "\ttable id $j";
        mysql -h$host -u$name -p$password -e "$(get_table $i $j)";
    done;
done;
| true |
71ddc3b882c71397d06f776e47c85fe3d7eb98b7 | Shell | probepark/akka-grpc-ddata-shopping-cart | /src/docker/opt/docker/bin/entrypoint.sh | UTF-8 | 141 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env bash
# Abort immediately if anything below fails.
set -e

# When DYN_JAVA_OPTS is provided, expand any variable references it
# contains (via eval/echo) and prepend the result to JAVA_OPTS for the
# child process.
if [ -n "$DYN_JAVA_OPTS" ]; then
  JAVA_OPTS="$(eval "echo $DYN_JAVA_OPTS") $JAVA_OPTS"
  export JAVA_OPTS
fi

# Replace this shell with the container's requested command.
exec "$@"
| true |
278d74412357c07c352a2f074c2f4e45752c9960 | Shell | pmandrik/HH_pair | /HH_bbWW/run.sh | UTF-8 | 867 | 2.96875 | 3 | [] | no_license |
# Set up the LCG ROOT 6.12 + GCC 6.2 toolchain from CVMFS.
source /cvmfs/sft.cern.ch/lcg/releases/ROOT/6.12.04-13971/x86_64-slc6-gcc62-opt/ROOT-env.sh
export PATH=/cvmfs/sft.cern.ch/lcg/releases/ROOT/6.12.04-13971/x86_64-slc6-gcc62-opt/bin:$PATH
source /cvmfs/sft.cern.ch/lcg/releases/gcc/6.2.0/x86_64-slc6/setup.sh

wdir=`pwd`
# All analyser logs are collected here.
adir="results"
mkdir $adir

# run_anal POSTFIX DELPHES_FILE LHE_FILE:
# regenerate the Delphes interface, prune unused branches, then run the
# WWbb analyser, logging to $adir/log_POSTFIX.log.
run_anal(){
  postfix=$1
  input_file_delphes=$2
  input_file_lhe=$3
  root -l -b -q "../make_interface.C(\""$input_file_delphes"\")"
  python ../turn_off_unused_branches.py
  root -l -b -q "analyser_WWbb.C(\""$input_file_delphes"\", \""$input_file_lhe"\")" > $adir/"log_"$postfix".log"
}

# Process the 12 EFT benchmark samples, then the SM sample.
for index in 1 2 3 4 5 6 7 8 9 10 11 12; do
  run_anal "EFT_"$index "../../../test_bbWW/delphes_"$index".root" "../../../HH_lhe_samples/GF_HH_"$index"_cmsgrid_final.lhe"
done
run_anal "SM" "../../../test_bbWW/delphes_1.root" "../../../HH_lhe_samples/GF_HH_1_cmsgrid_final.lhe"
| true |
34e1633d2bbebf4c919822604a299aa8cf8878f9 | Shell | ShroudZzz/LearnShell | /scripts/sh10.sh | UTF-8 | 574 | 3.28125 | 3 | [] | no_license | #! /bin/bash
# Report which of the common services (www, ssh, ftp, mail) have a
# listener bound on this host, judged by their well-known ports in the
# netstat listing. The original repeated the same four-line pattern per
# service; it is factored into a single helper here.
echo "Now, I will detect your LINUX server's services!"
echo -e "The www, ftp, ssh, and mail will be detect! \n"

# check_service PORT NAME: print a notice when something is listening on
# PORT according to `netstat -tuln` (the trailing space in the pattern
# prevents ":80" from matching ":8080", etc.).
check_service() {
    local port=$1
    local name=$2
    if netstat -tuln | grep -q ":${port} "; then
        echo "${name} is runing in your system."
    fi
}

check_service 80 "www"
check_service 22 "SSH"
check_service 21 "FTP"
check_service 25 "Mail"
| true |
b367f1f07b8e7e9df71b3a9481b2556e3570dd70 | Shell | chen3feng/devenv | /install | UTF-8 | 875 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Marker appended to managed rc-file entries so re-running the installer
# updates them in place instead of appending duplicates.
tag="###chen3feng-devenv###"
# Absolute directory of this script (dotfile sources live next to it).
this_dir="$(cd $(dirname $0) && pwd)"

# install COMMAND SRC DEST COMMENT_CHAR:
# ensure DEST contains "COMMENT_CHAR tag" followed by "COMMAND this_dir/SRC".
# If the tag line already exists, the line right after it is rewritten;
# otherwise both lines are appended.
# NOTE: this shadows /usr/bin/install for the rest of the script.
install() {
    local command="$1"
    local from="$this_dir/$2"
    local to="$3"
    local comment="$4 $tag"
    local stmt="$command $from"
    if [ -e $to ] && grep "$comment" "$to" > /dev/null; then
        # Find the tag line
        local lineno=$(grep -n -m1 "$comment" "$to" | cut -f1 -d:)
        let lineno++
        sed -i "${lineno}s|.*|$stmt|g" $to # Replace next line of tag line
    else
        echo "$comment" >> $to
        echo "$stmt" >> $to
    fi
    echo "$to is updated."
}

# Wire each dotfile into the user's rc files, using each file format's
# comment character ('"' for vimrc, '#' elsewhere; inputrc uses $include).
install source _bashrc ~/.bashrc "#"
install source _zshrc ~/.zshrc "#"
install source _vimrc ~/.vimrc '"'
install "\$include" _inputrc ~/.inputrc "#"
$this_dir/git/install

# Install the `vim-plug` plug manager
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
    https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
| true |
f050cb4d349c2fd856c346248690359cc1411027 | Shell | manucarbonell/Laia | /egs/iam/utils/join_lines_arks.sh | UTF-8 | 3,901 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e;
export LC_NUMERIC=C;

SDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)";
[ "$(pwd)/utils" = "$SDIR" ] || {
echo "Run the script from \"$(dirname $SDIR)\"!" >&2 && exit 1;
}
[ ! -f "$(pwd)/utils/parse_options.inc.sh" ] && \
echo "Missing $(pwd)/utils/parse_options.inc.sh file!" >&2 && exit 1;

# Option defaults; parse_options.inc.sh lets --flag value override them.
add_dummy_ctc_end=false;
add_wspace_border=true;
eps="<eps>";
regex="^([^ ]+)-[0-9]+$";
wspace="<space>";
help_message="
Usage: ${0##*/} [options] syms input_lkh_ark output_lkh_ark

Description:
Join log-likelihoods in a Kaldi archive file into a single block
(e.g. form/page/paragraph) based on their ID (see --regex).
IMPORTANT: All the lines in the same block of text must be in the correct
order in the input archive file.

Options:
--add_dummy_ctc_end : (type = boolean, default = $add_dummy_ctc_end)
Add a fake ctc symbol frame at the end of each form.
--add_wspace_border : (type = boolean, default = $add_wspace_border)
Add fake whitespace frame at the beginning and at the
end of each form.
--eps : (type = string, default = \"$eps\")
Token representing the epsilon symbol.
--regex : (type = regex, default = \"$regex\")
RegEx used to extract the form ID fromt the line ID.
Parenthesis must be used to group the form ID.
--wspace : (type = string, default \"$wspace\")
Token representing the whitespace character.
";
source utils/parse_options.inc.sh || exit 1;
[ $# -ne 3 ] && echo "$help_message" >&2 && exit 1;

# Parse inputs from arguments
syms="$1";
inpf="$2";
outf="$3";
scpf="${3/.ark/.scp}";
for f in "$syms" "$inpf"; do
  [ ! -s "$f" ] && echo "ERROR: File \"$f\" was not found!" >&2 && exit 1;
done;
mkdir -p "$(dirname "$outf")";

# Get the number of symbols (including CTC, but excluding epsilon) and
# the ID of the whitespace symbol.
info=( $(gawk -v eps="$eps" -v ws="$wspace" '{
  if ($1 != eps) N++;
  if ($1 == ws) wss=$2;
}END{ print N, wss }' "$syms") );

# Pipeline: dump the archive as text, insert whitespace/dummy-CTC frames
# at form boundaries (first gawk), regroup rows into one Kaldi matrix per
# form (second gawk), then write the binary archive + script file.
copy-matrix "ark:$inpf" ark,t:- | gawk -v AW="$add_wspace_border" \
  -v AD="$add_dummy_ctc_end" -v ND="${info[0]}" -v WSS="${info[1]}" \
  -v RE="$regex" '
function add_dummy_frame(n) {
  printf("%s", n);
  infv=-3.4 * 10^38;
  printf(" %g", 0.0);
  for (i = 2; i <= ND; ++i) { printf(" %g", infv); }
  printf("\n");
}
function add_wspace_frame(n) {
  printf("%s", n);
  infv=-3.4 * 10^38;
  for (i = 1; i < WSS; ++i) { printf(" %g", infv); }
  printf(" %g", 0.0);
  for (i = WSS + 1; i <= ND; ++i) { printf(" %g", infv); }
  printf("\n");
}
BEGIN {
  form_id="";
}{
  S = 1; F = NF;
  if ($2 == "[" && match($1, RE, A)) {
    if (form_id == A[1]) {
      add_wspace_frame(form_id);
    } else {
      if (AW == "true") {
        if(form_id != "") { add_wspace_frame(form_id); }
        add_wspace_frame(A[1]);
      }
      if (AD == "true") {
        if (form_id != "") { add_dummy_frame(form_id); }
      }
    }
    form_id = A[1];
    S += 2;
  }
  if ($NF == "]") {
    F = NF - 1;
  }
  if (S <= F) {
    printf("%s", form_id);
    for (i = S; i <= F; ++i)
      printf(" %g", $i);
    printf("\n");
  }
}END{
  if (AW == "true" && form_id != "") {
    add_wspace_frame(form_id);
  }
  if (AD == "true" && form_id != "") {
    add_dummy_frame(form_id);
  }
}' | gawk '
BEGIN{
  form_id="";
}{
  if ($1 != form_id) {
    if (form_id != "") printf("]\n");
    form_id = $1;
    printf("%s [\n", form_id);
  }
  for (i = 2; i <= NF; ++i) printf(" %g", $i);
  printf("\n");
}END{
  if (form_id != "") printf("]\n");
}' | copy-matrix ark,t:- "ark,scp:$outf,$scpf" ||
{ echo "ERROR: Creating file \"$outf\"!" >&2 && exit 1; }

exit 0;
| true |
b411ae72cb18866d5d010ed77c1eaa9464d03c18 | Shell | apache/shardingsphere-benchmark | /sysbench/proxy-mysql/prepare-proxy.sh | UTF-8 | 893 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Read the benchmark constants from the .env file via the toolkit helper.
DATABASE_TYPE=$(sh toolkit/read-constant-from-file.sh .env "PROXY_MYSQL")
BASE_PATH=$(sh toolkit/read-constant-from-file.sh .env "BASE_PATH")
PREPARED_CONF_PATH=$(sh toolkit/read-constant-from-file.sh .env "PREPARED_CONF_PATH")
PROXY_DIRECTORY_NAME=$(sh toolkit/read-constant-from-file.sh .env "PROXY_DIRECTORY_NAME")
MYSQL_DRIVER=$(sh toolkit/read-constant-from-file.sh .env "MYSQL_DRIVER")

# TODO this action is useless
if [ ! -d "${BASE_PATH}/${DATABASE_TYPE}/${PREPARED_CONF_PATH}" ]; then
    cp -R sysbench/${DATABASE_TYPE}/${PREPARED_CONF_PATH} ${BASE_PATH}/${DATABASE_TYPE}/
fi

# debug info
CURRENTPATH=`pwd`
echo "current path is ${CURRENTPATH}"

# Ship the MySQL JDBC driver into the proxy's lib folder once.
if [ ! -f ${BASE_PATH}/${DATABASE_TYPE}/${PROXY_DIRECTORY_NAME}/lib/${MYSQL_DRIVER} ]; then
    cp sysbench/${DATABASE_TYPE}/${PREPARED_CONF_PATH}/${MYSQL_DRIVER} ${BASE_PATH}/${DATABASE_TYPE}/${PROXY_DIRECTORY_NAME}/lib/
fi
| true |
a4564df9405f59969b49771e5352a27a8666fde7 | Shell | rdm-dev/meta-jens | /recipes-rdm/errhlp/errhlp/errhlp.sh | UTF-8 | 1,162 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# fail MESSAGE...: print MESSAGE on stderr and abort with status 1.
fail () {
	printf '%s\n' "$*" >&2
	exit 1
}
# Multi-call error helper: the action is chosen by the name the script
# is invoked under (enable-error / disable-error / enable-counted-error /
# disable-counted-error); $1 is the error identifier. State lives as
# marker files under /run/hp-errors/.
test $# -eq 1 || fail "$0 <error-identifier>"

# @LEDCTRL@ is substituted at build time; provides silence_error et al.
. @LEDCTRL@/ledctrl

CALL_NAME=`basename "$0"`

case "$CALL_NAME" in
	enable-error)
		touch "/run/hp-errors/$1"
#		led_error
		;;
	disable-error)
		rm -f "/run/hp-errors/$1"
		# Glob trick: the pattern stays literal only when no error file
		# remains, so this silences the LED once the directory is empty.
		# NOTE(review): with two or more remaining files the glob expands
		# to several words and `test` errors out -- confirm acceptable.
		test /run/hp-errors/* = "/run/hp-errors/*" && silence_error
		;;
	enable-counted-error)
		# Increment the per-identifier reference count (missing file = 0).
		test -f "/run/hp-errors/count-$1" && ERRCNT=$(cat /run/hp-errors/count-$1)
		test -z "$ERRCNT" && ERRCNT=0
		ERRCNT=$(expr $ERRCNT + 1)
		echo "$ERRCNT" > /run/hp-errors/count-$1
#		led_error
		;;
	disable-counted-error)
		# Decrement the count; drop the file when it reaches zero.
		test -f "/run/hp-errors/count-$1" && ERRCNT=$(cat /run/hp-errors/count-$1)
		test -z "$ERRCNT" && ERRCNT=1
		ERRCNT=$(expr $ERRCNT - 1)
		echo "$ERRCNT" > /run/hp-errors/count-$1
		test "$ERRCNT" -gt 0 || rm -f "/run/hp-errors/count-$1"
		test /run/hp-errors/* = "/run/hp-errors/*" && silence_error
		;;
	*)
		fail "<enable-error|disable-error|enable-counted-error|disable-counted-error> <error-identifier>"
		;;
esac

# Log via syslog when the socket exists, otherwise fall back to stderr.
test -S /dev/log && logger -s "$0 $1" || echo "$0 $1" >&2
exit 0
| true |
aae9f0d1fdf80b69856c1a33782212e3512f9a3a | Shell | kmstumpff/Scripts | /Timing/Timing.sh | UTF-8 | 1,271 | 3.640625 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Functions
######################################################
# Record the current wall-clock time in start_hour/start_min/start_sec.
# Fix: the original called `date` three separate times, so the fields
# could straddle a second/minute/hour rollover and disagree with each
# other; a single invocation yields one consistent snapshot.
GetStartTime() {
    read -r start_hour start_min start_sec <<< "$(date '+%H %M %S')"
}
# Record the current wall-clock time in end_hour/end_min/end_sec.
# Fix: single `date` call for a consistent snapshot (the original's three
# calls could straddle a rollover between fields).
GetEndTime() {
    read -r end_hour end_min end_sec <<< "$(date '+%H %M %S')"
}
# Print start, end, and elapsed time (HH:MM:SS) from the globals set by
# GetStartTime / GetEndTime.
# Bug fix: `date +%H/%M/%S` yields zero-padded values such as "08" and
# "09"; bash arithmetic treats a leading 0 as octal and rejects 08/09
# ("value too great for base"). Prefixing 10# forces decimal parsing.
DisplayTotalTime() {
    tot_hour=$((10#$end_hour - 10#$start_hour))
    tot_min=$((10#$end_min - 10#$start_min))
    tot_sec=$((10#$end_sec - 10#$start_sec))

    # Borrow from the next field when seconds or minutes go negative.
    if [ "$tot_sec" -lt "0" ]
    then
        tot_min=$(($tot_min - 1))
        tot_sec=$(($tot_sec + 60))
    fi
    if [ "$tot_min" -lt "0" ]
    then
        tot_hour=$(($tot_hour - 1))
        tot_min=$(($tot_min + 60))
    fi

    # Re-pad each field to two digits for display.
    if [ "$tot_sec" -le "9" ]
    then
        tot_sec="0"$tot_sec
    fi
    if [ "$tot_min" -le "9" ]
    then
        tot_min="0"$tot_min
    fi
    if [ "$tot_hour" -le "9" ]
    then
        tot_hour="0"$tot_hour
    fi

    echo "Start time: $start_hour:$start_min:$start_sec"
    echo "End time: $end_hour:$end_min:$end_sec"
    echo "Elapsed time: $tot_hour:$tot_min:$tot_sec"
}
# End Functions
######################################################
# Get Starting Time
GetStartTime
# Do stuff here
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# (Place the commands to be timed between these two markers.)
# Stop doing stuff here
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Get End Time
GetEndTime
# Display Timing Results
DisplayTotalTime
| true |
176465f981dd5d9d2840cde6d2e0c594d1cff4b2 | Shell | alex65536/bulatov-lectures-2 | /antibulatov.sh | UTF-8 | 2,951 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Interactive warning: this script rewrites the TeX sources in place.
echo "Этот скрипт делает конспект более интересным для чтения."
echo "Но он сильно меняет исходники."
echo "НЕ КОММИТЬТЕ после применения этого скрипта к репозиторию!"
# Confirmation loop: accepts Y/y (and Cyrillic Д/д), aborts on N/n (Н/н).
while :; do
    echo -n "Продолжить? [Y/N] "
    read -r OUTCOME
    case "$OUTCOME" in
        Y|y|Д|д)
            break
            ;;
        N|n|Н|н)
            echo "Останов."
            exit 0
            ;;
    esac
done
# globstar makes ** recurse into sub-directories for the glob below.
shopt -s globstar
# Tag the title page so readers know the script was applied.
sed -i -E 's/% add new headers if necessary/\\textit{Конспект был улучшен с применением} \\texttt{antibulatov.sh}./g' tex/title.tex
# Word-substitution gags applied in place to every .tex source.
sed -i -E '
s/([Уу])быв/\1бив/g
s/конечн/кончен/g
s/конечен/кончен/g
s/ескончен/есконечн/g
s/([Фф])ункци/\1укнци/g
s/([Дд])ифференц/\1езинфиц/g
s/([Чч])астн/\1аст/g
s/([Пп])риращ/\1ревращ/g
s/\bпредел/беспредел/g
s/\bПредел/Беспредел/g
s/([Сс])меша[н]+/\1мешн/g
s/([Пп])роизводн/\1роизводственн/g
s/([Яя])вн/\1вственн/g
s/([Ээ])кстремум/\1кстремизм/g
s/([Мм])инимум/\1инимализм/g
s/([Мм])аксимум/\1аксимализм/g
s/епрерывност/епрекращаемост/g
s/епрерывны([а-я]+)\b/епрекращающи\1ся/g
s/епрерывно([а-я]+)\b/епрекращающе\1ся/g
s/епрерывна([а-я]+)\b/епрекращающа\1ся/g
s/епрерывну([а-я]+)\b/епрекращающу\1ся/g
s/епрерывн([оаы])\b/епрекращаем\1/g
s/([Сс])оответствующ/\1оответсвующ/g
s/([Гг])ладк/\1адк/g
s/([Пп])ризнак/\1ризрак/g
s/([Сс])тепенн/\1тепн/g
s/([Пп])о Коши/\1о кошке/g
s/Коши/кошки/g
s/кошки-([А-Я])/кошки \1/g
s/\bмал/крохотн/g
s/\bбольш/громадн/g
s/\bгромадни([йхем])/громадны\1/g
s/громаднинство/большинство/g
s/\b([Сс])ходи/\1бегае/g
s/\b([Сс])ходя/\1бегаю/g
s/\b([Рр])асходи/\1азбегае/g
s/\b([Рр])асходя/\1азбегаю/g
s/интегральн/интернациональн/g
s/[Ии]нтеграл/Интернационал/g
s/([Ии])нтегрир/\1нтернационализир/g
s/([Кк])омпакт/\1омпот/g
s/([Кк])асат/\1усат/g
s/([Сс])вязн/\1вязан/g
s/([Оо])ткрыт/\1пенсорсн/g
s/([Дд])искриминант/\1искриминатор/g
s/([Кк])вадрир/\1адрир/g
s/([Сс])(жат[а-я]+)/\1\2 архиватором/g
s/([Аа])налитич/\1политич/g
s/([Пп])одстанов/\1одстав/g
' **/*.tex
# Rename the {mes} environment to {mr} in the style file to match.
sed -i -E 's/\{mes\}/{mr}/g' matanhelper.sty
# Rebuild the PDF with the mangled sources.
echo "Building PDF..."
make build
make clean
| true |
e365a9921903c65be4db4a1b78b34c9ba546f679 | Shell | rorschachwhy/shell | /打印机脚本_第一版/wanglili.sh | UTF-8 | 1,343 | 3.15625 | 3 | [] | no_license |
#!/bin/sh
HOSTNAME=(
"t101=58.68.148.50"
"t201=58.68.233.90"
"t301=123.56.16.9"
"t401=58.68.148.59"
"t501=58.68.224.154"
"t601=58.68.148.57"
)
PORT="3306"
USERNAME="root"
PASSWORD="XXXX"
search1_sql="SELECT
*
FROM
platform.t_core_gprs_printer
WHERE
(\`serial_number\` = '118495086'
AND \`is_online\` = 'Y')"
setClose1_sql="
UPDATE platform.t_core_gprs_printer
SET
\`serial_number\` = '666666'
WHERE
\`serial_number\` = '118495086';"
setOpen1_sql="
UPDATE
platform.t_core_gprs_printer
SET
\`serial_number\` = '118495086'
WHERE
\`id\` = '1'"
echo "请输入要配置的环境和打印机的值:"
read env sess
echo $env $sess
while [ ! -n "$sess" ]
do
echo "输入参数缺失,请重新输入"
read env sess
echo $env $sess
done
for hostname in ${HOSTNAME[@]}
do
echo "$hostname"
echo ${hostname:0:2}
if [ ${hostname:0:2} != $env ];then
search_sql=$search1_sql
setClose_sql=$setClose1_sql
else
search_sql=$search1_sql
setOpen_sql=$setOpen1_sql
fi
mysql -u$USERNAME -p$PASSWORD -h${hostname:5} -P$PORT -e "$search_sql"
mysql -u$USERNAME -p$PASSWORD -h${hostname:5} -P$PORT -e "$setClose_sql"
mysql -u$USERNAME -p$PASSWORD -h${hostname:5} -P$PORT -e "$setOpen_sql"
done
read -p "Press any key to continue." var
| true |
cd516fb0b2f061df405d36a0913f57942348e939 | Shell | IStallcup/school | /cs344/p1/check_num_args | UTF-8 | 267 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Report how many positional arguments were supplied (0, 1, 2, or 3+).
# Bug fix: the original's final echo ended with a stray ':' glued to the
# closing quote, printing "no inputs specified. error.:" instead of the
# intended message. The logic is wrapped in main() so it can be exercised
# directly; script behaviour is unchanged via the trailing `main "$@"`.
main() {
    if [ "$#" -eq 1 ]
    then
        echo "one input specified: $1"
    elif [ "$#" -eq 2 ]
    then
        echo "two inputs specified: $1, $2"
    elif [ "$#" -ge 3 ]
    then
        echo "more than two inputs specified. error."
    else
        echo "no inputs specified. error."
    fi
}

main "$@"
| true |
b58ac6f800ec5510f1a2261e72f13f7f2ebff638 | Shell | gilbertk27/soal-shift-sisop-modul-1-I08-2021 | /soal2/soal2_generate_laporan_ihir_shisop.sh | UTF-8 | 1,087 | 3.25 | 3 | [] | no_license | #!/bin/bash
#2a
# Per-transaction profit percentage: profit/(sales-profit)*100, where
# column 18 is sales and column 21 is profit in the TSV report. Output
# "id percent" pairs go to filter.txt for use by step 2e.
awk -F '\t' 'NR>1 {price=$18-$21; percent=($21/price)*100; print $1, percent}' Laporan-TokoShiSop.tsv > filter.txt
#2b
# Unique customer names (fields 8+9) for Albuquerque orders whose date
# field ($3) contains "-17", i.e. year 2017.
list_name=$(awk 'match($3, "-17") && /Albuquerque/ {print $8 " "$9}' Laporan-TokoShiSop.tsv | sort | uniq )
#echo -e "$list_name\n"
#echo ""
#2c
# Customer segment (column 8) with the fewest transactions: count each
# segment, drop the trailing line, and keep the last remaining entry.
least_sale=$(cut -f 8 Laporan-TokoShiSop.tsv | sort | uniq -c | head -n -1 | tail -n -1 | awk '{print "The type of customer segment with the least sales is " $2 " " $3" with "$1" transactions."}')
#echo -e "$least_sale\n"
#2d
# Region (column 13) with the least total profit (column 21): sum
# count*profit per region and report the first region that completes.
max_profit=$(cut -f 13,21 Laporan-TokoShiSop.tsv | sort -s | uniq -c | awk 'NR>=2 && p!=$2 {print "The region which has the least total profit is " p " with total profit " s;s=0} {s+=$3*$1} {p=$2}' | head -n 1)
#echo -e "$max_profit\n"
#2e
# Highest profit percentage from filter.txt plus the collected answers,
# written to hasil.txt.
awk 'BEGIN {max=0;num=0}{if($2>max) max=$2}{if($2==max) num=$1} END {print "The last transaction with the largest Transaction ID is ",num," with a percentage of ",max,"%."}' filter.txt > hasil.txt
echo -e "\nThe list of customer names in Albuquerque in 2017 includes: \n$list_name\n \n$least_sale\n \n$max_profit\n" >> hasil.txt
| true |
976f1d01d16849aecceb0079615581e419c91c13 | Shell | wsredniawa/kCSD-python | /continuous_integration/install.sh | UTF-8 | 4,266 | 3.140625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Based on a script from scikit-learn
# This script is meant to be called by the "install" step defined in
# .travis.yml. See http://docs.travis-ci.com/ for more details.
# The behavior of the script is controlled by environment variabled defined
# in the .travis.yml in the top level folder of the project.
set -e
# Fix the compilers to workaround avoid having the Python 3.4 build
# lookup for g++44 unexpectedly.
# DISTRIB selects the environment flavour: conda with minimal deps,
# conda, conda with extras, or the Travis-provided Ubuntu virtualenv
# (plain or with extras). Versions come from *_VERSION env vars.
if [[ "$DISTRIB" == "conda_min" ]]; then
    # Deactivate the travis-provided virtual environment and setup a
    # conda-based environment instead
    deactivate
    # Use the miniconda installer for faster download / install of conda
    # itself
    wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \
        -O miniconda.sh
    chmod +x miniconda.sh && ./miniconda.sh -b -p $HOME/miniconda
    export PATH=/home/travis/miniconda/bin:$PATH
    conda config --set always_yes yes
    conda update --yes conda
    # Configure the conda environment and put it in the path using the
    # provided versions
    conda create -n testenv --yes python=$PYTHON_VERSION pip nose coverage \
        six=$SIX_VERSION numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION
    source activate testenv
    pip install matplotlib
elif [[ "$DISTRIB" == "conda" ]]; then
    # Deactivate the travis-provided virtual environment and setup a
    # conda-based environment instead
    deactivate
    # Use the miniconda installer for faster download / install of conda
    # itself
    wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \
        -O miniconda.sh
    chmod +x miniconda.sh && ./miniconda.sh -b -p $HOME/miniconda
    export PATH=/home/travis/miniconda/bin:$PATH
    conda config --set always_yes yes
    conda update --yes conda
    # Configure the conda environment and put it in the path using the
    # provided versions
    conda create -n testenv --yes python=$PYTHON_VERSION pip nose coverage six=$SIX_VERSION \
        numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION
    source activate testenv
    if [[ "$COVERAGE" == "true" ]]; then
        pip install coveralls
    fi
elif [[ "$DISTRIB" == "conda_extra" ]]; then
    # Deactivate the travis-provided virtual environment and setup a
    # conda-based environment instead
    deactivate
    # Use the miniconda installer for faster download / install of conda
    # itself
    wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \
        -O miniconda.sh
    chmod +x miniconda.sh && ./miniconda.sh -b -p $HOME/miniconda
    export PATH=/home/travis/miniconda/bin:$PATH
    conda config --set always_yes yes
    conda update --yes conda
    # Configure the conda environment and put it in the path using the
    # provided versions
    conda create -n testenv --yes python=$PYTHON_VERSION pip nose coverage six=$SIX_VERSION \
        numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION
    source activate testenv
    pip install scikit-monaco
    pip install matplotlib
    if [[ "$COVERAGE" == "true" ]]; then
        pip install coveralls
    fi
elif [[ "$DISTRIB" == "ubuntu" ]]; then
    # deactivate
    # Create a new virtualenv using system site packages for numpy and scipy
    # virtualenv --system-site-packages testenv
    # source testenv/bin/activate
    pip install nose
    pip install coverage
    pip install numpy==$NUMPY_VERSION
    pip install scipy==$SCIPY_VERSION
    pip install six==$SIX_VERSION
    pip install matplotlib
elif [[ "$DISTRIB" == "ubuntu_extra" ]]; then
    # deactivate
    # Create a new virtualenv using system site packages for numpy and scipy
    # virtualenv --system-site-packages testenv
    # source testenv/bin/activate
    pip install nose
    pip install coverage
    pip install numpy==$NUMPY_VERSION
    pip install scipy==$SCIPY_VERSION
    pip install six==$SIX_VERSION
    pip install scikit-monaco==$SKMONACO_VERSION
    pip install matplotlib==$MATPLOTLIB_VERSION
fi
if [[ "$COVERAGE" == "true" ]]; then
    pip install coveralls
fi
pip install . # Installs kcsd-python
# Sanity check: the installed numpy/scipy match the requested versions.
python -c "import numpy; import os; assert os.getenv('NUMPY_VERSION') == numpy.__version__"
python -c "import scipy; import os; assert os.getenv('SCIPY_VERSION') == scipy.__version__"
| true |
ce4b37620b12a06c984d07204e47bbfe8e5c31f3 | Shell | ilventu/aur-mirror | /adventuregamestudio-git/PKGBUILD | UTF-8 | 1,141 | 3.171875 | 3 | [] | no_license | # Maintainer: Joe Davison <joedavison.davison@gmail.com>
pkgname=adventuregamestudio-git
pkgver=20120901
pkgrel=1
pkgdesc="Native port of the Adventure Game Studio engine to Linux (git version)"
arch=('i686' 'x86_64')
url="http://www.adventuregamestudio.co.uk/"
license=('Custom')
depends=('libogg' 'libvorbis' 'libtheora' 'dumb' 'freetype2' 'allegro4')
makedepends=('git')
install=ags-git.install
source=('ags-git.install')
md5sums=('fc9706e4e3636d7e789be40480a5fdc0')
provides=('adventuregamestudio')
_gitroot="https://github.com/adventuregamestudio/ags.git"
_gitname="ags"
_gitbranch="main"
# Clone or refresh the upstream AGS repository, then compile the engine in a
# throw-away copy of the checkout. Runs inside the makepkg environment
# ($srcdir, msg and the _git* variables are provided by the PKGBUILD).
build() {
  cd "$srcdir"
  msg "Connecting to GIT server...."
  # Clone fresh when no local checkout exists, otherwise just pull.
  if [ ! -d $_gitname ] ; then
    git clone --branch $_gitbranch $_gitroot $_gitname
  else
    cd $_gitname && git pull origin
    msg "The local files are updated."
  fi
  msg "GIT checkout done or server timeout"
  msg "Starting make..."
  # Build in a disposable clone so the cached checkout stays pristine.
  rm -rf "$srcdir/$_gitname-build"
  git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
  cd "$srcdir/$_gitname-build"
  make --directory=Engine --file=Makefile.linux || return 1
}
# Install the compiled engine binary into the package tree.
package() {
  # Quote the paths (srcdir/pkgdir may contain spaces) and reference the
  # build directory via $_gitname for consistency with build().
  install -D -m 755 "$srcdir/$_gitname-build/Engine/ags" \
    "$pkgdir/usr/bin/ags"
}
| true |
fac6c32218b8cc711107fcb81c0294d82b973c12 | Shell | jeannoeldot/builds | /9-pkg-old/firefox/PKGBUILD | UTF-8 | 5,396 | 2.703125 | 3 | [] | no_license | # Maintainer: Devin Cofer <ranguvar{AT]archlinux[DOT}us>
# Contributor: blasse <koralik(at)gmail(dot)com>
# Maintainer: JND
pkgname=firefox
_mozver=2.0
_ffmajorver=4.0
_ffver=4.0rc1
_build=build1
pkgver=${_ffver}
pkgrel=1
pkgdesc="Mozilla Firefox customizable web browser (XULRunner independent, 64-bit TraceMonkey, beta)"
url="http://www.mozilla.org/projects/firefox"
arch=('i686' 'x86_64')
license=('MPL' 'GPL' 'LGPL')
makedepends=('autoconf2.13' 'gcc' 'zip' 'unzip' 'pkgconfig' 'diffutils'
'libgnomeui' 'python2' 'wireless_tools' 'yasm'
'xorg-server-xvfb' 'mesa'
)
depends=('gtk2' 'gcc-libs' 'libidl2' 'mozilla-common'
'nss' 'nspr' 'libxt' 'hunspell' 'startup-notification'
'libnotify' 'mime-types' 'dbus-glib' 'desktop-file-utils'
'libpng' 'libevent' 'alsa-lib' 'mesa'
'cairo-tee' 'lcms' 'libvpx'
)
optdepends=('libgnomeui: GNOME integration and MIME handling'
'wireless_tools: Location aware browsing'
)
provides=("firefox=$pkgver" "firefox-pgo=$pkgver" "firefox-beta=$pkgver")
conflicts=('firefox' 'firefox-pgo' 'firefox-beta' 'firefox4-beta' 'firefox-pgo-minefield' 'firefox-pgo-minefield-smp')
install=firefox.install
#"ftp://ftp.mozilla.org/pub/mozilla.org/firefox/nightly/${_ffver}-candidates/${_build}/source/firefox-${_ffver}.source.tar.bz2"
#"ftp://ftp.mozilla.org/pub/mozilla.org/firefox/nightly/${_ffver}-candidates/${_build}/linux-i686/xpi/fr.xpi"
source=("ftp://ftp.mozilla.org/pub/mozilla.org/firefox/releases/${_ffver}/source/firefox-${_ffver}.source.tar.bz2"
"ftp://ftp.mozilla.org/pub/mozilla.org/firefox/releases/${_ffver}/linux-x86_64/xpi/fr.xpi"
'mozconfig'
'firefox-default-prefs.js'
'firefox.desktop'
'firefox-safe.desktop'
'4.0-ldflags-namespec.patch'
)
md5sums=('511828dcc226f38602c6c67bd192ef40'
'd7b71014ce4fe14b8bf7dc05aabe9606'
'd1235d38fa85577b722b080c08127580'
'6bfa54cc21d28a9989530aa2bf5ac319'
'b3216987e63c11e520abc47431d07368'
'9eb57a262bfec76f3c32bdcdb1bfc17e'
'873bdd1c7d4e63883a446d345e01a39f'
)
# Patch, configure and compile Firefox with native-arch optimization flags.
# Runs in the makepkg environment; $srcdir/$MAKEFLAGS are provided by makepkg
# and $_mozver/$_ffmajorver come from the PKGBUILD header.
build() {
	cd "$srcdir"/mozilla-$_mozver
	# msg "Patching source."
	# PGO compilation LDFLAGS fix
	patch -Np1 -i "$srcdir"/4.0-ldflags-namespec.patch
	cp "$srcdir"/mozconfig .mozconfig
	# msg "Setting up build."
	# Changing the user's optimization flags is justified, because this is
	# a package specifically for an optimized software build, and because of
	# the official branding, binaries can't be redistributed anyways.
	# These flags just set guidelines for the build, they are overridden in
	# most compile job pieces by Firefox's better judgement.
	# Mon Test
	# export CFLAGS="-march=native -O2 -pipe"
	# export CXXFLAGS="-march=native -O2 -pipe"
	export CFLAGS="-march=native -O2 -pipe -fomit-frame-pointer -floop-interchange -floop-strip-mine -floop-block"
	export CXXFLAGS="-march=native -O2 -pipe -fomit-frame-pointer -floop-interchange -floop-strip-mine -floop-block"
	# The hash-style and as-needed flags are in Arch defaults anyways,
	# and the other optimization flags are almost definitely safe.
	export LDFLAGS="-Wl,-rpath,/usr/lib/firefox-$_ffmajorver -Wl,-O1,--sort-common,--hash-style=gnu,--as-needed"
	export PYTHON=python2
	autoconf-2.13
	# msg "Actual build."
	# Yes, all this is SMP -- MOZ_MAKE_FLAGS takes care of it.
	# Compile a non-PGO build first to reduce chance of error in PGO build.
	# Mon Test
	# make -j1 -f client.mk build MOZ_MAKE_FLAGS="$MAKEFLAGS"
	make -f client.mk build MOZ_MAKE_FLAGS="$MAKEFLAGS"
}
# Install the build into $pkgdir: binaries, desktop entries, icon, the French
# language pack, default prefs, and clean-up of devel files and bundled
# dictionaries (replaced by a symlink to the system myspell dictionaries).
package() {
	cd "$srcdir"/mozilla-$_mozver
	# msg "Installing to $pkgdir."
	# Mon Test
	# make -j1 DESTDIR="$pkgdir" -C ff-pgo install
	make DESTDIR="$pkgdir" -C ff-pgo install
	install -Dm644 "$srcdir/mozilla-$_mozver/other-licenses/branding/firefox/mozicon128.png" \
		"$pkgdir/usr/share/pixmaps/firefox.png"
	install -Dm644 "$srcdir/firefox.desktop" \
		"$pkgdir/usr/share/applications/firefox.desktop"
	install -Dm644 "$srcdir/firefox-safe.desktop" \
		"$pkgdir/usr/share/applications/firefox-safe.desktop"
	#Install fr.xpi
	# The fr.xpi source was unpacked by makepkg into $srcdir (chrome/,
	# chrome.manifest, install.rdf).
	cd ${srcdir}
	install -m755 -d ${pkgdir}/usr/lib/firefox-$_ffmajorver/extensions/langpack-fr@firefox.mozilla.org
	cp -R chrome ${pkgdir}/usr/lib/firefox-$_ffmajorver/extensions/langpack-fr@firefox.mozilla.org
	install -D -m644 chrome.manifest ${pkgdir}/usr/lib/firefox-$_ffmajorver/extensions/langpack-fr@firefox.mozilla.org/
	install -D -m644 install.rdf ${pkgdir}/usr/lib/firefox-$_ffmajorver/extensions/langpack-fr@firefox.mozilla.org/
	install -D -m644 firefox-default-prefs.js ${pkgdir}/usr/lib/firefox-$_ffmajorver/defaults/pref/all-arch.js
	# sed -e "s:general.useragent.locale\", \"en-US\":general.useragent.locale\", \"fr\":" \
	# -i ${pkgdir}/usr/lib/firefox-$_ffver/defaults/pref/firefox.js \
	# -i ${pkgdir}/usr/lib/firefox-$_ffver/defaults/pref/firefox-l10n.js
	# System extensions
	# Use the system hunspell dictionaries
	#Remove included dictionaries, add symlink to system myspell path.
	#Note: this will cause file conflicts when users have installed dictionaries in the old location
	rm -rf "${pkgdir}/usr/lib/firefox-$_ffmajorver/dictionaries"
	ln -sf /usr/share/myspell/dicts "${pkgdir}/usr/lib/firefox-$_ffmajorver/dictionaries"
	# ghost files
	# Remove devel stuff.
	rm -rf "$pkgdir/usr/include/"
	rm -rf "$pkgdir/usr/lib/firefox-devel-$_ffmajorver/"
	rm -rf "$pkgdir/usr/share/idl/"
}
| true |
41152885053481695a9815de4564c1999e4bfda3 | Shell | starcraftman/.my_scripts | /archive/change_author.sh | UTF-8 | 431 | 2.625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Rewrite the full history of the current branch, replacing the author and
# committer identity "userName" with "newName"/"newEmail" on every commit.
# NOTE(review): this is a template - substitute the placeholder names/emails
# before running. filter-branch rewrites every commit hash, so only run it
# on history that has not been shared.
# (The env-filter body below is a single-quoted literal passed to git and
# must stay byte-identical.)
git filter-branch -f --env-filter '
an="$GIT_AUTHOR_NAME"
am="$GIT_AUTHOR_EMAIL"
cn="$GIT_COMMITTER_NAME"
cm="$GIT_COMMITTER_EMAIL"
if [ "$GIT_COMMITTER_NAME" = "userName" ]
then
cn="newName"
cm="newEmail"
fi
if [ "$GIT_AUTHOR_NAME" = "userName" ]
then
an="newName"
am="newEmail"
fi
export GIT_AUTHOR_NAME="$an"
export GIT_AUTHOR_EMAIL="$am"
export GIT_COMMITTER_NAME="$cn"
export GIT_COMMITTER_EMAIL="$cm"
'
| true |
d61fc9d64d9e0b72e6f7bd0a7f637f85b9da4560 | Shell | openshift-scale/grafshift | /deploy.sh | UTF-8 | 1,594 | 3.484375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Usage: deploy.sh <grafana-admin-password>
# Deploys a Grafana instance into a new "grafshift" OpenShift project, wired
# to the cluster's openshift-monitoring Prometheus, then uploads the local
# dashboards/ directory through grafana-dashboard (grafyaml).
GRAFANA_PASSWORD=${1}
# Extract the Prometheus URL and basic-auth credentials from the secret used
# by the stock openshift-monitoring Grafana deployment.
prom_url=`oc get secrets -n openshift-monitoring grafana-datasources -o go-template='{{index .data "prometheus.yaml"}}' | base64 --decode | jq '.datasources[0].url'`
prom_user="internal"
prom_pass=`oc get secrets -n openshift-monitoring grafana-datasources -o go-template='{{index .data "prometheus.yaml"}}' | base64 --decode | jq '.datasources[0].basicAuthPassword'`
oc new-project grafshift
oc process -f templates/grafana.yml -p "GRAFANA_ADMIN_PASSWORD=${GRAFANA_PASSWORD}" -p "PROMETHEUS_URL=${prom_url}" -p "PROMETHEUS_USER=${prom_user}" -p "PROMETHEUS_PASSWORD=${prom_pass}" | oc create -f -
# Poll for up to ~20 seconds until a pod in the project reports "running".
for i in `seq 1 20`
do
echo "Seeing if pod is up... ${i}"
running=`oc get po -n grafshift | grep running -i -c`
if [ ${running} -gt 0 ]
then
echo "Grafana Pod is up..."
break
fi
sleep 1
done
# Create an admin API key through the freshly exposed route.
grafshift_route=`oc get routes -n grafshift -o=json | jq -r '.items[0].spec.host'`
api_key=`curl -s -H "Content-Type: application/json" -H "Accept: application/json" -X POST -d '{"name":"grafyaml","role":"Admin"}' http://admin:${GRAFANA_PASSWORD}@${grafshift_route}/api/auth/keys | jq -r '.key'`
# Clear GrafYaml Cache
rm -rf ~/.cache/grafyaml/cache.dbm
# Creates local GrafYaml config file
echo "[cache]" > .grafyaml_config
echo "enabled = false" >> .grafyaml_config
echo "[grafana]" >> .grafyaml_config
echo "url = http://${grafshift_route}" >> .grafyaml_config
echo "apikey = ${api_key}" >> .grafyaml_config
echo "Uploading Dashboards to http://${grafshift_route}/"
grafana-dashboard --config-file .grafyaml_config update dashboards/
| true |
d2b33c4acac39b7695cc4ef8deec1103439c7ecd | Shell | limianwang/environments | /dotfiles/.zshrc | UTF-8 | 2,349 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env zsh
setopt prompt_subst
export ZSH="/Users/$USER/.oh-my-zsh"
export EDITOR=vim
export GREP_OPTIONS='--color=auto'
export GREP_COLOR='0;32'
plugins=(
git
)
test -f $ZSH/oh-my-zsh.sh && . $_
if type brew &>/dev/null; then
FPATH=$(brew --prefix)/share/zsh-completions:$FPATH
autoload -Uz compinit
compinit -i
fi
# Exporting colors to the terminal
# http://osxdaily.com/2012/02/21/add-color-to-the-terminal-in-mac-os-x/
export TERM=xterm-256color
export CLICOLOR=1
export LSCOLORS=ExFxCxDxBxegedabagacad
# Reset the path
export PATH="/usr/local/bin:/usr/local/sbin:$PATH";
# Parse the git branch of the folder
# Print the current git branch as "(branch[*][^]) " - "*" marks a dirty
# working tree, "^" marks a non-empty stash; prints nothing outside a repo.
function parse_git_branch() {
	git branch 2> /dev/null | sed -e '/^[^*]/d' -e "s/* \(.*\)/(\1$(parse_git_dirty)$(parse_git_stash)) /"
}
# Emit "*" when the working tree has changes, judged by the last line of
# `git status` differing from the "clean" message.
function parse_git_dirty {
	[[ $(git status 2> /dev/null | tail -n1) != "nothing to commit, working tree clean" ]] && echo "*"
}
# Emit "^" when the git stash is non-empty.
function parse_git_stash {
	[[ $(git stash list 2> /dev/null | tail -n1) != "" ]] && echo "^"
}
# Print each installed Homebrew formula followed by its dependencies on one
# line, e.g. "openssl -> ca-certificates".
function list_deps {
	brew list | while read cask; do echo -n "$cask ->"; brew deps $cask | awk '{printf(" %s ", $0)}'; echo ""; done
}
alias deps=list_deps
# http://apple.stackexchange.com/questions/55875/git-auto-complete-for-branches-at-the-command-line
# curl https://raw.githubusercontent.com/git/git/master/contrib/completion/git-completion.bash -o ~/.git-completion.bash
test -f ~/.git-completion.bash && . $_
BLUE="\[\e[0;34m\]";
GREEN="\[\e[0;32m\]";
CYAN="\[\e[0;36m\]";
YELLOW="\[\e[0;33m\]";
RED="\[\e[0;31m\]";
NO_COLOUR="\[\e[m\]";
# nvm
export NVM_DIR=~/.nvm
source $(brew --prefix nvm)/nvm.sh
# Activate the default node version via nvm for new shells.
function initNode() {
	nvm use default;
}
initNode
# zsh pre-prompt hook: set the terminal tab/window title to the basename of
# the current directory (OSC 1 escape sequence).
precmd() {
	echo -ne "\e]1;${PWD##*/}\a"
}
if [ $ITERM_SESSION_ID ]; then
precmd
fi
export PROMPT='%10F{blue}%n%f@%F{blue}%m:%f%F{yellow}%~%f %F{red}$(parse_git_branch)%f(%F{red}%(1j.%j.0)%f)'$'\n''λ %F{yellow}=>%f '
# Postgres
alias pg='postgres'
export PGDATA=/usr/local/var/postgres/
#pyenv
export PATH=$(pyenv root)/shims:$PATH
# Go
export GOPATH=~/go
export GOROOT="$(brew --prefix golang)/libexec"
export PATH=$PATH:$GOPATH/bin
export PATH=$PATH:$GOROOT/bin
# Flutter
export PATH="$PATH:/Users/$USER/flutter/bin"
# aliases
alias ll="ls -l";
alias cdm="cd ~/dev"
alias cdg="cd $GOPATH/src"
[[ /usr/local/bin/kubectl ]] && source <(kubectl completion zsh)
| true |
70b79ec55fd60c9959e95892f0a32cd63ac6d768 | Shell | jihoonkim/DataMed-Admixture | /provision/install_iadmix.sh | UTF-8 | 2,267 | 3.078125 | 3 | [
"MIT"
] | permissive |
#!/bin/bash
#-----------------------------------------------------------------------------
# File name : install_iadmix.sh
# Author : Jihoon Kim (j5kim@ucsd.edu), Olivier Harismendy
# Date : 12/14/2017
# Description : Install iAdmix to calculate population allele frequency
# for an input vcf-format file.
#-----------------------------------------------------------------------------
### update the repository source list
apt-get update -y
### install dependent packages
apt-get install -y autoconf build-essential curl gcc-multilib git g++ \
libbz2-dev liblzma-dev libncurses5-dev libssl-dev libz-dev make pkg-config \
software-properties-common python wget zip zlibc zlib1g zlib1g-dev tabix
### install iADMIX to compute population allele frequencies
cd /opt
git clone https://github.com/vibansal/ancestry.git
cd ancestry
find . -type f -name '*.o' -delete # recursively delete all *.o files
make all
### install vcftools to convert .vcf to PLINK format file
cd /opt
git clone https://github.com/vcftools/vcftools.git
cd vcftools
./autogen.sh
./configure
make
make install
### install PLINK
cd /opt
wget http://zzz.bwh.harvard.edu/plink/dist/plink-1.07-x86_64.zip
unzip plink-1.07-x86_64.zip
ln -s /opt/plink-1.07-x86_64/plink /usr/local/bin/plink
### download resource data, the population allele frequencies for common SNPs
### of the International HapMap Project
cd /opt/ancestry
wget "https://ndownloader.figshare.com/files/9920605" -O hapmap3.8populations.hg19.txt.zip
unzip hapmap3.8populations.hg19.txt.zip
rm hapmap3.8populations.hg19.txt.zip
### download resource data, the population allele frequencies for common SNPs
### of the 1000 Genomes Project without LD-based SNP pruning
cd /opt/ancestry
wget "https://ndownloader.figshare.com/files/11173319" -O 1000Gphase3.5superpopulations.hg19.withoutLDpruning.tsv.gz
tabix -p vcf 1000Gphase3.5superpopulations.hg19.withoutLDpruning.tsv.gz
### download resource data, the population allele frequencies for common SNPs
### of the 1000 Genomes Project with LD-based SNP pruning
cd /opt/ancestry
wget "https://ndownloader.figshare.com/files/11144816" -O 1000Gphase3.5superpopulations.hg19.withLDpruning.txt.gz
gunzip 1000Gphase3.5superpopulations.hg19.withLDpruning.txt.gz
| true |
ac964628ef426364667eb1220b938b4f9e62c4fa | Shell | JeroenKnoops/forest-bash | /test/messages.bats | UTF-8 | 1,349 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env bats
source ./messages.sh
@test "info" {
run info 'my text'
[ "$status" -eq 0 ]
[ "$output" = " INFO: `tput setaf 3`my text`tput sgr0`" ]
}
@test "success" {
run success 'my text'
[ "$status" -eq 0 ]
[ "${lines[0]}" = " SUCCESS: `tput setaf 2`my text`tput sgr0`" ]
[ "${lines[1]}" = " =====================================================================================" ]
}
@test "error" {
run error 'my text'
[ "$status" -eq 1 ]
[ "$output" = " ERROR: `tput setaf 1`my text`tput sgr0`" ]
}
@test "warn" {
run warn 'my text'
[ "$status" -eq 0 ]
[ "$output" = " WARN: `tput setaf 4`my text`tput sgr0`" ]
}
@test "big-figlet" {
skip
run big 'blurk'
blurk=$(cat <<-EOM
_ _ _
| |__| |_ _ _ _| |__
| '_ \ | || | '_| / /
|_.__/_|\_,_|_| |_\_\\
EOM
)
[ "$status" -eq 0 ]
set -- "$blurk"
declare -a Array=($*)
[ "${lines[0]}" = "${Array[0]}" ]
[ "${lines[1]}" = "${Array[1]}" ]
[ "${lines[2]}" = "${Array[2]}" ]
[ "${lines[3]}" = "${Array[3]}" ]
[ "${lines[4]}" = "${Array[4]}" ]
}
@test "big-no-figlet" {
skip
run big 'blurk'
[ "$output" = "blurk" ]
}
@test "message example" {
run ./examples/messages-example.sh
[ "$status" -eq 0 ]
[ "$output" = " INFO: `tput setaf 3`scripts installed`tput sgr0`" ]
}
| true |
1f804333089f95a644590e278aba2deb54394046 | Shell | alexbers/keyrace_ssh | /start_game.sh | UTF-8 | 564 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
cd "$( dirname "${BASH_SOURCE[0]}")"
set -e
if [ -f gamestarted -o -f gameprepare ]; then
echo "Old game is still running, run ./stop_game.sh"
exit 1
fi
text=${1:?usage: start.game.sh <textfile>}
text_data=$(<$text)
touch gameprepare
echo "Get ready!!"
for i in {15..1}; do
echo $i
sleep 1
done
echo "$text_data" > text.txt
touch gamestarted
echo "Go!Go!Go!"
#echo "Press Ctrl-C to stop the game"
#trap "./stop_game.sh" SIGINT SIGTERM
#while true; do
# sleep 600
#done
#echo Game was running too long. Stopping it
#./stop_game.sh
| true |
67c4a880eea3426e0a24d5a2cf3649b531c93796 | Shell | ben00310/COMP2101 | /bash/arithmetic-demo.sh | UTF-8 | 1,110 | 4.1875 | 4 | [] | no_license | #!/bin/bash
#
# this script demonstrates doing arithmetic
# Improve this script by asking the user to supply the two numbers
# Improve this script by demonstrating using subtraction and multiplication
# Improve this script by demonstrating the modulus operation
# - the output should look something like:
# - first divided by second gives X with a remainder of Y
# Improve this script by calculating and displaying the first number raised to the power of the second number

# Print the results of the basic integer operations (plus a floating point
# division computed with awk) for the two supplied numbers.
# Arguments: $1 - first number, $2 - second number
# Returns: 1 (with a message on stderr) when the input is not two integers
#          or the second number is zero.
show_arithmetic() {
	local firstnum=$1
	local secondnum=$2
	# Validate up front so empty or non-numeric input produces a clear
	# error instead of raw shell arithmetic failures.
	if ! [[ $firstnum =~ ^-?[0-9]+$ && $secondnum =~ ^-?[0-9]+$ ]]; then
		echo "Error: please supply two whole numbers" >&2
		return 1
	fi
	# Guard the division/modulus against a zero divisor.
	if ((secondnum == 0)); then
		echo "Error: the second number must not be zero" >&2
		return 1
	fi
	local sum=$((firstnum + secondnum))
	local sum2=$((firstnum - secondnum))
	local sum3=$((firstnum * secondnum))
	local sum4=$((firstnum % secondnum))
	local sum5=$((firstnum ** secondnum))
	local dividend=$((firstnum / secondnum))
	local fpdividend
	fpdividend=$(awk "BEGIN{printf \"%.2f\", $firstnum/$secondnum}")
	cat <<EOF
$firstnum plus $secondnum is $sum
$firstnum subtact $secondnum is $sum2
$firstnum multiply $secondnum is $sum3
$firstnum modulus $secondnum is $sum4
$firstnum to the power of $secondnum is $sum5
$firstnum divided by $secondnum is $dividend
- More precisely, it is $fpdividend
EOF
}

echo "Please supply two numbers"
# Only compute when both reads succeed; with exhausted stdin the original
# went on to evaluate arithmetic on empty values.
if read -r num1 && read -r num2; then
	show_arithmetic "$num1" "$num2"
fi
| true |
994e8fd5f0e4687cf8ea2978d8457883b56f5d1e | Shell | spdk/spdk | /test/common/autotest_common.sh | UTF-8 | 42,613 | 2.921875 | 3 | [
"Intel",
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2015 Intel Corporation
# All rights reserved.
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Name of the helper used to issue SPDK RPCs in the tests; rpc_cmd is
# presumably provided by the sourced common helpers - verify before changing.
rpc_py=rpc_cmd
# Bind BASH_XTRACEFD to stderr and turn command tracing on. A descriptor
# left over from an earlier call is closed first so the variable is always
# backed by a fresh, valid fd.
function xtrace_fd() {
	if [[ -n ${BASH_XTRACEFD:-} ]] && [[ -e /proc/self/fd/$BASH_XTRACEFD ]]; then
		# Close the stale trace fd to make sure it's sane
		exec {BASH_XTRACEFD}>&-
	fi
	exec {BASH_XTRACEFD}>&2
	set -x
}
# Turn off xtrace (set -x), remembering the previous option string in
# PREV_BASH_OPTS so xtrace_restore can bring it back. Calls made while
# already disabled only bump XTRACE_NESTING_LEVEL.
function xtrace_disable() {
	if [[ "${XTRACE_DISABLED:-}" != "yes" ]]; then
		# First level: snapshot the option flags before clearing x.
		PREV_BASH_OPTS=$-
		[[ $PREV_BASH_OPTS == *x* ]] && XTRACE_DISABLED="yes"
		set +x
	elif [[ -z "${XTRACE_NESTING_LEVEL:-}" ]]; then
		XTRACE_NESTING_LEVEL=1
	else
		XTRACE_NESTING_LEVEL=$((XTRACE_NESTING_LEVEL + 1))
	fi
}
# Evaluate the given command line with the xtrace fd redirected to /dev/null,
# suppressing trace output for just that one command.
function xtrace_disable_per_cmd() { eval "$* ${BASH_XTRACEFD}> /dev/null"; }
# Keep tracing quiet while the common helpers and config are sourced.
xtrace_disable
set -e
shopt -s expand_aliases

# Pick up the build configuration: prefer the test-specific copy, then the
# generated mk/config.mk, finally the in-tree CONFIG defaults.
if [[ -e $rootdir/test/common/build_config.sh ]]; then
	source "$rootdir/test/common/build_config.sh"
elif [[ -e $rootdir/mk/config.mk ]]; then
	build_config=$(< "$rootdir/mk/config.mk")
	# config.mk uses make-style "?=" assignments - rewrite them to plain
	# "=" so the file can be evaluated as shell.
	source <(echo "${build_config//\?=/=}")
else
	source "$rootdir/CONFIG"
fi

# Source scripts after the config so that the definitions are available.
source "$rootdir/test/common/applications.sh"
source "$rootdir/scripts/common.sh"
# Dummy function to be called after restoring xtrace just so that it appears in the
# xtrace log. This way we can consistently track when xtrace is enabled/disabled.
# Marker called right after tracing is re-enabled so that a recognizable
# entry shows up in the xtrace log (see the xtrace_restore alias).
function xtrace_enable() {
	# We have to do something inside a function in bash, and calling any command
	# (even `:`) will produce an xtrace entry, so we just define another function.
	function xtrace_dummy() { :; }
}
# Keep it as alias to avoid xtrace_enable backtrace always pointing to xtrace_restore.
# xtrace_enable will appear as called directly from the user script, from the same line
# that "called" xtrace_restore.
alias xtrace_restore='if [ -z ${XTRACE_NESTING_LEVEL:-} ]; then
if [[ "${PREV_BASH_OPTS:-}" == *"x"* ]]; then
XTRACE_DISABLED="no"; PREV_BASH_OPTS=""; set -x; xtrace_enable;
fi
else
XTRACE_NESTING_LEVEL=$((--XTRACE_NESTING_LEVEL));
if [ $XTRACE_NESTING_LEVEL -eq "0" ]; then
unset XTRACE_NESTING_LEVEL
fi
fi'
: ${RUN_NIGHTLY:=0}
export RUN_NIGHTLY
# Set defaults for missing test config options
: ${SPDK_AUTOTEST_DEBUG_APPS:=0}
export SPDK_AUTOTEST_DEBUG_APPS
: ${SPDK_RUN_VALGRIND=0}
export SPDK_RUN_VALGRIND
: ${SPDK_RUN_FUNCTIONAL_TEST=0}
export SPDK_RUN_FUNCTIONAL_TEST
: ${SPDK_TEST_UNITTEST=0}
export SPDK_TEST_UNITTEST
: ${SPDK_TEST_AUTOBUILD=""}
export SPDK_TEST_AUTOBUILD
: ${SPDK_TEST_RELEASE_BUILD=0}
export SPDK_TEST_RELEASE_BUILD
: ${SPDK_TEST_ISAL=0}
export SPDK_TEST_ISAL
: ${SPDK_TEST_ISCSI=0}
export SPDK_TEST_ISCSI
: ${SPDK_TEST_ISCSI_INITIATOR=0}
export SPDK_TEST_ISCSI_INITIATOR
: ${SPDK_TEST_NVME=0}
export SPDK_TEST_NVME
: ${SPDK_TEST_NVME_PMR=0}
export SPDK_TEST_NVME_PMR
: ${SPDK_TEST_NVME_BP=0}
export SPDK_TEST_NVME_BP
: ${SPDK_TEST_NVME_CLI=0}
export SPDK_TEST_NVME_CLI
: ${SPDK_TEST_NVME_CUSE=0}
export SPDK_TEST_NVME_CUSE
: ${SPDK_TEST_NVME_FDP=0}
export SPDK_TEST_NVME_FDP
: ${SPDK_TEST_NVMF=0}
export SPDK_TEST_NVMF
: ${SPDK_TEST_VFIOUSER=0}
export SPDK_TEST_VFIOUSER
: ${SPDK_TEST_VFIOUSER_QEMU=0}
export SPDK_TEST_VFIOUSER_QEMU
: ${SPDK_TEST_FUZZER=0}
export SPDK_TEST_FUZZER
: ${SPDK_TEST_FUZZER_SHORT=0}
export SPDK_TEST_FUZZER_SHORT
: ${SPDK_TEST_NVMF_TRANSPORT="rdma"}
export SPDK_TEST_NVMF_TRANSPORT
: ${SPDK_TEST_RBD=0}
export SPDK_TEST_RBD
: ${SPDK_TEST_VHOST=0}
export SPDK_TEST_VHOST
: ${SPDK_TEST_BLOCKDEV=0}
export SPDK_TEST_BLOCKDEV
: ${SPDK_TEST_IOAT=0}
export SPDK_TEST_IOAT
: ${SPDK_TEST_BLOBFS=0}
export SPDK_TEST_BLOBFS
: ${SPDK_TEST_VHOST_INIT=0}
export SPDK_TEST_VHOST_INIT
: ${SPDK_TEST_LVOL=0}
export SPDK_TEST_LVOL
: ${SPDK_TEST_VBDEV_COMPRESS=0}
export SPDK_TEST_VBDEV_COMPRESS
: ${SPDK_RUN_ASAN=0}
export SPDK_RUN_ASAN
: ${SPDK_RUN_UBSAN=0}
export SPDK_RUN_UBSAN
: ${SPDK_RUN_EXTERNAL_DPDK=""}
export SPDK_RUN_EXTERNAL_DPDK
: ${SPDK_RUN_NON_ROOT=0}
export SPDK_RUN_NON_ROOT
: ${SPDK_TEST_CRYPTO=0}
export SPDK_TEST_CRYPTO
: ${SPDK_TEST_FTL=0}
export SPDK_TEST_FTL
: ${SPDK_TEST_OCF=0}
export SPDK_TEST_OCF
: ${SPDK_TEST_VMD=0}
export SPDK_TEST_VMD
: ${SPDK_TEST_OPAL=0}
export SPDK_TEST_OPAL
: ${SPDK_TEST_NATIVE_DPDK}
export SPDK_TEST_NATIVE_DPDK
: ${SPDK_AUTOTEST_X=true}
export SPDK_AUTOTEST_X
: ${SPDK_TEST_RAID5=0}
export SPDK_TEST_RAID5
: ${SPDK_TEST_URING=0}
export SPDK_TEST_URING
: ${SPDK_TEST_USDT=0}
export SPDK_TEST_USDT
: ${SPDK_TEST_USE_IGB_UIO:=0}
export SPDK_TEST_USE_IGB_UIO
: ${SPDK_TEST_SCHEDULER:=0}
export SPDK_TEST_SCHEDULER
: ${SPDK_TEST_SCANBUILD:=0}
export SPDK_TEST_SCANBUILD
: ${SPDK_TEST_NVMF_NICS:=}
export SPDK_TEST_NVMF_NICS
: ${SPDK_TEST_SMA=0}
export SPDK_TEST_SMA
: ${SPDK_TEST_DAOS=0}
export SPDK_TEST_DAOS
: ${SPDK_TEST_XNVME:=0}
export SPDK_TEST_XNVME
# Comma-separated list of fuzzer targets matching test/fuzz/llvm/$target
: ${SPDK_TEST_FUZZER_TARGET:=}
export SPDK_TEST_FUZZER_TARGET
: ${SPDK_TEST_NVMF_MDNS=0}
export SPDK_TEST_NVMF_MDNS
# always test with SPDK shared objects.
export SPDK_LIB_DIR="$rootdir/build/lib"
export DPDK_LIB_DIR="${SPDK_RUN_EXTERNAL_DPDK:-$rootdir/dpdk/build}/lib"
export VFIO_LIB_DIR="$rootdir/build/libvfio-user/usr/local/lib"
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SPDK_LIB_DIR:$DPDK_LIB_DIR:$VFIO_LIB_DIR
# Tell setup.sh to wait for block devices upon each reset
export PCI_BLOCK_SYNC_ON_RESET=yes
# Export PYTHONPATH with addition of RPC framework. New scripts can be created
# specific use cases for tests.
export PYTHONPATH=$PYTHONPATH:$rootdir/python
# Don't create Python .pyc files. When running with sudo these will be
# created with root ownership and can cause problems when cleaning the repository.
export PYTHONDONTWRITEBYTECODE=1
# Export flag to skip the known bug that exists in librados
# Bug is reported on ceph bug tracker with number 24078
export ASAN_OPTIONS=new_delete_type_mismatch=0:disable_coredump=0
export UBSAN_OPTIONS='halt_on_error=1:print_stacktrace=1:abort_on_error=1:disable_coredump=0'
# Export LeakSanitizer option to use suppression file in order to prevent false positives
# and known leaks in external executables or libraries from showing up.
asan_suppression_file="/var/tmp/asan_suppression_file"
rm -rf "$asan_suppression_file" 2> /dev/null || sudo rm -rf "$asan_suppression_file"
cat << EOL >> "$asan_suppression_file"
# ASAN has some bugs around thread_local variables. We have a destructor in place
# to free the thread contexts, but ASAN complains about the leak before those
# destructors have a chance to run. So suppress this one specific leak using
# LSAN_OPTIONS.
leak:spdk_fs_alloc_thread_ctx
# Suppress known leaks in fio project
leak:$CONFIG_FIO_SOURCE_DIR/parse.c
leak:$CONFIG_FIO_SOURCE_DIR/iolog.c
leak:$CONFIG_FIO_SOURCE_DIR/init.c
leak:$CONFIG_FIO_SOURCE_DIR/filesetup.c
leak:fio_memalign
leak:spdk_fio_io_u_init
# Suppress leaks in gperftools-libs from fio
leak:libtcmalloc_minimal.so
# Suppress leaks in libiscsi
leak:libiscsi.so
# Suppress leaks in libcrypto
# Below is caused by openssl 3.0.8 leaks
leak:libcrypto.so
EOL
# Suppress leaks in libfuse3
echo "leak:libfuse3.so" >> "$asan_suppression_file"
export LSAN_OPTIONS=suppressions="$asan_suppression_file"
export DEFAULT_RPC_ADDR="/var/tmp/spdk.sock"
if [ -z "${DEPENDENCY_DIR:-}" ]; then
export DEPENDENCY_DIR=/var/spdk/dependencies
else
export DEPENDENCY_DIR
fi
# Export location of where all the SPDK binaries are
export SPDK_BIN_DIR="$rootdir/build/bin"
export SPDK_EXAMPLE_DIR="$rootdir/build/examples"
# for vhost, vfio-user tests
export QEMU_BIN=${QEMU_BIN:-}
export VFIO_QEMU_BIN=${VFIO_QEMU_BIN:-}
export AR_TOOL=$rootdir/scripts/ar-xnvme-fixer
# For testing nvmes which are attached to some sort of a fanout switch in the CI pool
export UNBIND_ENTIRE_IOMMU_GROUP=${UNBIND_ENTIRE_IOMMU_GROUP:-no}
# pass our valgrind desire on to unittest.sh
if [ $SPDK_RUN_VALGRIND -eq 0 ]; then
export valgrind=''
else
# unset all DEBUGINFOD_* vars that may affect our valgrind instance
unset -v "${!DEBUGINFOD_@}"
fi
if [ "$(uname -s)" = "Linux" ]; then
HUGEMEM=${HUGEMEM:-4096}
export CLEAR_HUGE=yes
if [[ $SPDK_TEST_CRYPTO -eq 1 || $SPDK_TEST_VBDEV_COMPRESS -eq 1 ]]; then
# Make sure that memory is distributed across all NUMA nodes - by default, all goes to
# node0, but if QAT devices are attached to a different node, all of their VFs will end
# up under that node too and memory needs to be available there for the tests.
export HUGE_EVEN_ALLOC=yes
fi
MAKE="make"
MAKEFLAGS=${MAKEFLAGS:--j$(nproc)}
elif [ "$(uname -s)" = "FreeBSD" ]; then
MAKE="gmake"
MAKEFLAGS=${MAKEFLAGS:--j$(sysctl -a | grep -E -i 'hw.ncpu' | awk '{print $2}')}
# FreeBSD runs a much more limited set of tests, so keep the default 2GB.
HUGEMEM=${HUGEMEM:-2048}
elif [ "$(uname -s)" = "Windows" ]; then
MAKE="make"
MAKEFLAGS=${MAKEFLAGS:--j$(nproc)}
# Keep the default 2GB for Windows.
HUGEMEM=${HUGEMEM:-2048}
else
echo "Unknown OS \"$(uname -s)\""
exit 1
fi
export HUGEMEM=$HUGEMEM
if [ -z "${output_dir:-}" ]; then
mkdir -p "$rootdir/../output"
export output_dir="$rootdir/../output"
fi
TEST_MODE=
for i in "$@"; do
case "$i" in
--iso)
TEST_MODE=iso
;;
--transport=*)
TEST_TRANSPORT="${i#*=}"
;;
--sock=*)
TEST_SOCK="${i#*=}"
;;
esac
done
# start rpc.py coprocess if it's not started yet
if [[ -z ${RPC_PIPE_PID:-} ]] || ! kill -0 "$RPC_PIPE_PID" &> /dev/null; then
# Include list to all known plugins we use in the tests
PYTHONPATH+=":$rootdir/test/rpc_plugins"
coproc RPC_PIPE { PYTHONPATH="$PYTHONPATH" "$rootdir/scripts/rpc.py" --server; }
exec {RPC_PIPE_OUTPUT}<&${RPC_PIPE[0]} {RPC_PIPE_INPUT}>&${RPC_PIPE[1]}
# all descriptors will automatically close together with this bash
# process, this will make rpc.py stop reading and exit gracefully
fi
# Find a filesystem with at least $1 bytes free for test scratch data and
# export its directory as SPDK_TEST_STORAGE.
# Arguments: $1 - requested size in bytes (64M headroom is added).
# Globals read: testdir (no-op when unset), ADD_TEST_STORAGE (extra
#   candidates), DEDICATED_TEST_STORAGE (overrides all candidates).
# Returns: 0 and exports SPDK_TEST_STORAGE on success, 1 otherwise.
function set_test_storage() {
	[[ -v testdir ]] || return 0
	local requested_size=$1 # bytes
	local mount target_dir
	local -A mounts fss sizes avails uses
	local source fs size avail mount use
	local storage_fallback storage_candidates
	# Candidates are tried in order: the test dir itself, then fallback
	# temp locations (-u: path only, nothing is created yet).
	storage_fallback=$(mktemp -udt spdk.XXXXXX)
	storage_candidates=(
		"$testdir"
		"$storage_fallback/tests/${testdir##*/}"
		"$storage_fallback"
	)
	if [[ -n ${ADD_TEST_STORAGE:-} ]]; then
		# List of dirs|mounts separated by whitespaces
		storage_candidates+=($ADD_TEST_STORAGE)
	fi
	if [[ -n ${DEDICATED_TEST_STORAGE:-} ]]; then
		# Single, dedicated dir|mount
		storage_candidates=("$DEDICATED_TEST_STORAGE")
	fi
	mkdir -p "${storage_candidates[@]}"
	# add some headroom - 64M
	requested_size=$((requested_size + (64 << 20)))
	# Build per-mountpoint maps from df output; df reports KiB, converted
	# to bytes here.
	while read -r source fs size use avail _ mount; do
		mounts["$mount"]=$source fss["$mount"]=$fs
		avails["$mount"]=$((avail * 1024)) sizes["$mount"]=$((size * 1024))
		uses["$mount"]=$((use * 1024))
	done < <(df -T | grep -v Filesystem)
	printf '* Looking for test storage...\n' >&2
	local target_space new_size
	for target_dir in "${storage_candidates[@]}"; do
		# FreeBSD's df is lacking the --output arg
		# mount=$(df --output=target "$target_dir" | grep -v "Mounted on")
		mount=$(df "$target_dir" | awk '$1 !~ /Filesystem/{print $6}')
		target_space=${avails["$mount"]}
		if ((target_space == 0 || target_space < requested_size)); then
			continue
		fi
		if ((target_space >= requested_size)); then
			# For in-memory fs, and / make sure our requested size won't fill most of the space.
			if [[ ${fss["$mount"]} == tmpfs ]] || [[ ${fss["$mount"]} == ramfs ]] || [[ $mount == / ]]; then
				new_size=$((uses["$mount"] + requested_size))
				# Reject the candidate if usage would exceed 95%.
				if ((new_size * 100 / sizes["$mount"] > 95)); then
					continue
				fi
			fi
		fi
		export SPDK_TEST_STORAGE=$target_dir
		printf '* Found test storage at %s\n' "$SPDK_TEST_STORAGE" >&2
		return 0
	done
	printf '* Test storage is not available\n'
	return 1
}
# Build and echo the ./configure argument string for this test run, derived
# from SPDK_TEST_* / SPDK_RUN_* environment flags and from probing the host
# for headers, tools and CPU vendor. Output is consumed via command
# substitution by the callers.
function get_config_params() {
xtrace_disable
config_params='--enable-debug --enable-werror'
# for options with dependencies but no test flag, set them here
if [ -f /usr/include/infiniband/verbs.h ]; then
config_params+=' --with-rdma'
fi
if [ $SPDK_TEST_USDT -eq 1 ]; then
config_params+=" --with-usdt"
fi
# idxd is only available on Intel CPUs; vendor detection differs per OS.
if [ $(uname -s) == "FreeBSD" ]; then
intel="hw.model: Intel"
cpu_vendor=$(sysctl -a | grep hw.model | cut -c 1-15)
else
intel="GenuineIntel"
cpu_vendor=$(grep -i 'vendor' /proc/cpuinfo --max-count=1)
fi
if [[ "$cpu_vendor" != *"$intel"* ]]; then
config_params+=" --without-idxd"
else
config_params+=" --with-idxd"
fi
if [[ -d $CONFIG_FIO_SOURCE_DIR ]]; then
config_params+=" --with-fio=$CONFIG_FIO_SOURCE_DIR"
fi
if [ -d ${DEPENDENCY_DIR}/vtune_codes ]; then
config_params+=' --with-vtune='${DEPENDENCY_DIR}'/vtune_codes'
fi
if [ -d /usr/include/iscsi ]; then
# Extract LIBISCSI_API_VERSION from the header; the initiator needs >= 20150621.
[[ $(< /usr/include/iscsi/iscsi.h) =~ "define LIBISCSI_API_VERSION ("([0-9]+)")" ]] \
&& libiscsi_version=${BASH_REMATCH[1]}
if ((libiscsi_version >= 20150621)); then
config_params+=' --with-iscsi-initiator'
fi
fi
if [[ $SPDK_TEST_UNITTEST -eq 0 && \
$SPDK_TEST_SCANBUILD -eq 0 && -z \
${SPDK_TEST_AUTOBUILD:-} ]]; then
config_params+=' --disable-unit-tests'
fi
if [ $SPDK_TEST_NVME_CUSE -eq 1 ]; then
config_params+=' --with-nvme-cuse'
fi
# vbdev-compress additionally needs libpmem, nasm >= 2.14 and ISA-L enabled.
if [ -f /usr/include/libpmem.h ] && [ $SPDK_TEST_VBDEV_COMPRESS -eq 1 ]; then
if ge "$(nasm --version | awk '{print $3}')" 2.14 && [[ $SPDK_TEST_ISAL -eq 1 ]]; then
config_params+=' --with-vbdev-compress --with-dpdk-compressdev'
fi
fi
if [ -d /usr/include/rbd ] && [ -d /usr/include/rados ] && [ $SPDK_TEST_RBD -eq 1 ]; then
config_params+=' --with-rbd'
fi
# for options with no required dependencies, just test flags, set them here
if [ $SPDK_TEST_CRYPTO -eq 1 ]; then
config_params+=' --with-crypto'
fi
if [ $SPDK_TEST_OCF -eq 1 ]; then
config_params+=" --with-ocf"
fi
if [ $SPDK_RUN_UBSAN -eq 1 ]; then
config_params+=' --enable-ubsan'
fi
if [ $SPDK_RUN_ASAN -eq 1 ]; then
config_params+=' --enable-asan'
fi
if [ "$(uname -s)" = "Linux" ]; then
config_params+=' --enable-coverage'
fi
if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
if [[ -d /usr/include/fuse3 ]] || [[ -d /usr/local/include/fuse3 ]]; then
config_params+=' --with-fuse'
fi
fi
if [[ -f /usr/include/liburing/io_uring.h && -f /usr/include/linux/ublk_cmd.h ]]; then
config_params+=' --with-ublk'
fi
if [ $SPDK_TEST_RAID5 -eq 1 ]; then
config_params+=' --with-raid5f'
fi
if [ $SPDK_TEST_VFIOUSER -eq 1 ] || [ $SPDK_TEST_VFIOUSER_QEMU -eq 1 ] || [ $SPDK_TEST_SMA -eq 1 ]; then
config_params+=' --with-vfio-user'
fi
# Check whether liburing library header exists
if [ -f /usr/include/liburing/io_uring.h ] && [ $SPDK_TEST_URING -eq 1 ]; then
config_params+=' --with-uring'
fi
if [ -n "${SPDK_RUN_EXTERNAL_DPDK:-}" ]; then
config_params+=" --with-dpdk=$SPDK_RUN_EXTERNAL_DPDK"
fi
if [[ $SPDK_TEST_SMA -eq 1 ]]; then
config_params+=' --with-sma'
config_params+=' --with-crypto'
fi
if [ -f /usr/include/daos.h ] && [ $SPDK_TEST_DAOS -eq 1 ]; then
config_params+=' --with-daos'
fi
# Make the xnvme module available for the tests
if [[ $SPDK_TEST_XNVME -eq 1 ]]; then
config_params+=' --with-xnvme'
fi
if [[ $SPDK_TEST_FUZZER -eq 1 ]]; then
config_params+=" $(get_fuzzer_target_config)"
fi
if [[ $SPDK_TEST_NVMF_MDNS -eq 1 ]]; then
config_params+=' --with-avahi'
fi
echo "$config_params"
xtrace_restore
}
# Map the selected fuzzer targets to extra ./configure flags.
# Currently only the "vfio" target needs one (--with-vfio-user).
# Prints the space-joined flags, or nothing when no target needs any.
function get_fuzzer_target_config() {
    local -A fuzzer_targets_to_config=()
    # Declare as an array up front: "${#config[@]}" on a completely unset
    # variable errors out under `set -u` with bash < 4.4.
    local -a config=()
    local target
    fuzzer_targets_to_config["vfio"]="--with-vfio-user"
    for target in $(get_fuzzer_targets); do
        [[ -n ${fuzzer_targets_to_config["$target"]:-} ]] || continue
        config+=("${fuzzer_targets_to_config["$target"]}")
    done
    if ((${#config[@]} > 0)); then
        echo "${config[*]}"
    fi
}
# Resolve the list of LLVM fuzzer targets to run and print it space-joined.
# SPDK_TEST_FUZZER_TARGET (comma-separated) overrides the default, which is
# every fuzzer directory shipped under $rootdir/test/fuzz/llvm/.
function get_fuzzer_targets() {
    local targets=()
    if [[ -z ${SPDK_TEST_FUZZER_TARGET:-} ]]; then
        targets=("$rootdir/test/fuzz/llvm/"*)
        targets=("${targets[@]##*/}")
    else
        IFS="," read -ra targets <<< "$SPDK_TEST_FUZZER_TARGET"
    fi
    echo "${targets[*]}"
}
# Send one or more RPC commands to the long-running rpc.py process over the
# RPC_PIPE_INPUT/RPC_PIPE_OUTPUT fds and relay its responses to stdout.
# With arguments: sends "$@" as a single command. With no arguments and
# redirected stdin: sends every stdin line as a separate command.
# Returns non-zero if any command's status marker was non-zero.
function rpc_cmd() {
xtrace_disable
local rsp rc=1
local stdin cmd cmds_number=0 status_number=0 status
if (($#)); then
cmds_number=1
echo "$@" >&$RPC_PIPE_INPUT
elif [[ ! -t 0 ]]; then
mapfile -t stdin <&0
cmds_number=${#stdin[@]}
printf '%s\n' "${stdin[@]}" >&$RPC_PIPE_INPUT
else
# Interactive terminal and no args - nothing to send.
return 0
fi
# rpc.py emits "**STATUS=<rc>" after each command; everything else is the
# command's actual output. Give up if nothing arrives within 15s.
while read -t 15 -ru $RPC_PIPE_OUTPUT rsp; do
if [[ $rsp == "**STATUS="* ]]; then
# Keyed by the numeric rc, so distinct failures collapse into one entry.
status[${rsp#*=}]=$rsp
if ((++status_number == cmds_number)); then
break
fi
continue
fi
echo "$rsp"
done
# ${!status[*]} expands to the set of rc keys seen; success iff it's just "0".
rc=${!status[*]}
xtrace_restore
[[ $rc == 0 ]]
}
# Run an RPC (args after $1) and extract a fixed set of fields from the first
# element of its JSON reply into the global assoc array jq_out.
# $1 names one of the field-list arrays below ("lvs" or "bdev"), passed by
# name and expanded indirectly. Returns 1 if the list name is unknown or no
# fields could be parsed.
function rpc_cmd_simple_data_json() {
local elems="$1[@]" elem
local -gA jq_out=()
local jq val
# Field list for bdev_lvol_get_lvstores-style replies.
local lvs=(
"uuid"
"name"
"base_bdev"
"total_data_clusters"
"free_clusters"
"block_size"
"cluster_size"
)
# Field list for bdev_get_bdevs-style replies.
local bdev=(
"name"
"aliases[0]"
"block_size"
"num_blocks"
"uuid"
"product_name"
"supported_io_types.read"
"supported_io_types.write"
"driver_specific.lvol.clone"
"driver_specific.lvol.base_snapshot"
"driver_specific.lvol.esnap_clone"
"driver_specific.lvol.external_snapshot_name"
)
[[ -v $elems ]] || return 1
# Build one jq program printing '<field> <value>' per line for all fields.
for elem in "${!elems}"; do
jq="${jq:+$jq,\"\\n\",}\"$elem\",\" \",.[0].$elem"
done
jq+=',"\n"'
shift
while read -r elem val; do
jq_out["$elem"]=$val
done < <(rpc_cmd "$@" | jq -jr "$jq")
((${#jq_out[@]} > 0)) || return 1
}
# Invert the exit status of "$@" in an errexit/ERR-trap friendly way.
# Unlike the shell's `!` prefix, returning non-zero here still triggers ERR.
# Semantics:
#   - command succeeded                    -> NOT fails
#   - command died on a core-type signal   -> NOT fails
#   - command failed "normally"            -> NOT succeeds
#   - EXIT_STATUS set & status differs     -> NOT fails
# This naively assumes that the process doesn't exit with > 128 on its own.
function NOT() {
    local status=0
    "$@" || status=$?
    if ((status > 128)); then
        # 128+N means death by signal N; strip the offset.
        status=$((status & ~128))
        case "$status" in
            # SIGQUIT, SIGILL, SIGABRT, SIGFPE, SIGKILL, SIGSEGV:
            # treat a core-style death as a failure of NOT itself.
            3 | 4 | 6 | 8 | 9 | 11) status=0 ;;
            *) status=1 ;;
        esac
    elif [[ -n ${EXIT_STATUS:-} ]] && ((status != EXIT_STATUS)); then
        status=0
    fi
    # Invert: non-zero -> success (0), zero -> failure (1, fires ERR).
    ((!status == 0))
}
# Track nested timing spans. $1 is "enter" or "exit", $2 the span name.
# "enter" pushes the current timestamp and name onto ';'-separated stacks;
# "exit" pops them and appends "<nested.name> <elapsed>" to
# $output_dir/timing.txt, subtracting time already attributed to children.
function timing() {
direction="$1"
testname="$2"
now=$(date +%s)
if [ "$direction" = "enter" ]; then
export timing_stack="${timing_stack:-};${now}"
export test_stack="${test_stack:-};${testname}"
else
touch "$output_dir/timing.txt"
# Sum up time already logged by this span's children so it isn't counted twice.
child_time=$(grep "^${test_stack:1};" $output_dir/timing.txt | awk '{s+=$2} END {print s}')
# Pop the last ';'-delimited entry off each stack.
start_time=$(echo "$timing_stack" | sed -e 's@^.*;@@')
timing_stack=$(echo "$timing_stack" | sed -e 's@;[^;]*$@@')
elapsed=$((now - start_time - child_time))
echo "${test_stack:1} $elapsed" >> $output_dir/timing.txt
test_stack=$(echo "$test_stack" | sed -e 's@;[^;]*$@@')
fi
}
# Run "$@" and print only its wall-clock runtime (seconds, 2 decimals),
# so callers can do: ts=$(timing_cmd some_cmd). Runs in a subshell so the
# fd/TIMEFORMAT changes don't leak.
function timing_cmd() (
# The use-case here is this: ts=$(timing_cmd echo bar). Since stdout is always redirected
# to a pipe handling the $(), lookup the stdin's device and determine if it's sane to send
# cmd's output to it. If not, just null it.
[[ -t 0 ]] && exec {cmd_out}>&0 || exec {cmd_out}> /dev/null
local time=0 TIMEFORMAT=%2R # seconds
# We redirect cmd's std{out,err} to a separate fd dup'ed to stdin's device (or /dev/null) to
# catch only output from the time builtin - output from the actual cmd would be still visible,
# but $() will return just the time's data, hence making it possible to just do:
# time_of_super_verbose_cmd=$(timing_cmd super_verbose_cmd)
time=$({ time "$@" >&"$cmd_out" 2>&1; } 2>&1)
echo "$time"
)
# Open a timing span named $1 (xtrace suppressed during the bookkeeping).
function timing_enter() {
xtrace_disable
timing "enter" "$1"
xtrace_restore
}
# Close the timing span named $1, logging its elapsed time (see timing()).
function timing_exit() {
xtrace_disable
timing "exit" "$1"
xtrace_restore
}
# Render $output_dir/timing.txt into an SVG flamegraph, if the FlameGraph
# tool is installed; silently does nothing otherwise.
function timing_finish() {
    flamegraph='/usr/local/FlameGraph/flamegraph.pl'
    if [ -x "$flamegraph" ]; then
        # Quote the $output_dir paths so a directory with spaces doesn't
        # split into multiple arguments / break the redirection.
        "$flamegraph" \
            --title 'Build Timing' \
            --nametype 'Step:' \
            --countname seconds \
            "$output_dir/timing.txt" \
            > "$output_dir/timing.svg"
    fi
}
# Collect the names of every test invoked via run_test across the repo and
# write the sorted, de-quoted list to $output_dir/all_tests.txt.
function create_test_list() {
xtrace_disable
# First search all scripts in main SPDK directory.
completion=$(grep -shI -d skip --include="*.sh" -e "run_test " $rootdir/*)
# Follow up with search in test directory recursively.
completion+=$'\n'$(grep -rshI --include="*.sh" --exclude="*autotest_common.sh" -e "run_test " $rootdir/test)
# Drop commented-out hits, keep the run_test argument (test name), strip quotes.
printf "%s" "$completion" | grep -v "#" \
| sed 's/^.*run_test/run_test/' | awk '{print $2}' \
| sed 's/\"//g' | sort > $output_dir/all_tests.txt || true
xtrace_restore
}
# Attach gdb to pid $1 in batch mode, let it continue, and dump a full
# backtrace of all threads on exit. Signals the target normally handles are
# passed through so attaching doesn't perturb it.
function gdb_attach() {
    gdb -q --batch \
        -ex 'handle SIGHUP nostop pass' \
        -ex 'handle SIGQUIT nostop pass' \
        -ex 'handle SIGPIPE nostop pass' \
        -ex 'handle SIGALRM nostop pass' \
        -ex 'handle SIGTERM nostop pass' \
        -ex 'handle SIGUSR1 nostop pass' \
        -ex 'handle SIGUSR2 nostop pass' \
        -ex 'handle SIGCHLD nostop pass' \
        -ex 'set print thread-events off' \
        -ex 'cont' \
        -ex 'thread apply all bt' \
        -ex 'quit' \
        --tty=/dev/stdout \
        -p "$1"
}
# Dump any collected coredump backtraces from $output_dir/coredumps to stdout.
# Returns 1 if any were found (i.e. something crashed), 0 otherwise.
function process_core() {
# Note that this always was racy as we can't really sync with the kernel
# to see if there's any core queued up for writing. We could check if
# collector is running and wait for it explicitly, but it doesn't seem
# to be worth the effort. So assume that if we are being called via
# trap, as in, when some error has occurred, wait up to 10s for any
# potential cores. If we are called just for cleanup at the very end,
# don't wait since all the tests ended successfully, hence having any
# critical cores lying around is unlikely.
((autotest_es != 0)) && sleep 10
local coredumps core
shopt -s nullglob
coredumps=("$output_dir/coredumps/"*.bt.txt)
shopt -u nullglob
((${#coredumps[@]} > 0)) || return 0
chmod -R a+r "$output_dir/coredumps"
for core in "${coredumps[@]}"; do
cat <<- BT
##### CORE BT ${core##*/} #####
$(<"$core")
--
BT
done
return 1
}
# Archive /dev/shm files for a given owner into $output_dir.
# $1 - "--pid" (then $2 is a pid) or any other flag (then $2 is used as-is,
#      e.g. a shared memory id).
# Returns 1 when no matching files exist.
function process_shm() {
    type=$1
    id=$2
    if [ "$type" = "--pid" ]; then
        id="pid${id}"
    fi
    shm_files=$(find /dev/shm -name "*.${id}" -printf "%f\n")
    if [[ -z ${shm_files:-} ]]; then
        echo "SHM File for specified PID or shared memory id: ${id} not found!"
        return 1
    fi
    for n in $shm_files; do
        # Quote the paths so an output_dir containing spaces doesn't split.
        tar -C /dev/shm/ -cvzf "$output_dir/${n}_shm.tar.gz" "${n}"
    done
    return 0
}
# Parameters:
# $1 - process pid
# $2 - rpc address (optional)
# $3 - max retries (optional)
# Wait until the process answers rpc_get_methods on its RPC socket, polling
# every 0.5s. Fails fast if the process dies, and after $3 retries otherwise.
function waitforlisten() {
if [ -z "${1:-}" ]; then
exit 1
fi
local rpc_addr="${2:-$DEFAULT_RPC_ADDR}"
local max_retries=${3:-100}
echo "Waiting for process to start up and listen on UNIX domain socket $rpc_addr..."
# turn off trace for this loop
xtrace_disable
local ret=0
local i
for ((i = max_retries; i != 0; i--)); do
# if the process is no longer running, then exit the script
# since it means the application crashed
if ! kill -s 0 $1; then
echo "ERROR: process (pid: $1) is no longer running"
ret=1
break
fi
if $rootdir/scripts/rpc.py -t 1 -s "$rpc_addr" rpc_get_methods &> /dev/null; then
break
fi
sleep 0.5
done
xtrace_restore
if ((i == 0)); then
echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$rpc_addr'"
ret=1
fi
return $ret
}
# Wait until nbd device $1 both appears in /proc/partitions and is actually
# readable (first 4K block returns data). Returns 1 on timeout.
function waitfornbd() {
local nbd_name=$1
local i
for ((i = 1; i <= 20; i++)); do
if grep -q -w $nbd_name /proc/partitions; then
break
else
sleep 0.1
fi
done
# The nbd device is now recognized as a block device, but there can be
# a small delay before we can start I/O to that block device. So loop
# here trying to read the first block of the nbd block device to a temp
# file. Note that dd returns success when reading an empty file, so we
# need to check the size of the output file instead.
for ((i = 1; i <= 20; i++)); do
dd if=/dev/$nbd_name of="$SPDK_TEST_STORAGE/nbdtest" bs=4096 count=1 iflag=direct
size=$(stat -c %s "$SPDK_TEST_STORAGE/nbdtest")
rm -f "$SPDK_TEST_STORAGE/nbdtest"
if [ "$size" != "0" ]; then
return 0
else
sleep 0.1
fi
done
return 1
}
# Wait for bdev $1 to be reported by the target. Optional $2 overrides the
# RPC-side wait timeout in milliseconds (default: 2000).
function waitforbdev() {
    local bdev_name=$1
    local bdev_timeout=$2
    local i
    [[ -z ${bdev_timeout:-} ]] && bdev_timeout=2000 # ms
    # Let any in-flight bdev examination settle first.
    $rpc_py bdev_wait_for_examine
    if ! $rpc_py bdev_get_bdevs -b $bdev_name -t $bdev_timeout; then
        return 1
    fi
    return 0
}
# Create filesystem $1 on device $2, retrying once per second for up to 15
# attempts (the device may transiently be busy right after setup).
function make_filesystem() {
    local fstype=$1
    local dev_name=$2
    local i=0
    local force
    # mkfs.ext4 spells its force flag -F; xfs/btrfs use -f.
    # Quote $fstype: an empty value must not collapse the test to "[ = ext4 ]".
    if [ "$fstype" = ext4 ]; then
        force=-F
    else
        force=-f
    fi
    while ! mkfs.${fstype} $force ${dev_name}; do
        if [ $i -ge 15 ]; then
            return 1
        fi
        i=$((i + 1))
        sleep 1
    done
    return 0
}
# Terminate the process with pid $1 and wait for it to exit. If the pid is a
# sudo wrapper, kill its (single) child instead - the actual application.
function killprocess() {
# $1 = process pid
if [ -z "${1:-}" ]; then
return 1
fi
if kill -0 $1; then
# Resolve the process name; ps flags differ between Linux and BSD.
if [ $(uname) = Linux ]; then
process_name=$(ps --no-headers -o comm= $1)
else
process_name=$(ps -c -o command $1 | tail -1)
fi
if [ "$process_name" = "sudo" ]; then
# kill the child process, which is the actual app
# (assume $1 has just one child)
local child
child="$(pgrep -P $1)"
echo "killing process with pid $child"
kill $child
else
echo "killing process with pid $1"
kill $1
fi
# wait for the process regardless if its the dummy sudo one
# or the actual app - it should terminate anyway
wait $1
else
# the process is not there anymore
echo "Process with pid $1 is not found"
fi
}
# Best-effort teardown of all iSCSI sessions and node records on this host.
function iscsicleanup() {
echo "Cleaning up iSCSI connection"
iscsiadm -m node --logout || true
iscsiadm -m node -o delete || true
rm -rf /var/lib/iscsi/nodes/*
}
# Stop the platform's iSCSI daemon. Ubuntu packages it as "open-iscsi";
# other distros use "iscsid".
function stop_iscsi_service() {
    local svc=iscsid
    if cat /etc/*-release | grep Ubuntu; then
        svc=open-iscsi
    fi
    service "$svc" stop
}
# Start the platform's iSCSI daemon. Ubuntu packages it as "open-iscsi";
# other distros use "iscsid".
function start_iscsi_service() {
    local svc=iscsid
    if cat /etc/*-release | grep Ubuntu; then
        svc=open-iscsi
    fi
    service "$svc" start
}
# Spin up a test ceph cluster and create an RBD image for the tests.
# $1 - monitor IP address (required); $2 - network namespace to run ceph in
# (optional; must already exist). Exports PG_NUM/RBD_POOL/RBD_NAME.
function rbd_setup() {
# $1 = monitor ip address
# $2 = name of the namespace
if [ -z "${1:-}" ]; then
echo "No monitor IP address provided for ceph"
exit 1
fi
if [ -n "${2:-}" ]; then
if ip netns list | grep "$2"; then
NS_CMD="ip netns exec $2"
else
echo "No namespace $2 exists"
exit 1
fi
fi
# Only proceed when the ceph tooling is actually installed.
if hash ceph; then
export PG_NUM=128
export RBD_POOL=rbd
export RBD_NAME=foo
$NS_CMD $rootdir/scripts/ceph/stop.sh || true
$NS_CMD $rootdir/scripts/ceph/start.sh $1
$NS_CMD ceph osd pool create $RBD_POOL $PG_NUM || true
$NS_CMD rbd create $RBD_NAME --size 1000
fi
}
# Tear down the test ceph cluster (when ceph is installed) and remove its
# backing raw image.
function rbd_cleanup() {
    if ! hash ceph; then
        return 0
    fi
    $rootdir/scripts/ceph/stop.sh || true
    rm -f /var/tmp/ceph_raw.img
}
# Create a DAOS pool ($1) and a POSIX container ($2) for the tests.
# Both names are required; creation failures are ignored (pool/container may
# already exist).
function daos_setup() {
    local pool=${1:-} cont=${2:-}
    if [ -z "$pool" ]; then
        echo "No pool name provided"
        exit 1
    fi
    if [ -z "$cont" ]; then
        echo "No cont name provided"
        exit 1
    fi
    dmg pool create --size=10G "$pool" || true
    daos container create --type=POSIX --label="$cont" "$pool" || true
}
# Best-effort teardown of the DAOS container and pool created by daos_setup.
# Defaults match daos_setup's conventional names.
function daos_cleanup() {
    local pool=${1:-testpool}
    local cont=${2:-testcont}
    daos container destroy -f "$pool" "$cont" || true
    sudo dmg pool destroy -f "$pool" || true
}
# Launch the SPDK stub app (primary DPDK process) with args $1 and wait for
# its /var/run/spdk_stub0 socket so secondary processes can attach.
# Exports stubpid; returns 1 if the stub dies before becoming ready.
function _start_stub() {
# Disable ASLR for multi-process testing. SPDK does support using DPDK multi-process,
# but ASLR can still be unreliable in some cases.
# We will re-enable it again after multi-process testing is complete in kill_stub().
# Save current setting so it can be restored upon calling kill_stub().
_randomize_va_space=$(< /proc/sys/kernel/randomize_va_space)
echo 0 > /proc/sys/kernel/randomize_va_space
$rootdir/test/app/stub/stub $1 &
stubpid=$!
echo Waiting for stub to ready for secondary processes...
while ! [ -e /var/run/spdk_stub0 ]; do
# If stub dies while we wait, bail
[[ -e /proc/$stubpid ]] || return 1
sleep 1s
done
echo done.
}
# Thin wrapper around _start_stub() that surfaces a clear error on failure.
function start_stub() {
    _start_stub "$@" && return 0
    echo "stub failed" >&2
    return 1
}
# Kill the stub app started by start_stub() and restore ASLR.
# Optional $1 is a kill(1) signal argument (e.g. -9) passed straight through.
function kill_stub() {
    if [[ -e /proc/$stubpid ]]; then
        # ${1:-} keeps this safe under `set -u` when no signal was supplied;
        # it must stay unquoted so an absent signal adds no empty argument.
        kill ${1:-} $stubpid
        wait $stubpid
    fi 2> /dev/null || :
    rm -f /var/run/spdk_stub0
    # Re-enable ASLR now that we are done with multi-process testing
    # Note: "1" enables ASLR w/o randomizing data segments, "2" adds data segment
    # randomizing and is the default on all recent Linux kernels
    echo "${_randomize_va_space:-2}" > /proc/sys/kernel/randomize_va_space
}
# Run one named test: $1 is the test name, the rest is the command to run.
# Maintains the dotted test_domain hierarchy, prints START/END banners, times
# the command, and records completion in $output_dir/test_completions.txt.
function run_test() {
if [ $# -le 1 ]; then
echo "Not enough parameters"
echo "usage: run_test test_name test_script [script_params]"
exit 1
fi
xtrace_disable
local test_name="$1"
shift
# Push this test onto the dotted domain path (e.g. "suite.sub.test").
if [ -n "${test_domain:-}" ]; then
export test_domain="${test_domain}.${test_name}"
else
export test_domain="$test_name"
fi
timing_enter $test_name
echo "************************************"
echo "START TEST $test_name"
echo "************************************"
xtrace_restore
time "$@"
xtrace_disable
echo "************************************"
echo "END TEST $test_name"
echo "************************************"
timing_exit $test_name
# Pop this test's name (and its leading dot, if nested) off the domain path.
export test_domain=${test_domain%"$test_name"}
if [ -n "$test_domain" ]; then
export test_domain=${test_domain%?}
fi
if [ -z "${test_domain:-}" ]; then
echo "top_level $test_name" >> $output_dir/test_completions.txt
else
echo "$test_domain $test_name" >> $output_dir/test_completions.txt
fi
xtrace_restore
}
# Announce that a test is being skipped ($1 = reason) and point the user at
# the skipped-tests list.
function skip_run_test_with_warning() {
    printf '%s\n' \
        "WARNING: $1" \
        "Test run may fail if run with autorun.sh" \
        "Please check your $rootdir/test/common/skipped_tests.txt"
}
# Print a source-annotated call-stack backtrace to stdout, including each
# frame's arguments when `shopt -s extdebug` populated BASH_ARGC/BASH_ARGV.
# Intended to run from the ERR trap; no-op unless errexit is active.
function print_backtrace() {
# if errexit is not enabled, don't print a backtrace
[[ "$-" =~ e ]] || return 0
local args=("${BASH_ARGV[@]}")
xtrace_disable
# Reset IFS in case we were called from an environment where it was modified
IFS=" "$'\t'$'\n'
echo "========== Backtrace start: =========="
echo ""
# Frame 0 is print_backtrace itself, so start at 1.
for ((i = 1; i < ${#FUNCNAME[@]}; i++)); do
local func="${FUNCNAME[$i]}"
local line_nr="${BASH_LINENO[$((i - 1))]}"
local src="${BASH_SOURCE[$i]}"
local bt="" cmdline=()
if [[ -f $src ]]; then
# Show 5 lines of context around the offending line, with a "=>" marker.
bt=$(nl -w 4 -ba -nln $src | grep -B 5 -A 5 "^${line_nr}[^0-9]" \
| sed "s/^/   /g" | sed "s/^   $line_nr /=> $line_nr /g")
fi
# If extdebug set the BASH_ARGC[i], try to fetch all the args
if ((BASH_ARGC[i] > 0)); then
# Use argc as index to reverse the stack
local argc=${BASH_ARGC[i]} arg
for arg in "${args[@]::BASH_ARGC[i]}"; do
cmdline[argc--]="[\"$arg\"]"
done
args=("${args[@]:BASH_ARGC[i]}")
fi
echo "in $src:$line_nr -> $func($(
IFS=","
printf '%s\n' "${cmdline[*]:-[]}"
))"
echo "     ..."
echo "${bt:-backtrace unavailable}"
echo "     ..."
done
echo ""
echo "========== Backtrace end =========="
xtrace_restore
return 0
}
# Wait until lsblk shows the expected number of block devices whose serial
# matches $1. $2 (optional, default 1) is the expected device count.
# Returns 1 after ~15 attempts.
function waitforserial() {
local i=0
local nvme_device_counter=1 nvme_devices=0
if [[ -n "${2:-}" ]]; then
nvme_device_counter=$2
fi
# Wait initially for min 2s to make sure all devices are ready for use.
sleep 2
while ((i++ <= 15)); do
nvme_devices=$(lsblk -l -o NAME,SERIAL | grep -c "$1")
((nvme_devices == nvme_device_counter)) && return 0
# More matches than expected is suspicious - log it but keep polling.
if ((nvme_devices > nvme_device_counter)); then
echo "$nvme_device_counter device(s) expected, found $nvme_devices" >&2
fi
echo "Waiting for devices"
sleep 1
done
return 1
}
# Wait (up to ~15s) until no block device with serial $1 is listed by lsblk.
# Returns 1 if the device is still present after the timeout.
function waitforserial_disconnect() {
    local i=0
    # Use the same flat listing (-l) here as in the final check below, so the
    # polling loop and the verification grep against identical output. The
    # original loop omitted -l and thus matched lsblk's tree-formatted view.
    while lsblk -l -o NAME,SERIAL | grep -q -w "$1"; do
        [ $i -lt 15 ] || break
        i=$((i + 1))
        echo "Waiting for disconnect devices"
        sleep 1
    done
    if lsblk -l -o NAME,SERIAL | grep -q -w "$1"; then
        return 1
    fi
    return 0
}
# Poll (up to ~15s) for block device $1 to appear in lsblk's flat listing.
# Returns 1 if it never shows up.
function waitforblk() {
    local attempt=0
    until lsblk -l -o NAME | grep -q -w $1; do
        [ $attempt -lt 15 ] || break
        attempt=$((attempt + 1))
        sleep 1
    done
    # Final authoritative check; grep -q's status is the function's status.
    lsblk -l -o NAME | grep -q -w $1
}
# Poll (up to ~15s) until block device $1 disappears from lsblk's flat
# listing. Returns 1 if it is still present after the timeout.
function waitforblk_disconnect() {
    local attempt=0
    while lsblk -l -o NAME | grep -q -w $1; do
        [ $attempt -lt 15 ] || break
        attempt=$((attempt + 1))
        sleep 1
    done
    # Success iff the device is gone now.
    ! lsblk -l -o NAME | grep -q -w $1
}
# Poll (up to ~20s, in 0.1s steps) for path $1 to exist.
# Returns 0 once it appears, 1 on timeout.
function waitforfile() {
    local i=0
    # Quote "$1" so paths with spaces or glob characters are tested literally.
    while [ ! -e "$1" ]; do
        [ $i -lt 200 ] || break
        i=$((i + 1))
        sleep 0.1
    done
    if [ ! -e "$1" ]; then
        return 1
    fi
    return 0
}
# Generate a fio job file.
# $1 - output config path (must not already exist)
# $2 - workload: "verify", "trim", or any fio rw= value (default: randrw)
# $3 - bdev type; "AIO" adds serialize_overlap for verify workloads
# $4 - env_context string passed through to fio's [global] section
function fio_config_gen() {
local config_file=$1
local workload=$2
local bdev_type=$3
local env_context=$4
local fio_dir=$CONFIG_FIO_SOURCE_DIR
if [ -e "$config_file" ]; then
echo "Configuration File Already Exists!: $config_file"
return 1
fi
if [ -z "${workload:-}" ]; then
workload=randrw
fi
if [ -n "$env_context" ]; then
env_context="env_context=$env_context"
fi
touch $1
cat > $1 << EOL
[global]
thread=1
$env_context
group_reporting=1
direct=1
norandommap=1
percentile_list=50:99:99.9:99.99:99.999
time_based=1
ramp_time=0
EOL
if [ "$workload" == "verify" ]; then
cat <<- EOL >> $config_file
verify=sha1
verify_backlog=1024
rw=randwrite
EOL
# To avoid potential data race issue due to the AIO device
# flush mechanism, add the flag to serialize the writes.
# This is to fix the intermittent IO failure issue of #935
if [ "$bdev_type" == "AIO" ]; then
if [[ $($fio_dir/fio --version) == *"fio-3"* ]]; then
echo "serialize_overlap=1" >> $config_file
fi
fi
elif [ "$workload" == "trim" ]; then
echo "rw=trimwrite" >> $config_file
else
echo "rw=$workload" >> $config_file
fi
}
# Run fio with an SPDK ioengine plugin. $1 is the plugin .so path; the rest
# are fio arguments. If the plugin was built with ASAN, the matching
# sanitizer runtime is LD_PRELOADed ahead of it so fio can load the plugin.
function fio_plugin() {
# Setup fio binary cmd line
local fio_dir=$CONFIG_FIO_SOURCE_DIR
# gcc and clang uses different sanitizer libraries
local sanitizers=(libasan libclang_rt.asan)
local plugin=$1
shift
local asan_lib=
for sanitizer in "${sanitizers[@]}"; do
asan_lib=$(ldd $plugin | grep $sanitizer | awk '{print $3}')
if [[ -n "${asan_lib:-}" ]]; then
break
fi
done
# Preload the sanitizer library to fio if fio_plugin was compiled with it
LD_PRELOAD="$asan_lib $plugin" "$fio_dir"/fio "$@"
}
# Run fio against SPDK's bdev ioengine plugin.
function fio_bdev() {
fio_plugin "$rootdir/build/fio/spdk_bdev" "$@"
}
# Run fio against SPDK's nvme ioengine plugin.
function fio_nvme() {
fio_plugin "$rootdir/build/fio/spdk_nvme" "$@"
}
# Print the free space, in MiB, of the lvolstore whose uuid is $1, as
# reported by the bdev_lvol_get_lvstores RPC.
function get_lvs_free_mb() {
    local lvs_uuid=$1
    local lvs_info
    local free_clusters
    local cluster_size
    lvs_info=$($rpc_py bdev_lvol_get_lvstores)
    free_clusters=$(jq ".[] | select(.uuid==\"$lvs_uuid\") .free_clusters" <<< "$lvs_info")
    cluster_size=$(jq ".[] | select(.uuid==\"$lvs_uuid\") .cluster_size" <<< "$lvs_info")
    # Change to MB's. NOTE(review): free_mb is deliberately not local - it
    # appears some callers read the global directly; confirm before scoping.
    free_mb=$((free_clusters * cluster_size / 1024 / 1024))
    echo "$free_mb"
}
# Print the size, in MiB, of bdev $1 (block_size * num_blocks), as reported
# by the bdev_get_bdevs RPC.
function get_bdev_size() {
    local bdev_name=$1
    local bdev_info
    local block_size
    local num_blocks
    bdev_info=$($rpc_py bdev_get_bdevs -b $bdev_name)
    block_size=$(jq ".[] .block_size" <<< "$bdev_info")
    num_blocks=$(jq ".[] .num_blocks" <<< "$bdev_info")
    # Change to MB's. NOTE(review): bdev_size is deliberately not local -
    # it appears some callers read the global directly; confirm before scoping.
    bdev_size=$((block_size * num_blocks / 1024 / 1024))
    echo "$bdev_size"
}
# End-of-run cleanup: collect stray cores, kill leftover SPDK/vhost/qemu
# processes, reset device bindings, restore core_pattern, purge fallback
# storage, and on failure dump extensive process/kernel diagnostics.
# Preserves and returns the caller's exit status (possibly bumped to 1).
function autotest_cleanup() {
local autotest_es=$?
xtrace_disable
# Slurp at_app_exit() so we can kill all lingering vhost and qemu processes
# in one swing. We do this in a subshell as vhost/common.sh is too eager to
# do some extra work which we don't care about in this context.
# shellcheck source=/dev/null
vhost_reap() (source "$rootdir/test/vhost/common.sh" &> /dev/null || return 0 && at_app_exit)
# catch any stray core files and kill all remaining SPDK processes. Update
# autotest_es in case autotest reported success but cores and/or processes
# were left behind regardless.
process_core || autotest_es=1
reap_spdk_processes || autotest_es=1
vhost_reap || autotest_es=1
$rootdir/scripts/setup.sh reset
$rootdir/scripts/setup.sh cleanup
if [ $(uname -s) = "Linux" ]; then
modprobe -r uio_pci_generic
fi
rm -rf "$asan_suppression_file"
if [[ -n ${old_core_pattern:-} ]]; then
echo "$old_core_pattern" > /proc/sys/kernel/core_pattern
fi
if [[ -e /proc/$udevadm_pid/status ]]; then
kill "$udevadm_pid" || :
fi
# Remove any spdk.XXXXXX fallback storage dirs created by set_test_storage().
shopt -s nullglob
local storage_fallback_purge=("${TMPDIR:-/tmp}/spdk."??????)
shopt -u nullglob
if ((${#storage_fallback_purge[@]} > 0)); then
rm -rf "${storage_fallback_purge[@]}"
fi
if ((autotest_es)); then
if [[ $(uname) == FreeBSD ]]; then
ps aux
elif [[ $(uname) == Linux ]]; then
# Get more detailed view
grep . /proc/[0-9]*/status
# Dump some extra info into kernel log
echo "######## Autotest Cleanup Dump ########" > /dev/kmsg
# Show cpus backtraces
echo l > /proc/sysrq-trigger
# Show mem usage
echo m > /proc/sysrq-trigger
# show task states
echo t > /proc/sysrq-trigger
# show blocked tasks
echo w > /proc/sysrq-trigger
fi > "$output_dir/proc_list.txt" 2>&1 || :
fi
xtrace_restore
return $autotest_es
}
# On FreeBSD, refresh the DPDK kernel modules (contigmem.ko, nic_uio.ko) in
# both /boot/modules and /boot/kernel, sourcing them from the external DPDK
# tree when SPDK_RUN_EXTERNAL_DPDK is set. No-op on other platforms.
function freebsd_update_contigmem_mod() {
    if [ $(uname) = FreeBSD ]; then
        kldunload contigmem.ko || true
        local kmod_dir mod dest
        if [ -n "${SPDK_RUN_EXTERNAL_DPDK:-}" ]; then
            kmod_dir=$SPDK_RUN_EXTERNAL_DPDK/kmod
        else
            kmod_dir=$rootdir/dpdk/build/kmod
        fi
        for mod in contigmem.ko nic_uio.ko; do
            for dest in /boot/modules/ /boot/kernel/; do
                cp -f "$kmod_dir/$mod" "$dest"
            done
        done
    fi
}
# Bump FreeBSD's kern.ipc.maxsockbuf to 4MB when it's lower. No-op elsewhere.
function freebsd_set_maxsock_buf() {
# FreeBSD needs 4MB maxsockbuf size to pass socket unit tests.
# Otherwise tests fail due to ENOBUFS when trying to do setsockopt(SO_RCVBUF|SO_SNDBUF).
# See https://github.com/spdk/spdk/issues/2943
if [[ $(uname) = FreeBSD ]] && (($(sysctl -n kern.ipc.maxsockbuf) < 4194304)); then
sysctl kern.ipc.maxsockbuf=4194304
fi
}
# Print the kernel block device names (e.g. nvme0n1) that sit on the PCI
# device with BDF $1, one per line. Populates the global blkname array.
function get_nvme_name_from_bdf() {
    blkname=()
    nvme_devs=$(lsblk -d --output NAME | grep "^nvme") || true
    if [ -z "${nvme_devs:-}" ]; then
        return
    fi
    for dev in $nvme_devs; do
        # Resolve the device's PCI address; sysfs layout differs across
        # kernel versions, hence the two readlink attempts. Paths are quoted
        # so unexpected characters can't split or glob.
        link_name=$(readlink "/sys/block/$dev/device/device") || true
        if [ -z "${link_name:-}" ]; then
            link_name=$(readlink "/sys/block/$dev/device")
        fi
        bdf=$(basename "$link_name")
        if [ "$bdf" = "$1" ]; then
            blkname+=("$dev")
        fi
    done
    printf '%s\n' "${blkname[@]}"
}
# Map PCI BDF $1 to its nvme controller name (e.g. "nvme0") via sysfs.
# Prints nothing when no controller lives at that address.
function get_nvme_ctrlr_from_bdf() {
    bdf_sysfs_path=$(readlink -f /sys/class/nvme/nvme* | grep "$1/nvme/nvme")
    [[ -n "${bdf_sysfs_path:-}" ]] || return 0
    printf '%s\n' "$(basename $bdf_sysfs_path)"
}
# Get BDF addresses of all NVMe drives currently attached to
# uio-pci-generic or vfio-pci
# (as discovered by scripts/gen_nvme.sh). Exits the shell if none are found.
function get_nvme_bdfs() {
xtrace_disable
bdfs=$(jq -r .config[].params.traddr <<< $($rootdir/scripts/gen_nvme.sh))
if [[ -z ${bdfs:-} ]]; then
echo "No devices to test on!"
exit 1
fi
echo "$bdfs"
xtrace_restore
}
# Same as function above, but just get the first disks BDF address
function get_first_nvme_bdf() {
head -1 <<< "$(get_nvme_bdfs)"
}
# For every attached NVMe controller that supports namespace management,
# delete all namespaces and recreate a single 512B-block namespace spanning
# the drive's total capacity. Leaves devices unbound from the kernel driver
# only transiently (setup.sh / setup.sh reset around the discovery).
function nvme_namespace_revert() {
$rootdir/scripts/setup.sh
sleep 1
bdfs=$(get_nvme_bdfs)
$rootdir/scripts/setup.sh reset
for bdf in $bdfs; do
nvme_ctrlr=/dev/$(get_nvme_ctrlr_from_bdf ${bdf})
if [[ -z "${nvme_ctrlr:-}" ]]; then
continue
fi
# Check Optional Admin Command Support for Namespace Management
oacs=$(nvme id-ctrl ${nvme_ctrlr} | grep oacs | cut -d: -f2)
oacs_ns_manage=$((oacs & 0x8))
if [[ "$oacs_ns_manage" -ne 0 ]]; then
# This assumes every NVMe controller contains single namespace,
# encompassing Total NVM Capacity and formatted as 512 block size.
# 512 block size is needed for test/vhost/vhost_boot.sh to
# successfully run.
unvmcap=$(nvme id-ctrl ${nvme_ctrlr} | grep unvmcap | cut -d: -f2)
if [[ "$unvmcap" -eq 0 ]]; then
# All available space already used
continue
fi
tnvmcap=$(nvme id-ctrl ${nvme_ctrlr} | grep tnvmcap | cut -d: -f2)
cntlid=$(nvme id-ctrl ${nvme_ctrlr} | grep cntlid | cut -d: -f2)
blksize=512
size=$((tnvmcap / blksize))
# 0xffffffff selects all namespaces; failures are tolerated because
# the drive may not have any namespace attached yet.
nvme detach-ns ${nvme_ctrlr} -n 0xffffffff -c $cntlid || true
nvme delete-ns ${nvme_ctrlr} -n 0xffffffff || true
nvme create-ns ${nvme_ctrlr} -s ${size} -c ${size} -b ${blksize}
nvme attach-ns ${nvme_ctrlr} -n 1 -c $cntlid
nvme reset ${nvme_ctrlr}
waitforfile "${nvme_ctrlr}n1"
fi
done
}
# Get BDFs based on device ID, such as 0x0a54
# Prints one matching BDF per line (blank output when nothing matches).
function get_nvme_bdfs_by_id() {
    local matched=()
    while read -r bdf; do
        [[ -n $bdf ]] || continue
        device=$(cat /sys/bus/pci/devices/$bdf/device) || true
        [[ "$device" == "$1" ]] && matched+=($bdf)
    done < <(get_nvme_bdfs)
    printf '%s\n' "${matched[@]}"
}
# Revert OPAL (self-encrypting drive) state on all P4510 (0x0a54) NVMe drives
# by attaching each to a temporary spdk_tgt and issuing bdev_nvme_opal_revert.
function opal_revert_cleanup() {
# The OPAL CI tests is only used for P4510 devices.
mapfile -t bdfs < <(get_nvme_bdfs_by_id 0x0a54)
if [[ -z ${bdfs[0]:-} ]]; then
return 0
fi
$SPDK_BIN_DIR/spdk_tgt &
spdk_tgt_pid=$!
waitforlisten $spdk_tgt_pid
bdf_id=0
for bdf in "${bdfs[@]}"; do
$rootdir/scripts/rpc.py bdev_nvme_attach_controller -b "nvme"${bdf_id} -t "pcie" -a ${bdf}
# Ignore if this fails.
$rootdir/scripts/rpc.py bdev_nvme_opal_revert -b "nvme"${bdf_id} -p test || true
((++bdf_id))
done
killprocess $spdk_tgt_pid
}
# Print-and-purge: for every regular file matched by `find "$@"`, print its
# contents wrapped in "--- path ---" markers, then delete it.
function pap() {
while read -r file; do
cat <<- FILE
--- $file ---
$(<"$file")
--- $file ---
FILE
rm -f "$file"
done < <(find "$@" -type f | sort -u)
}
# Print "<pid> <executable path>" for every running process, in a form
# portable across Linux and FreeBSD.
function get_proc_paths() {
    case "$(uname -s)" in
        Linux) # ps -e -opid,exe <- not supported under {centos7,rocky8}'s procps-ng
            local pid exe
            for pid in /proc/[0-9]*; do
                exe=$(readlink "$pid/exe") || continue
                exe=${exe/ (deleted)/}
                echo "${pid##*/} $exe"
            done
            ;;
        # Fixed: this arm was misspelled "FreeeBSD" and could never match,
        # leaving the function silent on FreeBSD.
        FreeBSD) procstat -ab | awk '{print $1, $4}' ;;
    esac
}
# Filter "$@" down to the paths that file(1) reports as ELF executables.
exec_files() { file "$@" | awk -F: '/ELF.+executable/{print $1}'; }
# Find any still-running processes whose executable matches an SPDK binary
# (test apps, build output, examples) and SIGKILL them.
# Returns 1 when anything had to be reaped, 0 when the system was clean.
function reap_spdk_processes() {
local bins test_bins procs
local spdk_procs spdk_pids
# Candidate binaries: ELF executables under test/{app,env,event} plus
# everything in the build's bin/ and examples dirs.
mapfile -t test_bins < <(find "$rootdir"/test/{app,env,event} -type f)
mapfile -t bins < <(
exec_files "${test_bins[@]}"
readlink -f "$SPDK_BIN_DIR/"* "$SPDK_EXAMPLE_DIR/"*
)
# Match running processes against the binary list ('|'-joined regex).
mapfile -t spdk_procs < <(get_proc_paths | grep -E "$(
IFS="|"
echo "${bins[*]#$rootdir/}"
)" || true)
((${#spdk_procs[@]} > 0)) || return 0
printf '%s is still up, killing\n' "${spdk_procs[@]}" >&2
mapfile -t spdk_pids < <(printf '%s\n' "${spdk_procs[@]}" | awk '{print $1}')
kill -SIGKILL "${spdk_pids[@]}" 2> /dev/null || :
return 1
}
# Succeed iff /sys reports block device $1 as zoned (queue/zoned != "none").
function is_block_zoned() {
    local zoned_attr=/sys/block/$1/queue/zoned
    [[ -e $zoned_attr ]] || return 1
    [[ $(< "$zoned_attr") != none ]]
}
function get_zoned_devs() {
local -gA zoned_devs=()
local nvme bdf
for nvme in /sys/block/nvme*; do
if is_block_zoned "${nvme##*/}"; then
zoned_devs["${nvme##*/}"]=$(< "$nvme/device/address")
fi
done
}
# Define temp storage for all the tests. Look for 2GB at minimum
set_test_storage "${TEST_MIN_STORAGE_SIZE:-$((1 << 31))}"
set -o errtrace
shopt -s extdebug
trap "trap - ERR; print_backtrace >&2" ERR
PS4=' \t -- ${BASH_SOURCE#${BASH_SOURCE%/*/*}/}@${LINENO} -- \$ '
if $SPDK_AUTOTEST_X; then
# explicitly enable xtraces, overriding any tracking information.
unset XTRACE_DISABLED
unset XTRACE_NESTING_LEVEL
xtrace_fd
xtrace_enable
else
xtrace_restore
fi
| true |
4fa2863b6b2b4157a9f6cd424fa31cfcd9236c39 | Shell | amotus/oe-pseudo-test-env | /test_lib/data/cmd_cases/400_create/400_inode_reuse_after_delete.sh | UTF-8 | 228 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -euf -o pipefail
mkdir -p "$IMAGE_ROOTFS/etc"
declare a="$IMAGE_ROOTFS/etc/a.txt"
declare b="$IMAGE_ROOTFS/etc/b.txt"
touch "$a"
chmod a+rwx "$a"
# chown root:root "$a"
rm "$a"
# sleep 0.025
touch "$b"
| true |
922dbcbc625bfd5596971c8e7d28676e6af08e9c | Shell | henrywu2019/oke-airflow | /userdata/cloudinit.sh | UTF-8 | 18,748 | 2.875 | 3 | [
"UPL-1.0"
] | permissive | #!/bin/bash
LOG_FILE="/var/log/OCI-airflow-initialize.log"
log() {
echo "$(date) [${EXECNAME}]: $*" >> "${LOG_FILE}"
}
fetch_metadata () {
region=`curl -s -L http://169.254.169.254/opc/v1/instance/regionInfo/regionIdentifier`
image_name=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/image_name`
image_label=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/image_label`
oke_cluster_id=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/oke_cluster_id`
nodepool_id=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/nodepool_id`
repo_name=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/repo_name`
registry=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/registry`
registry_user=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/registry_user`
secret_id=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/secret_id`
tenancy_ocid=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/tenancy_ocid`
sql_alchemy_conn=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/sql_alchemy_conn`
namespace=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/namespace`
kube_label=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/kube_label`
mount_target_id=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/mount_target_id`
nfs_ip=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/nfs_ip`
admin_db_password=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/admin_db_password | base64 -d`
admin_db_user=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/admin_db_user`
db_ip=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/db_ip`
db_name=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/db_name`
airflow_db_user=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/airflow_db_user`
airflow_db_password=`curl -s -L http://169.254.169.254/opc/v1/instance/metadata/airflow_db_password | base64 -d`
}
EXECNAME="OCI CLI"
log "->Download"
curl -L -O https://raw.githubusercontent.com/oracle/oci-cli/master/scripts/install/install.sh >> $LOG_FILE
chmod a+x install.sh
log "->Install"
./install.sh --accept-all-defaults >> $LOG_FILE
echo "export OCI_CLI_AUTH=instance_principal" >> ~/.bash_profile
echo "export OCI_CLI_AUTH=instance_principal" >> ~/.bashrc
echo "export OCI_CLI_AUTH=instance_principal" >> /home/opc/.bash_profile
echo "export OCI_CLI_AUTH=instance_principal" >> /home/opc/.bashrc
EXECNAME="Kubectl & Git"
log "->Install"
yum install -y kubectl git >> $LOG_FILE
mkdir -p /home/opc/.kube
echo "source <(kubectl completion bash)" >> ~/.bashrc
echo "alias k='kubectl'" >> ~/.bashrc
echo "source <(kubectl completion bash)" >> /home/opc/.bashrc
echo "alias k='kubectl'" >> /home/opc/.bashrc
source ~/.bashrc
EXECNAME="Docker"
log "->Install"
yum-config-manager --enable ol7_addons >> $LOG_FILE
yum install -y docker-engine docker-cli >> $LOG_FILE
log "->Enable"
systemctl enable docker >> $LOG_FILE
systemctl start docker >> $LOG_FILE
usermod -a -G docker opc
log "->Build Prep"
mkdir -p /airflow/docker-build
cd /airflow/docker-build
cat > Dockerfile << EOF
FROM python:latest
ARG AIRFLOW_USER_HOME=/opt/airflow
ARG AIRFLOW_USER="airflow"
ARG AIRFLOW_UID="1000"
ARG AIRFLOW_GID="1000"
ENV AIRFLOW_HOME=\$AIRFLOW_USER_HOME
RUN groupadd -g \$AIRFLOW_GID airflow && \\
useradd -ms /bin/bash -u \$AIRFLOW_UID airflow -g \$AIRFLOW_GID -d \$AIRFLOW_USER_HOME && \\
chown \$AIRFLOW_USER:\$AIRFLOW_GID \$AIRFLOW_USER_HOME && \\
buildDeps='freetds-dev libkrb5-dev libsasl2-dev libssl-dev libffi-dev libpq-dev' \\
apt-get update && \\
apt-get install -yqq sudo && \\
apt-get install -yqq wget && \\
apt-get install -yqq --no-install-recommends \$buildDeps build-essential default-libmysqlclient-dev && \\
python -m pip install --upgrade pip && \\
pip install --no-cache-dir 'apache-airflow[crypto,kubernetes,mysql]' && \\
apt-get purge --auto-remove -yqq \$buildDeps && \\
apt-get autoremove -yqq --purge && \\
rm -rf /var/lib/apt/lists/*
# Enable sudo for airflow user without asking for password
RUN usermod -aG sudo \$AIRFLOW_USER && \\
echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
# Install OCI python SKD
RUN pip install oci && \\
pip install cx_Oracle
# Copy airflow pod template file
COPY pod_template.yaml \$AIRFLOW_USER_HOME/pod_template.yaml
RUN chown \$AIRFLOW_UID:\$AIRFLOW_GID \$AIRFLOW_USER_HOME/pod_template.yaml
# Install OCI plugins and copy the script to download OCI DAG templates
RUN mkdir -p \$AIRFLOW_USER_HOME/scripts
COPY install_oci_plugins.sh \$AIRFLOW_USER_HOME/scripts/install_oci_plugins.sh
COPY install_oci_dag_templates.sh \$AIRFLOW_USER_HOME/scripts/install_oci_dag_templates.sh
RUN chown -R \$AIRFLOW_UID:\$AIRFLOW_GID \$AIRFLOW_USER_HOME/scripts && \\
chmod +x \$AIRFLOW_USER_HOME/scripts/install_oci_plugins.sh && \\
chmod +x \$AIRFLOW_USER_HOME/scripts/install_oci_dag_templates.sh
USER \$AIRFLOW_UID
WORKDIR \$AIRFLOW_USER_HOME
# Install OCI plugins
RUN \$AIRFLOW_USER_HOME/scripts/install_oci_plugins.sh
EOF
cat > install_oci_plugins.sh << EOF
#!/bin/bash
# Install OCI plugins
hooks_dir="/opt/airflow/plugins/hooks"
operators_dir="/opt/airflow/plugins/operators"
sensors_dir="/opt/airflow/plugins/sensors"
mkdir -p \$hooks_dir
mkdir -p \$operators_dir
mkdir -p \$sensors_dir
plugin_url=https://raw.githubusercontent.com/oracle-quickstart/oci-airflow/master/scripts/plugins
dag_url=https://raw.githubusercontent.com/oracle-quickstart/oci-airflow/master/scripts/dags
# hooks
for file in oci_base.py oci_object_storage.py oci_data_flow.py oci_data_catalog.py oci_adb.py; do
wget \$plugin_url/hooks/\$file -O \$hooks_dir/\$file
done
# operators
for file in oci_object_storage.py oci_data_flow.py oci_data_catalog.py oci_adb.py oci_copy_object_to_adb.py; do
wget \$plugin_url/operators/\$file -O \$operators_dir/\$file
done
# sensors
for file in oci_object_storage.py oci_adb.py; do
wget \$plugin_url/sensors/\$file -O \$sensors_dir/\$file
done
EOF
cat > install_oci_dag_templates.sh << EOF
#!/bin/bash
# Install OCI plugins
dags_dir="/opt/airflow/dags"
mkdir -p \$dags_dir
dag_url=https://raw.githubusercontent.com/oracle-quickstart/oci-airflow/master/scripts/dags
# Airflow OCI DAGs
for file in oci_simple_example.py oci_advanced_example.py oci_adb_sql_example.py oci_smoketest.py; do
wget \$dag_url/\$file -O \$dags_dir/\$file
done
for file in schedule_dataflow_app.py schedule_dataflow_with_parameters.py trigger_dataflow_when_file_exists.py; do
wget \$dag_url/\$file -O \$dags_dir/\$file.template
done
EOF
cat > pod_template.yaml << EOF
---
apiVersion: v1
kind: Pod
metadata:
name: dummy-name
spec:
containers:
- args: []
command: []
env:
- name: AIRFLOW__CORE__EXECUTOR
value: "KubernetesExecutor"
- name: AIRFLOW__CORE__SQL_ALCHEMY_CONN
valueFrom:
secretKeyRef:
name: airflow-secrets
key: sql_alchemy_conn
envFrom: []
image: dummy_image
imagePullPolicy: IfNotPresent
name: base
ports: []
volumeMounts:
- name: airflow-dags
mountPath: /opt/airflow/dags
- name: airflow-logs
mountPath: /opt/airflow/logs
volumes:
- name: airflow-dags
persistentVolumeClaim:
claimName: airflow-dags
- name: airflow-logs
persistentVolumeClaim:
claimName: airflow-logs
#hostNetwork: false
restartPolicy: Never
serviceAccountName: airflow
EOF
log "->Build Image"
fetch_metadata
docker build -t ${image_name}:${image_label} . >> $LOG_FILE
log "->Push to Registry"
auth_token=`oci secrets secret-bundle get --secret-id ${secret_id} --stage CURRENT | jq ."data.\"secret-bundle-content\".content" | tr -d '"' | base64 -d`
tenancy_name=`oci os ns get | jq ."data" | tr -d '"'`
export tenancy_name=$tenancy_name
docker login ${registry} -u $tenancy_name/${registry_user} -p ${auth_token} >> $LOG_FILE
docker tag "${image_name}:${image_label}" ${registry}/$tenancy_name/${repo_name}/${image_name}:${image_label} >> $LOG_FILE
docker push ${registry}/$tenancy_name/${repo_name}/${image_name}:${image_label} >> $LOG_FILE
cd ..
mkdir airflow-oke
cd airflow-oke
EXECNAME="Kubeconfig"
log "->Generate"
RET_CODE=1
INDEX_NR=1
SLEEP_TIME="10s"
while [ ! -f /root/.kube/config ]
do
sleep 5
source ~/.bashrc
fetch_metadata
log "-->Attempting to generate kubeconfig"
oci ce cluster create-kubeconfig --cluster-id ${oke_cluster_id} --file /root/.kube/config --region ${region} --token-version 2.0.0 >> $LOG_FILE
log "-->Finished attempt"
done
mkdir -p /home/opc/.kube/
cp /root/.kube/config /home/opc/.kube/config
EXECNAME="OKE Templates"
log "->Build volumes.yaml"
cat > volumes.yaml << EOF
---
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
name: oci-fss
provisioner: oracle.com/oci-fss
parameters:
mntTargetId: ${mount_target_id}
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: airflow-dags
spec:
storageClassName: oci-fss
accessModes:
- ReadOnlyMany
capacity:
storage: 20Gi
mountOptions:
- nosuid
nfs:
server: ${nfs_ip}
path: "/airflow-dags/"
readOnly: false
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: airflow-dags
spec:
storageClassName: "oci-fss"
accessModes:
- ReadOnlyMany
resources:
requests:
storage: 20Gi
volumeName: airflow-dags
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: airflow-logs
spec:
storageClassName: oci-fss
accessModes:
- ReadOnlyMany
capacity:
storage: 20Gi
mountOptions:
- nosuid
nfs:
server: ${nfs_ip}
path: "/airflow-logs/"
readOnly: false
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: airflow-logs
spec:
storageClassName: "oci-fss"
accessModes:
- ReadOnlyMany
resources:
requests:
storage: 20Gi
volumeName: airflow-logs
---
EOF
log "->Build configmap.yaml"
fetch_metadata
cat > configmap.yaml << EOF
apiVersion: v1
kind: ConfigMap
metadata:
name: airflow-config
namespace: ${namespace}
data:
AIRFLOW_HOME: "/opt/airflow"
AIRFLOW__CORE__DAGS_FOLDER: "/opt/airflow/dags"
AIRFLOW__CORE__LOAD_EXAMPLES: "True"
AIRFLOW__CORE__EXECUTOR: "KubernetesExecutor"
AIRFLOW__CORE__SQL_ALCHEMY_CONN_SECRET: "sql_alchemy_conn"
AIRFLOW__KUBERNETES__POD_TEMPLATE_FILE: "/opt/airflow/pod_template.yaml"
AIRFLOW__KUBERNETES__WORKER_CONTAINER_REPOSITORY: "${registry}/$tenancy_name/${repo_name}/${image_name}"
AIRFLOW__KUBERNETES__WORKER_CONTAINER_TAG: "${image_label}"
AIRFLOW__KUBERNETES__WORKER_SERVICE_ACCOUNT_NAME: "airflow"
AIRFLOW__KUBERNETES__NAMESPACE: "${namespace}"
#AIRFLOW__LOGGING__BASE_LOG_FOLDER: "/opt/airflow/dags/logs"
#AIRFLOW__CORE__DAG_PROCESSOR_MANAGER_LOG_LOCATION: "/opt/airflow/dags/logs"
#AIRFLOW__SCHEDULER__CHILD_PROCESS_LOG_DIRECTORY: "/opt/airflow/dags/logs"
EOF
log "->Build secrets.yaml"
cat > secrets.yaml << EOF
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License. *
apiVersion: v1
kind: Secret
metadata:
name: airflow-secrets
namespace: airflow
type: Opaque
data:
# The sql_alchemy_conn value is a base64 encoded representation of this connection string:
# mysql+mysql://airflow_username:airflow_password@mysql_db_ip:mysql_db_port/airflow_database
sql_alchemy_conn: ${sql_alchemy_conn}
EOF
log "->Build airflow.yaml"
cat > airflow.yaml << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: airflow
namespace: ${namespace}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: airflow
namespace: ${namespace}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "create"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: airflow
namespace: ${namespace}
subjects:
- kind: ServiceAccount
name: airflow
roleRef:
kind: Role
name: airflow
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: airflow
namespace: ${namespace}
labels:
app: airflow
spec:
replicas: 1
selector:
matchLabels:
app: airflow
template:
metadata:
labels:
app: airflow
spec:
serviceAccountName: airflow
initContainers:
- name: "init"
image: ${registry}/$tenancy_name/${repo_name}/${image_name}:${image_label}
imagePullPolicy: Always
envFrom:
- configMapRef:
name: airflow-config
env:
- name: AIRFLOW__CORE__SQL_ALCHEMY_CONN
valueFrom:
secretKeyRef:
name: airflow-secrets
key: sql_alchemy_conn
command: ["/bin/sh", "-c"]
args:
- sudo chown airflow:airflow /opt/airflow/dags;
sudo chown airflow:airflow /opt/airflow/logs;
airflow db init;
airflow users create --username airflow --firstname airflow --lastname airflow --role Admin --password airflow --email admin@airflow.org;
/opt/airflow/scripts/install_oci_dag_templates.sh;
volumeMounts:
- name: airflow-dags
mountPath: /opt/airflow/dags
- name: airflow-logs
mountPath: /opt/airflow/logs
containers:
- name: webserver
image: ${registry}/$tenancy_name/${repo_name}/${image_name}:${image_label}
imagePullPolicy: IfNotPresent
command: ["airflow","webserver"]
envFrom:
- configMapRef:
name: airflow-config
env:
- name: AIRFLOW__CORE__SQL_ALCHEMY_CONN
valueFrom:
secretKeyRef:
name: airflow-secrets
key: sql_alchemy_conn
volumeMounts:
- name: airflow-dags
mountPath: /opt/airflow/dags
- name: airflow-logs
mountPath: /opt/airflow/logs
- name: scheduler
image: ${registry}/$tenancy_name/${repo_name}/${image_name}:${image_label}
imagePullPolicy: IfNotPresent
command: ["airflow","scheduler"]
envFrom:
- configMapRef:
name: airflow-config
env:
- name: AIRFLOW__CORE__SQL_ALCHEMY_CONN
valueFrom:
secretKeyRef:
name: airflow-secrets
key: sql_alchemy_conn
volumeMounts:
- name: airflow-dags
mountPath: /opt/airflow/dags
- name: airflow-logs
mountPath: /opt/airflow/logs
volumes:
- name: airflow-dags
persistentVolumeClaim:
claimName: airflow-dags
- name: airflow-logs
persistentVolumeClaim:
claimName: airflow-logs
imagePullSecrets:
- name: airflow-ocir-secret
---
apiVersion: v1
kind: Service
metadata:
name: airflow
namespace: ${namespace}
spec:
type: LoadBalancer
ports:
- port: 8080
selector:
app: airflow
EOF
EXECNAME="OCI MySQL"
log "->Install Client"
yum install -y https://dev.mysql.com/get/mysql80-community-release-el7-3.noarch.rpm >> $LOG_FILE
yum install -y mysql >> $LOG_FILE
log "->Create Airflow DB"
log "-->Building SQL"
echo -e "CREATE DATABASE IF NOT EXISTS ${db_name} CHARACTER SET utf8 COLLATE utf8_unicode_ci;" >> airflow.sql
echo -e "CREATE USER IF NOT EXISTS ${airflow_db_user} IDENTIFIED WITH mysql_native_password BY '${airflow_db_password}';" >> airflow.sql
echo -e -e "GRANT ALL ON ${db_name}.* TO ${airflow_db_user};" >> airflow.sql
log "-->Executing as ${admin_db_user}"
mysql -h ${db_ip} -u ${admin_db_user} -p${admin_db_password} < airflow.sql 2>&1 2>> $LOG_FILE
EXECNAME="Airflow"
log "->OKE Worker check"
SLEEP_TIME="20s"
active_nodes=""
while [ -z "$active_nodes" ]
do
sleep $SLEEP_TIME
log "-->Checking if there is a worker node in ACTIVE state" >> $LOG_FILE
active_nodes=`oci ce node-pool get --node-pool-id ${nodepool_id} --query 'data.nodes[*].{ocid:id, state:"lifecycle-state"}' | jq '.[] | select(.state=="ACTIVE")' | jq ."ocid"`
done
log "-->Nodepool ${nodepool_id}"
log "--->Worker(s) $active_nodes"
log "->Deploy"
log "-->Check for namespace ${namespace}"
done=1
export KUBECONFIG=/root/.kube/config
while [ $done != 0 ]; do
kubectl get namespace | grep ${namespace}
rt=$?
if [ $rt != 0 ]; then
log "--->${namespace} does not exist, creating."
kubectl create namespace ${namespace} 2>&1 2>> $LOG_FILE
sleep 10
else
log "--->${namespace} found."
done=0
fi
done
log "-->Fetch secret"
kubectl -n ${namespace} get secrets | grep 'airflow-ocir-secret' 2>&1 2>> $LOG_FILE
if [[ $? -ne 0 ]]; then
log "--->Secret dosn't exist, creating"
kubectl -n ${namespace} create secret docker-registry airflow-ocir-secret --docker-server=${registry} --docker-username=$tenancy_name/${registry_user} --docker-password=${auth_token} 2>&1 >> $LOG_FILE
fi
log "-->Applying volumes.yaml"
kubectl -n ${namespace} apply -f volumes.yaml 2>&1 2>> $LOG_FILE
log "-->Applying configmap.yaml"
kubectl -n ${namespace} apply -f configmap.yaml 2>&1 2>> $LOG_FILE
log "-->Applying secrets.yaml"
kubectl -n ${namespace} apply -f secrets.yaml 2>&1 2>> $LOG_FILE
log "-->Applying airflow.yaml"
kubectl -n ${namespace} apply -f airflow.yaml 2>&1 2>> $LOG_FILE
log "--->Wait 120s until LB is created and public IP is allocated to airflow service"
sleep 120
log "--->Checking for public IP"
kubectl -n ${namespace} get svc 2>&1 2>> $LOG_FILE
chown -R opc:opc /home/opc
log "DEPLOYMENT DONE"
| true |
6d641b37c40ad3f2bd7bdbb19b9446439234a4fc | Shell | huahuayu/bash-by-examples | /while-in-one-line.sh | UTF-8 | 77 | 2.5625 | 3 | [] | no_license | #!/bin/bash
count=0
while [ $count -lt 5 ]; do echo $count; ((count++)); done | true |
d25c3cf92b14dc5091de9fc287beaba46e3fe5fe | Shell | mwatson128/Perl_scripts | /operations/tpekv/topout | UTF-8 | 498 | 3.328125 | 3 | [] | no_license | #!/bin/ksh
# (]$[) topout:1.10 | CDATE=04/29/07 18:57:45
# Build TOPDIR based on user and machine.
user=`id | cut -d"(" -f2 | cut -d")" -f1`
#user=`/bin/who am i | awk '{print $1}'`
machine=`/bin/uname -n | cut -d . -f 1`
TOPDIR=/$machine/perf/kv
TZ=GMT; export TZ
MDATE=`date '+%m%y'`
LOG=top.`date +%m%d%y`
cd $TOPDIR
if [ ! -d $MDATE ]
then
mkdir $MDATE
chmod 775 $MDATE
fi
cd $MDATE
# Run commands. Output to log.
date >> $LOG
nice /bin/top -b -n 1 | head -50 >> $LOG
chmod 644 $LOG
| true |
abfe338d476f8c59290b8743809e02776618140a | Shell | owtf/owtf | /owtf/scripts/send_urls_to_proxy.sh | UTF-8 | 418 | 3.890625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
if [ $# -lt 1 ]; then
echo "Syntax: $0 <file with urls.txt> (proxy_ip: default 127.0.0.1) (proxy_port: default 8080)"
exit
fi
FILE=$1
PROXY_IP="127.0.0.1"
PROXY_PORT="8080"
if [ $2 ]; then
PROXY_IP=$2
fi
if [ $3 ]; then
PROXY_PORT=$3
fi
COUNT=0
for i in $(cat $FILE); do
COUNT=$(($COUNT + 1))
echo "$COUNT - Sending $i to proxy .."
curl -k "$i" --proxy 127.0.0.1:8080 -o /dev/null
done
| true |
fb49cb7cb628409ae70c607c75d2a136149bac7d | Shell | telmich/nsbin | /old/mails_counter | UTF-8 | 719 | 3 | 3 | [] | no_license | #!/bin/bash
#
# Author: Nico Schottelius <nicos@pcsystems.de>
# Date: 12th of October 2k
# Last Modifed: dito
# Copyright: GPL 2.0 or newer
#
echo "Starting Netscape Mail counter (c) by Nico Schottelius (nicos@pcsystems.de)"
#FILE="/tmp/from.`date +%j`"
#rm $FILE 2> /dev/null
echo "Please wait, counting mails..."
find ~/nsmail -name \* -exec cat {} 2> /dev/null \; | egrep "^From" | grep "@" \
| grep -v Nico | less
#grep From: $FILE.temp | grep -v Drafts | grep -v Sent | grep -v Trash | grep -v telmich | grep -v nicos | grep @ >> $FILE 2> /dev/null
#echo "Action: sorting"
#sort $FILE -o $FILE.temp
#echo "Action: show number of mails"
#less $FILE.temp
#echo "Action: cleaning up"
#rm $FILE $FILE.temp 2>/dev/null
echo "Do you have more than 4000 ? :-)"
| true |
3c45b78d226e46c87bab59330c2b4d961a6ccc0f | Shell | mattiashem/openvas-exporter | /run.sh | UTF-8 | 393 | 2.9375 | 3 | [] | no_license | #!/bin/bash
#
#
# Script to startup openVas exporter and to loop it
#
echo "Setup configfile"
envsubst </home/gvm/config/config.tmp> /home/gvm/config/config.ini
echo "Starting logstash"
/usr/share/logstash/bin/logstash -f /home/gvm/config/logstash.conf &
echo "Starting the collector"
cd /home/gvm
echo "Looping"
while true
do
python3 getReport.py
echo "sleep"
sleep 5m
done
| true |
bcc09c874cf2291791b6981b5952946d2d2a5fdf | Shell | bahmutov/gatsby-starter-blog | /trigger-circle.sh | UTF-8 | 1,396 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# todo: use correct repo url for pull requests and forks
echo "Triggering Circle build against deployed url $DEPLOY_URL branch $BRANCH"
# https://www.netlify.com/docs/continuous-deployment/#build-environment-variables
echo "Other Netlify variables"
echo "REPOSITORY_URL $REPOSITORY_URL"
echo "PULL_REQUEST $PULL_REQUEST"
echo "COMMIT_REF $COMMIT_REF"
echo "CONTEXT $CONTEXT"
echo "REVIEW_ID $REVIEW_ID"
echo "URL $URL"
echo "DEPLOY_URL $DEPLOY_URL"
echo "DEPLOY_PRIME_URL $DEPLOY_PRIME_URL"
# Circle API https://circleci.com/docs/api/v1-reference/
# pass deployed url to Cypress as an environment variable
# https://on.cypress.io/environment-variables
if [ "$PULL_REQUEST" = true ]; then
echo "Triggering pull request build - cannot use BRANCH directly"
# instead need to use review id (= pull request number)
# and pass commit ref as revision
# https://discuss.circleci.com/t/api-trigger-build-of-pull-request-from-fork/7784/19
curl -u ${CIRCLE_API_USER_TOKEN}: \
-d build_parameters[CYPRESS_baseUrl]=$DEPLOY_URL \
-d revision=$COMMIT_REF \
https://circleci.com/api/v1.1/project/github/bahmutov/gatsby-starter-blog/tree/pull/$REVIEW_ID
else
curl -u ${CIRCLE_API_USER_TOKEN}: \
-d build_parameters[CYPRESS_baseUrl]=$DEPLOY_URL \
https://circleci.com/api/v1.1/project/github/bahmutov/gatsby-starter-blog/tree/$BRANCH
fi
| true |
74e769fafa9bb6175e819681587eccbe796b6a42 | Shell | Sylk/matthewbauer.github.io | /bootstrap.sh | UTF-8 | 954 | 3.78125 | 4 | [] | no_license | #!/usr/bin/env sh
echo This script will install Nix and Git
echo if they are not already installed.
if ! command -v nix-env >/dev/null 2>&1; then
nix_installer=$(mktemp)
curl -s https://nixos.org/nix/install \
> $nix_installer
sh $nix_installer
source $HOME/.profile
fi
if ! command -v git >/dev/null 2>&1; then
nix-env -iA nixpkgs.git
fi
if [ -d .git ]; then
git pull origin master || true
fi
if ! [ -f default.nix ]; then
repo_dir=$HOME/.local/share/bauer
mkdir -p $(dirname $repo_dir)
git clone https://github.com/matthewbauer/bauer \
$repo_dir
cd $repo_dir
fi
nix-env -if .
echo "source $HOME/.nix-profile/etc/profile" >> $HOME/.profile
echo "To use bauer correctly, you must first source the profile."
echo
echo "To do this, just run:"
echo " source $HOME/.nix-profile/etc/profile"
echo "From you command line"
echo "You can also run either emacs or zsh to launch the environment"
| true |
47586837b52f4b123322e63b57139a53d002a6b2 | Shell | fluid-cloudnative/fluid | /test/testcase_basic/test.sh | UTF-8 | 9,420 | 3.578125 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | #!/bin/bash
set -x
dataset_path="./dataset.yaml"
runtime_path="./runtime.yaml"
dataload_path="./dataload.yaml"
fluid_git="https://github.com/fluid-cloudnative/fluid.git"
dataset_name="spark"
dataload_name="spark-dataload"
get_fluid()
{
echo "get fluid lastest chart..."
if [ -d "/fluid" ]
then
echo "fluid repository already exists."
else
echo "clone from ${fluid_git}."
git clone $fluid_git /fluid
fi
echo "update fluid from master branch..."
cd /fluid &&
git checkout master &&
git pull origin master:master
if [[ $? -ne 0 ]]
then
echo "ERROR: failed to update fluid"
exit 1
else
echo "fluid updated."
fi
cd -
}
uninstall_fluid()
{
local fluid=$(helm list | awk '{print $1}' | grep ^fluid$)
if [[ $fluid == "fluid" ]]
then
echo "delete crd..."
kubectl delete crd $(kubectl get crd | grep data.fluid.io | awk '{print $1}')
local crd=$(kubectl get crd | grep data.fluid.io)
if [[ $crd == "" ]]
then
echo "delete fluid crd successfully."
else
echo "ERROR: can not delete fluid crd."
exit 1
fi
fi
echo "uninstall fluid..."
helm delete fluid
fluid=$(helm list | awk '{print $1}' | grep ^fluid$)
if [[ $fluid == "" ]]
then
echo "uninstall fluid successfully."
else
echo "ERROR: can not uninstall fluid."
exit 1
fi
}
install_fluid()
{
echo "create namespace..."
local namespace=$(kubectl get namespace | awk '{print $1}' | grep ^fluid-system$)
if [[ $namespace == "" ]]
then
kubectl create namespace fluid-system
else
echo "namespace $namespace already exists."
fi
echo "install fluid..."
helm install fluid /fluid/charts/fluid/fluid/
local fluid=$(helm list | awk '{print $1}' | grep ^fluid$)
if [[ $fluid == "fluid" ]]
then
echo "fluid has been installed successfully. check its running status..."
while :
do
local alluxioruntime_controller_status=$(kubectl get pod -n fluid-system | grep alluxioruntime-controller | awk '{print $3}')
local dataset_controller_status=$(kubectl get pod -n fluid-system | grep dataset-controller | awk '{print $3}')
local node_num=$(expr $(kubectl get nodes | wc -l) - 1)
local csi_nodeplugin_num=$(kubectl get pod -n fluid-system | grep csi-nodeplugin | awk '$3=="Running"' | wc -l)
if [[ $alluxioruntime_controller_status == "Running" && $dataset_controller_status == "Running" && $csi_nodeplugin_num -eq $node_num ]]
then
echo "fluid runs successfully."
break
else
echo "fluid does not run, wait 10 seconds..."
sleep 10
fi
done
else
echo "ERROR: can not install fluid."
exit 1
fi
}
create_dataset()
{
echo "create dataset..."
kubectl create -f $dataset_path
local result=$(kubectl get dataset | awk '{print $1}' | grep ^spark$)
if [[ $result == $dataset_name ]]
then
echo "create dataset $dataset_name successfully!"
else
echo "ERROR: can not create dataset ${dataset_name}."
exit 1
fi
}
create_runtime()
{
echo "create runtime..."
kubectl create -f $runtime_path
local result=$(kubectl get alluxioruntime | awk '{print $1}' | grep ^spark$)
if [[ $result == $dataset_name ]]
then
echo "create runtime $dataset_name successfully!"
else
echo "ERROR: can not create runtime ${dataset_name}."
exit 1
fi
}
check_runtime_pod()
{
echo "check runtime pods..."
while :
do
local master_num=$(kubectl get pod | grep spark-master | awk '$3=="Running"' | wc -l)
local worker_num=$(kubectl get pod | grep spark-worker | awk '$3=="Running"' | wc -l)
local fuse_num=$(kubectl get pod | grep spark-fuse | awk '$3=="Running"' | wc -l)
if [[ $master_num -gt 0 && $worker_num -gt 0 && $fuse_num -gt 0 ]]
then
echo "runtime pods are ready."
break;
else
echo "runtime pods are not ready, wait 10 seconds..."
sleep 10
fi
done
}
check_pvc()
{
echo "check pv and pvc..."
while :
do
local pv_status=$(kubectl get pv | awk '$1=="spark" && $7=="fluid" {print $5}')
if [[ $pv_status == "Bound" ]]
then
echo "pv $spark_name has been created and bound."
break
else
echo "pv is not created or bound, wait 5 seconds..."
fi
done
while :
do
local pvc_status=$(kubectl get pvc | awk '$1=="spark" && $3=="spark" && $6=="fluid" {print $2}')
if [[ $pvc_status == "Bound" ]]
then
echo "pvc $spark_name has been created and bound."
break
else
echo "pvc is not created or bound, wait 5 seconds..."
fi
done
}
check_dataset_bound()
{
echo "check whether dataset is bound..."
while :
do
local master_status=$(kubectl get alluxioruntime | awk '$1=="spark"{print $2}')
local worker_status=$(kubectl get alluxioruntime | awk '$1=="spark"{print $3}')
local fuse_status=$(kubectl get alluxioruntime | awk '$1=="spark"{print $4}')
if [[ $master_status == "Ready" && ($worker_status == "Ready" || $worker_status == "PartialReady") && ($fuse_status == "Ready" || $fuse_status == "PartialReady") ]]
then
echo "runtime is ready."
else
echo "runtime is not ready, wait 5 seconds..."
continue
fi
local dataset_status=$(kubectl get dataset | awk '$1=="spark"{print $6}')
if [[ $dataset_status == "Bound" ]]
then
echo "dataset is bound."
break
else
echo "dataset is not bound, wait 5 seconds..."
sleep 5
fi
done
}
create_dataload()
{
echo "create dataload..."
kubectl create -f $dataload_path
local result=$(kubectl get dataload | awk '{print $1}' | grep ^spark-dataload$)
if [[ $result == $dataload_name ]]
then
echo "create dataload $dataload_name successfully!"
sleep 5
else
echo "ERROR: can not create dataload ${dataload_name}."
exit 1
fi
}
check_dataload()
{
echo "check dataload running status..."
local job=$(kubectl get job | awk '$1=="spark-dataload-loader-job"')
if [[ $job == "" ]]
then
echo "ERROR: the dataload job is not created successfully."
exit 1
else
echo "the dataload job is created successfully."
fi
local dataload_status=$(kubectl get dataload | awk '$1=="spark-dataload" {print $3}')
if [[ $dataload_status == "Pending" || $dataload_status == "Loading" || $dataload_status == "Complete" || $dataload_status == "Failed" ]]
then
echo "dataload is running properly."
else
echo "ERROR: dataload is not running properly"
exit 1
fi
echo "check if dataload is finished..."
while :
do
dataload_status=$(kubectl get dataload | awk '$1=="spark-dataload" {print $3}')
if [[ $dataload_status == "Complete" || $dataload_status == "Failed" ]]
then
echo "dataload is finished."
if [[ $dataload_status == "Complete" ]]
then
local cache_percent=$(kubectl get dataset | awk '$1=="spark" {print $5}')
echo "data is loaded successfully, the cache percent is ${cache_percent}."
else
echo "failed to load data."
fi
break
else
echo "dataload is still running, wait 20 seconds..."
sleep 20
fi
done
}
delete_dataset()
{
echo "delete dataset..."
while :
do
kubectl delete dataset $dataset_name
local dataset_status=$(kubectl get dataset | awk '$1=="spark"')
if [[ $dataset_status == "" ]]
then
echo "delete dataset $dataset_name successfully!"
break
else
echo "dataset ${dataset_name} has not deleted, wait 5 seconds."
sleep 5
fi
done
while :
do
local dataload_status=$(kubectl get dataload | awk '$1=="spark-dataload"')
if [[ $dataload_status == "" ]]
then
echo "delete dataload $dataload_name successfully!"
break
else
echo "dataload ${dataload_name} has not deleted, wait 5 seconds."
sleep 5
fi
done
while :
do
local runtime_status=$(kubectl get alluxioruntime | awk '$1=="spark"')
if [[ $runtime_status == "" ]]
then
echo "delete runtime $dataset_name successfully!"
break
else
echo "runtime ${dataset_name} has not deleted, wait 10 seconds."
sleep 10
fi
done
}
main()
{
echo "begin to test..."
get_fluid && \
uninstall_fluid && \
install_fluid
create_dataset && \
create_runtime && \
check_runtime_pod && \
check_pvc && \
check_dataset_bound && \
create_dataload && \
check_dataload && \
delete_dataset
echo "pass the test."
}
main "$@" | true |
283b903a5884e0ac4d92d23d9475f2c54fce30af | Shell | skywills/docker-bitcoinsv | /bin/docker_entrypoint.sh | UTF-8 | 1,364 | 3.546875 | 4 | [] | no_license | #!/bin/bash
set -euo pipefail
COIN_DIR=/root/.${COINNAME}
COIN_CONF=${COIN_DIR}/${COINNAME}.conf
DAEMON=${COINNAME}d
# If config doesn't exist, initialize with sane defaults for running a
# non-mining node.
if [ ! -e "${COIN_CONF}" ]; then
tee -a >${COIN_CONF} <<EOF
# For documentation on the config file, see
#
# the bitcoin source:
# https://github.com/bitcoin/bitcoin/blob/master/share/examples/bitcoin.conf
# the wiki:
# https://en.bitcoin.it/wiki/Running_Bitcoin
# server=1 tells Bitcoin-Qt and bitcoind to accept JSON-RPC commands
server=1
# You must set rpcuser and rpcpassword to secure the JSON-RPC api
rpcuser=${RPCUSER:-${COINNAME}}
rpcpassword=${RPCPASSWORD:-changemeplz}
rpcallowip=${RPCALLOWIP:-::/0}
# Listen for RPC connections on this TCP port:
rpcport=${RPCPORT:-8332}
# Print to console (stdout) so that "docker logs bitcoind" prints useful
# information.
printtoconsole=${PRINTTOCONSOLE:-1}
# We probably don't want a wallet.
disablewallet=${DISABLEWALLET:-0}
# Enable an on-disk txn index. Allows use of getrawtransaction for txns not in
# mempool.
txindex=${TXINDEX:-1}
# Run on the test network instead of the real bitcoin network.
testnet=${TESTNET:-1}
# Set database cache size in MiB
dbcache=${DBCACHE:-512}
EOF
fi
if [ $# -eq 0 ]; then
exec ${DAEMON} -datadir=${COIN_DIR} -conf=${COIN_CONF}
else
exec "$@"
fi
| true |
2593714ad081c801427db063acd91c48f7cf47b0 | Shell | wadejong/NWChem-Json | /src/tools/get-tools | UTF-8 | 2,691 | 3.8125 | 4 | [
"ECL-2.0"
] | permissive | #!/bin/bash
#
# $Id: get-tools 27094 2015-05-07 23:13:50Z edo $
#
# Fetch or update the Global Arrays (GA) toolkit sources used by the
# NWChem build.  Which svn branch is used depends on the DEV_GA /
# EXP_GA / GA_STABLE environment variables; with --ga-version the
# script only prints the chosen directory name and exits.
unalias -a
export NWCHEM_TOP=${NWCHEM_TOP:-"`pwd`/../"}
# NOTE(review): NWCHEM_TOP is defaulted on the line above, so this
# empty-check can never fire in practice — kept for safety.
if test "x$NWCHEM_TOP" = x
then
echo You must set NWCHEM_TOP to use this script.
exit 1
fi
# Parse the (single) supported command-line flag.
ga_version=0
while [ $# -gt 0 ]
do
case "$1" in
--ga-version) ga_version=1;;
*)
echo >&2 "usage: $0 [--ga-version]"
exit 1;;
esac
shift
done
TOOLDIR=`pwd`
TOOLSVN=`which svn`
CONFIG=$NWCHEM_TOP/src/config/makefile.h
# Candidate svn locations for the development, stable and experimental
# GA branches.
GA_DEV_SVN=https://svn.pnl.gov/svn/hpctools/trunk/ga
GA_DEV=ga-dev
if test "x$GA_STABLE" = x
then
GA_STABLE=ga-5-4
GA_STABLE_SVN=https://svn.pnl.gov/svn/hpctools/branches/ga-5-4
else
GA_STABLE_SVN=https://svn.pnl.gov/svn/hpctools/branches/$GA_STABLE
fi
GA_EXP_SVN=https://svn.pnl.gov/svn/hpctools/branches/exp1
GA_EXP=ga-exp1
# If EXP_GA is set at all in the user's environment then it means 'yes'
if test "x$EXP_GA" != x
then
EXP_GA=yes
fi
if [ $ga_version == 0 ] ; then
echo "EXP_GA=$EXP_GA"
echo "GA_STABLE=$GA_STABLE"
fi
# If DEV_GA is set at all in the user's environment then it means 'yes'
# DEV_GA=y
# Precedence: DEV_GA > EXP_GA > stable branch.
if test "x$DEV_GA" != x
then
GA_SVN=$GA_DEV_SVN
GA_DIRNAME=$GA_DEV
elif test "x$EXP_GA" != x
then
GA_SVN=$GA_EXP_SVN
GA_DIRNAME=$GA_EXP
else
GA_SVN=$GA_STABLE_SVN
GA_DIRNAME=$GA_STABLE
fi
# --ga-version mode: report the selected directory name only.
if [ $ga_version == 1 ] ; then
echo $GA_DIRNAME
exit 0
fi
# Without svn (or without repo access) we cannot update; exit 0 so the
# build can proceed with whatever sources are already present.
if [ ${#TOOLSVN} -eq 0 ] ; then
echo "No Subversion found!"
echo "Giving up and hoping for the best..."
exit 0
fi
# NOTE(review): `2>&1 > /dev/null` sends stderr to the terminal and
# only stdout to /dev/null — presumably intentional, since only the
# exit status is used below.
svn log ./get-tools 2>&1 > /dev/null
stat=$?
if [ $stat -ne 0 ] ; then
echo "No access to repository!"
echo "Giving up and hoping for the best..."
exit 0
fi
# Update the autotools build of GA.
#if test -e $TOOLDIR/build
#then
#    echo "You have an old copy of the ga tools using configure"
#    echo "Deleting it now"
#    rm -rf $TOOLDIR/build
#    rm -rf $TOOLDIR/install
#fi
# Update an existing checkout; on update failure wipe it and do a
# fresh checkout instead.
if test -d $GA_DIRNAME
then
echo "Updating existing $GA_DIRNAME"
cd $GA_DIRNAME
svn cleanup
if svn update --username nwchem --password nwchem
then
echo "Finished updating existing $GA_DIRNAME"
else
echo "Failed updating existing $GA_DIRNAME"
echo "Attempting to remove $GA_DIRNAME"
cd $TOOLDIR
if rm -rf $GA_DIRNAME
then
echo "Checking out $GA_SVN"
svn checkout $GA_SVN $GA_DIRNAME --username nwchem --password nwchem
else
echo "Failed to remove $GA_DIRNAME"
echo "get-tools has failed"
fi
fi
cd $TOOLDIR
else
echo "Checking out $GA_SVN"
svn checkout $GA_SVN $GA_DIRNAME --username nwchem --password nwchem
fi
exit 0
| true |
4356b7e4fc7a4b0b5e13206b7d1061baa8ca5f1d | Shell | ms77grz/manuals | /bashx/lessons/lesson12.sh | UTF-8 | 323 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Print the rental price for the vehicle type given as the script's
# first argument.  Unknown (or missing) types get a fallback message.
rent_cost() {
  # Quote the expansion: the original `case $vehicle in` word-split and
  # glob-expanded unquoted input.
  local vehicle=$1
  case "$vehicle" in
    car)     echo "Rent of a $vehicle is \$100" ;;
    van)     echo "Rent of a $vehicle is \$80" ;;
    bicycle) echo "Rent of a $vehicle is \$5" ;;
    truck)   echo "Rent of a $vehicle is \$150" ;;
    *)       echo "Vehicle is unknown" ;;
  esac
}

rent_cost "${1:-}"
b51d1e436efc66d2c75505f38dff38c6399b4907 | Shell | go2suresh1979/MyRepo | /Personal/Ticketing/main/bin/startReport.ksh | UTF-8 | 2,713 | 3.453125 | 3 | [] | no_license | #!/bin/ksh
#----------------------------------------------------------------------#
#
# Component: $Id: //remedy7/main/bin/startReport.ksh#2 $
# Author: swhetstone
# Copyright: Verizon Business, 2006
#
#----------------------------------------------------------------------#
#
# Description:
# ------------
# This script initiates the TORM (Totality Operations Report Management)
# report programs.
#
#----------------------------------------------------------------------#
#
# Interface:
# ----------
# See showUsage() method for description of command-line args.
#
#----------------------------------------------------------------------#
# Set Base, Java and Oracle HOME
BASEDIR=/var/opt/totality/remedy/current
JAVA_LIB_HOME=/usr/local/javalib
#ORACLE_HOME=/usr/local/ORACLE/product/CLIENT
ORACLE_HOME=/usr/local/ORACLE8
# Set the report specific classpath
CLASSPATH=$BASEDIR/lib/CeReport.jar
CLASSPATH=$CLASSPATH:$BASEDIR/lib/MetricTrendingReport.jar
# Set base class path
# (logging, JasperReports and Apache commons jars required by the
# report programs, plus the Oracle JDBC/SQLJ client libraries)
CLASSPATH=$CLASSPATH:$JAVA_LIB_HOME/log4j-1.2.14.jar:$JAVA_LIB_HOME/com-darwinsys-util.jar
CLASSPATH=$CLASSPATH:$JAVA_LIB_HOME/jasperreports.jar
CLASSPATH=$CLASSPATH:$JAVA_LIB_HOME/commons-digester.jar
CLASSPATH=$CLASSPATH:$JAVA_LIB_HOME/commons-collections.jar
CLASSPATH=$CLASSPATH:$JAVA_LIB_HOME/commons-logging.jar
CLASSPATH=$CLASSPATH:$JAVA_LIB_HOME/commons-beanutils.jar
CLASSPATH=$CLASSPATH:$JAVA_LIB_HOME/commons-javaflow.jar
CLASSPATH=$CLASSPATH:$JAVA_LIB_HOME/jfreechart.jar
CLASSPATH=$CLASSPATH:$JAVA_LIB_HOME/jcommon.jar
CLASSPATH=$CLASSPATH:$ORACLE_HOME/jdbc/lib/classes12.zip
CLASSPATH=$CLASSPATH:$ORACLE_HOME/sqlj/lib/runtime.zip
export CLASSPATH=$CLASSPATH
# Oracle Home
export ORACLE_HOME=$ORACLE_HOME
# Set the Path
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib:/usr/local/lib:/usr/ucblib:/usr/dt/lib:$ORACLE_HOME/lib32:$ORACLE_HOME/lib
export PATH=/usr/local/JAVA/bin/:$PATH
main()
{
	# Initiate the report with exec.
	#
	# The remaining command line (the report run string supplied by
	# REXD) replaces this shell via exec; "-p $$" passes our PID to the
	# report program.
	#log "Started $0 - $$"
	# NOTE(review): $@ is unquoted here, so arguments containing spaces
	# would be re-split — confirm callers never pass such arguments.
	checkArgs $@
	# exec the report run string
	exec "$@" -p $$
}
checkArgs()
{
	# Check the command-line arguments.
	#
	# Only -h is recognised: print usage and exit successfully.  Any
	# other arguments are left untouched for main() to exec.
	# Parse arguments
	while getopts :h option
	do
		case "$option" in
		h)	showUsage
			# BUG FIX: the original ran `exit $GOOD_RUN`, but GOOD_RUN is
			# never defined anywhere in this script, so the bare `exit`
			# propagated the status of the previous command instead of
			# signalling success.  Exit 0 explicitly.
			exit 0
			;;
		esac
	done
}
showUsage()
{
	# Display usage information.
	#
	# The heredoc body below is emitted verbatim to stdout, so no
	# comments may be added inside it.
	/bin/cat <<USAGE
Description:
This program initiates the TORM reports. It is executed by the Report Execution
Daemon (REXD).
Synopsis:
startReport.ksh <report start command>
Example: startReport.ksh com.totality.ce.main.CeReport -b /var/opt/totality/mgmt_sv/current/solaris -c CeReport.cfg
Example: startReport.ksh com.totality.ce.main.CeReport
USAGE
}
# Emit a timestamped log line built from all arguments.
# NOTE: `print` is a ksh builtin (this is a ksh script); it is not
# available under bash.
log()
{
print `date +'%Y/%m/%d %H:%M:%S'` $*
}
# Script entry point.  NOTE(review): $@ is unquoted, so arguments with
# embedded spaces would be re-split before reaching main().
main $@
| true |
e9fbba0b32ede9fe5d77bbf4e6e2412bb9ebe66d | Shell | SarahMohamedAbdAlkader/DBMS-BashScript | /project/Bash_DBMS.sh | UTF-8 | 6,921 | 3.6875 | 4 | [] | no_license | #!/usr/bin/bash
# Top-level interactive menu of the toy DBMS.  Databases are
# directories under ./DataBases; each table is a pair of files
# <name>.csv (data) and <name>.meta (schema).  The function recurses
# into itself from several branches instead of looping.
function mainMenu
{
echo "Please, Enter your choice: "
options=("create Database" "List Databases" "Connect To Databases" "Drop Database" "Exit");
select choice in "${options[@]}"
do
case $choice in
"create Database")
echo "createDB"
createDataBase;
break;
;;
# List every database directory, numbered, caching the names in
# DBArray for later use.
"List Databases")
echo "List DBs: "
# ls ./DataBases
if [ -z "$(ls -A ./DataBases)" ];
then
echo "There Is No Database To Be Listed";
mainMenu;
else
echo "This Is List With Available Databases : ";
i=1;
for DB in `ls ./DataBases`
do
DBArray[$i]=$DB;
echo $i") "$DB;
let i=i+1;
done
fi
mainMenu
break;
;;
# Connect to a database and present the per-database table menu.
"Connect To Databases")
echo " Your DataBases are : `ls ./DataBases`"
echo "Enter Database name you want to be connected to:"
read dBName
if [ ! -d ./DataBases/$dBName ]
then
echo "This is dB doen't exist"
else
pwd
echo "Connect to $dBName"
dBOptions=("create Table" "List Tables" "Drop Table" "Insert Into Table" "Select From Table" "Delete From Table" "Back To Main Menu" "Exit")
PS3="Enter YOUR choice : " ;
flag2=1;
while ( test $flag2 -eq 1 )
do
select choice in "${dBOptions[@]}"
do
case $choice in
"create Table")
echo "create table is in progress"
createTable
break;
;;
"List Tables")
echo "List Tables is in progress";
listTables;
break; ;;
# NOTE(review): the `! -d` test below is true both for regular files
# and for missing paths, so rm is attempted even when the table does
# not exist; the "does not exist" branch is effectively unreachable.
"Drop Table")
echo "your tables are"
ls -d ./DataBases/$dBName
echo "Drop Table is in progress"
echo "enter table name"
read tname
if [ ! -d ./DataBases/$dBName/$tname.csv ]
then
rm ./DataBases/$dBName/$tname.csv
rm ./DataBases/$dBName/$tname.meta
echo "Done"
else
echo "This tabel does not exist"
fi
break; ;;
"Insert Into Table")
echo "Insert Into Table is in progress"
echo "Your Tables are: "
ls -a ./DataBases/$dBName
insertRaw
break;
;;
# Show the whole table, then grep for a whole-word match of the
# requested value.
"Select From Table")
echo "YOUr Tables Are"
ls -a ./DataBases/$dBName
echo "Select From table is in progress"
echo "enter table name"
read tname
echo "*******************************************"
cat ./DataBases/$dBName/$tname.csv | more | column -t -s ","
echo "Enter value that you want to search:"
read value
grep -w "$value" ./DataBases/$dBName/$tname.csv
break;
;;
# Delete rows by rewriting the csv without lines matching the key.
# NOTE(review): $value is interpolated into an egrep pattern, so regex
# metacharacters in the key will match more rows than intended.
"Delete From Table")
echo "Delete From Table is in progress"
echo "enter table name"
read tname
echo "Enter PrimaryKeyvalue that you want to delete its record:"
read value
egrep -v "$value|EXPDTA" ./DataBases/$dBName/$tname.csv > ./DataBases/$dBName/$tname
mv ./DataBases/$dBName/$tname ./DataBases/$dBName/$tname.csv
# grep -w "$value" ./DataBases/$dBName/$tname.csv | xargs rm -f >> ./DataBases/$dBName/$tableName.csv;
break;
;;
"Back To Main Menu") echo "Bye"
mainMenu
break;
;;
"Exit")
exit -1;
break;;
esac
done
done
break;
fi
;;
# List databases (numbered) and remove the one the user picks.
# NOTE(review): the first loop reuses $i without resetting it to 1.
"Drop Database")
for DB in `ls ./DataBases`
do
DBArray[$i]=$DB;
let i=i+1;
done
if [[ ${#DBArray[@]} -eq 0 ]];
then
echo "There Is No Database To Be Listed";
mainMenu;
else
echo "This Is List With Available Databases : ";
i=1;
for DB in `ls ./DataBases`
do
DBArray[$i]=$DB;
echo $i") "$DB;
let i=i+1;
done
echo "${DBARR[@]}";
read -p "Choose Database You Want To Drop : " choise ;
rm -r ./DataBases/${DBArray[$choise]};
fi
break;
;;
# Clear the outer loop flag so the script terminates.
"Exit")
echo "exit"
flag=0;
break;
esac
done
}
# Prompt for a database name and create the directory
# ./DataBases/<name> for it.  Rejects empty names and refuses to
# clobber an existing database.
function createDataBase
{
echo "Enter Database Name:"
read dBName
echo "$dBName"
if test -z "$dBName"
then
echo "please enter valid DB name";
else
# Quote the path: the original unquoted expansion word-split on names
# containing spaces or glob characters.
if [ -d "./DataBases/$dBName" ]
then echo "This Database exists.";
else
# mkdir -p also creates ./DataBases on first use, and the success
# message is now only printed when mkdir actually succeeded (the
# original announced success unconditionally).
if mkdir -p "./DataBases/$dBName"; then
echo "YOUR Database Created Successfully!"
fi
fi
fi
}
# Create a new table in the current database ($dBName).  Writes two
# files: <table>.csv (header row of column names, later data rows) and
# <table>.meta (table name, column count, per-column name and type).
# Prompts interactively for the table name, column count, and each
# column's name and type (String/Integer).
function createTable
{
read -p "Enter Table Name : " tableName ;
# BUG FIX: the original tested "-e .../$tableName" (no extension), a
# path that is never created, so an existing table could be silently
# re-initialised.  Check the real data file instead.
if [[ ! -e ./DataBases/$dBName/$tableName.csv ]] && [[ $tableName != "" ]]
then
echo "$dBName"
touch ./DataBases/$dBName/$tableName.csv;
touch ./DataBases/$dBName/$tableName.meta;
chmod +x ./DataBases/$dBName/$tableName.meta;
chmod +x ./DataBases/$dBName/$tableName.csv;
echo "*****$tableName Meta Data ****" >./DataBases/$dBName/$tableName.meta;
echo "Table Name:$tableName " >> ./DataBases/$dBName/$tableName.meta;
echo "Enter The Number Of Columns : "
read tableColumns
echo "The Number Of Columns Is: $tableColumns" >> ./DataBases/$dBName/$tableName.meta;
for (( i = 1; i <= tableColumns ; i++ )); do
read -p "Enter Name Of Column [$i] : " ColName ;
echo "Column[$i]: $ColName" >> ./DataBases/$dBName/$tableName.meta ;
columnsArray[$i]=$ColName
select columnType in String Integer
do
case $columnType in
"String")
echo "Column Type is:String" >> ./DataBases/$dBName/$tableName.meta;
break ;
;;
"Integer")
# BUG FIX: the original had a stray "$ " before the path
# (">> $ ./DataBases/...") which redirected this line into a file
# literally named "$" and passed the real path as an echo argument.
echo "Column Type is :Integer" >> ./DataBases/$dBName/$tableName.meta;
break ;
;;
*)
echo "You Must Choose The Column Data Type"
esac
done
done
colArrIndex=1
echo "***********" >> ./DataBases/$dBName/$tableName.meta;
# Write the csv header row: column names comma-separated, with the
# newline only after the last column.
while [ $colArrIndex -le $tableColumns ]
do
if [ $colArrIndex -eq $tableColumns ]
then echo -e "${columnsArray[colArrIndex]}" >> ./DataBases/$dBName/$tableName.csv; else
echo -n "${columnsArray[colArrIndex]}," >> ./DataBases/$dBName/$tableName.csv;
fi
colArrIndex=$((colArrIndex+1));
done
echo $tableName" Done";
else
echo "This Table exists"
fi
}
# List the tables of the currently selected database ($dBName).
# Prints "Empty" when the database directory has no entries, otherwise
# "Not Empty" followed by the directory listing.
function listTables
{
# Quote the path: the original unquoted expansion broke on database
# names containing spaces or glob characters.
if [ -z "$(ls -A "./DataBases/$dBName")" ]; then
echo "Empty"
else
echo "Not Empty"
ls -a "./DataBases/$dBName"
fi
}
# Interactively insert one row into a table of the current database.
# Reads the column count from line 3 of the .meta file, prompts for a
# value per column, appends the record after the last csv line via
# sed, then converts the ':' separators to ' , '.
function insertRaw
{
read -p "enter table name : " tableName
# NOTE(review): this checks the *database directory*, not the table
# file — a typo'd table name passes the check and the later cat/sed
# calls fail.  Should presumably test -f .../$tableName.csv.
if [ ! -d ./DataBases/$dBName ]
then
echo "this name does not exist,please try again"
insertRaw
else
tableData="./DataBases/$dBName/$tableName.csv"
tableMeta="./DataBases/$dBName/$tableName.meta"
echo "This exists"
# Current number of csv lines; used as the sed append address below.
DataNum=$(cat $tableData | wc -l)
echo "$DataNum"
record=""
# Column count lives on line 3 of the meta file ("...: <n>").
noCols=$((`awk -F: '{if (NR == 3) print $2 }' $tableMeta`));
echo "Number of columns: $noCols"
# The primary-key column name is noCols+2 lines into the meta file.
pkVal=$((noCols+2))
pkVal=`cut -f2 -d: $tableMeta | head -$pkVal | tail -1 `
echo "Primary key column is : $pkVal"
for (( i = 1; i <= noCols ; i++ )); do
# NOTE(review): colType is computed but never used; the commented
# block below was an unfinished per-type validation attempt.
colType=$( grep -n "Type" $tableMeta | cut -d':' -f 3)
# i=1
# for col in $colType
# do
# colName=$(echo $col | cut -d':' -f 1)
# colsArray[$i]=$col;
# echo $i") "$col;
# if [ "$col" == "String" ]
# then
# stringRegex='^[]0-9a-zA-Z,!^`@{}=().;/~_[:space:]|[-]+$'
# read -p "Enter value of Column [$i] : " ColValue
# if [[ $ColValue == $stringRegex ]]
# then
# echo "please enter a string value"
# read -p "Enter value of Column [$i] : " ColValue
# fi
# echo "string"
# else
# read -p "Enter value of Column [$i] : " ColValue
# echo "Integer"
# fi
# let i=i+1
# done
read -p "Enter value of Column [$i] : " ColValue
record+=$ColValue:
done
# Append the ':'-joined record after the last line, then rewrite every
# ':' in the whole file as ' , '.  NOTE(review): the second sed touches
# *all* lines, so values containing ':' in older rows change too.
sed -i ''$DataNum' a '$record'' ./DataBases/$dBName/$tableName.csv
sed -i 's/:/ , /g' ./DataBases/$dBName/$tableName.csv
fi
}
# Main driver loop: show the top-level menu until mainMenu's "Exit"
# branch clears $flag.  (The parentheses around `test` run the
# condition in a subshell; it still reads the updated $flag each
# iteration.)
flag=1;
while ( test $flag -eq 1 )
do
PS3="Enter YOUR Choice: "
mainMenu
done
| true |
b99a9eece8072bec57924e2fe48d0f8687c95659 | Shell | vishnuak15/shell-scripting | /greeting.sh | UTF-8 | 110 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env bash
# Greet the user by name and favorite color.
name="Vishnu Mohandas"
favorite_color=blue  # fixed misspelled variable name (was Favroite_color)
# Quote the message so echo receives it as a single argument.
echo "hi $name, your favorite color is $favorite_color."
67eecd4968e3748f812b8f730e575b1cd50bb3f2 | Shell | delkyd/alfheim_linux-PKGBUILDS | /gprbuild/PKGBUILD | UTF-8 | 1,233 | 2.796875 | 3 | [] | no_license | pkgname=gprbuild
# PKGBUILD metadata for the gprbuild package (GPL 2017 source release).
pkgver=2017
pkgrel=3
pkgdesc='Multi-language build system.'
url='http://www.adacore.com/gnatpro/toolsuite/gprbuild/'
arch=('i686' 'x86_64')
license=('GPL')
depends=('xmlada')
makedepends=('git' 'gprbuild-bootstrap')
# We provide gprbuild-bootstrap here so we can use this gprbuild to bootstrap
# itself and ada-xmlada.
provides=('gprbuild' 'gprbuild-bootstrap')
conflicts=('gprbuild' 'gprbuild-bootstrap')
# Upstream tarball plus a local patch; checksums must match, in order.
source=('http://mirrors.cdn.adacore.com/art/591c45e2c7a447af2deecff7'
        'expose-cargs-and-largs-makefile.patch')
sha1sums=('f956aa57c58c342a958332c8cd98e6481e9ce593'
          'bda77367bc6985c3daf96929cccf5551a0544237')
# makepkg prepare(): apply the local patch and replace Debian-style
# "libexec" paths with "lib" across the build scripts and gprconfig
# knowledge base.
prepare()
{
  cd gprbuild-gpl-2017-src
  patch -Np1 -i "$srcdir"/expose-cargs-and-largs-makefile.patch
  # Not everyone is Debian
  sed -i 's/libexec/lib/g' bootstrap.sh doinstall gprbuild.gpr \
      share/gprconfig/compilers.xml \
      share/gprconfig/linker.xml \
      share/gprconfig/gnat.xml
}
# makepkg build(): configure for a /usr prefix and compile.  OS=UNIX is
# read by the upstream Makefile to select platform settings.
build()
{
  cd gprbuild-gpl-2017-src
  export OS=UNIX
  make prefix=/usr setup
  make all
}
# makepkg package(): install into the staging directory ($pkgdir) and
# drop the upstream install helper that should not ship in the package.
package()
{
  cd gprbuild-gpl-2017-src
  export OS=UNIX
  make prefix="$pkgdir"/usr install
  # Cleanup
  rm -f -- "$pkgdir"/usr/doinstall
}
| true |
992ed18d38397b82fb9100239464d1269f825d8f | Shell | EA7KDO/Scripts | /gitcopy2.sh | UTF-8 | 6,637 | 3.484375 | 3 | [] | no_license | #!/bin/bash
#########################################################
# Nextion TFT Support for Nextion 2.4" #
# Gets all Scripts and support files from github #
# and copies them into the Nextion_Support directory "
# and copies the NX??? tft file into /usr/local/etc #
# and returns a script duration time to the Screen #
# as a script completion flag #
# #
# KF6S/VE3RD 2020-05-12 #
#########################################################
# Valid Screen Names for EA7KDO - NX3224K024, NX4832K035
# Valid Screen Names for VE3RD - NX3224K024
#if [[ $EUID -ne 0 ]]; then
# clear
# echo ""
# echo "This script must be run as root"
# echo "Setting root user"
# echo "Re-Start Script"
# echo ""
# sudo su
# exit 1
#fi
# Remember the parent directory of the script's cwd as $homedir.
p1=$(pwd) ; cd .. ; homedir=$(pwd) ; cd "$p1"
who=$(whoami)
echo "This script is running as $who user"
sleep 2
# Globals shared with the functions below: $errtext carries the abort
# reason for exitcode(); $parm1/$parm2 are the raw script arguments.
run=""
errtext="This is a test"
parm1="$1"
parm2="$2"
ver="20220124"
declare -i tst
# Dialog rendering settings for ncurses line-drawing and locale.
export NCURSES_NO_UTF8_ACS=1
export LANG=en_US.UTF-8
# NOTE(review): this tests ~/.dialog but the file created/edited below
# is ~/.dialogrc — presumably a typo, so --create-rc runs every time.
if [ ! -f ~/.dialog ]; then
# j=1
# else
sudo dialog --create-rc ~/.dialogrc
fi
# Force the blue/white dialog colour scheme.
sudo sed -i '/use_colors = /c\use_colors = ON' ~/.dialogrc
sudo sed -i '/screen_color = /c\screen_color = (WHITE,BLUE,ON)' ~/.dialogrc
sudo sed -i '/title_color = /c\title_color = (YELLOW,RED,ON)' ~/.dialogrc
# Switch terminal background to blue; cleared again on exit.
echo -e '\e[1;44m'
if [ -z "$1" ]; then
clear
fi
# Abort helper: show $errtext in a dialog box, restore the terminal
# colours and terminate the whole script.
function exitcode
{
txt='Abort Function\n\n
This Script will Now Stop'"\n$errtext"
dialog --title " Programmed Exit " --ascii-lines --msgbox "$txt" 8 78
clear
# Reset terminal background to black before leaving.
echo -e '\e[1;40m'
run="Done"
exit
}
# EA7KDO Script Function
# Clone the EA7KDO screen repository matching $scn into
# $homedir/Nextion_Temp (wiping any previous copy).  Sets $tst to
# 1/2 as a which-repo marker; leaves it 0 if $scn matched nothing.
function getea7kdo
{
tst=0
# echo "Function EA7KDO"
calltxt="EA7KDO"
if [ -d "$homedir"/Nextion_Temp ]; then
sudo rm -R "$homedir"/Nextion_Temp
fi
if [ "$scn" == "NX3224K024" ]; then
sudo git clone --depth 1 https://github.com/EA7KDO/NX3224K024 "$homedir"/Nextion_Temp
tst=1
fi
if [ "$scn" == "NX4832K035" ]; then
sudo git clone --depth 1 https://github.com/EA7KDO/NX4832K035 "$homedir"/Nextion_Temp
tst=2
fi
}
# VE3RD Script Function
# Clone the VE3RD screen repository matching $scn into
# $homedir/Nextion_Temp (wiping any previous copy); aborts via
# exitcode() on an unrecognised screen name.
function getve3rd
{
if [ -d "$homedir"/Nextion_Temp ]; then
sudo rm -R "$homedir"/Nextion_Temp
fi
tst=0
# echo "Function VE3RD"
calltxt="VE3RD"
if [ "$scn" = "NX3224K024" ]; then
tst=1
sudo git clone --depth 1 https://github.com/VE3RD/Nextion "$homedir"/Nextion_Temp
elif [ "$scn" == "NX4832K035" ]; then
sudo git clone --depth 1 https://github.com/VE3RD/NX4832K035 "$homedir"/Nextion_Temp
tst=2
else
# NOTE(review): $s1/$s2 are never assigned anywhere in this script,
# so they expand empty in this message.
errtext="Invalid VE3RD Screen Name $scn, $s1, $s2"
exitcode
fi
}
# Decide whose screen set to fetch (the "screen author"): VE3RD when
# either command-line parameter named it, EA7KDO otherwise.  Result is
# stored in the global $calltxt.
function getcall
{
local author="EA7KDO"
if [[ "$parm1" == "VE3RD" || "$parm2" == "VE3RD" ]]; then
author="VE3RD"
fi
calltxt="$author"
}
#### Start of Main Code
## Select User Screens
# Pick the screen author from the command-line parameters, then offer
# a dialog menu of the two supported Nextion screen models (marking
# whether the .tft is already installed in /usr/local/etc).
getcall
S1=""
S2=""
if [ -f "/usr/local/etc/NX4832K035.tft" ]; then
S1="NX4832K035"
S1A=" Available "
else
S1="NX4832K035"
S1A=" Not Available "
fi
if [ -f "/usr/local/etc/NX3224K024.tft" ]; then
S2="NX3224K024"
S2A=" Available "
else
S2="NX3224K024"
S2A=" Not Available "
fi
# $result holds the dialog command line as an array; the menu choice
# arrives on stderr (2>&1 with stdout sent back to the terminal).
result=(dialog --backtitle "Screen Selector - $calltxt" --ascii-lines --menu "Choose Your $calltxt Nextion Screen Model" 22 76 16)
options=(1 "$S1A 3.5 Inch Nextion Screen"
2 "$S2A 2.4 Inch Nextion Screen"
3 " Abort - Exit Script")
choices=$("${result[@]}" "${options[@]}" 2>&1 >/dev/tty)
#errt="$?"
clear
echo "Choice = $choices"
# Empty output means the user pressed Cancel/Esc.
if [ -z "$choices" ]; then
#if [ "$choices" != "1" ] || [ "$choices" != "2" ] || [ "$choices" != "3" ]; then
errtext="Cancel Button Pressed"
exitcode
fi
for choice in $choices
do
case $choice in
1)
echo "$S1A 3.5 Inch Nextion Screen Selected"
scn="NX4832K035"
;;
2)
echo "$S2A 2.4 Inch Nextion Screen Selected"
scn="NX3224K024"
;;
3)
echo "Abort - Exit Script"
errtext="Abort Selected"
exitcode
;;
esac
done
# NOTE(review): $result is the dialog command array, so "$result"
# expands to its first word ("dialog") and this comparison is always
# false — the VE3RD screen restriction below never takes effect.
if [ "$calltxt" == "VE3RD" ]; then
if [ "$result" == "NX3224K024" ]; then
#echo "Trap2"
scn="$result"
else
#echo "Trap3"
errtext=" Invalid Screen name for $calltxt"
fi
fi
echo "$scn $calltxt"
#echo " End Processing Parameters - $scn $calltxt"
#Start Duration Timer
start=$(date +%s.%N)
model="$scn"
tft='.tft'
#gz='.gz'
#Put Pi-Star file system in RW mode
sudo mount -o remount,rw / > /dev/null
sleep 1s
#Stop the cron service
sudo systemctl stop cron.service > /dev/null
#Test for "$homedir"/Nextion_Temp and remove it, if it exists
if [ -d "$homedir"/Nextion_Temp ]; then
sudo rm -R "$homedir"/Nextion_Temp
fi
# Get Nextion Screen/Scripts and support files from github
# Get EA7KDO File Set
if [ "$calltxt" == "EA7KDO" ]; then
echo "getting Screens for $calltxt"
getea7kdo
fi
# Get VE3RD File Set
if [ "$calltxt" == "VE3RD" ]; then
echo "Getting Screens for $calltxt"
getve3rd
fi
# Refresh /usr/local/etc/Nextion_Support from the cloned repository
# (everything except the .tft images and profiles.txt).
if [ ! -d /usr/local/etc/Nextion_Support ]; then
sudo mkdir /usr/local/etc/Nextion_Support
else
sudo rm -R /usr/local/etc/Nextion_Support
sudo mkdir /usr/local/etc/Nextion_Support
fi
sudo chmod +x "$homedir"/Nextion_Temp/*.sh
sudo rsync -avqru "$homedir"/Nextion_Temp/* /usr/local/etc/Nextion_Support/ --exclude=NX* --exclude=profiles.txt
sudo rsync -avqru "$homedir"/Scripts/stripped2.csv /usr/local/etc/
sudo mount -o remount,rw /
# Download the latest DMR user database.
sudo wget https://database.radioid.net/static/user.csv -O /usr/local/etc/stripped.csv
# Restore profiles.txt only if the target copy is missing.
if [ -f "$homedir"/Nextion_Temp/profiles.txt ]; then
if [ ! -f /usr/local/etc/Nextion_Support/profiles.txt ]; then
# NOTE(review): $fb is never set, and `txtn= "..."` (space after "=")
# assigns nothing and tries to *execute* the message as a command —
# this branch looks broken and is currently dead.
if [ "$fb" ]; then
txtn= "Replacing Missing Profiles.txt"
txt="$txt\n""$txtn"
fi
sudo cp "$homedir"/Nextion_Temp/profiles.txt /usr/local/etc/Nextion_Support/
fi
fi
# Replace any previously installed .tft with the freshly cloned one.
model="$scn"
echo "Remove Existing $model$tft and copy in the new one"
txtn="Remove Existing $model$tft and copy in the new one"
txt="$txt""$txtn"
if [ -f /usr/local/etc/"$model$tft" ]; then
sudo rm /usr/local/etc/NX*K*.tft
fi
sudo cp "$homedir"/Nextion_Temp/"$model$tft" /usr/local/etc/
FILE=/usr/local/etc/"$model$tft"
if [ ! -f "$FILE" ]; then
# Copy failed
echo "No TFT File Available to Flash - Try Again"
errtext="Missing tft File Parameter"
exitcode
fi
sudo systemctl start cron.service > /dev/null
# Report the elapsed time in a dialog box as the "completed" signal.
duration=$(echo "$(date +%s.%N) - $start" | bc)
execution_time=`printf "%.2f seconds" $duration`
txt="$calltxt Scripts Loaded: $execution_time"
#whiptail --title "$title" --msgbox "$txt" 8 90
# NOTE(review): $title is never assigned in this script.
dialog --title " $title " --ascii-lines --msgbox "$txt" 8 78
echo -e '\e[1;40m'
if [ -z "$1" ]; then
clear
fi
# Return the filesystem to read-only (Pi-Star default) and finish.
sudo mount -o remount,ro /
exit
| true |
1ddd06b9337d60a655cec72122c6cfed9eabb771 | Shell | pphe/pcmatrix | /run_test.sh | UTF-8 | 808 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Rebuild the project and run the unit tests, then 100 iterations of
# the pcMatrix stress test.
# BUG FIX: the original ignored the result of `make clean && make`,
# so after a failed build it would happily run stale (or missing)
# binaries.  We now abort immediately on build failure.

# Print the visual separator line used throughout the output.
banner() {
  echo "----------------------------------------------------------------------"
}

banner
echo "Cleaning and remaking..."
banner
make clean && make || { echo "Build failed - aborting tests" >&2; exit 1; }
banner
echo

banner
echo "Running tests..."
banner
echo "Test counter module:"
./counter
echo
echo "Test matrix module:"
./matrix
echo
echo "Test prodcons module:"
./prodcons
echo
echo "Test pcMatrix program:"
for (( i = 1; i <= 100; i++ ))
do
  echo "Iteration #$i:"
  ./pcMatrix
  echo
done
banner
echo
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.