blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
92c6c7d2b9623c777819c43795a4ac678426a48e | Shell | kaoxkrul/bash | /deforce | UTF-8 | 409 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Scan for index/default pages and strip a known injected malicious IFRAME.
# The cleaned copy is written back with "cat > file" (not mv) so the original
# inode, owner and permissions are preserved.
payload='<IFRAME SRC="http://www.forced-action.com/" WIDTH=1 HEIGHT=1></IFRAME>'

# BUG FIX: iterating over unquoted backtick output split filenames on
# whitespace; -print0 with "read -d ''" handles any filename. grep -F treats
# the payload as a fixed string instead of a regex (the '.' chars previously
# matched any character).
find . \( -name 'index.html' -o -name 'default.html' \) -print0 |
while IFS= read -r -d '' i
do
  echo "checking ${i}"
  if grep -qF -- "$payload" "$i"; then
    echo "deforcing ${i}"
    grep -vF -- "$payload" "$i" > "$i.tmp"
    cat "$i.tmp" > "$i"
    rm -f -- "$i.tmp"
  else
    echo "${i} was clean"
  fi
done
| true |
9dbadab3f3002e1f6d2525933cbf521a96c9cffd | Shell | ronthings/deep_phix | /bash_scripts/1_filter_reads.sh | UTF-8 | 1,362 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env bash
# Print usage help. NOTE: defined but never invoked below -- presumably kept
# for interactive reference; TODO confirm it should be wired to a flag.
usage() {
NAME=$(basename $0)
cat <<EOF
Usage:
${NAME}
You must define global variables first (via "0_config_and_run.sh")
EOF
}
# location for log file
LOGFILE=./1_filter.log
# adapter sequence for Nextera (XT)
adapter="CTGTCTCTTATA"
# variables to be used in main loop
reads1=(${FASTQLOC}/*R1*.fastq.gz) # collect each forward read in array, e.g. "~/FASTQ/A_S1_L001_R1_001.fastq.gz"
reads1=("${reads1[@]##*/}") # [@] refers to array, greedy remove */ from left, e.g. "A_S1_L001_R1_001.fastq.gz"
reads2=("${reads1[@]/_R1/_R2}") # substitute R2 for R1, e.g. "A_S1_L001_R2_001.fastq.gz"
# main loop
# Trim adapters and low-quality ends from each FASTQ pair with cutadapt,
# writing results to TRIM/. Relies on $FASTQLOC and $NUMCPUS being exported
# by the caller (see "0_config_and_run.sh").
pipeline() {
echo [`date +"%Y-%m-%d %H:%M:%S"`] "#> START: " $0 $@
for ((i=0; i<=${#reads1[@]}-1; i++)); do # i from zero to one minus length of array
fwdrds="${reads1[$i]}" # e.g. "A_S1_L001_R1_001.fastq.gz"
rvsrds="${reads2[$i]}" # e.g. "A_S1_L001_R2_001.fastq.gz"
id="${fwdrds%%_*}" # strip everything from the first "_" onward, e.g. "A"
cutadapt --quality-base=33 --quality-cutoff 30,30 \
-a ${adapter} -A ${adapter} --error-rate=0.2 --overlap=3 \
--trim-n --pair-filter=any --minimum-length=20 --cores=$NUMCPUS \
-o TRIM/${id}_trimmed_R1.fastq.gz -p TRIM/${id}_trimmed_R2.fastq.gz \
${FASTQLOC}/${fwdrds} ${FASTQLOC}/${rvsrds}
done
echo [`date +"%Y-%m-%d %H:%M:%S"`] "#> DONE."
} #pipeline end
# Run the pipeline, duplicating all output (stdout+stderr) into the log file.
pipeline 2>&1 | tee $LOGFILE
| true |
0997a19bf6c4131572dc90ea42408e4434d06402 | Shell | Estecka/42-mini-micro-paint | /mini_paint/test.sh | UTF-8 | 1,038 | 3.453125 | 3 | [] | no_license | #!/bin/bash
echo "> start"
echo "$ compile"
make all || { printf "\e[1;31m: Compilation error.\n" && exit 1; }
echo "$ test"
counter=1
max=2
our_res=-1
bad_res=-1
# NOTE: max is bumped every iteration along with counter, so this loop is
# effectively endless; it stops only on a mismatch (exit 1/2) or when killed.
while [ "$counter" -le "$max" ]
do
  ./gen
  # BUG FIX: the original tested `[ $? ]`, which is true for ANY status
  # ($? expands to a non-empty string); compare against 0 so iterations with
  # a failed generator are actually skipped.
  if [ $? -eq 0 ]
  then
    sleep .01
    # Run the reference and the candidate on the same generated input,
    # capturing exit codes and output for comparison.
    ./our_mini_paint example_ >ours.out.log 2>ours.err.log
    our_res=$?
    ./mini_paint example_ >yours.out.log 2>yours.err.log
    bad_res=$?
    if [ "$our_res" -ne "$bad_res" ]
    then
      printf "\n: different return result, our \e[1;31m$our_res\e[0m and yours \e[1;32m$bad_res\e[0m !\n"
      exit 1
    fi
    diff -y --suppress-common-lines ours.out.log yours.out.log
    if [ $? -ne 0 ]
    then
      printf "\e[1;31m: difference in output, coutput is our, output yours and the example is in example_ !\e[0m\n"
      exit 2
    fi
    # Progress ticker: print the iteration count every 50 rounds.
    if [ $(( counter % 50 )) = 0 ]
    then
      printf "\e[1;34m[$counter]\e[0m"
    fi
    # Colour-code the shared exit status: yellow for non-zero, green for 0.
    if [ "$our_res" -ne 0 ]
    then
      printf "\e[1;33m$our_res\e[0m"
    else
      printf "\e[1;32m$our_res\e[0m"
    fi
  fi
  max=$((max + 1))
  counter=$((counter + 1))
done
rm -rf example_ *.out.log *.err.log
printf "\n> done"
| true |
de998e14041792882c22bdf2dd9781b0921b8dc4 | Shell | ZeroInfinite/iot-hub-c-intel-edison-client-app | /buildSDK.sh | UTF-8 | 1,498 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#Copyright (c) Microsoft. All rights reserved.
#Licensed under the MIT license. See LICENSE file in the project root for full license information.

# Fetch the Azure IoT C SDK, pin each vendored dependency to a known commit,
# and build the client library (unit tests and the AMQP/HTTP transports are
# skipped). Re-running is a no-op for directories that are already populated.

# clone_into DIR URL COMMIT
#   cd into DIR and, if it is empty, clone URL there and check out COMMIT.
#   Note: the cd deliberately persists, matching the original script's flow.
clone_into() {
    cd "$1" || exit 1
    if ! [ "$(ls -A .)" ]; then
        git clone "$2" . && git checkout "$3"
    fi
}

if [ ! -d ~/azure-iot-sdk-c ];
then
    git clone https://github.com/Azure/azure-iot-sdk-c.git ~/azure-iot-sdk-c && cd ~/azure-iot-sdk-c && git checkout 76906dc;
fi

clone_into ~/azure-iot-sdk-c/uamqp https://github.com/Azure/azure-uamqp-c.git 5bf09d3
clone_into ~/azure-iot-sdk-c/umqtt https://github.com/Azure/azure-umqtt-c.git 51da812
clone_into ~/azure-iot-sdk-c/parson https://github.com/kgabis/parson.git c22be79
clone_into ~/azure-iot-sdk-c/c-utility https://github.com/Azure/azure-c-shared-utility.git 9073d21
clone_into ~/azure-iot-sdk-c/uamqp/c-utility https://github.com/Azure/azure-c-shared-utility.git b0b5b1b
clone_into ~/azure-iot-sdk-c/umqtt/c-utility https://github.com/Azure/azure-c-shared-utility.git b0b5b1b

# Cap the parallel build at 2 jobs (small boards run out of memory otherwise).
sed -i 's/--jobs=$CORES/--jobs=2/g' ~/azure-iot-sdk-c/build_all/linux/build.sh
# Build only if the MQTT transport archive is not already present.
test -e ~/azure-iot-sdk-c/cmake/iotsdk_linux/iothub_client/libiothub_client_mqtt_transport.a || (cd ~/azure-iot-sdk-c && sudo build_all/linux/build.sh --skip-unittests --no-amqp --no-http --no_uploadtoblob)
| true |
6a235caa211ef8d9786cd3fd1e4120521305bfa9 | Shell | silnrsi/font-namdhinggo | /tools/archive/latin/import-symbols.bash | UTF-8 | 1,294 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# Import Latin glyphs from Gentium Book Plus sources into each Namdhinggo UFO
# master, scale/rename/encode them, run preflight, and stage the result.
# Map: UFO master -> Gentium source (relative to $config/instances).
declare -A ufos
ufos[NamdhinggoSILMaster-ExtraBold.ufo]=stock/GentiumBookPlus-Bold.ufo
ufos[NamdhinggoSILMaster-Regular.ufo]=custom/GentiumBookPlus-Regular.ufo
# Local config checkout holding glyph lists and CSV maps -- machine-specific
# path; adjust per user.
config=$HOME/script/limb/fonts/namdhinggo_local/latin
for ufo in *.ufo
do
# import characters
latin=$config/instances/${ufos[$ufo]}
pushd $config
cat lowercase_blue.txt > import.txt
psfgetglyphnames -a aglfn-nr.csv -i import.txt $latin glyphs.csv
popd
psfcopyglyphs --rename rename --unicode usv --force -s $latin -i $config/glyphs.csv -l ${ufo}_import.log $ufo
# scale characters
for codepoints in lowercase_blue
do
psfgetglyphnames -a ${config}/aglfn-nr.csv -i ${config}/${codepoints}.txt $latin ${codepoints}_import.csv
# Build a "name,name,usv" CSV (drop the header row) for the scaling step.
awk 'FS=","{printf "%s,%s,%s\n", $2, $2, $3}' ${codepoints}_import.csv | tail -n +2 > ${codepoints}.csv
# Transform name is the part of the list name before the first underscore.
transform=$(echo $codepoints | cut -d _ -f 1)
psfmakescaledshifted -i ${codepoints}.csv -t latin$transform -l ${ufo}_${codepoints}.log $ufo
done
# cleanup
psfdeleteglyphs -i $config/delete_blue.txt $ufo
psfrenameglyphs -i $config/rename_blue.txt $ufo
psfsetunicodes -i $config/encode_blue.txt $ufo
# preflight lives two levels up from this tools/archive/latin directory.
cd ../..
./preflight
cd -
git add $ufo
done
git commit -m "Make some symbols bold"
| true |
cc6d2407e56ba0fe2f0b1b69e50264def8d3b3d6 | Shell | pcorpet/url-shortener | /build.sh | UTF-8 | 783 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Build the url-shortener binary in a Docker builder container and package it
# into a minimal image. Optional $1 becomes the image tag.
readonly CONTAINER_NAME="lascap/url-shortener"
set -e
# Stage the build context in a throwaway directory.
TMPDIR=$(mktemp -d)
cp -R public "${TMPDIR}/public"
cp Dockerfile "${TMPDIR}"
echo "Building binary..."
# RM_FLAG stays unset on CircleCI, so the (deliberately unquoted) expansion
# below adds --rm only when supported.
if [ -z "${CIRCLECI}" ]; then
# `docker rm` doesn't work on CircleCI.
readonly RM_FLAG="--rm"
fi
mkdir -p release
# Static build (CGO disabled) plus CA bundle so TLS works in a scratch image.
docker-compose run ${RM_FLAG} -e CGO_ENABLED=0 builder /bin/bash -c "cp /etc/ssl/certs/ca-certificates.crt release && go build -ldflags \"-s\" -a -installsuffix cgo -o release/url-shortener"
cp release/url-shortener release/ca-certificates.crt "${TMPDIR}"
echo "Packaging Docker image..."
if [ -n "$1" ]; then
readonly TAG="${CONTAINER_NAME}:${1}"
else
# Using "latest".
readonly TAG="${CONTAINER_NAME}"
fi
docker build --build-arg GIT_SHA1 -t "${TAG}" "${TMPDIR}"
rm -rf "${TMPDIR}"
| true |
b3232bf629d7ae6fe346ee6faa4ec2f23bc4d2f6 | Shell | Nordix/xcluster | /ovl/k8s-cni-calico/k8s-cni-calico.sh | UTF-8 | 3,203 | 3.484375 | 3 | [
"MIT"
] | permissive | #! /bin/sh
##
## k8s-cni-calico.sh --
##
## Help script for the xcluster ovl/k8s-cni-calico.
##
# Script identity and a per-invocation temp path (cleaned up on exit/die).
prg=$(basename $0)
dir=$(dirname $0); dir=$(readlink -f $dir)
me=$dir/$prg
tmp=/tmp/${prg}_$$
# Print an error to stderr, remove the temp dir, and abort with status 1.
die() {
echo "ERROR: $*" >&2
rm -rf $tmp
exit 1
}
# Print this script's '##'-prefixed usage comments and exit 0. (Because help
# text is scraped from the source, only '#' comments are safe to add here.)
help() {
grep '^##' $0 | cut -c3-
rm -rf $tmp
exit 0
}
# Show help when invoked with no arguments or a help/-h style first argument.
test -n "$1" || help
echo "$1" | grep -qi "^help\|-h" && help
# Emit a log line on stderr, prefixed with the program name.
log() {
	printf '%s\n' "$prg: $*" >&2
}
# Emit a debug line on stderr, but only when --verbose was given.
# (Returns non-zero when verbose mode is off, like the original.)
dbg() {
	[ -n "$__verbose" ] && printf '%s\n' "$prg: $*" >&2
}
## Commands;
##
## env
## Print environment.
# Establish defaults for the test environment (__nvm VMs, __nrouters routers)
# and source the xcluster environment. For the "env" command it just prints
# the current double-underscore option variables and returns.
cmd_env() {
test -n "$__nvm" || export __nvm=5
test -n "$__nrouters" || export __nrouters=1
if test "$cmd" = "env"; then
set | grep -E '^(__.*)='
return 0
fi
test -n "$xcluster_DOMAIN" || xcluster_DOMAIN=xcluster
test -n "$XCLUSTER" || die 'Not set [$XCLUSTER]'
test -x "$XCLUSTER" || die "Not executable [$XCLUSTER]"
# Import xcluster's environment variables into this shell.
eval $($XCLUSTER env)
}
##
## Tests;
## test [--xterm] [--no-stop] [test...] > logfile
## Exec tests
# Dispatch a named test function (test_<name>), defaulting to test_start, and
# log the total elapsed time. --xterm switches to the foreground starter.
cmd_test() {
cmd_env
start=starts
test "$__xterm" = "yes" && start=start
rm -f $XCLUSTER_TMP/cdrom.iso
if test -n "$1"; then
local t=$1
shift
test_$t $@
else
test_start
fi
now=$(date +%s)
# $begin is set by the sourced xctest library. TODO confirm.
tlog "Xcluster test ended. Total time $((now-begin)) sec"
}
## test start_empty
## Start empty cluster. K8s nodes will be "NotReady".
test_start_empty() {
export __image=$XCLUSTER_HOME/hd-k8s-xcluster.img
export xcluster_FIRST_WORKER=2
test -n "$xcluster_CALICO_BACKEND" || export xcluster_CALICO_BACKEND=none
tlog "CALICO_BACKEND=$xcluster_CALICO_BACKEND"
# Optionally layer a named network topology on top of the default one.
test -n "$TOPOLOGY" && \
. $($XCLUSTER ovld network-topology)/$TOPOLOGY/Envsettings
xcluster_start network-topology k8s-cni-calico $@
otc 1 check_namespaces
# When kube-proxy is disabled, point the apiserver config at Calico and
# restart kubelets on all workers before waiting for Ready nodes.
if echo "$xcluster_PROXY_MODE" | grep -qi disable; then
kubectl create -n kube-system -f $dir/default/etc/kubernetes/calico/apiserver-configmap.yaml
otcw restart_kubelet
fi
otc 1 check_nodes
}
## test start
## Start cluster with Calico. The "linux" data-plane is default.
test_start() {
test -n "$xcluster_CALICO_BACKEND" || export xcluster_CALICO_BACKEND=legacy
test_start_empty $@
otcr vip_routes
}
## test start_vpp
## Start cluster with the VPP data-plane
test_start_vpp() {
#export xcluster_PROXY_MODE=disabled
export xcluster_CALICO_BACKEND=operator+install-vpp
# VPP needs more memory and fewer VMs than the defaults.
export __mem=2G
export __mem1=1G
export __nvm=3
test_start_empty $@
otcr vip_routes
}
## test start_bpf
## Start cluster with the eBPF data-plane. Kube-proxy is disabled.
test_start_bpf() {
export xcluster_PROXY_MODE=disabled
export xcluster_CALICO_BACKEND=bpf
test_start_empty $@
otcr vip_routes
}
##
. $($XCLUSTER ovld test)/default/usr/lib/xctest
indent=''

# Get the command
cmd=$1
shift
# Validate the command against the cmd_* functions defined in this file.
grep -q "^cmd_$cmd()" $0 $hook || die "Invalid command [$cmd]"

# Parse leading --opt / --opt=value flags into __opt shell variables.
# NOTE: the option name is eval'd verbatim, so flags are trusted input.
while echo "$1" | grep -q '^--'; do
    if echo $1 | grep -q =; then
	o=$(echo "$1" | cut -d= -f1 | sed -e 's,-,_,g')
	v=$(echo "$1" | cut -d= -f2-)
	eval "$o=\"$v\""
    else
	o=$(echo "$1" | sed -e 's,-,_,g')
	eval "$o=yes"
    fi
    shift
done
unset o v
long_opts=`set | grep '^__' | cut -d= -f1`

# Execute command
trap "die Interrupted" INT TERM
cmd_$cmd "$@"
status=$?
rm -rf $tmp
exit $status
| true |
9d48cc94ecbb60f180f61721cede8a2b6efa9622 | Shell | webern-unibas-ch/awg-app | /version.sh | UTF-8 | 1,005 | 3.65625 | 4 | [
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Regenerate src/app/app.globals.ts from package.json (version, homepage) and
# today's date, then stage it for the next commit.
version_file=src/app/app.globals.ts
CURRENT_VERSION=$(node -p "require('./package.json').version")
CURRENT_HOMEPAGE=$(node -p "require('./package.json').homepage")
CURRENT_DAY=$(date +"%d.")
# German month name -- requires the de_DE.UTF-8 locale to be installed,
# otherwise this silently yields an empty/unlocalized month. TODO confirm.
CURRENT_MONTH=$(LC_ALL=de_DE.UTF-8 date +"%B");
CURRENT_YEAR=$(date +"%Y")
CURRENT_DATE="$CURRENT_DAY $CURRENT_MONTH $CURRENT_YEAR"
echo "Updating app globals...
"
# Truncate the file, then append the generated TypeScript constants.
> $version_file
echo "// THIS IS AN AUTO-GENERATED FILE. DO NOT CHANGE IT MANUALLY!
// Generated last time on $(date)
/**
 * The latest version of the AWG App
 */
export const appVersion = '$CURRENT_VERSION';
/**
 * The release date of the latest version of the AWG App
 */
export const appVersionReleaseDate = '$CURRENT_DATE';
/**
 * The URL of the AWG App
 */
export const appHomepage = '$CURRENT_HOMEPAGE';" >> $version_file
echo "Adding global file to git...
"
git add $version_file
echo "Updated app globals to
... version: $CURRENT_VERSION
... version date: $CURRENT_DATE
... homepage: $CURRENT_HOMEPAGE
Done." | true |
9ed4d8108087bdd7a4bfaf17afd996a4e9a4fb51 | Shell | dulalsaurab/ROUTING | /scripts/graphing-scripts/plot/plot-med.sh | UTF-8 | 1,173 | 3.28125 | 3 | [] | no_license | #!/bin/bash
if [ $# -lt 1 ] ; then
echo "Usage: $0 <test folder>"
exit
fi
dir=$1
#get the name of nodes
# NOTE: parses `ls` output; node names are assumed to contain no whitespace.
y=`ls $dir/hr/faces-2`
# Render one PNG per node comparing median RTT across the six routing/face
# configurations (hr/ls x faces-2/3/all).
for k in $y
do
echo "Node: $k"
# The heredoc below is a generated gnuplot script; $dir and $k are expanded
# by the shell before gnuplot sees it.
gnuplot << EOF
reset
fontsize = 14
set terminal pngcairo enhanced font 'Verdana,10' size 900,500 crop
set boxwidth 1.0
set style fill solid 0.5 border 0
set style histogram clustered gap 3 errorbars lw 1
set style data histograms
set datafile missing "-"
set yrange[0:500]
set grid ytics
set xtics rotate
set key outside
set key right center
set key vertical
set size 1,0.7
set ylabel "Round Trip Time (ms)"
set output "$dir/med-${k}.png"
set title "Med RTT from $k to other sites"
plot '$dir/hr/faces-2/$k/ping-data.all' u 2:3:4:xtic(1) title "hr-f2" fc rgb "red",\
'$dir/ls/faces-2/$k/ping-data.all' u 2:3:4 title "ls-f2" fc rgb "blue",\
'$dir/hr/faces-3/$k/ping-data.all' u 2:3:4 title "hr-f3" fc rgb "green",\
'$dir/ls/faces-3/$k/ping-data.all' u 2:3:4 title "ls-f3" fc rgb "magenta",\
'$dir/hr/faces-all/$k/ping-data.all' u 2:3:4 title "hr-all" fc rgb "pink",\
'$dir/ls/faces-all/$k/ping-data.all' u 2:3:4 title "ls-all" fc rgb "cyan"
EOF
done #for k in $y
| true |
75cfd891a09228df74e765b15d857ca43c846aef | Shell | EilidhHendry/hadoop-mapreduce | /Task_4.2/4_part2_2.sh | UTF-8 | 730 | 2.875 | 3 | [] | no_license | #!/bin/bash
echo "Assignment 2 - Question 4 - Part 2"
echo "Querying Stack Overflow"
# HDFS input (output of part 1) and output paths, streaming mapper/reducer.
IN="/user/s0925284/exc2/output/task_4_part2_1.out"
OUT="/user/s0925284/exc2/output/task_4_part2.out"
MAP="task4mapper_part2_2.py"
RED="task4reducer_part2_2.py"
OUTFILE="Question_4_part2"
NUMR=1
# Remove any previous output dir -- the streaming job fails if it exists.
hadoop dfs -rmr $OUT
# Numeric-reverse sort on the key via KeyFieldBasedComparator (-nr).
RUN="hadoop jar /opt/hadoop/hadoop-0.20.2/contrib/streaming/hadoop-0.20.2-streaming.jar -D mapred.output.key.comparator.class=org.apache.hadoop.mapred.lib.KeyFieldBasedComparator -D mapred.text.key.comparator.options=-nr -D mapred.reduce.tasks=$NUMR -input $IN -output $OUT -mapper $MAP -file $MAP -reducer $RED -file $RED"
$RUN
# Refresh the local copy of the results.
rm -r $OUTFILE
hadoop dfs -copyToLocal $OUT $OUTFILE
echo "Script complete, see output Directory."
| true |
0eb3c4fd7bf56713049d7f8552f86d0a494fef26 | Shell | sckevmit/vrouter-pktgen-tests | /config-scripts/MPLSoUDP/VROUTER2/90.unbind-ifs.sh | UTF-8 | 658 | 2.8125 | 3 | [] | no_license | #!/bin/bash
##
## Unbind Interfaces on vRouter 2
## Copyright (c) 2015 Semihalf. All rights reserved.
##
# Pull in VROUTER2_* and BIND definitions shared by the test scripts.
. ../00.config.sh
#################################################################
## Re-bind Interfaces to Linux Driver
sudo -E ${BIND} -b ${VROUTER2_PCI_DRV} ${VROUTER2_PCI}
sudo -E ${BIND} --status
#################################################################
## Remove Kernel Modules
# DPDK modules loaded by the setup script; rmmod fails if still in use.
sudo rmmod rte_kni.ko
sudo rmmod igb_uio.ko
#################################################################
## Configure Linux Interfaces
# Restore the kernel interface with its default IP once DPDK releases it.
sudo ifconfig ${VROUTER2_PCI_IF} ${VROUTER2_PCI_DEF_IP} netmask 255.255.255.0 up
sudo ifconfig ${VROUTER2_PCI_IF}
| true |
d1f6ca4d6ad8fe936a14c6f348db52aea6ec2b78 | Shell | lhl/powerbutton | /cron/rtunnel.sh | UTF-8 | 350 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
REMOTE_PORT=9999
REMOTE_HOST="REMOTE_HOST"
# $COMMAND is the command used to create the reverse ssh tunnel.
# It is expanded unquoted below on purpose so it word-splits into argv.
COMMAND="/usr/bin/ssh -q -N -R $REMOTE_PORT:localhost:22 $REMOTE_HOST -o ExitOnForwardFailure=yes -o ServerAliveInterval=10"
# Re-establish the tunnel only when no matching ssh process is running.
# pgrep -f matches the full command line and, unlike the old
# "ps | grep | grep -v grep" chain, cannot accidentally match itself.
if ! pgrep -f "$COMMAND" > /dev/null; then
  $COMMAND
fi
| true |
985ea8f35971d71d4fed660eabd65701d405a44a | Shell | asdyxcyxc/Dynamic-Rabbits | /vulnerability_bunny/scripts/linux/run_unit_tests.sh | UTF-8 | 1,640 | 3.21875 | 3 | [] | no_license | RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color

echo "Starting Tests ..."

NUM_TESTS=0
NUM_PASSED=0

# run_test CONFIG TARGET [ARGS...]
# Run TARGET (with ARGS) under the DynamoRIO client library CONFIG via
# $DB_RUN; propagates the target's exit status.
run_test(){
  config=$1
  shift
  $DB_RUN -c "$config" -- "$@"
  return $?
}

# run_py_test SCRIPT TARGET [ARGS...]
# Run a python test harness, handing it the drrun binary and the target.
# BUG FIX: the original forwarded only $2..$4 and silently dropped $5;
# "$@" forwards every argument.
run_py_test(){
  script=$1
  shift
  python "$script" "$DYNAMORIO_DIR/install/bin32/drrun" "$@"
  return $?
}

# check_result CONFIG TARGET [ARGS...]
# Run one native test case, count it, report pass/fail in colour, and abort
# the whole suite on the first failure.
check_result(){
  bname=$(basename "$2")
  # BUG FIX: NUM_PASSED used to be bumped unconditionally here while
  # NUM_TESTS was never updated; count the attempt, and count the pass below.
  NUM_TESTS=$((NUM_TESTS + 1))
  run_test "$@"
  if [ $? -eq 0 ]
  then
    NUM_PASSED=$((NUM_PASSED + 1))
    echo -e "${GREEN}$bname test passed!${NC}"
    return 1
  else
    echo -e "${RED}$bname test failed!${NC}"
    # BUG FIX: a bare "exit" after echo exited with status 0; report failure.
    exit 1
  fi
}

# check_py_result SCRIPT TARGET [ARGS...]
# Python-harness analogue of check_result.
check_py_result(){
  bname=$(basename "$2")
  NUM_TESTS=$((NUM_TESTS + 1))
  run_py_test "$@"
  if [ $? -eq 0 ]
  then
    NUM_PASSED=$((NUM_PASSED + 1))
    echo -e "${GREEN}$bname test passed!${NC}"
    return 1
  else
    echo -e "${RED}$bname test failed!${NC}"
    exit 1
  fi
}
# Enter the unit-test build directory and run every use-after-free test
# binary under the UAF checker client; the suite stops at the first failure.
cd $VULNERABILITY_BUNNY_DIR/src/unit_tests/build
echo "Running applications"
check_result libuaf_checker.so uaf_test
check_result libuaf_checker.so uaf_test2
check_result libuaf_checker.so uaf_test3
check_result libuaf_checker.so uaf_test4
check_result libuaf_checker.so uaf_test5
check_result libuaf_checker.so uaf_test6
check_result libuaf_checker.so uaf_test7
check_result libuaf_checker.so uaf_test8
check_result libuaf_checker.so uaf_test9
check_result libuaf_checker.so uaf_test10
check_result libuaf_checker.so uaf_test11
check_result libuaf_checker.so uaf_test12
check_result libuaf_checker.so uaf_test13
check_result libuaf_checker.so uaf_test14
check_result libuaf_checker.so uaf_test15
check_result libuaf_checker.so uaf_test16
check_result libuaf_checker.so uaf_test17
| true |
988959f51eb720afe9371a6e8405a0a0ce6b468e | Shell | webos-internals/build | /optware/libstdc++/control/postinst | UTF-8 | 367 | 2.78125 | 3 | [] | no_license | #!/bin/sh
# webOS/Optware post-install hook: mirror the packaged lib tree into /opt via
# symlinks so binaries can link against this libstdc++.
APPID=mobi.optware.libstdc++
# Symlink files into /opt
cd $IPKG_OFFLINE_ROOT/usr/palm/applications/$APPID/opt
# Recreate the directory structure, then symlink each file into place.
find lib -type d -exec mkdir -p /opt/{} \;
find lib -type f -exec ln -sf $IPKG_OFFLINE_ROOT/usr/palm/applications/$APPID/opt/{} /opt/{} \;
# Standard soname links for the shipped library version.
ln -sf libstdc++.so.6.0.9 /opt/lib/libstdc++.so
ln -sf libstdc++.so.6.0.9 /opt/lib/libstdc++.so.6
exit 0
| true |
22f1c538d32740f570f486267a518e6a6d923562 | Shell | farhanghazi97/Social-Network-Analysis | /Ass2_Testing/testPQ.sh | UTF-8 | 685 | 3.34375 | 3 | [] | no_license | RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m'

make testPQ || exit

mkdir -p output
# BUG FIX: this used to delete output/TestGraph.out -- a copy/paste leftover
# from the graph test script; remove the stale PQ output instead.
rm -f output/TestPQ.out
./testPQ > output/TestPQ.out

# Compare against the expected output, ignoring blank-line differences.
r=$(diff -B output/TestPQ.out output/TestPQ.exp)
if [[ "$r" == "" ]]; then
  echo -e "====== ${GREEN} Output Matches${NC} ======"
else
  echo -e "=========== ${RED} Output Mismatch${NC} ============"
  echo -e " ${RED}(your output on left, expected on right)${NC} "
  diff -y -B output/TestPQ.out output/TestPQ.exp
  echo -e " ${RED} Your output in: ./output/TestPQ.out ${NC}"
  echo -e " ${RED} Expected output in: ./output/TestPQ.exp ${NC}"
  echo -e "=========== ${RED} End of Output Mismatch${NC} ============"
fi
| true |
e2c4d2b6b5c820ccf0d009f1ec342e15f6a8decf | Shell | Srini4u/tomstar | /upload_image_to_elasticbeanstalk.sh | UTF-8 | 1,327 | 2.84375 | 3 | [] | no_license | #! /bin/bash
# Register a new Elastic Beanstalk application version from a Dockerrun file
# already uploaded to S3, then point the environment at it.
DOCKER_TAG=$1
DOCKERRUN_FILE=$DOCKER_TAG-Dockerrun.aws.json
EB_BUCKET=$DEPLOYMENT_BUCKET/$BUCKET_DIRECTORY
echo "REGION=$REGION"
echo "AWS_APPLICATION_NAME=$AWS_APPLICATION_NAME"
echo "DOCEKR_TAG=$DOCKER_TAG"
echo "DOCKERRUN_FILE=$DOCKERRUN_FILE"
echo "DEPLOYMENT_BUCKET=$DEPLOYMENT_BUCKET"
echo "BUCKET_DIRECTORY=$BUCKET_DIRECTORY"
echo "IMAGE_NAME=$IMAGE_NAME"
# Run aws command to create a new EB application with label
# aws elasticbeanstalk create-application-version --region=$REGION --application-name $AWS_APPLICATION_NAME \
#   --version-label $DOCKER_TAG --source-bundle S3Bucket=$DEPLOYMENT_BUCKET,S3Key=$BUCKET_DIRECTORY/$DOCKERRUN_FILE
# NOTE(review): the app name and region are hard-coded below even though
# $AWS_APPLICATION_NAME and $REGION are echoed above -- looks inconsistent;
# confirm which is intended before reuse.
aws elasticbeanstalk create-application-version \
--application-name tomstar \
--version-label $DOCKER_TAG \
--source-bundle S3Bucket=$DEPLOYMENT_BUCKET,S3Key=$BUCKET_DIRECTORY/$DOCKERRUN_FILE \
--region us-east-1
# Update Elastic Beanstalk environment to new version
aws elasticbeanstalk update-environment \
--environment-name tomstar \
--version-label $DOCKER_TAG \
--region us-east-1
| true |
66048c920775ebbca64f96ee52eade58787b69ec | Shell | openbsd/xenocara | /lib/mesa/.gitlab-ci/bin/download_gl_schema.sh | UTF-8 | 292 | 3.28125 | 3 | [] | no_license | #!/bin/sh
# Helper script to download the schema GraphQL from Gitlab to enable IDEs to
# assist the developer to edit gql files
SOURCE_DIR=$(dirname "$(realpath "$0")")
# Subshell keeps the caller's cwd untouched; schema.graphql lands next to
# this script.
(
cd $SOURCE_DIR || exit 1
gql-cli https://gitlab.freedesktop.org/api/graphql --print-schema > schema.graphql
)
| true |
f2c36d744361db921e576e5ad5edee68c3a9cafb | Shell | Benjaminmnoer/xZTL | /vms/setup-vmctl.sh | UTF-8 | 841 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# WARNING: This deletes every folder created by vmctl and creates a complete fresh install.
# Paths to the local vmctl and arch base-image checkouts (machine-specific).
VMCTLLOC=$HOME/Repositories/vmctl/
ARCHLOC=$HOME/Repositories/archbase/
sudo rm -rf img/nvme.qcow2 log/ run/ state/
IMG=$PWD/img/
if [[ ! -d "$IMG" ]]
then
echo "Creating img folder."
mkdir img
fi
# Generate the cloud-init seed image once, keyed to the user's SSH key.
SEED=$PWD/img/seed.img
if [ ! -f "$SEED" ]; then
cd img
$VMCTLLOC/contrib/generate-cloud-config-seed.sh $HOME/.ssh/id_rsa.pub
cd ..
fi
# Create the base VM disk from the local arch image (the commented-out lines
# are the old Ubuntu cloud-image workflow) and grow it to 8G.
BASE=$PWD/img/base.qcow2
if [ ! -f "$BASE" ]; then
cd img
echo "Getting image"
# wget https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img
cp $ARCHLOC/archbase.qcow2 base.qcow2
# echo "Renaming to base.qcow2"
# mv focal-server-cloudimg-amd64.img base.qcow2
echo "Resizing to 8G"
qemu-img resize base.qcow2 8G
cd ..
fi
| true |
99c86589513ddef69edacf667c9ec5c6cbabf823 | Shell | spolowy/Death | /scripts/fast_refill.bash | UTF-8 | 264 | 3.640625 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
yellow='\033[33m'
none='\033[0m'
self="$0"
if [ $# -ne 1 ]; then
printf "${yellow}usage${none}: $self [directory]\n"
exit 1
fi
folder="$1"
# Reset the fixed /tmp/test{,2} scratch areas and seed /tmp/test with the
# contents of the given directory.
rm -rf /tmp/test/*
rm -rf /tmp/test2/*
mkdir -p /tmp/test
mkdir -p /tmp/test2
cp "$folder"/* /tmp/test/
| true |
e5e2a33c328ae21de96966c2165745c7f6d7e9bc | Shell | pombreda/aur-mirror | /translate-shell/PKGBUILD | UTF-8 | 1,512 | 2.75 | 3 | [] | no_license | # Maintainer: Thiago Perrotta <echo dGhpYWdvcGVycm90dGE5NUBnbWFpbC5jb20K | base64 -d >
# Contributor: Star Brilliant <echo bTEzMjUzQGhvdG1haWwuY29tCg== | base64 -d>
# Arch Linux PKGBUILD for translate-shell (Google Translate CLI wrapper).
pkgname=translate-shell
pkgver=0.8.22.5
pkgrel=4
pkgdesc="Google Translate to serve as a command-line tool."
arch=('any')
url="http://www.soimort.org/$pkgname"
license=('Public Domain')
depends=('gawk>=4.0')
makedepends=('git')
optdepends=(
# 'zsh: for interpreting the wrapper script'
'fribidi: needed for displaying right-to-left (RTL) languages'
'mplayer: needed for the Text-to-Speech functionality'
'espeak: needed for the Text-to-Speech functionality'
'rlwrap: needed for readline-style editing and history in the interactive mode'
'emacs: for using the Emacs interface (with the -E flag)'
)
provides=("$pkgname")
conflicts=("$pkgname")
install=$pkgname.install
source=("https://github.com/soimort/$pkgname/archive/v$pkgver.tar.gz")
md5sums=('0e73b25c0b2ead022268555943a77460')
sha256sums=('a0e3d5ac173f8964d05e698cb27628852a98e77cbb31006bbee54f8ff9e2d7d8')
# Standard makepkg build step.
build() {
cd "$srcdir/$pkgname-$pkgver"
make
}
# Install the binary (plus a translate-shell alias), man pages and license.
package() {
cd "$srcdir/$pkgname-$pkgver"
mkdir -p "$pkgdir/usr/bin/"
# Main executable
make "INSTDIR=$pkgdir/usr/bin" install
ln -s /usr/bin/trans "$pkgdir/usr/bin/$pkgname"
# Man page
install -Dm644 man/trans.1 "$pkgdir/usr/share/man/man1/trans.1"
install -Dm644 man/trans.1 "$pkgdir/usr/share/man/man1/$pkgname.1"
# License
install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true |
e4816a1238c72f9dc72175319e8d44b0759e3027 | Shell | midzelis/pms-docker | /root/plex-envvars | UTF-8 | 527 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Strict mode: abort on errors and unset variables; fail pipelines early.
set -euo pipefail
IFS=$'\n\t'

# Home directory of the plex user; the support dir defaults beneath it unless
# the caller already provided one.
home=~plex
export PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR="${PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR:-${home}/Library/Application Support}"

export PLEX_MEDIA_SERVER_HOME=/usr/lib/plexmediaserver
export PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS=6

# Identify this install as a Docker container, reporting the host's
# architecture and kernel version.
model=$(uname -m)
version=$(uname -r)
export PLEX_MEDIA_SERVER_INFO_VENDOR=Docker
export PLEX_MEDIA_SERVER_INFO_DEVICE="Docker Container"
export PLEX_MEDIA_SERVER_INFO_MODEL="$model"
export PLEX_MEDIA_SERVER_INFO_PLATFORM_VERSION="$version"
| true |
954acc0679dfae54f40085622b77d16ff0de90c5 | Shell | antonbriganti-myob/payslip-ci-cd | /ops/bin/deploy | UTF-8 | 1,686 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env bash
# Teaching script: explains deploy concepts, then fakes a 5-second deploy.
# (The typos in the echoed strings are user-facing text and left as-is.)
echo "Deploying is the last thing you do on your pipeline. Now that you know everything is good because your code has built and your tests have passed"
echo "Compared to building and testing, deploying can vary pretty differently depending on what service or lanaguage you're using, but the core concepts remain the same"
echo "Your code is bundled into an executable format, and then uploaded onto the service you're using."
echo "We want the pipeline to take care of this so that we don't have to worry about relying on one person to deploy, and to make sure the same configuration set up is used every time."
echo
echo "If we have different environments we deploy to (like SIT and PROD), we would want these to be different steps, just like our tests."
echo "It's also good to have rules around when you deploy to SIT and PROD, and these can be reflected in the pipeline."
echo "For example, you automatically deploy to SIT but you need to manually approve a deploy to PROD. You can set a blocker of some kind in your pipeline to make sure you don't auto deploy to PROD"
echo "TravisCI doesn't have a manual confirmation step, so each push we make would deploy. This isn't always ideal, but there are some ways around it like setting a 'release' branch that is the only one you can deploy on"
echo
echo "For the sake of ease today, we're not going to actually deploy anything, but there are services like Heroku which you could deploy your apps to. Feel free to check them out!"
echo "Frow, let's just pretend we're deploying."
echo "Deploying..."
# Fake progress bar: one '#' per second for five seconds.
count=0
while [ $count -lt 5 ]
do
echo -n "#"
sleep 1
count=$[$count+1]
done
echo
echo "Deployed!" | true |
727036a7fe59431ae0bd710e8b58c1f818344662 | Shell | rmehner/local-tld | /bin/local-tld-setup | UTF-8 | 715 | 3 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh -ex
# NOTE(review): PWD is assigned (shadowing the shell's special variable) but
# never used below -- likely a leftover.
PWD=`pwd`
# thanks http://stackoverflow.com/questions/630372/determine-the-path-of-the-executing-bash-script
MY_PATH="`dirname \"$0\"`"
MY_PATH="`( cd \"$MY_PATH\" && pwd )`"
ETC=$MY_PATH/../etc
# cp etc/* to targets
# Install the macOS resolver entry for *.dev plus the launchd plists.
sudo mkdir -p /etc/resolver
sudo cp $ETC/resolver.dev /etc/resolver/dev
sudo cp $ETC/ie.hood.local-tld-firewall.plist /Library/LaunchDaemons/
cp $ETC/ie.hood.local-tld-service.plist $HOME/Library/LaunchAgents/
# launchin
# Load the firewall daemon, then (re)load the per-user service agent.
sudo launchctl load -Fw /Library/LaunchDaemons/ie.hood.local-tld-firewall.plist
launchctl unload $HOME/Library/LaunchAgents/ie.hood.local-tld-service.plist
launchctl load -Fw $HOME/Library/LaunchAgents/ie.hood.local-tld-service.plist
echo "Setup done."
| true |
0d71ff4f7029b1b22011fa69c8bf60e1bd3a9d48 | Shell | PeiwenWu/Adaptation-Interspeech18 | /kaldi-wangke/scripts/rnnlm/get_num_splits.sh | UTF-8 | 4,072 | 3.71875 | 4 | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
# Copyright 2017 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0.
# This script works out how many pieces we want to split the data into for a
# particular training run, based on how many words are in the data directory
# (excluding dev), and the target words-per-split.
if [ $# != 3 ]; then
(
echo "Usage: rnnlm/get_num_splits.sh <target-words-per-split> <data-dir> <weights-file>"
echo "e.g.: rnnlm/get_num_splits.sh 200000 data/text exp/rnnlm/data_weights.txt"
echo "This works out how many pieces to split a data directory into, and"
echo "(if just one piece) how many times that piece should be repeated to"
echo "get the target words-per-split.  A number is printed to the standard"
echo "output.  If no repeats are necessary it will be the number of splits,"
echo "a positive number.  If repeats are necessary, then a negative number,"
echo "interpretable as the negative of the number of times we should repeat"
echo "the data, is echoed, and the number of splits should be taken to be 1."
echo "To compute the number of words of training data"
echo "this script uses <data-dir>/*.counts; they are scaled by the data-multiplicities"
echo "given as the second field of <weights-file> for each data source."
) 1>&2
exit 1
fi
words_per_split=$1
text=$2
weights_file=$3
# Integer check: [ x -eq x ] fails for non-numeric input.
! [ $words_per_split -eq $words_per_split ] && \
echo "$0: first arg must be an integer" 1>&2 && exit 1;
[ ! -d $text ] && \
echo "$0: no such directory $text" 1>&2 && exit 1;
# NOTE(review): these two lines test $weight, but the variable set above is
# $weights_file -- the check is effectively a no-op; confirm and fix upstream.
[ ! -f $weight ] && \
echo "$0: expected weights file in $weight" 1>&2 && exit 1;
rnnlm/ensure_counts_present.sh $text
set -e -o pipefail -u
export LC_ALL=C
# Temp file mapping data-source name -> multiplicity; removed on exit.
multiplicities=$(mktemp tmp.XXXX)
trap "rm $multiplicities" EXIT
if ! awk '{if(NF!=3){ exit(1); } print $1, $2; } END{if(NR==0) exit(1);}' <$weights_file > $multiplicities; then
echo "$0: weights file $weights_file has the wrong format."
fi
tot_orig=0
tot_with_multiplicities=0
# Sum word counts over every counts file except dev, scaling each source by
# its integer multiplicity from the weights file.
for f in $text/*.counts; do
if [ "$f" != "$text/dev.counts" ]; then
this_tot=$(cat $f | awk '{tot += $2} END{print tot}')
if ! [ $this_tot -gt 0 ]; then
echo "$0: there were no counts in counts file $f" 1>&2
exit 1
fi
# weight by the data multiplicity which is the second field of the weights file.
multiplicity=$(basename $f | sed 's:.counts$::' | utils/apply_map.pl $multiplicities)
if ! [ $multiplicity -eq $multiplicity ]; then
echo "$0: error getting multiplicity for data-source $f, check weights file $weights_file"
exit 1
fi
tot_orig=$[tot_orig+this_tot]
tot_with_multiplicities=$[tot_with_multiplicities+(this_tot*multiplicity)]
fi
done
if ! [ $tot_orig -gt 0 ]; then
echo "$0: there was a problem getting counts from directory $text (no counts present?)" 1>&2
exit 1
fi
if ! [ $tot_with_multiplicities -gt 0 ]; then
echo "$0: there was a problem getting counts from directory $text (check data-weights file $weights_file)" 1>&2
exit 1
fi
# adding words_per_split-1 below causes us to round up the number of splits.
num_splits=$[(tot_with_multiplicities+words_per_split-1)/words_per_split]
actual_words_per_split=$[tot_with_multiplicities/num_splits]
if ! [ $num_splits -gt 0 ]; then
echo "$0: there was a problem getting the number of splits" 1>&2
exit 1
fi
# If the data is smaller than one target split, repeat it instead.
num_repeats=$[words_per_split/actual_words_per_split]
if ! [ $num_repeats -ge 1 ]; then
echo "$0: error computing the number of repeats, got $num_repeats." 1>&2
exit 1
fi
if [ $num_repeats -gt 1 -a $num_splits -gt 1 ]; then
echo "$0: script error: both num-repeats and num-splits are over 1." 1>&2
exit 1
fi
echo -n "get_num_splits.sh: based on tot-words=$tot_orig (with multiplicities: $tot_with_multiplicities)" 1>&2
echo " and target-words-per-split=$words_per_split, got $num_splits splits, actual words-per-split is $actual_words_per_split" 1>&2
if [ $num_repeats -gt 1 ]; then
echo " ... and num-repeats is $num_repeats" 1>&2
fi
# Protocol: positive number = num-splits; negative number = -num-repeats
# (with splits taken to be 1).
if [ $num_repeats -eq 1 ]; then
echo $num_splits
else
echo -$num_repeats
fi
b66a498cd60e18335d86a9aed834f42660968b8f | Shell | SkRabbanibasha/shallscripting | /day_14/lastname.sh | UTF-8 | 191 | 3.484375 | 3 | [] | no_license | #! /bin/bash -
# Validate a last name: it must begin with an uppercase letter followed by at
# least two more letters. Prints "valid" or "invalid".
validlast(){
	name=$1
	pat="^[A-Z][a-zA-Z]{2}"
	case "$name" in
		[A-Z][a-zA-Z][a-zA-Z]*) echo valid ;;
		*) echo invalid ;;
	esac
}
# Prompt the user and report whether the entered last name looks valid.
read -p "enter a last name:" lastname
validlast $lastname
| true |
4610648566ce97d3b90b421a9b9c84a107ed9a16 | Shell | OpenGov/s3deploy | /s3deploy.sh | UTF-8 | 9,933 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Copyright (c) OpenGov 2014 - 2016
############################
# Simple script that tarballs the build directory and puts it to s3. The script
# assumes that it is being run under a Travis CI environment.
#
# It now supports posting arbitrary messages to SQS as well as relaying a
# tarball if travis secure env variables are not available.
#
# When tarballing the build, it is expected that the current working directory
# be inside the build directory
#
# To use this script, you need to include the functions from this script:
# $ . /path/to/s3deploy.sh
#
# Once they've been included, then you'll want to initialize it:
# $ s3d_initialize
#
# And at the end of your script call the upload function
# $ s3d_upload
#
# Your script should look like:
# - . /path/to/s3deploy.sh && s3d_initialize
# - <do some funky tests>
# - ...
# - s3d_upload
# - s3d_deploy <scm provider> <chef attr> <url affix> <chef runlist> <custom message>
#
# It expects the following environment variables to be set:
# TARBALL_TARGET_PATH : The target path for the tarball to be created
# TARBALL_EXCLUDE_PATHS : An array of directories and paths to exclude from the build. Should be in the form of TARBALL_EXCLUDE_PATHS='--exclude=path1 --exclude=path/number/dir'. You can use the s3d_exclude_paths function if youre to lazy to include the --exclude= your self.
#
# AWS_S3_BUCKET : The S3 bucket to upload the tarball to.
# AWS_S3_GLOBAL_NAMESPACE_DIR : The global namespace directory for placing all builds. Defaults tp '_global_'
# AWS_S3_GLOBAL_OBJECT_PATH : The global object path to the tarball you want to upload, in the form of <path>/<to>/<tarball name>. Defaults to <repo name>/_global_/<commit>.tar.gz
# AWS_SQS_NAME : The AWS SQS queue name to send messages to.
# AWS_DEFAULT_REGION : The S3 region to upload your tarball.
# AWS_ACCESS_KEY_ID : The aws access key id
# AWS_SECRET_ACCESS_KEY : The aws secret access key
#
# TRAVIS_BRANCH : The name of the branch currently being built.
# TRAVIS_COMMIT : The commit that the current build is testing.
# TRAVIS_PULL_REQUEST : The pull request number if the current job is a pull request, "false" if it's not a pull request.
# TRAVIS_BUILD_NUMBER : The number of the current build (for example, "4").
# TRAVIS_REPO_SLUG : The slug (in form: owner_name/repo_name) of the repository currently being built.
# TRAVIS_BUILD_DIR : The absolute path to the directory where the repository
# TRAVIS_SECURE_ENV_VARS : Whether the secret environment variables are available or not.
#
# TRAVIS_PYTHON_VERSION : Version of python that is being used, indicating that its using virtualenv
###############################################################################
# Enable to exit on any failure
set -e
######################################
########## Private Functions #########
######################################
# Sets information about the deploy into .s3d
# Write deploy metadata (repo, revision, branch, build number, S3 prefix and
# a UTC epoch timestamp) as JSON to the given path, defaulting to ".s3d".
_set_metadata() {
  s3d_meta_path="${1:-.s3d}"
  cat > "$s3d_meta_path" <<EOF
{
  "repo_url": "git@github.com:$TRAVIS_REPO_SLUG.git",
  "repo_owner": "$GIT_REPO_OWNER",
  "repo_name": "$GIT_REPO_NAME",
  "repo_slug": "$TRAVIS_REPO_SLUG",
  "revision": "$TRAVIS_COMMIT",
  "branch": "$TRAVIS_BRANCH",
  "build": "$TRAVIS_BUILD_NUMBER",
  "pull_request": "$TRAVIS_PULL_REQUEST",
  "s3_prefix_tarball": "$AWS_S3_BUCKET/$GIT_REPO_NAME/$AWS_S3_GLOBAL_NAMESPACE_DIR",
  "date": $(date -u +%s)
}
EOF
}
# Checks if the global build exists and exit if it does
# Ask S3 whether a build object for the current commit already exists.
# If it does: exit 0 (skip rebuilding), unless $dont_exit_if_build_exists is
# set, in which case S3D_BUILD_EXISTS=1 is exported instead.
_check_global_build_exists() {
# Temporarily tolerate a failing head-object call (object may not exist).
set +e
revision=$(ruby -r 'json' -e "resp = JSON.parse(%x[aws s3api head-object --bucket $AWS_S3_BUCKET --key $AWS_S3_GLOBAL_OBJECT_PATH]); puts resp['Metadata']['revision']")
status=$?
set -e
if [ "$status" = 0 ] && [ "$revision" = "$TRAVIS_COMMIT" ] ; then
echo "Commit $TRAVIS_COMMIT has already been built.";
if [ -n "$dont_exit_if_build_exists" ]; then
# Export variable to let others know that the build already exists
export S3D_BUILD_EXISTS=1
else
exit 0;
fi
else
echo "Build at 's3://$AWS_S3_BUCKET/$AWS_S3_GLOBAL_OBJECT_PATH' does not exist"
fi
}
######################################
########## Public Functions ##########
######################################
# Check if the file names in the build folder are fingerprinted
# Parameters:
# s3d_check_fingerprints <local_directory>
#
# Example:
# s3d_check_fingerprints build/public
s3d_check_fingerprints() {
if [ ! $# -eq 1 ]; then echo "check_fingerprints requires exactly 1 parameter; $# parameters given"; exit 1; fi
local_dir=$1
GLOBIGNORE="*.json"
for file_name in "$local_dir"/*; do
normalized_file_name=$(basename "$file_name")
status=$(echo "$normalized_file_name" | grep -E "^(.*?\.)?[a-fA-F0-9]{20,124}(\.[a-z0-9]+)+$")
if [ "$status" = "" ]; then
echo "Error: $normalized_file_name is not fingerprinted. Please check!"
exit 1
fi
done
}
# Syncs a directory to s3. By default the files synced are set to private read only.
# Parameters:
# s3d_sync <local_directory> <s3_path> <permissions> <custom flags>
#
# Example:
# s3d_sync assets dapp-assets public-read --exclude '*' --include '*-????????????????????????????????.*'
# Sync a local directory to S3. Files default to private ACL.
# $1 - local directory, $2 - s3 path (bucket/prefix), $3 - optional ACL,
# remaining args - extra flags passed straight to `aws s3 sync`.
# Pull-request builds must never publish, so the sync is skipped unless
# TRAVIS_PULL_REQUEST is the literal string "false" (a merge build).
s3d_sync() {
  if [ ! "$#" -ge 2 ]; then echo "s3d_sync requires at least 2 parameters; $# parameters given"; exit 1; fi
  local local_dir=$1
  local s3_path=$2
  # ${3:-private} replaces the original unquoted [ -z $acl ] default check.
  local acl=${3:-private}
  local num_extra=$(($# - 3))
  set -x
  if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then
    aws s3 sync "${@:4:num_extra}" --acl "$acl" "$local_dir" "s3://$s3_path"
  fi
  set +x
}
# Mark paths to exclude from the tarball build. You should pass an
# array of patterns, which can include the shell wildcard, that match the file
# names to exclude; the paths can be either files or directories.
# Only use this function if you don't already set the TARBALL_EXCLUDE_PATHS yourself.
# Prepend a "--exclude=<pattern>" flag for every argument to
# TARBALL_EXCLUDE_PATHS and export it so s3d_upload's tar invocation can
# skip those paths. Patterns may contain shell wildcards.
s3d_exclude_paths() {
  for pattern in "$@"; do
    TARBALL_EXCLUDE_PATHS="--exclude=${pattern} ${TARBALL_EXCLUDE_PATHS}"
  done
  export TARBALL_EXCLUDE_PATHS
}
# Uploads the build tarball to s3, under the paths:
# s3://og-deployments/$GIT_REPO_NAME/_global_/$TRAVIS_COMMIT.tar.gz
# s3://og-deployments/$GIT_REPO_NAME/_global_/$TRAVIS_BRANCH.tar.gz
# Tar the working tree (minus .git and any TARBALL_EXCLUDE_PATHS patterns)
# and upload it as the commit/PR object; for merge builds also copy it to
# the branch-named key so "<branch>.tar.gz" always points at the latest.
s3d_upload() {
  # NOTE(review): $TRAVIS_BUILD_DIR is expanded unquoted — paths with
  # spaces would break here; confirm Travis never produces such paths.
  cd $TRAVIS_BUILD_DIR
  _set_metadata
  # Tar the build directory while excluding version control file
  # ($TARBALL_EXCLUDE_PATHS is deliberately unquoted: it word-splits into
  # individual --exclude=... flags built by s3d_exclude_paths).
  tar --exclude='./.git' $TARBALL_EXCLUDE_PATHS -c -z -f "$TARBALL_TARGET_PATH" .
  # Upload to S3; the ruby one-liner strips the surrounding quotes from the
  # returned ETag and attaches revision/PR/date metadata to the object.
  TARBALL_ETAG=$(ruby -e "require 'json'; resp = JSON.parse(%x[aws s3api put-object --acl private --bucket $AWS_S3_BUCKET --key $AWS_S3_GLOBAL_OBJECT_PATH --body $TARBALL_TARGET_PATH --metadata revision=$TRAVIS_COMMIT,pull_request=$TRAVIS_PULL_REQUEST,date=`date -u --iso-8601=seconds`]); puts resp['ETag'][1..-2]")
  # Copy to the global namespace as its branch name only if its not a pull request
  if [ "$TRAVIS_PULL_REQUEST" = 'false' ]; then
    aws s3api copy-object --metadata-directive COPY --copy-source "$AWS_S3_BUCKET/$AWS_S3_GLOBAL_OBJECT_PATH" --bucket "$AWS_S3_BUCKET" --key "$GIT_REPO_NAME/$AWS_S3_GLOBAL_NAMESPACE_DIR/$TRAVIS_BRANCH.tar.gz"
  fi
}
# Initializes necessary environment variables and checks if build exists.
# Will exit build successfully if the build already exists in the master branch
# Takes the following arguments
# $1, dont_exit_if_build_exists : Whether to continue the script or not if the build already exists. Defaults to false; can be set to any truthy value.
# Derive and export all environment defaults, install the aws CLI, and
# short-circuit the build when this commit's tarball already exists.
# $1 (dont_exit_if_build_exists): any non-empty value keeps the script
# running when the build exists instead of exiting 0.
s3d_initialize() {
  set -x
  export BUILD_DATE=`date -u +%Y/%m`
  # Split "owner/repo" from the Travis slug into its two components.
  IFS='/' read -a ginfo <<< "$TRAVIS_REPO_SLUG"
  if [ -z "$GIT_REPO_OWNER" ]; then export GIT_REPO_OWNER="${ginfo[0]}"; fi
  if [ -z "$GIT_REPO_NAME" ]; then export GIT_REPO_NAME="${ginfo[1]}"; fi
  if [ -z "$TARBALL_TARGET_PATH" ]; then export TARBALL_TARGET_PATH=/tmp/$GIT_REPO_NAME.tar.gz; fi
  if [ -z "$AWS_S3_BUCKET" ]; then export AWS_S3_BUCKET=og-deployments; fi
  if [ -z "$AWS_S3_GLOBAL_NAMESPACE_DIR" ]; then export AWS_S3_GLOBAL_NAMESPACE_DIR='_global_'; fi
  if [ -z "$AWS_S3_GLOBAL_OBJECT_PATH" ]; then
    prefix="$GIT_REPO_NAME/$AWS_S3_GLOBAL_NAMESPACE_DIR"
    if [ "$TRAVIS_PULL_REQUEST" = 'false' ]; then
      # for merge builds
      export AWS_S3_GLOBAL_OBJECT_PATH="$prefix/$TRAVIS_COMMIT.tar.gz";
    else
      # for pull request builds
      export AWS_S3_GLOBAL_OBJECT_PATH="$prefix/pr-$TRAVIS_PULL_REQUEST.tar.gz";
    fi
  fi
  if [ -z "$AWS_DEFAULT_REGION" ]; then export AWS_DEFAULT_REGION=us-east-1; fi
  if [ -z "$AWS_ACCESS_KEY_ID" ]; then echo "AWS_ACCESS_KEY_ID not set"; exit 1; fi
  set +x
  # we don't want to spew the secrets
  # NOTE(review): this exits 0 (not 1, unlike the key-id check above) —
  # presumably so fork PRs, which get no secure env vars, pass harmlessly;
  # confirm that is intentional.
  if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then echo "AWS_SECRET_ACCESS_KEY not set"; exit 0; fi
  set -x
  # Enable user install if virtualenv has not been activated
  # The flag is unsupported in virtualenv since python packages
  # are already installed in user owned paths.
  # We also need to force reinstall the aws cli package if the project
  # python based because if its caching the pip packages, its not caching
  # the aws binary.
  user_mode=''
  ignore_installed=''
  if [ -z "$TRAVIS_PYTHON_VERSION" ]; then
    user_mode='--user'
  else
    ignore_installed='--ignore-installed'
  fi
  # Install the aws cli tools
  pip install $user_mode $ignore_installed awscli==1.10.44
  # Update the path to access the aws executable
  if [ -z "$TRAVIS_PYTHON_VERSION" ]; then export PATH="$HOME/.local/bin/:$PATH"; fi
  dont_exit_if_build_exists=$1
  _check_global_build_exists
}
| true |
5c63dcfd55acd94a5c068b2b66a36fc881724396 | Shell | adhytianara/SistemOperasi | /Demos/Week01/a01-SCREEN-CHECK | UTF-8 | 2,463 | 2.859375 | 3 | [] | no_license | #!/bin/bash
# REV05: Thu Feb 6 19:40:08 WIB 2020
# REV04: Thu Sep 13 10:07:03 WIB 2018
# START: Tue Jan 30 19:50:17 WIB 2018
# Copyright (C) 2018-2020 Rahmat M. Samik-Ibrahim
# http://RahmatM.Samik-Ibrahim.vLSM.org/
# This program is free script/software. This program is distributed in the hope
# that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# INFO: Check if the screen size is at least 80 x 23 characters
INFO=".zzz-generate-READ-THIS-FIRST.sh"
echo "";
[ -f $INFO ] && bash $INFO $0
XX="xx"
echo "RESIZE the screen if this following message does not fit in \"80 x 23\""
echo ""; echo "*** HIT ENTER KEY ***";
[ "$1" = "$XX" ] || (read YY)
cat - << ZCZCNNNN1
(ROW 01) START START START START START START START START START START START START
123456789|123456789|123456789|123456789|123456789|123456789|123456789|123456789|
10 20 30 40 50 60 70 80
(COLUMN)
123456789|123456789|123456789|123456789|123456789|123456789|123456789|123456789|
10 20 30 40 50 60 70 80
(COLUMN)
10 20 30 40 50 60 70 80
(ROW 09) XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX SCREEN TEST FOR 80 columns x 23 rows XXXXX
(ROW 13) XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
123456789|123456789|123456789|123456789|123456789|123456789|123456789|123456789|
10 20 30 40 50 60 70 80
(COLUMN)
10 20 30 40 50 60 70 80
123456789|123456789|123456789|123456789|123456789|123456789|123456789|123456789|
10 20 30 40 50 60 70 80
(COLUMN)
10 20 30 40 50 60 70 80
123456789|123456789|123456789|123456789|123456789|123456789|123456789|123456789|
(ROW 23) END END END END END END END END END END END END END END END END END END
ZCZCNNNN1
| true |
5e2cca4befb981ede36d22bece42ef8e3f459dea | Shell | poojagit912/ShellScripts | /palindrome.sh | UTF-8 | 264 | 3.71875 | 4 | [] | no_license | #!/bin/bash
echo -n "Enter number : "
read n
d=0
inp=$n
rev=""
while [ $n -gt 0 ]
do
d=$(( $n % 10 ))
n=$(( $n / 10 ))
rev=$( echo ${rev}${d} )
done
if [ $inp -eq $rev ];
then
echo "Number is palindrome"
else
echo "Number is NOT palindrome"
fi
| true |
c9cd580fd3a6e33ec37aed05062ffe05e5faf9ef | Shell | ibotty/.config | /bashrc.d/virtualenv.sh | UTF-8 | 823 | 4 | 4 | [] | no_license | #!/bin/bash
_python_virtualenv_dir="$HOME/.local/python-virtualenv"
_python_set_virtualenv() {
export VIRTUAL_ENV="$_python_virtualenv_dir/$1"
export PATH="$VIRTUAL_ENV/bin:$PATH"
unset PYTHON_HOME
}
_python_mkvirtualenv() {
mkdir -p "$_python_virtualenv_dir/$1"
virtualenv-3
}
_python_setup_virtualenv() {
pip install pylint
}
python_workon() {
if [ "$#" -gt 1 ]; then
env="$1"
elif [ -f .virtualenv_name ]; then
env="$(< .virtualenv_name)"
fi
local creating=no
if ! [ -d "$_python_virtualenv_dir/$env" ]; then
creating=yes
echo >&2 "virtualenv $env does not exist. Creating."
_python_mkvirtualenv "$env"
fi
_python_set_virtualenv "$env"
if [ "$creating" = yes ]; then
_python_setup_virtualenv "$env"
fi
}
| true |
b42c6a0b57bc61dad17a3c629f14c60c8b010deb | Shell | GoodGuide/sync-dir-with-github-org | /run_rename_report.sh | UTF-8 | 783 | 3.5625 | 4 | [] | no_license | #!/bin/bash
set -euo pipefail
repos_dat_file=goodguide_repos.dat
[[ -f $repos_dat_file ]] || ./load_all_github_repos GoodGuide > "$repos_dat_file"
output_report_prefix=goodguide_repo_cleanup
markdown_report_filename="${output_report_prefix}.md"
renames_report_filename="${output_report_prefix}_renames.yml"
./goodguide_repo_cleanup_report "$repos_dat_file" "$output_report_prefix"
open "${markdown_report_filename}"
local_rename_script_filename="${output_report_prefix}_rename_local_clones.sh"
./build_local_rename_script < "${renames_report_filename}" > "${local_rename_script_filename}"
chmod +x "${local_rename_script_filename}"
unset CONFIRM
./rename_repos_on_github < "${renames_report_filename}"
echo "wrote local rename script to ./${local_rename_script_filename}"
| true |
35e8edec52f80bf007fc0647a7da3c194fa127e3 | Shell | magos-linux/magos-linux | /make_MagOS/files/patches/rootfs/MagOS/usr/lib/magos/rc.d/rc.xorg | UTF-8 | 193 | 2.625 | 3 | [] | no_license | #!/bin/sh
# to be sourced
#
# magosctl Helper script for MagOS Linux Live.
#
# Authors: Mikhail Zaripov <m3for@mail.ru>
#
for a in /usr/lib/magos/rc.xorg/* ;do
[ -x $a ] && $a 2>/dev/null
done
true
| true |
2eb499298742066e1666efb24d025101c7ddd571 | Shell | dagdak/.setting | /install.sh | UTF-8 | 842 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# File: install.sh
# Author: Xianglan Piao <xianglan0502@gmail.com>
# Date: 2013.03.21
# Last Modified Date: 2018.01.15
# Last Modified By: Xianglan Piao <xianglan0502@gmail.com>
PACKAGE=("vim" "openssh-server" "tmux" "screen" "exuberant-ctags")
for p in ${PACKAGE[@]}; do
if [[ "$OSTYPE" == "linux-gnu" ]]; then
sudo apt-get -y install $p
elif [[ "$OSTYPE" == "darwin16" ]]; then
sudo port install $p
else
sudo pkg install $p
fi
done
TARGET=("vim" "vimrc" "bashrc" "bash_profile" "gitconfig")
for t in ${TARGET[@]}; do
rm -rf $HOME/.$t
ln -s $PWD/$t $HOME/.$t
echo $t" is changed"
done
if [[ -d "$HOME/.vim/bundle" ]]; then
rm -rf $HOME/.vim/bundle
mkdir -p $HOME/.vim/bundle
fi
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
vim +PluginInstall +qall
source $HOME/.bashrc
| true |
6e3b8300b4c618c63297c9c987b8a6349a0c094f | Shell | microservicesguru/local-setup | /install.sh | UTF-8 | 2,473 | 3.703125 | 4 | [] | no_license | #!/bin/bash
source /etc/profile
JAVA_VERSION="11"
# install packages
apt-get update
apt-get -y install curl
apt-get -y install docker.io unzip python3-pip
# add docker privileges
usermod -aG docker sdykyi
# install aws cli
AWS_CLI_EXECUTABLE_LOCATION=$(command -v aws)
echo 'Check if AWS CLI v2 is installed'
if [ -z "$AWS_CLI_EXECUTABLE_LOCATION" ];
then
echo 'Starting AWS CLI v2 installation...'
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
unzip awscliv2.zip && \
./aws/install && \
rm awscliv2.zip && \
pip3 install botocore && \
pip3 install boto3
else
echo 'AWS CLI v2 is already installed'
fi
#java
ACTUAL_JAVA_VERSION=$(java -version 2>&1 | head -1 | cut -d'"' -f2 | sed '/^1\./s///' | cut -d'.' -f1)
echo 'Check if Java is installed'
if [ -z "$ACTUAL_JAVA_VERSION" ] || [ "$ACTUAL_JAVA_VERSION" != "$JAVA_VERSION" ];
then
echo 'Starting Java installation...'
wget -O- https://apt.corretto.aws/corretto.key | sudo apt-key add - && \
add-apt-repository 'deb https://apt.corretto.aws stable main' && \
apt-get update && \
apt-get install -y java-11-amazon-corretto-jdk && \
sed -i '/export JAVA_HOME/d' /etc/profile && \
echo "export JAVA_HOME=/usr/lib/jvm/java-11-amazon-corretto" >> /etc/profile && \
echo "export PATH=$PATH:/usr/lib/jvm/java-11-amazon-corretto/bin" >> /etc/profile
else
echo "Java is already installed, version: $ACTUAL_JAVA_VERSION"
fi
##################
### OPTIONALLY ###
##################
GOOGLE_CHROME=$(google-chrome --version 2>&1 | awk -F ' ' '{print($1, $2)}')
echo 'Check if Google Chrome is installed'
if [ "$GOOGLE_CHROME" != "Google Chrome" ];
then
echo 'Starting Google Chrome installation...'
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb && \
dpkg -i google-chrome-stable_current_amd64.deb && \
rm google-chrome-stable_current_amd64.deb
else
echo "Google Chrome is already installed"
fi
INTELLIJ_IDEA=$(find /opt -type d -name 'idea*')
echo 'Check if Intellij Idea is installed'
if [ -z "$INTELLIJ_IDEA" ];
then
echo 'Starting Intellij Idea installation...'
wget https://download-cf.jetbrains.com/idea/ideaIU-2020.2.2.tar.gz && \
tar -zxvf ideaIU-2020.2.2.tar.gz && \
mv idea-IU-* /opt && \
rm ideaIU-2020.2.2.tar.gz
else
echo "Intellij Idea is already installed"
fi
source /etc/profile
# clean up
apt-get clean
| true |
8edabcdf54bce79b7768aeaa4d7620b38633362a | Shell | clyphy/Checkra1n-Linux-1 | /getcheckra1n.sh | UTF-8 | 3,940 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# Checkra1n Easy Installer
# GitHub Repository: https://github.com/Randomblock1/Checkra1n-Linux
# Terminal colors
BLACK=$(tput setaf 0)
RED=$(tput setaf 1)
GREEN=$(tput setaf 2)
YELLOW=$(tput setaf 3)
LIME_YELLOW=$(tput setaf 190)
BLUE=$(tput setaf 4)
MAGENTA=$(tput setaf 5)
CYAN=$(tput setaf 6)
WHITE=$(tput setaf 7)
BRIGHT=$(tput bold)
NORMAL=$(tput sgr0)
BLINK=$(tput blink)
REVERSE=$(tput smso)
UNDERLINE=$(tput smul)
# Print a message wrapped in a terminal style code, resetting attributes
# afterwards. $1 - message text, $2 - tput style/color sequence.
Print_Style () {
  printf '%s%s%s\n' "$2" "$1" "$NORMAL"
}
# Abort unless running as root: the installer writes to /usr/bin and uses apt.
if [ "$EUID" -ne 0 ]
    then Print_Style "YOU AREN'T RUNNING AS ROOT! This script needs root, use sudo!" $RED
    exit
fi
# $BASH_VERSION is only set when interpreted by bash; the script relies on
# bashisms ($EUID, [[ ]]) so other shells must bail out here.
if [ "$BASH_VERSION" = '' ]; then
    Print_Style "Warning: this script must be run in bash!" $RED
    exit
else
    Print_Style "Bash detected. Good." $GREEN
fi
# Downloads checkra1n
# Fetches the binary from $DL_LINK (set by the architecture detection
# below) into the current directory and makes it executable.
GetJB () {
  wget $DL_LINK
  chmod 755 checkra1n
}
# Check system architecture
CPUArch=$(uname -m)
Print_Style "System Architecture: $CPUArch" $YELLOW
# Get Linux distribution
# Stolen from Stack Overflow lol
if [ -f /etc/os-release ]; then
    # freedesktop.org and systemd
    . /etc/os-release
    OS=$NAME
    VER=$VERSION_ID
elif type lsb_release >/dev/null 2>&1; then
    # linuxbase.org
    OS=$(lsb_release -si)
    VER=$(lsb_release -sr)
elif [ -f /etc/lsb-release ]; then
    # For some versions of Debian/Ubuntu without lsb_release command
    . /etc/lsb-release
    OS=$DISTRIB_ID
    VER=$DISTRIB_RELEASE
elif [ -f /etc/debian_version ]; then
    # Older Debian/Ubuntu/etc.
    OS=Debian
    VER=$(cat /etc/debian_version)
elif [ -f /etc/SuSe-release ]; then
    # Older SuSE/etc.
    # NOTE(review): the literal "..." below is executed as a command and
    # fails with "command not found" if this branch is ever taken.
    ...
elif [ -f /etc/redhat-release ]; then
    # Older Red Hat, CentOS, etc.
    # NOTE(review): same literal "..." placeholder issue as above.
    ...
else
    # Fall back to uname, e.g. "Linux <version>", also works for BSD, etc.
    OS=$(uname -s)
    VER=$(uname -r)
fi
# Determine Linux distro
# Only Raspbian is recognised; everything else falls back to the same
# Debian-style dependency list.
if [[ "$OS" == *"Raspbian"* ]]; then
    DEPENDENCIES="usbmuxd libimobiledevice6"
else
    Print_Style "I don't know what dependencies you need for this distro. Using defaults for Raspbian..." $RED
    DEPENDENCIES="usbmuxd libimobiledevice6"
fi
# Choose correct download link
# TODO: dynamically fetch latest urls from checkra1n website
if [[ "$CPUArch" == *"aarch64"* || "$CPUArch" == *"arm64"* ]]; then
    Print_Style "ARM64 detected!" $YELLOW
    DL_LINK=https://assets.checkra.in/downloads/linux/cli/arm64/1985cee5704ed152d7a59efbcda5dab409824eeed5ebb23779965511b1733e28/checkra1n
elif [[ "$CPUArch" == *"armhf"* || "$CPUArch" == *"armv"* ]]; then
    Print_Style "ARM detected!" $YELLOW
    DL_LINK=https://assets.checkra.in/downloads/linux/cli/arm/c5cbb125c6948b39383702b62cec4f184263c8db50f49b9328013213126dae78/checkra1n
elif [[ "$CPUArch" == *"x86_64"* ]]; then
    Print_Style "x86_64 detected!" $YELLOW
    DL_LINK=https://assets.checkra.in/downloads/linux/cli/x86_64/9f215d8c5a1b6cea717c927b86840b9d1f713d42a24626be3a0408a4f6ba0f4d/checkra1n
elif [[ "$CPUArch" == *"x86"* ]]; then
    Print_Style "x86 detected!" $YELLOW
    DL_LINK=https://assets.checkra.in/downloads/linux/cli/i486/4785390cf41dfbf4478bce4b69a00ec00a82ebab0a1c8dc364a8fe1b6fc664c0/checkra1n
else
    # NOTE(review): "Unsuported" typo in the user-facing message below.
    Print_Style "ERROR: Unknown/Unsuported architecture! Please try again, make sure your architecture is supported by checkra1n and that you're using sh instead of bash." $RED
    DL_LINK=UNKNOWN
    exit
fi
Print_Style "Getting checkra1n..." $GREEN
GetJB
Print_Style "Done! Marked as executable!" $GREEN
# Interactive install: "${answer#[Yy]}" strips a leading y/Y, so the test
# is true exactly when the reply starts with y or Y.
echo -n "Install to /usr/bin (y/n)?"
read answer
if [ "$answer" != "${answer#[Yy]}" ]; then
    sudo cp checkra1n /usr/bin
    Print_Style "Copied executable to /usr/bin" $GREEN
    echo -n "Delete downloaded file (no longer needed)? (y/n)"
    read answer
    if [ "$answer" != "${answer#[Yy]}" ]; then
        rm checkra1n
    fi
fi
# NOTE(review): "Attenpting" typo in the message below.
Print_Style "Attenpting to install dependencies." $BLUE
# TODO: detect if yum or others are needed
apt install -y $DEPENDENCIES
Print_Style "All done!" $BLUE
483d9872b51d2e708f178d86e2c208bb09224826 | Shell | betherealone/mac-osx-setup | /scripts/zsh.sh | UTF-8 | 747 | 3.765625 | 4 | [] | no_license | #!/bin/bash
if ! command -v brew > /dev/null; then
echo "[ZSH] Install Homebrew"
ruby -e "$(curl --location --fail --silent --show-error https://raw.githubusercontent.com/Homebrew/install/master/install)"
else
echo "[ZSH] Update Homebrew"
brew update
fi
echo ""
echo "[ZSH] Install ZSH"
brew install zsh
echo ""
echo "[ZSH] Installing ZSH tools"
sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
echo ""
echo "[ZSH] Installing default zshrc"
read -p "Do you wish to install default zshrc config ? y/n " yn
case $yn in
[Yy]* ) cp ~/.zshrc ~/.zshrc-backup-file; cp ./default-zshrc ~/.zshrc;;
[Nn]* ) echo "";;
* ) echo "Please answer yes or no.";;
esac
echo ""
| true |
dc9e6bbac78a40b96cfc15006a930388281d650a | Shell | kundajelab/hydrogels | /peak_aggregation_to_counts/mamgland_subset/generate_mamgland_peak_subset.sh | UTF-8 | 379 | 2.734375 | 3 | [] | no_license | for peak_file in `cut -f2 atac.peaks.naiveo.txt`
do
zcat $peak_file >> naive_overlap.optimal_set.mamgland.bed
done
bedtools sort -i naive_overlap.optimal_set.mamgland.bed > naive_overlap.optimal_set.sorted.mamgland.bed
bedtools merge -i naive_overlap.optimal_set.sorted.mamgland.bed > naive_overlap.optimal_set.sorted.merged.mamgland.bed
echo "generated merged peak file!"
| true |
1ce568b987c4f5dcc10b9e1eabe016d27472be80 | Shell | jhamfler/debconfsrv | /apply.sh | UTF-8 | 480 | 2.59375 | 3 | [] | no_license | #!/bin/bash
cp .bas* ~
cp .pro* ~
cp .zsh* ~
cp .vimrc ~
mkdir -p ~/.oh-my-zsh/themes/
cp jh.zsh-theme ~/.oh-my-zsh/themes/jh.zsh-theme
if [ ! -d ~/.vim/bundle/Vundle.vim ]; then
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
vim +PluginInstall +qall
fi
if [ ! -d ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions ]; then
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
fi
| true |
21144990a22c5a94d4428bb0146a0d4a33f4a266 | Shell | rajatrh/aistore | /deploy/prod/k8s/helm/ais/run_ais_sample.sh | UTF-8 | 4,908 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash -p
#
# Wrapper for helm install of AIS - alternative to repeating all these
# runes on the cmdline: copy and customize this script.
#
############# BEGIN: Review customization from this point to the marker below #############
#
# AIS cluster name
#
AIS_NAME=demo
#
# Container images - select aisnode version, the kubectl version rarely changes
#
AISNODE_IMAGE=quay.io/nvidia/aisnode:20200218
KUBECTL_IMAGE=quay.io/nvidia/ais-kubectl:1
#
# *If* the images require a pull secret, then install the pull secret in k8s
# and quote the secret name here (not the secret itself!). Leave as empty
# string for public repos.
#
PULLSECRETNAME=""
#
# Mountpaths in AIS target nodes for use by AIS (as hostPath volumes). You must specify this.
# Target nodes are controlled by node labeling. The ais chart today assumes the same paths
# are used on all nodes - this is a restriction of the chart, not of AIS itself.
#
#MOUNTPATHS='{/ais/sda,/ais/sdb,/ais/sdc,/ais/sdd,/ais/sde,/ais/sdf,/ais/sdg,/ais/sdh,/ais/sdi,/ais/sdj}'
MOUNTPATHS=""
#
# Grafana & Graphite storage - the chart will create hostName PVs for these.
# Grafana is small (just worksheets etc) so assume they're to come from the
# same node as subdirectories of the same tree. The nodename/basepath/size
# below are used in completing a PV/PVC for use with Graphite and Grafana -
# the chart bundles a local-storage PV which will require some modification
# if provisioning from another source.
#
INSTALL_MONITORING=true
STATS_NODENAME="cpu01"
STATS_BASEPATH="/data"
STATS_SIZE="250Gi"
#
# By default we dedicate AIS nodes to AIS and don't restrict it on CPU/mem - it doesn't
# need much except when performing distributed sorts. If you need to restrict CPU/mem
# resource then use the following, otherwise leave as empty strings.
#
CPU_REQUESTS="" # eg, 40
CPU_LIMITS="" # eg 44
MEM_REQUESTS="" # eg 120Gi
MEM_LIMITS="" # eg 140Gi
#
# External ingress to cluster - pass the cluster CIDR as used in Kubespray
# and the hostport number that will be opened on target nodes and redirected
# to target pods there. If not opening external ingress (ie access to external
# storage clients) then leave AIS_K8S_CLUSTER_CIDR empty.
#
# This has only been tested using metallb - if using a cloud provider
# LoadBalancer then some work may be required.
#
AIS_K8S_CLUSTER_CIDR="" # eg 192.168.0.0/18
AIS_TARGET_HOSTPORT=51081 # don't change unless really necessary
AIS_GATEWAY_EXTERNAL_IP="" # must be in metalLB pool range if used
#
# Similarly for ingress to Grafana. We also create a NodePort service
# for Grafana, but the ingress has a stable port number.
#
AIS_GRAFANA_EXTERNAL_IP=""
############# END: Review customization above this point #############
# Preflight: helm must be on PATH and MOUNTPATHS must have been filled in.
helm version >/dev/null 2>&1
if [[ $? -ne 0 ]]; then
  echo "Helm does not appear to be available" >/dev/stderr
  exit 2
fi
if [[ -z "$MOUNTPATHS" ]]; then
  echo "Please fill MOUNTPATHS" >&2
  exit 2
fi
# NO_MONITORING is (counter-intuitively) set non-empty when monitoring IS
# enabled: it acts as the truthy guard for the ${NO_MONITORING:+...}
# expansions in the helm invocation below.
if $INSTALL_MONITORING; then
  NO_MONITORING="nope"
  if [[ ! -f "charts/requirements.lock" ]]; then
    # pull dependencies automatically just once; first add repo
    (cd charts && helm dependency update)
    if [[ $? -ne 0 ]]; then
      echo "helm dependency update failed!" >/dev/stderr
      exit 2
    fi
  fi
else
  NO_MONITORING=""
fi
# Install the AIS chart. Each ${VAR:+ --flag=...} expansion only emits its
# flag when VAR is non-empty, so unset customizations simply disappear.
# Fixed: the CPU limit flag expanded ${CPU_LIMIT} (undefined) instead of
# ${CPU_LIMITS}, so configuring CPU_LIMITS produced an empty limit value.
helm install \
  --name=$AIS_NAME \
  --set image.pullPolicy=IfNotPresent \
  --set-string image.aisnode.repository=$(echo $AISNODE_IMAGE | cut -d: -f1) \
  --set-string image.aisnode.tag=$(echo $AISNODE_IMAGE | cut -d: -f2) \
  --set-string image.kubectl.repository=$(echo $KUBECTL_IMAGE | cut -d: -f1) \
  --set-string image.kubectl.tag=$(echo $KUBECTL_IMAGE | cut -d: -f2) \
  ${PULLSECRETNAME:+ --set-string image.pullSecretNames="{$PULLSECRETNAME}"} \
  --set-string target.mountPaths="$MOUNTPATHS" \
  ${NO_MONITORING:+ --set-string graphite.ais.pv.node=$STATS_NODENAME} \
  ${NO_MONITORING:+ --set-string graphite.ais.pv.path=${STATS_BASEPATH}/graphite} \
  ${NO_MONITORING:+ --set-string graphite.ais.pv.capacity=${STATS_SIZE}} \
  ${NO_MONITORING:+ --set-string grafana.ais.pv.node=$STATS_NODENAME} \
  ${NO_MONITORING:+ --set-string grafana.ais.pv.path=${STATS_BASEPATH}/grafana} \
  ${NO_MONITORING:+ --set-string grafana.ais.pv.capacity=${STATS_SIZE}} \
  ${CPU_REQUESTS:+ --set-string target.resources.requests.cpu=${CPU_REQUESTS}} \
  ${CPU_LIMITS:+ --set-string target.resources.limits.cpu=${CPU_LIMITS}} \
  ${MEM_REQUESTS:+ --set-string target.resources.requests.memory=${MEM_REQUESTS}} \
  ${MEM_LIMITS:+ --set-string target.resources.limits.memory=${MEM_LIMITS}} \
  ${AIS_K8S_CLUSTER_CIDR:+ --set ais_k8s.cluster_cidr="${AIS_K8S_CLUSTER_CIDR}"} \
  ${AIS_TARGET_HOSTPORT:+ --set-string target.service.hostport=${AIS_TARGET_HOSTPORT}} \
  ${AIS_GATEWAY_EXTERNAL_IP:+ --set-string ingress.gateway.externalIP=${AIS_GATEWAY_EXTERNAL_IP}} \
  ${AIS_GRAFANA_EXTERNAL_IP:+ --set-string ingress.grafana.externalIP=${AIS_GRAFANA_EXTERNAL_IP}} \
  charts/.
| true |
f0ea08611d2d2323f69b46679b6ccd80e307754d | Shell | holdenout/dotfiles | /bash_aliases | UTF-8 | 678 | 2.625 | 3 | [] | no_license | # Because hulu never stops
alias stulu="sleep 2h; wmctrl -c \"Hulu | Watch\""
# Directories first 'ls'
alias lll='ls -AF1 --group-directories-first'
# Show shell keyboard shortcuts
alias shcuts='echo "Move:
ctrl-a Beginning of line
ctrl-e End of line
ctrl-f Forward 1 char
ctrl-b Backward 1 char
alt-f Forward 1 word
alt-b Backward 1 word
Deleting:
ctrl-d Delete at cursor
alt-bsp
ctrl-w Delete to beginning of word
ctrl-k Delete to end of line
ctrl-u Delete to beginning of line
ctrl-y Paste cut text
Swapping:
ctrl-t Exchange char at cursor with preceding
alt-t Exchange word at cursor with preceding
Misc:
ctrl-r Search command history"'
| true |
a83b1d822d5996e83e372d0e376a9e422f4d1e92 | Shell | code-newbie/playSMS | /bin/playsmssend | UTF-8 | 525 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Usage: playsmssend <to> <message>
## Username and password of the playsms user you wants to use
L="admin"
P="admin"
## The path to your playSMS, with trailing slash
W="http://localhost/playsms/"
## You shouldn't edit the rest of the file
## Code to use the number of the sender
## replacing + with %2B (urlencoded form of +)
DF=`echo $1 | sed s/+/%2B/`
M=$2
## request webservices, returns the result to sender
$(which lynx) -dump "$W?app=webservices&u=$L&p=$P&ta=pv&to=$DF&msg=$M" >/dev/null 2>&1
| true |
c8edf6b795cf4fc07d552d57718a537eff3b8045 | Shell | TortugaLabs/noxml | /ymlgen.sh | UTF-8 | 2,009 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#
# Copyright (c) 2018 Alejandro Liu
# Licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
set -euf -o pipefail
# Convert trivially simple XML on stdin to YAML-ish output on stdout.
# Handles one tag per line in four shapes: "<tag>value</tag>", "<tag/>",
# "<tag>" (open) and "</tag>" (close); any other line aborts with status 2.
# Nesting depth is tracked via the two-space indent prefix $off.
ymlgen() {
  local off=""
  local ln left right
  # Trim leading/trailing whitespace; the subshell keeps the read-loop's
  # state (off) together with the pipeline.
  sed -e 's/^\s*//' -e 's/\s*$//' | (
    while read ln
    do
      [ -z "$ln" ] && continue
      if ((echo $ln | grep -q '^<') && (echo $ln | grep -q '>$')) ; then
        # Strip the outer angle brackets.
        ln=$(echo $ln | sed -e 's/^<\s*//' -e 's/\s*>$//')
        if (echo $ln | grep -q '>.*<') ; then
          # "<tag>value</tag>": emit "tag: value".
          left=$(echo "$ln" | cut -d'>' -f1)
          right=$(echo "$ln" | cut -d'>' -f2- | sed -e 's/<\s*\/[^>]*$//')
          echo "$off$left: $right"
        elif (echo $ln | grep -q '/$') ; then
          # Self-closing "<tag/>": emit the bare tag name.
          # NOTE(review): no trailing ':' here, so the line is not a valid
          # YAML mapping entry — confirm whether that is intended.
          echo "$off$(echo "$ln" | sed -e 's!\s*/$!!')"
        elif (echo $ln | grep -q '^/') ; then
          # Closing tag: dedent by one level (two characters).
          off="$(expr substr "$off" 1 $(expr $(expr length "$off") - 2))"
          # NOTE(review): this also prints a bare "$off:" line for every
          # closing tag, which looks like stray output — confirm.
          echo "$off:"
        else
          # Opening tag: emit "tag:" and indent one level deeper.
          echo "$off$ln:"
          off="$off  "
        fi
      else
        echo "Parser can not handle such complex XML!" 1>&2
        return 2
      fi
    done
  )
}
| true |
381aa88d92f073326052edc8b21dfafdefcc3f32 | Shell | MRCIEU/eczema_gwas_fu | /bayesian_fm/finemap/01_finemap_analysis_1k_published.sh | UTF-8 | 5,292 | 3.296875 | 3 | [] | no_license | #!/bin/bash
HOME=/panfs/panasas01/sscm/qh18484
scripts=$HOME/bin/eczema_gwas_fu/bayesian_fm/finemap
gwas=/panfs/panasas01/sscm/qh18484/data/gwas/paternoster2015
onek=/panfs/panasas01/sscm/qh18484/analysis/bayesian_fm/RefPanel/1kGenomes
FINEMAP_ANALYSIS=$HOME/analysis/bayesian_fm/finemap/1k/published
utils=$HOME/bin/eczema_gwas_fu/utils
#Submit test finemap run
cd $FINEMAP_ANALYSIS
#qsub $scripts/sub_finemap_test.sh
#Generate input files for toy analysis fine-mapping the filaggrin locus.
#Important: need to provide MAF (minor allele frequency) rather than EAF (effect allele frequency) like in the Z input file, so need to convert that.
#Copy LD file for European 1K Phase3 along with the list of processed SNPs.
onek=/panfs/panasas01/sscm/qh18484/analysis/bayesian_fm/RefPanel/1kGenomes
#Generate input Z file
input_file=chr1.rs61813875.1500000
python $scripts/generate_Z_file.py --tab $gwas/results.published.tsv \
--proces ${onek}/$input_file.processed --out ${input_file}.z \
--chrom 2 --pos 3 --ident 1 --ref 4 --alt 5 --beta 8 --se 9 --eas 6
#Generate MASTER file
#Using 103066 as sample size (European case and control samples in the discovery phase, including 23 and me. Without 23 and me, that would be 40835)
mkdir $input_file
cd $input_file
mv ../chr1.rs61813875.1500000.z ./
echo "z;ld;snp;config;log;n_samples" >master
echo "${input_file}.z;${onek}/${input_file}.ld;${input_file}.snp;${input_file}.config;${input_file}.log;103066" >>master
#shotgun stochastic search
qsub $scripts/sub_finemap.sh
#The results are a bit unexpected - different top loci (except for rs61816766) than in the analysis - perhaps program expects floating point LD numbers rather than scientifc notation?
awk '{for (i=1; i<=NF; i++) printf("%.15f ", $i);} {printf("\n")}' ${onek}/${input_file}.ld > ${onek}/${input_file}_float.ld
mkdir ${input_file}_float
cd ${input_file}_float
cp ../$input_file/chr1.rs61813875.1500000.z ./
echo "z;ld;snp;config;log;n_samples" >master
echo "${input_file}.z;${onek}/${input_file}_float.ld;${input_file}.snp;${input_file}.config;${input_file}.log;103066" >>master
qsub $scripts/sub_finemap.sh
#Obtain exactly the same results.
##Now, analysis of the CD207 locus.
#Run the default FINEMAP workflow for one index SNP ($1) and flanking
#interval in bp ($2): look up the SNP's chromosome, build the Z file from
#the published GWAS summary stats, write the FINEMAP master file
#(n_samples=103066: European discovery incl. 23andMe) and submit the job.
function finemap_default
{
snp=$1
interval=$2
#Chromosome for this SNP, from column 2 of the sorted index-SNP table.
chrom=$(grep $snp $gwas/paternoster_2015_index_snps_sorted.txt | cut -f2)
cd $FINEMAP_ANALYSIS
input_file=chr${chrom}.$snp.$interval
mkdir $input_file && cd $input_file
python $scripts/generate_Z_file.py --tab $gwas/results.published.tsv \
--proces ${onek}/$input_file.processed --out ${input_file}.z \
--chrom 2 --pos 3 --ident 1 --ref 4 --alt 5 --beta 8 --se 9 --eas 6
echo "z;ld;snp;config;log;n_samples" >master
echo "${input_file}.z;${onek}/${input_file}.ld;${input_file}.snp;${input_file}.config;${input_file}.log;103066" >>master
qsub $scripts/sub_finemap.sh
}
#Run the three loci (FLG/rs61813875, CD207/rs112111458, EMSY/rs2212434)
#over progressively narrower flanking intervals.
finemap_default rs112111458 1500000
##Now, analysis of the EMSY/C11orf30 locus.
finemap_default rs2212434 1500000
#Analysis using 500 kbp, 280 kbp and 100 kbp intervals.
finemap_default rs61813875 250000
finemap_default rs112111458 250000
finemap_default rs2212434 250000
finemap_default rs61813875 140000
finemap_default rs112111458 140000
finemap_default rs2212434 140000
finemap_default rs61813875 50000
finemap_default rs112111458 50000
finemap_default rs2212434 50000
#Analysis using only 1 causal SNP for CD207
#Same workflow as finemap_default but writes into a "_1snp" directory and
#submits the single-causal-SNP variant of the FINEMAP job script.
function finemap_1snp
{
snp=$1
interval=$2
chrom=$(grep $snp $gwas/paternoster_2015_index_snps_sorted.txt | cut -f2)
cd $FINEMAP_ANALYSIS
input_file=chr${chrom}.$snp.$interval
mkdir ${input_file}_1snp && cd ${input_file}_1snp
python $scripts/generate_Z_file.py --tab $gwas/results.published.tsv \
--proces ${onek}/$input_file.processed --out ${input_file}.z \
--chrom 2 --pos 3 --ident 1 --ref 4 --alt 5 --beta 8 --se 9 --eas 6
echo "z;ld;snp;config;log;n_samples" >master
echo "${input_file}.z;${onek}/${input_file}.ld;${input_file}.snp;${input_file}.config;${input_file}.log;103066" >>master
qsub $scripts/sub_finemap_1snp.sh
}
finemap_1snp rs112111458 1500000
finemap_1snp rs112111458 250000
finemap_1snp rs112111458 140000
finemap_1snp rs112111458 50000
#Convert from space-delimited to tab-delimited tables
#Reverse probabilities for the data to be suitable for plotting with GWAS software
#Filter data to keep for plotting
#Post-process every default run directory (names end in "0": the interval).
for a in chr*0
do
cd $a
cat ${a}.snp | tr ' ' '\t' >${a}.snp.tab
cat ${a}.config | tr ' ' '\t' >${a}.config.tab
#Keep rows with log10bf > 1 (column 12), then append 1-prob as "pval".
awk '($12 > 1) {print $0}' ${a}.snp.tab > ${a}.snp.filtered
awk -v OFS="\t" '{(NR==1)?$(NF+1)="pval":$(NF+1)=1-$11 ; print}' ${a}.snp.filtered >${a}.snp.filtered.pval
#Directory name is chr<chrom>.<snp>.<interval>; extract snp and interval(kb).
my_snp=$(echo $a | cut -d"." -f2)
my_int=$(echo $a | cut -d"." -f3)
my_int=$(expr $my_int / 1000)
qsub -v input_file=${a}.snp.filtered.pval,snp_id="rsid",pval="prob",my_ref=$my_snp,my_flank=${my_int}kb,my_prefix=${my_snp}_${my_int}kbp $utils/sub_locus_zoom.sh
cd ../
done
#Same conversion/filtering for the single-causal-SNP runs (no plotting).
for a in chr*1snp
do
cd $a
cat ${a%_1snp}.snp | tr ' ' '\t' >${a%_1snp}.snp.tab
cat ${a%_1snp}.config | tr ' ' '\t' >${a%_1snp}.config.tab
awk '($12 > 1) {print $0}' ${a%_1snp}.snp.tab > ${a%_1snp}.snp.filtered
awk -v OFS="\t" '{(NR==1)?$(NF+1)="pval":$(NF+1)=1-$11 ; print}' ${a%_1snp}.snp.filtered >${a%_1snp}.snp.filtered.pval
cd ../
done
#Template script to generate LocusPlots and submit them
python $utils/create_batch_scripts.py $scripts/locus_zoom_finemap.sh
e156adc551a0e6bee5cf64fd4e43fe2115547641 | Shell | AntiasKing/42sh | /src/demo.sh | UTF-8 | 336 | 3.359375 | 3 | [] | no_license | #!./42sh
# Demo/self-test for the custom "42sh" interpreter (csh-like syntax:
# set/while/end/if-then-else-endif).  Counts 0..42, printing parity of
# each value, then reports whether the loop reached exactly 43.
# NOTE(review): assumes 42sh treats '#' lines as comments like csh does.
set it=0
while ([ $it -ne 43 ]) then
set a=$($it%2)
if ([ $a -eq 0 ]) echo $it is even
if ([ $a -ne 0 ]) echo $it is odd
set it=$($it+1)
end
if ([ $it -eq 43 ]) then
echo "scripting work !"
else if ([ $it -eq 44 ]) then
echo "scripting work half"
else
echo "scripting doesn't work..."
endif
| true |
f89ba1d1fc4f387b14ffc12ce31cd2da0a036b27 | Shell | delkyd/alfheim_linux-PKGBUILDS | /k8stail/PKGBUILD | UTF-8 | 1,490 | 2.78125 | 3 | [] | no_license | # Maintainer: larte <lauri.arte@gmail.com>
# AUR package metadata for k8stail (Go tool: tail -f for k8s pod logs).
pkgname=k8stail
pkgver=0.5.1
pkgrel=1
pkgdesc="Watch kubernetes logstreams filtering with namespace and labels, like tail -f"
arch=('x86_64' 'i686')
url="https://github.com/dtan4/k8stail"
license=('MIT')
depends=('glibc')
makedepends=('go' 'go-bindata' 'make' 'glide')
# Name of the extracted source directory inside $srcdir.
_archive=k8stail-$pkgver
source=($_archive.tar.gz::https://github.com/dtan4/k8stail/archive/v$pkgver.tar.gz)
md5sums=('bd893081cbe9e9fbd29ccff7193c869b')
prepare() {
  # Patch the Makefile and lay out a GOPATH-style import path pointing at
  # the extracted tarball so the Go toolchain can build it.
  cd "$srcdir/$_archive"
  msg2 "Patching makefile"
  # Pin the embedded revision string; a release tarball has no .git dir.
  sed -i -e 's/REVISION.*:=.*/REVISION := "aur-pkgbuild"/' Makefile
  msg2 "mkdir -p $srcdir/go/src/github.com/dtan4"
  mkdir -p "$srcdir/go/src/github.com/dtan4"
  # Fix: the original used the undefined variable $archive for the link
  # target name (it only worked by accident via ln's default basename).
  msg2 "ln -sf $srcdir/$_archive $srcdir/go/src/github.com/dtan4/$_archive"
  ln -sf "$srcdir/$_archive" "$srcdir/go/src/github.com/dtan4/$_archive"
}
build() {
  # Build through the project's Makefile: fetch dependencies with glide,
  # then install the binary into $GOBIN ($srcdir/bin).
  cd "$srcdir/go/src/github.com/dtan4/$_archive"
  export GOPATH="$srcdir/go"
  export GOBIN="$srcdir/bin"
  export PATH="$PATH:$GOPATH/bin"
  make deps
  make install
}
check() {
  # Run the upstream test suite against the prepared GOPATH tree.
  cd "$srcdir/go/src/github.com/dtan4/$_archive"
  GOPATH="$srcdir/go" make test
}
package() {
  # Install the built binary and the MIT license text into $pkgdir.
  # NOTE(review): assumes 'make install' names the binary
  # "$pkgname-$pkgver" in $GOBIN — confirm against the Makefile.
  mkdir -p "$pkgdir/usr/bin"
  install -p -m755 "$srcdir/bin/$pkgname-$pkgver" "$pkgdir/usr/bin/k8stail"
  install -Dm644 "$srcdir/$_archive/LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true |
0f0836b7277353e1162729ff67e31e0059a5e6e5 | Shell | wpomori/scripts_qiime | /start.sh | UTF-8 | 6,695 | 3.765625 | 4 | [] | no_license | #!/bin/bash
#=======================================================================
#
# Test script to launch QIIME, starting from the help option.
# There are single-end and paired-end sequencing modes; the user picks
# which kind of sequencing data they have from the menu below.
#
#=======================================================================
#=======================================================================
# Variables used as references to the programs.
# Current directory:
# base_dir="/bin"
# Variable holding the first CLI argument, used to detect a help request.
help="$1"
#=======================================================================
#=======================================================================
#
# Print the help text when the user asks for it, then stop.  The added
# 'exit 0' fixes a fall-through: without it "--help"/"-h" also hit the
# argument-count check below and printed a spurious error after the help.
if [ "${help}" = --help ] || [ "${help}" = -h ]
then
	echo "
	$0 versão 0.03
	Script versão beta desenvolvido como objeto de defesa de MBA em
	redes Linux na UNIARA por Wellington Pine Omori em 07 de Dezembro de 2016.
	e-mail: wpomori@gmail.com
	Script para trimagem de dados de sequenciamento (Ion Torrent PGM/Próton)
	single-end no formato fastq usando Cutadapt e Prinseq. Os gráficos de
	qualidade dos dados brutos e processados são construídos com FastQC. Modo
	de uso:
	$0
	Orientação de uso: o programa $0 não precisa que nenhum diretório
	ou arquivo seja informado quando invocado.
	Apenas tenha a certeza de que os arquivos fastq
	dos dados brutos estejam no diretório atual.
	Não se esqueça de informar o arquivo map_file.txt
	e o arquivo custom_parameters.txt, os quais serão
	importantes ao QIIME. O resto o programa fará por
	você. "
	exit 0
fi;
#=======================================================================
#=======================================================================
#
# Built from the example on page 274 of the book
# "Programação Shell Linux", Júlio Cezar Neves, 10th ed.
# NOTE(review): this fires for ANY single argument (the script expects
# to be run with no arguments at all); the message wording about "more
# than one parameter" is therefore misleading — confirm intent.
if [ "$#" -eq 1 ]
then
# $0 expands to the program name (start.sh) and $#
# holds the number of parameters that were passed.
echo "
O programa $0 não precisa de atribuição de mais "
echo " de um parâmetro [ contém $# parâmetro(s) ] além do "
echo " --help ou -h [ $0 --help ou $0 -h ] .
"
exit;
fi;
#=======================================================================
#=======================================================================
#
# Interactive main menu.  While the chosen option is invalid, $OK stays
# empty and the until-loop re-prompts.
OK=
until [ "$OK" ] ; do
# The -n option below keeps echo from adding a newline after the prompt.
echo -n "
 Opção Ação
 ===== ====
 1 Arquivo de entrada está no formato fasta (trimado a priori).
 2 Arquivo de entrada está no formato fastq (trimado a priori).
 3 Arquivo de entrada está no formato fastq (não trimado e single-end, Ion Torrent PGM/Próton).
 4 Arquivo de entrada está no formato fastq (não trimado e paired-end, Pandaseq).
 5 Ajuda (?)
 6 Sair do programa.
 Escolha UMA opção entre as opções acima (1-6): "
read Opcao
# NOTE(review): under bash, echo "\n" prints a literal backslash-n
# (no -e flag) — presumably meant as a blank line; confirm.
echo "\n"
# Until proven otherwise, assume the option is valid.
OK=1
# Variables used as references to the programs.
# Current directory:
# base_dir="."
case "$Opcao"
in
 1) echo " Executando QIIME para arquivos no formato fasta "
 echo " Executando o script para arquivos de entrada no "
 echo " formato fasta ... \n "
 qiime_fa.sh
 ;;
 2) echo " Executando QIIME para arquivos no formato fastq ... "
 echo " Executando o script para arquivos de entrada no "
 echo " formato fastq ... \n "
 qiime_fq.sh
 ;;
 3) echo " Executando trimagem para arquivos no formato fastq single-end ... "
 echo " Executando os scripts do Scythe, Cutadapt, Prinseq e FastQC ... "
 echo " Executando QIIME para os arquivos no formato fasta"
 echo " Executando o script para arquivos de entrada no "
 echo " formato fasta ... \n "
 qiime_sg_raw.sh
 ;;
 4) echo " Executando trimagem para arquivos no formato fastq paired-end ... "
 echo " Executando os scripts do Pandaseq, Cutadapt, Prinseq e FastQC ... "
 echo " Executando QIIME para os arquivos no formato fasta"
 echo " Executando o script para arquivos de entrada no "
 echo " formato fasta ... \n "
 qiime_pe_pa.sh
 ;;
 5)
 # Help sub-menu: copy example data sets into the current directory.
 # While the chosen option is invalid, $OK stays empty and we re-prompt.
 OK=
 until [ "$OK" ] ; do
 # The -n option below keeps echo from adding a newline after the prompt.
 echo -n " Para quais dados você precisa visualizar os arquivos exemplos?
 Opção Ação
 ===== ====
 1 Arquivos exemplos no formato fasta (trimado a priori).
 2 Arquivos exemplos no formato fastq (trimado a priori).
 3 Arquivos exemplos no formato fastq (não trimado e single-end).
 4 Arquivos exemplos no formato fastq (não trimado e paired-end).
 5 Sair do programa.
 Escolha UMA opção entre as opções acima (1-5):
 "
 read Opcao
 echo "\n"
 # Until proven otherwise, assume the option is valid.
 OK=1
 case "$Opcao"
 in
 1) echo "Copiando os arquivos exemplos no formato fasta trimados a priori"
 echo "para o diretório atual" ; pwd
 cp -r /usr/local/bioinfo/qiime_examples/scripts_qiime-master/examples/1_arquivos_fasta_trimados .
 ;;
 2) echo "Copiando os arquivos exemplos no formato fastq trimados a priori"
 echo "para o diretório atual" ; pwd
 cp -r /usr/local/bioinfo/qiime_examples/scripts_qiime-master/examples/2_arquivos_fastq_trimados .
 ;;
 3) echo "Copiando os arquivos exemplos no formato fastq single-end (dados brutos)"
 echo "para o diretório atual" ; pwd
 cp -r /usr/local/bioinfo/qiime_examples/scripts_qiime-master/examples/3_arquivos_fq_raw .
 ;;
 4) echo "Copiando os arquivos exemplos no formato fastq paired-end (dados brutos)"
 echo "para o diretório atual" ; pwd
 cp -r /usr/local/bioinfo/qiime_examples/scripts_qiime-master/examples/4_pe_fq_raw .
 ;;
 5) echo " Saindo do programa ... \n "
 sleep 2
 ;;
 *) echo "São válidas somente opções entre 1-5 ... "
 sleep 1
 # Invalid option: $OK stays empty, forcing another loop iteration.
 OK=
 ;;
 esac
 done;
 ;;
 6) echo " Saindo do programa ... \n "
 sleep 2
 ;;
 *) echo "São válidas somente opções entre 1-6 ... "
 sleep 1
 # Invalid option: $OK stays empty, forcing another loop iteration.
 OK=
 ;;
esac
done;
#=======================================================================
#=======================================================================
exit;
| true |
3a024039d7033695af5fd82811a926e92fb7c5ff | Shell | faust64/keengreeper | /utils/checkVersion | UTF-8 | 1,218 | 3.375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Compare a local version spec ($1, optionally prefixed with npm-style
# "^" or "~") against a reference version ($2, plain x.y.z).
# Exits 0 when the local spec is >= the reference, 2 otherwise.
if test -z "$1"; then
    echo "CRITICAL: missing local version" >&2
    exit 1
elif test -z "$2"; then
    echo "CRITICAL: missing reference version" >&2
    exit 1
fi
# Split "maj.min.patch" into three variables via sed + eval; the leading
# [~^]* strips any range-operator prefix from the local spec.
eval `echo "$1" | sed 's|[~^]*\([0-9]*\)\.\([0-9xX]*\)\.*\(.*\)|localMaj=\1 localMin=\2 localPatch=\3|'`
eval `echo "$2" | sed 's|\([0-9]*\)\.\([0-9]*\)\.*\(.*\)|refMaj=\1 refMin=\2 refPatch=\3|'`
# "^x.y.z" means any minor/patch; "~x.y.z" means any patch.
# The >/dev/null on 'fi' silences grep's output for the whole if-chain.
if echo "$1" | grep '^^'; then
    localMin=x
    localPatch=x
elif echo "$1" | grep '^~'; then
    localPatch=x
fi >/dev/null
# Normalise missing or "X" components to the wildcard "x".
for var in localMaj localMin localPatch
do
    eval check=\$$var
    if test -z "$check" -o "$check" = X; then
	eval $var=x
    fi
done
# Component-wise comparison: a wildcard "x" component matches anything.
MATCH=false
if test "$localMaj" -gt "$refMaj"; then
    MATCH=true
elif test "$localMaj" = "$refMaj"; then
    if test "$localMin" != x; then
	if test "$localMin" -gt "$refMin"; then
	    MATCH=true
	fi
    fi
    if ! $MATCH; then
	if test "$localMin" = "$refMin" -o "$localMin" = x; then
	    if test "$localPatch" != x; then
		if test "$localPatch" -gt "$refPatch"; then
		    MATCH=true
		fi
	    fi
	    if ! $MATCH; then
		if test "$localPatch" = "$refPatch" -o "$localPatch" = x; then
		    MATCH=true
		fi
	    fi
	fi
    fi
fi
if $MATCH; then
    exit 0
fi
exit 2
| true |
d95a3d9821e21d6b9a3dd46b59b6210558c2b487 | Shell | pmahnke/deployment-configs | /qa-deploy | UTF-8 | 5,030 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env bash
# Bash strict mode (note: -u is not set, so unset variables expand empty)
set -eo pipefail

USAGE="Usage
===
$ ./qa-deploy (production|staging) file [DOCKER_IMAGE_TAG]

Description
---
Deploy locally to microk8s.
"
function invalid() {
    # Report a usage error: print "Error: <message>", a blank line and the
    # usage text, then terminate the script with status 1.
    local message="${1}"
    printf 'Error: %s\n\n' "${message}"
    echo "$USAGE"
    exit 1
}
function add_secrets() {
    # Recreate the placeholder Kubernetes secrets the local QA deployment
    # expects (delete-then-create so reruns are idempotent).
    # Fake snapcraft config
    if microk8s.kubectl get secret snapcraft-io &> /dev/null; then microk8s.kubectl delete secret snapcraft-io; fi
    microk8s.kubectl create secret generic snapcraft-io \
        --from-literal=secret_key=admin \
        --from-literal=csrf_secret_key=admin \
        --from-literal='sentry_dsn=' \
        --from-literal='sentry_public_dsn=' \
        --from-literal='marketo_client_id=' \
        --from-literal='marketo_client_secret='

    # Fake google API key
    if microk8s.kubectl get secret google-api &> /dev/null; then microk8s.kubectl delete secret google-api; fi
    microk8s.kubectl create secret generic google-api --from-literal=google-custom-search-key='notsosecret'

    # Fake IRC creds
    if microk8s.kubectl get secret irc-secrets &> /dev/null; then microk8s.kubectl delete secret irc-secrets; fi
    microk8s.kubectl create secret generic irc-secrets --from-literal=hubot-auth-admin=fake \
        --from-literal=hubot-irc-password=fake \
        --from-literal=hubot-release-notification-secret=fake
}
function add_docker_credentials_microk8s() {
    # Create the "registry-access" docker-registry secret (prompting for
    # credentials) and attach it to the default service account, unless
    # it already exists.
    if ! microk8s.kubectl get secret registry-access &> /dev/null; then
        username=""
        password=""
        # Keep prompting until both fields are non-empty.
        while [[ -z "${username}" || -z "${password}" ]]; do
            echo "##############"
            echo "Docker registry credentials"
            echo
            echo -n "Username: "
            read username
            echo -n "Password: "
            read -s password
            echo -e "\n##############"
        done
        microk8s.kubectl create secret docker-registry registry-access --docker-server=prod-comms.docker-registry.canonical.com --docker-username="${username}" --docker-password="${password}" --docker-email=root@localhost
        # Make pods in the default namespace pull with these credentials.
        microk8s.kubectl patch serviceaccount default -p '{"imagePullSecrets": [{"name": "registry-access"}]}' --namespace default
    fi
}
function apply_configuration() {
    # Create the "environment" configmap (qa) if missing, and apply the
    # nginx configuration under the name the bundled ingress controller
    # actually reads.
    if ! microk8s.kubectl get configmap environment &> /dev/null; then
        microk8s.kubectl create configmap environment --from-literal environment=qa;
    fi

    # On the version of the ingress controller we have we need to change the
    # name to be able to apply the configuration. By default the nginx ingress
    # controller is searching for a config-map called: nginx-load-balancer-microk8s-conf
    cat "configmaps/nginx-configuration.yaml" | sed 's|name: nginx-configuration|name: nginx-load-balancer-microk8s-conf|' | microk8s.kubectl apply --filename - --namespace default
}
function run_microk8s() {
    # Ensure microk8s is available: try to enable a disabled snap, or tell
    # the user how to install it and bail out.
    # Run microk8s
    if ! command -v microk8s.kubectl &> /dev/null; then
        if ls /snap/microk8s &> /dev/null; then
            echo "Enabling microk8s"
            snap enable microk8s
            echo "> Waiting for microk8s to wake up ..."
            sleep 10
        else
            echo "Please install or enable microk8s (https://github.com/juju-solutions/microk8s)"
            echo ""
            echo "    snap install microk8s --classic --edge"
            exit 1
        fi
    fi
}
function enable_ingress() {
    # Enable the ingress and dns microk8s addons if the default HTTP
    # backend service is not present yet.
    if ! microk8s.kubectl get service/default-http-backend &> /dev/null; then
        echo "HTTP backend not found: Enabling ingress addon"
        microk8s.enable ingress dns
    fi
}
function deploy_locally() {
    # Render the project's k8s manifests with konf.py and apply them to
    # the local microk8s cluster.
    #   $1 - environment (production|staging)
    #   $2 - values file  $3 - docker image tag
    local environment="${1}"
    local values_file="${2}"
    local tag="${3}"
    echo "Deploying project ${values_file}"
    # Quote the arguments so values files / tags with spaces survive.
    ./konf.py --local-qa "${environment}" "${values_file}" --tag "${tag}" | microk8s.kubectl apply --filename -
}
function run() {
    # Main entry point: validate CLI arguments, prepare microk8s and
    # deploy the requested project.
    # Arguments
    environment="$1"
    values_file="$2"
    tag_to_deploy=${3:-latest}

    # Mandatory arguments
    if [ -z "${environment:-}" ]; then invalid "No environment specified (e.g. 'production')"; fi
    if [ -z "${values_file:-}" ]; then invalid "No values file specified (e.g. 'sites/canonical.com.yaml')"; fi

    # Validate arguments
    if [[ ! "$environment" =~ ^(production|staging)$ ]]; then
        invalid "You need to specify a valid environment: 'production' or 'staging'"
    fi

    if [[ ! -f $values_file ]] ; then
        invalid "File '${values_file}' doesn't exits, aborting."
    fi

    run_microk8s
    # The configuration needs to run before the ingress controller starts
    # because it doesn't restart when applying the configuration after.
    apply_configuration
    enable_ingress
    add_secrets
    add_docker_credentials_microk8s
    deploy_locally "${environment}" "${values_file}" "${tag_to_deploy}"
    # NOTE(review): $staging is never set anywhere in this script, so this
    # second (identical) deploy looks like dead leftover code — confirm
    # whether an env var was intended before removing.
    if [ -n "${staging:-}" ]; then deploy_locally "${environment}" "${values_file}" "${tag_to_deploy}"; fi
}
# Forward all CLI arguments verbatim; quoting "$@" preserves arguments
# containing whitespace (unquoted ${@} would re-split them).
run "$@"
| true |
01bdeeca771cca370c508a2b85049f6829d6b592 | Shell | RicaBenhossi/Alura-Learning | /new_course_branch.sh | UTF-8 | 7,758 | 4.28125 | 4 | [
"MIT"
] | permissive | #! /bin/bash
execute_command(){
  # Run a command given as a single string and, on failure, report it and
  # drop into an interactive shell instead of exiting.
  # NOTE(review): the unquoted expansion below is intentional (it word-
  # splits the string into command + arguments) but breaks on arguments
  # containing spaces; spawning $SHELL on error also pauses automation.
  command_to_execute=$1
  $command_to_execute
  # 'wait' here is a no-op for foreground commands; kept as-is.
  wait
  error=$?
  if [[ $error -ne 0 ]]; then
    echo
    echo "Try to execute $command_to_execute"
    echo "Execution fail! Fix it and try again."
    echo
    $SHELL
  fi
}
confirm_option_yn(){
  # Repeatedly show the given prompt on stderr and read one silent
  # keypress until the user answers yes or no.
  # Returns 0 for Y/y, 1 for N/n; any other key re-prompts.
  local prompt=$1
  local answer
  while :; do
    echo "$prompt" >&2
    read -r -s -n 1 answer
    if [[ $answer == [Yy] ]]; then
      return 0
    elif [[ $answer == [Nn] ]]; then
      return 1
    fi
    echo "Invalid option." >&2
  done
}
commit_branch(){
  # Stage everything in the working tree and commit it with $1 as the
  # commit message (via execute_command, which pauses on failure).
  message=$1
  execute_command "git add ."
  execute_command "git commit -m "$message""
}
branch_name_exist(){
  # Return 0 if a local branch name contains $1.
  # NOTE(review): this is a substring match against the whole
  # 'git branch --list' output, so "feat" also matches "feature-x".
  branch_name=$1
  branch_list="$(git branch --list)"
  if [[ "$branch_list" == *"$branch_name"* ]]; then
    return $(true)
  else
    return $(false)
  fi
}
create_branch_name(){
  # Interactively build a unique "<order>-<name>" branch name.
  # Prompts/errors go to stderr; the final name is echoed on stdout so it
  # can be captured with $(create_branch_name).
  while true
  do
    echo "What is the name of the course?" >&2
    read course_name
    echo >&2
    # Inner loop: insist on a purely numeric order number.
    while true
    do
      echo "What is the order number of the course? (See the last course added in README file)" >&2
      read course_order
      echo >&2
      # POSIX trick: ''|*[!0-9]* matches empty or any non-digit input.
      case $course_order in
        ''|*[!0-9]*)
          echo >&2
          echo "The input must be a nunber." >&2
          echo >&2 ;;
        *) break ;;
      esac
    done
    # Reject names that collide with an existing branch (substring match).
    if ($(branch_name_exist $course_order-$course_name)); then
      echo >&2
      echo "================================================================================" >&2
      echo "Error" >&2
      echo >&2
      echo "  The branch name $branch_name already exist. Choose another name." >&2
      echo >&2
      echo "================================================================================" >&2
      echo >&2
      echo >&2
    else
      break
    fi
  done
  echo "$course_order-$course_name"
}
get_base_branch(){
  # Show the local branch list and keep prompting until the user names an
  # existing branch; echo the chosen name on stdout (prompts on stderr).
  while true
  do
    echo "Here is the list of branches we have." >&2
    echo >&2
    echo "$(git branch --list)" >&2
    echo >&2
    echo "What is the branch you want to use as the base of this new branch?" >&2
    read container_branch_name
    echo >&2
    if (! $(branch_name_exist $container_branch_name)); then
      echo >&2
      echo "================================================================================" >&2
      echo "Error" >&2
      echo >&2
      echo "  The branch $container_branch_name doesn't exist. Choose another one." >&2
      echo >&2
      echo "================================================================================" >&2
      echo >&2
      echo >&2
    else
      break
    fi
  done
  echo $container_branch_name
}
merge_branch_base() {
  # Refresh the base branch and copy LICENSE/README/.gitignore into it
  # from master, committing and pushing the result.
  branch_base_name=$1
  branch_master="master"
  echo "Chekouting files from $branch_master $branch_base_name">&2
  echo >&2
  execute_command "git checkout $branch_base_name"
  execute_command "git pull origin"
  # 'git checkout <branch> -- <paths>' style: take these three files
  # from master into the current (base) branch's working tree.
  execute_command "git checkout $branch_master LICENSE README.md .gitignore"
  commit_branch "Adding files LICENSE, README.md and .gitignore"
  execute_command "git push origin $branch_base_name"
  echo >&2
}
create_new_branch(){
  # Create the new course branch ($1) on top of the refreshed base
  # branch ($2).
  course_branch=$1
  base_branch=$2
  echo >&2
  echo "------------------------------------------------------------" >&2
  echo >&2
  echo "Creating a new course branch based on $base_branch" >&2
  echo >&2
  # Bring the base branch up to date (pull + standard files) first.
  merge_branch_base $base_branch
  echo >&2
  echo "Creating the new branch " >&2
  echo >&2
  execute_command "git checkout -b $course_branch"
  echo >&2
  # NOTE(review): this prints $course_branch_name, a caller's global —
  # presumably intended to be $course_branch; confirm.
  echo "** Branch $course_branch_name created. **" >&2
  echo >&2
}
create_tasks_json(){
  # Optionally generate a .vscode/tasks.json that checks out the course
  # branch whenever the folder is opened, then commit it.
  #   $1 - branch name   $2 - course folder prefix (keeps its trailing /)
  if (confirm_option_yn "Would you like to create standard tasks.json? [Y/N]") ; then
    branch_name=$1
    # $2 comes from 'ls -d */' and already ends with '/', so plain
    # concatenation yields "<folder>/<branch>/.vscode".
    vscode_folder="$2$branch_name/.vscode"
    echo >&2
    echo "------------------------------------------------------------" >&2
    echo >&2
    echo "Creating .vscode/tasks.json file." >&2
    execute_command "mkdir -p $vscode_folder"
    # Emit the JSON line by line; single quotes keep everything literal
    # except the one line that interpolates $branch_name.
    echo '{' >> $vscode_folder/tasks.json
    echo '    // See https://go.microsoft.com/fwlink/?LinkId=733558' >> $vscode_folder/tasks.json
    echo '    // for the documentation about the tasks.json format' >> $vscode_folder/tasks.json
    echo '    "version": "2.0.0",' >> $vscode_folder/tasks.json
    echo '    "tasks": [' >> $vscode_folder/tasks.json
    echo '        {' >> $vscode_folder/tasks.json
    echo '            "label": "Checkout right branch",' >> $vscode_folder/tasks.json
    echo '            "type": "shell",' >> $vscode_folder/tasks.json
    echo '            "command": "git checkout '$branch_name'",' >> $vscode_folder/tasks.json
    echo '            "problemMatcher": [],' >> $vscode_folder/tasks.json
    echo '            "runOptions": {"runOn": "folderOpen"}' >> $vscode_folder/tasks.json
    echo '        }' >> $vscode_folder/tasks.json
    echo '    ]' >> $vscode_folder/tasks.json
    echo '}' >> $vscode_folder/tasks.json
    echo >&2
    echo "File tasks.json created." >&2
    echo >&2
    echo "Commiting task.json file creation." >&2
    echo >&2
    commit_branch "File .vscode/tasks.json created."
    echo >&2
    echo "** Tasks.json sucessfully created. **" >&2
    echo >&2
  fi
}
merge_new_branch_to_main(){
  # Optionally merge the new branch ($1) into main, push, and switch back
  # to the new branch.
  branch_name=$1
  echo >&2
  echo "------------------------------------------------------------" >&2
  echo >&2
  if (confirm_option_yn "Would you like to merge this new branch to main? [Y/N]") ; then
    echo >&2
    execute_command "git checkout main"
    execute_command "git pull origin"
    execute_command "git merge $branch_name"
    execute_command "git push origin"
    execute_command "git checkout $branch_name"
    echo >&2
    echo "** Branch $branch_name sucessfully merges into branch main" >&2
    echo >&2
  fi
}
push_to_github(){
  # Push the given branch to origin and set it as the upstream.
  # Fix: the original line read 'branch_name-$1', which bash treats as a
  # command name — "command not found" aborts the script under 'set -e'.
  local branch_name=$1
  echo >&2
  echo "------------------------------------------------------------" >&2
  echo >&2
  echo "Pushing $branch_name to github" >&2
  echo >&2
  execute_command "git push --set-upstream origin $branch_name"
  echo >&2
  echo "** Branch $branch_name sucessfully pushed to github. **" >&2
  echo >&2
}
echo "************************************************************"
echo "*                                                          *"
echo "*                 CREATE NEW COURSE BRANCH                 *"
echo "*                                                          *"
echo "************************************************************"
echo
echo
# Main flow: build the branch name, pick a base, create the branch,
# optionally add the VS Code auto-checkout task, then push.
course_branch_name=$(create_branch_name)
base_branch_name=$(get_base_branch)
create_new_branch $course_branch_name $base_branch_name
# NOTE(review): 'ls -d */' can list several directories; this assumes
# exactly one course folder exists in the repo root.
course_main_folder_name=$(ls -d */)
create_tasks_json $course_branch_name $course_main_folder_name
push_to_github $course_branch_name
# merge_new_branch_to_main $course_branch_name
# execute_command "cd $course_main_folder_name$course_branch_name"
echo "------------------------------------------------------------"
echo "|                 PROCESS FINISHED. ENJOY!                  |"
echo "------------------------------------------------------------"
f54658f6bf4ade64a10879eb299234b2b9858bd7 | Shell | xlyric/Melanie2-InstallationAuto | /deb/install_pegase/DEBIAN/postinst | UTF-8 | 680 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Debian postinst: unpack the pegase webapp, wire up its nginx vhost,
# certificates and memcached config, then restart the services.
# NOTE(review): no 'set -e' and no error checks — any failed step is
# silently ignored by the following ones.
LOCALFILES="/usr/local/src"
LOCALAPP="pegase"

# PHP configuration with Nginx
cd /var/www/html
tar -xvf $LOCALFILES/$LOCALAPP.tar.gz
chown -R www-data. /var/www/html/pegase
cp $LOCALFILES/$LOCALAPP.conf /etc/nginx/sites-available/$LOCALAPP.conf
rm /etc/nginx/sites-enabled/default
ln -s /etc/nginx/sites-available/$LOCALAPP.conf /etc/nginx/sites-enabled/
mkdir /etc/nginx/certs
# Move (not copy) the TLS key/cert pairs shipped under /usr/local/src.
mv $LOCALFILES/$LOCALAPP*.key /etc/nginx/certs
mv $LOCALFILES/$LOCALAPP*.pem /etc/nginx/certs
service php7.3-fpm restart
service nginx restart

# memcached configuration
cp $LOCALFILES/memcached.conf /etc/memcached.conf
service memcached restart
mkdir /var/log/pegase/
chown www-data. /var/log/pegase/
| true |
af7dcf3cb930c18f58273147d3e9cf7ac416a60d | Shell | IKAMTeam/depl-scripts | /list-services.sh | UTF-8 | 1,690 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# List OneVizion services found under $SERVICES_PATH; -s/--short-format
# prints bare names, otherwise a coloured status line per service.
if [ -n "$1" ] && { [ "$1" == "--short-format" ] || [ "$1" == "-s" ]; }; then
    SHORT_FORMAT="1"
fi

# shellcheck source=utils.sh
. "$(dirname "$0")/utils.sh"

require_root_user

if [ -z "$SHORT_FORMAT" ]; then
    echo "List of available OneVizion services:"
fi
# NUL-delimited find + read keeps directory names with spaces intact.
find "$SERVICES_PATH" -maxdepth 1 -type d -print0 | while read -r -d '' SERVICE_DIR; do
    SERVICE_NAME="$(basename "$SERVICE_DIR")"
    ARTIFACT_JAR="$(get_artifact_name "$SERVICE_NAME").jar"
    ARTIFACT_PY="$(get_artifact_name "$SERVICE_NAME").py"

    # Skip directories that hold neither a jar nor a python artifact.
    if [ ! -f "$SERVICE_DIR/$ARTIFACT_JAR" ] && [ ! -f "$SERVICE_DIR/$ARTIFACT_PY" ]; then
        continue
    fi

    if [ -z "$SHORT_FORMAT" ]; then
        # Only jar artifacts carry an embedded version string.
        if [ -f "$SERVICE_DIR/$ARTIFACT_JAR" ]; then
            ARTIFACT_VERSION="$(extract_and_read_artifact_version "$SERVICE_DIR/$ARTIFACT_JAR")"
        else
            ARTIFACT_VERSION="[version is undefined]"
        fi

        # Report how the service is managed (systemd vs cron) and, for
        # systemd units, whether it is currently running (green/red).
        if is_daemon_installed "$SERVICE_NAME"; then
            if is_daemon_running "$SERVICE_NAME"; then
                IS_RUNNING="\e[32mrunning\e[0m"
            else
                IS_RUNNING="\e[31mnot running\e[0m"
            fi

            echo -e "    \e[1m$SERVICE_NAME \e[0m($SERVICE_DIR) [\e[42ma systemd service\e[0m, $IS_RUNNING]: \e[4m$ARTIFACT_VERSION \e[0m"
        elif is_cron_installed "$SERVICE_NAME"; then
            echo -e "    \e[1m$SERVICE_NAME \e[0m($SERVICE_DIR) [\e[42ma cron scheduled service\e[0m]: \e[4m$ARTIFACT_VERSION \e[0m"
        else
            echo -e "    \e[1m$SERVICE_NAME \e[0m($SERVICE_DIR) [\e[41mnot a systemd service or cron scheduled\e[0m]: \e[4m$ARTIFACT_VERSION \e[0m"
        fi
    else
        echo "$SERVICE_NAME"
    fi
done
| true |
9cfe071dbc308ac613fdcec3eeb3b4d827e06f9f | Shell | XiangXiangQq3/gmall | /data/shell脚本/kafka.sh | UTF-8 | 505 | 2.828125 | 3 | [] | no_license | #! /bin/bash
# Start or stop the Kafka broker on every node of the cluster over ssh.
# Usage: kafka.sh {start|stop}
case "$1" in
start)
	for host in hadoop102 hadoop103 hadoop104; do
		echo "========== $host =========="
		ssh "$host" "/opt/module/kafka_2.11-0.11.0.2/bin/kafka-server-start.sh -daemon /opt/module/kafka_2.11-0.11.0.2/config/server.properties"
	done
	;;
stop)
	for host in hadoop102 hadoop103 hadoop104; do
		echo "========== $host =========="
		ssh "$host" "/opt/module/kafka_2.11-0.11.0.2/bin/kafka-server-stop.sh"
	done
	;;
*)
	# Previously an unknown/missing argument did nothing silently.
	echo "Usage: $0 {start|stop}" >&2
	exit 1
	;;
esac
| true |
09111d770e069805587a32c1255cb806df4f03a9 | Shell | btittelbach/nautilus-scripts | /Image/EXIFtool DateDiff | UTF-8 | 402 | 3.140625 | 3 | [] | no_license | #!/bin/zsh
# Nautilus script (zsh): show the offset, in seconds, between a photo's
# EXIF CreateDate and a user-supplied "real" epoch timestamp.
# '=exiftool' is zsh's command-path expansion; the guard aborts with a
# zenity error dialog when exiftool is not installed.
[[ -x =exiftool ]] || {zenity --error --text "exiftool is not installed"; exit 1}
local imgts
local realts
local tsdiff
# ${(f)...[1]}: first line of the newline-separated selection list.
imgts=$(exiftool -p '$CreateDate' -d "%s" "${(f)NAUTILUS_SCRIPT_SELECTED_FILE_PATHS[1]}")
# Arithmetic expansion validates that the zenity entry is numeric.
realts=$(($(zenity --title="Time in picture (epoch seconds)" --entry --entry-text='epoch seconds')))
tsdiff=$((realts-imgts))
# Display the offset in a zenity text window (fed via here-string).
zenity --text-info --filename=/dev/stdin <<< "$tsdiff"
| true |
60930cc08732100928db90b5ccb6d4dffb7469d3 | Shell | LudvigHz/dotfiles | /install/cli-tools.sh | UTF-8 | 3,682 | 4.28125 | 4 | [] | no_license | #!/usr/bin/env bash
print_help() {
printf "Usage: install.sh cli-tools [options]\n"
printf "\tOptions:\n"
printf "\t%s\t%s\n" \
"-a|--all" "Install all tools" \
"-n|--name [n]" "Install a single tool. Run with -l to see list of tools" \
"-f|--force" "Force intstall even though the command exists" \
"-u|--use [m]" "Specify your package manager (default: apt)" \
"-h|--help" "Print this help message" \
"-l|--list" "List all cli tools that will be installed" \
"--list-managers" "List all supported package managers for use for -u"
}
# Table of package managers and their install commands
declare -A installers=(
["apt"]="sudo apt-get install -y "
["pacman"]="sudo pacman -S "
["dnf"]="sudo dnf install "
["pkg"]="pkg install "
["zypper"]="sudo zypper --non-interactive install "
["brew"]="brew install "
["snap"]="snap install "
)
declare -A updaters=(
["apt"]="sudo apt-get update"
["pacman"]="sudo pacman -Syy"
["dnf"]="echo No need to update"
["pkg"]="echo No need to update"
["zypper"]="sudo zypper refresh"
["brew"]="echo You may need to manually update homebrew!"
["snap"]="snap refresh"
)
# Declare a table of tools that should be installed
declare -A cli_tools=(
#["program"]="command"
["zsh"]="zsh"
["vim"]="vim"
# ["neovim"]="nvim"
["fzf"]="fzf"
["subversion"]="svn"
["git"]="git"
["ripgrep"]="rg"
["tmux"]="tmux"
["gawk"]="awk"
["autojump"]="autojump"
#TODO add command for installing GUI and distro-specific programs
)
# Function to print keys from an associative array
print_all() {
printf "Cli-tools to be installed:\n"
for i in "${!cli_tools[@]}"; do
printf "\t%s\n" "$i"
done
}
# Function for installing a given utility
install_program() {
if [[ ! $(command -v "$manager") ]]; then
printf "%s does not seem to be installed on this system\n" "$manager"
for m in "${!installers[@]}"; do
if [[ $(command -v "$m") ]]; then
local alternate=$m
printf "Found %s! Do you wish to use it instead? [y/n] " "$alternate"
read -r ans
if [[ "$ans" == "y" || "$ans" == "Y" ]]; then
manager="$alternate"
printf "Installing using %s...\n" "$manager"
break
fi
fi
done
if [[ $manager != "$alternate" ]]; then
exit 0
fi
fi
if [[ ! $force ]]; then
if [[ $(command -v "${cli_tools["$1"]}") ]]; then
printf "%s is already installed, use -f option to force install.\n" "$1"
else
printf "Installing %s using %s\n\n" "$1" "$manager"
eval "${installers["$manager"]} $1"
fi
else
printf "Installing %s using %s\n\n" "$1" "$manager"
eval "${installers["$manager"]} $1"
fi
}
manager="apt"
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
-a | --all)
install_all=true
shift
;;
-f | --force)
force=true
shift
;;
-n | --name)
cli_tool="$2"
shift
shift
;;
-u | --use)
manager="$2"
shift
shift
;;
-h | --help)
print_help
exit 1
;;
-l | --list)
print_all
exit 1
;;
--list-managers)
printf "Supported package managers:\n"
for i in "${!installers[@]}"; do
printf "\t%s\n" "$i"
done
exit 1
;;
*)
shift
;;
esac
done
if [[ -n $cli_tool ]]; then
# Update package manager before installing
eval "${updaters["$manager"]}"
install_program "$cli_tool"
elif [[ $install_all ]]; then
# Update package manager before installing
eval "${updaters["$manager"]}"
for prog in "${!cli_tools[@]}"; do
install_program "$prog"
done
else
print_help
fi
echo -e "\nInstalling cli tools - DONE\n"
| true |
08c6fed2845bd6c07937e3a1756138213d357a73 | Shell | silencezzzz/nginx-php | /run.sh | UTF-8 | 430 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# Start php and nginx
while :
do
runningPHP=$(ps -ef |grep "php-fpm" |grep -v "grep" | wc -l)
if [ "$runningPHP" -eq 0 ] ; then
echo "PHP service was not started. Startting now."
php-fpm
fi
runningNginx=$(ps -ef |grep "nginx" |grep -v "grep" | wc -l)
if [ "$runningNginx" -eq 0 ] ; then
echo "Nginx service was not started. Startting now."
/usr/local/nginx/sbin/nginx
fi
sleep 5
done | true |
e0cb32793c5c8f3c937e903bea00b672d5defb9f | Shell | hestela/rust-mpi-benchmarks | /ci/run-bench.sh | UTF-8 | 747 | 4.1875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
set -e
BINARIES_DIR="target/release"
binaries=$(find $BINARIES_DIR -maxdepth 1 -type f -executable \
-exec file -i '{}' \; | cut -d":" -f1)
num_binaries=$(printf "%d" "$(echo "${binaries}" | wc -w)")
echo "Running with $(which mpiexec)"
num_ok=0
num_failed=0
result="ok"
for binary in ${binaries}
do
echo "Starting benchmark: ${binary}."
output_file=${binary}_output
if (mpiexec -np 2 ./${binary} > "${output_file}")
then
echo "ok"
num_ok=$((${num_ok} + 1))
else
echo "output:"
cat "${output_file}"
num_failed=$((${num_failed} + 1))
result="failed"
fi
rm -f "${output_file}"
done
echo "${num_ok}/${num_binaries} worked; ${num_failed} failed."
if [ "$num_failed" -ne "0" ]; then
exit 1
fi
| true |
19dce52020d7e679212950eeacb8a0755734414b | Shell | binh-vu/semantic-modeling | /bin/experiments/func/karma_func.sh | UTF-8 | 2,296 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
set -o pipefail
exec_karma () {
dataset=$1
semantic_labeling=$2
top_n_stypes=$3
pyexp_name=$4
shift 4
kfolds=("$@")
workdir="$(pwd)"
export PYTHONPATH="$PYTHONPATH:$workdir/pysm"
exp_name=$(python -c "from datetime import datetime; print(datetime.now().strftime('%B_%d__%H:%M:%S').lower());")
exp_dir="$(pwd)/debug/experiments/${dataset}_${exp_name}"
commit_id=$(git rev-parse HEAD)
if [ -f $exp_dir ]; then
echo "ExperimentDirectory should not exists!"
exit -1
fi
mkdir -p $exp_dir
echo $commit_id > "$exp_dir/commit_id.txt"
echo "Execution dir: $exp_dir"
echo "" > "$exp_dir/execution.log"
echo "" > $(pwd)/debug/execution.log
python -m experiments.clear_cache --dataset=$dataset
for kfold in "${kfolds[@]}"; do
echo ">>>>>> semantic-labeling-method=$semantic_labeling, #TYPES=$top_n_stypes, KFOLD = $kfold"
echo "Execute command:
python -m experiments.semantic_modeling.kfold_karma \\
--dataset=$dataset \\
--semantic_typer=$semantic_labeling \\
--semantic_labeling_top_n_stypes=$top_n_stypes \\
--kfold=\"$kfold\" \\
--exp_dir=$exp_dir 2>&1 | tee -a \"$exp_dir/execution.log\" $workdir/debug/execution.log"
python -m experiments.semantic_modeling.kfold_karma \
--dataset=$dataset \
--semantic_typer=$semantic_labeling \
--semantic_labeling_top_n_stypes=$top_n_stypes \
--kfold="$kfold" \
--exp_dir=$exp_dir 2>&1 | tee -a "$exp_dir/execution.log" $workdir/debug/execution.log
done
# echo "Executing command:
# python -m experiments.semantic_modeling.kfold_record \\
# --dataset $dataset \\
# --run_name $semantic_labeling#$top_n_stypes \\
# --exp_name $pyexp_name \\
# --exp_dir $exp_dir" | tee -a "$exp_dir/execution.log" $workdir/debug/execution.log
#
# python -m experiments.semantic_modeling.kfold_record \
# --dataset=$dataset \
# --run_name="$semantic_labeling#$top_n_stypes" \
# --exp_name="$pyexp_name" \
# --exp_dir=$exp_dir 2>&1 | tee -a "$exp_dir/execution.log" $workdir/debug/execution.log
} | true |
58d25009191356cebe16c8c58153255e533e0540 | Shell | jens-maus/RaspberryMatic | /scripts/update-daemonize.sh | UTF-8 | 920 | 3.703125 | 4 | [
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"GPL-2.0-only",
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
ID=${1}
PACKAGE_NAME="daemonize"
PROJECT_URL="https://github.com/bmc/daemonize"
ARCHIVE_URL="${PROJECT_URL}/archive/${ID}/${PACKAGE_NAME}-${ID}.tar.gz"
if [[ -z "${ID}" ]]; then
echo "tag name or commit sha required (see ${URL})"
exit 1
fi
# download archive for hash update
ARCHIVE_HASH=$(wget --passive-ftp -nd -t 3 -O - "${ARCHIVE_URL}" | sha256sum | awk '{ print $1 }')
if [[ -n "${ARCHIVE_HASH}" ]]; then
# update package info
BR_PACKAGE_NAME=${PACKAGE_NAME^^}
BR_PACKAGE_NAME=${BR_PACKAGE_NAME//-/_}
sed -i "s/${BR_PACKAGE_NAME}_VERSION = .*/${BR_PACKAGE_NAME}_VERSION = $1/g" "buildroot-external/package/${PACKAGE_NAME}/${PACKAGE_NAME}.mk"
# update package hash
sed -i "$ d" "buildroot-external/package/${PACKAGE_NAME}/${PACKAGE_NAME}.hash"
echo "sha256 ${ARCHIVE_HASH} ${PACKAGE_NAME}-${ID}.tar.gz" >>"buildroot-external/package/${PACKAGE_NAME}/${PACKAGE_NAME}.hash"
fi
| true |
6e88ac78721a05fca2656b3a03e9b837f0035cca | Shell | JeffAment/Scripts-and-services | /Bash_Scripts/angel_dust | UTF-8 | 224 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Endless loop: find the window whose title contains "Shadow" and keep
# sending it Escape twice (xdotool paces the keystrokes via --delay).
# '[ true ]' is always true because "true" is a non-empty string.
while [ true ]
do
	# Window ID will be in hex, so we convert to decimal for xdotool
	WINDOW_ID=$(wmctrl -l | awk '/Shadow/ {print strtonum($1)}')
	xdotool key --window $WINDOW_ID --delay 800 Escape Escape
done
| true |
296017936b5b490f242ee2e1a0908749081c941b | Shell | FMSoftCN/mdolphin-core | /Source/WebKit/mg/control/resource/image/buildInnerResource.sh | UTF-8 | 438 | 2.625 | 3 | [
"BSD-2-Clause",
"Apache-2.0"
] | permissive | #!/bin/sh
# Generate the MiniGUI inner-resource C file from the PNGs in
# ../imageSource, then patch it so it compiles under g++.
name="InnerResource"
macro="InnerResource"
tmp="tmpdata"
# Build "list": PNG paths relative to ../imageSource.
find ../imageSource -name '*.png' >$tmp
sed -n 's/^\.\.\/imageSource\///p' $tmp>list
rm -f $tmp
./inner-res-trans -i ../imageSource -l list -o . -n $name -m $macro
genFile=$name.c
# for g++ compile error
# NOTE(review): these seds run without -n but keep the 's///p' flag,
# which normally prints substituted lines twice — confirm the generated
# file is not meant to drop duplicates here.
sed -e '/static\s*INNER_RES\s*.*\[\]=/,/G/s/void/Uint8/p' $genFile >$tmp
sed -e '/static\s*INNER_RES\s*.*\[\]=/,/G/s/"png"/(char*)"png"/p' $tmp>$genFile
rm -f $tmp
| true |
ec3cc4c9e64641dc2417aba51d1528c8cb23783f | Shell | LighteningIce/partition-trees | /example/gen_data.sh | UTF-8 | 1,115 | 3.390625 | 3 | [] | no_license | #!/bin/bash
function rand(){
min=$1
max=$(($2-$min+1))
num=$(date +%s%N)
echo $(($num%$max+$min))
}
function ergodic(){
for ((i=1;i<=100;i++));
do
# tree_num=$(rand 20001 60000);#结点个数
tree_num=20
# echo $tree_num;
if [ $i -eq 1 ]
# then mkdir ../randTrees_20001_60000
then mkdir ../randTrees_$tree_num;
fi
#结点最大度
degree=$(rand 2 100);
#树最大高度
height=$(rand 2 100);
echo "height:"$height;
while [ `expr $height \* $degree` -lt $tree_num ]
do
let degree=$(rand 2 100);
let height=$(rand 2 100);
# echo "height:"$height;
done;
echo "degree=$degree"
echo "height=$height"
echo `expr $height \* $degree`;
echo $i
if [ $((i % 5)) -eq 0 ]
then sleep 3;
fi
./main_gen $tree_num $degree $height>> ../randTrees_$tree_num/gen_data_$tree_num'_'$i.txt;
# ./main_gen $tree_num $degree $height>> ../randTrees_20001_60000/gen_data_20001_60000_$i.txt;
# echo -n "随机数:" ;expr $(date +%s%N)%$[$max - $min + 1] + $min
sleep 1;
done
}
ergodic
| true |
6e6a7037bb8334c1f36b6136d589d2fb1785bc30 | Shell | wenhuchen/KGPT | /scripts/e2enlg/eval_sequence_e2enlg_all.sh | UTF-8 | 378 | 2.796875 | 3 | [
"MIT"
] | permissive | FOLDER=$(pwd)
GPUS=$1
OPTION=$2
checkpoint=$3
for file in ${FOLDER}/${checkpoint}/*.pt
do
echo "Evaluating on"${file}
CUDA_VISIBLE_DEVICES=${GPUS} python code/run.py --batch_size 16 --dataset e2enlg \
--tokenizer_dir ${FOLDER}/GPT2_tokenizer/ --max_enc_len 256 --max_dec_len 72 --num_workers 0 --option ${OPTION} \
--load_from $file --beam_size 2 --encoder sequence
done
| true |
8904d836a7397e0570bedeea99ad5b382ae3f9c4 | Shell | nmsa/tma-framework-k | /development/mysql/ceph/ceph_configuration.sh | UTF-8 | 483 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | #Create Ceph image
rbd create tma_k_db_mysql -s 1024
#Disable some image features that are not supported by Linux Kernel
rbd feature disable tma_k_db_mysql fast-diff
rbd feature disable tma_k_db_mysql object-map
rbd feature disable tma_k_db_mysql deep-flatten
# Assing a file system type to image created
mkfs.ext4 /dev/rbd0
#Generate authentication key to connecting Kubernetes and Ceph
ceph auth get-key client.admin > temp.txt
key="$(sed -n 1p temp.txt)"
echo "${key}"| base64
| true |
de6d97055a7a58ca5429f7de035b583f2d706d21 | Shell | hhiikkaarruu/dotfiles | /install.sh | UTF-8 | 511 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env zsh
GIT_DOTPATH="${HOME}/dotfiles"
DOTPATH="${HOME}/.dotfiles"
mv "${GIT_DOTPATH}" "${DOTPATH}" &> /dev/null
cd "${DOTPATH}"
if [ $? -ne 0 ]; then
echo "not found:${DOTPATH}"
fi
for f in .??*
do
[ "${f}" = ".git" ] && continue
[ "${f}" = ".swp" ] && continue
ln -snfv "${DOTPATH}/${f}" "${HOME}/${f}"
done
ln -snfv "${DOTPATH}/.vimrc" "${DOTPATH}/.vim/init.vim"
mkdir -p "${HOME}/.config"
ln -snfv "${DOTPATH}/.vim" "${HOME}/.config/nvim"
echo 'installation has been complated. ✔︎'
| true |
0e4703fa282dc59afe8cd64237bd2a21ae2a3c40 | Shell | cegamboav/OVM_Scripting | /display_vcpu_pinning.sh | UTF-8 | 1,128 | 3.28125 | 3 | [] | no_license | #!/bin/bash
clear
print_doble_line(){
echo "|====================================================================================================|"
}
print_simple_line(){
echo "|------------------------------------------|------------------------------------------|--------------|"
}
print_server_info(){
printf "%-1s %-20s %-77s %-1s\n" "|" "Server_Name:" $1 "|"
}
print_info(){
printf "%-1s %-40s %-1s %-40s %-1s %-12s %-1s\n" "|" $1 "|" $2 "|" $3 "|"
}
print_doble_line
print_server_info $(hostname)
print_simple_line
print_info "VM_ID" "VM_Name" "CPU_Pinning"
print_simple_line
dom0_pinning=$(xm vcpu-list|grep Domain|awk '{print $7}'|tail -n 1)
print_info "Domain-0" "Domain-0" "0-$dom0_pinning"
for i in $(xm vcpu-list|sort -k 7|awk '{print $1}'|egrep -v 'Domain|Name'|uniq)
do
Id=$i
cpu_pinning=$(xm vcpu-list $i|grep -v Name|awk '{print $7}'|uniq)
path=$(find /OVS/Repositories/ -name $i|grep -v snap|head -n 1)
name=$(cat $path/vm.cfg |grep OVM_simple_name|cut -d "'" -f2)
#echo "$Id $name $cpu_pinning"
print_info $Id $name $cpu_pinning
done
print_doble_line | true |
210d8f4cb351d64f8ecca30f3bb350b91979e73e | Shell | FNNDSC/ChRIS_store | /docker-entrypoint.sh | UTF-8 | 436 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if [[ "$DJANGO_DB_MIGRATE" == 'on' ]]; then
if [[ "$DJANGO_SETTINGS_MODULE" == 'config.settings.local' ]]; then
python migratedb.py -u chris -p Chris1234 -d chris_store_dev --host chris_store_dev_db --noinput
elif [[ "$DJANGO_SETTINGS_MODULE" == 'config.settings.production' ]]; then
python migratedb.py -u $POSTGRES_USER -p $POSTGRES_PASSWORD -d $POSTGRES_DB --host $DATABASE_HOST --noinput
fi
fi
exec "$@"
| true |
a2510b89d0035748c8661dcaa40f9a5d69a2e0bd | Shell | germanramos/docker-openvpn | /bin/openvpn-auth.sh | UTF-8 | 478 | 2.703125 | 3 | [] | no_license | #!/bin/bash
OPENVPNDIR="/etc/openvpn"
. $OPENVPNDIR/auth.env
if [[ $AUTH_METHOD == 'openshift' ]]; then
curl -u $username:$password -kIsS "${AUTH_HTTPBASIC_URL}/oauth/authorize?client_id=openshift-challenging-client&response_type=token" | grep -q "Set-Cookie"
elif [[ $AUTH_METHOD == 'openshift-token' ]]; then
curl -X GET -H "Authorization: Bearer $password" -kIsS "${AUTH_HTTPBASIC_URL}/oapi/v1/projects" | grep -q "200 OK"
else
/usr/local/bin/openvpn-auth.py $@
fi
| true |
5e066d5c1c6aeb9a45ac72a411cf5d64329755a9 | Shell | presscommandz/js-data-structure | /scripts/build.sh | UTF-8 | 209 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env bash
ttsc
declare -a export_dirs=(
core-graphic
)
for dir in "${export_dirs[@]}"; do
cat <<EOF >"$dir/package.json"
{
"main": "./index.js",
"types": "./index.d.ts"
}
EOF
done
| true |
22d343adef363544bef88d15fc33e79c69a205e9 | Shell | an-dev/linux | /brightchg.sh | UTF-8 | 630 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# Acer Travelmate P253-M brightness control workaround
# Note: add the following to /etc/rc.local
# chmod 777 /sys/class/backlight/intel_backlight/brightness
# For convenience, assign whatever keys you want to run this script
# Fine tune the bump parameter as required
#
# Usage:
# ./brightchg.sh up # bump up brightness
# ./brightchg.sh down # bump down brightness
#
curr=`cat /sys/class/backlight/intel_backlight/brightness`
bump=244
if [ "$1" == "up" ]; then
curr=`echo "$curr + $bump" | bc`
else
curr=`echo "$curr - $bump" | bc`
fi
echo $curr | tee /sys/class/backlight/intel_backlight/brightness | true |
1600b80876bfb2380f4730c9af1b3b06751b65ea | Shell | veronica-g/mac_scripting | /gitdetector.command | UTF-8 | 364 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
#
# git detector.
# displays the installed git version on a mac
#
# Created by/last edited by: Veronica Goralski on 2 July 2018
if [ -e /Library/Developer ]
then
gitversion=$(/usr/bin/git --version | awk '{print $3}')
printf "\ngit detector:\n\ngit version $gitversion found.\n\n"
else
printf "Xcode tools (git dependency) not installed.\n"
fi
| true |
d2954a439da32a5bc06f3738bc6e8a6445eff4a7 | Shell | tomaka/jsonrpc | /_automate/publish.sh | UTF-8 | 183 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -exu
ORDER=(core core-client server-utils tcp ws ws/client http ipc stdio pubsub derive test)
for crate in ${ORDER[@]}; do
cd $crate
cargo publish $@
cd -
done
| true |
1aed20fcd27b38a0acbd8a9d2b7d641c9f1f356f | Shell | fuzzyfreak/docs | /fuzzydns-scripts/docs/playground/playground.sh | UTF-8 | 345 | 3.375 | 3 | [] | no_license | #!/bin/bash
source scriptVars.sh
source scriptFunc.sh
source scriptTest.sh
clear
while :
do
echo "Welcome to the playground!"
echo " * 1: test_fun"
echo " * quit: testexit"
read OPT1
case $OPT1 in
1) find_fun 1 ;;
quit) exit_fun ;;
esac
read -p "Press any key to continue... " -n1 -s
clear
done
| true |
5ae41e9531b7ca5089722ca54b501057f96d83a8 | Shell | lifoxin/study | /ss/stop.sh | UTF-8 | 414 | 3.03125 | 3 | [] | no_license | #!/bin/bash
time1=`date "+%Y-%m-%d %H:%M:%S"`
kill -9 $(ps -ef |grep -v grep |grep ssserver|awk '{print $2}') >> /dev/null 2>&1
#-ne !=
#-eq =
if [ $? -eq 0 ]
then
echo "$time1 >> the service has been shut down" >> /tmp/error.time
echo "$time1 >> the service has been shut down"
else
echo "$time1 >> the service did not start" >> /tmp/error.time
echo "$time1 >> the service did not start"
fi
| true |
2b6c7f3f6d0f72e04bff75147326b6fc1186a747 | Shell | dkohlbre/crypto-vm | /problems/hitcon2014-rsaha/setup.sh | UTF-8 | 677 | 3.171875 | 3 | [] | no_license | #!/bin/bash
PROBLEM="hitcon2014-rsaha"
files=("redacted_server.py")
# Just copy the data file to the webdir
for fname in "${files[@]}"
do
# mkdir -p /var/www/html/$PROBLEM
cp /vagrant/problems/$PROBLEM/$fname /var/www/html/$PROBLEM/$PROBLEM\_$fname
done
# we need sympy
pip install sympy
#setup xinetd server
cp /vagrant/problems/$PROBLEM/$PROBLEM.xinetd /etc/xinetd.d/$PROBLEM
echo -e "\n$PROBLEM 5454/tcp\n" >> /etc/services
# setup the service
files=("server.py")
#Copy service files over
for fname in "${files[@]}"
do
mkdir -p /home/vagrant/problems/$PROBLEM
cp /vagrant/problems/$PROBLEM/$fname /home/vagrant/problems/$PROBLEM/$fname
done
service xinetd restart
| true |
943d0e3f0ff3899eb2c22e701561a99e8e34e630 | Shell | frohoff/gitlabhq | /lib/hooks/post-receive | UTF-8 | 453 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Version 4.1
# This file was placed here by GitLab. It makes sure that your pushed commits
# will be processed properly.
while read oldrev newrev ref
do
# For every branch or tag that was pushed, create a Resque job in redis.
repo_path=`pwd`
env -i redis-cli rpush "resque:gitlab:queue:post_receive" "{\"class\":\"PostReceive\",\"args\":[\"$repo_path\",\"$oldrev\",\"$newrev\",\"$ref\",\"$GL_USER\"]}" > /dev/null 2>&1
done
| true |
c4c30006ce6fea4f26ca80672c30bcfbbcbd6818 | Shell | chaffeechenyefei/cDarkNet | /run_test_ww_aug_s1_Scene4.sh | UTF-8 | 1,567 | 2.921875 | 3 | [] | no_license | # current experiment training, need revising before each testing
model_no="_ww_aug"
weights_name="yolov3_ww_aug_v1_9000.weights"
# names should follow the style or need changing the code below for corrent directories
YOLO_DIR="/home/ubuntu/darknet"
# DATASET_DIR="/home/ubuntu/CV/data/wework_activity/Scene_4"
# TESTSET_DIR="${DATASET_DIR}/test_2"
BACKUP_DIR="${YOLO_DIR}/backup/ww_aug" # "${model_no}"
# TEST_DATA_DIR="${TESTSET_DIR}/test.txt"
# create prediction directory if not exists
PRED_DIR="${YOLO_DIR}/predict_v${model_no}"
mkdir -p "${PRED_DIR}"
cfg_file=${YOLO_DIR}/cfg/yolov3_ww_608/yolov3_ww_608_test.cfg
weights_file="${BACKUP_DIR}/${weights_name}"
# create positions of predicted bounding boxes and save to result.txt under ${PRED_DIR}
#${YOLO_DIR}/darknet detector test ${YOLO_DIR}/cfg/activity_wework_tinyv1_Scene4.data ${cfg_file} ${weights_file} -thresh 0.25 -dont_show -ext_output < ${TEST_DATA_DIR} > ${PRED_DIR}/result.txt
# create images of predictions oand save images under ${PRED_DIR}
# for f in ${TESTSET_DIR}/*.jpg; do
# ${YOLO_DIR}/darknet detector test ${YOLO_DIR}/cfg/activity_wework_tinyv1_Scene4.data ${cfg_file} ${weights_file} "$f" -dont_show
# new_f_name="${f##*/}"
# echo ${new_f_name}
# mv ${YOLO_DIR}/predictions.jpg ${PRED_DIR}/${new_f_name}
# done
# create mAP of testing set and save to result.txt
${YOLO_DIR}/darknet detector map ${YOLO_DIR}/cfg/yolov3_ww_aug_v1/activity_wework_leaveout.data ${cfg_file} ${weights_file} -thresh 0.25 -iou_thresh 0.25 -dont_show -ext_output >> ${PRED_DIR}/result.txt
| true |
84f4fccefe8451c2e6742314632dc9e54a22991a | Shell | sam-xif/Schedule-IP | /rebuild_db.sh | UTF-8 | 286 | 2.75 | 3 | [
"MIT"
] | permissive | # Draft of BASH version of rebuild_db.bat
DBNAME=schedule.db
MANAGE_SCRIPT=manage.py
EXT=db
rm $DBNAME
touch $DBNAME
python $MANAGE_SCRIPT version_control --url=sqlite:///$DBNAME
sed -i.bu "s;sqlite:///.*\.${EXT};sqlite:///${DBNAME};g" $MANAGE_SCRIPT
python $MANAGE_SCRIPT upgrade
| true |
fb8cf952b6b84242707708220904940df6bcb806 | Shell | illiapoplawski/androidSimplified | /build-scripts/verifyGitStatus.sh | UTF-8 | 5,092 | 4.125 | 4 | [] | no_license | #! /usr/bin/env bash
#
# Author: Illia Poplawski <illia.poplawski@gmail.com>
#
#/ Verifies the git status of all projects in a repo
#/
#/ Public Functions:
#/
#/ Usage: verifyGitStatus [OPTIONS]...
#/
#/ OPTIONS
#/ -h, --help
#/ Print this help message
#/ -d, --directory <path>
#/ Specify Top Dir for Android source
#/ -p, --project <path>
#/ Relative path to project dir from source top
#/ --all
#/ Verifies all projects
#/
#/ EXAMPLES
#/ verifyGitStatus
#/ verifyGitStatus -d <path/to/dir> -p <path>
#/ verifyGitStatus -d <path/to/dir> --all
#/ verifyGitStatus --help
#/
# don't hide errors within pipes
set -o pipefail
[[ -v SCRIPT_NAME ]] || readonly SCRIPT_NAME="${BASH_SOURCE[0]}"
[[ -v SCRIPT_DIR ]] || readonly SCRIPT_DIR="$( cd "$( dirname "$SCRIPT_NAME" )" && pwd )"
GIT_STATUS_LOG="$(dirname "$SCRIPT_DIR")"/log/git_status.log
mkdir -p "${GIT_STATUS_LOG%/*}"
IFS=$'\t\n' # Split on newlines and tabs (but not on spaces)
. "$(dirname "$SCRIPT_DIR")"/utilities/logging.sh
. "$(dirname "$SCRIPT_DIR")"/utilities/verifyPythonVenv.sh
. "$(dirname "$SCRIPT_DIR")"/utilities/setTopDir.sh
# Usage: _verifyGitStatus
#
# Verifies the git status of all projects in repo
_verifyGitStatus() {
local status_out
if [[ ! -v auto_verify ]]; then
"$(dirname "$SCRIPT_DIR")"/utilities/userFunctions.sh getYesNo -t "Verify Git Status" -d "Would you like to verify git status before building?" -i "yes" || {
log -w "Not verifying git status"
exit 0
}
fi
if [[ ! -v BUILD_TOP_DIR ]]; then
# Set top dir
setTopDir || exit $?
fi
"$(dirname "$SCRIPT_DIR")"/utilities/loggingFunctions.sh clearLog -f "$GIT_STATUS_LOG"
if [[ -v verify_all ]]; then
verifyPythonVenv -d "$BUILD_TOP_DIR" || exit $?
log -i "Verifying git status for all projects"
pushd "$BUILD_TOP_DIR" &>/dev/null || exit $?
status_out=$(repo forall -pc 'git status' | sed -e 's/project //' -e 's/\/$//' | tee -a "$GIT_STATUS_LOG") || {
log -e "Verifying all projects failed"
exit 1
}
popd &>/dev/null || exit $?
else
local project_dir
if [[ ! -v project_dir_rel || -z $project_dir_rel || "$project_dir_rel" == " " ]]; then
project_dir=$("$(dirname "$SCRIPT_DIR")"/utilities/userFunctions.sh getDir -t "Choose your project directory" -o "$BUILD_TOP_DIR") || {
log -e "Project directory to verify not set."
exit 1
}
project_dir_rel=${project_dir#"$BUILD_TOP_DIR/"}
else
project_dir="$BUILD_TOP_DIR"/"$project_dir_rel"
fi
if [[ ! -d $project_dir ]]; then
log -e "Project directory: $project_dir_rel does not exist."
exit 1
fi
log -i "Verifying git status for project: $project_dir_rel"
pushd "$project_dir" &>/dev/null || exit $?
status_out=$(git status | tee -a "$GIT_STATUS_LOG") || {
log -e "Verifying project failed"
exit 1
}
popd &>/dev/null || exit $?
fi
if echo "$status_out" | grep -Eqwi 'conflicts'; then
log -e "git status not clean, please check git_status.log for details"
exit 1
else
log -i "git status clean"
fi
}
# Show verify git status usage
_verifyGitStatusUsage() {
grep '^#/' "${SCRIPT_DIR}/${SCRIPT_NAME}" | sed 's/^#\/\w*//'
}
# Usage: verifyGitStatus [arg]
#
# Verifies the git status of all projects in repo
verifyGitStatus(){
local project_dir_rel
local verify_all
local auto_verify
local action
if [[ ${#} -eq 0 ]]; then
_verifyGitStatus
else
while [[ $# -gt 0 ]]; do
action="$1"
if [[ "$action" != '-'* ]]; then
shift
continue
fi
case "$action" in
-h|--help)
shift
_verifyGitStatusUsage
exit 0
;;
-d|--directory)
local dir="$2"
shift # past argument
if [[ "$dir" != '-'* ]]; then
shift # past value
if [[ -n $dir && "$dir" != " " ]]; then
BUILD_TOP_DIR="$dir"
else
log -w "No base directory parameter specified"
fi
fi
;;
-p|--project)
local dir="$2"
shift # past argument
if [[ "$dir" != '-'* ]]; then
shift # past value
if [[ -n $dir && "$dir" != " " ]]; then
project_dir_rel="$dir"
else
log -w "Empty project directory parameter"
fi
else
log -w "No project directory parameter specified"
fi
;;
--all)
shift
verify_all=true
;;
-a|--auto)
shift
auto_verify=true
;;
*)
log -w "Unknown argument passed: $action. Skipping"
shift # past argument
;;
esac
done
_verifyGitStatus
fi
}
(return 2>/dev/null) && sourced=1 || sourced=0
if [ $sourced -eq 1 ]; then
log -e "This script cannot be sourced. Use \"./verifyGitStatus.sh\" instead."
return 1
fi
[[ "$0" == "${BASH_SOURCE[0]}" ]] && verifyGitStatus "$@"
| true |
82eb2e345fe0709e836d8966995b3ad482713540 | Shell | thscorporation/emr-bootstrap-actions | /accumulo/1.6.1/install-accumulo.sh | UTF-8 | 2,251 | 3.046875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
set -x -e
cat > /home/hadoop/accumulo.sh << 'EOF2'
if ps ax | grep -v grep | egrep "datanode|namenode"> /dev/null
then
if [ ! -d "/home/hadoop/accumulo" ]; then
HOMEDIR=/home/hadoop
ACCUMULOV=1.6.1
ZOOKEEPRERV=3.4.6
ACCUMULO_TSERVER_OPTS=1GB
cd $HOMEDIR/.versions
wget https://elasticmapreduce.s3.amazonaws.com/samples/accumulo/1.6.1/accumulo-${ACCUMULOV}-bin.tar.gz
echo "Downloading Zookeeper"
wget http://apache.mirrors.tds.net/zookeeper/stable/zookeeper-3.4.6.tar.gz
tar xzf zookeeper*tar.gz
ln -sf $HOMEDIR/.versions/zookeeper-${ZOOKEEPRERV} $HOMEDIR/zookeeper
tar -xvzf accumulo-${ACCUMULOV}-bin.tar.gz
ln -sf $HOMEDIR/.versions/accumulo-$ACCUMULOV $HOMEDIR/accumulo
sudo yum install -y expect
cd ${HOMEDIR}
cp accumulo/conf/examples/${ACCUMULO_TSERVER_OPTS}/standalone/* accumulo/conf/
sed -i "s/<value>localhost:2181<\/value>/<value>$1:2181<\/value>/" accumulo/conf/accumulo-site.xml
cat >> accumulo/conf/accumulo-env.sh << EOF
export ACCUMULO_HOME=/home/hadoop/accumulo
export HADOOP_HOME=/home/hadoop
export ACCUMULO_LOG_DIR=/mnt/var/log/hadoop
export ZOOKEEPER_HOME=/home/hadoop/zookeeper
export JAVA_HOME=/usr/lib/jvm/java
export HADOOP_PREFIX=/home/hadoop
export HADOOP_CONF_DIR=/home/hadoop/conf
EOF
#Run on master /slave based on configuration
if grep isMaster /mnt/var/lib/info/instance.json | grep true;
then
expect -c "
spawn accumulo/bin/accumulo init
expect -nocase \"Instance name\" {send \"$2\r\"}
expect -nocase \"Enter initial password for*\" {send \"$3\r\"}
expect -nocase \"*password*\" {send \"$3\r\r\";expect eof}"
curl http://169.254.169.254/latest/meta-data/local-ipv4 > accumulo/conf/masters
echo 'x' > accumulo/conf/slaves
accumulo/bin/start-all.sh > accumulo/logs/start-all.log
else
curl http://169.254.169.254/latest/meta-data/local-ipv4 > accumulo/conf/slaves
MASTER=$(grep -i "job.tracker<" /home/hadoop/conf/mapred-site.xml | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}')
echo $MASTER > accumulo/conf/masters
accumulo/bin/tup.sh
fi
accumulo/bin/start-here.sh
sudo sed -i 's/.*accumulo.*//' /etc/crontab
fi
fi
EOF2
sudo sh -c "echo '*/1 * * * * hadoop bash /home/hadoop/accumulo.sh $1 $2 $3 > /home/hadoop/cron.log 2>&1 ' >> /etc/crontab"
echo "Done" | true |
28dc521b0f570e65a1dc5e8b2c1645f660f6080b | Shell | pasuinthesky/discordbot | /botctl.sh | UTF-8 | 791 | 3.828125 | 4 | [] | no_license | #!/usr/bin/bash
#
function bot_running() {
ps -ef|grep -v grep |grep "python3 Bot.py"
return $?
}
function start_bot() {
if bot_running; then
echo "Bot has already been running. exit..."
else
echo "Starting bot..."
source ~/bin/activate
python3 Bot.py 1>Bot.log 2>&1 &
echo "Bot started..."
fi
}
function stop_bot() {
while bot_running
do
ps -ef|grep -v grep |grep Bot.py | awk '{print $2}' | xargs kill -15
echo "Kill signal sent. Wating for bot to exit...."
date
echo
sleep 5
done
echo "Bot stopped..."
}
case "$1" in
start)
start_bot
;;
stop)
stop_bot
;;
restart)
stop_bot
start_bot
;;
*)
echo "Usage: botctl [start|stop|restart]"
esac
| true |
7fa5708810263b8f0f19c1f3191053a5c932dabf | Shell | mredar/automation | /ansible/roles/ingestion_app/files/copy_local_solrconf.sh | UTF-8 | 583 | 3.859375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
file_md5() {
echo `md5sum $1 | cut -d' ' -f1`
}
file=$1
src_dir=/heidrun/solr_conf
dest_dir=/opt/solr/dpla/solr/collection1/conf
srcfile=$src_dir/$file
destfile=$dest_dir/$file
test -z "$file" && exit 1
test -e $src_dir/$file || exit 1
# Skip overwriting the file if it has not changed
if [ -e "$destfile" ]; then
checksum_src=`file_md5 $srcfile`
checksum_dest=`file_md5 $destfile`
if [ $checksum_src == $checksum_dest ]; then
exit 0
fi
fi
cp $srcfile $destfile && chown root:root $destfile && chmod 0644 $destfile
echo "changed"
exit 0
| true |
709ebb23a2b4003e9f943b090115a496948a7221 | Shell | centminmod/centminmod-redis | /redis-keys.sh | UTF-8 | 1,347 | 4.1875 | 4 | [] | no_license | #!/bin/bash
######################################################
# get redis database keys
# written by George Liu (eva2000) centminmod.com
######################################################
# variables
#############
DT=$(date +"%d%m%y-%H%M%S")
######################################################
# functions
#############
if [ ! -f /usr/bin/redis-cli ]; then
echo "redis-cli not found"
exit 1
fi
get_keys() {
dbnumber="$1"
if [[ "$dbnumber" ]]; then
dbn="$dbnumber"
else
dbn=$(redis-cli info keyspace | awk -F : '/^db/ {print $1}' | cut -c3)
fi
for d in $dbn; do
redis-cli -n $d KEYS '*' | while read k; do key=$(echo "$k" | awk '{print $1}'); echo "---"; echo "redis key: $key"; echo -n "redis expiry ttl: "; redis-cli TTL "$key"; done
done
}
purge_keys() {
dbnumber="$1"
if [[ "$dbnumber" ]]; then
dbn="$dbnumber"
else
dbn=$(redis-cli info keyspace | awk -F : '/^db/ {print $1}' | cut -c3)
fi
for d in $dbn; do
redis-cli -n $d FLUSHDB | while read k; do echo "Purging redis database $d: $k"; done
done
}
######################################################
case "$1" in
get )
get_keys "$2"
;;
purge )
purge_keys
;;
* )
echo
echo "Usage:"
echo
echo "$0 get {redis_db_number}"
echo "$0 purge {redis_db_number}"
echo
;;
esac
exit | true |
60906258a93defa6703e8499f126ca1330a2b609 | Shell | ReactionMechanismGenerator/RMG-database | /trigger-rmg-tests.sh | UTF-8 | 1,459 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# This script is designed to be run by Github Actions workflow
# to trigger the RMG-tests at
# https://github.com/reactionmechanismgenerator/rmg-tests
set -e # exit with nonzero exit code if anything fails
git config --global user.name "RMG Bot"
git config --global user.email "rmg_dev@mit.edu"
BRANCH=${GITHUB_REF#refs/heads/}
echo "GITHUB_WORKSPACE: $GITHUB_WORKSPACE"
echo "BRANCH: $BRANCH"
echo "RMG_PY_BRANCH: $RMG_PY_BRANCH"
# URL for the official RMG-tests repository
REPO=https://${GH_TOKEN}@github.com/ReactionMechanismGenerator/RMG-tests.git
# create a temporary folder:
REPO_NAME=$(basename $REPO)
TARGET_DIR=$(mktemp -d /tmp/$REPO_NAME.XXXX)
REV=$(git rev-parse HEAD)
# clone RMG-tests repo in the newly created folder:
git clone ${REPO} ${TARGET_DIR}
# go inside the newly created folder:
cd $TARGET_DIR
# create a new branch in RMG-tests with the name equal to
# the branch name of the tested RMG-database branch:
if [ "$RMG_PY_BRANCH" == "main" ]
then
RMGTESTSBRANCH=rmgdb-$BRANCH
else
RMGTESTSBRANCH=rmgdbpy-$BRANCH
fi
git checkout -b $RMGTESTSBRANCH || true
git checkout $RMGTESTSBRANCH
# create an empty commit with the SHA-ID of the
# tested commit of the RMG-database branch:
if [ "$RMG_PY_BRANCH" == "main" ]
then
git commit --allow-empty -m rmgdb-$REV
else
git commit --allow-empty -m rmgdbpy-$REV-${RMG_PY_BRANCH}
fi
# push to the branch to the RMG/RMG-tests repo:
git push -f $REPO $RMGTESTSBRANCH > /dev/null
| true |
9625b83a90b93fb149a3932b24234e4d1eb10646 | Shell | raonadeem/OS-SSL | /enable_ssl.sh | UTF-8 | 1,610 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# this script will generate the certificates and modify
# the yaml files to enable ssl in public API.
index_file="/etc/pki/CA/index.txt"
serial_file="/etc/pki/CA/serial"
ssl_dir="/home/stack/ssl_cert"
cert_count=1000
total_certs=$(ls /etc/pki/CA/newcerts/ |wc -l)
new_count=$(($total_certs + cert_count))
rm -rf $ssl_dir
mkdir $ssl_dir ; cd $ssl_dir
# Initializing the signing host
if [ -f "$index_file" ]
then
echo "File $file_index already exists."
else
sudo touch /etc/pki/CA/index.txt
fi
if [ -f "$serial_file" ]
then
echo "File Already exists"
else
sudo touch /etc/pki/CA/serial
fi
sudo chmod 646 $serial_file
sudo echo "$new_count" > $serial_file
sudo chmod 644 $serial_file
# # Creating a certificate authority
openssl genrsa -out ca.key.pem 4096
# #openssl req -key ca.key.pem -new -x509 -days 7300 -extensions v3_ca -out ca.crt.pem
openssl req -key ca.key.pem -new -x509 -days 7300 -extensions v3_ca -out ca.crt.pem -subj '/CN=SAHABA/O=DETASAD/C=SA/ST=Riyadh/L=Riyadh/OU=sahaba/'
# # Adding the certificate authority to clients
sudo cp ca.crt.pem /etc/pki/ca-trust/source/anchors/
sudo update-ca-trust extract
# # Creating an SSL/TLS key and certificate signing request
openssl genrsa -out server.key.pem 2048
cp /etc/pki/tls/openssl.cnf .
openssl req -config openssl.cnf -key server.key.pem -new -out server.csr.pem -subj '/CN=SAHABA/O=DETASAD/C=SA/ST=Riyadh/L=Riyadh/OU=sahaba/'
# #Creating the SSL/TLS certificate
sudo openssl ca -config openssl.cnf -extensions v3_req -days 3650 -in server.csr.pem -out server.crt.pem -cert ca.crt.pem -keyfile ca.key.pem -batch
| true |
9613e9cd502d4af19c9439df474ba1409e6df860 | Shell | HDragon8/Riru-LocationReportEnabler | /build.sh | UTF-8 | 1,036 | 2.75 | 3 | [] | no_license | function copy_files {
# /data/misc/riru/modules/template exists -> libriru_template.so will be loaded
# Change "template" to your module name
# You can also use this folder as your config folder
NAME="location_report_enabler"
mkdir -p $TMP_DIR_MAGISK/data/misc/riru/modules/$NAME
cp $MODULE_NAME/template_override/riru_module.prop $TMP_DIR_MAGISK/data/misc/riru/modules/$NAME/module.prop
cp $MODULE_NAME/template_override/config.sh $TMP_DIR_MAGISK
cp $MODULE_NAME/template_override/module.prop $TMP_DIR_MAGISK
echo -n "310030" > $TMP_DIR_MAGISK/data/misc/riru/modules/$NAME/gsm.sim.operator.numeric
echo -n "us" > $TMP_DIR_MAGISK/data/misc/riru/modules/$NAME/gsm.sim.operator.iso-country
mkdir $TMP_DIR_MAGISK/data/misc/riru/modules/$NAME/packages
touch $TMP_DIR_MAGISK/data/misc/riru/modules/$NAME/packages/com.google.android.gms
touch $TMP_DIR_MAGISK/data/misc/riru/modules/$NAME/packages/com.google.android.gsf
touch $TMP_DIR_MAGISK/data/misc/riru/modules/$NAME/packages/com.google.android.apps.maps
} | true |
b7592c723e661af6350ef5e80b211374df955824 | Shell | agral/CompetitiveProgramming | /HackerRank/Utils/HackerRankVerify.sh | UTF-8 | 2,945 | 4.21875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Name: HackerRankVerify
# Description: Executes an answer to a HackerRank's challenge against all the testcases
# associated with it.
# Options: None, the script needs to be run from the challenge's root directory.
# (e.g. /path/to/hacker/rank/dir/Practice/Algorithms/Search/CountLuck)
# Created on: 20.10.2019
# Last modified: 22.03.2020
# Author: Adam Graliński (adam@gralin.ski)
# License: MIT
DIR_ORIGIN="${PWD}"
CHALLENGE_NAME="${PWD##*/}"
DIR_TESTCASES_IN="testcases/input"
DIR_TESTCASES_OUT="testcases/output"
log() {
echo -e "${@}"
}
loge() {
>&2 echo -e "${@}"
}
logv() {
if [ -n "${BE_VERBOSE}" ]; then
echo -e "${@}"
fi
}
abort() {
>&2 echo "Aborting."
exit 1
}
# run_and_compare `executable` `testcase_input_file` `expected_output_file`
run_and_compare() {
if [ "${#}" -ne 3 ]; then
loge "Critical: run_and_compare() called with ${#} arguments (3 required)."
abort
fi
# Note: ${1} should go without surrounding quotes.
diff "${3}" <(${1} <"${2}")
if [ "${?}" -ne 0 ]; then
loge "Testcase ${2} FAILED."
abort
fi
}
# run_test_suite `executable` `(lang)`
run_test_suite() {
if [ "${#}" -lt 2 ]; then
loge "Critical: run_test_suite() called with ${#} arguments (2 required)."
abort
fi
log "Running testcases for ${2} solution..."
for tc in "${TESTCASES[@]}"; do
logv "TC ${tc}"
expected_output_file="${tc//input/output}"
if [ ! -f "${expected_output_file}" ]; then
loge "Error: could not find expected answer file for TC ${tc}"
loge "(file: ${expected_output_file} does not exist)."
abort
fi
run_and_compare "${1}" "${tc}" "${expected_output_file}"
logv "${tc}: OK\n"
done
log "Testcases PASSED for ${2}."
}
### Handles the parameters: ###
while [ "${#}" -gt 0 ]; do
case "${1}" in
-v|--verbose)
BE_VERBOSE=1
logv "Verbose output is set."
;;
esac
shift
done
# Verifies that the testcases directory exists, stores all testcase files in `TESTCASES` array:
if [ ! -d "${DIR_TESTCASES_IN}" ] || [ ! -d "${DIR_TESTCASES_OUT}" ]; then
loge "Error: testcases have not been found at ${DIR_TESTCASES_IN}, ${DIR_TESTCASES_OUT}."
abort
fi
TESTCASES=()
while IFS= read -r -d $'\0'; do
TESTCASES+=("$REPLY")
done < <(find "${DIR_TESTCASES_IN}" -type f -name "*.txt" -print0)
unset ANY_TESTS_RAN
EXECUTABLE_CPP="${DIR_ORIGIN}/cpp/${CHALLENGE_NAME}.exe"
if [ -f "${EXECUTABLE_CPP}" ]; then
run_test_suite "${EXECUTABLE_CPP}" "cpp"
ANY_TESTS_RAN=1
else
logv "Skipping tests for cpp - solution file not found."
fi
if [ -f "${DIR_ORIGIN}/java/${CHALLENGE_NAME}.class" ]; then
run_test_suite "java -classpath ${DIR_ORIGIN}/java ${CHALLENGE_NAME}" "java"
ANY_TESTS_RAN=1
else
logv "Skipping tests for java - solution file not found."
fi
if [ -z "${ANY_TESTS_RAN}" ]; then
loge "No tests have been executed."
abort
fi
| true |
92fd2e54ce23ce8a9698e6056aa109f9e6e9e7af | Shell | joshsalvi/iterm2config | /.zshrc | UTF-8 | 10,723 | 2.71875 | 3 | [] | no_license | # =============================================================================
# ZSH Config
# =============================================================================
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="/Users/jq210/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
#ZSH_THEME="robbyrussell"
ZSH_THEME="powerlevel10k/powerlevel10k"
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to automatically update without prompting.
DISABLE_UPDATE_PROMPT="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS=true
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git)
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
# =============================================================================
# Plugins
# =============================================================================
# Check if zplug is installed
[ ! -d ~/.zplug ] && git clone https://github.com/zplug/zplug ~/.zplug
source ~/.zplug/init.zsh
# zplug
zplug 'zplug/zplug', hook-build:'zplug --self-manage'
# zsh-users
zplug "zsh-users/zsh-completions"
zplug "zsh-users/zsh-autosuggestions"
zplug "zsh-users/zsh-history-substring-search"
# zplug "zsh-users/zsh-syntax-highlighting", defer:2
# Supports oh-my-zsh plugins and the like
if [[ $OSTYPE = (linux)* ]]; then
zplug "plugins/archlinux", from:oh-my-zsh, if:"which pacman"
zplug "plugins/dnf", from:oh-my-zsh, if:"which dnf"
fi
if [[ $OSTYPE = (darwin)* ]]; then
zplug "plugins/osx", from:oh-my-zsh
zplug "plugins/brew", from:oh-my-zsh, if:"which brew"
zplug "plugins/macports", from:oh-my-zsh, if:"which port"
fi
zplug "plugins/archlinux", from:oh-my-zsh
zplug "plugins/common-aliase", from:oh-my-zsh
zplug "plugins/colored-man-pages", from:oh-my-zsh
zplug "plugins/colorize", from:oh-my-zsh
zplug "plugins/command-not-found", from:oh-my-zsh
zplug "plugins/copydir", from:oh-my-zsh
zplug "plugins/copyfile", from:oh-my-zsh
zplug "plugins/cp", from:oh-my-zsh
zplug "plugins/dircycle", from:oh-my-zsh
zplug "plugins/encode64", from:oh-my-zsh
zplug "plugins/extract", from:oh-my-zsh
zplug "plugins/history", from:oh-my-zsh
zplug "plugins/nmap", from:oh-my-zsh
zplug "plugins/tmux", from:oh-my-zsh
zplug "plugins/tmuxinator", from:oh-my-zsh
zplug "plugins/urltools", from:oh-my-zsh
zplug "plugins/web-search", from:oh-my-zsh
zplug "plugins/z", from:oh-my-zsh
zplug "plugins/git", from:oh-my-zsh
zplug "plugins/go", from:oh-my-zsh
zplug "plugins/svn", from:oh-my-zsh
zplug "plugins/node", from:oh-my-zsh
zplug "plugins/npm", from:oh-my-zsh
zplug "plugins/bundler", from:oh-my-zsh
zplug "plugins/gem", from:oh-my-zsh
zplug "plugins/rbenv", from:oh-my-zsh
zplug "plugins/pip", from:oh-my-zsh
zplug "plugins/sudo", from:oh-my-zsh
# Enhanced cd
zplug "b4b4r07/enhancd", use:enhancd.sh
# Enhanced dir list with git features
zplug "supercrabtree/k"
# Auto-close and delete matching delimiters
zplug "hlissner/zsh-autopair", defer:2
# Docker completion
zplug "felixr/docker-zsh-completion"
# Jump back to parent directory
zplug "tarrasch/zsh-bd"
# Simple zsh calculator
zplug "arzzen/calc.plugin.zsh"
# Directory colors
zplug "seebi/dircolors-solarized", ignore:"*", as:plugin
# Load theme
zplug "bhilburn/powerlevel9k", use:powerlevel9k.zsh-theme
# =============================================================================
# Options
# =============================================================================
# Improved LESS option
export LESS="--tabs=4 --no-init --LONG-PROMPT --ignore-case --quit-if-one-screen --RAW-CONTROL-CHARS"
# Watching other users
watch=(notme) # Report login/logout events for everybody except ourself.
LOGCHECK=60 # Time (seconds) between checks for login/logout activity.
REPORTTIME=5 # Display usage statistics for commands running > 5 sec.
WORDCHARS="\"*?_-[]~&;!#$%^(){}<>\""
# History
HISTFILE=~/.zsh_history
HISTSIZE=100000
SAVEHIST=100000
setopt autocd # Allow changing directories without `cd`
setopt append_history # Dont overwrite history
setopt extended_history # Also record time and duration of commands.
setopt share_history # Share history between multiple shells
setopt hist_expire_dups_first # Clear duplicates when trimming internal hist.
setopt hist_find_no_dups # Dont display duplicates during searches.
setopt hist_ignore_dups # Ignore consecutive duplicates.
setopt hist_ignore_all_dups # Remember only one unique copy of the command.
setopt hist_reduce_blanks # Remove superfluous blanks.
setopt hist_save_no_dups # Omit older commands in favor of newer ones.
# Changing directories
setopt pushd_ignore_dups # Dont push copies of the same dir on stack.
setopt pushd_minus # Reference stack entries with "-".
setopt extended_glob
# =============================================================================
# Aliases
# =============================================================================
# SSH
alias sshnmr="echo "Connecting\ to\ NMR..." &&ssh jq210@door.nmr.mgh.harvard.edu"
alias ssheris="echo "Connecting\ to\ server\ ERISone..." && ssh jq210@erisone.partners.org"
# Helper commands
alias rm='rm -v'
alias c='clear'
# Generic command adaptations
alias grep='() { $(whence -p grep) --color=auto $@ }'
alias egrep='() { $(whence -p egrep) --color=auto $@ }'
# Directory management
alias la='ls -a'
alias ll='ls -l'
alias lal='ls -al'
alias llt='ls -alt'
alias lalt='ls -alt'
alias d='dirs -v'
alias p='pushd'
# Update
# alias update="apt-get update && apt-get upgrade && apt-get dist-upgrade"
alias update="sudo softwareupdate -ia"
# =============================================================================
# Anaconda
# =============================================================================
# export PATH="$HOME/anaconda2/bin:$PATH"
# cat >> ~/.bashrc << END
# PATH=\$HOME/miniconda3/bin:\$PATH
# END
source $HOME/.bashrc
# =============================================================================
# Completions
# =============================================================================
zstyle ':completion:*' rehash true
# case-insensitive (all), partial-word and then substring completion
zstyle ":completion:*" matcher-list \
"m:{a-zA-Z}={A-Za-z}" \
"r:|[._-]=* r:|=*" \
"l:|=* r:|=*"
zstyle ":completion:*:default" list-colors ${(s.:.)LS_COLORS}
# =============================================================================
# Startup commands
# =============================================================================
#export {CC,CXX,MPICXX}=/usr/local/bin/gcc-6
alias gcc="gcc-7"
export CC=/usr/local/bin/gcc-7
export CXX=/usr/local/bin/g++-7
source activate moseq2
fast-theme free
clear
# Dynamic MOTD
php -f /etc/dynmotd | bash
test -e "${HOME}/.iterm2_shell_integration.zsh" && source "${HOME}/.iterm2_shell_integration.zsh"
| true |
2a62cbbc3c784a890c484fef7583b79adfb45c3e | Shell | karineek/CsmithEdge | /scripts/CsmithEdge/3-constructModifyTests.sh | UTF-8 | 4,762 | 3.78125 | 4 | [] | no_license | #!/bin/bash
##### Keep all the safe ops we need
function keep_required_safe {
testcaseName=$1
testcaseModify=$folder/'__'$testcaseName'M.c'
filename="$testcaseRes"
while read -r line; do
data="$line"
# Get locations:
temp=${#data}
size=$((temp - 44 -1))
var="${data:44:$size}"
isFirst=1
locF=0
funcF=0
locations=$(echo $var | tr "," " \n")
for loc in $locations
do
if (($isFirst==1))
then
isFirst=0
funcF=$loc
else
locF=$loc
fi
#echo "[$loc]"
done
#echo "location is: [$locF]"
#echo "Function number is: [$funcF]"
#Replace the rest of the calls to unsafe macros
keyword_raw='/* ___REMOVE_SAFE__OP *//*'$locF'*//* ___SAFE__OP */('
keyword_regexp="$(printf '%s' "$keyword_raw" | sed -e 's/[]\/$*.^|[]/\\&/g')"
replacement_raw='('
#Check if it is a macro or a function
if [ $headerMode -eq 2 ] && [[ " ${invocationsMacrosMix[@]} " =~ " ${locF} " ]]; then
## in mix mode, arr contains the value locF
replacement_raw='_mixM('
fi
replacement_regexp="$(printf '%s' "$replacement_raw" | sed -e 's/[\/&]/\\&/g')"
sed -i "s/$keyword_regexp/$replacement_regexp/g" $testcaseModify
done < "$filename"
}
#### Remove safe calls when not required (when $headerMode -eq 2)
function replace2unsafeMix {
testcaseName=$1
testcaseModify=$folder/'__'$testcaseName'M.c'
for locF in "${invocationsMacrosMix[@]}"; do
#Replace the rest of the calls to unsafe macros
keyword_raw='/* ___REMOVE_SAFE__OP *//*'$locF'*/'
keyword_regexp="$(printf '%s' "$keyword_raw" | sed -e 's/[]\/$*.^|[]/\\&/g')"
replacement_raw='_unsafe_macro_mixM/*'$locF'*/'
replacement_regexp="$(printf '%s' "$replacement_raw" | sed -e 's/[\/&]/\\&/g')"
sed -i "s/$keyword_regexp/$replacement_regexp/g" $testcaseModify
done
}
function replace2unsafe {
testcaseName=$1
testcaseModify=$folder/'__'$testcaseName'M.c'
#Replace the rest of the calls to unsafe macros
keyword_raw='/* ___REMOVE_SAFE__OP */'
keyword_regexp="$(printf '%s' "$keyword_raw" | sed -e 's/[]\/$*.^|[]/\\&/g')"
replacement_raw='_unsafe_macro'
replacement_regexp="$(printf '%s' "$replacement_raw" | sed -e 's/[\/&]/\\&/g')"
sed -i "s/$keyword_regexp/$replacement_regexp/g" $testcaseModify
}
#################################### Modify TEST ####################################
function modify_test {
## Single test - create a test case and its precompilation
testcaseName='test'$1 # testcase name
testcase=$2 # annotated testcase
safelist=$3 # must stay safe list
testcaseModify=$folder/'__'$testcaseName'M.c' # new testcase with RRS too
# Modify the test (the preprocessed file)
cp "$testcase" "$testcaseModify"
# Keep ops required to be safe as a macros or functions
keep_required_safe $testcaseName ## Uses: invocationsMacrosMix and testcaseRes
# Replace the rest of the calls in mix mode to unsafe macros according to invocationsMacrosMix
if [ $headerMode -eq 2 ] && [ ${#invocationsMacrosMix[@]} -gt 0 ]; then
replace2unsafeMix $testcaseName ## Uses: invocationsMacrosMix
fi
# Replace the rest of the calls to unsafe macros or functions
replace2unsafe $testcaseName
}
#################################### PREPARE GEN TEST ####################################
function update_csmith_cmd_options {
confg=$1
## Arrays to choose which one to set to functions or macros if mix
lastline=`tail -1 $confg`
IFS=' ' read -r -a invocationsMacrosMix <<< "$lastline"
## Check which version of headers we shall take
res1=`grep "USE_MATH_MACROS" $confg | wc -l`
if [[ "$res1" == "1" ]]; then
headerMode=1
else
res2=`grep "USE_MATH_MIX" $confg | wc -l`
if [[ "$res2" == "1" ]]; then
headerMode=2
fi
fi
}
################### MAIN ###############################
# Single iteration, requires a compiler and a seed
# Basic parameters
seed=$1 # File with all the seeds to use
annotated_testcase=$2 # Testcase
testcaseRes=$3 # Safelist
testcaseConfgFile=$4 # name of config file
folder=$5 # output folder
# Check if second parameter is a number
re='^[0-9]+$'
if ! [[ $seed =~ $re ]] ; then
echo ">> error: Not a number <$seed>." >&2; exit 1
fi
# check there is a safelist of calls
if [ ! -f $testcaseRes ]; then
echo ">> error: No safelist file <$testcaseRes>." >&2; exit 1
fi
# Check the configuration file exist
if ! test -f "$testcaseConfgFile" ; then
echo ">> error: No configuration file found for this seed $seed. Missing <$testcaseConfgFile>." >&2; exit 1
fi
## Build testcase
invocationsMacrosMix=() ## Invocation to set as function wrapper
headerMode=0 ## Assume function version
# Get configuration data: Update $CSMITH_USER_OPTIONS
update_csmith_cmd_options "$testcaseConfgFile"
# Run a single test
modify_test "$seed" "$annotated_testcase"
## END ##
| true |
ab0484b5d2c09e5928734a32cf60983eac729ccc | Shell | choudarykvsp/ctp-manual | /reference/calculators/extract_calculators.sh | UTF-8 | 695 | 3.5 | 4 | [] | no_license | #!/bin/bash -e
VOTCASHARE="$(csg_call --show-share)"
texfile="$PWD/calculators.tex"
rm -f $texfile; touch $texfile
for package in ctp_tools ctp_run ctp_parallel ctp_dump kmc_run; do
calculators="$(${package} --list | sed -ne 's/^\s\+\([a-z,0-9]*\)\s*\(.*\)/\1/p')"
echo $calculators
# loop over all calculators
for calculator in ${calculators}; do
library="$(echo ${package} | sed -e 's/\_[^\_]*$//g')"
xmlfile=${VOTCASHARE}/${library}/xml/$calculator.xml
echo $calculator
echo $xmlfile
if [ ! -f "$xmlfile" ]; then
continue
fi
echo "votca_property --file $xmlfile --format TEX --level 2"
votca_property --file $xmlfile --format TEX --level 2 >> $texfile
done
done
| true |
54cdb24fca2c59c433accdc2b5524ebf8b8e3a8d | Shell | mdwint/dotfiles | /secrets.sh | UTF-8 | 401 | 4.03125 | 4 | [] | no_license | #!/bin/sh
set -e
usage() {
echo "Usage: $0 <backup|restore> <archive-filename> [<source-path>...]" 1>&2
exit 1
}
[ $# -ge 2 ] || usage
CIPHER=aes256
COMMAND=$1
ARCHIVE=$2
shift 2
case $COMMAND in
backup)
tar -czf - -C "$HOME" "$@" | openssl enc -$CIPHER -e -out "$ARCHIVE"
;;
restore)
openssl enc -$CIPHER -d -in "$ARCHIVE" | tar -xzk -C "$HOME"
;;
*)
usage
;;
esac
| true |
cc928b537e696089a8cfd47e6b48ab96fcea9313 | Shell | kevinclev/kpc-tweeter | /p_validator.sh | UTF-8 | 525 | 3.953125 | 4 | [] | no_license | not_p=()
# Make sure each definition starts with a P
while read line
do
initial="$(echo $line | head -c 1)"
if [[ $initial != "P" ]];then
echo "$line does not start with a P"
not_p+=("$line")
fi
done < definitions.txt
if [ ${#not_p[@]} -ne 0 ];then
echo "Incorrect definitions:"
for i in "${not_p[@]}"
do
echo $i
done
exit 1
fi
# Check for duplicates
dupes=`sort definitions.txt | uniq -d`
if [ ! -z "$dupes" ];then
echo "There are duplicate values!"
echo $dupes
exit 1
fi
| true |
966da645ebbb664e4e7e3062aa7824f56fd7d231 | Shell | 2-IMMERSE/timeline-service | /bin/toCDNdmapps.sh | UTF-8 | 308 | 3.0625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
dirname=`dirname $0`
case x$1 in
x)
echo Usage: $0 sampledir
echo Uploads $dirname/sampledir to CDN dmapps/sampledir
exit 1
;;
esac
location=`basename $1`
set -x
aws s3 sync $1 s3://origin.platform.2immerse.eu/dmapps/$location/
echo 'URL:' https://origin.platform.2immerse.eu/dmapps/$location/
| true |
1832342ea47bc80b849ea9b47a83b812b2a530e1 | Shell | Werkov/dotfiles | /.bash_aliases | UTF-8 | 975 | 2.640625 | 3 | [] | no_license | alias ll="ls -l"
alias la="ls -a"
alias ..="cd .."
alias j="jobs"
# --Git aliases--
alias gst='git status'
alias gdf='git diff'
alias gad='git add'
alias gbl='git blame'
alias glog='git log --oneline'
alias gco='git commit'
alias grm='git rm'
alias gcp='git cherry-pick -x'
alias gdt='git describe --tags'
alias gda='git describe --all'
# --Bazaar aliases--
alias bst='bzr status'
alias bdf='bzr diff --diff-options -wu'
alias bad='bzr add'
alias blog='bzr log --line | less'
# -- Misc --
alias vlna='vlna -v KkSsVvZzOoUuAaIi'
alias mtime="/usr/bin/time -f \"real time:\t%e s\nmax memory:\t%M kB\n\""
alias muttf='mutt -F ~/.mutt/muttrc.fykos'
alias hd='hexdump -C'
alias cscope-init="find . \\( -name '*.c' -o -name '*.h' -o -name '*.cc' -o -name '*.hh' \\) ! -path './.*' | cscope -b -i -"
alias drafts='cd ~/projects/Werkov.github.io/_drafts'
alias sc='systemctl'
alias ssc='sudo systemctl'
alias ip='ip -c'
[ -f ~/.bash_aliases.local ] && . ~/.bash_aliases.local
| true |
4be36916c1973ce0e9d2edf049b35c4346b76791 | Shell | slikk66/dotfiles | /bin/ops | UTF-8 | 1,210 | 3.375 | 3 | [] | no_license | #!/usr/bin/env bash
# CI/CD pipes will pass in a CONTAINER to use, default is to use the local user's latest image (laptop mode)
if [[ "${CONTAINER}" == "" ]]; then
CONTAINER=${USER}/ops:latest
fi
# Override interactive mode for CI/CD pipes
if [[ "${NONINTERACTIVE}" == "" ]]; then
FLAGS="-it"
else
FLAGS=""
fi
# Local data mount
LOCAL_DATA="-v ${PWD}:/data"
# DEV option to have editable shared pulumi
if [[ "${MOUNT_SHARED_PULUMI}" != "" ]]; then
PULUMI_DATA="-v ${PWD}/${MOUNT_SHARED_PULUMI}/pulumi:/data/pulumi"
fi
INFRA_DATA="-v ${PWD}/pulumi/infra:/data/pulumi/infra"
# debug/development options
if [[ "${NO_MOUNT_AWS}" == "" ]]; then
AWS_DATA="-v ${HOME}/.aws:/root/.aws"
fi
if [[ "${NO_MOUNT_SSH}" == "" ]]; then
SSH_DATA="-v ${HOME}/.ssh:/root/.ssh"
fi
# run bootstrapped image with remove flag
docker run --rm ${FLAGS} \
-e HOME=/root \
-e AWS_PROFILE \
-e AWS_DEFAULT_PROFILE \
-e AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY \
-e AWS_SESSION_TOKEN \
-e TASK_REVISION \
-e PULUMI_CONFIG_PASSPHRASE="" \
${LOCAL_DATA} \
${PULUMI_DATA} \
${INFRA_DATA} \
${SSH_DATA} \
${AWS_DATA} \
-w /data \
${CONTAINER} "$@"
| true |
c50c947ae6335e5cd33432e9d4ad81575af76036 | Shell | KartikS11/Problem_day5_to_day8 | /day5/day5-IF/NuminWord.sh | UTF-8 | 231 | 3.328125 | 3 | [] | no_license | #!/bin/bash
#echo -n "Enter number:"
#read n
#echo "Number in words:"
for((i=1;i<2;i++))
do
echo -n "Enter number:"
read n
echo "Number in words:"
if [ $n -eq 0 ]
then
echo "zero"
elif [ $n -eq 1 ]
then
echo "one"
fi
done
| true |
22a487f049b89732b4d750542cd027e41bf64e0f | Shell | rse/typopro-src | /src/Mplus/convert.sh | UTF-8 | 267 | 3.265625 | 3 | [
"mplus"
] | permissive | #!/bin/sh
for from in *.ttf; do
to=`echo $from | sed -e 's;mplus-;Mplus;' -e 's;bold;Bold;' -e 's;black;Black;' -e 's;heavy;Heavy;' -e 's;light;Light;' -e 's;medium;Medium;' -e 's;regular;Regular;' -e 's;thin;Thin;'`
echo "$from -> $to"
mv $from $to
done
| true |
3a417267b6a7dce900f28cf0f6302c39f714d850 | Shell | amerlyq/airy | /%wf/dev/java/env | UTF-8 | 375 | 2.921875 | 3 | [
"MIT"
] | permissive | # vim:ft=sh
### Java SDK ###
if [[ -d /usr/java/jdk1.8.0_31 ]]; then
# export JAVA32_HOME=/usr/lib/jvm/java-7-openjdk-i386
# export JAVA64_HOME=/usr/lib/jvm/java-7-openjdk-amd64
#/usr/lib/jvm/java-8-oracle
export JAVA64_ORACLE_HOME=/usr/java/jdk1.8.0_31
export JAVA_HOME="$JAVA64_ORACLE_HOME"
export PATH="$PATH:$JAVA_HOME/bin:$JAVA_HOME/include"
fi
| true |
d74a52a41606a8034dc13a56906eb33ef6fa00fb | Shell | demoyuw/tools | /nova/evacuate.sh | UTF-8 | 284 | 2.640625 | 3 | [] | no_license | source /home/localadmin/creds/yuwei-openrc.sh
DEST_HOST="COM2"
vm_list=($(nova list |head -n -1 | sed '1,3'd | awk '{print $2}'))
for (( i=0; i<=((${#vm_list[@]}-1)); i++ ));
# for (( i=0; i<=1; i++ ));
do
echo ${vm_list[i]} ;
nova evacuate ${vm_list[i]} ${DEST_HOST}
done
| true |
11b054ce0fd3c4afe610f7806763c1787091531f | Shell | nixnub/Shell-Scripts | /acu | UTF-8 | 53,559 | 3.84375 | 4 | [] | no_license | #!/bin/bash
#############################################################
# Author : Ahmed Fekry, ahmed.fekry0@gmail.com #
# License : GPL <http://www.gnu.org/licenses/gpl.html #
# #
#############################################################
if [[ $# != 1 ]] ; then
echo "Usage: $0 <cfg file>"
exit 2
else
cfg_file=$1
fi
if [[ ! -e $cfg_file || ! -f $cfg_file ]] ; then
echo "No such file $1"
exit 3
else
. ./$cfg_file 2>&1
fi
if [[ $? != 0 ]] ; then
echo "Unable to load configuration file $cfg_file ".
exit 100
fi
if [[ "$user" != "admin" ]] ; then
echo "Please run as admin"
exit 1
fi
if [[ ! -d $lock ]] ; then
mkdir $lock
fi
if [[ ! -d $secdir ]] ; then
mkdir $secdir
fi
lock() {
touch $lock/.$1.lock
}
unlock() {
if [[ -e $lock/.$1.lock ]] ; then
rm $lock/.$1.lock
fi
}
loading() {
var=$1
while [ "$var" -lt 10 ] ; do
for i in '-' '\' '|' '/' ; do
echo -ne "\r$i" ; sleep 0.1
var=`expr $var + 1`
done
done
echo -ne "\r"
}
log() {
echo -ne "$timestamp : $1\n" | tee -a $logfile
}
check() {
read -r -e -p $'\nproceed ?[y/n] ' answer
case $answer in
[yY][eE][sS]|[yY])
proceed="TRUE"
;;
[nN][oO]|[nN])
proceed="FALSE"
exit 6
;;
*)
exit 6
;;
esac
}
conncheck() {
echo -ne "\n$(tput setaf 6)Connectivity check:$(tput sgr0)\n\n"
if [[ proceed -eq "TRUE" ]] ; then
echo -ne "\n$(tput setaf 6)ICMP checks:$(tput sgr0)\n"
port=22
ping -c$ping_cnt $ipmi_ip &>/dev/null
if [[ $? -eq 0 ]] ; then
echo -ne "IPMI IP : $(tput setaf 2)PING [OK]$(tput sgr0)\n"
else
echo "IPMI IP : Ping timeout!"
echo "Please check IPMI connection" && exit 11
fi
ping -c$ping_cnt $utility_node_ip &>/dev/null
if [[ $? -eq 0 ]] ; then
echo "Utility Node : $(tput setaf 2)PING [OK]$(tput sgr0)"
else
echo "Utility Node : Ping timeout!"
echo "Please check utility node connection" && exit 11
fi
ping -c$ping_cnt $storage_node_ip &>/dev/null
if [[ $? -eq 0 ]] ; then
echo "Storage Node : $(tput setaf 2)PING [OK]$(tput sgr0)"
else
echo "Storage Node : Ping timeout!"
echo "Please check Storage node connection" && exit 11
fi
ping -c$ping_cnt $ntp_ip &>/dev/null
if [[ $? -eq 0 ]] ; then
echo "NTP Server : $(tput setaf 2)PING [OK]$(tput sgr0)"
else
echo "NTP Server : Ping timeout!"
echo "Please check NTP Server connection" && exit 11
fi
echo -ne "\n$(tput setaf 6)SSH Checks:$(tput sgr0)\n"
2>/dev/null 1>/dev/null echo "" > /dev/tcp/$utility_node_ip/$port
if [[ $? == 0 ]] ; then
echo -ne "Utility Node : $(tput setaf 2)SSH [OK]$(tput sgr0)\n"
else
echo -ne "Utility Node : $(tput setaf 1)SSH [UNREACHABLE]$(tput sgr0)\n"
echo "Please check SSH PORT 22 on utility node $utility_node_ip" && exit 11
fi
2>/dev/null 1>/dev/null echo "" > /dev/tcp/$storage_node_ip/$port
if [[ $? == 0 ]] ; then
echo -ne "Storage Node : $(tput setaf 2)SSH [OK]$(tput sgr0)\n"
else
echo -ne "Storage Node : $(tput setaf 1)SSH [UNREACHABLE]$(tput sgr0)\n"
echo "Please check SSH PORT 22 on utility node $utility_node_ip" && exit 11
fi
fi
echo -ne "\n$(tput setaf 2)All good!\n\n$(tput sgr0)"
check
if [[ $proceed -eq "TRUE" ]] ; then
echo -e "\n$(tput setaf 2)Proceeding...\n$(tput sgr0)"
loading 1
else
exit 6
fi
}
# TASK 1
# Load SSH Keys
task1() {
lock 1
echo "########################"
echo "$(tput setab 7)$(tput setaf 0)TASK #1 - LOAD SSH KEYS $(tput sgr0)"
echo "########################"
log "(Starting Task 1)"
log "Starting SSH Agent..."
eval `$bin/ssh-agent` &> $logfile
if [[ $? == 0 ]] ; then
log "SSH Agent started successfully"
elif [[ $? != 0 ]] ; then
log "SSH Agent failed to start"
fi
log "Adding admin key identity "
$bin/ssh-add ~admin/.ssh/dpnid | tee -a $logfile
if [[ $? != 0 ]] ; then
log "Unable to add admin key identity .. exiting !"
exit 25
fi
log "Listing identity:"
echo ""
$bin/ssh-add -l
if [[ $? != 0 ]] ; then
log "error listing identity"
exit 25
fi
log "Task 1 completed successfully"
echo -ne "$(tput setaf 2)\nSuccess\n$(tput sgr0)"
success="TRUE"
unlock 1
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task2
fi
}
# TASK 2
# proactive_check dir
task2() {
lock 2
echo "#####################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #2 - proactive_check directory $(tput sgr0)"
echo "#####################################"
log "(Starting Task 2)"
cd $home
if [[ -d proactive_check ]] ; then
log "proactive_check directory exists"
if [[ -e proactive_check/proactive_check.pl ]] ; then
log "proactive_check.pl exists"
if [[ -x "proactive_check/proactive_check.pl" ]] ; then
log "Executable bit already set"
else
log "Setting executable bit"
chmod u+x $home/acu/proactive_check/proactive_check.pl
fi
fi
else
log "proactive_check does not exist, please redownload the package"
exit 200
fi
echo -ne "$(tput setaf 2)\nSuccess\n$(tput sgr0)"
success="TRUE"
unlock 2
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task3
fi
}
# TASK 3
# Run proactive_check
task3() {
lock 3
echo "##############################"
echo "$(tput setab 7)$(tput setaf 0)TASK #3 - run proactive_check $(tput sgr0)"
echo "##############################"
log "(Starting Task 3)"
log "Running proactive_check script"
$home/proactive_check/proactive_check.pl --addnode
success="TRUE"
unlock 3
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task4
fi
}
# TASK 4
# Enable Service Mode
task4() {
lock 4
echo "##############################"
echo "$(tput setab 7)$(tput setaf 0)TASK #4 - Enable Service Mode $(tput sgr0)"
echo "##############################"
log "(Starting Task 4)"
log "Enabling Service Mode"
$home/proactive_check/proactive_check.pl --servicemode=3
echo "$(tput setaf 3)Please verify that service mode is enabled for 3 hours $(tput sgr0)"
log "Checking if Service Mode is enabled"
$home/proactive_check/proactive_check.pl
success="TRUE"
unlock 4
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task5
fi
}
# TASK 5
# Collect System Info
task5() {
lock 5
if [ ! -e $_sysinfodir ] ; then
mkdir $_sysinfodir
fi
echo "##############################"
echo "$(tput setab 7)$(tput setaf 0)TASK #5 - Collect System Info $(tput sgr0)"
echo "##############################"
log "(Starting Task 5)"
log "Collecting system information from Utility node"
log "Running status.dpn"
echo -ne "\n$(tput setaf 3)NOTE: Please Validate the GSAN version reported on all the data nodes is running a consistent version."
echo -ne "\nAlso validate that the 'Percent Full' is within 2% on all nodes and that the RunLevel is at fullaccess\n\n\n$(tput sgr0)"
log "Status.dpn:"
status.dpn | tee $_sysinfodir/status.dpn.out
loading 1
log "nodenumbers:"
nodenumbers | tee $_sysinfodir/nodenumbers.out
loading 1
log "nodedb:"
nodedb print --nodes=0.all+ --addr --id | tee $_sysinfodir/nodedb.out
loading 1
log "Loading dpn keys"
log "***** Starting SSH Agent"
eval `$bin/ssh-agent`
if [[ $? == 0 ]] ; then
log "SSH Agent started successfully"
elif [[ $? != 0 ]] ; then
log "SSH Agent failed to start"
fi
log "Adding dpn keys identity "
$bin/ssh-add ~admin/.ssh/dpnid
if [[ $? == 0 ]] ; then
log "dpn keys added successfully"
elif [[ $? != 0 ]] ; then
log "***** Error: failed to add dpn keys"
fi
log "***** Listing identity"
$bin/ssh-add -l
log "Verifying node types of existing grid:"
loading 1
mapall --user=root --noerror $avhome/bin/syscheck --sysconfigdbfile=$avhome/var/sysconfigdb.xml | tee $_sysinfodir/mapall.out
log "Verifying if the system is a RAIN configuration:"
loading 1
mapall --user=root --noerror 'ls /data01/cur |grep par' |grep -v probe | wc -l | tee $_sysinfodir/mapall.out
# the following commands to be executed on the new storage node
# can be executed remotely from the utility node
# copy syscheck and sysconfigdb.xml to the new storage node
log "Copying syscheck to storage node : $storage_node_ip"
scp /usr/local/avamar/bin/syscheck root@$storage_node_ip:/usr/local/avamar/src/
if [[ $? != 0 ]] ; then
log "Failed to copy syscheck to $storage_node_ip"
else
log "syscheck copied successfully to $storage_node_ip"
fi
log "Copying sysconfigdb.xml to storage node : $storage_node_ip"
scp /usr/local/avamar/var/sysconfigdb.xml root@$storage_node_ip:/usr/local/avamar/src/
if [[ $? != 0 ]] ; then
log "Failed to copy sysconfigdb.xml to $storage_node_ip"
else
log "sysconfigdb.xml copied successfully to $storage_node_ip"
fi
# Verify the new storage node is the same node type as the existing grid
ssh -x root@$storage_node_ip '/usr/local/avamar/src/syscheck --sysconfigdbfile=/usr/local/avamar/src/sysconfigdb.xml'
ssh -x root@$storage_node_ip '/usr/local/avamar/src/syscheck --sysconfigdbfile=/usr/local/avamar/src/sysconfigdb.xml --verbose 2>&1 | grep "<hwcheck"'
ssh -x root@$storage_node_ip 'ls /data0?'
success="TRUE"
unlock 5
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task8
fi
}
# TASK 6
# Update checkpoint config
task6() {
lock 6
echo "##########################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #6 - Update Checkpoint Configuration $(tput sgr0)"
echo "##########################################"
log "(Starting Task 6)"
log "Collecting current cpmostrecent and cphfschecked values"
avmaint config --ava | grep cp
log "Changing most recent checkpoints value to 10"
avmaint config --ava cpmostrecent=10
log "Validating that cpmostrecent value changed"
avmaint config --ava | grep cp
log "Changing number of recent hfschecked retained"
avmaint config --ava cphfschecked=5
log "Validating that cphfschecked value changed"
avmaint config --ava | grep cp
log "Validating that balancing is disabled ( balancemin=0 )"
avmaint config --ava | grep balancemin
echo -ne " Would you like to set balancemin to 0 ? "
_answered=0
while [[ $_answered != 1 ]] ; do
read -r -e -p $'\npress n to continue and y to set balancemin to 0 [y/n]: ' answer
case $answer in
[yY][eE][sS]|[yY])
log "Setting balancemin to 0"
avmaint config --ava balancemin=0
log "Validating that balancemin equals 0"
avmaint config --ava | grep balancemin
_answered=1
;;
[nN][oO]|[nN])
log "Continuing..."
_answered=1
continue
;;
*)
_answered=0
;;
esac
done
log "Verifying that all stripes are online ( server is not migrating stripes )"
log "Wait for 2 minutes to verify all stripes are online "
for i in {1..5}; do
status.dpn
sleep 5
done
log "Disabling asynchronous crunching"
avmaint config --ava asynccrunching=false
log "Verifying that asynchronous crunching is disabled"
avmaint config --ava | grep crunch
success="TRUE"
unlock 6
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task7
fi
}
# TASK 7
# Perform a checkpoint with
# full validation
task7() {
lock 7
echo "####################################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #7 - Perform a checkpoint with Full Validation $(tput sgr0)"
echo "####################################################"
log "(Starting Task 7)"
log "Checking for a recent validated checkpoint"
mccli checkpoint show --verbose=TRUE | tee -a $logfile
echo -ne "$(tput setab 0)$(tput setaf 5)Note:$(tput sgr0)\nIf there is no validated checkpoint within the past 36 hours,then\ncontinue with this task to create a new validated checkpoint.\nHowever, if there is a validated checkpoint within the past 36 hours,\nthen skip this task and continue with the rest of the procedure.$(tput sgr0)"
ret=0
while [[ $ret -ne "1" ]] ; do
read -r -e -p $'\n\nProceed with this task and create a new validated checkpoint ? [y/n] : ' answer
case $answer in
[yY][eE][sS]|[yY])
ret=1
;;
[nN][oO]|[nN])
log "skipping task7 - checkpoint validation/creation"
task8
;;
*)
ret=0
;;
esac
done
log "Creating a validated checkpoint..."
log "Verifying that no maintenance tasks are running"
status.dpn
log "Checking status of avamar services"
dpnctl status | tee -a $tmp/status.dpn.out
log "checking if maintenance scheduler is running"
maint=`cat $tmp/status.dpn.out | grep -i "Maintenance" | grep -i "window" | grep -i status | cut -d: -f4 | sed 's/ //g' | tr -d '.'`
if [[ $maint == "suspended" ]] ; then
echo "Maintenance windows scheduler is suspended"
elif [[ $maint == "up" ]] ; then
echo "Maintenance windows scheduler is up"
ret=0
while [[ $ret -ne "1" ]] ; do
read -r -e -p $'\nStop maintenance scheduler ? :' answer
case $answer in
[yY][eE][sS]|[yY])
log "Stopping maintenance scheduler..."
dpnctl stop maint
;;
[nN][oO]|[nN])
log "Continuing..."
ret=1
;;
*)
ret=0
;;
esac
done
fi
log "Creating checkpoint..."
sudo avmaint --ava checkpoint
log "Please verify that the checkpoint has been created"
log "Printing checkpoint lists (cplist):"
sudo cplist
log "If the most recent validated checkpoint is more than 24 hours old, validate the checkpoint you just created"
read -r -e -p $'Please enter checkpoint number (cp.number) to validate: ' cpnum
log "Validating checkpoint $cpnum..."
sudo avmaint hfscheck --checkpoint=$cpnum --ava --rolling
success="TRUE"
unlock 7
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task8
fi
}
# TASK 8 ( task 12)
#Configure The New Nodes
# as storage nodes
task8() {
# TASK 8 (runbook task 12): convert the newly racked node into an Avamar
# storage node. Replaces the node's change_nodetype script with the utility
# node's copy, runs it with --data, then prints node.cfg for verification.
# Globals read: storage_node_ip, success, proceed. Helpers: lock/unlock/log/check.
lock 8
echo "####################################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #8 - Configure the new nodes as Storage nodes $(tput sgr0)"
echo "####################################################"
log "(Starting Task 8)"
log "Connecting to Storage node : $storage_node_ip"
log "Saving original change_nodetype to /usr/local/avamar/install/scripts/change_nodetype-1.42"
ssh $storage_node_ip -x "mv /usr/local/avamar/install/scripts/change_nodetype /usr/local/avamar/install/scripts/change_nodetype-1.42"
if [[ $? != 0 ]] ; then
log "ERROR: unable to save original change_nodetype on storage node $storage_node_ip"
# NOTE(review): exit is commented out, so the task deliberately continues
# after this failure — confirm that is intentional.
# exit 15
fi
log "Copying change_nodetype from utility node to new storage node"
scp /usr/local/avamar/install/scripts/change_nodetype $storage_node_ip:/usr/local/avamar/install/scripts/change_nodetype
if [[ $? != 0 ]] ; then
log "ERROR: unable to copy change_nodetype from utility node to new storage node"
# exit 16
fi
log "Converting the new node to a storage node"
ssh $storage_node_ip -x "sudo /usr/local/avamar/install/scripts/change_nodetype --data"
if [[ $? != 0 ]] ; then
log "ERROR: unable to convert the new node to a storage node"
# exit 17
fi
log "Validating that the node is now a storage node"
# Validation is manual: the operator inspects the printed node.cfg.
ssh $storage_node_ip -x "sudo cat /usr/local/avamar/etc/node.cfg"
if [[ $? != 0 ]] ; then
log "ERROR: unable to validate"
# exit 18
fi
unlock 8
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task9
fi
}
# TASK 9 ( task 13)
# Configure RMM
task9() {
# TASK 9 (runbook task 13): configure the RMM (remote management module)
# NIC on the new node via ipmitool — either on the dedicated port
# (LAN channel 3), shared with eth0 (channel 1), or skipped entirely.
# Globals read: storage_node_ip, rmm_ip, rmm_netmask, rmm_gateway.
lock 9
echo "#######################"
echo "$(tput setab 7)$(tput setaf 0)TASK #9 Configure RMM $(tput sgr0)"
echo "#######################"
log "(Starting Task 9)"
log "Configuring RMM"
ret=0
while [[ $ret -ne "1" ]] ; do
echo -ne "\n1] Dedicated"
echo -ne "\n2] Shared with eth0"
echo -ne "\n3] Do not configure RMM ( skip to next task )\n"
read -r -e -p $'\nPlease select an option [1/2/3]: ' answer
case $answer in
1)
mode="Dedicated"
echo "Proceeding with dedicated configuration..."
ssh root@$storage_node_ip -x "ipmitool lan set 3 ipaddr $rmm_ip"
ssh root@$storage_node_ip -x "ipmitool lan set 3 netmask $rmm_netmask"
ssh root@$storage_node_ip -x "ipmitool lan set 3 defgw ipaddr $rmm_gateway"
ret=1
;;
2)
mode="Shared"
echo "Proceeding with shared configuration..."
# Shared mode: configure channel 1 and zero out the dedicated channel 3.
ssh root@$storage_node_ip -x "ipmitool lan set 1 ipaddr $rmm_ip"
ssh root@$storage_node_ip -x "ipmitool lan set 1 netmask $rmm_netmask"
ssh root@$storage_node_ip -x "ipmitool lan set 1 defgw ipaddr $rmm_gateway"
ssh root@$storage_node_ip -x "ipmitool lan set 3 ipaddr 0.0.0.0"
ssh root@$storage_node_ip -x "ipmitool lan set 3 netmask 0.0.0.0"
ssh root@$storage_node_ip -x "ipmitool lan set 3 defgw ipaddr 0.0.0.0"
ret=1
;;
3)
# FIX: previously this arm called task10 directly AND execution fell
# through to the success/proceed check below, invoking task10 a second
# time. Just exit the loop and let the single call at the end run.
ret=1
;;
*)
ret=0
;;
esac
done
success="TRUE"
unlock 9
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task10
fi
}
# TASK 10 ( task 14)
# Update Network Configuration
# on new nodes
task10() {
# TASK 10 (runbook task 14): collect network configuration from the grid
# (rebuild_collect.pl), copy the resulting tarball to the new storage node,
# and push the updated ifcfg/hosts/resolv.conf files into place there.
# Globals read: storage_node_ip, bond0, bond1, bin, home, rebuild_collect.
lock 10
echo "#################################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #10 Update Network configuration on new node $(tput sgr0)"
echo "#################################################"
log "(Starting Task 10)"
log "***** Starting SSH Agent"
eval `$bin/ssh-agent` &> $logfile
if [[ $? == 0 ]] ; then
log "SSH Agent started successfully"
elif [[ $? != 0 ]] ; then
log "SSH Agent failed to start"
fi
log "Adding dpn keys identity "
$bin/ssh-add ~admin/.ssh/dpnid &> $logfile
if [[ $? == 0 ]] ; then
log "dpn keys added successfully"
elif [[ $? != 0 ]] ; then
log "***** Error: failed to add dpn keys"
fi
log "***** Listing identity"
$bin/ssh-add -l
if [[ -e $home/rebuild_collect.pl ]] ; then
log "rebuild_collect.pl exists"
# FIX: the executable-bit test used "rebuild.collect.pl" (dots) while every
# other reference uses rebuild_collect.pl, so chmod always ran.
if [[ -x $home/rebuild_collect.pl ]] ; then
log "executable bit already set on rebuild_collect.pl"
else
chmod u+x $home/rebuild_collect.pl
fi
else
log "rebuild_collect.pl does not exist, please re-download the package"
fi
log "Starting rebuild_collect.pl..."
loading 5
# FIX: was "$log_file" (undefined); the log variable is $logfile everywhere else.
$rebuild_collect --source=0.0 | tee -a $logfile
log "Listing rebuild_collect tar file contents:"
_rebuild_collect_files=`ls rebuild_config_files*`
tar -tzf $_rebuild_collect_files
log "Copying rebuild_config_files to new storage node : $storage_node_ip"
scp -p $_rebuild_collect_files $storage_node_ip:/home/admin
if [[ $? != 0 ]] ; then
log "Unable to copy $home/rebuild_config_files to storage node : $storage_node_ip"
fi
echo -ne "\n$(tput setaf 1)$(tput setab 7)Connecting to storage node and updating network configuration$(tput sgr0)"
log "\nConnecting to New Storage Node as root, please provide password when prompted..."
# Remote session: unpack the collected config, rewrite the bond IPs for the
# new node, and install the files. $bond0/$bond1 expand locally before ssh.
ssh -t root@$storage_node_ip <<-EOF
cd /home/admin
tar zxvf rebuild_config_files*.tgz
cd rebuild_config_files*
ls
sed -i "/^IPADDR=/c\IPADDR=$bond0" ifcfg-bond0
sed -i "/^IPADDR=/c\IPADDR=$bond1" ifcfg-bond1
cp -p ifcfg* ifroute* routes /etc/sysconfig/network
cp -p hosts resolv.conf modprobe.conf.local /etc
EOF
success="TRUE"
unlock 10
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task11
fi
}
# TASK 11 ( task 15)
# Restart Network Service
# on New node
task11() {
# TASK 11 (runbook task 15): restart networking on the new node via a
# detached helper script (so the ssh session surviving the restart is not
# required), then optionally load the 802.1q VLAN kernel module.
# Globals read: storage_node_ip.
lock 11
echo "##########################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #11 Restart Network Service on New Node $(tput sgr0)"
echo "##########################################"
log "(Starting Task 11)"
log "\nConnecting to New Storage Node as root, please provide password when prompted..."
# Write the restart script on the remote side and run it with nohup so the
# network bounce does not kill it along with our ssh session.
ssh root@$storage_node_ip <<-EOF
cat <<end-of-lines >/tmp/restart_script.sh
#!/bin/sh
service network stop
modprobe -r bonding
service network start
end-of-lines
chmod +x /tmp/restart_script.sh
nohup /tmp/restart_script.sh &
EOF
echo -ne "\nCustomer has VLANs configured on their backup network?"
ret=0
while [[ $ret != "1" ]] ; do
# FIX: the prompt used to be read once *before* the loop; an invalid
# answer then spun forever because nothing re-prompted. Read inside.
read -r -e -p $'\ni.e: Would you like to add 8021q module to the kernel ? [y/n]' answer
case $answer in
[yY][eE][sS]|[yY])
vlans=1
ret=1
;;
[nN][oO]|[nN])
vlans=0
ret=1
;;
*)
ret=0
;;
esac
done
if [[ $vlans == "1" ]] ; then
echo "modprobing 802.1q on the new Storage Node"
ssh root@$storage_node_ip -x "modprobe 8021q"
if [[ $? != 0 ]] ; then
log "Unable to add 802.1q module to the storage node"
exit 22
fi
fi
unlock 11
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task12
fi
}
# TASK 12 ( task 16)
# Install RAID tools
task12() {
# TASK 12 (runbook task 16): copy and install the Gen4S RAID tools and the
# dpnavsys package on the new storage node. Falls back to a manual-copy
# prompt (check) when scp fails.
# Globals read: storage_node_ip, avhome, success, proceed.
lock 12
echo "##########################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #12 Install RAID tools $(tput sgr0)"
echo "##########################################"
log "(Starting Task 12)"
log "Copying gen4s-sys-1.2.zip and dpnavsys-1.1.0-7.x86_64.rpm to new storage node"
scp gen4s-sys-1.2.zip root@$storage_node_ip:$avhome/src
if [[ $? != 0 ]] ; then
log "unable to copy gen4s-sys-1.2.zip to $storage_node_ip"
log "please copy it manually and confirm to continue"
check
fi
sleep 5
scp dpnavsys-1.1.0-7.x86_64.rpm root@$storage_node_ip:/usr/local/avamar/src
if [[ $? != 0 ]] ; then
log "unable to copy dpnavsys-1.1.0-7.x86_64.rpm to $storage_node_ip"
log "please copy it manually and confirm to continue"
check
fi
sleep 5
log "Extracting gen4s-sys-1.2.zip on new storage node"
ssh root@$storage_node_ip -x "cd $avhome/src && unzip gen4s-sys-1.2.zip"
sleep 5
log "Installing RAID Tools"
ssh root@$storage_node_ip -x "$avhome/src/gen4s-sys-1.2/avsetup.sh"
sleep 5
log "Installing dpnavsys"
ssh root@$storage_node_ip -x "rpm -ivh $avhome/src/dpnavsys-1.1.0-7.x86_64.rpm"
unlock 12
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task13
fi
}
#TASK 13 ( task 17)
#Patching Stunnel
task13() {
# TASK 13 (runbook task 17): copy the stunnel security patch from the
# utility node to the new storage node, verify its md5sum against the
# expected value ($stunnel_md5, set elsewhere), install it, and confirm
# the installed version.
lock 13
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #13 Patching Stunnel $(tput sgr0)"
echo "################################"
log "(Starting Task 13)"
log "Copying stunnel from Utility node to new storage node"
log "Please Enter Utility node root password"
su -c "cd /data01/avamar/src/SLES11_64 && tar zxvf sec_os_updates_SLES*.tgz stunnel-4.36-0.10.2.x86_64.rpm" | tee -a $logfile
log "Please re-enter Utility node root password followed by storage node root password"
su -c "scp -p /data01/avamar/src/SLES11_64/stunnel-4.36-0.10.2.x86_64.rpm root@$storage_node_ip:/data01/avamar/src"
if [[ $? == 0 ]] ; then
log "Copied stunnel-4.36-0.10.2.x86_64.rpm successfully"
else
log "Unable to copy stunnel-4.36-0.10.2.x86_64.rpm"
log "Please copy stunnel-4.36-0.10.2.x86_64.rpm manually to new storage node and confirm to continue"
check
fi
sleep 5
log "checking stunnel md5sum on storage node"
storage_md5=`ssh -x admin@$storage_node_ip "md5sum /data01/avamar/src/stunnel-4.36-0.10.2.x86_64.rpm" | tee -a $logfile`
# FIX: the hash was extracted from $stunnel_md5 (the *expected* value) and
# then compared against $stunnel_md5 itself, so the check always passed.
# Extract the first field (the sum) from the actual md5sum output instead.
_var=`echo $storage_md5 | awk {'print $1'}`
if [[ $_var != $stunnel_md5 ]] ; then
log "ERROR: md5sum mismatch, please recopy stunnel-4.36-0.10.2.x86_64.rpm and confirm to continue"
check
else
log "$(tput setaf 3)Md5sum Matched $(tput sgr0)"
fi
sleep 5
log "Patching Stunnel on storage node"
ssh -x root@$storage_node_ip "rpm -Uvh /data01/avamar/src/stunnel-4.36-0.10.2.x86_64.rpm" | tee -a $logfile
if [[ $? == 0 ]] ; then
log "Stunnel patched successfully on storage node $storage_node_ip"
else
log "Unable to patch stunnel on storage node $storage_node_ip"
# exit 21
fi
sleep 5
log "Verifying that stunnel is patched successfully"
e=`ssh -x root@$storage_node_ip "rpm -qa | grep stunnel" | tee -a $logfile`
if [[ $e == "stunnel-4.36-0.10.2" ]] ; then
log "Stunnel stunnel-4.36-0.10.2 patched on storage node $storage_node_ip"
echo -ne "$(tput setaf 2)\nSuccess\n$(tput sgr0)"
else
log "ERROR: Incorrect stunnel version, expected stunnel-4.36-0.10.2"
log "ERROR: Stunnel NOT patched on storage node $storage_node_ip"
log "Please run this step manually"
exit 15
fi
unlock 13
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task14
fi
}
#TASK 14 ( task 21)
#Swap file configuration
# TODO create one swap file with 16GB - current
task14() {
# TASK 14 (runbook task 21): ensure the new storage node has at least 16 GB
# of swap. swapon -s reports sizes in 1 KB blocks, so the 16 GB threshold
# is 16000000 (decimal KB, matching the original script's arithmetic).
lock 14
echo "##################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #14 Swap file configurations $(tput sgr0)"
echo "##################################"
log "(Starting Task 14)"
log "Checking amount of swap space on storage node $storage_node_ip"
loading 5
cur_swap=`ssh -x admin@$storage_node_ip "swapon -s" | tail -1 | awk {'print $3'}`
if [[ "$cur_swap" -lt "16000000" ]] ; then
log "Swap space on storage node $storage_node_ip : $(expr $cur_swap / 1000000) GB"
echo -ne "$(tput setab 0)$(tput setaf 5)Note:$(tput sgr0)\nRecommended configured swap space on new storage node is 16GB, currently $storage_node_ip has only $(expr $cur_swap / 1000000) GB \n"
log "Proceeding with creating swap"
log "Obtaining list of available data partitions on new node"
# FIX: the df listing used to overwrite $e (the swap size) before the
# swapdiff arithmetic below, and the tee was outside the command
# substitution (piping nothing). Use a dedicated variable and log it.
partitions=`ssh -x root@$storage_node_ip "df -h | grep data0"`
echo "$partitions" | tee -a $logfile
log "Please confirm to proceed with increasing swap space to recommended value"
check
swapdiff=$(expr 16000000 - $cur_swap)
log "Creating remaining $(expr $swapdiff / 1000000) GB of swap"
# FIX: bs=1G with a KB-denominated count would have tried to write ~16 PB;
# swapdiff is in 1 KB units, so write 1024-byte blocks.
ssh -x root@$storage_node_ip "cd /data01 && dd if=/dev/zero of=ddr_metadata_swapfile bs=1024 count=$swapdiff" | tee -a $logfile
ssh -x root@$storage_node_ip "mkswap /data01/ddr_metadata_swapfile"
if [[ $? == 0 ]] ; then
log "swap file of $swapdiff GB created successfully"
else
log "Unable to create $swapdiff GB on new node, please create it manually and confirm to continue"
check
fi
log "Enabling new swap file"
# FIX: path typo — the file is /data01/ddr_metadata_swapfile, not
# /data01/ddr_metadata/swapfile.
ssh -x root@$storage_node_ip "swapon /data01/ddr_metadata_swapfile" | tee -a $logfile
if [[ $? == 0 ]] ; then
log "Swap enabled successfully"
log "Please verify that swapfile has been added to kernel's swap space"
ssh -x root@$storage_node_ip "swapon -s"
log "Adding new swap entry to /etc/fstab"
# FIX: fstab entry was missing the leading '/' on the path.
ssh -x root@$storage_node_ip "cp /etc/fstab /etc/fstab.bkp && echo '/data01/ddr_metadata_swapfile swap swap defaults 0 0' >> /etc/fstab"
else
log "Unable to enable swap on new node, please enable it manually and confirm to continue"
check
fi
else
log "Check passed, storage node $storage_node_ip has $(expr $cur_swap / 1000000) GB. of swap space configured."
log "Skipping swap creation"
fi
# FIX: the skip branch used to unlock/check/task15 and then fall through to
# a second unlock/check/task15, running the next task twice. Run the exit
# sequence exactly once for both branches.
unlock 14
check
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task15
fi
}
#TASK 15
#Update OS Parameters
#TODO check if fs.file-max exists in /etc/sysctl.conf first, if it exists modify, otherwise append 2 lines
task15() {
# TASK 15: tune OS limits on the new storage node — raise the kernel
# fs.file-max and the per-user nofile limit, persisting both.
# Globals read: storage_node_ip, success, proceed.
lock 15
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #15 Update OS parameters $(tput sgr0)"
echo "################################"
log "(Starting Task 15)"
log "Updating OS parameters"
log "Modifying the max file handles kernel setting on storage node $storage_node_ip"
e=`ssh root@$storage_node_ip -x "sysctl -w fs.file-max=1600000"`
if [[ $? == 0 ]] ;then
log "max file handle successfully modified"
else
log "ERROR: unable to modify max file handle"
log "Please modify it manually and confirm to continue"
check
fi
log "Backing up limits.conf and sysctl.conf"
ssh -x root@$storage_node_ip "cp /etc/security/limits.conf /etc/security/limits.conf.bkp && cp /etc/sysctl.conf /etc/sysctl.conf.bkp"
if [[ $? == 0 ]] ; then
log "limits.conf and sysctl.conf backed up successfully"
fi
log "Modifying nofile parameter to 1000000 in /etc/security/limits.conf"
ssh -x root@$storage_node_ip "sed -i 's/^* - nofile .*$/* - nofile 1000000/' /etc/security/limits.conf"
if [[ $? == 0 ]] ; then
log "nofile parameter modified successfully"
fi
log "Increasing maximum open file handles to 1600000 in /etc/sysctl.conf"
# FIX: the persisted line used limits.conf syntax ('* - fs.file-max = ...'),
# which sysctl cannot parse; sysctl.conf entries are plain 'key = value'.
ssh -x root@$storage_node_ip "echo '# Increase maximum simultaneously open file handles.' >> /etc/sysctl.conf && echo 'fs.file-max = 1600000' >> /etc/sysctl.conf"
if [[ $? == 0 ]] ; then
log "fs.file-max parameter modified successfully"
fi
unlock 15
check
# FIX: was [[ $success -eq "TRUE" ]] — inside [[ ]], -eq forces arithmetic
# evaluation, so both operands evaluated to 0 and the test was always true.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task16
fi
}
#TASK 16 (task 23)
#OS Security Patch Rollup Installation
task16() {
# TASK 16 (runbook task 23): install the OS security patch rollup on the
# new storage node, then reboot it (operator-confirmed).
# Globals read: storage_node_ip, avhome, success, proceed.
lock 16
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #16 OS Security Patch Rollup$(tput sgr0)"
echo "################################"
log "(Starting Task 16)"
log "Creating ospatches directory on storage node"
ssh -x root@$storage_node_ip "mkdir $avhome/src/ospatches"
if [[ $? == 0 ]] ; then
log "$avhome/src/ospatches directory created successfully"
else
log "Unable to create directory $avhome/src/ospatches, please confirm it exists to continue" | tee -a $logfile
check
fi
log "copying sec_os_updates_SLES-2016-Q3-v3.tgz & sec_install_os_errata_sles.pl to new storage node"
scp sec_os_updates_SLES-2016-Q3-v3.tgz sec_install_os_errata_sles.pl root@$storage_node_ip:/$avhome/src/ospatches
if [[ $? == 0 ]] ; then
log "Files copied successfully"
else
log "Unable to copy files, please copy them manually and confirm to continue"
check
fi
log "Installing the security rollup"
ssh -x root@$storage_node_ip "perl $avhome/src/ospatches/sec_install_os_errata_sles.pl $avhome/src/ospatches/sec_os_updates_SLES-2016-Q3-v3.tgz"
unlock 16
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
log "NOTE: Storage node reboot required, please confirm to proceed"
check
log "Please enter storage node root password"
ssh -x root@$storage_node_ip "reboot"
log "Rebooting Storage node $storage_node_ip ..."
log "Please confirm that the new node is up and running before proceeding"
check
task17
fi
}
#TASK 17
#Install the new nodes
task17() {
# TASK 17: register the new node in probe.xml via nodedb — backup node
# address on network group 1 and internal bond1 address on group 2.
# Globals read: storage_node_ip, bond1, avhome, success, proceed.
lock 17
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #17 Install new nodes$(tput sgr0)"
echo "################################"
log "(Starting Task 17)"
log "Please verify that checkpoint validation (HFS check) has completed"
mccli checkpoint show --verbose=TRUE
check
current_date=`date +%y%m%d`
log "Making a copy of the current probe.xml"
cp $avhome/var/probe.xml $avhome/var/probe.xml.$current_date
log "Checking current nodes configured:"
sleep 2
nodedb print
sleep 5
log "Checking existing nodenumbers:"
nodedb print --nodes=0.all+ --addr --id
sleep 5
read -p "Please enter node number (0.X) ( NOTE: put X value only i.e: 3 ): " nodenumber
log "Adding the new node to probe.xml"
nodedb add node --node=0.$nodenumber --addr=$storage_node_ip --type=storage --nwgrp=1 --allow=backup
if [[ $? == 0 ]] ; then
log "New Storage node has been successfully added to probe.xml"
fi
sleep 5
log "Adding the Internal IP address for the new node to probe.xml"
nodedb add if --node=0.$nodenumber --addr=$bond1 --nwgrp=2 --allow=internal
if [[ $? == 0 ]] ; then
log "Internal IP address has been successfully added to probe.xml"
fi
sleep 5
log "Validating the new node has been correctly added to probe.xml"
nodedb print --nodes=0.all+ --addr --id
unlock 17
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task18
fi
}
#TASK 18
#Copy existing .ssh directories to new node
task18() {
# TASK 18: replicate the existing .ssh directories (root, admin, dpn) from
# the utility node to the new storage node via a tar-over-ssh pipe.
# Globals read: storage_node_ip, success, proceed.
lock 18
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #18 Copy existing .ssh directories$(tput sgr0)"
echo "################################"
log "(Starting Task 18)"
log "Copying existing .ssh directory to new storage node"
log "Please Enter root password for utility node followed by root password for storage node"
# tar's C flag changes to / on both ends so the relative paths land in place.
su -c "tar Cczf / - root/.ssh home/admin/.ssh home/dpn/.ssh | ssh root@$storage_node_ip tar Cxzf / -"
if [[ $? == 0 ]] ; then
log "SSH directories copied successfully to new storage node"
else
log "Unable to copy SSH directories, please execute the below command from utility node"
log "tar Cczf / - root/.ssh home/admin/.ssh home/dpn/.ssh | ssh root@$storage_node_ip tar Cxzf / -"
fi
log "$(tput setaf 3) NOTE: Please make sure to change root, admin, and dpn passwords on new node using passwd command$(tput sgr0)"
log "Please confirm that password for root, admin, and dpn has been changed before proceeding"
check
unlock 18
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task19
fi
}
#TASK 19
# Verify passwords and SSH keys on the new nodes
task19() {
# TASK 19: sanity-check passwordless access to every node by loading the
# dpnid key and running a trivial command (date) grid-wide with mapall.
lock 19
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #19 Verify Passwords and SSH keys on new node$(tput sgr0)"
echo "################################"
log "(Starting Task 19)"
eval `$bin/ssh-agent` &> $logfile
$bin/ssh-add ~admin/.ssh/dpnid | tee -a $logfile
log "running mapall to verify passwords and keys:"
mapall --all date
unlock 19
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task20
fi
}
#TASK 20
# Time Configuration
task20() {
# TASK 20: verify/synchronize time across the grid. The commands must run
# as the dpn user (asktime), so they are written to a helper script and
# executed via su. Backticks in the heredoc are escaped so ssh-agent is
# evaluated by the dpn shell, not while writing the file.
lock 20
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #20 Time Configuration$(tput sgr0)"
echo "################################"
log "(Starting Task 20)"
cat > /tmp/dpncmds <<-EOF
#!/bin/bash
eval \`ssh-agent\`
ssh-add ~dpn/.ssh/dpnid
mapall --all --user=root 'date'
asktime
EOF
chmod a+rwx /tmp/dpncmds
su - dpn -c "/tmp/dpncmds"
unlock 20
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task21
fi
}
#TASK 21
# Update hosts file
task21() {
# TASK 21: after the operator hand-edits /etc/hosts on the utility node,
# distribute it to every node (mapall copy stages it in admin's home, the
# second mapall moves it into /etc).
lock 21
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #21 Update hosts file$(tput sgr0)"
echo "################################"
log "(Starting Task 21)"
log "$(tput setaf 1)$(tput bold)Please modify /etc/hosts file on Utility node to add both the internal and customer network of the new nodes $(tput sgr0)"
log "Please confirm to continue and proceed with copying /etc/hosts file to all nodes"
check
eval `$bin/ssh-agent` &> $logfile
$bin/ssh-add ~admin/.ssh/dpnid
mapall --all+ --user=root copy /etc/hosts
if [[ $? == 0 ]] ; then
log "/etc/hosts has been successfully copied to all storage nodes in admin home dir"
else
log "ERROR: unable to copy /etc/hosts to all storage nodes"
fi
log "Copying the hosts file to /etc on each node"
mapall --all+ --user=root 'cp -f etc/hosts /etc'
if [[ $? == 0 ]] ; then
log "hosts file has been successfully copied to /etc on all nodes"
fi
unlock 21
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task22
fi
}
#TASK 22
# Install Avamar Security Packages
task22() {
# TASK 22: install the Avamar firewall/hardening/password packages on the
# new node, add its IPs to /etc/firewall-ips, regenerate the per-node
# firewall-ips files, and restart the avfirewall service grid-wide.
lock 22
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #22 Install Avamar Security Packages$(tput sgr0)"
echo "################################"
log "(Starting Task 22)"
log "Checking if Avamar password security package is installed"
# FIX: was 'grep pass 2>&1 /dev/null' — that passes /dev/null to grep as a
# *file operand* (and searches it), rather than silencing output.
rpm -qa | grep pass > /dev/null 2>&1
if [[ $? == 0 ]] ; then
log "Avamar password security package is installed"
else
log "Avamar password security package is not installed"
fi
log "Checking version of the avamar firewall and hardening packages that are installed"
rpm -qa | egrep "avf|harden"
log "Copying Avamar firewall package to new node"
scp $home/hardening/avfwb*.rpm $storage_node_ip:$avhome/src
log "Copying Avamar hardening package to new node"
scp $home/hardening/avharden*.rpm $storage_node_ip:$avhome/src
log "Copying Avamar password package to new node"
scp $home/hardening/avpasswd*.rpm $storage_node_ip:$avhome/src
log "Installing Avamar security packages on the new node"
ssh -x root@$storage_node_ip "cd $avhome/src && rpm -ivh avfwb*.rpm avharden*.rpm avpasswd*.rpm"
if [[ $? == 0 ]] ; then
log "Avamar security packages installed successfully on the new node"
fi
log "Updating /etc/firewall-ips on utility node"
su root -c "cp /etc/firewall-ips /etc/firewall-ips.bkp"
if [[ $? == 0 ]] ; then
log "/etc/firewall-ips backed-up successfully"
fi
# Append the new node's external IP inside the quoted list on line 2, and
# the internal (bond1) IP on line 3. $storage_node_ip/$bond1 expand locally.
su root -c "sed -i '2s/.$/ $storage_node_ip\"/' /etc/firewall-ips"
if [[ $? == 0 ]] ; then
log "External IP added successfully in /etc/firewall-ips"
fi
su root -c "sed -i '3s/.$/ $bond1\"/' /etc/firewall-ips"
if [[ $? == 0 ]] ; then
log "Internal IP added successfully in /etc/firewall-ips"
fi
log "Changing permissions of sec_create_nodeips.sh script"
su root -c "chmod a+x $avhome/lib/admin/security/sec_create_nodeips.sh"
if [[ $? == 0 ]] ; then
log "Permissions of sec_create_nodeips.sh changed successfully"
fi
log "Updating the firewall-ips file on other nodes in the grid"
sudo $avhome/lib/admin/security/sec_create_nodeips.sh
if [[ $? == 0 ]] ; then
log "firewall-ips file updated successfully on other storage nodes"
else
log "Unable to update firewall-ips file on other storage nodes, please update them manually and confirm to continue"
check
fi
log "Restarting firewall service on all nodes"
eval `$bin/ssh-agent` &> $logfile
$bin/ssh-add ~admin/.ssh/dpnid | tee -a $logfile
mapall --all+ --noerror --user=root service avfirewall restart
if [[ $? == 0 ]] ; then
log "Firewall service restarted successfully on all nodes"
else
log "Unable to restart firewall service on all nodes, please restart it manually and confirm to continue"
check
fi
unlock 22
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task23
fi
}
#TASK 23
# Update the sudoers file on new nodes
task23() {
# TASK 23: copy the sudoers file from an existing storage node to the new
# node (via the utility node, since direct node-to-node root scp may not work).
lock 23
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #23 Update the sudoers file on new nodes $(tput sgr0)"
echo "################################"
log "(Starting Task 23)"
read -p "Enter External IP address of an old storage node to copy sudoers file from: " nodeip
scp root@$nodeip:/etc/sudoers .
scp ./sudoers root@$storage_node_ip:/etc
if [[ $? == 0 ]] ; then
# FIX: message referenced $oldnode, which is never set; the source node
# address is read into $nodeip above.
log "Sudoers file copied successfully from $nodeip to new node"
else
log "Unable to copy sudoers file, please copy it manually and confirm to continue"
check
fi
unlock 23
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task24
fi
}
#TASK 24
# Update Security Certificates on the new node
task24() {
# TASK 24: push the grid security certificates (key.pem/cert.pem) to the
# new node with mapall and lock their permissions down to 400.
lock 24
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #24 Update Security Certificates on the new node $(tput sgr0)"
echo "################################"
log "(Starting Task 24)"
log "copying the security certificates to the new nodes"
read -p "Please enter node number of the new node(0.X) ( NOTE: put X value only i.e: 3 ) : " nodenumber
eval `$bin/ssh-agent` &> $logfile
$bin/ssh-add ~admin/.ssh/dpnid | tee -a $logfile
cd /usr/local/avamar/etc && mapall --nodes=0.$nodenumber copy {key,cert}.pem
if [[ $? == 0 ]] ; then
log "Security certificates copied successfully to new node"
else
log "Unable to copy security certificates to new node"
log "Please copy security certificates manually and confirm to continue"
check
fi
log "Updating permissions of the security certificates on the new node"
mapall --nodes=0.$nodenumber chmod 400 {key,cert}.pem
if [[ $? == 0 ]] ; then
log "permissions updated successfully"
else
log "Unable to update security certificates permissions on the new node"
log "Please update security certificates manually and confirm to continue"
check
fi
unlock 24
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task25
fi
}
#TASK 25
# Start the new node
task25() {
# TASK 25: bring the new node online. Quiesces backups/maintenance, starts
# the node clean, pushes network config, briefly enables stripe balancing
# (balancemin=2) so data lands on the node, then disables balancing and
# takes a checkpoint.
lock 25
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #25 Start the new node $(tput sgr0)"
echo "################################"
log "(Starting Task 25)"
eval `$bin/ssh-agent` &> $logfile
$bin/ssh-add ~admin/.ssh/dpnid | tee -a $logfile
log "Verify that load average on storage nodes is less than 0.2"
mapall --all+ --user=root 'uptime'
log "Verify that balancing is still disabled"
avmaint config --ava | grep balancemin
echo -ne " Would you like to set balancemin to 0 ? "
_answered=0
while [[ $_answered != 1 ]] ; do
read -r -e -p $'\npress n to continue and y to set balancemin to 0 [y/n]: ' answer
case $answer in
[yY][eE][sS]|[yY])
log "Setting balancemin to 0"
avmaint config --ava balancemin=0
log "Validating that balancemin equals 0"
avmaint config --ava | grep balancemin
_answered=1
;;
[nN][oO]|[nN])
log "Continuing..."
_answered=1
continue
;;
*)
_answered=0
;;
esac
done
log "Disabling backup activity to prepare for the dedicated balance"
avmaint suspend
log "Disabling backup scheduler"
dpnctl stop sched
log "Disabling maintenance schedule"
dpnctl stop maint
sleep 10
log "Please verify that backup scheduler, maintenance scheduler, and gsan are in the correct states and confirm to continue"
log "Example: gsan (degraded), Backup Scheduler (down), Maintenance Scheduler (suspended)"
dpnctl status
check
log "Verify that no gsan processes are running on the new node and confirm to continue"
read -p "Please enter node number (0.X) ( NOTE: put X value only i.e: 3 ): " nodenumber
ssn 0.$nodenumber 'ps -ef | grep gsan'
check
log "Starting new node"
sleep 5
start.nodes --nodes=0.$nodenumber --clean
sleep 5
log "Updating networking connection info on the new node"
avmaint networkconfig --ava /usr/local/avamar/var/probe.xml
log "Enabling balancing so that a minimum of 2 stripes are copied to the new node"
avmaint config --ava balancemin=2
log "Waiting for 5 minutes then checking that the new node has at least 2 stripes"
log "Please wait ..."
sleep 300
status.dpn
log "Please verify that 2 or more stripes are 'onl' for the new node and confirm to continue"
check
log "Disabling balancing"
avmaint config --ava balancemin=0
log "Creating a new checkpoint"
avmaint checkpoint --ava
log "Please verify that the correct addition and physical locations of the new node"
nodenumbers
unlock 25
check
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] && [[ $proceed == "TRUE" ]] ; then
task26
fi
}
#TASK 26
# Put Avamar Server back to production ready state
task26() {
# TASK 26: return the server to production — restart schedulers, resume
# gsan, restore async crunching and the checkpoint-retention defaults, and
# print final health checks.
lock 26
echo "################################"
echo "$(tput setab 7)$(tput setaf 0)TASK #26 Put Avamar Server back to production state $(tput sgr0)"
echo "################################"
log "(Starting Task 26)"
log "Starting backup scheduler"
dpnctl start sched
if [[ $? == 0 ]] ; then
log "Backup Scheduler started successfully"
else
log "Unable to start backup scheduler, please start it manually and confirm to continue"
check
fi
loading 1
log "Starting maintenance scheduler"
dpnctl start maint
if [[ $? == 0 ]] ; then
log "Maintenance Scheduler started successfully"
else
log "Unable to start maintenance scheduler, please start it manually and confirm to continue"
check
fi
loading 1
log "Resuming normal gsan activity"
avmaint resume
if [[ $? == 0 ]] ; then
log "gsan activity resumed successfully"
else
log "Unable to resume gsan activity, please resume it manually and confirm to continue"
check
fi
loading 1
log "Ensuring that server is healthy and ready for production use"
dpnctl status
loading 1
log "Restarting asynchronous crunching"
avmaint config --ava asynccrunching=true
loading 1
log "Verifying that asynchronous crunching has restarted"
avmaint config --ava | grep crunch
loading 1
log "Changing the cpmostrecent parameter back to default"
avmaint config --ava cpmostrecent=2
loading 1
log "Changing cphfschecked parameter back to default"
avmaint config --ava cphfschecked=1
loading 1
log "Verifying the changes to cpmostrecent and cphfschecked parameters"
avmaint config --ava | grep cp
loading 1
log "Verifying that all services are running as expected"
dpnctl status
loading 1
log "Verifying that the server is fully operational as expected"
status.dpn
loading 1
# FIX: was -eq "TRUE" (arithmetic inside [[ ]] — always true); use string ==.
if [[ $success == "TRUE" ]] ; then
log "Procedure completed successfully"
echo -ne "$(tput setaf 2)\nProcedure completed Successfully !!!\n$(tput sgr0)"
exit 0
fi
}
# --- Main entry point: banner, pre-flight review, resume-or-restart logic ---
log "************"
log "Starting ACU"
log "************"
clear
echo "$(tput setaf 6)##############################################"
echo "#                                            #"
echo "#              Capacity Upgrade              #"
echo "#             Add Storage Node               #"
echo "#                                            #"
echo "##############################################"
echo -ne "\n"
echo -ne "\nPre-upgrade configuration:\n\n$(tput sgr0)"
echo -e "IPMI IP : $(tput setaf 6)$ipmi_ip\n$(tput sgr0)"
echo -e "Utility Node IP : $(tput setaf 6)$utility_node_ip\n$(tput sgr0)"
echo -e "New Storage Node IP: $(tput setaf 6)$storage_node_ip\n$(tput sgr0)"
echo -e "NTP Server IP : $(tput setaf 6)$ntp_ip\n$(tput sgr0)"
echo -e "Storage node bond0 : $(tput setaf 6)$bond0\n$(tput sgr0)"
echo -e "Storage node bond1 : $(tput setaf 6)$bond1\n$(tput sgr0)"
#echo -ne "$(tput setaf 6)\n\nPredefined download URLs :$(tput sgr0)\n\n"
#echo -ne "DataStore/GEN4S url : $(tput setaf 6)$g4surl$(tput sgr0)\n\n"
#echo -ne "Proactive check script url: $(tput setaf 6)$purl$(tput sgr0)\n\n"
#echo -ne "rebuild_collect script url: $(tput setaf 6)$rcurl$(tput sgr0)\n\n"
#echo -ne "Avamar dpnavsys Package url: $(tput setaf 6)$dpnavurl$(tput sgr0)\n\n"
echo -ne "\n\n\nPlease review the above information and confirm to continue.\n"
check
conncheck
# Detect a leftover lock file from a previous, interrupted run. Two-digit
# task numbers are tried first so e.g. "12.lock" is not parsed as "1".
# FIX: the grep patterns were unquoted ([0-9][0-9].lock), which the shell
# could glob-expand against files in the CWD before grep ever saw them.
_lock=`ls -a $lock | grep "[0-9][0-9].lock"`
if [[ $? != 0 ]] ; then
_lock=`ls -a $lock | grep "[0-9].lock"`
taskno=`echo $_lock | grep -o "[0-9]"`
else
taskno=`echo $_lock | grep -o "[0-9][0-9]"`
fi
if [[ $_lock ]] ; then
log "##########################################################################\n"
log "Detected previous incomplete upgrade attempt that was stopped in TASK : $taskno\n"
log "##########################################################################\n"
read -r -e -p $"Would you like to continue previous attempt and start from task $taskno (n to startover) ?[y/n] " answer
case $answer in
[yY][eE][sS]|[yY])
echo -ne "$(tput setaf 0)$(tput setab 1)Restarting task #$taskno$(tput sgr0)\n"
log "Continuing..."
rm -rf lock ; mkdir lock
loading 5
task$taskno
;;
[nN][oO]|[nN])
echo -e "$(tput setaf 7)$(tput setab 1)Starting over...$(tput sgr0)"
rm -rf lock ; mkdir lock
loading 5
log "(Starting over)"
task1
;;
*)
exit 6
;;
esac
else
task1
fi
| true |
3a2cc646121d843bf60a65c00cb8541908369124 | Shell | ecatanzani/MapsFitter | /AnaliticalTemplates/SubmitJobs/ToNode/SHs/submit.job.template | UTF-8 | 1,516 | 3.46875 | 3 | [] | no_license | #!/bin/sh
run_exec () {
# Run the payload executable once with the three job indices.
# _EXEC_ is a template placeholder replaced by the submitter with the real
# binary path before this script is queued.
TRY_IDX=$1
BATCH_TRY=$2
N_TRY=$3
# NOTE(review): CMD is expanded unquoted on purpose so the command line
# word-splits into executable + arguments; echoed first for the job log.
CMD="_EXEC_ ${TRY_IDX} ${BATCH_TRY} ${N_TRY}"; echo ${CMD}; ${CMD};
}
# Batch-job template body: set up the environment, run the payload inside a
# private sandbox under /data, move results to _OUTDIR_, and clean assets.
# _SETVAR_, _PAR_ and _OUTDIR_ are placeholders filled in by the submitter.
echo "`whoami` @ `hostname` : `pwd`"
CMD="source _SETVAR_"; echo ${CMD}; ${CMD};
echo " ------------------------ "
echo " ---------- ENVS -------- "
echo " ------------------------ "
echo ""
env
echo ""
echo " ------------------------ "
echo " ------------------------ "
echo ""
HOME=`pwd`
# Pick a sandbox name from PID + RANDOM; retry until the directory is free.
UNIQUE="$$_$RANDOM"
SANDBOX=/data/$UNIQUE
while [[ -d $SANDBOX ]]; do
UNIQUE="$$_$RANDOM"
SANDBOX=/data/$UNIQUE
echo "Searching for a free SandBox dir !"
done
mkdir -vp $SANDBOX
cd $SANDBOX
pwd
#Passing parameters to the function using a list file
#PARAMETERS="_PAR_"
#cat <<< $PARAMETERS >> ./lista.dat
#run_exec lista.dat
#rm -fv lista.dat
#Passing each parameter separately to the function
# NOTE(review): _PAR_ is passed as a single quoted argument; run_exec
# expects three — presumably the submitter substitutes three values here.
run_exec "_PAR_"
######### Moving result ...
ls -altrh
mv -v *.root *.txt _OUTDIR_
# NOTE(review): the sandbox is removed while it is still the CWD; the
# script exits right after, so this appears intentional.
rm -rfv "$SANDBOX"
######### Cleaning git repo
S_PATH=/storage/gpfs_data/dampe/users/ecatanzani/MyRepos/DAMPE/MapsFitter/AnaliticalTemplates/SubmitJobs/ToNode/ExeSW/assets/produceSeeds/seeds.txt
SCALE_PATH=/storage/gpfs_data/dampe/users/ecatanzani/MyRepos/DAMPE/MapsFitter/AnaliticalTemplates/SubmitJobs/ToNode/ExeSW/assets/scalingSoftware/scaled_reference_Isotropic_histos.root
TEMPL=/storage/gpfs_data/dampe/users/ecatanzani/MyRepos/DAMPE/MapsFitter/AnaliticalTemplates/SubmitJobs/ToNode/ExeSW/assets/computeTemplates/results
mv $S_PATH _OUTDIR_
rm $SCALE_PATH
rm $TEMPL/*.root
| true |
bab8b5126f3816c8b6e2ffa5f07179fbf5f53116 | Shell | Konstruktionist/dotfiles_dotbot | /windowmanager/bindir/i3mail | UTF-8 | 362 | 2.8125 | 3 | [
"Unlicense",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
case $BLOCK_BUTTON in
1) /usr/bin/qutebrowser https://mail.google.com ;;
3) /usr/bin/termite -e neomutt ;;
esac
COUNT=$(find ~/.mail -wholename */_Posteingang/new/* | sed -n '$=')
if [[ $COUNT -gt 0 ]]; then
echo "<span color=\"#b8bb26\">$COUNT</span>" $(cat ~/.config/mutt/.dl 2>/dev/null)
else
echo $(cat ~/.config/mutt/.dl 2>/dev/null)
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.