blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9d401e0c8a38a1fab6582bb53645d83cd2bd5da5 | Shell | Theoretical-Neuroscience-Group/synaptic_filter | /run_local.sh | UTF-8 | 370 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Sync the current directory to the Euler cluster and submit N jobs.
#
# Usage: run_local.sh <folder> <file> <N> [kwargs]
#   $1 - remote folder name under /cluster/home/jegminat
#   $2 - file/experiment name forwarded to run_remote.sh
#   $3 - number of jobs to submit (i = 0 .. N-1)
#   $4 - optional extra key=value arguments forwarded to run_remote.sh

remote="jegminat@euler.ethz.ch"
folder=$1
file=$2
N=$3
kwargs=$4

echo "syn $file to folder: $folder"
# -p: do not fail when the remote folder already exists (re-runs).
ssh "$remote" mkdir -p "./$folder"
rsync -a --ignore-existing ./* "$remote:/cluster/home/jegminat/$folder/"

echo "runs $N"
for (( i = 0; i < N; i++ )); do
    echo "submit job with args $folder -M $N -i $i"
    # kwargs is intentionally unquoted so multiple key=value pairs split
    # into separate words on the remote command line.
    ssh "$remote" bash "./$folder/run_remote.sh" "$folder" "$file" "i=$i" $kwargs
done
| true |
cf96f1e22893f0a3a2d1721366f593b2153f3b37 | Shell | spartantri/rpi-nsm | /mosquitto.sh | UTF-8 | 1,996 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Install mosquitto, create a local CA plus server certificate, and write a
# TLS-only broker configuration. Finishes with a local pub/sub smoke test.
# Must run as root (installs packages, writes under /etc/mosquitto).
if [ "$EUID" -ne 0 ]
  then echo -e "Must be root\n usage: sudo $0"
  exit
fi

read -p "Enter username to use for IOT MQTT (user1): " mqttUser
mqttUser=${mqttUser:-user1}

apt-get install -y mosquitto mosquitto-clients python-mosquitto

# Install and generate certificates with easy-ca.
cd /home/pi
git clone https://github.com/spartantri/easy-ca.git
cd easy-ca
./create-root-ca -d /home/pi/PI_ROOT_CA
cd /home/pi/PI_ROOT_CA
bin/create-server -s "$(hostname)"
mkdir -p /etc/mosquitto/ca_certificates /etc/mosquitto/certs
cp ca/ca.crt /etc/mosquitto/ca_certificates/
cp crl/ca.crl /etc/mosquitto/ca_certificates/
cp "certs/$(hostname).server.crt" /etc/mosquitto/certs/
cp "private/$(hostname).server.key" /etc/mosquitto/certs/
mosquitto_passwd -c /etc/mosquitto/passwd_mqtt "$mqttUser"

# Generate mosquitto configuration.
# BUGFIX: the heredoc previously ended *after* the smoke-test commands, so
# "read", mosquitto_pub and mosquitto_sub were written into mosquitto.conf
# instead of being executed. Also, certfile/keyfile pointed at
# <host>.crt/<host>.key while the files installed above are named
# <host>.server.crt/<host>.server.key.
cat > /etc/mosquitto/mosquitto.conf << EOF
# Place your local configuration in /etc/mosquitto/conf.d/
#
# A full description of the configuration file is at
# /usr/share/doc/mosquitto/examples/mosquitto.conf.example
allow_anonymous false
password_file /etc/mosquitto/passwd_mqtt
pid_file /var/run/mosquitto.pid
persistence true
persistence_location /var/lib/mosquitto/
log_dest file /var/log/mosquitto/mosquitto.log
include_dir /etc/mosquitto/conf.d
log_type error
log_type warning
log_type notice
log_type information
connection_messages true
log_timestamp true
# MQTT over TLS/SSL
listener 8883
cafile /etc/mosquitto/ca_certificates/ca.crt
certfile /etc/mosquitto/certs/$(hostname).server.crt
keyfile /etc/mosquitto/certs/$(hostname).server.key
#require_certificate true
#use_identity_as_username true
crlfile /etc/mosquitto/ca_certificates/ca.crl
EOF

# Testing service: publish and subscribe once over TLS.
# NOTE(review): the broker may need a restart (e.g. `systemctl restart
# mosquitto`) to pick up the new configuration before this test can pass
# — confirm on the target system.
read -p "Enter password for ${mqttUser} : " mqttPass
sleep 2 && mosquitto_pub --cafile /etc/mosquitto/ca_certificates/ca.crt -d -t test_mqtt -m "MQTT mosquitto test successful! PRESS <CTRL+C>" -u "$mqttUser" -P "$mqttPass" -h "$(hostname)" -p 8883 &
mosquitto_sub --cafile /etc/mosquitto/ca_certificates/ca.crt -d -t test_mqtt -u "$mqttUser" -P "$mqttPass" -h "$(hostname)" -p 8883
| true |
84cc686383d27082a982a014114a30e053835cb8 | Shell | azhe12/shell_misc | /gen_symbol_file.sh | UTF-8 | 601 | 3.09375 | 3 | [] | no_license | #!/bin/bash
#Copyright (c) 2007 Li XianJing <xianjimli@hotmail.com>
#
# Generate gdb "add-symbol-file" commands for every executable shared
# library mapped in a /proc/<pid>/maps file, so symbols can be loaded at
# the correct runtime addresses.
set -x

if [ "$1" = "" ]
then
	echo "usage: " $0 " [maps file]"
	exit 1
fi

# Executable (r-xp) mappings of shared objects.
# BUGFIX: the old pattern '/.so' (slash + any char + "so") misses normal
# paths such as /lib/libc.so; '\.so' matches the ".so" suffix itself.
grep r-xp -- "$1" | grep '\.so' > all_so.tmp.log

# Emit: index, numeric base address, library path for each mapping.
# BUGFIX: substr($1, 0, 8) returns only 7 characters in awk (positions are
# 1-based), truncating the 8-digit start address; substr($1, 1, 8) is the
# correct call.
# NOTE(review): assumes 32-bit (8 hex digit) addresses — confirm for 64-bit.
awk 'BEGIN{i=0} {print i " " strtonum("0x"substr($1, 1, 8)) " " $6; i++}' all_so.tmp.log > baseaddr_so.tmp.log

# .text section file offset of each library (objdump size column).
awk '{system("objdump -h " $3 "| grep text");}' baseaddr_so.tmp.log | \
	awk 'BEGIN{i=0}{print i " " strtonum("0x" $4); i++}' > offset.tmp.log

# Join on the index and emit: add-symbol-file <path> <base + .text offset> y
join offset.tmp.log baseaddr_so.tmp.log > offset_baseaddr_so.tmp.log
awk '{printf("add-symbol-file %s 0x%x y ", $4, $2 + $3)}' offset_baseaddr_so.tmp.log

rm -f *.tmp.log
set +x
| true |
a22e8b0d71abccbf40331411db880eb4257fb667 | Shell | Krishan300/Website | /WebTeamwork/src/main/resources/dev/deploy.sh | UTF-8 | 1,653 | 2.59375 | 3 | [] | no_license | # Erase everything that was in the web folder
# Start from a clean web output directory.
rm -rf ../web/*
# All third-party runtime dependencies live under web/lib.
mkdir ../web/lib

# Copy each vendored library file verbatim.
for lib in \
    node_modules/bootstrap/dist/css/bootstrap.min.css \
    node_modules/bootstrap/dist/js/bootstrap.min.js \
    node_modules/jquery/dist/jquery.min.js \
    node_modules/handlebars/dist/handlebars.min.js \
    node_modules/js-cookie/src/js.cookie.js
do
    cp "$lib" ../web/lib
done

# Bootstrap's glyph fonts.
cp -R node_modules/bootstrap/fonts/ ../web/

# Bundle the stylesheets into one file (concatenation order matters for
# the CSS cascade).
cat article.css content.css index.css nav.css profile.css welcome.css > ../web/phase2web.css

# Compile all TypeScript sources into a single bundle.
# NOTE: index.ts must be listed last.
node_modules/typescript/bin/tsc --outFile ../web/phase2web.js content.ts nav.ts welcome.ts article.ts profile.ts index.ts

# Precompile the Handlebars templates, appending them in order.
for tpl in content nav welcome article profile; do
    node_modules/handlebars/bin/handlebars "${tpl}.hb" >> ../web/templates.js
done

# Publish the entry page and mirror dev/web into the backend resources.
cp index.html ../web
cp -r ../dev ../../../../../phase1-backend/src/main/resources/dev
cp -r ../web ../../../../../phase1-backend/src/main/resources/web
### TODO FOR FRONTEND ###
# add comments section
# finish profile page with all posts and comments listed | true |
d35f0801fcbca37bfc546093644b99f4f4316177 | Shell | hstinson/pmu_sync_sampler | /Scripts/DataExperimentScripts/runThreadClassifier.sh | UTF-8 | 3,396 | 2.90625 | 3 | [] | no_license | # Script to perform thread classification
#
#
runThreadClassifier () # Params: $1 - output dir, $2 - test dataset (.arff predictions file)
{
    # Remove stale result files, then run the .NET thread classifier on the
    # given prediction dataset, writing fresh CSV results into $1.
    OutputDir="$1"
    TestDataset="$2"
    ClassifierFileName="$OutputDir/ThreadClassificationResults.csv"
    FamilyFile="$OutputDir/malware_families_threadClassifyResults.csv"

    # BUGFIX: '[ -a file ]' is the obsolete, ambiguous spelling of the
    # existence test (clashes with the deprecated binary AND); use '-e'.
    if [ -e "$ClassifierFileName" ]; then
        rm "$ClassifierFileName"
    fi
    if [ -e "$FamilyFile" ]; then
        rm "$FamilyFile"
    fi

    # Run the classifier. The trailing 5 is a classifier-specific numeric
    # option; its meaning is not documented here — see ThreadClassifier usage.
    echo .
    echo "Thread classification for $TestDataset...."
    echo .
    ../SampleParser/ThreadClassifier/bin/Debug/ThreadClassifier.exe malware_family_map.csv "$TestDataset" "$ClassifierFileName" 5
}
# Run the classifier over every experiment's histogram-based predictions.
# BUGFIX: fixed the "Perfoming" typo in both banners below.
echo "Performing HISTOGRAM Based Classification"
runThreadClassifier "Experiment_1/WekaClassification_HistogramCtx_OutlierSkip/" "Experiment_1/WekaClassification_HistogramCtx_OutlierSkip/test_procInfo_randomForest_histogram_predictions.arff"
runThreadClassifier "Experiment_2/WekaClassification_HistogramCtx_OutlierSkip/" "Experiment_2/WekaClassification_HistogramCtx_OutlierSkip/test_procInfo_randomForest_histogram_predictions.arff"
runThreadClassifier "Experiment_3/WekaClassification_HistogramCtx_OutlierSkip/" "Experiment_3/WekaClassification_HistogramCtx_OutlierSkip/test_procInfo_randomForest_histogram_predictions.arff"
runThreadClassifier "Experiment_4/WekaClassification_HistogramCtx_OutlierSkip/" "Experiment_4/WekaClassification_HistogramCtx_OutlierSkip/test_procInfo_randomForest_histogram_predictions.arff"
runThreadClassifier "Experiment_5/WekaClassification_HistogramCtx_OutlierSkip/" "Experiment_5/WekaClassification_HistogramCtx_OutlierSkip/test_procInfo_randomForest_histogram_predictions.arff"
runThreadClassifier "Experiment_6/WekaClassification_HistogramCtx_OutlierSkip/" "Experiment_6/WekaClassification_HistogramCtx_OutlierSkip/test_procInfo_randomForest_histogram_predictions.arff"
runThreadClassifier "Experiment_7/WekaClassification_HistogramCtx_OutlierSkip/" "Experiment_7/WekaClassification_HistogramCtx_OutlierSkip/test_procInfo_randomForest_histogram_predictions.arff"
echo .
echo .
echo .
# Run the classifier over every experiment's context-switch predictions.
# BUGFIX: this banner previously said "HISTOGRAM" again (copy/paste error);
# these runs use the SumContextSwitch prediction files.
echo "Performing CONTEXT SWITCH Based Classification"
runThreadClassifier "Experiment_1/WekaClassification_SumContextSwitch/" "Experiment_1/WekaClassification_SumContextSwitch/test_procInfo_randomForest_contextSwitch_predictions.arff"
runThreadClassifier "Experiment_2/WekaClassification_SumContextSwitch/" "Experiment_2/WekaClassification_SumContextSwitch/test_procInfo_randomForest_contextSwitch_predictions.arff"
runThreadClassifier "Experiment_3/WekaClassification_SumContextSwitch/" "Experiment_3/WekaClassification_SumContextSwitch/test_procInfo_randomForest_contextSwitch_predictions.arff"
runThreadClassifier "Experiment_4/WekaClassification_SumContextSwitch/" "Experiment_4/WekaClassification_SumContextSwitch/test_procInfo_randomForest_contextSwitch_predictions.arff"
runThreadClassifier "Experiment_5/WekaClassification_SumContextSwitch/" "Experiment_5/WekaClassification_SumContextSwitch/test_procInfo_randomForest_contextSwitch_predictions.arff"
runThreadClassifier "Experiment_6/WekaClassification_SumContextSwitch/" "Experiment_6/WekaClassification_SumContextSwitch/test_procInfo_randomForest_contextSwitch_predictions.arff"
runThreadClassifier "Experiment_7/WekaClassification_SumContextSwitch/" "Experiment_7/WekaClassification_SumContextSwitch/test_procInfo_randomForest_contextSwitch_predictions.arff" | true |
520e78e767de12fc473015b59e12bdff109dc93a | Shell | mikhail-yarosh/todo | /deploy.sh | UTF-8 | 692 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Minikube magic to use its registry: point the local docker client at
# minikube's Docker daemon so the image built below is visible to the cluster.
eval $(minikube docker-env)
docker build -t todo:latest .
# Create secret. Should be placed anywhere, but not in git!
# NOTE(review): the SECRET_KEY literal below IS committed with this script —
# it should come from an external secret store, and the key rotated.
kubectl create secret generic todo-secrets --from-literal=SECRET_KEY='lksdf98wrhkjs88dsf8-324ksdm' --from-literal=DATABASE_NAME='django' \
 --from-literal=DATABASE_HOST='todo-postgresql.default.svc.cluster.local' --from-literal=DATABASE_USER='postgres'
# NOTE(review): this is the deprecated legacy "stable" chart repository URL —
# confirm it still resolves before relying on it.
helm repo add stable https://kubernetes-charts.storage.googleapis.com
helm dependency build
# Helm update: install the release if absent, upgrade it otherwise (-i).
helm upgrade -i todo --set postgresql.postgresqlDatabase=django . -f values.yaml
# Waiting for pods up and running...
sleep 120
# Open service on NodePort
minikube service todo
| true |
f8eeabff300c31e5ecc40c3069d39e8e26552643 | Shell | guogang1984/docker | /DevProjectFiles/ws-bin/v2/mysqlback.sh | UTF-8 | 1,675 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Nightly backup of the jyb-service MySQL database running in the db-mysql
# docker container: dump, gzip, then upload the archive to the backup API.
# Intended to run from cron (see the commented crontab line below).

CUR_ALL_DATE=$(date "+%Y%m%d")
CUR_ALL_TIME=$(date "+%Y%m%d%H%M%S")
DOCKER_DB_NAME=jyb-service
# BUGFIX: removed DB_CMD — it was built from DOCKER_DB_BACK_OPTS *before*
# that variable was defined (so it always expanded to just "mysqldump")
# and it was never used anywhere.

sudo chmod 755 -R ~/DevProjectFiles/ws-back
sudo mkdir -p ~/DevProjectFiles/ws-back/${DOCKER_DB_NAME}/
# Remove any earlier dumps from today before creating a new one. The
# trailing glob must stay unquoted so it expands at rm time.
sudo rm -rf ~/DevProjectFiles/ws-back/${DOCKER_DB_NAME}/"${DOCKER_DB_NAME}_${CUR_ALL_DATE}"*

DOCKER_DB_CID=$(docker ps -f name=db-mysql -q)
DOCKER_DB_BACK_FILE=/DevProjectFiles/ws-back/${DOCKER_DB_NAME}/${DOCKER_DB_NAME}_${CUR_ALL_TIME}.sql
# NOTE(review): credentials on the command line are visible in `ps`;
# consider a --defaults-extra-file inside the container instead.
DOCKER_DB_BACK_OPTS="--user=root --password=root --databases ${DOCKER_DB_NAME}"
DOCKER_DB_BACK_OPTS="-h localhost -P 3306 --complete-insert --extended-insert=false --add-drop-table --skip-opt --result-file=${DOCKER_DB_BACK_FILE} ${DOCKER_DB_BACK_OPTS}"
DOCKER_DB_CMD="mysqldump ${DOCKER_DB_BACK_OPTS}"
echo -e "export db '${DOCKER_DB_NAME}' cmd: " && echo -e "mysqldump ${DOCKER_DB_BACK_OPTS}"

# docker exec — DOCKER_DB_CMD is intentionally unquoted so it word-splits
# into the mysqldump command plus its options.
docker exec -i ${DOCKER_DB_CID} ${DOCKER_DB_CMD} > /dev/null

# Compress the dump and show what is in the backup directory.
sudo gzip ~/DevProjectFiles/ws-back/${DOCKER_DB_NAME}/${DOCKER_DB_NAME}_${CUR_ALL_TIME}.sql
sudo ls -l ~/DevProjectFiles/ws-back/${DOCKER_DB_NAME}/

UPLOAD_URL="http://jiayoubao.hyszapp.cn/jyb-service/web-public-api/funcJyb/sysBackup/upload"
UPLOAD_FILE="@/home/dev/DevProjectFiles/ws-back/${DOCKER_DB_NAME}/${DOCKER_DB_NAME}_${CUR_ALL_TIME}.sql.gz"
UPLOAD_PARAMS="{\"backupName\":\"backup${CUR_ALL_TIME}\",\"state\":1}"

# upload: log the exact curl command, then execute it.
echo -e "curl -i ${UPLOAD_URL} -F \"Filedata=${UPLOAD_FILE}\" -F \"params=${UPLOAD_PARAMS}\""
curl -i ${UPLOAD_URL} -F "Filedata=${UPLOAD_FILE}" -F "params=${UPLOAD_PARAMS}"
# 0 0 * * * /home/dev/DevProjectFiles/ws-bin/v2/mysqlback.sh | true |
9640b9829251ff605a8a93bfc370a3eca6df904e | Shell | alejandro1395/Imputation-Analysis | /COMPARISON/compare_variants.sh | UTF-8 | 2,178 | 2.90625 | 3 | [] | no_license | #!/usr/bin/bash
# Submit one imputation-comparison job per coverage level to the CNAG
# cluster: for each (coverage, chimp, chromosome) combination, generate a
# small job script that runs compare_variants.py and submit it.
module load gcc/4.9.3-gold
module load PYTHON/3.6.3

#VARIABLES
chimp_names="verus-McVean"
chromosomes="22"
coverages="0.006 0.036 0.056 0.076 0.106 0.35"

#OUTDIR FOR ANALYZING HIGH COVERAGE INDIVIDUAL VCFs
OUTDIR="/scratch/devel/avalenzu/Impute_Master_Project/ANALYSIS_sep2018-dec2018_panel58/results/Comparison/"

#INPUTS for chr
# Nested while-read loops iterate over every coverage x chimp x chromosome.
echo $coverages | tr " " "\n" | while read cov;
# NOTE(review): DATA is assigned here but never used below — presumably a
# leftover from an earlier version; verify before removing.
do DATA="/scratch/devel/avalenzu/Impute_Master_Project/ANALYSIS_sep2018-dec2018_panel58/results/Impute_out/Pan_troglodytes_verus-McVean/chr22/down_${cov}/"
SRC="/scratch/devel/avalenzu/Impute_Master_Project/ANALYSIS_sep2018-dec2018_panel58/src/COMPARISON/"
echo $chimp_names | tr " " "\n" | while read chimp_name;
do mkdir -p ${OUTDIR}/Pan_troglodytes_${chimp_name}
echo $chromosomes | tr " " "\n" | while read chr;
# Per-job output/queue/tmp directories.
do mkdir -p ${OUTDIR}/Pan_troglodytes_${chimp_name}/chr${chr}
mkdir -p ${OUTDIR}/Pan_troglodytes_${chimp_name}/chr${chr}/out/
mkdir -p ${OUTDIR}/Pan_troglodytes_${chimp_name}/chr${chr}/qu/
mkdir -p ${OUTDIR}/Pan_troglodytes_${chimp_name}/chr${chr}/tmp/
# NOTE(review): INPUT is hard-coded to the verus-McVean/chr22 path rather
# than being built from ${chimp_name}/${chr} — confirm this is intended.
INPUT=/scratch/devel/avalenzu/Impute_Master_Project/ANALYSIS_sep2018-dec2018_panel58/results/Comparison/Pan_troglodytes_verus-McVean/chr22/
echo $INPUT
name=downs_${cov}
# NOTE(review): sample_name is set but not referenced below.
sample_name="Pan_troglodytes_verus-McVean.variant130"
# Write the per-coverage job script; the quoted block below becomes the
# contents of the generated .sh file (shell variables expand at write time).
echo "#!/bin/bash
module purge
module load gcc/4.9.3-gold
module load PYTHON/3.6.3
#MAIN SCRIPT
#create sample_file
python ${SRC}compare_variants.py \
${INPUT}FILTER_snp_ref_info.gz \
${OUTDIR}/Pan_troglodytes_${chimp_name}/chr${chr}/genotypes/filtered_filtered_panel_genotype_${cov} \
${OUTDIR}/Pan_troglodytes_${chimp_name}/chr${chr}/comparison_files/filtered_filtered_panel_comparison_${cov}.txt" > ${OUTDIR}Pan_troglodytes_${chimp_name}/chr${chr}/qu/filtered_filtered_panel_compare_${cov}.sh
# Mark the generated script executable and submit it to the cluster
# (4 CPUs, 1 task, 5 h walltime).
jobname=$(echo ${OUTDIR}Pan_troglodytes_${chimp_name}/chr${chr}/qu/filtered_filtered_panel_compare_${cov}.sh)
chmod 777 $jobname
/scratch/devel/avalenzu/CNAG_interface/submit.py -c ${jobname} \
-o ${OUTDIR}Pan_troglodytes_${chimp_name}/chr${chr}/out/${name}.out \
-e ${OUTDIR}Pan_troglodytes_${chimp_name}/chr${chr}/out/${name}.err \
-n ${name} -u 4 -t 1 -w 05:00:00
done; done; done;
| true |
5248bb2fee4cf51fcf12b9b335575c1df15cbb7c | Shell | jmenchacavr/script-tut | /shell/f00.loop.sh | UTF-8 | 126 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# while construction: count down from 10 to 1, printing each value.
count=10
while [ "$count" -gt 0 ]; do
  echo "\$count is $count"
  count=$((count - 1))
done
fe72fcbf7df438218784296b62759f981f1996f0 | Shell | marmitesandwich/git-diff-grep | /git-diff-grep | UTF-8 | 518 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Usage: git-diff-grep <revs-number> <query>
# Example: "git-diff-grep 10 login_required" will grep for
# login_required in the last 10 commits diffs
#
# NOTE(review): `git diff $rev` compares <rev> against the working tree,
# not the changes introduced *by* that commit; if the latter was intended,
# `git show $rev` would be the usual command — confirm before changing.

git log --pretty=oneline "-$1" | awk '{print $1}' |
while read -r rev; do
	# Does the query appear anywhere in the diff for this revision?
	if git diff "$rev" | grep -q -- "$2"
	then
	echo "    commit $rev"
	git show -s --format="    Author: %cn <%cE>" "$rev"
	git show -s --format="    Date: %cd" "$rev"
	git show -s --format="     %s" "$rev"
	echo
	git diff "$rev" | grep -- "$2"
	echo
	echo
	fi
done
| true |
b016a03c1a0349d4bad7460aef1ce60cfc54089a | Shell | DemautTeam/demaut | /config/jenkins/deployDemautCyber.sh | UTF-8 | 2,851 | 3.328125 | 3 | [] | no_license | #!/bin/sh
# Jenkins deployment script: push the demaut bundle to the cyber test
# server, refresh the Tomcat context configuration and restart Tomcat.
# Expects $WORKSPACE to be set by Jenkins.
current_dir="$(pwd)"
script_dir="$(dirname $0)"
echo "Current dir $current_dir"
echo "Script dir $script_dir"
whoami
workspaceContent=`ls ${WORKSPACE}`
configContent=`ls -al "${WORKSPACE}/config/jenkins/"`
echo Lister workspace : $workspaceContent
echo Lister config : $configContent
#if [ $script_dir = '.' ]
#then
#script_dir=$current_dir
#fi

# Project layout and remote target paths.
projectFolderName=demaut-cyber
projectConfigName=application
tomcatConfigName=demaut-cyber.xml
projectBasedir=${WORKSPACE}/demaut-project/$projectFolderName
# deploy to demo
component=demaut
pathServer=/ccv/data/dsi_cyber/demautIN
remoteBin=$pathServer
remoteBaseApp=$pathServer/app/demaut
#remoteDeploy=$pathServer/app/demaut/deployment
#remoteConfig=$pathServer/app/demaut/config
remoteTomcatConfig=$pathServer/conf/Catalina/localhost
remoteServer=dsi_cyber@slv2395t.etat-de-vaud.ch

# Works around the SSH host-key security prompt (was: "contourne l'alerte
# de sécurité de SSH").
chmod 600 ${WORKSPACE}/config/jenkins/id.rsa.jenkins
sshOptions="-o StrictHostKeyChecking=no -i ${WORKSPACE}/config/jenkins/id.rsa.jenkins"

# Exactly one .tar.gz bundle must exist in target/, otherwise abort.
echo Rechercher bundle à déployer $component : `ls $projectBasedir/target/$component*.tar.gz`
bundlecount=`ls -1 $projectBasedir/target/$component*.tar.gz | wc -l`
bundleName=`ls $projectBasedir/target/$component*.tar.gz`
if [ -f $projectBasedir/target/$component*.tar.gz ] && [ $bundlecount -eq 1 ]
then
echo Nouveau bundle à déployer: `ls $projectBasedir/target/$component*.tar.gz`
else
echo "Pas de bundle à déployer dans target. Veuillez compiler le projet"
exit 0
fi

echo "WARNING : You should copy jenkins public key to 'ssh-copy-id $remoteServer' or enter $remoteServer server password!"

# Stop Tomcat, clean its work dirs, replace the bundle, push the context
# file, then start Tomcat again and check its status. The sleeps give the
# container time to shut down / come up between steps.
echo "Stop du container Tomcat..."
ssh $sshOptions $remoteServer $remoteBin/tomcatctl.sh stop
echo "Stop du container Tomcat terminé"
echo "waiting 5s....."
sleep 5
echo "Cleaning Tomcat's container..."
ssh $sshOptions $remoteServer $remoteBin/tomcatctl.sh clean
echo "Cleaning Tomcat's container, done..."
echo "Suppression de l'ancien bundle..."
ssh $sshOptions $remoteServer rm -rf $remoteBaseApp/$component*
echo "Copie de la nouvelle version..."
scp $sshOptions $bundleName $remoteServer:$remoteBaseApp
echo "Décompression de la nouvelle version..."
ssh $sshOptions $remoteServer tar -xzvf $remoteBaseApp/$component*.tar.gz -C $remoteBaseApp
echo "Mise à jour du bundle terminée"
echo "Mise à jour du fichier de configuration tomcat sur $remoteServer:$remoteTomcatConfig..."
scp $sshOptions "$projectBasedir/tomcat/$tomcatConfigName.xml" $remoteServer:$remoteTomcatConfig
echo "Mise à jour du fichier de configuration tomcat terminée"
echo "Start du tomcat cyber..."
ssh $sshOptions $remoteServer $remoteBin/tomcatctl.sh start
echo "Start du tomcat cyber terminé"
echo "waiting 5s....."
sleep 5
echo "Status du tomcat cyber..."
ssh $sshOptions $remoteServer $remoteBin/tomcatctl.sh check
| true |
27e0692cf1eed8fb2add829d3e7682ce45c9277e | Shell | JonathanLalou/FlaviusJosepheEpub | /build.sh | UTF-8 | 448 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env bash
rm -rf *.epub
cd src

# Timestamp baked into the output filename.
stamp=$(date +%Y%m%d-%H%M)
target="../La_Guerre_des_Judeens-Flavius_Josephe-FR-GR-2019-${stamp}.epub"

# The EPUB container requires "mimetype" as the first entry, stored
# uncompressed (-0) with no extra attributes (-X); -r is harmless on a
# single file.
zip -0 -X -r "$target" mimetype

# Remaining content: -9 best compression, -ll converts LF to CR LF.
zip -9 -ll -r "$target" META-INF OEBPS
cd .. | true |
2b412ea4f2ebc6d204e7cc6491951996b6fa55d9 | Shell | PeerJ/check-mk-agent-plugins | /redis | UTF-8 | 542 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# check_mk agent plugin: report whether a Redis server accepts connections
# on localhost:6379, in check_mk local-check format:
#   "<status> redis status=<status> <message>"
while getopts "h" opt; do
  case $opt in
    h)
      echo "Usage: $0 will alert if unable to connect to redis on localhost"
      exit 3
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      ;;
  esac
done

# Send "quit" and count "+OK" replies; a healthy Redis answers exactly one.
# grep -c replaces the old `grep | wc -l` pipeline; -w 1 keeps nc from
# hanging on a dead port.
OK_FOUND=$(echo "quit" | nc -t -w 1 localhost 6379 | grep -c "+OK")

STATUS=3
MSG="UNKNOWN"
if [ "$OK_FOUND" != "1" ]; then
  STATUS=2
  MSG="CRITICAL - Unable to connect to redis on localhost"
else
  STATUS=0
  MSG="OK - Able to connect to redis on localhost"
fi

echo "$STATUS redis status=$STATUS $MSG"
| true |
20fe5b635fbe9367ced9add921dc944ecc263419 | Shell | luis-caldas/mydesktop | /programs/local/notifications/popup-volume.bash | UTF-8 | 1,467 | 3.765625 | 4 | [] | no_license | #!/usr/bin/env bash
# Notification id: dunstify -r reuses this id so each new volume popup
# replaces the previous one instead of stacking.
POPUP_ID="30082"
# Get our folder (absolute path of the directory containing this script).
folder_now="$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")"
# Get volume from a given sink.
# NOTE(review): no sink argument is actually read — pamixer reports the
# default sink's volume as an integer percentage; confirm naming intent.
function get_sink_vol() {
pamixer --get-volume
}
# Get muted state from a given sink ("true"/"false" on stdout).
# NOTE(review): queries the default sink; no sink argument is read.
function get_sink_muted() {
pamixer --get-mute
}
# Create icon name: delegates to generate-icon.bash next to this script,
# passing "audio-volume-<level>.svg" ($1 = level name), and prints the
# resulting icon path.
function gen_icon_name () {
"${folder_now}/generate-icon.bash" "audio-volume-${1}.svg"
}
# Create the notification function.
# $1 icon level name, $2 summary, $3 body text, $4 volume value.
# -h int:value:N is dunst's progress-bar hint; -r reuses POPUP_ID so the
# previous popup is replaced; -t 1500 auto-dismisses after 1.5 s.
function notfy() {
dunstify -i "$(gen_icon_name "${1}")" "${2}" "\n${3}" -h "int:value:${4}" -r "${POPUP_ID}" -t 1500
}
function main() {
    # Query current state and show a volume popup via dunst.
    #
    # BUGFIX: removed `default_sink=$(get_sink)` — no get_sink function is
    # defined (or sourced) anywhere in this script, so the call always
    # failed with "command not found" and the value was never used.

    # Current volume (percent) and mute state as reported by pamixer.
    vol_sink=$(get_sink_vol)
    muted_sink=$(get_sink_muted)

    # Check if it is muted.
    if [ "$muted_sink" == "true" ]; then
        notfy "muted" "Volume" "Muted - ${vol_sink} %" "${vol_sink}"
    else
        # Clamp negative readings to zero.
        [ "$vol_sink" -lt 0 ] && vol_sink=0

        # Pick the icon bucket for the current level.
        icon_setted="low"
        if [ "$vol_sink" -gt 100 ]; then
            icon_setted="overamplified"
        elif [ "$vol_sink" -gt 0 ]; then
            icons=( "${icon_setted}" "medium" "high" )
            # Map 1..100 onto index 0..2 (awk int() truncates).
            icon_ratio=$(awk -v n="${vol_sink}" -v m="${#icons[@]}" 'BEGIN{print int( m * ( n / 100 ) )}')
            # vol == 100 maps to 3; clamp to the last bucket.
            [ "$icon_ratio" -ge 3 ] && icon_ratio=2
            icon_setted="${icons[icon_ratio]}"
        fi
        # Send the notification.
        notfy "${icon_setted}" "Volume" "${vol_sink} %" "${vol_sink}"
    fi
}
main "$@"
| true |
85059b82b69db0d2d64c8ad1c2e9013eb29de381 | Shell | Azure/azure-cli | /scripts/ci/a01/docker_app/prepare_pod | UTF-8 | 618 | 3.203125 | 3 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Only live test runs require an authenticated Azure CLI session.
if [ "$AZURE_TEST_RUN_LIVE" != "True" ]; then
    echo "Environment variable AZURE_TEST_RUN_LIVE is NOT True."
    exit 0
fi

echo "Environment variable AZURE_TEST_RUN_LIVE is True. Login azure with service principal."

# Abort with the given message when a required credential value is empty.
# $1 - value to check, $2 - error message for stderr.
require_value() {
    if [ -z "$1" ]; then
        echo "$2" >&2
        exit 1
    fi
}

require_value "$A01_SP_USERNAME" "Missing service principal username."
require_value "$A01_SP_PASSWORD" "Missing service principal password."
require_value "$A01_SP_TENANT" "Missing service principal tenant."
az login --service-principal -u $A01_SP_USERNAME -p $A01_SP_PASSWORD -t $A01_SP_TENANT | true |
bcdb0b3c598e50f8c710168a44ff4a639f9de468 | Shell | 1984not-GmbH/molch | /run-ci.sh | UTF-8 | 545 | 3.4375 | 3 | [
"ISC",
"MIT",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Run every CI script from the repository root; collect the names of the
# ones that fail and report them at the end.
script_dir=$(dirname "$0")
cd "$script_dir" || exit 1

ci_scripts=("release.sh" "ci/test.sh" "ci/clang.sh" "ci/clang-tidy.sh" "ci/static-analysis.sh" "ci/sanitizers.sh" "ci/doxygen.sh")

failed=""
for script in "${ci_scripts[@]}"; do
    echo "$script"
    # Append the script name (semicolon-separated) when it exits non-zero.
    "./$script" || failed="${failed}${script};"
done

if [ -z "$failed" ]; then
    exit 0
fi

echo "Failed tests: $failed"
exit 1
| true |
d238713eaa8b12c095048afbde1f2cece7eafcf3 | Shell | microsoft/ContextualSP | /adaptershare/scripts/adapter_diff_train.sh | UTF-8 | 2,308 | 3.359375 | 3 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Fine-tune adapters for NLU downstream tasks: parse CLI flags, derive
# output locations, then launch adapter_diff_train.py on GPU 0.

usage() {
    echo "Usage: ${0} [-g|--gpu_num] [-o|--output_dir] [-m|--model_dir] [-tr|--train_datasets] [-te|--test_datasets] [-ls|--log_step] [-ss|--save_step]" 1>&2
    exit 1
}

# Manual long/short option parsing; every option consumes its value.
while [ $# -gt 0 ]
do
    key=${1}
    case ${key} in
        -g|--gpu_num)
            GPU_NUM=${2}
            shift 2
            ;;
        -o|--output_dir)
            OUT_DIR=${2}
            shift 2
            ;;
        -m|--model_dir)
            M_DIR=${2}
            shift 2
            ;;
        -tr|--train_datasets)
            TRAIN=${2}
            shift 2
            ;;
        -te|--test_datasets)
            TEST=${2}
            shift 2
            ;;
        -ls|--log_step)
            LOG_STEP=${2}
            shift 2
            ;;
        -ss|--save_step)
            SAVE_STEP=${2}
            shift 2
            ;;
        *)
            usage
            shift
            ;;
    esac
done

# Number of GPUs defaults to 1.
# NOTE(review): N_GPUS is currently not forwarded to the python command —
# confirm whether it should be.
N_GPUS="1"
if [ ! -z "$GPU_NUM" ]; then
    N_GPUS=$GPU_NUM
fi
# BUGFIX: removed the four no-op blocks that reassigned TRAIN, TEST,
# LOG_STEP and SAVE_STEP to themselves; they had no effect.

NOW=$(date +"%Y%m%d%H%M")

# Default output location is derived from the training dataset name,
# overridable with -o/--output_dir.
ADAPTER_DIR="/mnt/chenzhi/checkpoints/nlu_downstream/adapterdiff/${TRAIN}"
OUTPUT_DIR=$ADAPTER_DIR
if [ ! -z "$OUT_DIR" ]; then
    OUTPUT_DIR=$OUT_DIR
fi

MODEL_DIR="bert-large-uncased"
if [ ! -z "$M_DIR" ]; then
    MODEL_DIR=$M_DIR
fi

# NOTE(review): RUN_NAME is never assigned anywhere in this script, so this
# prints an empty name — confirm what it was meant to show.
echo "Run Name: $RUN_NAME"
echo "Model Dir:" $MODEL_DIR
echo "Output Dir:" $OUTPUT_DIR

# Build the training command line as an array so values containing spaces
# stay intact (the original built one big string and relied on word
# splitting).
Run_Command_Args=(
    --init_checkpoint "$MODEL_DIR"
    --train_datasets "${TRAIN}"
    --test_datasets "${TEST}"
    --log_per_updates "$LOG_STEP"
    --save_per_updates_on true
    --save_per_updates "$SAVE_STEP"
    --epochs 10
    --batch_size 8
    --batch_size_eval 8
    --grad_accumulation_step 2
    --output_dir "$OUTPUT_DIR"
    --adapter_cache_path "$OUTPUT_DIR"
    --min_intra_simiarity 2
    --max_interference_degree 0
)

echo "${Run_Command_Args[@]}"
CUDA_VISIBLE_DEVICES=0 python adapter_diff_train.py "${Run_Command_Args[@]}"
454b2fc98ae09be7b6e200e5ed4daea830123a56 | Shell | ps3dev/ps3toolchain | /scripts/005-binutils-SPU.sh | UTF-8 | 1,373 | 3.5625 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh -e
# binutils-SPU.sh by Naomi Peori (naomi@peori.ca)
#
# Download, patch, configure and build binutils 2.22 for the PS3 SPU
# target, installing under $PS3DEV/spu. Runs under `sh -e` (see shebang),
# so any failing command aborts the build.

BINUTILS="binutils-2.22"

if [ ! -d "${BINUTILS}" ]; then

  ## Download the source code.
  if [ ! -f "${BINUTILS}.tar.bz2" ]; then wget --continue "https://ftp.gnu.org/gnu/binutils/${BINUTILS}.tar.bz2"; fi

  ## Download an up-to-date config.guess and config.sub
  if [ ! -f config.guess ]; then wget --continue https://git.savannah.gnu.org/cgit/config.git/plain/config.guess; fi
  if [ ! -f config.sub ]; then wget --continue https://git.savannah.gnu.org/cgit/config.git/plain/config.sub; fi

  ## Unpack the source code.
  tar xvjf "${BINUTILS}.tar.bz2"

  ## Patch the source code (redirect instead of a useless `cat | patch`).
  patch -p1 -d "${BINUTILS}" < "../patches/${BINUTILS}-PS3.patch"

  ## Replace config.guess and config.sub
  cp config.guess config.sub "${BINUTILS}"

fi

if [ ! -d "${BINUTILS}/build-spu" ]; then
  ## Create the build directory.
  mkdir "${BINUTILS}/build-spu"
fi

## Enter the build directory (explicit guard in case -e is not in effect).
cd "${BINUTILS}/build-spu" || exit 1

## Configure the build.
../configure --prefix="$PS3DEV/spu" --target="spu" \
  --disable-nls \
  --disable-shared \
  --disable-debug \
  --disable-dependency-tracking \
  --disable-werror \
  --with-gcc \
  --with-gnu-as \
  --with-gnu-ld

## Compile and install. Fall back to 4 parallel jobs when nproc is
## unavailable.
## BUGFIX: stderr is no longer captured into PROCS (2>&1 -> 2>/dev/null),
## and the unquoted, possibly-unset `[ ! -z $ret ]` test is replaced with
## a quoted default-expansion test.
PROCS="$(nproc --all 2>/dev/null)" || ret=$?
if [ -n "${ret:-}" ]; then PROCS=4; fi
${MAKE:-make} -j "$PROCS" && ${MAKE:-make} libdir=host-libs/lib install
| true |
91e016441fda4605160f51c3f56c33eee2c1f534 | Shell | carlos-ch/alvtime | /scripts/setup.sh | UTF-8 | 1,370 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env bash
set -e

# Fetch all alvtime deployment secrets for the requested environment
# (test/prod) from the matching Azure key vault.
SHORT_HASH=$(git rev-parse --short=7 HEAD)

# Normalise the requested environment name to lower case.
ENV=$(echo "$1" | awk '{print tolower($0)}')

PROJECT="alvtime"
KEY_VAULT="$PROJECT$ENV"
CONTAINER_REGISTRY=alvk8sclustertestacr

# Environment-specific resource group / AKS cluster names.
case "$ENV" in
    test)
        RESOURCE_GROUP_NAME="k8scluster-test-rg"
        KUBERNETES_CLUSTER_NAME="k8scluster-test-aks"
        ;;
    prod)
        RESOURCE_GROUP_NAME="rg-alvtime-prod-westeurope"
        KUBERNETES_CLUSTER_NAME="aks-alvtime-prod-westeurope"
        ;;
esac

# Fetch a single secret value from the environment's key vault.
function getSecret() {
    az keyvault secret show --vault-name $KEY_VAULT --name $1 | jq '.value' -r
}

echo "Getting secrets from key vault $KEY_VAULT..."

HOSTNAME="$(getSecret alvtime-hostname)"
REPORT_USER_PERSONAL_ACCESS_TOKEN="$(getSecret report-user-personal-access-token)"
SLACK_ADMIN_USERS="$(getSecret slack-admin-users)"
SLACK_BOT_TOKEN="$(getSecret slack-bot-token)"
SLACK_SIGNING_SECRET="$(getSecret slack-signing-secret)"
SP_ALVTIME_AUTH_SLACK_APP_SECRET="$(getSecret sp-alvtime-auth-slack-app-secret)"
MONGO_DB_ENCRYPTION_KEY="$(getSecret mongo-db-encryption-key)"
MONGO_DB_CONNECTION_STRING="$(getSecret mongo-db-connection-string)"
MONGO_DB_PRIMARY_KEY="$(getSecret mongo-db-primary-key)"
SQL_CONNECTION_STRING="$(getSecret sql-connection-string)"
SP_ALVTIME_ADMIN_CLIENT_ID="$(getSecret sp-alvtime-admin-client-id)"
SP_ALVTIME_ADMIN_RBAC_SECRET="$(getSecret sp-alvtime-admin-rbac-secret)"
9220a48a5d7e8c792f321881498d6fffb363b216 | Shell | hiulit/RetroPie-Shell-Script-Boilerplate | /utils/dialogs.sh | UTF-8 | 8,012 | 3.921875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# dialogs.sh
#
# Shared dialog(1) wrappers. Meant to be sourced; SCRIPT_TITLE and
# SCRIPT_VERSION are presumably defined by the sourcing script — confirm
# before using this file standalone.

# Variables ############################################

DIALOG_BACKTITLE="$SCRIPT_TITLE (v$SCRIPT_VERSION)" # Change this text to your liking.
readonly DIALOG_HEIGHT=20 # Change this value to your liking.
readonly DIALOG_WIDTH=60 # Change this value to your liking.

# Exit status codes returned by dialog(1).
readonly DIALOG_OK=0
readonly DIALOG_CANCEL=1
readonly DIALOG_HELP=2
readonly DIALOG_EXTRA=3
readonly DIALOG_ESC=255
# Functions ###########################################
function show_dialog_example() {
    # Print a usage example for the dialog helper that called us.
    # FUNCNAME[1] is the *caller's* function name, so each dialog_* helper
    # can invoke this without passing its own name.
    local dialog="${FUNCNAME[1]}"
    echo
    # NOTE(review): `underline` is not defined in this file — presumably
    # provided by the sourcing script; confirm.
    underline "Example of usage ($dialog):"
    case "$dialog" in
        "dialog_infobox")
            echo 'dialog_infobox "title" "message" ["height" "width"]'
            echo
            echo '"title" can be left empty, like this: "".'
            ;;
        "dialog_yesno")
            echo 'dialog_yesno "title" "message" ["height" "width"]'
            echo
            echo '"title" can be left empty, like this: "".'
            ;;
        "dialog_msgbox")
            echo 'dialog_msgbox "title" "message" ["height" "width"]'
            echo
            echo '"title" can be left empty, like this: "".'
            ;;
        "dialog_menu")
            echo 'options=('
            echo '   "1" "Option 1" #"Help message 1"'
            echo '   "2" "Option 2" #"Help message 2"'
            echo '   "N" "Option N" #"Help message N"'
            echo ')'
            echo
            echo 'dialog_menu [-h/b] "Text describing the options." "${options[@]}"'
            echo
            echo '-h: Add help messages.'
            echo '-b: Add back button.'
            ;;
        *)
            # Fallback for helpers without a documented example.
            echo "There is no example for this dialog."
            ;;
    esac
    echo
}
# Dialogs #############################################
# An info dialog box.
#
# Example
# -------
# dialog_infobox "title" "message" ["height" "width"]
#
# "title" can be left empty, like this: "".
#
function dialog_infobox() {
    # Show a transient information box (no buttons).
    # $1 - title (may be ""), $2 - message (required),
    # $3/$4 - optional height/width overrides.
    local box_title="$1"
    local box_message="$2"
    local box_height="${3:-8}"               # default height, tweak to taste
    local box_width="${4:-$DIALOG_WIDTH}"

    if [[ -z "$box_message" ]]; then
        show_error "'${FUNCNAME[0]}' needs a \"message\" as an argument!"
        show_dialog_example
        exit 1
    fi

    dialog --backtitle "$DIALOG_BACKTITLE" \
        --title "$box_title" \
        --infobox "$box_message" "$box_height" "$box_width" 2>&1 >/dev/tty
}
# A message dialog box.
#
# Example
# -------
# dialog_msgbox "title" "message" ["height" "width"]
#
# "title" can be left empty, like this: "".
#
function dialog_msgbox() {
    # Show a message box with a single OK button.
    # $1 - title (may be ""), $2 - message (required),
    # $3/$4 - optional height/width overrides.
    local box_title="$1"
    local box_message="$2"
    local box_height="${3:-8}"               # default height, tweak to taste
    local box_width="${4:-$DIALOG_WIDTH}"

    if [[ -z "$box_message" ]]; then
        show_error "'${FUNCNAME[0]}' needs a \"message\" as an argument!"
        show_dialog_example
        exit 1
    fi

    dialog --backtitle "$DIALOG_BACKTITLE" \
        --title "$box_title" \
        --ok-label "OK" \
        --msgbox "$box_message" "$box_height" "$box_width" 2>&1 >/dev/tty
}
# A yes/no dialog box.
#
# Example
# -------
# dialog_yesno "title" "message" ["height" "width"]
#
# "title" can be left empty, like this: "".
#
function dialog_yesno() {
    # Show a yes/no question box; the caller inspects dialog's exit status.
    # $1 - title (may be ""), $2 - message (required),
    # $3/$4 - optional height/width overrides.
    local box_title="$1"
    local box_message="$2"
    local box_height="${3:-8}"               # default height, tweak to taste
    local box_width="${4:-$DIALOG_WIDTH}"

    if [[ -z "$box_message" ]]; then
        show_error "'${FUNCNAME[0]}' needs a \"message\" as an argument!"
        show_dialog_example
        exit 1
    fi

    dialog --backtitle "$DIALOG_BACKTITLE" \
        --title "$box_title" \
        --yes-label "Yes" \
        --no-label "No" \
        --yesno "$box_message" "$box_height" "$box_width" 2>&1 >/dev/tty
}
# A menu dialog box.
#
# Example
# -------
# options=(
# "1" "Option 1" #"Help message 1"
# "2" "Option 2" #"Help message 2"
# "N" "Option N" #"Help message N"
# )
#
# dialog_menu [-h/b] "Text describing the options." "${options[@]}"
#
# -h: Add help messages.
# -b: Add back button.
#
function dialog_menu() {
local BACK=0
local HELP=0
# Check if the first argument starts with a hypen '-'.
if [[ "$1" =~ ^-.* ]]; then
if [[ "$1" =~ "b" ]]; then
BACK=1
fi
if [[ "$1" =~ "h" ]]; then
HELP=1
fi
shift
fi
local description_text="$1"
shift
# Get the options passed as arguments.
local argument_options=("$@")
if [[ -z "$argument_options" ]]; then
show_error "\"options\" is empty."
show_dialog_example
exit 1
fi
# Create a new options array.
local options=()
local option
# Rebuild the options array with the arguments.
for option in "${argument_options[@]}"; do
options+=("$option")
done
local cmd
local choice
cmd=(dialog \
--backtitle "$DIALOG_BACKTITLE" \
--title "$SCRIPT_TITLE" \
--ok-label "OK" \
--cancel-label "Exit" \
--menu "$description_text\n\nChoose an option." "$DIALOG_HEIGHT" "$DIALOG_WIDTH" "$((${#options[@]} / 2))")
if [[ "$BACK" -eq 1 ]]; then
# Insert the back button properties just before '--menu'.
cmd=("${cmd[@]:0:9}" "--extra-button" "${cmd[@]:9}")
cmd=("${cmd[@]:0:10}" "--extra-label" "${cmd[@]:10}")
cmd=("${cmd[@]:0:11}" "Back" "${cmd[@]:11}")
fi
if [[ "$HELP" -eq 1 ]]; then
# The options number must be divisible by 3.
if (( "${#options[@]}" % 3 != 0 )); then
show_error "There's at least 1 help message missing on the options passed."
exit 1
fi
# Check if the back button is enabled to
# modify the index position of the help dialog property.
local index_position=9
if [[ "$BACK" -eq 1 ]]; then
index_position=12
fi
# Insert '--item-help' just before '--menu'.
cmd=("${cmd[@]:0:$index_position}" "--item-help" "${cmd[@]:$index_position}")
# Adjust the menu height.
local last_array_element="$((${#cmd[@]} - 1))"
cmd["$last_array_element"]="$((${#options[@]} / 3))"
else
# The options number must be divisible by 2.
if (( "${#options[@]}" % 2 != 0 )); then
show_error "It seems like there are help messages passed but the '-h' argument is missing."
exit 1
fi
fi
choice="$("${cmd[@]}" "${options[@]}" 2>&1 >/dev/tty)"
local return_value="$?"
# "OK" button.
if [[ "$return_value" -eq "$DIALOG_OK" ]]; then
if [[ -n "$choice" ]]; then
# Add as many cases as options.
case "$choice" in
"1")
# Call some function or do something.
echo "You chose 'Option "$choice"'."
;;
"2")
# Call some function or do something.
echo "You chose 'Option "$choice"'."
;;
"N")
# Call some function or do something.
echo "You chose 'Option "$choice"'."
;;
esac
else
dialog_msgbox "Error!" "Choose an option."
fi
# "BACK" button.
elif [[ "$return_value" -eq "$DIALOG_EXTRA" ]]; then
# Call the previous dialog box.
# Example: i_am_the_previous_dialog
# Remove the 'exit 0' line below. It's only here for test purposes.
exit 0
# "EXIT" button.
elif [[ "$return_value" -eq "$DIALOG_CANCEL" ]]; then
# Exit the dialog box.
exit 0
fi
}
| true |
4d47a85fe92171e07b678510f43ddc4f842dd495 | Shell | MushroomObserver/mushroom-observer | /script/backup_blocked_ips.sh | UTF-8 | 387 | 2.96875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/usr/bin/env bash
if [ ! -d config ]; then
echo Please run this from /var/web/mo.
exit 1
fi
if [ ! -d config/blocked_ips ]; then
mkdir config/blocked_ips
fi
cp -f config/blocked_ips.txt config/blocked_ips/backup-`date +%d`-daily
cp -f config/blocked_ips.txt config/blocked_ips/backup-`date +%m`-monthly
cp -f config/blocked_ips.txt config/blocked_ips/backup-`date +%Y`-yearly
| true |
77c5482ca6b03a09c29309c6352a2fce73cb514a | Shell | munbot/master | /run.sh | UTF-8 | 253 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
set -eu
SRC=${1:-''}
if test '' = "${SRC}"; then
SRC='mb'
else
shift
fi
./build.sh ${SRC}
export MBENV='devel'
export MBENV_CONFIG=${PWD}/env
export MB_CONFIG=${PWD}/_devel/etc
export MB_RUN=${PWD}/_devel/run
exec ./_build/cmd/${SRC}.bin $@
| true |
0d8ff04cf54168770a18782bf13ea2823afd8651 | Shell | Andrea-MG/LyaCoLoRe | /run_process_colore_multi_node.sh | UTF-8 | 3,618 | 2.84375 | 3 | [] | no_license | # specify number of nodes and cores to use
NNODES=64
NCORES=64
TIME="00:20:00" #hh:mm:ss
# specify process parameters
NSIDE=16
IVAR_CUT=1150.0
CELL_SIZE=0.25
LAMBDA_MIN=3550.0
MIN_CAT_Z=1.8
# specify process flags
FLAGS="--add-RSDs --add-DLAs"
# specify details of colore output
COLORE_NGRID=4096
COLORE_NODES=32
R_SMOOTH=2.0
# full path to proces_colore executable (parallel version)
PROCESS_PATH="/global/homes/j/jfarr/Projects/LyaCoLoRe/example_scripts/"
# full path to folder where input will be taken from
INPUT_PATH="/global/cscratch1/sd/jfarr/LyaSkewers/CoLoRe_GAUSS/output_G_hZsmooth_${COLORE_NGRID}_${COLORE_NODES}_sr${R_SMOOTH}_bm1_biasG18_picos/"
echo "input will be taken from "$INPUT_PATH
INPUT_FILES=`ls -1 ${INPUT_PATH}/out_srcs_*.fits`
NFILES=`echo $files | wc -w`
echo "${NFILES} input files have been found"
# full path to folder where output will be written
OUTPUT_PATH="/global/cscratch1/sd/jfarr/LyaSkewers/CoLoRe_GAUSS/process_output_G_hZsmooth_${COLORE_NGRID}_${COLORE_NODES}_sr${R_SMOOTH}_bm1_biasG18_picos_nside${NSIDE}/"
OUTPUT_PATH="/global/cscratch1/sd/jfarr/LyaSkewers/CoLoRe_GAUSS/test_4/"
echo "output will written to "$OUTPUT_PATH
if [ ! -d $OUTPUT_PATH ] ; then
mkdir -p $OUTPUT_PATH
fi
echo "output logs will be saved to "$OUTPUT_PATH"/logs"
if [ ! -d $OUTPUT_PATH/logs ] ; then
mkdir -p $OUTPUT_PATH/logs
fi
# full path to file with tuning sigma_G data
TUNING_PATH="/global/homes/j/jfarr/Projects/LyaCoLoRe/input_files/tune_small_scale_fluctuations.fits"
# we will create this script
RUN_FILE="/global/homes/j/jfarr/Projects/LyaCoLoRe/run_files/process_colore_output_G_hZsmooth_${COLORE_NGRID}_${COLORE_NODES}_sr${R_SMOOTH}_bm1_biasG18_picos.sh"
echo "run file "$RUN_FILE
# make master file and new file structure
date
echo "making master file"
${PROCESS_PATH}/make_master.py --in-dir ${INPUT_PATH} --out-dir ${OUTPUT_PATH} --nside ${NSIDE} --nproc ${NCORES} --min-cat-z ${MIN_CAT_Z}
wait
date
cat > $RUN_FILE <<EOF
#!/bin/bash -l
#SBATCH --partition debug
#SBATCH --nodes ${NNODES}
#SBATCH --time ${TIME}
#SBATCH --job-name process_colore
#SBATCH --error "/global/homes/j/jfarr/Projects/LyaCoLoRe/run_files/process-colore-%j.err"
#SBATCH --output "/global/homes/j/jfarr/Projects/LyaCoLoRe/run_files/process-colore-%j.out"
#SBATCH -C haswell
#SBATCH -A desi
umask 0002
export OMP_NUM_THREADS=64
PIXDIRS=\`\ls -tr1d ${OUTPUT_PATH}/[0-9]*/*\`
NPIXELS=\`echo \$PIXDIRS | wc -w\`
PIXDIRS_list=(\$PIXDIRS)
PIXELS=()
for PIXDIR in \$PIXDIRS ; do
PIX=\${PIXDIR##*/}
PIXELS=("\${PIXELS[@]}" \$PIX)
done
NPIXELS_PER_NODE=\$(( (\$NPIXELS + $NNODES - 1)/$NNODES ))
START_INDEX=0
STOP_INDEX=\$(( \$NPIXELS_PER_NODE - 1 ))
FINAL_NODE=0
for NODE in \`seq $NNODES\` ; do
echo "starting node \$NODE"
NODE_PIXELS=\${PIXELS[@]:\$START_INDEX:\$NPIXELS_PER_NODE}
echo "looking at pixels: \${NODE_PIXELS}"
command="srun -N 1 -n 1 -c ${NCORES} ${PROCESS_PATH}/make_transmission.py --in-dir ${INPUT_PATH} --out-dir ${OUTPUT_PATH} --pixels \${NODE_PIXELS} --tuning-file ${TUNING_PATH} --nside ${NSIDE} --nproc ${NCORES} --IVAR-cut ${IVAR_CUT} --cell-size ${CELL_SIZE} --lambda-min ${LAMBDA_MIN} ${FLAGS}"
echo \$command
\$command >& ${OUTPUT_PATH}/logs/node-\${NODE}.log &
if (( \$FINAL_NODE == 1)) ; then
echo "all pixels allocated, no more nodes needed"
break
fi
START_INDEX=\$(( \$STOP_INDEX + 1 ))
STOP_INDEX=\$(( \$START_INDEX + \$NPIXELS_PER_NODE - 1))
if (( \$STOP_INDEX >= (\$NPIXELS - 1) )) ; then
STOP_INDEX=\$NPIXELS-1
FINAL_NODE=1
fi
done
wait
date
EOF
sbatch $RUN_FILE
| true |
eabb85cfa078a36a37e4a6d60d7e323f2676c83f | Shell | rhiswell/15418 | /assignment2/render/deploy.sh | UTF-8 | 736 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
project_name=render
target_root=~/Workspace/cmu15418/assignment2/$project_name/
host=k4
nvcc_path=/usr/local/cuda-8.0/bin
function init_once
{
ssh $host "mkdir -p $target_root"
}
function build_project
{
tar --exclude="./objs" --exclude="./deploy.sh" -cvf /tmp/$project_name.tar . && \
scp /tmp/$project_name.tar $host:/tmp/ && \
ssh $host "export PATH=$nvcc_path:\$PATH && cd $target_root && \
tar --no-same-owner -xvf /tmp/$project_name.tar && \
make clean && make"
}
case $1 in
init )
init_once
;;
build )
build_project
;;
run )
ssh -X $host "cd $target_root && ./render rgb"
;;
* )
;;
esac
| true |
e8a990a2b56383dcc57a7edf117f0bb2d5b2bbe7 | Shell | syyyn/docker-dynamic | /v2441/docker-entrypoint.sh | UTF-8 | 524 | 3.453125 | 3 | [] | no_license | #!/bin/sh
set -e
if [ $(echo "$1" | cut -c1) = "-" ]; then
echo "$0: assuming arguments for dynamicd"
set -- dynamicd "$@"
fi
if [ $(echo "$1" | cut -c1) = "-" ] || [ "$1" = "dynamicd" ]; then
mkdir -p "$DYNAMIC_DATA"
chmod 700 "$DYNAMIC_DATA"
chown -R dynamic "$DYNAMIC_DATA"
echo "$0: setting data directory to $DYNAMIC_DATA"
set -- "$@" -datadir="$DYNAMIC_DATA"
fi
if [ "$1" = "dynamicd" ] || [ "$1" = "dynamic-cli" ] || [ "$1" = "dynamic-tx" ]; then
echo
exec gosu dynamic "$@"
fi
echo
exec "$@" | true |
bfc16ee690f0ca305712faa327ac2631f43b2794 | Shell | ankalus/cron-on-docker | /scripts/script.sh | UTF-8 | 432 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Dump whole postgres database
DUMP_DIR="/cron/dumps/"
DUMP_PATH=$DUMP_DIR$(date +"%Y-%m-%d-dump.sql")
if [ ! -f $DUMP_PATH ]; then
# Setup database access
export PGPASSWORD=mysecretpassword
export PGHOST=db
export PGUSER=postgres
export PGDATABASE=postgres
pg_dumpall > $DUMP_PATH
if [ $? -eq 0 ]; then
echo "pg_dumpall in:" $DUMP_PATH
else
rm -f $DUMP_PATH
fi
else
echo "found:" $DUMP_PATH
fi
| true |
33f98ad6e6e38f7a5fd266c65b4d4c4e8893d957 | Shell | zchee/zsh-default-completions | /src/Debian/Command/_dpkg_source | UTF-8 | 1,332 | 2.6875 | 3 | [] | no_license | #compdef dpkg-source
_arguments \
'-x[specify source file]:Debian source file:_files -g "*.dsc(-.)"' \
'-b[specify source directory]:Debian source directory:_files -/' \
'-c-[control file]:control file:_files' \
'-l-[changelog file]:changelog file:_files' \
'-F-[changelog format]:changelog format:' \
'-V-[set substitutions variable]:expression:' \
'-T-[alternate variable file]:varlistfile:' \
'-D-[override dsc field]:expression:' \
'-U-[remove a field]:field:' \
'-i-[ignore files in diff]:filter:' \
'-sa[autoselect orig source]' \
'-sk[use packaged orig source - unpack and keep]' \
'-sp[use packaged orig source - unpack and remove]' \
'-su[use unpackaged orig source - unpack and keep]' \
'-sr[use unpackaged orig source - unpack and remove]' \
'-ss[trust packed and unpacked source are the same]' \
'-sn[no diff, do main tarfile only]' \
'-sA[autoselect orig source with overwrite]' \
'-sK[use packaged orig source - unpack and keep with overwrite]' \
'-sP[use packaged orig source - unpack and remove with overwrite]' \
'-sU[use unpackaged orig source - unpack and keep with overwrite]' \
'-sR[use unpackaged orig source - unpack and remove with overwrite]' \
'-sp[leave original source packed in cwd]' \
'-su[unpack original source tree too]' \
'-h[help]'
# vim:ft=zsh
| true |
67d66c0ab5256c7cc8f8c3871b1163fc5d32c211 | Shell | jamiemccarthy/jamie-hpcloud-setup | /barbican/cloud-setup.sh | UTF-8 | 2,557 | 3.578125 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# "Cloud setup"
#
# Set up a new cloud machine to run devstack
USER=ubuntu
JAMIE_SETUP_PROJECT=jamie-hpcloud-setup
HOME_DIR=/home/$USER
SRC_DIR=$HOME_DIR/src
DEVSTACK_DIR=$SRC_DIR/devstack
BARBICAN_DIR=$SRC_DIR/barbican
JAMIE_SETUP_DIR=$SRC_DIR/$JAMIE_SETUP_PROJECT
# Install devstack and start it running
if [ ! -d $DEVSTACK_DIR ]; then
git clone -q http://github.com/openstack-dev/devstack.git $DEVSTACK_DIR
fi
git --git-dir=$DEVSTACK_DIR/.git --work-tree=$DEVSTACK_DIR pull origin master
# My localrc sets all devstack's passwords to "1"
cp -a $JAMIE_SETUP_DIR/barbican/localrc $DEVSTACK_DIR/localrc
cd $DEVSTACK_DIR
./stack.sh > stack.sh.out 2> stack.sh.err
# Reconfigure Keystone to use UUIDs instead of PKI tokens -- less secure,
# but easier to manipulate when testing.
#patch /etc/keystone/keystone.conf < $JAMIE_SETUP_DIR/barbican/patches/keystone.conf || exit 1
# Shut down devstack, then restart just Keystone, per
# <https://github.com/cloudkeep/barbican/wiki/Developer-Guide#running-openstack-keystone-authentication-middleware>
cd $DEVSTACK_DIR
./unstack.sh
/opt/stack/keystone/bin/keystone-all --verbose --debug > ~/keystone.out 2> ~/keystone.err &
# Install barbican, per <https://github.com/cloudkeep/barbican/wiki/Developer-Guide>
sudo apt-get -yqq install python-virtualenv python-pip python-dev libsqlite3-dev libpq-dev
cd $SRC_DIR
git clone https://github.com/stackforge/barbican.git
cd $BARBICAN_DIR
virtualenv .venv
source .venv/bin/activate
export VENV_HOME=$SRC_DIR/barbican
pip install uwsgi || exit 1
pip install -r tools/pip-requires || exit 1
pip install -r tools/test-requires || exit 1
pip install -e . || exit 1
cp -a etc/barbican/barbican-api.conf ~/
sudo mkdir /var/lib/barbican ; sudo chown ubuntu:ubuntu /var/lib/barbican
sudo mkdir /var/log/barbican ; sudo chown ubuntu:ubuntu /var/log/barbican
sudo mkdir /etc/barbican ; sudo chown ubuntu:ubuntu /etc/barbican
cp etc/barbican/barbican-{api,admin}-paste.ini /etc/barbican/
# Patch barbican-api-paste.ini to use Keystone
patch /etc/barbican/barbican-api-paste.ini < $JAMIE_SETUP_DIR/barbican/patches/barbican-api-paste.ini || exit 1
# Start Barbican
cd $BARBICAN_DIR
bin/barbican-all > ~/barbican-all.out 2> ~/barbican-all.err &
# Replace the sample password with the "1" password from our localrc, then
# create a "barbican" user and assign it the "admin" role.
perl -i~ -pe 's/orange/1/' $BARBICAN_DIR/bin/keystone_data.sh
bin/keystone_data.sh
# Just because I find "locate" useful
sudo updatedb
| true |
bffc79da8865e6dccaa3937498303b9bd1e11910 | Shell | satyanarayan-rao/tf_nucleosome_dynamics | /scripts/process_fimo_and_convert_to_bed.sh | UTF-8 | 336 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# $1: fimo.tsv from fimo
# $2: fimo.bed
# skip header and remove all commments
head -1 ${1} > ${1}.header
awk 'NR>1' $1 | grep -v "^#" > ${1}.tmp1
cat ${1}.header ${1}.tmp1 > ${1}.tmp2
python $NGS_SCRIPTS_DIR/fimo2bed.py --fimo ${1}.tmp2 --out $2
# flank the file file
# cleanup
rm ${1}.tmp1 ${1}.tmp2 ${1}.header
| true |
c1aca14fdd3ae79b395e61e279ee759cae477a6e | Shell | outotec/iot-edge | /v1/tools/build_libuv_macos.sh | UTF-8 | 761 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file
# in the project root for full license information.
set -e
build_root=$(cd "$(dirname "$0")/.." && pwd)
build_root=$build_root/build_libuv
# clear the libuv build folder so we have a fresh build
rm -rf $build_root
mkdir -p $build_root
# build libuv
pushd $build_root
git clone https://github.com/libuv/libuv.git
cd libuv
git checkout -b v1.11.0 tags/v1.11.0
./gyp_uv.py -f xcode
xcodebuild -ARCHS="x86_64" -project uv.xcodeproj -configuration Release -target libuv
# Create a 'dist' folder where the includes/libs live
mkdir -p ../dist/include
cp include/*.h ../dist/include/
mkdir -p ../dist/lib
cp build/Release/libuv.a ../dist/lib/
popd
| true |
7e599f4cd2e0133d11c4267ecb72e75163d0ee7a | Shell | 3xitLight/archlinux | /scripts/iptables.sh | UTF-8 | 3,600 | 3.359375 | 3 | [] | no_license | #!/bin/bash
#Ports: Hier eintragen welche Ports geöffnet werden sollen
SERVICES_UDP="" #freigegebene UDP-Ports
SERVICES_TCP="22 80" #freigegebene TCP-Ports (Hier sshd und http)
#Alle vorhandenen Regeln löschen
iptables -F
iptables -t nat -F
iptables -t mangle -F
iptables -X
iptables -t nat -X
iptables -t mangle -X
#Grundregeln
iptables -P OUTPUT ACCEPT
iptables -P INPUT DROP
iptables -P FORWARD DROP
#Sicherheit
iptables -N other_packets #Tabelle "other_packets" erzeugen
iptables -A other_packets -p ALL -m state --state INVALID -j DROP #Kaputte Pakete verwerfen
iptables -A other_packets -p icmp -m limit --limit 1/s -j ACCEPT #ICMP auf max. 1 Paket/Sekunde limitieren
iptables -A other_packets -p ALL -j RETURN #Tabelle "other_packets" verlassen
iptables -N service_sec #Tabelle "services_sec" erzeugen
iptables -A service_sec -p tcp --syn -m limit --limit 2/s -j ACCEPT #SYN-Flood Attacken
iptables -A service_sec -p tcp ! --syn -m state --state NEW -j DROP #TCP-SYN-Pakete ohne Status NEW verwerfen
iptables -A service_sec -p tcp --tcp-flags ALL NONE -m limit --limit 1/h -j ACCEPT #Portscanner ausschalten
iptables -A service_sec -p tcp --tcp-flags ALL ALL -m limit --limit 1/h -j ACCEPT #Portscanner ausschalten
iptables -A service_sec -p ALL -j RETURN #Tabelle "services" verlassen
iptables -N reject_packets #Tabelle "reject_packets" erzeugen
iptables -A reject_packets -p tcp -j REJECT --reject-with tcp-reset #TCP Pakete(Protokoll) zurückweisen
iptables -A reject_packets -p udp -j REJECT --reject-with icmp-port-unreachable #UDP Pakete(Protokoll) zurückweisen
iptables -A reject_packets -p icmp -j REJECT --reject-with icmp-host-unreachable #ICMP Pakete(Protokoll) zurückweisen (bei mehr als 1Paket/Sekunde [s.o.])
iptables -A reject_packets -j REJECT --reject-with icmp-proto-unreachable #Alle anderen Pakete(Protokolle) zurückweisen
iptables -A reject_packets -p ALL -j RETURN #Tabelle "reject_packets" verlassen
#Dienste
iptables -N services #Tabelle für die Dienste erzeugen
for port in $SERVICES_TCP ; do #Für jeden TCP Port (oben definiert) folgendes tun:
iptables -A services -p tcp --dport $port -j service_sec #Bei Verbindungen auf TCP Port "$port in die Tabelle "services_sec" springen
iptables -A services -p tcp --dport $port -j ACCEPT #Bei Verbindungen auf TCP Port "$port Verbindung zulassen
done
for port in $SERVICES_UDP ; do #Für jeden UDP Port (oben definiert) folgendes tun:
iptables -A services -p udp --dport $port -j service_sec #Bei Verbindungen auf UDP Port "$port" in die Tabelle "services_sec" springen
iptables -A services -p udp --dport $port -j ACCEPT #Bei Verbindungen auf UDP Port "$port Verbindung zulassen
done
iptables -A services -p ALL -j RETURN #Tabelle "services" verlassen
#INPUT
iptables -A INPUT -p ALL -i lo -j ACCEPT #Alle Pakete vom Loopback Interface zulassen
iptables -A INPUT -p ALL -m state --state ESTABLISHED,RELATED -j ACCEPT #Bereits vorhandene Verbindungen zulassen
iptables -A INPUT -p ALL -j other_packets #In die Tabelle "other_packets" springen
iptables -A INPUT -p ALL -j services #In die Tabelle "services" gehen
iptables -A INPUT -p ALL -m limit --limit 10/s -j reject_packets #Nicht erlaubte Pakete zurückweisen, max 10Pakete/Sekunde (Tabelle "reject_Packets")
iptables -A INPUT -p ALL -j DROP #Alles andere verwerfen
#OUTPUT:
iptables -A OUTPUT -p ALL -j ACCEPT #Ausgehende Pakete erlauben
#Speichern
iptables-save -f /etc/iptables/iptables.rules
| true |
24829f88547c3286f13d61191ee0b2f863c2dd68 | Shell | mkilgore/dotfiles | /bin/mailcheck.sh | UTF-8 | 873 | 3.4375 | 3 | [] | no_license | #!/bin/bash
read -r pid < ~/.offlineimap/pid
if ps $pid &>/dev/null; then
echo "offlineimap ($pid): another instnce running." >&2
kill -9 $pid
fi
nice -10 offlineimap -o
new_mail=0
maildir=" "
sum_mail=0
mailcheck | grep new | grep INBOX 2>&1 >/dev/null
if [ $? -eq 0 ];
then
maildir=`mailcheck | grep new | grep INBOX | awk '{ print $NF }' \
| sed 's/\// /g' | awk '{ print $NF }' \
| awk '{ printf "%s ", $0 }' | sed 's/ /, /g' \
| sed 's/, $//'`
new_mail=`mailcheck | grep new | grep INBOX | awk '{ print $3 }'`
for N in `echo $new_mail`
do
sum_mail=`expr $sum_mail + $N`
done
if [ $sum_mail -eq 1 ] ;
then
DISPLAY=:0 notify-send -c email.arrived -u normal -i mail_new \
"You have $sum_mail new mail:" "$maildir"
else
DISPLAY=:0 notify-send -c email.arrived -u normal -i mail_new \
"You have $sum_mail new mails:" "$maildir"
fi
fi
| true |
cb57a6ef95d8f7fc4be413504d230760a472000e | Shell | drodsou/denolib | /release.sh | UTF-8 | 386 | 3.890625 | 4 | [] | no_license | #!/bin/bash
if [ $# -eq 0 ]; then
echo "ERROR: parameter tag expected, eg: v1.0.0"
exit 1
fi
# TODO: run tests
read -p "About to commit release $1. Are you sure? (y/N) " -n 1 -r
echo # (optional) move to a new line
if [[ $REPLY =~ ^[Yy]$ ]]; then
# do dangerous stuff
git add -A
git status
git commit -m "release $1"
git tag -a $1 -m "$1"
git push --tags
fi | true |
f6f46b1d873675698997265e9341f433caffb1dd | Shell | degerli/qcc | /src/runall.sh | UTF-8 | 498 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | # Run all .py targets in this directory.
#
# Note that this script uses -c opt on the bazel command-line.
# This can cause problems in some OS'es (MacOS).
#
# The option can be removed, things will just run a little
# slower.
bazel run lib/circuit_test || exit 1
for algo in `ls -1 *.py | sort`
do
if [ "$algo" = "__init__.py" ]; then
continue
fi
testcase=`echo $algo | sed s@\.py@@g`
echo ""
echo "--- [$testcase] ------------------------"
bazel run $@ $testcase || exit 1
done
| true |
583a021f11e1706eab701cf2566c0575d3d08dd2 | Shell | aws/aws-parallelcluster | /util/upload-cli.sh | UTF-8 | 4,147 | 4.3125 | 4 | [
"Python-2.0",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT-0",
"BSD-2-Clause"
] | permissive | #!/bin/bash
_error_exit() {
echo "$1"
exit 1
}
_info() {
echo "INFO: $1"
}
_help() {
local -- _cmd=$(basename "$0")
cat <<EOF
Usage: ${_cmd} [OPTION]...
Copy the AWS ParallelCluster Package to an S3 bucket.
--bucket <bucket> Bucket to upload the package to
--srcdir <src-dir> Root folder of the pcluster project
--profile <aws-profile> AWS profile name to use for the upload
(optional, default is AWS_PROFILE env variable or "default").
--region <aws-region> Region to use for AWSCli commands (optional, default is "us-east-1")
-h, --help Print this help message
EOF
}
main() {
# parse input options
while [ $# -gt 0 ] ; do
case "$1" in
--bucket) _bucket="$2"; shift;;
--bucket=*) _bucket="${1#*=}";;
--srcdir) _srcdir="$2"; shift;;
--srcdir=*) _srcdir="${1#*=}";;
--profile) _profile="$2"; shift;;
--profile=*) _profile="${1#*=}";;
--region) _region="$2"; shift;;
--region=*) _region="${1#*=}";;
-h|--help|help) _help; exit 0;;
*) _help; echo "[error] Unrecognized option '$1'"; exit 1;;
esac
shift
done
# verify required parameters
if [ -z "${_bucket}" ]; then
_error_exit "--bucket parameter not specified"
_help;
fi
if [ -z "${_srcdir}" ]; then
_error_exit "--srcdir parameter not specified"
_help;
fi
# initialize optional parameters
if [ -z "${AWS_PROFILE}" ] && [ -z "${_profile}" ]; then
_info "--profile parameter not specified, using 'default'"
elif [ -n "${_profile}" ]; then
_profile="--profile ${_profile}"
fi
if [ -z "${_region}" ]; then
_info "--region parameter not specified, using 'us-east-1'"
_region="us-east-1"
fi
# check bucket or create it
aws ${_profile} s3api head-bucket --bucket "${_bucket}" --region "${_region}"
if [ $? -ne 0 ]; then
_info "Bucket ${_bucket} does not exist, trying to create it"
aws ${_profile} s3api create-bucket --bucket "${_bucket}" --region "${_region}"
if [ $? -ne 0 ]; then
_error_exit "Unable to create bucket ${_bucket}"
fi
fi
_pcluster_version=$(grep "^VERSION = \"" "${_srcdir}/cli/setup.py" |awk '{print $3}'| tr -d \")
if [ -z "${_pcluster_version}" ]; then
_error_exit "Unable to detect ParallelCluster CLI version, are you in the right directory?"
fi
_info "Detected ParallelCluster CLI version ${_pcluster_version}"
_version=$(grep "^VERSION = \"" "${_srcdir}/awsbatch-cli/setup.py" |awk '{print $3}'| tr -d \")
if [ -z "${_version}" ]; then
_error_exit "Unable to detect ParallelCluster AWS Batch CLI version, are you in the right directory?"
fi
_info "Detected ParallelCluster AWS Batch CLI version ${_version}"
# Create archive
_cwd=$(pwd)
pushd "${_srcdir}" > /dev/null
_stashName=$(git stash create)
git archive --format tar --prefix="aws-parallelcluster-${_pcluster_version}/" "${_stashName:-HEAD}" | gzip > "${_cwd}/aws-parallelcluster-${_pcluster_version}.tgz"
popd > /dev/null
# upload package
_key_path="parallelcluster/${_pcluster_version}/cli"
aws ${_profile} --region "${_region}" s3 cp aws-parallelcluster-${_pcluster_version}.tgz s3://${_bucket}/${_key_path}/aws-parallelcluster-${_pcluster_version}.tgz || _error_exit 'Failed to push CLI to S3'
_bucket_region=$(aws ${_profile} s3api get-bucket-location --bucket ${_bucket} --output text)
if [ ${_bucket_region} == "None" ]; then
_bucket_region=""
else
_bucket_region=".${_bucket_region}"
fi
echo "Done. Add the following configuration to the pcluster create config file:"
echo ""
echo "DevSettings:"
echo " AwsBatchCliPackage: s3://${_bucket}/${_key_path}/aws-parallelcluster-${_pcluster_version}.tgz"
}
main "$@"
# vim:syntax=sh
| true |
55227e8e519cc804d239febb483b3d298b8ee024 | Shell | Amertime/Amertime | /.acpi.sh | UTF-8 | 189 | 2.921875 | 3 | [] | no_license | #!/bin/bash
count=1
while [ $count -lt 2 ]; do
echo "System Status" | cat >> .acpi.tmp
neofetch | cat >> .acpi.tmp
acpi | cat >> .acpi.tmp
cat .acpi.tmp
rm .acpi.tmp
sleep 10
done
| true |
1a4fec2f01456339a6560f940b8effb2a26fa057 | Shell | huangzhaolin/just-for-fun | /who-buy-fruit-this-week.sh | UTF-8 | 514 | 3.171875 | 3 | [] | no_license | #! /bin/bash
#author:Jolin Huang
#从A-J之间随即抽取一位同学买水果,根据每个同学的系数不同,命中的概率也不同。
declare -A buyers
buyers=([A]=10 [B]=30 [C]=10 [D]=20 [E]=10 [F]=10 [G]=10 [H]=10 [I]=10 [J]=10)
selector=""
index=0
sumSelector=0
for buyer in ${!buyers[*]}
do
buyerCounter=${buyers[$buyer]}
((sumSelector=$sumSelector+$buyerCounter))
for i in `seq $buyerCounter`
do
selector[((index++))]=$buyer
done
done
((random=$RANDOM%$sumSelector))
echo ${selector[$random]}
| true |
f81b722b78ef166f9db6d894d1888d907144df81 | Shell | goaxert/me-docker | /workspace/run.sh | UTF-8 | 550 | 2.671875 | 3 | [] | no_license | #!/bin/bash
PROJECT_PATH="$1"
PROJECT_NAME="$2"
if [ ! -d "$PROJECT/mnt" ]; then
echo "sudo ./init.sh [directory]"
exit 1
fi
docker run \
-d --rm --name $PROJECT_NAME \
-p 8022:22 \
-h $PROJECT_NAME \
-v $PROJECT_PATH/.zsh_history.d/.zsh_history:/root/.zsh_history.d/.zsh_history \
-v $PROJECT_PATH/.kube:/root/.kube \
-v $PROJECT_PATH/.ssh:/root/.ssh \
-v $PROJECT_PATH/.gitconfig:/root/.gitconfig \
-v $PROJECT_PATH/mnt:/root/environment \
-v /var/run/docker.sock:/var/run/docker.sock \
-w /root/environment \
nmops/workspace:1.3
| true |
80cfd1ad0fb62e4ef138c2f92f53da93f08a835d | Shell | delkyd/alfheim_linux-PKGBUILDS | /cheat/PKGBUILD | UTF-8 | 667 | 2.671875 | 3 | [] | no_license | # Maintainer: Kaan Genç <SeriousBug at Gmail dot com>
pkgname=cheat
pkgver=1.0.3
pkgrel=1
pkgdesc="A minimal unit testing framework for the C programming language."
arch=(any)
url="https://github.com/Tuplanolla/cheat"
license=('BSD')
source=("https://github.com/Tuplanolla/cheat/archive/1.0.3.tar.gz")
sha256sums=('2795b98230fb20cddf305ad1ff126518f82babfcb938be8fadd20ed80d45e979')
package() {
cd "$srcdir/$pkgname-$pkgver"
install -Dm644 cheat.h "$pkgdir/usr/include/cheat.h"
install -Dm644 cheats.h "$pkgdir/usr/include/cheats.h"
install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
install -Dm644 cheat.7 "$pkgdir/usr/share/man/man7/cheat.7"
}
| true |
f38c8f037ca11ecec472f1d4b5969a431fd60717 | Shell | joelanders/dotfiles | /.bashrc | UTF-8 | 1,130 | 2.53125 | 3 | [] | no_license | export BASH_CONF="bashrc"
set -o vi
#alias mv='mv -i'
#alias cp='cp -i'
#alias rm='rm -i'
#alias vim='mvim -v'
alias ls='ls -F --color=auto'
alias ll='ls -ahl'
eval $( dircolors -b ~/.dir_colors )
export CLICOLOR=1
export GREP_OPTIONS="--color"
export RI="--format ansi -T"
#export PS1="\h:\W$ "
parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/(\1) /'
}
export PS1="\[\e[0;36m\]\u@\h\[\e[m\] \[\e[0;34m\]\w\[\e[m\] \[\e[0;33m\]\$(parse_git_branch)\[\e[m\]\[\033[35m\]\$\[\033[0m\] "
alias tmux="TERM=xterm-256color tmux"
mcd() { mkdir -p "$@" && cd "$@"; }
v() { vim -O "include/$@.hh" "src/$@.cc"; }
padoff() { synclient TouchpadOff=1; }
padon() { synclient TouchpadOff=0; }
alias mi='make -j5 && make -j5 install'
PATH=$PATH:$HOME/.rvm/bin # Add RVM to PATH for scripting
export PERL_LOCAL_LIB_ROOT="/home/landers/perl5";
export PERL_MB_OPT="--install_base /home/landers/perl5";
export PERL_MM_OPT="INSTALL_BASE=/home/landers/perl5";
export PERL5LIB="/home/landers/perl5/lib/perl5/x86_64-linux-thread-multi:/home/landers/perl5/lib/perl5";
export PATH="/home/landers/perl5/bin:$PATH";
| true |
62e853d5521d9b1119af8fe690554d3f846e47fa | Shell | ajzuse/CompJournoStick | /Scripts/SetUpWorkstation/sudo-build-rstudio-repo.bash | UTF-8 | 770 | 2.609375 | 3 | [] | no_license | #! /bin/bash
#
# Copyright (C) 2013 by M. Edward (Ed) Borasky
#
# This program is licensed to you under the terms of version 3 of the
# GNU Affero General Public License. This program is distributed WITHOUT
# ANY EXPRESS OR IMPLIED WARRANTY, INCLUDING THOSE OF NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Please refer to the
# AGPL (http://www.gnu.org/licenses/agpl-3.0.txt) for more details.
#
# Rebuild the local RStudio yum repository from scratch.
rm -fr /opt/RStudioRepo; mkdir -p /opt/RStudioRepo
pushd /opt/RStudioRepo
# Scrape the current RPM download link(s) from the RStudio download page.
# $(...) replaces the legacy backticks; $rpm_urls is deliberately left
# unquoted so multiple matching URLs are passed to wget as separate args,
# matching the original behavior.
rpm_urls=$(curl -s http://www.rstudio.com/ide/download/desktop | grep rpm | sort -u | sed 's/^.*href="//' | sed 's/".*$//')
wget $rpm_urls
popd
createrepo /opt/RStudioRepo
# Re-register the repo with yum and refresh its metadata.
rm -f /etc/yum.repos.d/opt_RStudioRepo.repo
yum-config-manager --add-repo file:///opt/RStudioRepo
yum clean all
yum check-update
| true |
912cdb1025794825c36f080c512268a56e7784d3 | Shell | steven-em/sysdump_amazon_rex | /rootfs.img.gz/fwo_rootfs.img/etc/upstart/shutdown_special | UTF-8 | 18,590 | 3.765625 | 4 | [] | no_license | #!/bin/sh
# check number of args
if [ "$#" -ne "1" ]; then
    echo "$0 requires the shutdown mode as an argument"
    for L in $SHUTDOWN_MODE_LIST; do
        echo "\t$L"
    done
    exit 1;
fi
# grab the shutdown mode
SHUTDOWN_MODE=$1
LOG_NAME="ss_${SHUTDOWN_MODE}"
# If this file is present, save logs for later debugging
SAVE_LOGS_FILE=/mnt/us/SAVE_LOGS
SAVE_LOGS=0
# Archive name embeds the current timestamp so repeated saves don't collide.
LOGS_TAR_GZ="/mnt/us/all_logs_as_of_`date +%a_%b_%d_%Y_%H_%M_%S`.tar.gz"
# NOTE(review): this kdb-derived value is unconditionally overwritten by the
# path assignment further below -- confirm which of the two is intended.
FLAG_NO_TRANSITIONS=`kdb get system/driver/filesystem/NO_TRANSITIONS`
# Userstore marker files that tweak ship-mode behavior (debug builds).
DONT_DISABLE_LOGIN_FILE=/mnt/us/DONT_DISABLE_LOGIN
SAVE_FACTORY_RESET_LOGS_FILE=/mnt/us/SAVE_FACTORY_RESET_LOGS
SHIPMODE_SLEEP_TIME=/mnt/us/SHIPMODE_SLEEP_TIME
DISABLE_LOGIN=1
##
# sourcing critical files
#
# Helper libraries and layout/path definitions; every file must source
# cleanly or the script aborts (it cannot operate without f_log etc.).
TO_SOURCE=" \
    /etc/upstart/functions \
    /etc/upstart/upstart_functions \
    /etc/upstart/shutdown_modes \
    /etc/default/layout \
    /etc/sysconfig/paths \
    /etc/rc.d/functions"
for F in $TO_SOURCE; do
    # make sure the file exists
    if [ -f $F ]; then
        source $F
        RES=$?
        # make sure we sourced it successfully
        if [ $RES -ne 0 ]; then
            f_log C $LOG_NAME "cannot source file" "\"$F\""
            exit 1
        fi
    else
        f_log C $LOG_NAME "cannot find file" "$F"
        exit 1;
    fi
done
# Device lifecycle marker files (factory-fresh state, transition lock,
# pending re-authentication).
FLAG_FACTORY_FRESH="/opt/amazon/factory_fresh/factory_fresh"
FLAG_NO_TRANSITIONS="/opt/amazon/factory_fresh/no_transitions"
FLAG_FACTORY_FRESH_AUTHENTICATION="/opt/amazon/factory_fresh/factory_fresh_authentication"
##
# functions
#
# miscellaneous files
ss_remove_misc_files()
{
    # Delete development/diagnostic leftovers from the rootfs.
    # remove misc files NOTE: Do not alter this line unless you first
    # update the OTA blacklist to exclude them from future updates.
    rm -f /etc/sysconfig/ALLOW_CVM_RESET
    RES=$?
    if [ $RES -ne 0 ]; then
        # Log but deliberately continue -- this file is non-essential.
        f_log C $LOG_NAME "cannot remove ALLOW_CVM_RESET"
    fi
    rm -rf /usr/local /opt/ar6k/include /opt/ar6k/host /opt/amazon/ebook/lib/uitest.jar
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "cannot remove misc files"
        return $RES
    fi
}
# blast partition
# Securely erase an entire partition through the eMMC controller's
# secure-erase sysfs interface.
#   $1 - human-readable name (for logging)
#   $2 - partition node under /sys/block/${ROOT}/ (e.g. mmcblk0p11)
# Returns non-zero on bad arguments or when any step fails.
ss_blast_partition()
{
    local NAME=$1
    local PARTITION=$2
    # check for valid arguments
    if [ -z "$NAME" -o -z "$PARTITION" ]; then
        f_log E $LOG_NAME "invalid arguments to blast partition" \
            "name=\"$NAME\" partition=$PARTITION"
        return 1
    fi
    # read the start sector and number of consecutive sectors to erase.
    # Declaration is separated from assignment so RES captures cat's exit
    # status (with "local VAR=$(...)" RES would always see local's 0).
    local START_SECTOR
    START_SECTOR=$(cat /sys/block/${ROOT}/$PARTITION/start)
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "failed to get start sector" \
            "name=\"$NAME\" partition=$PARTITION" "getting start sector exited with code $RES"
        return $RES
    fi
    local NUM_SECTORS
    NUM_SECTORS=$(cat /sys/block/${ROOT}/$PARTITION/size)
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "failed to get number of consecutive sectors" \
            "name=\"$NAME\" partition=$PARTITION" "getting number of sectors exited with code $RES"
        return $RES
    fi
    # log what we're about to do
    f_log I $LOG_NAME "erasing partition" \
        "name=\"$NAME\" partition=/dev/$PARTITION" \
        "secure erase from ${START_SECTOR} of ${NUM_SECTORS} sectors"
    # secure erase the filesystem; the control file location differs per
    # hardware platform.
    if [ "$(f_platform)" = "duet" ]; then
        CTRL_ERASE_FILE=/sys/devices/system/falcon/falcon0/falcon_ctrl_erase
    elif [ "$(f_platform)" = "heisenberg" ]; then
        CTRL_ERASE_FILE=/sys/devices/soc0/soc.2/2100000.aips-bus/2194000.usdhc/mmc_host/mmc0/mmc0:0001/mmc_ctrl_erase
    else
        CTRL_ERASE_FILE=/sys/devices/system/mmc_ctrl/mmc_ctrl0/mmc_ctrl_erase
    fi
    echo "/dev/${ROOT} ${START_SECTOR} ${NUM_SECTORS}" > $CTRL_ERASE_FILE
    RES=$?
    # Reading the control file back yields 1 on a successful erase.
    local RES_ERASE=$(cat $CTRL_ERASE_FILE)
    if [ $RES -eq 0 ] && [ ${RES_ERASE} -eq 1 ]; then
        f_log I $LOG_NAME "partition erased successfully" \
            "name=\"$NAME\" partition=/dev/$PARTITION"
    else
        f_log C $LOG_NAME "failed to erase partition" \
            "name=\"$NAME\" partition=/dev/$PARTITION" "secure erase exited with code $RES"
    fi
    return $RES
}
# blast partition with dd
# Erase the beginning of a partition by overwriting it with zeros (dd).
# Used as a fallback/cheap wipe: only the first min(partition size, 4096)
# blocks are overwritten, which destroys the filesystem metadata.
#   $1 - human-readable name (for logging)
#   $2 - full device path (e.g. /dev/mmcblk0p11)
ss_blast_partition_with_dd()
{
    local NAME=$1
    local PARTITION=$2
    # check for valid arguments
    if [ -z "$NAME" -o -z "$PARTITION" ]; then
        f_log E $LOG_NAME "invalid arguments to blast partition" \
            "name=\"$NAME\" partition=$PARTITION"
        return 1
    fi
    # read the size of the partition. Declaration is separated from the
    # assignment so RES captures sfdisk's exit status (with
    # "local VAR=$(...)" RES would always see local's 0).
    local PARTITION_SIZE_BLKS
    PARTITION_SIZE_BLKS=$(/sbin/sfdisk --show-size -uB -n $PARTITION)
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "failed to get partition size" \
            "name=\"$NAME\" partition=$PARTITION" "sfdisk exited with code $RES"
        return $RES
    fi
    # maximum number of blocks to erase
    local ERASE_SIZE_BLKS=4096
    # erase min(partition size blks, erase size blks) number of blocks
    if [ $PARTITION_SIZE_BLKS -lt $ERASE_SIZE_BLKS ]; then
        ERASE_SIZE_BLKS=$PARTITION_SIZE_BLKS
    fi
    # log what we're about to do
    f_log I $LOG_NAME "erasing partition" \
        "name=\"$NAME\" partition=$PARTITION" \
        "overwriting first ${ERASE_SIZE_BLKS} of ${PARTITION_SIZE_BLKS} blocks"
    # blast the filesystem
    dd if=/dev/zero of=$PARTITION count=$ERASE_SIZE_BLKS
    RES=$?
    if [ $RES -eq 0 ]; then
        f_log I $LOG_NAME "partition erased successfully" \
            "name=\"$NAME\" partition=$PARTITION"
    else
        f_log C $LOG_NAME "failed to erase partition" \
            "name=\"$NAME\" partition=$PARTITION" "dd exited with code $RES"
    fi
    return $RES
}
# blast the /var/local/ partition
# Wipe the /var/local partition via the dd-based erase helper.
ss_blast_var_local()
{
    ss_blast_partition_with_dd "/var/local" /dev/${ROOT}${LOCAL_P}
    return $?
}
# blast the userstore partition
# Erase the userstore: try the eMMC secure-erase path first and fall back
# to a dd wipe of the partition's first blocks if that fails.
ss_blast_userstore()
{
    local part=${ROOT}${USER_P}
    if ! ss_blast_partition "userstore" $part; then
        ss_blast_partition_with_dd "/mnt/base-us" /dev/$part
    fi
    return $?
}
# Empty /var/local in place, keeping only /var/local/log, and drop a marker
# telling early boot to re-seed /var/local from /opt.
ss_prune_var_local()
{
    f_log I $LOG_NAME "varlocal pruning begin"
    # Unmount any mounted font bundles first; xargs -r avoids invoking a
    # bare "umount" (which fails and logs a spurious error) when nothing
    # matching is mounted.
    mount | grep /var/local/font/mnt | cut -d " " -f1 | xargs -r umount
    RES=$?
    f_log I $LOG_NAME "varlocal font unmount status: $RES"
    # Remove every top-level entry except the log directory.
    find /var/local/* \( -type d -o -type f \) -maxdepth 0 ! -path "/var/local/log" -exec rm -rf '{}' \;
    RES=$?
    f_log I $LOG_NAME "varlocal delete files $RES"
    if [ $RES -ne 0 ]; then
        return $RES
    fi
    # NOTE(review): this find matches only /var/local/log itself, so COUNT
    # can never exceed 1 and the check below looks ineffective -- perhaps
    # "! -path" was intended. Left as-is pending confirmation.
    COUNT=$(find /var/local/* \( -type d -o -type f \) -maxdepth 0 -path "/var/local/log" | wc -l)
    if [ $COUNT -gt 1 ] ; then
        f_log I $LOG_NAME "varlocal delete failed $COUNT"
        return 1
    fi
    # Marker consumed at boot to repopulate /var/local.
    touch /var/local/copy_optvarlocal
    f_log I $LOG_NAME "varlocal pruning complete"
}
# remove files in userstore, except ones whitelisted
ss_prune_userstore()
{
    # Delete everything in /mnt/us except whitelisted files; on a full
    # factory reset, additionally delete the graylisted ("optional") files.
    WHITELIST=/opt/amazon/data.whitelist
    # demo whitelist is "required" files (for demo mode).
    # currently no required demo files on the userstore
    # DEMO_WHITELIST=/opt/amazon/demo_data.whitelist
    # demo graylist is "optional" files (for demo mode).
    DEMO_GRAYLIST=/opt/amazon/demo_data.graylist
    # form the whitelist argument for the find command
    # (each listed path becomes "-a ! -path <p>" so find skips it).
    FIND_WHITELIST=$(cat $WHITELIST $DEMO_GRAYLIST |sed 's@\(.*\)@-a \! -path \1@g' | xargs)
    f_log I $LOG_NAME "userstore pruning begin"
    # delete all files in userstore not specified in whitelist
    cd /mnt/us
    find . \( -name "*" -o -name ".*" \) -a ! -name "." $FIND_WHITELIST \
        -depth -exec rm -rf '{}' \;
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "userstore pruning failed"
        return $RES
    fi
    # delete files from graylist if it's a factory reset
    if [ $SHUTDOWN_MODE = $SHUTDOWN_MODE_FACTORY_RESET ]; then
        GRAYLIST=/opt/amazon/data.graylist
        f_log I $LOG_NAME "userstore pruning - deleting gray list "
        cat $GRAYLIST | xargs rm -rf
        RES=$?
        if [ $RES -ne 0 ]; then
            # Logged but not propagated: graylist removal is best-effort.
            f_log C $LOG_NAME "userstore pruning - deleting gray list unsuccessful"
        fi
    fi
    f_log I $LOG_NAME "userstore pruning complete"
}
# auto-select wan mode
# Restore the WAN modem to carrier auto-select mode so a reset device is not
# pinned to the previous owner's network. No-op on devices without WAN
# (f_wan fails). Cycles the modem off/on, plants the MCS cookie, clears the
# MRU list on elmo modems, and leaves the modem powered off.
ss_set_wan_auto_select_mode()
{
    f_wan || return 0
    _WANINFO=/var/local/wan/info
    _WAN_MGROAM_PEER=4
    _WAN_MGROAM_BIN=/usr/sbin/dtpmgroam
    f_log I $LOG_NAME "wan auto select started"
    # Pull in WAN_TYPE etc. from the modem info file, if present.
    [ -e ${_WANINFO} ] && . ${_WANINFO}
    MCS_COOKIE=/var/run/mcsdwc.dat
    MCS_SET_AUTO_MODE_TO=60
    MCS_SET_AUTO_MODE_OP="0"
    # NOTE(review): unused _STOP_COUNT/_STOP_MAX retry counters were removed;
    # _WAN_MGROAM_* also look unused here -- confirm no sourced file relies
    # on them before deleting those too.
    if [ ${WAN_TYPE} -eq 5 ]; then
        MCS_SET_AUTO_MODE_OP="AT+COPS=0"
    fi
    f_log I $LOG_NAME "turning wan off"
    wancontrol wanoffkill
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "failed to turn off wan (2)"
        return $RES
    fi
    #Banff and Solden doesn't need to reset to auto-select mode
    if [ ${WAN_TYPE} -eq 6 -o ${WAN_TYPE} -eq 7 ]; then
        return 0
    fi
    f_log I $LOG_NAME "setting mcs cookie"
    echo "$MCS_SET_AUTO_MODE_TO $MCS_SET_AUTO_MODE_OP" > $MCS_COOKIE
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "failed to set mcs cookie"
        return $RES
    fi
    f_log I $LOG_NAME "turning wan on"
    wancontrol wanon
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "failed to turn on wan"
        return $RES
    fi
    case "$WAN_TYPE" in
        4) # elmo
            f_log I $LOG_NAME "clearing mru"
            #modemcmd -c "AT*MRUCLEAR" -t 5
            #for alta
            /usr/sbin/elmo_kit factory_test -e
            RES=$?
            if [ $RES -ne 0 ]; then
                f_log W $LOG_NAME "failed to clear mru"
                # continue
            fi
            ;;
    esac
    f_log I $LOG_NAME "turning wan off (2)"
    wancontrol wanoffkill
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "failed to turn off wan (2)"
    else
        f_log I $LOG_NAME "wan auto select completed"
    fi
    return $RES
}
# common cleanup tasks (shared between shipping_mode and factory_reset)
ss_common_cleanup()
{
    # Shared cleanup for shipping_mode and factory_reset: remount rootfs
    # read-write and drop factory-fresh / diags-enable markers.
    f_log I $LOG_NAME "cleaning misc files"
    mntroot rw
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "failed to mount rootfs R/W"
        return $RES
    fi
    rm -f ${FLAG_FACTORY_FRESH} $MNTLOG_DC/system/ENABLE_DIAGS
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "failed to remove factory fresh and diags-enable files"
        return $RES
    fi
    ss_remove_misc_files || return $? # remove misc prints logs
}
ss_waveform_cleanup()
{
    # Erase the stored e-ink waveform / i18n screens partition. When a
    # DONT_ERASE_WFM marker exists (and the platform supports it), the
    # calibrated waveform is backed up, the partition erased, and the
    # waveform restored -- preserving per-panel calibration data.
    f_log I $LOG_NAME "erasing waveforms"
    # erase stored waveform / i18n screens
    wfm_mount
    if [ -f "/mnt/wfm/DONT_ERASE_WFM" -a "$(f_platform)" != "yoshi" -a "$(f_platform)" != "yoshime3" ]; then
        wfm_mount
        f_log I $LOG_NAME "Creating a backup for waveform"
        tar -cvf /tmp/waveform.tar /mnt/wfm/waveform_to_use
        wfm_erase
        RES=$?
        if [ $RES -ne 0 ]; then
            f_log C $LOG_NAME "failed to erase waveforms"
            return $RES
        fi
        wfm_mount
        tar -xf /tmp/waveform.tar -C /
        RES=$?
        if [ $RES -ne 0 ]; then
            f_log C $LOG_NAME "copying waveform failed"
            return $RES
        fi
        # Re-create the marker so the restored waveform survives the next reset.
        touch /mnt/wfm/DONT_ERASE_WFM
        wfm_umount
    else
        wfm_erase
        RES=$?
        if [ $RES -ne 0 ]; then
            f_log C $LOG_NAME "failed to erase waveforms"
            return $RES
        fi
    fi
}
# side effect: can leave logs in userstore (only for dev, QA and debugging
# factory process). Be sure to not clean up userstore after this.
# side effect: can leave logs in userstore (only for dev, QA and debugging
# factory process). Be sure to not clean up userstore after this.
ss_save_logs()
{
    # Archive /var/local/log and /var/log into the userstore when the
    # SAVE_LOGS flag was raised earlier (pre-GM builds / marker file).
    if [ $SAVE_LOGS -eq 1 ]; then
        f_log I $LOG_NAME "saving logs"
        tar -zcvf $LOGS_TAR_GZ /var/local/log /var/log
        RES=$?
        if [ ${RES} -ne 0 ]; then
            f_log E $LOG_NAME "could not save all logs.."
            # -/var/local/log may not yet be created
            # -user store run out of space
            # either case we don't care and just log this failure
        fi
    fi
}
# rpinit
ss_run_rpinit()
{
    #To disable user logins to the device,
    #we make the password field empty and lock the user accounts
    #We don't want to disable password for engineering builds for sometime
    if [ $DISABLE_LOGIN -eq 1 ]; then
        SHADOW_FILE="/etc/shadow"
        # Every account whose shadow password field is not "*" (i.e. every
        # account that could log in) gets its password removed and locked.
        USER_LIST=`awk 'BEGIN { FS=":" } { if ($2 != "*") { print $1 } }' $SHADOW_FILE`
        for user in $USER_LIST
        do
            passwd -d $user
            passwd -l $user
        done
    fi
}
# removes the /MNTUS_EXEC file
# Remove the /MNTUS_EXEC marker file when present; logs and propagates any
# rm failure, and returns 0 when there is nothing to do.
ss_remove_mntus_exec()
{
    local marker=/MNTUS_EXEC
    [ -f $marker ] || return 0
    f_log I $LOG_NAME "removing /MNTUS_EXEC file"
    rm -f $marker
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "cannot remove /MNTUS_EXEC file"
        return $RES
    fi
}
# disable rtc on PMIC when shipping mode
# disable rtc on PMIC when shipping mode
ss_disable_rtc()
{
    # Clear any pending wake alarms on both RTCs so the device stays off
    # in shipping mode; each clear is verified by reading the alarm back.
    f_log I $LOG_NAME "Disabling RTC"
    echo 0 > /sys/class/rtc/rtc0/wakealarm
    sleep 1
    rtc_value=`cat /sys/class/rtc/rtc0/wakealarm`
    if [[ "$rtc_value" != "" ]]
    then
        f_log E $LOG_NAME "Cannot clear RTC0 setting"
        return 1
    fi
    echo 0 > /sys/class/rtc/rtc1/wakealarm
    sleep 1
    rtc_value=`cat /sys/class/rtc/rtc1/wakealarm`
    if [[ "$rtc_value" != "" ]]
    then
        f_log E $LOG_NAME "Cannot clear RTC1 setting"
        return 1
    fi
    f_log I $LOG_NAME "RTC0 and RTC1 settings are clear!"
    return 0
}
# removes the not-shipped file and touches first_boot file to mark
# the device as shipped.
ss_mark_as_shipped()
{
    f_log I $LOG_NAME "removing not-shipped file"
    rm -f ${NOT_SHIPPED_FILE}
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "cannot remove not-shipped file"
        return $RES
    fi
    f_log I $LOG_NAME "touching first_boot file"
    touch ${FIRST_BOOT_FILE}
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "cannot touch first_boot file"
        return $RES
    fi
}
# mark the device with a factory fresh tag
ss_factory_fresh()
{
    # Plant the three lifecycle markers: factory-fresh, no_transitions
    # (read by volumd/powerd), and factory-fresh-authentication (cleared
    # by the framework once credentials are re-provisioned).
    f_log I $LOG_NAME "marking system factory-fresh"
    factory_fresh_dir=`dirname ${FLAG_FACTORY_FRESH}`
    mkdir -p $factory_fresh_dir
    touch ${FLAG_FACTORY_FRESH}
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "cannot touch factory-fresh file"
        return $RES
    fi
    f_log I $LOG_NAME "marking system no_transitions for volumd and powerd"
    touch ${FLAG_NO_TRANSITIONS}
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "cannot touch no_transitions file"
        return $RES
    fi
    # The factory fresh authentication file created on shipping mode or factory
    # reset. It is removed by the framework when authentication credentials
    # have been received (FRO).
    f_log I $LOG_NAME "marking system factory-fresh-authentication"
    touch ${FLAG_FACTORY_FRESH_AUTHENTICATION}
    RES=$?
    if [ $RES -ne 0 ]; then
        f_log C $LOG_NAME "cannot touch factory-fresh-authentication file"
        return $RES
    fi
    # Flush the marker writes before the device powers down.
    sync
}
# This feature is available only in pre-GM builds
if [ -e "/PRE_GM_DEBUGGING_FEATURES_ENABLED__REMOVE_AT_GMC" ] ; then
    SAVE_LOGS=1
fi
#
# Figure out what mode we're in and do the work
#
# Each branch runs its cleanup chain with && so the first failure stops the
# sequence, then captures the chain's status in RES for the final report.
case $SHUTDOWN_MODE in
    $SHUTDOWN_MODE_FACTORY_RESET)
      #wan auto select mode; ignore if there is a failure as we can't do anything in case of factory reset
      ss_set_wan_auto_select_mode || ss_set_wan_auto_select_mode
        # prune the userstore before unmounting it
        # unmount /var/local before blasting it
        if ss_common_cleanup && ss_waveform_cleanup ; then
            if [ -e $SAVE_FACTORY_RESET_LOGS_FILE ]; then
                # Debug path: prune (not blast) the userstore so saved logs survive.
                ss_prune_userstore && \
                ss_save_logs && \
                f_upstart_job_stop_timeout 30 "filesystems" && \
                ss_blast_var_local && \
                ss_factory_fresh
            else
                f_upstart_job_stop_timeout 30 "filesystems" && \
                ss_blast_userstore && \
                ss_blast_var_local && \
                ss_factory_fresh
            fi
        fi
        RES=$?
        ;;
    $SHUTDOWN_MODE_SHIP_RESTART | \
    $SHUTDOWN_MODE_SHIP)
        # prune the userstore before unmounting it
        # unmount /var/local before blasting it
        [ -e $SAVE_LOGS_FILE ] && SAVE_LOGS=1
        # This feature is available only in pre-GM builds
        if [ -f "/PRE_GM_DEBUGGING_FEATURES_ENABLED__REMOVE_AT_GMC" ]; then
            [ -e $DONT_DISABLE_LOGIN_FILE ] && DISABLE_LOGIN=0
        fi
        if [ -e $SHIPMODE_SLEEP_TIME ] ; then
            sleep_time=`cat $SHIPMODE_SLEEP_TIME`
            sleep $sleep_time
        fi
        #wan auto select mode; ignore if there is a failure as we can't do anything in the case of ship mode
        ss_set_wan_auto_select_mode
        RES=$?
        if [ $RES -ne 0 ]; then
            sleep 1
            # retry
            ss_set_wan_auto_select_mode
            RES=$?
            if [ $RES -ne 0 ]; then
                f_log C $LOG_NAME "failed, returnCode=$RES"
            fi
        fi
        ss_disable_rtc && \
        ss_common_cleanup && \
        ss_run_rpinit && \
        ss_prune_userstore && \
        ss_save_logs && \
        f_upstart_job_stop_timeout 30 "filesystems" && \
        ss_blast_var_local && \
        ss_remove_mntus_exec && \
        ss_mark_as_shipped && \
        ss_factory_fresh
        RES=$?
        ;;
    $SHUTDOWN_MODE_SETTINGS_RESET)
        # blast /var/local (unmount /var/local first)
        f_upstart_job_stop_timeout 30 "filesystems" && ss_blast_var_local
        RES=$?
        ;;
    $SHUTDOWN_MODE_USERSTORE_RESET)
        # blast the userstore (unmount userstore first)
        f_upstart_job_stop_timeout 30 "filesystems" && ss_blast_userstore
        RES=$?
        ;;
    $SHUTDOWN_MODE_USERSTORE_CLEAN)
        # reset the userstore in place
        # (this isn't necessarily called during shutdown)
        ss_prune_userstore
        RES=$?
        ;;
    $SHUTDOWN_MODE_CRITBATT | \
    $SHUTDOWN_MODE_CUST_SERVICE)
        f_log I $LOG_NAME "nothing to do for this shutdown mode" "\"$SHUTDOWN_MODE\""
        RES=0
        ;;
    *)
        f_log C $LOG_NAME "unknown shutdown mode" "\"$SHUTDOWN_MODE\""
        LIST=$(echo $SHUTDOWN_MODE_LIST | sed -e 's/\s*/,/g' -e 's/^\,//g')
        f_log D $LOG_NAME "valid shutdown modes" "\"$LIST\""
        exit 1
        ;;
esac
# check the return code of the stuff that was just run
if [ $RES -ne 0 ]; then
    f_log C $LOG_NAME "failed"
else
    f_log I $LOG_NAME "completed successfully"
fi
# Leave the rootfs read-only again before exiting.
mntroot ro
exit $RES
| true |
842c04180dd99fd34e3821da256ddc3f5fac0881 | Shell | builtbykrit/gracias | /test/generate_test_dir.sh | UTF-8 | 113 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Create ./test_dir and populate it with ten empty files
# (file_1.txt .. file_10.txt). The shell is left inside test_dir,
# matching the original script's end state.
mkdir -p test_dir        # -p: tolerate an already-existing directory
cd test_dir || exit 1    # abort rather than touching files in the parent dir
for x in $(seq 1 10); do
    touch "file_${x}.txt"
done
f55f7aa34315ecedd1f31423e1db290ec68d85c3 | Shell | GuangYueCHEN/ENSIIE | /UE/S1/bin/bin/sys/E2/punition.sh~ | UTF-8 | 340 | 3.328125 | 3 | [] | no_license | #!/bin/bash
#
#usage: punition1.sh n word
# Reject more than three positional arguments.
if test $# -gt 3 ; then
    # Diagnostics go to stderr (fd 2); the original wrote to the unopened
    # fd 3, which itself failed with "bad file descriptor".
    echo 1>&2 "$0:FATAL: $# invalid argument number (expected at most 3)"
    exit 1
fi
# Fill in defaults for omitted arguments: n (count), m (count), word.
if test $# = 0; then
    n=10;m=3;word="je_ne_qsdqsdsqdsqdqsd"
elif test $# = 1; then
    n=10;m=3;word=$1
elif test $# = 2; then
    n=10;m=$1;word=$2
else
    n=$1;m=$2;word=$3
fi
./punition3.sh "$n" "$m" "$word"
| true |
116b450a95b4c39eba1dfd754c6d266a6ac8074c | Shell | justpayne/jburkardt-f77 | /toms626/toms626_prb2.sh | UTF-8 | 525 | 3 | 3 | [] | no_license | #!/bin/bash
#
gfortran -c -g toms626_prb2.f >& compiler.txt
if [ $? -ne 0 ]; then
echo "Errors compiling toms626_prb2.f"
exit
fi
rm compiler.txt
#
gfortran toms626_prb2.o -L$HOME/libf77/$ARCH -ltoms626 -lcalcomp
if [ $? -ne 0 ]; then
echo "Errors linking and loading toms626_prb2.o"
exit
fi
rm toms626_prb2.o
#
mv a.out toms626_prb2
./toms626_prb2 > toms626_prb2_output.txt
if [ $? -ne 0 ]; then
echo "Errors running toms626_prb2"
exit
fi
rm toms626_prb2
#
echo "Test results written to toms626_prb2_output.txt."
| true |
2ffa2233752e631f01d606e6bc24b33510180106 | Shell | scope-lab-vu/power-attack | /dsl/grammar/check-meta.sh | UTF-8 | 286 | 3.171875 | 3 | [] | no_license | input_filename=$(realpath $1)
output_filename=$(basename -- "$input_filename")
output_filename="${output_filename%.*}.dot"
echo "$output_filename"
if [ -f $output_filename ] ; then
rm -f $output_filename
fi
textx check $input_filename
textx generate $input_filename --target dot
| true |
8de66e5f30bd4a35126a484f53ef8d4c107332ba | Shell | ghsable/dotfiles | /bin/virtualbox/virtualbox.sh | UTF-8 | 243 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
function usage() {
cat<< _EOT_
Description:
VirtualBox Commands
Usage:
sh ${0} virtualbox : START VirtualBox
sh ${0} * : USAGE
_EOT_
exit 1
}
case ${1} in
virtualbox)
${1}
;;
*)
usage
;;
esac
| true |
909b80a72afcfe1b5f5e948ebdc5817c91135e60 | Shell | qwang1/cartridge | /redhat-zend/1.0.3/versions/5.6/configuration/shared-files/usr/local/zend/bin/jqd.sh | UTF-8 | 889 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
NAME="JobQueue"
if [ -f /etc/zce.rc ];then
. /etc/zce.rc
else
echo "/etc/zce.rc doesn't exist!"
exit 1;
fi
if [ -f $ZCE_PREFIX/bin/shell_functions.rc ];then
. $ZCE_PREFIX/bin/shell_functions.rc
else
echo "$ZCE_PREFIX/bin/shell_functions.rc doesn't exist!"
exit 1;
fi
#check_root_privileges
WEB_USER=$(whoami)
WD_UID=`id -u $WEB_USER`
WD_GID=`id -g $WEB_USER`
. ${ZCE_PREFIX}/bin/shell_functions.rc
WD_INI=${ZCE_PREFIX}/etc/watchdog-jq.ini
WATCHDOG="${ZCE_PREFIX}/bin/watchdog -c $WD_INI"
BINARY=jqd
start()
{
launch
}
stop()
{
_kill
}
status()
{
$WATCHDOG -i $BINARY
}
case "$1" in
start)
start
sleep 1
status
;;
stop)
stop
rm -f ${ZCE_PREFIX}/tmp/jqd.{app,wd}
;;
restart)
stop
rm -f ${ZCE_PREFIX}/tmp/jqd.{app,wd}
sleep 1
start
;;
status)
status
;;
*)
usage
esac
exit $?
| true |
67245f804b38d2c51cb57f877b11821aed7a61de | Shell | panchalravi/equifax_bundle | /templatePartial.sh | UTF-8 | 1,438 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Partially templatize a Gateway bundle: fully templatize it, subtract the
# properties listed in the given template.properties, then detemplatize the
# remainder so ONLY the given properties stay templatized.
# All path expansions are quoted so bundles/paths with spaces work.
if [[ -z "$1" || -z "$2" ]]; then
  echo "Need to specify a bundle and template.properties: $( basename "$0" ) <bundle> <template.properties>"
  exit 1
elif [ ! -e "$1" ]; then
  echo "Bundle does not exist: $1"
  exit 1
elif [ ! -r "$1" ]; then
  echo "Cannot read bundle $1"
  exit 1
elif [ ! -e "$2" ]; then
  echo "Template properties file does not exist: $2"
  exit 1
elif [ ! -r "$2" ]; then
  echo "Cannot read file template properties file $2"
  exit 1
fi

#scriptDir=$(dirname $0)
workingDir="templateWork"
mkdir -p "${workingDir}"

bundle="$1"
templateProperties="$2"
fullTemplateFile=$(mktemp -p "${workingDir}" -t fullTemplate.XXXXXXXXXX.properties)
propertiesListFile=$(mktemp -p "${workingDir}" -t propertiesList.XXXXXXXXXX.properties)
reducedTemplateFile=$(mktemp -p "${workingDir}" -t reducedTemplate.XXXXXXXXXX.properties)

#templatize the bundle
GatewayMigrationUtility.sh template -b "${bundle}" -t "${fullTemplateFile}"

# find the list of template properties to leave, i.e. every "key=" on a
# non-comment line of the given template properties file
awk '/^[:blank:]*[^#].*=.*$/ {print substr($0, 0, index($0, "=")-1)}' "${templateProperties}" > "${propertiesListFile}"

# remove all the listed properties from the full template properties file
grep -v -F -f "${propertiesListFile}" "${fullTemplateFile}" > "${reducedTemplateFile}"

#partially detemplatize the bundle
GatewayMigrationUtility.sh detemplate -b "${bundle}" -t "${reducedTemplateFile}"
| true |
375760f6478fac6601981fd754b2373e8c379d00 | Shell | junghans/mpip_scripts | /lunchmail | UTF-8 | 4,883 | 3.234375 | 3 | [] | no_license | #! /bin/bash
#version 0.1 01.10.07 -- added --subject
#version 0.1.1 01.10.07 -- added --list
#version 0.2 02.10.07 -- added --add option
#version 0.3 10.10.07 -- allowing short opts
#version 0.4 18.10.07 -- removed string bug in short opts
#version 0.5 10.01.08 -- added --removeme --addme
#version 0.6 06.02.08 -- added --test and make it work with special character in stdin
#version 0.7 20.02.08 -- added --forbit
#version 0.7.1 16.04.08 -- better version system
#version 0.8 11.06.08 -- changed sendername
#version 0.8.1 08.04.09 -- more forbit people + footer
#version 0.8.2 09.04.09 -- fixed typos
#version 0.8.3 30.04.09 -- more forgit
#version 0.8.4 15.05.09 -- added --topic
#version 0.8.5 20.05.09 -- added space in data readin
#version 0.8.6 03.11.09 -- update forbit
#version 0.8.7 27.01.10 -- add --notime
usage="Usage: ${0##*/} [OPTIONS] TIME"
listname=".lunch"
defaultsubject="Lunch at "
list="no"
msg=""
domain="@mpip-mainz.mpg.de"
towho=""
liste=""
test="no"
forbidden="bereau bevc bohlius boncina engin galbis hessb herbers hueck jonglee lambeth leewonbo lenzo marcon minoia muellerm nordqvis reynolds uschille vehoff vettorel villa yehcathe"
sendername="${USER}$domain"
homepage='https://194.95.63.77/mpip_scripts/summary'
check_time="yes"
# Print the given message (interpreting backslash escapes, like echo -e)
# and abort the script with exit status 1.
die() {
  printf '%b\n' "$*"
  exit 1
}
help () {
    # Emit full usage text; the here-doc expands $listname, $domain etc.
    cat << eof
Send a mail to user with a $listname file in their home
$usage
OPTIONS:
-s, --subject TEXT        Change subject of the mail
                          Default: "$defaultsubject TIME"
-a, --add ADRESS          Also send email to ADRESS (may multiple)
                          "$domain" will be added, if no @ given
--forbit USER             Remove USER from lunchmail (may multiple)
                          Default: "$forbidden"
--topic TOPIC             Change the file to search for
                          Default: "${listname#.}"
-r, --reason REASON       Give a reason
--notime                  Do not check if last argument is a time
--addme                   Creates $listname for you
--removeme                Removes $listname for you
--test                    Will send the mail ONLY to you
-l, --list                Show list of user and exit
--all                     Reset forbit
-h, --help                Show this help
-v, --version             Show version
Examples: ${0##*/} 12:00
          essen | ${0##*/} 12:00
          ${0##*/} -r "Martin is hungry" 11:00
          ${0##*/} -s "Cafeteria at 11:30" -a stuehn -a abc@abc.com
Send bugs and comment to junghans@mpip-mainz.mpg.de
eof
}
# Option loop with hand-rolled splitting of bundled short options:
# "-sFoo" becomes "-s Foo" for options taking an argument (-a/-r/-s),
# and "-lv" becomes "-l -v" for flag options.
while [ "${1#-}" != "$1" ]; do
    if [ "${1#--}" = "$1" ] && [ -n "${1:2}" ]; then
        if [ "${1#-[ars]}" != "${1}" ]; then
            set -- "${1:0:2}" "${1:2}" "${@:2}"
        else
            set -- "${1:0:2}" "-${1:2}" "${@:2}"
        fi
    fi
    case $1 in
    -s | --subject)
        subject="$2"
        shift 2;;
    -r | --reason)
        msg="REASON: $2\n\n"
        shift 2;;
    -a | --add)
        # Append the domain unless the recipient already contains an '@'.
        if [ -z "${2//*@*}" ]; then
            user="$2"
        else
            user="${2}${domain}"
        fi
        towho="$towho ${user}"
        liste="${liste}\n${user}"
        shift 2;;
    --all)
        forbidden=""
        shift 1;;
    --notime)
        check_time="no"
        shift 1;;
    --forbit)
        forbidden="$forbidden $2"
        shift 2;;
    --topic)
        listname=".${2}"
        shift 2;;
    -l | --list)
        list="yes"
        shift ;;
    --addme)
        touch ~/${listname}
        exit 0;;
    --removeme)
        rm -f ~/${listname}
        exit 0;;
    --test)
        test="yes"
        shift ;;
    -h | --help)
        help
        exit 0;;
    --hg)
        echo "${0##*/}: $(sed -ne 's/^#version.* -- \(.*$\)/\1/p' $0 | sed -n '$p')"
        exit 0;;
    -v | --version)
        echo "${0##*/}", $(sed -ne 's/^#\(version.*\) -- .*$/\1/p' $0 | sed -n '$p') by C. Junghans
        exit 0;;
    *)
        die "Unknown option '$1'"
        shift ;;
    esac
done
# First remaining argument is the lunch time unless a subject was given.
if [ "$list" = "no" ] && [ -z "$subject" ]; then
    [ -z "$1" ] && die "${0##*/} need at least one argument (the time)\ntry: ${0##*/} --help"
    [ "$check_time" = "yes" ] && [ -n "${1//[0-9][0-9]:[0-9][0-9]}" ] && \
        die "Argument 1 should have the form of a time (XX:XX), disable it --notime option"
    subject="${defaultsubject}$1"
    shift
fi
if [ "$list" = "yes" ]; then
    echo "Members of the ${0##*/} list:"
fi
# Every user with a ~/$listname file (and not on the forbidden list) is a
# recipient; with --list the members are only printed.
for user in $(ls /people/thnfs/homes); do
    if [ -n "${forbidden//*${user}*}" ] && [ -e "/people/thnfs/homes/$user/$listname" ]; then
        towho="$towho ${user}${domain}"
        liste="${liste}\n${user}${domain}"
        if [ "$list" = "yes" ]; then
            echo $user
        fi
    fi
done
if [ "$list" = "yes" ]; then
    exit 0
fi
[ -z "$towho" ] && die "Nobody with $listname file found !"
# Read the mail body from stdin until EOF (CTRL-D).
echo Type in some Message \(end with CRTL-D\):
while read -r; do
    msg="${msg}${REPLY} \n"
done
#add footer
msg="${msg}---------------------------------------------\nThis is $($0 -v)\n"
msg="${msg}Homepage: $homepage\n"
echo -n Sending email to:
echo -e "$liste"
# --test restricts the recipient list to the sender only.
if [ "$test" = "yes" ]; then
    towho="${USER}${domain}"
    echo ONLY TESTING \-\> Sending mail to $towho
fi
echo -e "$msg" | mail -r $sendername -s "$subject" $towho
echo Done
23cc6c92f1068e0fefb2ac70a6a8176758a9c240 | Shell | mgred/dotfiles | /.config/bash/init.bash | UTF-8 | 399 | 2.9375 | 3 | [] | no_license | # Set shell options
# https://www.gnu.org/software/bash/manual/html_node/The-Shopt-Builtin.html
shopt -s autocd
shopt -s cdspell
shopt -s cmdhist
shopt -s histappend
shopt -s checkwinsize
shopt -s globstar
shopt -s dotglob

# Include all files in the `profile` subdirectory.
# There are vars, aliases and functions defined.
# Filenames are read line-by-line instead of word-splitting $(find ...),
# so profile fragments whose names contain spaces still get sourced.
while IFS= read -r f; do
  . "$f"
done < <(find "$HOME/.config/bash/profile/" -type f | sort)
| true |
8b301dbb3f534546bd8d5d4d1cd14f84de9f4975 | Shell | mercion/mypi | /rootfs/usr/bin/update-ca-trust | UTF-8 | 1,412 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# At this time, while this script is trivial, we ignore any parameters given.
# However, for backwards compatibility reasons, future versions of this script must
# support the syntax "update-ca-trust extract" trigger the generation of output
# files in $DEST.
DEST=/etc/ca-certificates/extracted

# Regenerate the per-purpose PEM bundles from the p11-kit trust store.
trust extract --overwrite --format=pem-bundle --filter=ca-anchors --purpose server-auth $DEST/tls-ca-bundle.pem
trust extract --overwrite --format=pem-bundle --filter=ca-anchors --purpose email $DEST/email-ca-bundle.pem
trust extract --overwrite --format=pem-bundle --filter=ca-anchors --purpose code-signing $DEST/objsign-ca-bundle.pem

# Removes all files in the target directory, but not directories or files therein
trust extract --overwrite --format=openssl-directory --filter=certificates $DEST/cadir

SSL=/etc/ssl/certs

# OpenSSL-style trust bundle and the Java keystore.
trust extract --overwrite --format=openssl-bundle --filter=certificates $SSL/ca-bundle.trust.crt
trust extract --overwrite --format=java-cacerts --filter=ca-anchors --purpose server-auth $SSL/java/cacerts

ln -fsrT $DEST/tls-ca-bundle.pem $SSL/ca-certificates.crt

# We don't want to have to remove everything from the certs directory but neither
# do we want to leave stale certs around, so symlink it all from somewhere else
for f in $DEST/cadir/*; do
	ln -fsr -t $SSL "$f"
done

# Now find and remove all broken symlinks
find -L $SSL -maxdepth 1 -type l -delete
| true |
0fb5bb41ce18e2c5705fd063b6445cccbf9fdaf0 | Shell | otron/dotfiles | /bash/.bash_profile | UTF-8 | 108 | 2.828125 | 3 | [] | no_license | # Load `~/.bashrc` if it exists, else do nothing.
# Pull in ~/.bashrc when it is present; silently skip it otherwise.
if [ -f "$HOME/.bashrc" ]; then
  . "$HOME/.bashrc"
fi
| true |
54ab2f923178cb12cf9201d37094394b291ed112 | Shell | tarmiste/lfspkg | /archcore/svnsnap/packages/totem-plparser/repos/extra-x86_64/PKGBUILD | UTF-8 | 923 | 2.703125 | 3 | [] | no_license | # $Id: PKGBUILD 307018 2017-10-06 12:04:05Z heftig $
# Maintainer: Jan Alexander Steffens (heftig) <jan.steffens@gmail.com>
# Contributor: Jan de Groot <jgc@archlinux.org>
pkgname=totem-plparser
pkgver=3.26.0
pkgrel=1
pkgdesc="Simple GObject-based library to parse and save a host of playlist formats"
url="https://git.gnome.org/browse/totem-pl-parser"
license=(LGPL)
arch=(i686 x86_64)
depends=(gmime3 libarchive libquvi libxml2)
makedepends=(gobject-introspection git gtk-doc libsoup meson)
_commit=279ca9c68f9ed24a29bec5ababcdbf97fd5d08e7 # tags/V_3_26_0^0
source=("git+https://git.gnome.org/browse/totem-pl-parser#commit=$_commit")
sha256sums=('SKIP')
pkgver() {
cd totem-pl-parser
git describe --tags | sed 's/^V_//;s/_/./g;s/-/+/g'
}
prepare() {
mkdir build
cd totem-pl-parser
}
build() {
cd build
meson setup --prefix=/usr --libexecdir=/usr/lib --buildtype=release -Denable-gtk-doc=true ../totem-pl-parser
ninja
}
package() {
cd build
DESTDIR="$pkgdir" ninja install
}
| true |
94c9e34614cbc4e1ae4f501de4fc0ce96da4ebc4 | Shell | CESNET/perun-services | /slave/process-ldap-vsb-vi/conf/example-pre_10_setup_connection | UTF-8 | 591 | 2.96875 | 3 | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | #!/bin/bash
#
# Example pre-file for LDAP service providing authz information.
# For production use remove "example-" prefix for file and fill the values.
#
# LDAP_LOGIN=[login]
# LDAP_PASSWORD=[password]
# LDAP_URL=[LDAP server location with optional ":port" part]
# LDAP_FILTER_USERS=[filter for query in standard LDAP syntax used to read current state of ldap (users)]
# LDAP_FILTER_GROUPS=[filter for query in standard LDAP syntax used to read current state of ldap (groups)]
#
# Credentials are intentionally left empty in this example; fill in before use.
LDAP_LOGIN=
LDAP_PASSWORD=
LDAP_URL=
# Default filters match every object.
LDAP_FILTER_USERS="(objectclass=*)"
LDAP_FILTER_GROUPS="(objectclass=*)"
0e833dc449a14768dfaa482eed3004fb645702cf | Shell | scampersand/sonos-front | /provision.bash | UTF-8 | 6,693 | 3.859375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Provisioning script for vagrant.
# If there's only one project with requirements and deps in TOP, this is
# fine, otherwise set VAGRANT_PROJ
: ${VAGRANT_TOP:=/vagrant}
: ${VAGRANT_PROJ:=*}

# Entry point: run root-level setup when invoked as root, then re-exec the
# same script (with identical args, %q-quoted) as the vagrant user; when
# already non-root, do only the per-user setup.
# NOTE(review): msg/is_vbox/is_docker/is_lxc/die/src are helpers presumably
# defined later in this file (beyond this chunk) -- confirm.
main() {
  if [[ $EUID != 0 ]]; then
    msg "--> provision.bash as_user"
    as_user
    msg "<-- provision.bash as_user"
    return
  fi

  msg "--> provision.bash as_root"
  as_root
  msg "<-- provision.bash as_root"
  su vagrant -c "$(printf '%q ' "$0" "$@")"
}
# Root-phase driver: platform-specific pre/post hooks around package install.
as_root() {
  is_vbox && vbox_preinstall
  install_packages
  (is_docker || is_lxc) && lxc_postinstall
  common_postinstall
}

vbox_preinstall() {
  # If the host has moved between networks, sometimes DNS needs to be
  # reconnected.
  /etc/init.d/networking restart
}

common_postinstall() {
  # Set up the locale support files
  sed -i '/en_US.UTF-8/s/^# *//' /etc/locale.gen
  locale-gen

  # Set the timezone
  ln -sfn /usr/share/zoneinfo/EST5EDT /etc/localtime
}

lxc_postinstall() {
  declare uid gid

  # Make the vagrant uid/gid match the host user
  # so the bind-mounted source area works properly.
  read uid gid <<<"$(stat -c '%u %g' "$VAGRANT_TOP")"
  if [[ ! -n $uid ]]; then
    die "Couldn't read uid/gid for vagrant user"
  fi
  if [[ $(id -u vagrant) != $uid || $(id -g vagrant) != $gid ]]; then
    # usermod/userdel doesn't work when logged in
    # (vagrant is the logged-in user), so edit passwd/shadow directly and
    # recreate the account with the host's uid/gid.
    sed -i '/vagrant/d' /etc/passwd /etc/shadow
    groupmod -g $gid vagrant
    useradd -u $uid -g vagrant -G sudo -s /bin/bash vagrant \
      -p "$(perl -e "print crypt('vagrant', 'AG')")"
    # Re-own the home dir (and the vagrant shell script, best-effort).
    find /home/vagrant -xdev -print0 | xargs -0r chown $uid:$gid
    chown $uid:$gid /tmp/vagrant-shell 2>/dev/null ||:
  fi
}
# Install the base development/tooling package set via apt, with nodejs
# coming from the nodesource repository. ssh is upgraded first (carefully)
# so the running sshd is not killed mid-provision.
install_packages() {
  declare -a packages
  packages+=( locales )           # for locale-gen
  packages+=( curl rsync )
  packages+=( git )
  packages+=( sudo ssh )
  packages+=( make gcc g++ binutils )
  packages+=( inotify-tools )     # inotifywait
  packages+=( nodejs )            # add npm if not installing from nodesource
  packages+=( graphicsmagick )    # for image resizing

  # Don't install extra stuff.
  # Suggests list is long; recommends list is short and sensible.
  # To omit recommends, add APT::Install-Recommends "false";
  cat > /etc/apt/apt.conf.d/99vagrant <<EOT
APT::Install-Suggests "false";
EOT

  # Add nodejs upstream.
  if [[ " ${packages[*]} " == *" nodejs "* && \
        ! -e /etc/apt/sources.list.d/nodesource.list ]]; then
    which curl &>/dev/null || (apt-get update; apt-get install curl -y)
    curl -sL https://deb.nodesource.com/setup_6.x | bash -
  fi

  # This should prevent apt-get install/upgrade from asking ANY questions
  export DEBIAN_FRONTEND=noninteractive

  # Update package list
  apt-get update

  # Upgrade ssh server first to avoid killing the running server
  apt-get install -y ssh \
    -o 'PackageManager::Configure=no' \
    -o 'DPkg::ConfigurePending=no'
  chmod -x /etc/init.d/ssh # prevents restart
  dpkg --configure -a

  # Now the rest
  apt-get install -y "${packages[@]}"
  apt-get upgrade -y "${packages[@]}"

  # Make /usr/bin/nodejs available as /usr/local/bin/node
  ln -sfn /usr/bin/nodejs /usr/local/bin/node
}
as_user() {
  # Runs as the vagrant user: set up shell startup files (only when the
  # home dir really is */vagrant), then per-language dependencies.
  cd ~
  if [[ $PWD == */vagrant ]]; then
    rm -f .profile
    cat > .bash_profile <<'EOT'
source ~/.bashrc
EOT
    cat > .bashrc <<'EOT'
PATH=~/node_modules/.bin:$PATH
[[ -e ~/env ]] && source ~/env/bin/activate
[[ $- != *i* ]] && return
PS1='\u@\h:\w\$ '
EOT
    echo "cd $VAGRANT_TOP" >> .bashrc
    source .bash_profile
  fi
  user_virtualenv
  user_gems
  user_npm
}
user_virtualenv() {
  # Create ~/env and install Python requirements if a requirements.txt
  # is found via src(); skipped entirely when virtualenv is missing.
  cd ~
  if ! type virtualenv &>/dev/null; then
    echo "no virtualenv, skipping python requirements" >&2
    return
  fi
  # Always create the virtualenv, even if there's no requirements.txt, since
  # we also use it to isolate ruby gems.
  if [[ ! -d env ]]; then
    virtualenv env
  fi
  source env/bin/activate
  declare reqs
  if reqs=$(src requirements.txt); then
    pip install -U pip
    pip install -r "$reqs"
  fi
}
pip() {
  # Run the real pip with PYTHONUNBUFFERED=1 so its output streams
  # immediately instead of being block-buffered.
  PYTHONUNBUFFERED=1 command pip "$@"
}
user_gems() {
  # Install Ruby gems with bundler, isolated inside the Python
  # virtualenv (GEM_HOME is injected into env/bin/activate once).
  cd ~
  if [[ ! -d env ]]; then
    echo "no virtualenv, skipping ruby gems" >&2
    return
  fi
  if ! grep -q GEM_HOME env/bin/activate; then
    echo 'export GEM_HOME="$VIRTUAL_ENV/ruby" PATH="$VIRTUAL_ENV/ruby/bin:$PATH"' >> env/bin/activate
  fi
  source env/bin/activate
  declare gemfile
  if gemfile=$(src Gemfile); then
    cd "$(dirname "$gemfile")"
    bundle clean --force
    bundle install
  fi
}
user_npm() {
  # Install node dependencies next to the package manifest found via src().
  cd ~
  declare found
  if found=$(src npm-shrinkwrap.json) || found=$(src package.json); then
    cd "$(dirname "$found")"
    # This is a little bit of a hack in that it does a local install to a
    # symlinked node_modules, so that the install is in the vagrant image
    # rather than the bind-mounted src dir.
    mkdir -p ~/node_modules
    ln -sfn ~/node_modules node_modules
    npm install
  fi
}
# Locate file $1 either at the top of the source tree ($VAGRANT_TOP) or
# one directory level down inside $VAGRANT_PROJ.  $VAGRANT_PROJ may be a
# glob, hence the unquoted echo to expand it; testing the expansion with
# -f only succeeds when it produced a single existing file.
# Prints the path found on stdout; returns 1 when nothing matches.
src() {
  local candidate
  for candidate in "$VAGRANT_TOP/$1" "$(echo $VAGRANT_TOP/$VAGRANT_PROJ/"$1")"; do
    if [[ -f $candidate ]]; then
      echo "$candidate"
      return 0
    fi
  done
  return 1
}
# Print an informational message (all arguments joined) on stdout.
msg() { echo "$*"; }
# Print a message and abort the provisioning run with status 1.
die() { echo "$*"; exit 1; }
is_docker() {
  # Detect a docker container by looking for "docker" in init's cgroups.
  if [[ ! -d /home/vagrant ]]; then
    echo "is_docker: running outside vagrant?" >&2
    return 1
  fi
  sudo grep -qw docker /proc/1/cgroup
  # Memoize: redefine the function so later calls return the grep
  # result immediately instead of re-running sudo.
  eval "is_docker() { return $?; }"
  is_docker
}
is_lxc() {
  # Detect an LXC container via init's environment; memoized like is_docker.
  if [[ ! -d /home/vagrant ]]; then
    echo "is_lxc: running outside vagrant?" >&2
    return 1
  fi
  # https://www.redhat.com/archives/virt-tools-list/2013-April/msg00117.html
  sudo grep -q container=lxc /proc/1/environ
  eval "is_lxc() { return $?; }"
  is_lxc
}
is_vbox() {
  # Detect a VirtualBox guest via DMI data; installs dmidecode on demand.
  # Memoized like is_docker/is_lxc.
  if [[ ! -d /home/vagrant ]]; then
    echo "is_vbox: running outside vagrant?" >&2
    return 1
  fi
  which dmidecode &>/dev/null || sudo apt-get install -y dmidecode
  sudo dmidecode 2>/dev/null | grep -q VirtualBox
  eval "is_vbox() { return $?; }"
  is_vbox
}
#######################################################################
#
# RUN MAIN only if not sourced into another script
#
#######################################################################
# vagrant uploads the provisioner to the guest as "vagrant-shell";
# direct invocations use the script's own basename.
case ${0##*/} in
  provision.bash|vagrant-shell) main "$@" ;;
esac
| true |
cc17947f7ab97751550030eda7f817c701698c8b | Shell | elzaggo/tdm-wrf | /docker/run_metgrid | UTF-8 | 316 | 3.359375 | 3 | [] | no_license | #!/bin/bash
set -euo pipefail

# Run the WPS metgrid step inside the run directory given as $1.
WPSRUN=$1
COMPONENT=metgrid

cd "${WPSRUN}"
ln -sf /wrf/WPS/${COMPONENT}.exe .

# Command for COMPONENT.
# Test the exit status directly in the `if`: the previous version ran
# the exe, then `ls`, then checked $? -- under `set -e` a failing
# metgrid aborted the script before the check, and $? actually came
# from `ls`, so the FAILED branch was unreachable.
if ./${COMPONENT}.exe > "print.${COMPONENT}.txt" 2>&1; then
    ls -ls FILE:*
    tail "print.${COMPONENT}.txt"
else
    echo ${COMPONENT} FAILED
    exit 444
fi
| true |
5c3151725560d8cc3b5557e354efdcd3679df118 | Shell | gmatht/leviathan | /starcluster_files/change_region.sh | UTF-8 | 1,946 | 2.859375 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | region=$1
ami=$2
# Clone the starcluster configuration into a per-region fake $HOME,
# substituting the region and AMI, then drop into a shell using it.
# NOTE(review): $region is assigned from $1 on the script's first line
# (before this point); confirm when editing.
if [ -z "$2" ]
then
	echo usage: $0 region ami
	echo $0 ca-central-1 ami-b3d965d7
	firefox https://simonpbriggs.co.uk/amazonec2/
	exit 1
fi
# Values baked into the template config that get rewritten below.
AWS_REGION_NAME=ap-southeast-1
NODE_IMAGE_ID=ami-8fcc75ec
NEW_HOME=~/.home.$region
mkdir -p $NEW_HOME
ln -s ~/.ssh $NEW_HOME/.ssh
mkdir $NEW_HOME/.starcluster
# Rewrite region and AMI in one sed pass (two expressions, one per line).
< ~/.starcluster/config sed "s/$AWS_REGION_NAME/$region/g
s/$NODE_IMAGE_ID/$ami/g" > $NEW_HOME/.starcluster/config
ln ~/.gitconfig $NEW_HOME/.gitconfig
#[ -e ~/.ssh/mykey-$region.rsa ] || HOME=$NEW_HOME starcluster ck $region -o ~/.ssh/mykey-$region.rsa
[ -e ~/.ssh/mykey-$region.rsa ] || HOME=$NEW_HOME starcluster ck mykey -o ~/.ssh/mykey-$region.rsa
# Interactive shell with the region-specific HOME; the rest of the file
# (after this exit) is reference data, never executed.
HOME=$NEW_HOME bash
exit
ca-central-1 ami-b3d965d7
https://cloud-images.ubuntu.com/locator/ec2/
ap-northeast-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-1de1df7a hvm
ap-northeast-2 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-6722ff09 hvm
ap-south-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-4fa4d920 hvm
ap-southeast-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-93ef68f0 hvm
ap-southeast-2 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-1e01147d hvm
ca-central-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-e273cf86 hvm
cn-north-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170303 ami-a163b4cc hvm
eu-central-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-a74c95c8 hvm
eu-west-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-6c101b0a hvm
eu-west-2 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-056d7a61 hvm
sa-east-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-4bd8b727 hvm
us-east-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-20631a36 hvm
us-east-2 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-a5b196c0 hvm
us-gov-west-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170330 ami-ff22a79e hvm
us-west-1 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-9fe6c7ff hvm
us-west-2 xenial 16.04 LTS amd64 hvm:ebs-ssd 20170516 ami-45224425 hvm
| true |
f7d2ac924d50017d456f5352e2e638191637e249 | Shell | LEI-code/L201819 | /SO/TP1/P3.sh | UTF-8 | 437 | 2.90625 | 3 | [] | no_license | #echo $(ls *$1 | sed -e 's/\..*$//')
# P3.sh OLD_SUFFIX NEW_SUFFIX
# Rename every file in the current directory whose name ends with
# OLD_SUFFIX so that it ends with NEW_SUFFIX instead
# (e.g. ./P3.sh .txt .bak turns notes.txt into notes.bak).
#
# Fixes over the previous `ls | sed 's/\..*$//'` version:
#   - sed stripped everything after the FIRST dot, so "a.b.txt" was
#     mangled to "a" and the script then tried to move the non-existent
#     "a.txt"; ${fil%"$1"} strips only the trailing suffix.
#   - parsing ls broke on filenames containing whitespace; a glob
#     handles any filename.
for fil in *"$1"; do
	[ -e "$fil" ] || continue	# glob matched nothing: skip the literal pattern
	mv -- "$fil" "${fil%"$1"}$2"
done
| true |
8331db808b02ad8f99231cc7f654c74a8e32088d | Shell | Kang-Jack/AWS-Lambda-ShuangSe | /ref/zlib-1.2.11-0/info/recipe/build.sh | UTF-8 | 272 | 2.5625 | 3 | [
"Zlib"
] | permissive | #!/bin/bash
# conda-build recipe: build zlib as a shared library into $PREFIX.
# -fPIC is required to build the shared object.
CFLAGS="-fPIC" ./configure --shared --prefix=$PREFIX
make
make check
make install
# Remove man files.
rm -rf $PREFIX/share
# Copy license file to the source directory so conda-build can find it.
cp $RECIPE_DIR/license.txt $SRC_DIR/license.txt
| true |
051d6f6ed018a8895acea359f99c6238704a41cf | Shell | lfeng1231/RSS_Scripts | /20_bgpolish_snvcall/polish_snvcall.sh | UTF-8 | 1,144 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env bash
source ~/.bashrc
module load rsu_sw_binaries
module load pipelines/cappmed
#module load docker

# Background-polish the SNV frequency files for a sample, then run the
# SNV caller on the polished output.
#
# Fixed: the old usage message claimed "3 command args" and described
# only three of them, while the check requires exactly five; it also
# exited 0 on bad usage.
# NOTE(review): meanings of args 3-5 are inferred from the variable
# names below (bg/white/sample); confirm against the pipeline docs.
if [ "$#" -ne 5 ]
then
    echo "Need 5 command args"
    echo "1: sorted bam file"
    echo "2: slopped target bed file (.add500bp.bed)"
    echo "3: background file for bg-polishing (filter-freq.pl)"
    echo "4: whitelist file for the SNV caller"
    echo "5: sample name"
    exit 1
fi

snvcaller="/isilon/Analysis/onco/prog/CM_gitclone/ctdna-snv-caller/R/snv_caller3.r"
samtools="/isilon/Apps/site_sw/prd/RHEL7.1_x86_64/samtools/1.2/bin/samtools"   # defined but unused below; kept for reference
polish="/isilon/Analysis/onco/prog/CM_gitclone/ctdna-bg-polishing/filter-freq.pl"

sortedbam="$1"
targetbed="$2"
bg="$3"
white="$4"
sample="$5"

# Derive the deduped and duplex-deduped BAM basenames from the input name.
outname=${sortedbam/.sorted.bam/.dualindex-deduped.sorted.bam}
outnamedup=${sortedbam/.sorted.bam/.dualindex-deduped.duplex.sorted.bam}

# Now polish
polishout=$outname".snv.freq"
polishout=${polishout/.freq/.bg-polished.freq}
$polish $outname".snv.freq" $outnamedup".snv.freq" $bg 0.2 1 $polishout $polishout".qc"

#SNV_caller
$snvcaller $polishout $outnamedup".snv.freq" ${targetbed/.add500bp.bed/.bed} $white 0 $sample /isilon/Analysis/onco/indexes/hg38/6gene_blacklist.bed /isilon/Analysis/onco/indexes/hg38/RefSeq_Gencodev23_20160623.allexons.sorted.bed
| true |
622383bbf32f51f960fe31cf59cdb7a1d3e8b806 | Shell | laranea/servtools | /install_rhel.sh | UTF-8 | 1,560 | 2.546875 | 3 | [] | no_license | mkdir -p /root/tmp
cd /root/tmp
# Provisioning notes + commands for a RHEL/CentOS 5 web stack (libyaml,
# git, build tools, passenger/nginx, mysql55).  Several steps below are
# manual (vim ...) and block unattended runs; the hard-coded EPEL /
# webtatic / gist URLs are from ~2012 and may no longer resolve.
# vim /etc/ssh/sshd_config
# edit PermitRootLogin to yes
# in bashrc
export PATH="/usr/local/bin:$PATH"
wget http://download.fedora.redhat.com/pub/epel/5/x86_64/libyaml-0.1.2-3.el5.i386.rpm
yum localinstall libyaml-0.1.2-3.el5.i386.rpm --nogpgcheck -y
wget http://download.fedora.redhat.com/pub/epel/5/x86_64/libyaml-devel-0.1.2-3.el5.i386.rpm
yum localinstall libyaml-devel-0.1.2-3.el5.i386.rpm --nogpgcheck -y
rpm -Uvh http://repo.webtatic.com/yum/centos/5/latest.rpm
yum install --enablerepo=webtatic git-all -y
yum install gcc-c++ -y
yum install curl-devel -y
yum install zlib-devel -y
yum install pcre pcre-devel -y
# install ruby from install.sh
gem i passenger
passenger-install-nginx-module
# passenger_root /usr/local/lib/ruby/gems/1.9.1/gems/passenger-3.0.11;
# passenger_ruby /usr/local/bin/ruby;
# activate repos, set enable to 1
vim /etc/yum.repos.d/webtatic.repo
vim /etc/yum.repos.d/rhel-source.repo
# install epel repos
rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-4.noarch.rpm
# check http://download.fedora.redhat.com/pub/epel/5/i386/ for updates
# mysql rhel:
# http://adityo.blog.binusian.org/?p=428
# http://php-fpm.org/wiki/Documentation
yum install mysql55 mysql55-devel mysql55-server mysql55-libs -y
yum install libxml2 libxslt -y
# nginx init.d
cd
mkdir -p tmp
cd tmp
wget https://raw.github.com/gist/1936900/7fe9c0daf90b66a96e31aef244975364e349fcd6/nginx.sh
cp nginx.sh /etc/init.d/nginx
chmod +x /etc/init.d/nginx
/etc/init.d/nginx restart
| true |
e665f830dc195232a4f72980cea1bf0d7c3e9849 | Shell | JadenMcKey/scm-docker-1 | /03-docker-hosts/start/start-docker-hosts.sh | UTF-8 | 1,291 | 3.921875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Start all Docker Machine hosts: the standalone ones plus every host
# that belongs to a Docker Swarm Mode cluster environment.

# Load configuration
CONFIG_STANDALONE_HOSTS=$(cat ${SCM_DOCKER_1_APPL_DOCKER_HOSTS_DIR}/data/standalone-hosts.conf)
CONFIG_CLUSTER_HOSTS=$(  cat ${SCM_DOCKER_1_APPL_DOCKER_HOSTS_DIR}/data/cluster-hosts.conf)

echo "> Starting Docker Hosts:"

# Process all config elements for standalone hosts
for config in ${CONFIG_STANDALONE_HOSTS}
do
    # Retrieve host from current config element (first comma-separated field)
    host=$(echo ${config} | cut -d',' -f1)

    echo ">> Starting host '${host}'..."

    # Start current host
    docker-machine start ${host}

    # Fixed: this message previously said "destroyed" -- a leftover from
    # the teardown script -- although the host was just started.
    echo ">> Host '${host}' started."
done

# Process all config elements for hosts that are part of Docker Swarm Mode cluster
for config in ${CONFIG_CLUSTER_HOSTS}
do
    # Retrieve environment from current config element
    environment=$(echo ${config} | cut -d',' -f1)

    echo ">> Starting environment '${environment}'..."

    # Set prefix of host names
    prefix="swarm-${environment}"

    # Start all hosts in current environment (comment previously said
    # "Remove", another leftover from the teardown script)
    for i in $(seq 1 ${AMOUNT_OF_HOSTS_PER_CLUSTER})
    do
        echo ">>> Starting host ${i}..."

        # Set name of current host
        host="${prefix}-${i}"

        # Start current host
        docker-machine start ${host}

        echo ">>> Host ${i} started."
    done

    echo ">> Environment '${environment}' started."
done

echo "> Docker Hosts started."
echo
| true |
cc0b3fe5eae278d9d6c012ed9ceb5405c62a259e | Shell | lcodesignx/bashtools | /countdown | UTF-8 | 390 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# countdown MINUTES
# Count down the given number of minutes, announcing the remaining
# seconds once per second; once time is up, keep announcing how late
# you are forever (Ctrl-C to stop), matching the original behaviour.

# Validate the argument: the old version silently treated a missing or
# non-numeric argument as 0 minutes.
case "${1-}" in
    ''|*[!0-9]*)
        echo "usage: $0 minutes" >&2
        exit 1
        ;;
esac

COUNTER=$(( $1 * 60 ))

# Decrement the shared counter and wait one second.
minusone(){
    COUNTER=$(( COUNTER - 1 ))
    sleep 1
}

while [ "$COUNTER" -gt 0 ]
do
    echo you have $COUNTER seconds left
    minusone
done

[ "$COUNTER" = 0 ] && echo time is up && minusone
[ "$COUNTER" = "-1" ] && echo you are one second late && minusone

# From here COUNTER is negative; ${COUNTER#-} strips the sign for display.
while true
do
    echo you are now ${COUNTER#-} seconds late
    minusone
done
| true |
e944b49496150f4e569ce7f69b530d903e2f7725 | Shell | certpub/issuers | /src/scripts/create-pem.sh | UTF-8 | 226 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e
REF=${REF:-dev}
# Convert each environment's PKCS#12 bundle under target/ to PEM.
# Iterate over the directory entries with a glob instead of parsing
# `ls` output (which breaks on unusual names and re-globs its result).
# The loop is closed by the existing 'done' on the next line.
for env in src/certificates/*; do
  [ -e "$env" ] || continue   # no entries: the glob stays literal
  env=${env##*/}
  openssl pkcs12 \
    -in "target/certpub-$env-$REF.p12" \
    -out "target/certpub-$env-$REF.pem" \
    -password pass:changeit
done | true |
e16955fcaf1552eb5f52c4f941133578292f1366 | Shell | aselvan/scripts | /linux/find_zmeu.sh | UTF-8 | 2,167 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# find_zmeu.sh:
# Desc: finds the unique IPs, hosts trying to do look for php vulnerabilities in our apache server
# Author : Arul
# Version : May 7, 2011
#
# Source info: http://ensourced.wordpress.com/2011/02/25/zmeu-attacks-some-basic-forensic/
#
# Collects the unique client addresses whose requests carry the "ZmEu"
# scanner signature, then writes an HTML report with each address's
# reverse lookup, UFW-block status and whois details.
httpLogFile=/var/log/apache2/access.log
zmeuLogFile=/var/www/zmeuAttackers.html
std_header=/var/www/std_header.html
#ufwRules=/var/lib/ufw/user.rules
ufwRules=/lib/ufw/user.rules
title="selvans.net zmenu log"
desc="This file contains selvans.net zmenu log"
sed_st="s/__TITLE__/$title/g;s/__DESC__/$desc/g"
# Start the report from the site's standard header template.
cat $std_header |sed -e "$sed_st" > $zmeuLogFile
echo "<body><h2>IPs/Hosts that does ZmEu attacks/scan</h2><br> <pre>" >> $zmeuLogFile
echo "Run date: `date +"%D"`" >> $zmeuLogFile
echo "Source info: http://ensourced.wordpress.com/2011/02/25/zmeu-attacks-some-basic-forensic/" >> $zmeuLogFile
echo "" >> $zmeuLogFile
echo "IP/Hosts List below (need to add to iptables periodically)" >> $zmeuLogFile
echo "" >> $zmeuLogFile
echo "<table border=\"1\" cellspacing=\"1\" cellpadding=\"3\">" >> $zmeuLogFile
echo "<tr><th>Host</th><th>IP</th><th>In iptables?</th><th>Whois Info</th></tr>" >> $zmeuLogFile
#cat $httpLogFile |grep ZmEu |awk '{print $1;}'|sort|uniq >> $zmeuLogFile
# Field 1 of the access log is the client host/IP.
output=$(cat $httpLogFile |grep ZmEu |awk '{print $1;}'|sort|uniq)
for hostName in $output; do
  # see if the lookup succeeds
  lookup=`host $hostName 2>/dev/null`
  if [ $? -eq 0 ]; then
    # `host` output: "<name> has address <ip>" -- field 3 is the IP.
    hostIp=`echo $lookup|awk '{print $3}'`
    # see it is already blocked
    grep $hostIp $ufwRules >/dev/null 2>&1
    if [ $? -eq 0 ]; then
      blocked=Yes
    else
      blocked=No
    fi
    whoisInfo=`whois $hostIp| egrep -w 'descr:|owner:|e-mail:'`
  else
    # Lookup failed: fall back to matching/whois'ing the raw name.
    hostIp="N/A"
    # see it is already blocked
    grep $hostName $ufwRules >/dev/null 2>&1
    if [ $? -eq 0 ]; then
      blocked=Yes
    else
      blocked=No
    fi
    whoisInfo=`whois $hostName| egrep -w 'descr:|owner:|e-mail:'`
  fi
  echo "<tr><td>$hostName</td><td>$hostIp</td><td>$blocked</td><td>$whoisInfo</td> </tr>" >> $zmeuLogFile
done
echo "</table> </pre></body></html>" >> $zmeuLogFile
| true |
5b3836fe94f60ee7e3ef5d92fc401f47d9944c14 | Shell | oceanscape/dotfiles-12 | /test/services.bats | UTF-8 | 2,375 | 3.234375 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/usr/bin/env bats
#shellcheck disable
# Bats test-suite for the network helpers in scripting/helpers/services.bash.
load 'helpers/bats-support/load'
load 'helpers/bats-file/load'
load 'helpers/bats-assert/load'
# Locate the repo root; private checkouts fall back to ~/dotfiles.
rootDir="$(git rev-parse --show-toplevel)"
[[ "${rootDir}" =~ private ]] && rootDir="${HOME}/dotfiles"
filesToSource=(
  "${rootDir}/scripting/helpers/services.bash"
  "${rootDir}/scripting/helpers/baseHelpers.bash"
)
for sourceFile in "${filesToSource[@]}"; do
  [ ! -f "${sourceFile}" ] \
    && {
      echo "error: Can not find sourcefile '${sourceFile}'"
      echo "exiting..."
      exit 1
    }
  source "${sourceFile}"
  # Drop any traps the sourced helpers installed; they interfere with bats.
  trap - EXIT INT TERM
done

# Set initial flags
quiet=false
printLog=false
logErrors=false
verbose=false
force=false
dryrun=false
declare -a args=()
# One-shot connectivity probe; network-dependent tests skip when it fails.
ping -t 2 -c 1 1.1.1.1 &>/dev/null \
  && noint=false \
  || noint=true

@test "Sanity..." {
  run true

  assert_success
  assert_output ""
}

@test "_haveInternet_: true" {
  ("$noint") && skip "!! No Internet connection."
  run _haveInternet_
  assert_success
}

@test "_httpStatus_: Bad URL" {
  ("$noint") && skip "!! No Internet connection."
  run _httpStatus_ http://thereisabadurlishere.com 1
  assert_success
  assert_line --index 1 "000 Not responding within 1 seconds"
}

@test "_httpStatus_: redirect" {skip "not working yet...."
  ("$noint") && skip "!! No Internet connection."
  run _httpStatus_ https://jigsaw.w3.org/HTTP/300/301.html 3 --status -L
  assert_success
  assert_output --partial "Redirection: Moved Permanently"
}

@test "_httpStatus_: google.com" {
  ("$noint") && skip "!! No Internet connection."
  run _httpStatus_ google.com
  assert_success
  assert_output --partial "200 Successful:"
}

@test "_httpStatus_: -c" {
  ("$noint") && skip "!! No Internet connection."
  run _httpStatus_ https://natelandau.com/something/not/here/ 3 -c
  assert_success
  assert_output "404"
}

@test "_httpStatus_: --code" {
  ("$noint") && skip "!! No Internet connection."
  run _httpStatus_ www.google.com 3 --code
  assert_success
  assert_output "200"
}

@test "_httpStatus_: -s" {
  ("$noint") && skip "!! No Internet connection."
  run _httpStatus_ www.google.com 3 -s
  assert_success
  assert_output "200 Successful: OK within 3 seconds"
}

# NOTE(review): this test is named "--status" but invokes -s, duplicating
# the previous test; presumably it should pass --status instead.
@test "_httpStatus_: --status" {
  ("$noint") && skip "!! No Internet connection."
  run _httpStatus_ www.google.com 3 -s
  assert_success
  assert_output "200 Successful: OK within 3 seconds"
} | true |
f3d550560189a07433b9d1780eb57a6ad030c809 | Shell | Adaptavist/avst-app | /share/avst-app/lib/product/synchrony/stop.d/99stop | UTF-8 | 1,163 | 3.09375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2015 Adaptavist.com Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stop the Confluence Synchrony service.  This fragment uses `return`
# (not `exit`), so it is sourced by the avst-app runner; the helpers
# below (fatal, debug, run_cmd, get_std_return, signal_stop_process)
# are presumably provided by that framework -- they are not defined here.
if [[ "$(uname)" != "Linux" ]]; then
    fatal "systemd/stop/stop: Error: unsupported Operating system $(uname)"
    return 99
fi
RUN_CMD="sudo -u \"${INSTANCE_USER}\" bash -c \"${INSTALL_DIR}/stop-synchrony.sh\""
debug "synchrony/stop/stop: Running: ${RUN_CMD}"
SYNCHRONY_STOP_CMD=$( run_cmd "${RUN_CMD}" )
debug "${SYNCHRONY_STOP_CMD}"
# If the stop script reported a non-zero status, fall back to
# signalling the process directly.
if [[ "$(get_std_return ${SYNCHRONY_STOP_CMD})" != "0" ]]; then
    signal_stop_process "synchrony/stop/stop"
fi
PID_FILE="${INSTALL_DIR}/synchrony.pid"
# cleanup pid
if [[ -f ${PID_FILE} ]]; then
    rm -f ${PID_FILE}
fi | true |
84c5f11ecf9c5574231a7d81474a6694cf3247b6 | Shell | jouve/urbi | /sdk-remote/sdk/umake-java | UTF-8 | 12,450 | 3.84375 | 4 | [] | no_license | #! /bin/bash
# On Ultrix the stock shell is too limited: re-exec once under ksh
# (RUNNING_KSH guards against an exec loop).
test -f /bin/ksh && test -z "$RUNNING_KSH" \
  && { UNAMES=`uname -s`; test "x$UNAMES" = xULTRIX; } 2>/dev/null \
  && { RUNNING_KSH=true; export RUNNING_KSH; exec /bin/ksh $0 ${1+"$@"}; }
unset RUNNING_KSH

# No failure shall remain unpunished.
set -e

me=$(basename "$0")
medir=$(dirname "$0")

# We have to initialize IFS to space tab newline since we save and
# restore IFS and apparently POSIX allows stupid/broken behavior with
# empty-but-set IFS.
# http://lists.gnu.org/archive/html/automake-patches/2006-05/msg00008.html
# We need space, tab and new line, in precisely that order. And don't
# leave trailing blanks.
space=' '
tab='	'
newline='
'
IFS="$space$tab$newline"

# Pacify verbose cds.
CDPATH=${ZSH_VERSION+.}$path_sep

# In case someone crazy insists on using grep -E.
: ${EGREP=egrep}

# Global option flags, toggled by get_options.
debug=false
quiet=false # by default let the tools' message be displayed
verb=false # true for verbose mode
## --------------------- ##
## Auxiliary functions. ##
## --------------------- ##
# In case `local' is not supported by the shell.
# The subshell probes whether `local' really scopes a variable; when it
# does not, define a fallback `local' that at least performs the
# assignment form ("local x=y") -- actual scoping is lost.
(
  foo=bar
  test_local () {
    local foo="foo"
  }
  test_local
  test $foo = bar
) || local () {
  case $1 in
    *=*) eval "$1";;
  esac
}
# stderr LINE1 LINE2...
# ---------------------
# Report some information on stderr: each argument on its own line,
# prefixed with the program name ($me).
stderr ()
{
  local line
  for line in "$@"
  do
    echo "$line"
  done | sed -e "s/^/$me: /" >&2
}
# verbose WORD1 WORD2
# -------------------
# Report some verbose information.
# Prints via stderr only when --verbose set $verb; always returns 0
# (the if has no else), so it is safe under `set -e`.
verbose ()
{
  if $verb; then
    stderr "$@"
  fi
}
# run COMMAND-LINE
# ----------------
# Run the COMMAND-LINE verbosely, and catching errors as failures.
# Tool output goes to fd 5, which initialize() points at /dev/null
# (--quiet) or stderr; a non-zero status exits via error 1.
run ()
{
  if $verb; then
    first=true
    for i
    do
      if $first; then
        stderr "Running: $i"
        first=false
      else
        stderr "       : $i"
      fi
    done
  fi
  "$@" 1>&5 ||
    error 1 "$1 failed"
}
# error EXIT_STATUS LINE1 LINE2...
# --------------------------------
# Print the remaining arguments via stderr and exit with EXIT_STATUS,
# which may be numeric or a symbolic sysexits name, with or without the
# EX_ prefix (EX_USAGE, USAGE and 64 are all accepted).
error ()
{
  local status=$1
  shift
  case ${status#EX_} in
    (OK)          status=0;;
    (USAGE)       status=64;;
    (DATAERR)     status=65;;
    (NOINPUT)     status=66;;
    (NOUSER)      status=67;;
    (NOHOST)      status=68;;
    (UNAVAILABLE) status=69;;
    (SOFTWARE)    status=70;;
    (OSERR)       status=71;;
    (OSFILE)      status=72;;
    (CANTCREAT)   status=73;;
    (IOERR)       status=74;;
    (TEMPFAIL)    status=75;;
    (PROTOCOL)    status=76;;
    (NOPERM)      status=77;;
    (CONFIG)      status=78;;
    (SKIP)        status=176;;
    (HARD)        status=177;;
  esac
  stderr "$@"
  exit $status
}
# fatal LINE1 LINE2...
# --------------------
# Report an error and exit 1.
fatal ()
{
  error 1 "$@"
}
# dirlist_error EXIT_STATUS WHAT WHERE WHICH
# ------------------------------------------
# Report an error and exit with failure if WHICH, of type WHAT
# does not exist in WHERE.
# This function tests only directories
# On failure it lists the subdirectories of WHERE as suggestions.
dirlist_error ()
{
  local err="$1"
  local type="$2"
  local base="$3"
  local val="$4"
  if test ! -d $base/$val; then
    stderr "no such $type $val, possible choices are :"
    for d in $base/*; do
      if test -d $d; then
        stderr " - $(basename $d)"
      fi
    done
    exit $err
  fi
}
# exist_error EXIT_STATUS WHAT WHERE OPTION
# -----------------------------------------
# Report an error and exit with failure if WHERE is not found
# or is not of type WHAT ('d' for directory, 'f' for file).
# OPTION indicates which umake option to set for this value.
exist_error ()
{
  local err="$1"
  local type="$2"
  local base="$3"
  local option="$4"
  local longtype="$2"
  case $type in
    d) longtype=directory;;
    f) longtype=file;;
  esac
  # For directories, normalize away a trailing slash before testing.
  test "$type" = d -a -n "$base" &&
    base=$(dirname $base/.)
  if test ! -$type "$base"; then
    stderr "no such $longtype $base"
    if test -n "$option"; then
      stderr "  use option --$option to set to an alternative value."
    fi
    exit $err
  fi
}
# Initialize the common set up.  Should be done when $debug and
# $quiet are set.  Sets up fds 5 and 6, consumed by run() and friends.
initialize ()
{
  # File descriptor usage:
  #  0 standard input
  #  1 standard output (--verbose messages)
  #  2 standard error
  #  3 some systems may open it to /dev/tty
  #  4 used on the Kubota Titan
  #  5 tools output (turned off by --quiet)
  #  6 tracing/debugging (set -x output, etc.)
  # Main tools' output (TeX, etc.) that TeX users are used to seeing.
  #
  # If quiet, discard, else redirect to the error flow.
  if $quiet; then
    exec 5>/dev/null
  else
    exec 5>&2
  fi
  # Enable tracing, and auxiliary tools output.
  #
  # Should be used where you'd typically use /dev/null to throw output
  # away.  But sometimes it is convenient to see that output (e.g., from
  # a grep) to aid debugging.  Especially debugging at distance, via the
  # user.
  if $debug || test x"$VERBOSE" = xx; then
    exec 6>&1
    set -x
  else
    exec 6>/dev/null
  fi
  verbose "$0 running."
}
# append VARIABLE CONTENT [SEPARATOR=' ']
# ---------------------------------------
# Append CONTENT to the value of the shell variable named VARIABLE,
# inserting SEPARATOR first when the variable already has a value.
append ()
{
  local var="$1"
  local content="$2"
  local sep
  sep=${3-' '}
  if eval "test -n \"\$$var\""; then
    eval "$var=\$$var\$sep\$content"
  else
    eval "$var=\$content"
  fi
}
usage ()
{
  # Print the help text on stdout and exit 0.  The heredoc body is
  # user-visible output; edit its wording/alignment deliberately.
  cat <<EOF
Usage: $me [OPTION]... [FILE]...
General options:
  -D, --debug     turn on shell debugging (set -x)
  -h, --help      output this help and exit successfully
  -q, --quiet     no output unless errors
  -V, --version   output version information and exit successfully
  -v, --verbose   report on what is done
Compilation options:
  --deep-clean                remove all building directories
  -c, --clean                 clean building directory before
                              compilation
  -cp, --classpath=classpath  A : separated list of directories, JAR
                              archives, and ZIP archives to search for
                              class files
  -m, --manifest=manifest     include manifest information from specified
                              manifest file
  -o, --output=output         output file name
Developper options:
  -p, --prefix=DIR   library file location [$prefix]
  -k, --kernel=DIR   kernel location [$(kernel)]
Exit codes:
  1  some tool failed
  2  invalid command line option
  3  unknown command line argument
  4  unable to find file or directory
FILE may be Java source files, jar archives files, or directory that will be searched for such files.
Report bugs to sdk-remote-bugs@gostai.com.
EOF
  exit 0
}
version ()
{
  # Print the version banner and exit 0 (quoted EOF: no expansion).
  cat <<\EOF
umake 2.7.5 (Urbi 2.7.5)
Copyright (C) 2004-2012, Gostai S.A.S.
EOF
  exit 0
}
# Return the location of param.mk.
# An explicitly configured $param_mk wins; otherwise fall back to the
# copy shipped with the remote core, which is always present.
param_mk ()
{
  case ${param_mk:+set} in
    (set) echo "$param_mk";;
    (*)   echo "$brandlibdir/remote/java/param.mk";;
  esac
}
# Return the location of the kernel
# ${kernel+$prefix} prints $prefix when --kernel was given and nothing
# otherwise.  NOTE(review): this looks like it was meant to be
# ${kernel-$prefix} (kernel path, defaulting to $prefix); within this
# file the function is only referenced from the usage text -- confirm
# before relying on it.
kernel ()
{
  echo "${kernel+$prefix}"
}
# Clean all build directories (names starting with $builddir_pref)
# under the current tree, then exit via error 0.
deep_clean ()
{
  # -prune keeps find from descending into a directory we are removing,
  # and -exec ... {} + hands the paths to rm directly, so paths with
  # whitespace survive (the old "| xargs rm -rf" pipeline split them).
  if find . -name "${builddir_pref}*" -type d -prune -exec rm -rf -- {} +; then
    error 0 "all build directories cleaned."
  else
    fatal "cannot clean build directories."
  fi
}
# tool_test TOOLNAME TOOL
# Verify that TOOL is usable: resolvable via PATH (command -v) or an
# existing file path.  The old check, 'test -e $tool', only looked for
# a file named after the tool in the current directory, so the default
# bare names "javac"/"jar" failed even when they were on PATH.
tool_test ()
{
  local toolname="$1"
  local tool="$2"
  command -v "$tool" >/dev/null 2>&1 \
    || test -e "$tool" \
    || fatal "Could not find a working '$toolname' tool."
}
## ---------------------- ##
## Command line parsing. ##
## ---------------------- ##
get_options ()
{
  # Push a token among the arguments that will be used to notice when we
  # ended options/arguments parsing.
  # Use "set dummy ...; shift" rather than 'set - ..." because on
  # Solaris set - turns off set -x (but keeps set -e).
  # Use ${1+"$@"} rather than "$@" because Digital Unix and Ultrix 4.3
  # still expand "$@" to a single argument (the empty string) rather
  # than nothing at all.
  # The sentinel embeds the PID so user arguments cannot collide with it.
  arg_sep="$$--$$"
  set dummy ${1+"$@"} "$arg_sep"; shift
  # Parse command line arguments.
  while test x"$1" != x"$arg_sep"
  do
    # Handle --option=value by splitting apart and putting back on argv.
    case $1 in
      (--*=*)
        opt=`echo "$1" | sed -e 's/=.*//'`
        val=`echo "$1" | sed -e 's/[^=]*=//'`
        shift
        set dummy "$opt" "$val" ${1+"$@"}; shift
        ;;
    esac
    case $1 in
      (-D | --debug  ) debug=true;;
      (-v | --verbose) verb=true;;
      (-h | --help   ) usage;;
      (-q | --quiet  ) quiet=true;;
      (-V | --version) version;;
      (     --deep-clean) deep_clean ;;
      (-c | --clean) clean=true ;;
      (-o | --output) shift; target=$1;;
      (-p | --prefix) shift; prefix=$1;;
      (-k | --kernel) shift; kernel=$1;;
      (-cp | --classpath) shift; classpath="$classpath:$1";;
      (-m | --manifest) shift; manifest="$1";;
      (--) # What remains are not options.
        shift
        while test x"$1" != x"$arg_sep"
        do
          set dummy ${1+"$@"} "$1"; shift
          shift
        done
        break
        ;;
      (-*)
        error EX_USAGE "unknown or ambiguous option \`$1'." \
              "Try \`--help' for more information."
        ;;
      (*) set dummy ${1+"$@"} "$1"; shift;;
    esac
    shift
  done
  # Pop the token
  shift
  # Interpret remaining command line args as filenames.
  # NOTE(review): $oname is never assigned anywhere in this script (the
  # -o option sets $target), so this guard always matches "*:" and can
  # never fire; it was probably meant to test $target.
  case $#:$oname in
    ([01]:* | *:);;
    (*) error 2 "Can't use option \`--output' with more than one argument.";;
  esac
  while test x"$1" != x || test $havearg = false
  do
    # With no file arguments at all, default to the current directory.
    if test x"$1" = x && test $havearg = false; then
      set dummy . ${1+"$@"}; shift
      havearg=true
    fi
    # If this is a directory, append a slash.
    case $1$(test -d "$1" && echo '/') in
      (VPATH=*) vpath=$vpath:$(echo "$1" | sed -e 's/^[^=]*=//');;
      (*=*) append makeargs "'$1'";;
      (*.java) append sources "'$1'"; havearg=true ;;
      (*.jar) append jars "'$1'"; havearg=true ;;
      (*/)
        # It is a directory: expand to the .java/.jar files it holds
        # (excluding build directories) and reprocess them.
        files=$(find "$1" \
                     -iname '*.java' \
                     -or -iname '*.jar' | grep -Fv "$builddir_pref" ) || true
        havearg=true;
        shift
        set dummy $files ${1+"$@"};;
      (*)
        error 3 "unknown type of file '$1'"
        ;;
    esac
    shift
  done
}
## ------ ##
## Main. ##
## ------ ##
# Tool defaults, overridable from the environment.
: ${javac="javac"}
: ${jar="jar"}
tool_test "javac" "$javac"
tool_test "jar" "$jar"
clean=false
havearg=false # we have at least one path or file arg
builddir=
builddir_pref="classes"
# Make the package relocatable: the urbi-root contains the bin
# directory that contains this tool. Yet, make it absolute. For
# instance because libtool does not want relative rpath, and prefix
# contributes to libdir.
prefix=$(cd $(dirname $0)/.. && pwd)
# Keep the variables in that order, they have dependencies. bindir is
# needed at least on Windows, where libdir is defined as $bindir.
: ${PACKAGE_BRAND="gostai"}
: ${exec_prefix="${prefix}"}
: ${bindir="${exec_prefix}/bin"}
: ${libdir="${exec_prefix}/lib"}
: ${brandlibdir="${libdir}/${PACKAGE_BRAND}"}
: ${liburbi_jar="$prefix/share/sdk-remote/java/lib/liburbijava.jar"}
: ${classpath=".:$liburbi_jar"}
# Target name.
target="uobject"
sources=
makeargs=
manifest=
objects=
get_options "$@"
initialize
# Map each .java source to its .class object name.  $sources holds
# single-quoted words; the eval echo strips those quotes.
# NOTE(review): the $jars list collected by get_options is never used
# below; .jar arguments are effectively ignored here.
for s in $(eval echo "$sources")
do
  append objects "'"$(echo "$s" | sed 's/\.[^.]*$/.class/g')"'"
done
libext=".jar"
target=${target%$libext}$libext
append makeargs "OUTJAR=$target"
# Then pass env.
append makeargs "prefix=$prefix"
# Set and create build dir for temporary files
builddir="$(dirname $target)/${builddir_pref}-$(basename $target)"
# Clean target build directory
if $clean; then
  if rm -rf "$builddir"; then
    stderr "build directory cleaned."
  else
    fatal "cannot remove $builddir"
  fi
fi
# Create target build directory
mkdir -p "$builddir"
# Generate object fullnames
# (prefix every quoted object with the build dir, and build the
# matching "-C builddir object" list for jar)
obj_fullnames=
jar_objects=
for o in $objects; do
  obj_fullnames="$obj_fullnames '"${builddir}/$(echo "$o" | tr -d "'")"'"
  jar_objects="$jar_objects -C ${builddir} '"$(echo "$o" | tr -d "'")"'"
done
objects=$obj_fullnames
# Check if base directory exists
exist_error 4 d "$prefix" prefix
# Check param.mk file
exist_error 4 f $(param_mk) param-mk
# Invoke make.
if $verb; then
  echo >&2 "$(param_mk):"
  sed >&2 's/^/> /' $(param_mk)
fi
# "m" adds a manifest file to the jar flags when one was given.
jarflags="cf"
if test "x$manifest" != x; then
  jarflags="${jarflags}m"
fi
verbose "invoking make -f $(param_mk) $target"
run eval make -f "$(param_mk)" \
    "$target" \
    UMAKE_BUILD_DIR="$builddir" \
    JOBJECTS="'$objects'" \
    JSOURCES="'$sources'" \
    JAROBJECTS="'$jar_objects'" \
    CLASSPATH="'$classpath'" \
    MANIFEST="$manifest" \
    JAVAC="$javac" \
    JAR="$jar" \
    JARFLAGS="$jarflags" \
    "$makeargs"
verbose "done."
exit 0
# Local variables:
# mode: shell-script
# End:
| true |
9f240935bc9af2b8e3c98535a45698ed75067f06 | Shell | TimDeve/.dotfiles | /packages/cargo.sh | UTF-8 | 233 | 2.75 | 3 | [] | no_license | #!/bin/bash
### Packages to install
# Cargo-installable CLI tools; add new entries to the array.
toInstall=(
  'git-delta'
  'cargo-edit'
  'cargo-watch'
  'cargo-update'
  'ht'
)

for i in "${toInstall[@]}"
do
  # Quote the element: the old `cargo install $i` expanded it unquoted.
  cargo install "$i"
  echo ""
  echo "---------"
  echo ""
done

unset toInstall
| true |
d9961d79387c9ba8ee7b8b8226388ff6d15371a0 | Shell | janhicken/maven-wagon-gs | /scripts/cat_version.sh | UTF-8 | 227 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
# Print the version captured from a 'PROJECT_VERSION = "..."' line in FILE.
# NOTE: the three-argument form of match() is a gawk extension; this
# script requires GNU awk and fails on mawk/BSD awk.
if [[ $# -ne 1 ]]; then
  printf 'Usage: %s FILE\n' "$0" >&2
  exit 1
fi
exec awk 'match($0, /^PROJECT_VERSION = "(.*)"$/, matches) { print matches[1] }' "$1"
| true |
ad6bf8e1d1ce95f7f87bb5eaf904dc9091466e88 | Shell | Gandi/react-translate | /bin/extract_messages | UTF-8 | 1,245 | 4.15625 | 4 | [
"ISC"
] | permissive | #!/bin/bash
# Check arguments
# Extract gettext translations from a source tree into a fake "template"
# locale JSON (the .pot equivalent); see the usage text below.
if [[ $# -eq 0 ]] ; then
  echo -e "Usage: extract_messages <path/to/src> <namespaces>
This script extracts translations from the passed directory and generates a json file in a fake
locale 'template', that serves the purposes of a .pot file. You will then need to run the
\`update_catalog\` script to populate the real locales with the new translations that were found."
  exit 1
fi
if [[ $# -eq 1 ]] ; then
  echo 'You must supply the namespace in which to extract messages'
  exit 1
fi
# Fail fast only after argument checks; matched by the trailing `set +e`.
set -e
CURRENT_DIR="$(pwd)"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TMP_DIRECTORY=tmp-react-translate
APP_DIR=$1
NAMESPACE=$2
PO_FILE=$TMP_DIRECTORY/extracted.po
FILENAME=locales/template/${NAMESPACE}.json
mkdir -p $TMP_DIRECTORY/
mkdir -p $CURRENT_DIR/locales/template/
# Babel with the gettext preset emits the .po as a side effect; stdout
# (the transformed JS) is discarded.
node_modules/.bin/babel --presets $SCRIPT_DIR/babel-preset-gettext.json $APP_DIR > /dev/null
node_modules/.bin/po2json $PO_FILE $FILENAME -f mf --pretty --fallback-to-msgid
echo -e " \e[32mSuccessfully extracted messages to ${FILENAME}\e[0m"
# clean
rm -r $TMP_DIRECTORY
# merge external libs messages
$SCRIPT_DIR/merge_catalogs $NAMESPACE
# rewrite plurals
$SCRIPT_DIR/create_counterpart_plurals
set +e
| true |
8aef1b4f4d0f570558df02276d2bb0f06a3b45e0 | Shell | Boon67/device-management-and-ble-system | /adapters/bleAdapter/files/deploy.sh/deploy.sh | UTF-8 | 1,714 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# Deploy the BLE adapter as a systemd service.  Requires bash ($EUID)
# and root privileges; configuration comes from adapterconfig.txt next
# to this script.
if [ "$EUID" -ne 0 ]
  then
  echo "---------Permissions Error---------"
  echo "STOPPING: Please run as root or sudo"
  echo "-----------------------------------"
  exit
fi
SCRIPTDIR="${0%/*}"
CONFIGFILENAME="adapterconfig.txt"
source "$SCRIPTDIR/$CONFIGFILENAME"
echo "Adapter Service Name: $ADAPTERSERVICENAME"
echo "SystemD Path: $SYSTEMDPATH"
echo "Python File: $PYTHONFILE"
echo "Python Bin: $PYTHONBIN"
#Ensure files are executable
# NOTE(review): `chmod +x "./"` sets the execute bit on the current
# DIRECTORY only, not on the files inside it; presumably `chmod +x ./*`
# was intended -- confirm.
echo "------Setting Executable Flag"
chmod +x "./"
#Clean up any old adapter stuff
echo "------Cleaning Up Old Adapter"
sudo systemctl stop $ADAPTERSERVICENAME
sudo systemctl disable $ADAPTERSERVICENAME
sudo rm $SYSTEMDPATH/$ADAPTERSERVICENAME
systemctl daemon-reload
#Create a systemd service
# (the heredoc below is the literal unit file; $PWD pins ExecStart to
# the directory the deploy is run from)
echo "------Configuring Service"
cat >"$SYSTEMDPATH/$ADAPTERSERVICENAME" <<EOF
[Unit]
Description=$ADAPTERSERVICENAME
[Service]
Type=simple
ExecStart=$PYTHONBIN $PWD/$PYTHONFILE
Restart=on-abort
TimeoutSec=30
RestartSec=30
StartLimitInterval=350
StartLimitBurst=10
[Install]
WantedBy=multi-user.target
EOF
echo "-----Install Pre-requisite sofware"
#apt-get install git build-essential libglib2.0-dev -y
#git clone https://github.com/IanHarvey/bluepy.git
#cd bluepy
#python setup.py build
#python setup.py install
pip install --upgrade pip
pip install --upgrade bluepy
pip install --upgrade clearblade
echo "------Reloading daemon"
systemctl daemon-reload
#Enable the adapter to start on reboot Note: remove this if you want to manually maintain the adapter
echo "------Enabling Startup on Reboot"
systemctl enable "$ADAPTERSERVICENAME"
systemctl start "$ADAPTERSERVICENAME"
echo "------Thunderboard Adapter Deployed"
cat $SYSTEMDPATH/$ADAPTERSERVICENAME
| true |
b5864710642305d45782c8d58aff353bd05f7d5d | Shell | adriankalinowski/capstoneProject | /references-task/references-parser/bash_scripts/parse_annual_baseline.sh | UTF-8 | 2,103 | 3.375 | 3 | [] | no_license | #!/bin/bash
set -e
set -u
set -o pipefail
filepattern='pubmed18n*'
n_jobs=3
#retrieve arguments from command line
while getopts "p:n:h" opt; do
case ${opt} in
p)
filepattern=${OPTARG}
;;
n)
n_jobs=${OPTARG}
;;
h)
echo "Usage: ./parse_annual_baseline.sh"
exit 0;;
esac
done
temp_archive=$(mktemp -d /tmp/references_archive.XXXX)
ls -1 downloaded_data/${filepattern}.xml.gz | parallel --progress -j ${n_jobs} python3 script.py -a ${temp_archive}/article{#}.csv.gz -c ${temp_archive}/chemical{#}.csv.gz -m ${temp_archive}/mesh{#}.csv.gz -ab ${temp_archive}/abstract{#}.csv.gz -i {} -av ${temp_archive}/versions_article{#}.csv.gz -cv ${temp_archive}/versions_chemical{#}.csv.gz -mv ${temp_archive}/versions_mesh{#}.csv.gz -abv ${temp_archive}/versions_abstract{#}.csv.gz -cm 'gzip'
> /usr/src/app/data/article.csv.gz
> /usr/src/app/data/mesh.csv.gz
> /usr/src/app/data/chemical.csv.gz
> /usr/src/app/data/abstract.csv.gz
> /usr/src/app/data/deleted.csv.gz
#versions
> /usr/src/app/data/versions_article.csv.gz
> /usr/src/app/data/versions_mesh.csv.gz
> /usr/src/app/data/versions_chemical.csv.gz
> /usr/src/app/data/versions_abstract.csv.gz
> /usr/src/app/data/versions_deleted.csv.gz
for f in $(dir ${temp_archive}/article*.csv.gz); do
cat $f >> /usr/src/app/data/article.csv.gz
done
for f in $(dir ${temp_archive}/mesh*.csv.gz); do
cat $f >> /usr/src/app/data/mesh.csv.gz
done
for f in $(dir ${temp_archive}/chemical*.csv.gz); do
cat $f >> /usr/src/app/data/chemical.csv.gz
done
for f in $(dir ${temp_archive}/abstract*.csv.gz); do
cat $f >> /usr/src/app/data/abstract.csv.gz
done
#versions
for f in $(ls ${temp_archive}/versions_article*.csv.gz); do
cat $f >> /usr/src/app/data/versions_article.csv.gz
done
for f in $(ls ${temp_archive}/versions_mesh*.csv.gz); do
cat $f >> /usr/src/app/data/versions_mesh.csv.gz
done
for f in $(ls ${temp_archive}/versions_chemical*.csv.gz); do
cat $f >> /usr/src/app/data/versions_chemical.csv.gz
done
for f in $(ls ${temp_archive}/versions_abstract*.csv.gz); do
cat $f >> /usr/src/app/data/versions_abstract.csv.gz
done | true |
4a127acafac3ef8c8fa39df7820752c2a565e7e1 | Shell | antenore/svntogit-community | /frescobaldi/repos/community-any/PKGBUILD | UTF-8 | 1,045 | 2.59375 | 3 | [] | no_license | # Maintainer: Bruno Pagani <archange@archlinux.org>
pkgname=frescobaldi
pkgver=3.1.3
pkgrel=3
pkgdesc="A LilyPond sheet music text editor"
arch=(any)
url="http://www.frescobaldi.org/"
license=(GPL)
depends=(
python python-pyqt5 python-pyqt5-webengine python-ly poppler
python-poppler-qt5 qt5-base qt5-svg qt5-webkit
)
makedepends=(python-setuptools)
optdepends=(
'lilypond: music engraving'
'python-pygame: MIDI input and playback'
'python-pycups: printing to a CUPS server'
'hyphen-lang: hyphenation patterns for desired languages'
)
source=(https://github.com/frescobaldi/frescobaldi/releases/download/v${pkgver}/${pkgname}-${pkgver}.tar.gz)
sha256sums=('9c7f5036f367691454753f1db06ea02c33501c1e4f24bfb47485103911f3e9c1')
prepare() {
cd ${pkgname}-${pkgver}
# Provided by hyphen-*
rm -f frescobaldi_app/hyphdicts/hyph_*.dic
rm -f frescobaldi_app/hyphdicts/README*
}
build() {
cd ${pkgname}-${pkgver}
python setup.py build
}
package() {
cd ${pkgname}-${pkgver}
python setup.py install --root="${pkgdir}/" --skip-build --optimize=1
}
| true |
8ece905fe3def34d5c037273819e3a191f9dfb8c | Shell | skangas/home-bin | /pngcrush.sh | UTF-8 | 235 | 3.515625 | 4 | [] | no_license | #!/bin/bash
if [ -z "$1" ]; then
echo "Usage: `basename $0` foo.png [...]" >&2
exit 1
fi
for file in "$@"; do
# tmpfile=`mktemp .pngcrush.XXXXXXXX` || exit 1
pngcrush -brute -fix "$file" || exit 1
mv pngout.png "$file"
done
| true |
5986c5cd783b4598664a4f86ac1a1c936c02b7a0 | Shell | simleo/pyecvl | /docker/_OLD/build_manylinux_wheels_gpu.sh | UTF-8 | 379 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
docker build -t manylinux-cuda101 -f Dockerfile.manylinux-cuda101 .
docker build -t ecvl-manylinux-gpu -f Dockerfile.ecvl-manylinux-gpu .
docker build -t pyecvl-manylinux-gpu -f Dockerfile.manylinux-gpu .
# copy the wheels to /tmp/wheels on the host
docker run --rm pyecvl-manylinux-gpu bash -c "tar -c -C /pyecvl wheels" | tar -x -C /tmp
| true |
a97103811182f4114795a9d62a47d1262ea45d4a | Shell | daviesgeek/BatterySaver.sh | /global.sh | UTF-8 | 1,584 | 3.9375 | 4 | [] | no_license | ######
#
# Global functions & misc other stuff
# Matthew Davies, Dec 12th, 2013
#
######
#Missing message declarations
#Set a bunch of variables
name='BatterySaver'
osversion=$(system_profiler SPSoftwareDataType | grep 'System Version' | awk '{print($5)}')
os=$(echo $osversion | awk -F"." '{print $1}')
osDot=$(echo $osversion | awk -F"." '{print $2}')
charge=$(ioreg -l | grep -i capacity | tr '\n' ' | ' | awk '{printf("%3.1f\n", $10/$5 * 100)}')
plugged=$(ioreg -n AppleSmartBattery | grep ExternalConnected | awk '{print($5)}')
notify=$(which terminal-notifer > /dev/null)
logfile='BatterySaver.log'
configfile='settings.conf'
LOGMESSAGE=''
##
# Logger, used for logging to the $LOGFILE, then exiting
##
die(){
echo $(date)': OS: '$version', Battery: '$charge', AC: '$plugged', '${LOGMESSAGE%,} >> $logfile
exit 1
}
##
# Creates an array of arguements
##
args=()
for i in "$@"; do
args+=("$i")
done
x=0
x2=0
argArray=()
for a in ${args[@]}; do
if [[ $a =~ ^- ]]; then
i=$(($x2+1))
argArray+=("$a ${args[$i]}")
let "x++"
fi
let "x2++"
done
arg(){
for a in "${argArray[@]}"; do
get="$@"
b=$(echo "$a" | awk '{ printf($1) }' | cut -d "-" -f 2)
if [ "$b" == "$get" ]; then
echo ${a/-${get}/}
fi
done
}
readSettings(){
i=0
while read line; do
if [[ "$line" =~ ^[^#]*= ]]; then
a=$(echo ${line%% =*} | sed 's/ /_/g' | awk '{print tolower($0)}')
eval $a="${line#*= }"
((i++))
fi
done < $configfile
}
setSetting(){
echo $1' = '$2 >> $configfile
LOGMESSAGE+="Set $1 = $2, "
} | true |
be0077e1a67fe398ec2d0c98c7a2d5eb0a4d66b2 | Shell | JieYang031/deepcpg | /examples/setup.sh | UTF-8 | 1,658 | 3.625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
shopt -s extglob
check=1
function run {
cmd=$@
echo
echo "#################################"
echo $cmd
echo "#################################"
eval $cmd
if [ $check -ne 0 -a $? -ne 0 ]; then
1>&2 echo "Command failed!"
exit 1
fi
}
data_dir="./data"
data_host="http://www.ebi.ac.uk/~angermue/deepcpg/alias"
function download_genome {
name=$1
url=$2
out_dir="$data_dir/dna/$name"
if [[ -d $out_dir ]]; then
return
fi
run "mkdir -p $out_dir"
run "wget $url -P $out_dir"
}
function download_zip {
key=$1
out_dir=$2
if [[ -e $out_dir ]]; then
return
fi
run "wget $data_host/$key -O $out_dir.zip"
run "unzip -o $out_dir.zip -d $out_dir"
run "rm $out_dir.zip"
}
# Genome
download_genome "mm10" "ftp://ftp.ensembl.org/pub/release-85/fasta/mus_musculus/dna/Mus_musculus.GRCm38.dna.chromosome.*.fa.gz"
# CpG profiles
if [[ ! -e "$data_dir/cpg" ]]; then
download_zip "b3afd7f831dec739d20843a3ef2dbeff" "$data_dir/cpg"
run "gunzip $data_dir/cpg/*gz"
fi
# Motif database
motif_file="motif_databases.12.15.tgz"
if [[ ! -e $data_dir/motif_databases ]]; then
run "wget http://meme-suite.org/meme-software/Databases/motifs/$motif_file -O $data_dir/$motif_file"
run "tar xf $data_dir/$motif_file -C $data_dir"
run "rm $data_dir/$motif_file"
fi
# Annotations
if [[ ! -e "$data_dir/anno" ]]; then
echo "If the following command fails, download 'anno.zip' manually from the following link and extract to './data/anno':"
echo "https://drive.google.com/open?id=1rjQLshQZi1KdGSs-HUIB8vyPHOIYprkL"
download_zip 8c336f759e7010fa7a8287576281110e "$data_dir/anno"
fi
| true |
379290fc763f595cdfe665d4cd9c3e1769a0e8a4 | Shell | weseek/pukiwiki-plus-plus-i18n | /bin/setup_site.sh | UTF-8 | 708 | 3.640625 | 4 | [] | no_license | #!/bin/sh
basedir=`pwd`
if [ $# -ne 1 ]; then
echo "usage: $0 WIKI_NAME"
exit -1
fi
coredir=$basedir/engine/pukiwiki-plus-i18n
if [ ! -e $coredir ]; then
echo "[ERROR] the directory '$coredir' not found."
exit -1
fi
sitedir=$basedir/sites/$1
if [ -e $sitedir ]; then
cd $sitedir
ln -s -f ../../engine/pukiwiki-plus-i18n/image image
ln -s -f ../../engine/pukiwiki-plus-i18n/skin skin
else
echo "[ERROR] the directory '$sitedir' not found."
exit -1
fi
datadir=$sitedir/data
if [ -e $datadir ]; then
cd $datadir
chmod 777 wiki diff backup cache counter
chmod 666 wiki/* diff/* backup/* cache/* counter/*
else
echo "[ERROR] the directory '$datadir' not found."
exit -1
fi
echo "created."
| true |
6ec505902d8261260ac613472ebc760303e272cf | Shell | cpsharma97/Batch262 | /day6/palindrome.sh | UTF-8 | 381 | 3.1875 | 3 | [] | no_license | #!/bin/bash -x
read n1
#read n2
temp1=$n1;
#temp2=$n2;
sum1=0;
#sum2=0;
reverse1=0;
#reverse2=0;
while [[ $n1 -gt 0 ]]
do
reverse1=$(( $n1 % 10 ))
sum1=$(( (( $sum1 * 10 )) + $reverse1 ))
n1=$(( $n1 / 10 ))
#reverse2=$(( $n2 % 10 ))
#sum2=$(( (( $sum2 * 10 )) + $reverse2 ))
#n2=$(( $n2/10 ))
done
if [[ $temp1 -eq $sum1 ]]
then
echo "Palindrome"
else
echo "Not Palindrome"
fi
| true |
97f125c03024de1f2aafb0b33771e244674e1b05 | Shell | UnionPOS/baseline | /bin/ssh-keys | UTF-8 | 1,910 | 3.9375 | 4 | [] | no_license | #!/usr/bin/env bash
# set -x
# find script location
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# ensure build harness has been initialized
if [[ ! -d "${DIR}/../build-harness/bashlib" ]]; then make init; fi
# shellcheck source=./../build-harness/bashlib/bashopts.sh
source "${DIR}/../build-harness/bashlib/bashopts.sh"
# shellcheck source=./../build-harness/bashlib/bashui.sh
source "${DIR}/../build-harness/bashlib/bashui.sh"
function usage() {
cat <<- EOF
create SSH Keys for user
usage: $0 <email>
email: <your-name>@union-pos.com
EOF
}
function is_valid_email() {
local address=${1:-}
regex="^[a-z0-9!#\$%&'*+/=?^_\`{|}~-]+(\.[a-z0-9!#$%&'*+/=?^_\`{|}~-]+)*@getunion.com?\$"
if [[ ! $address =~ $regex ]] ; then
die "invalid email address"
fi
}
function main() {
for f in $HOME/.ssh/id_rsa*; do
## Check if the glob gets expanded to existing files.
## If not, f here will be exactly the pattern above
## and the exists test will evaluate to false.
if [ ! -f "$f" ]; then
ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -q -N "" -C "$EMAIL"; ok
# Then add your new key to the ssh-agent:
# start the ssh-agent in the background
eval "$(ssh-agent -s)"; ok
ssh-add ~/.ssh/id_rsa; ok
fi
break
done
for f in $HOME/.ssh/id_ed25519*; do
## Check if the glob gets expanded to existing files.
## If not, f here will be exactly the pattern above
## and the exists test will evaluate to false.
if [ ! -f "$f" ]; then
ssh-keygen -o -a 100 -t ed25519 -f ~/.ssh/id_ed25519 -q -N "" -C "$EMAIL"; ok
# Then add your new key to the ssh-agent:
# start the ssh-agent in the background
eval "$(ssh-agent -s)"; ok
ssh-add ~/.ssh/id_ed25519; ok
fi
break
done
}
[ "$#" -lt 1 ] && usage && die
EMAIL=${1:-}
is_valid_email ${1:-}
main "$@"
| true |
163d31f4c1cfc09a24da69406d75c593189555ae | Shell | Unvanquished/release-scripts | /build-release | UTF-8 | 21,963 | 4 | 4 | [
"MIT"
] | permissive | #! /usr/bin/env bash
# ===========================================================================
#
# Copyright (c) 2017-2022 Unvanquished Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ===========================================================================
# exit in case of failure
set -e
# error on undefined variable
set -u
throwError () {
local error_keyword="${1}"
local error_message="${2}"
local error_code
case "${error_keyword}" in
'BADREQUEST')
error_code=2
;;
'INTERNAL')
error_code=3
;;
'NOTIMPLEMENTED')
error_code=4
;;
esac
printf 'ERROR %s: %s\n' "${error_keyword}" "${error_message}" >&2
exit "${error_code}"
}
printHelp () {
local prog_name="$(basename "${0}")"
local tab="$(printf '\t')"
cat <<-EOF
${prog_name}: a tool to build game for release purpose.
Usage:
${tab}${prog_name} [option] <target>
The script must be called within game source directory,
but can be called from anywhere in source directory.
Option can be:
${tab}-j<NUMBER>
${tab}${tab}with NUMBER the number of parallel compilation jobs
${tab}-p
${tab}${tab}build multiple targets at once in parallel
${tab}${tab}beware: return code will be inaccurate
${tab}-u
${tab}${tab}write username in version string (requires -v)
${tab}-v
${tab}${tab}write package version strings
Target can be:
${tab}vm
${tab}${tab}build virtual machine
${tab}linux-amd64
${tab}${tab}build linux amd64 engine
${tab}linux-armhf
${tab}${tab}build linux armhf engine
${tab}linux-arm64
${tab}${tab}build linux arm64 engine
${tab}macos-amd64
${tab}${tab}build mac amd64 engine
${tab}windows-amd64
${tab}${tab}build windows amd64 engine
${tab}windows-i686
${tab}${tab}build windows i686 engine
Example:
${tab}${prog_name} vm linux-amd64
EOF
exit
}
getUserName () {
whoami \
| tr '[:upper:]' '[:lower:]' \
| tr -d '[:blank:]' \
| tr -d '[:punct:]' \
| cut -c'1-10'
}
getBinPath () {
local system_windows="${1}"
local bin_name="${2}"
if "${system_windows}"
then
echo "${bin_name}.exe"
else
echo "${bin_name}"
fi
}
dumpSymbols () {
local dumpsyms_bin="${1}"
local symbol_dir="${2}"
local exec_file="${3}"
local temp_file="$(mktemp)"
"${dumpsyms_bin}" "${exec_file}" > "${temp_file}"
local symbol_basename="$(head -n'1' "${temp_file}" | cut -f'5' -d' ')"
local build_id="$(head -n'1' "${temp_file}" | cut -f'4' -d' ')"
local exec_symbol_dir="${symbol_dir}/${symbol_basename}/${build_id}"
mkdir -pv "${exec_symbol_dir}"
mv "${temp_file}" "${exec_symbol_dir}/${symbol_basename}.sym"
}
findDll () {
local mingw_arch="${1}"
local dll_name="${2}"
if [ -z "${mingw_arch}" ]
then
find "${MINGW_PREFIX}/bin/${dll_name}"
return
fi
# HACK: sort to get posix flavor of libstdc++ before win32 flavor
find '/usr' -name "${dll_name}" -type f | sort | grep --max-count=1 "${mingw_arch}" \
|| throwError INTERNAL "couldn't find DLL ${dll_name}"
}
cleanSymbols () {
local symbol_dir="${1}"
local symbol_archive_filename="${2}"
if [ -e "${symbol_dir}" ]
then
find "${symbol_dir}" -type f -name '*.sym' -exec rm -v {} \;
find "${symbol_dir}" -depth -type d -exec rmdir {} \;
fi
if [ -f "${symbol_archive_filename}" ]
then
rm "${symbol_archive_filename}"
fi
}
cleanBinaries () {
local system_windows="${1}"
local target_build_dir="${2}"
local content_dir="${3}"
local bin_list="${4}"
for bin_filename in ${bin_list}
do
bin_path="$(getBinPath "${system_windows}" "${target_build_dir}/${bin_filename}")"
engine_bin_path="$(getBinPath "${system_windows}" "${content_dir}/${bin_filename}")"
if [ -f "${bin_path}" ]
then
rm "${bin_path}"
fi
if [ -f "${engine_bin_path}" ]
then
rm "${engine_bin_path}"
fi
done
if [ -d "${content_dir}" ]
then
rmdir "${content_dir}"
fi
}
cleanEngineBuildDir () {
local content_dir="${1}"
if [ -e "${content_dir}" ]
then
find "${content_dir}" -type f -exec rm -v {} \;
find "${content_dir}" -depth -type d -exec rmdir {} \;
fi
}
cleanVmBuildDir () {
local content_dir="${1}"
local symbol_archive_basename="${2}"
if [ -e "${content_dir}" ]
then
find "${content_dir}" -type f -name '?game-*.nexe' -exec rm -v {} \;
find "${content_dir}" -type f -name "${symbol_archive_basename}.*" -exec rm -v {} \;
find "${content_dir}" -depth -type d -exec rmdir {} \;
fi
}
package () {
local archive_format="${1}"
local archive_filename="${2}"
local content_dir="${3}"
(
cd "${content_dir}"
if [ -f "${archive_filename}" ]
then
rm -v "${archive_filename}"
fi
7z -mx='9' -t"${archive_format}" a "${archive_filename}" .
)
}
printVersion () {
local tag_string='0'
local date_string=''
local ref_string=''
local dirt_string=''
local git_last_commit_short="$(git rev-parse --short HEAD)"
if [ -n "${git_last_commit_short}" ]
then
local git_describe_string="$(git describe --tags --match 'v[0-9].*' 2>/dev/null | cut -c2-)"
local git_closest_tag="$(git describe --tags --abbrev=0 --match 'v[0-9].*' 2>/dev/null | cut -c2-)"
local git_last_commit_date="$(date --date="@$(git log -1 '--pretty=format:%ct')" --utc '+%Y%m%d-%H%M%S')"
if [ -n "${git_closest_tag}" ]
then
tag_string="${git_closest_tag}"
if [ "${git_closest_tag}" != "${git_describe_string}" ]
then
date_string="-${git_last_commit_date}"
ref_string="-${git_last_commit_short}"
fi
else
date_string="-${git_last_commit_date}"
ref_string="-${git_last_commit_short}"
fi
else
date_string="-$(date --utc '+%Y%m%d-%H%M%S')"
ref_string='-0'
fi
if ! git diff --quiet 2>/dev/null
then
dirt_string='+dirty'
fi
echo "${tag_string}${date_string}${ref_string}${dirt_string}"
}
build () {
local job_count="${1}"
local write_version_string="${2}"
local write_username_string="${3}"
local root_dir="${4}"
local target="${5}"
local symbol_archive_basename='symbols'
local vmpak_archive_basename=''
local engine_archive_basename=''
local engine_archive_format='zip'
local symbol_archive_format='7z'
local vmpak_archive_format='zip'
local vmpak_archive_extension='dpk'
local build_dir="${root_dir}/build"
local release_dir="${build_dir}/release"
local vm_kind_list='cgame sgame'
local vm_arch_list='i686 amd64 armhf'
local main_nexe='main.nexe'
local engine_file_list=''
local engine_strip_list=''
local engine_symbolize_list=''
local build_vm='false'
local build_engine='false'
local system_linux='false'
local system_macos='false'
local system_windows='false'
local dumpsyms_relpath=''
local arch_amd64='false'
local arch_i686='false'
local arch_arm64='false'
local arch_armhf='false'
local host_linux='false'
local host_mac='false'
local host_windows='false'
local mingw_arch_prefix=''
case "${target}" in
'vm')
build_vm='true'
dumpsyms_relpath=linux/dump_syms/dump_syms
;;
'linux-'*)
build_engine='true'
system_linux='true'
dumpsyms_relpath=linux/dump_syms/dump_syms
;;
'macos-'*)
build_engine='true'
system_macos='true'
dumpsyms_relpath=mac/dump_syms/dump_syms_mac
;;
'windows-'*)
build_engine='true'
system_windows='true'
dumpsyms_relpath=windows/dump_syms_dwarf/dump_syms
;;
esac
case "${target}" in
*'-amd64')
arch_amd64='true'
engine_file_list="${engine_file_list} irt_core-amd64.nexe"
;;
*'-i686')
arch_i686='true'
engine_file_list="${engine_file_list} irt_core-i686.nexe"
;;
*'-arm64')
arch_arm64='true'
engine_file_list="${engine_file_list} irt_core-armhf.nexe"
;;
*'-armhf')
arch_armhf='true'
engine_file_list="${engine_file_list} irt_core-armhf.nexe"
;;
esac
local target_root_dir="${build_dir}/target"
local target_build_dir="${target_root_dir}/${target}"
local content_dir="${target_build_dir}/content"
local symbol_dir="${target_build_dir}/${symbol_archive_basename}"
local symbol_archive_filename="${target_build_dir}/${symbol_archive_basename}.${symbol_archive_format}"
local uname_system="$(uname -s)"
case "${uname_system}" in
'Linux'*)
host_linux='true'
;;
'Darwin'*)
host_mac='true'
;;
'CYGWIN'*|'MINGW'*)
host_windows='true'
;;
*)
throwError NOTIMPLEMENTED "unknown system: ${uname_system}"
;;
esac
if "${write_version_string}"
then
build_version="$(printVersion)"
if "${write_username_string}"
then
build_version+="-$(whoami | tr '[:upper:]' '[:lower:]')"
fi
vmpak_version_string="_${build_version}"
engine_version_string="_${build_version}"
else
vmpak_version_string='_0'
engine_version_string=''
fi
if [ -z "${job_count}" ]
then
if command -v 'nproc' >/dev/null
then
job_count="$(nproc)"
elif command -v 'sysctl' >/dev/null
then
job_count="$(sysctl -n 'hw.ncpu')"
else
job_count='4'
fi
fi
if [ -d "${target_build_dir}" ]
then
echo "Removing '${target_build_dir}' and contents"
# Safe w.r.t. symlinks, as long as you don't put a trailing slash:
# https://superuser.com/questions/382314/does-rm-rf-follow-symbolic-links
rm -r "${target_build_dir}"
fi
mkdir -pv "${target_build_dir}"
mkdir -pv "${release_dir}"
local cmake_opts='-DBUILD_GAME_NATIVE_DLL=OFF -DBUILD_GAME_NATIVE_EXE=OFF'
local cmake_cflags=''
if "${system_macos}"
then
PATH="${PATH}:/Applications/CMake.app/Contents/bin"
cmake_opts="${cmake_opts} -DCMAKE_OSX_DEPLOYMENT_TARGET=10.9 -DCMAKE_BUILD_TYPE=Release -DUSE_BREAKPAD=OFF"
produce_symbols=false
else
cmake_opts="${cmake_opts} -DCMAKE_BUILD_TYPE=RelWithDebInfo -DUSE_BREAKPAD=ON"
produce_symbols=true
fi
if "${system_macos}" && "${arch_amd64}"
then
cmake_opts="${cmake_opts} -DCMAKE_OSX_ARCHITECTURES=x86_64"
fi
if "${build_vm}"
then
vmpak_archive_basename='vm'
cmake_opts="${cmake_opts} -DBUILD_GAME_NACL=ON -DBUILD_GAME_NACL_NEXE=ON -DBUILD_CGAME=ON -DBUILD_SGAME=ON -DBUILD_CLIENT=OFF -DBUILD_TTY_CLIENT=OFF -DBUILD_SERVER=OFF"
fi
if "${build_engine}"
then
engine_archive_basename="${target}"
cmake_opts="${cmake_opts} -DUSE_LTO=ON -DBUILD_CLIENT=ON -DBUILD_SERVER=ON -DBUILD_TTY_CLIENT=ON -DBUILD_GAME_NACL=OFF -DBUILD_GAME_NACL_NEXE=OFF -DBUILD_CGAME=OFF -DBUILD_SGAME=OFF -DUSE_HARDENING=1"
local strip='strip'
if "${system_windows}" && ! "${host_windows}"
then
if "${arch_i686}"
then
bitness='32'
mingw_arch_prefix='i686'
else
bitness='64'
mingw_arch_prefix='x86_64'
fi
strip="${mingw_arch_prefix}-w64-mingw32-strip"
cmake_opts="${cmake_opts} -DCMAKE_TOOLCHAIN_FILE=${root_dir}/daemon/cmake/cross-toolchain-mingw${bitness}.cmake"
# unused
# cmake_opts="${cmake_opts} -DPKG_CONFIG_EXECUTABLE=${mingw_arch_prefix}-w64-mingw32-pkg-config"
fi
if ${system_linux}
then
cmake_opts="${cmake_opts} -DUSE_STATIC_LIBS=1 -DOpenGL_GL_PREFERENCE=LEGACY"
engine_file_list="${engine_file_list} daemon daemonded daemon-tty crash_server nacl_helper_bootstrap nacl_loader"
engine_symbolize_list='daemon daemonded daemon-tty'
engine_strip_list='daemon daemonded daemon-tty crash_server'
if "${arch_i686}"
then
cmake_opts+=' -DCMAKE_C_COMPILER=i686-linux-gnu-gcc'
cmake_opts+=' -DCMAKE_CXX_COMPILER=i686-linux-gnu-g++'
elif "${arch_arm64}"
then
engine_file_list+=" nacl_helper_bootstrap-armhf lib-armhf/ld-linux-armhf lib-armhf/libc.so.6 lib-armhf/libgcc_s.so.1 lib-armhf/libm.so.6 lib-armhf/libpthread.so.0 lib-armhf/librt.so.1 lib-armhf/libstdc++.so.6"
cmake_opts+=' -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc'
cmake_opts+=' -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++'
elif "${arch_armhf}"
then
cmake_opts+=' -DCMAKE_C_COMPILER=arm-linux-gnueabihf-gcc'
cmake_opts+=' -DCMAKE_CXX_COMPILER=arm-linux-gnueabihf-g++'
fi
fi
if "${system_macos}"
then
engine_file_list="${engine_file_list} daemon daemonded daemon-tty nacl_loader"
# No Breakpad, hence no symbolize and strip steps
fi
if "${system_windows}"
then
if "${arch_i686}"
then
engine_file_list="${engine_file_list} nacl_loader-amd64.exe irt_core-amd64.nexe" # WOW64 support
if "${host_windows}"
then
# MSYS2 uses the DWARF exception flavor
special_dll='libgcc_s_dw2-1.dll'
else
special_dll='libgcc_s_sjlj-1.dll'
fi
else
special_dll='libgcc_s_seh-1.dll'
fi
extra_dll_list="${special_dll} libstdc++-6.dll libwinpthread-1.dll libssp-0.dll"
# DLLs are added to engine_file_list after building
engine_file_list="${engine_file_list} daemon.exe daemonded.exe daemon-tty.exe crash_server.exe nacl_loader.exe"
engine_symbolize_list='daemon.exe daemonded.exe daemon-tty.exe'
engine_strip_list='daemon.exe daemonded.exe daemon-tty.exe crash_server.exe'
# those paths are distro-centric
# cp -av "/usr/${mingw_arch_prefix}-w64-mingw32/lib/libwinpthread-1.dll" "${target_build_dir}/"
# cp -av "/usr/lib/gcc/${mingw_arch_prefix}-w64-mingw32/7.3-posix/libstdc++-6.dll" "${target_build_dir}/"
# cp -av "/usr/lib/gcc/${mingw_arch_prefix}-w64-mingw32/7.3-posix/${special_dll}" "${target_build_dir}/"
for dll_name in ${extra_dll_list}
do
dll_location="$(findDll "${mingw_arch_prefix}" "${dll_name}")"
cp -av "${dll_location}" "${target_build_dir}/"
done
fi
local strip='strip'
if "${system_windows}" && ! "${host_windows}"
then
if "${arch_i686}"
then
bitness='32'
mingw_arch_prefix='i686'
else
bitness='64'
mingw_arch_prefix='x86_64'
fi
strip="${mingw_arch_prefix}-w64-mingw32-strip"
cmake_opts="${cmake_opts} -DCMAKE_TOOLCHAIN_FILE=${root_dir}/daemon/cmake/cross-toolchain-mingw${bitness}.cmake"
# unused
# cmake_opts="${cmake_opts} -DPKG_CONFIG_EXECUTABLE=${mingw_arch_prefix}-w64-mingw32-pkg-config"
fi
fi
if "${build_vm}"
then
# configuration
cmake -H"${root_dir}" \
-B"${target_build_dir}" \
-G"Unix Makefiles" \
${cmake_opts} \
|| throwError INTERNAL "${target} cmake failed"
fi
if "${build_engine}"
then
# configuration
cmake -H"${root_dir}" \
-B"${target_build_dir}" \
-G"Unix Makefiles" \
-D"CMAKE_C_FLAGS=${cmake_cflags}" \
-D"CMAKE_CXX_FLAGS=${cmake_cflags}" \
-D"CMAKE_EXE_LINKER_FLAGS=${cmake_cflags}" \
${cmake_opts} \
|| throwError INTERNAL "${target} cmake failed"
fi
if "${build_vm}" || "${build_engine}"
then
daemon_dir="$(cmake -H"${root_dir}" -B"${target_build_dir}" -LH | grep '^DAEMON_DIR:' | sed -e 's/[^=]*=//')"
fi
if "${build_vm}"
then
# build vm
cmake --build "${target_build_dir}" -- -j"${job_count}" nacl-vms \
|| throwError INTERNAL "${target} build failed"
fi
if "${build_engine}"
then
# build engine
cmake --build "${target_build_dir}" -- -j"${job_count}" \
|| throwError INTERNAL "${target} build failed"
if "${system_windows}"
then
engine_file_list="${engine_file_list} $(cd "${target_build_dir}" && ls *.dll)"
elif "${system_macos}"
then
# On Mac there are multiple copies of each dylib; get the ones with 3 version numbers
# stat -f%N just echos back the filenames while erroring if one doesn't exist
engine_file_list="${engine_file_list} $(cd "${target_build_dir}" && stat -f%N libGLEW.*.*.*.dylib libopenal.*.*.*.dylib)"
fi
fi
if "${produce_symbols}"
then
# build breakpad
local breakpad_dir="${daemon_dir}/libs/breakpad"
local dumpsyms_bin="$(getBinPath ${host_windows} ${breakpad_dir}/src/tools/${dumpsyms_relpath})"
if ! [ -d "${breakpad_dir}" ]
then
throwError INTERNAL "breakpad dir missing: ${breakpad_dir}"
fi
# Check for working dump_syms. In MSYS2 it doesn't run from an opposite-bitness shell.
set +e
"${dumpsyms_bin}" --help 2>/dev/null
local exitcode=$?
set -e
if [ "${exitcode}" -gt 1 ]
then
(
cd "${breakpad_dir}"
autoreconf -fvi && ./configure \
|| throwError INTERNAL 'breakpad configure failed'
)
make -C"${breakpad_dir}" clean \
|| true
local make_targets=''
if "${host_windows}"
then
make_targets='src/tools/windows/dump_syms_dwarf/dump_syms.exe'
fi
make -j"${job_count}" -C"${breakpad_dir}" $make_targets \
|| throwError INTERNAL 'breakpad build failed'
fi
fi
if "${build_vm}"
then
cleanSymbols "${symbol_dir}" "${symbol_archive_filename}"
# extract vm symbols
for vm in ${vm_kind_list}
do
for arch in ${vm_arch_list}
do
(
cd "${target_build_dir}"
local vm_file="${vm}-${arch}.nexe"
local stripped_vm_file="${vm}-${arch}-stripped.nexe"
printf 'extracting symbols from %s\n' "${vm_file}"
if ! [ -f "${vm_file}" ]
then
throwError INTERNAL "missing: ${vm_file}"
fi
if [ -f "${main_nexe}" ]
then
rm "${main_nexe}"
fi
ln -s "${vm_file}" 'main.nexe'
dumpSymbols "${dumpsyms_bin}" "${symbol_dir}" "${main_nexe}"
mkdir -pv "${content_dir}"
cp -v "${stripped_vm_file}" "${content_dir}/${vm_file}"
)
done
done
# compress vm symbols
package "${symbol_archive_format}" "${symbol_archive_filename}" "${symbol_dir}"
cp -v "${symbol_archive_filename}" "${content_dir}/${symbol_archive_basename}.${symbol_archive_format}"
# make vm package
vmpak_archive_filename="${release_dir}/${vmpak_archive_basename}${vmpak_version_string}.${vmpak_archive_extension}"
if [ -f "${vmpak_archive_filename}" ]
then
rm -v "${vmpak_archive_filename}"
fi
package "${vmpak_archive_format}" "${vmpak_archive_filename}" "${content_dir}"
cleanSymbols "${symbol_dir}" "${symbol_archive_filename}"
cleanVmBuildDir "${content_dir}" "${symbol_archive_basename}"
fi
if "${build_engine}"
then
local bin_path
local engine_bin_path
cleanSymbols "${symbol_dir}" "${symbol_archive_filename}"
mkdir -pv "${content_dir}"
if "${produce_symbols}"
then
# extract engine symbols
for bin in ${engine_symbolize_list}
do
bin_path="${target_build_dir}/${bin}"
printf 'extracting symbols from %s\n' "${bin_path}"
dumpSymbols "${dumpsyms_bin}" "${symbol_dir}" "${bin_path}"
done
fi
local engine_file_dest="${content_dir}"
if "${system_macos}"
then
engine_file_dest="${content_dir}/Unvanquished.app/Contents/MacOS"
mkdir -pv "${engine_file_dest}"
cp -v "${root_dir}/macosx/unvanquished.sh" "${engine_file_dest}"
cp -av "${target_build_dir}/SDL2.framework" "${engine_file_dest}"
rm -rv "${engine_file_dest}/SDL2.framework/Headers"
rm -rv "${engine_file_dest}/SDL2.framework/Versions/A/Headers"
rm -rv "${engine_file_dest}/SDL2.framework/Versions/Current/Headers"
cp -v "${root_dir}/macosx/Info.plist" "${content_dir}/Unvanquished.app/Contents"
mkdir -v "${content_dir}/Unvanquished.app/Contents/Resources"
cp -v "${root_dir}/macosx/Unvanquished.icns" "${content_dir}/Unvanquished.app/Contents/Resources"
fi
for file in ${engine_file_list}
do
file_dir="$(dirname "${file}")"
if [ "${file_dir}" != '.' ]
then
mkdir -pv "${engine_file_dest}/${file_dir}"
fi
cp -v "${target_build_dir}/${file}" "${engine_file_dest}/${file}"
done
for file in ${engine_strip_list}
do
echo "Stripping ${file}"
"${strip}" "${engine_file_dest}/${file}"
done
if "${produce_symbols}"
then
# compress engine symbols
package "${symbol_archive_format}" "${symbol_archive_filename}" "${symbol_dir}"
cp -v "${symbol_archive_filename}" "${content_dir}/${symbol_archive_basename}-${target}.${symbol_archive_format}"
fi
# make engine archive
engine_archive_filename="${release_dir}/${engine_archive_basename}${engine_version_string}.${engine_archive_format}"
package "${engine_archive_format}" "${engine_archive_filename}" "${content_dir}"
cleanSymbols "${symbol_dir}" "${symbol_archive_filename}"
cleanEngineBuildDir "${content_dir}"
fi
}
# ---------------------------------------------------------------------------
# Command-line handling and build dispatch.
# Locate the repository root and verify we are inside the game source tree.
root_dir="$(git rev-parse --show-toplevel)"
[ -f "${root_dir}/src/cgame/cg_main.cpp" ] || throwError INTERNAL "must be called from game source tree"
[ -z "${1:-}" ] && throwError BADREQUEST 'missing target'

# Option state; 'true'/'false' strings are later executed as commands.
jobs=''
run_parallel='false'
with_version='false'
with_username='false'
targets=''

# Consume arguments one at a time; anything that is not a flag is a target.
while [ -n "${1:-}" ]
do
	case "${1}" in
	'vm'|'linux-amd64'|'linux-i686'|'linux-arm64'|'linux-armhf'|'macos-amd64'|'windows-amd64'|'windows-i686')
		targets="${targets} ${1}"
		shift
		;;
	'macos-i686')
		throwError NOTIMPLEMENTED "unsupported target: ${1}"
		;;
	'-d')
		# Debug: trace every command from here on.
		set -x
		shift
		;;
	'-j'*)
		# Job count is glued to the flag, e.g. -j8.
		jobs="${1:2}"
		shift
		;;
	'-p')
		run_parallel='true'
		shift
		;;
	'-u')
		with_username='true'
		shift
		;;
	'-v')
		with_version='true'
		shift
		;;
	'-h'|'--help')
		printHelp
		;;
	'-'*)
		throwError BADREQUEST "unknown option: ${1}"
		;;
	*)
		throwError BADREQUEST "unknown target: ${1}"
		;;
	esac
done

# Run one build per requested target, optionally in the background.
for build_target in ${targets}
do
	if "${run_parallel}"
	then
		build "${jobs}" "${with_version}" "${with_username}" "${root_dir}" "${build_target}" &
	else
		build "${jobs}" "${with_version}" "${with_username}" "${root_dir}" "${build_target}"
	fi
done
# Barrier for backgrounded builds (no-op in sequential mode).
wait
#EOF
| true |
d1d449c11e88a1842c97db1c7176ee128104cdc4 | Shell | arosh/pixiv-isucon2016 | /webapp/reload.sh | UTF-8 | 686 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Rotate logs, reinstall tuned configs and restart services (ISUCON helper).
set -ex

# Timestamp suffix for rotated log files.
now=$(date +%Y%m%d-%H%M%S)

# Rotate the nginx access log and the MySQL slow log if they exist.
if [ -e /var/log/nginx/access.log ]; then
  mv /var/log/nginx/access.log "/var/log/nginx/access.log.$now"
fi
if [ -e /var/log/mysql/mysql-slow.log ]; then
  mv /var/log/mysql/mysql-slow.log "/var/log/mysql/mysql-slow.log.$now"
fi

# BUG FIX: the original test was `[ "$(pgrep mysql | wc -l)" ]`, which is
# always true because `wc -l` always prints something (e.g. "0").  Only
# flush logs when a mysql process is actually running.
if pgrep mysql > /dev/null; then
  mysqladmin -uroot flush-logs
fi

# Install the configuration files and reload/restart the services.
cp conf/sysctl.conf /etc/sysctl.conf
sysctl -p
cp conf/nginx.conf /etc/nginx/nginx.conf
systemctl reload nginx
cp conf/my.cnf /etc/mysql/my.cnf
systemctl restart mysql
cp conf/isu-python.service /etc/systemd/system/isu-python.service
systemctl daemon-reload
systemctl restart isu-python

# Tail service logs so failures are visible immediately (blocks forever).
journalctl -f -u nginx -u mysql -u isu-python
| true |
4aac136cdfbb056bbb7998dbc596339ef5992865 | Shell | rsippl/yocto-env-template | /start.sh | UTF-8 | 2,814 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e

# Print usage for this launcher and exit successfully.
usage() {
    echo "Usage:"
    echo "$0 <options>"
    echo "Available options:"
    echo "-x    Use X11 forwarding"
    echo "-n    Forward /dev/net/tun"
    echo "-p    Run container in privileged mode"
    echo "-e    Execute additional instance"
    exit 0
}

# Container/image identity and the host serial adapter forwarded for ./serial.sh.
container_name="yocto-env"
image_tag="yocto-env:1.0"
serial_dev="/dev/ttyUSB0"

# rough check to see if we are in correct directory
dirs_to_check=( "./cache/downloads" "./cache/sstate" "./home" )
for d in "${dirs_to_check[@]}"; do
    if [[ ! -d ${d} ]]; then
        echo "\"${d}\" directory not found"
        usage
    fi
done

# Optional docker-run fragments; each set_arg_* helper fills one of them in.
arg_privileged=""
set_arg_privileged() {
    echo "WARNING: Running the container with privileged access"
    arg_privileged="--privileged"
}

# X11 forwarding: share the host display socket and Xauthority file.
# NOTE(review): relies on $XAUTH being set by the environment — confirm.
arg_x11_forward=""
set_arg_x11() {
    xhost +
    arg_x11_forward="--env DISPLAY=unix${DISPLAY} \
                      --volume ${XAUTH}:/root/.Xauthority \
                      --volume /tmp/.X11-unix:/tmp/.X11-unix "
}

# TUN forwarding plus a published port for network-boot style workflows.
arg_net_forward=""
tun_dev="/dev/net/tun"
set_arg_net() {
    arg_net_forward="--cap-add=NET_ADMIN \
                      --device ${tun_dev}:/dev/net/tun
                      --publish 8000:8000"
}

run_additional_instance=false
# parse input arguments
while getopts ":hxnpe" opt; do
    case ${opt} in
        h )
            usage
            ;;
        x )
            command -v xhost >/dev/null 2>&1 || { echo >&2 "\"xhost\" is not installed"; exit 1; }
            set_arg_x11
            ;;
        n )
            [[ -e "${tun_dev}" ]] || { echo >&2 "\"${tun_dev}\" not found, is the \"tun\" kernel module loaded?"; exit 1; }
            set_arg_net
            ;;
        p )
            set_arg_privileged
            ;;
        e )
            run_additional_instance=true
            ;;
        \? )
            echo "Invalid Argument: \"${opt}\"" 1>&2
            usage
            ;;
    esac
done

# Empty-password crypt hash for the in-container user created below.
empty_password_hash="U6aMy0wojraho"

# check if serial device file exists
# (device_opt stays unset/empty when no adapter is plugged in)
if [ -e ${serial_dev} ]; then
    echo "Serial device file ${serial_dev} found, you may use ./serial.sh."
    device_opt="--device=${serial_dev}:${serial_dev}"
else
    echo "Serial device file ${serial_dev} not found, ./serial.sh will not work!"
fi

# Either attach a second shell to the running container (-e), or start a
# fresh container that creates a 'yocto' user matching the host UID and
# drops into it.
if [ "${run_additional_instance}" = true ]; then
    docker container exec \
        -it \
        --user yocto \
        -w /opt/yocto/workspace \
        ${container_name} \
        /bin/bash
else
    docker container run \
        -it \
        --rm \
        --name ${container_name} \
        ${arg_net_forward} \
        ${arg_x11_forward} \
        ${arg_privileged} \
        --volume "${PWD}":/opt/yocto \
        --volume "${PWD}/home":/home/yocto \
        ${device_opt} \
        ${image_tag} \
        sudo bash -c "\
            groupadd -g 7777 yocto && \
            useradd --password ${empty_password_hash} --shell /bin/bash -u ${UID} -g 7777 yocto && \
            usermod -aG sudo yocto && \
            usermod -aG users yocto && \
            usermod -aG dialout yocto && \
            cd /opt/yocto && \
            su yocto"
fi
| true |
fbe995807b3a0bb7990c89294821a5f2557698f2 | Shell | awesomest/NetWork_g-2015 | /2014:11:7:edkn/mkdir_CYCLE_data.sh | UTF-8 | 1,003 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Directory scaffolding for the WS-model experiments (original comments
# translated from Japanese):
#   mode 0 - create the diffusion-data directory tree under ./informspread
#   mode 1 - create the graph-data directory tree in the current directory
# `begin` and `end` are even numbers; one <dir>_<i>_data directory is made
# for every even i in [begin, end].
# Usage: ./mkdir_data.sh 0|1 begin end dirname
mode=$1
begin=$2
end=$3
directory=$4
if [ "$mode" = "0" ]; then
    cd informspread || exit 1
    mkdir "${directory}"
    cd "${directory}" || exit 1
    mkdir "${directory}_${begin}_data"
    cd "${directory}_${begin}_data" || exit 1
    # Per-strategy result folders, plain and with noise.
    mkdir NC IC DC_desc DC_asc NC_noise IC_noise DC_desc_noise DC_asc_noise
    # Clone the template directory for every later even step.
    # ($((...)) replaces the slower external `expr` calls.)
    i=$((begin + 2))
    while [ "$i" -le "$end" ]; do
        cp -r "../${directory}_${begin}_data" "../${directory}_${i}_data"
        i=$((i + 2))
    done
elif [ "$mode" = "1" ]; then
    mkdir "${directory}"
    cd "${directory}" || exit 1
    i=$begin
    while [ "$i" -le "$end" ]; do
        mkdir "${directory}_${i}_data"
        i=$((i + 2))
    done
fi | true |
936b25ecddefb516410fd6c1b17aa0019a09e887 | Shell | mciverza/docker-acroread | /start-acroread.sh | UTF-8 | 275 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Entrypoint: align the in-container `acroread` account with the host
# user's ids, then drop privileges and launch Adobe Reader.
# Expects environment variables: gid, uid, ARGS, FILE (set by the caller).
groupmod -g $gid acroread
usermod -u $uid -g $gid acroread
# Re-own any pre-existing Adobe state after the uid/gid change.
if [ -d /home/acroread/.adobe ]; then
	chown -R acroread:acroread /home/acroread/.adobe
fi
# Run acroread as the unprivileged user; `exec` replaces this shell.
# NOTE(review): $ARGS/$FILE are expanded here and wrapped in single quotes
# for the inner shell — values containing single quotes would break this.
exec su -ls "/bin/bash" -c "mkdir -p /home/acroread/.local/share; /usr/bin/acroread '$ARGS' '$FILE'" acroread
| true |
7caf7190d6fad024455eb45f046c5a98bc0a1aac | Shell | clouds56/setup | /mac/setup.sh | UTF-8 | 1,184 | 2.59375 | 3 | [] | no_license | #!/bin/sh
# One-shot macOS workstation bootstrap: login shell, Xcode license,
# Homebrew, zshrc, git aliases, shadowsocks proxy, scientific Python.
# NOTE(review): several steps are interactive (xcodebuild -license, vim)
# and assume the script runs from a directory containing proxychains.conf.
# change shell
chsh -s /bin/zsh clouds
# install xcode and homebrew
sudo xcodebuild -license
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# install wget and zshrc
brew install wget
wget -O .zshrc http://git.grml.org/f/grml-etc-core/etc/zsh/zshrc
# setup git (manually setup username and email)
git config --global alias.st "status -s"
git config --global alias.lg "log --oneline --all --graph --decorate"
# install shadowsocks (config edited by hand, then loaded as a LaunchAgent)
brew install shadowsocks-libev
vim /usr/local/etc/shadowsocks-libev.json
ln -s /usr/local/opt/shadowsocks-libev/homebrew.mxcl.shadowsocks-libev.plist ~/Library/LaunchAgents/
launchctl load ~/Library/LaunchAgents/homebrew.mxcl.shadowsocks-libev.plist
# https://github.com/shadowsocks/shadowsocks/wiki/Using-Shadowsocks-with-Command-Line-Tools
mkdir ~/.proxychains && cp proxychains.conf ~/.proxychains
# install python3 (brew lines add native libs needed by the pip packages)
brew install python3
pip3 install jupyter
pip3 install numpy
brew install freetype
pip3 install matplotlib
brew install gcc
pip3 install scipy
brew install libtiff libjpeg webp little-cms2
brew install homebrew/dupes/zlib
brew link zlib --force
pip3 install pillow
| true |
a1f55bab6bc621437018a585fb7f12ac15f1c486 | Shell | rvennam/kui | /tools/travis/test/script.sh | UTF-8 | 815 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# CI test driver: pick the test suite to run based on $LAYERS.

# Sanity-check that kubectl is available when exercising the k8s layers.
if [[ "$LAYERS" == "k8s" ]]; then which kubectl; fi

# Derive a per-build key so concurrent Travis builds use disjoint resources.
export KEY=$(node -e 'console.log(parseInt(process.env.IDX) + process.env.NUM_OPENWHISK_AUTH_LAYERS * (process.env.TRAVIS_BUILD_NUMBER % process.env.MAX_TRAVIS_CONCURRENCY))')
echo "Using KEY=$KEY"

case "$LAYERS" in
    LINT)
        npm run lint && ./tools/scancode/scancode.sh
        ;;
    HEADLESS)
        # When testing against build headless, we set TEST_SPACE manually since we can't get the env var TEST_SPACE from the previous runLocal.sh => runTest.sh process. Namespace Current tests will fail if we don't have TEST_SPACE.
        export TEST_SPACE="${TEST_SPACE_PREFIX-ns}${KEY}"
        (cd tests && ./bin/allocate.sh "$TEST_SPACE")
        (cd dist/builds/kui && npm run test)
        ;;
    *)
        # Any other (non-headless) layer set runs through the local runner.
        (cd tests && ./bin/runLocal.sh $LAYERS)
        ;;
esac
| true |
595dab65e95ae9e71455d951b2301d14398da8af | Shell | chenjuneking/redis-setup-action | /entrypoint.sh | UTF-8 | 415 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Assemble the `docker run` invocation as positional parameters ("$@") so
# every value survives whitespace and special characters intact.  The
# original built a flat string and re-parsed it with `sh -c`, which
# re-splits values such as passwords containing spaces.
set -- docker run --name "$INPUT_CONTAINERNAME" \
    -d -p "$INPUT_HOSTPORT:$INPUT_CONTAINERPORT" "redis:$INPUT_VERSION" redis-server

# Only require a password when one was provided.
if [ -n "$INPUT_PASSWORD" ]; then
    set -- "$@" --requirepass "$INPUT_PASSWORD"
fi

set -- "$@" --port "$INPUT_CONTAINERPORT"

echo "execute command: $*"
"$@"
echo "::set-output name=containerName::$INPUT_CONTAINERNAME" | true |
531f742698de8de85d08b531621f5b6b7dbe761a | Shell | ltouret/ft_services | /setup.sh | UTF-8 | 2,036 | 3.4375 | 3 | [] | no_license | #!/bin/bash
set -e

# Abort helper used whenever a critical setup step fails.
panic ()
{
	echo "Failed !"
	exit 1
}

echo "
# Starting minikube ...
"
# Reuse a running minikube instance if there is one; otherwise start a new
# one on the docker driver.  Any failure aborts via panic.
minikube status > /dev/null \
	&& echo Reusing current instance. \
	|| minikube start --driver docker \
	|| panic

# With the docker driver minikube reports 127.0.0.1; fall back to the
# conventional container address in that case.
MINIKUBE_IP=$(minikube ip)
if [ "$MINIKUBE_IP" = "127.0.0.1" ]
then
	MINIKUBE_IP="172.17.0.2"
fi
echo "
MINIKUBE_IP = $MINIKUBE_IP"

# Point the local docker client at minikube's docker daemon so images
# built below are visible to the cluster.
eval $(minikube docker-env)

echo "
# MetalLB installation ...
"
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.5/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.5/manifests/metallb.yaml
# Render our MetalLB config (address pool uses env vars) and apply it.
envsubst < srcs/metallb/metallb.yaml | kubectl apply -f - || panic
kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"

echo "
# Building containers ...
"
# Substitute the real minikube IP into the configs; sed keeps .bak copies
# that are restored after the build so the sources stay template-clean.
sed -i.bak "s/minikube_ip/$MINIKUBE_IP/g" srcs/ftps/vsftpd.conf
sed -i.bak "s/minikube_ip/$MINIKUBE_IP/g" srcs/wordpress/start.sh
sed -i.bak "s/minikube_ip/$MINIKUBE_IP/g" srcs/metallb/metallb.yaml

# Build one image per service and apply its Kubernetes manifest.
for service in phpmyadmin influxdb mysql grafana wordpress ftps nginx
do
	docker build -t ${service}_custom srcs/$service/
	kubectl apply -f srcs/$service/$service.yaml
done

kubectl apply -f srcs/metallb/metallb.yaml
sleep 5

# Restore the pristine (templated) config files.
mv srcs/ftps/vsftpd.conf.bak srcs/ftps/vsftpd.conf
mv srcs/wordpress/start.sh.bak srcs/wordpress/start.sh
mv srcs/metallb/metallb.yaml.bak srcs/metallb/metallb.yaml

# Print connection instructions for every deployed service.
echo "
ftps : ${MINIKUBE_IP}
user: admin
password: passwd
Use lftp -u admin,passwd ${MINIKUBE_IP}, to connect and add set ftp:ssl-allow no
"
echo "
Nginx : http://${MINIKUBE_IP} or https://${MINIKUBE_IP}
"
echo "PhpMyAdmin : http://${MINIKUBE_IP}:5000 or http://${MINIKUBE_IP}/phpmyadmin
user: admin
password: passwd
"
echo "Wordpress : http://${MINIKUBE_IP}:5050 or http://${MINIKUBE_IP}/wordpress
users: admin, user1, user2, user3
password: passwd
to login use wp-login.php
"
echo "Grafana : http://${MINIKUBE_IP}:3000
user: admin
password: passwd"

echo "
# starting dashboard ...
"
# Blocks and opens the Kubernetes dashboard in a browser.
minikube dashboard
| true |
64546307cbcd38271536b86327124ad012fb0dc0 | Shell | anzhihe/learning | /shell/book/笨办法学BashShell编程-基础篇/示例脚本/12-09pushd.sh | UTF-8 | 349 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Demonstration of the bash directory stack (pushd / popd / dirs).
dir1=/usr/local
dir2=/var/spool

# Push two directories onto the stack, reporting the cwd after each move.
pushd "$dir1"
echo "Now in directory $(pwd)."
echo
pushd "$dir2"
echo "Now in directory $(pwd)."
echo

# $DIRSTACK expands to the top entry of the directory stack.
echo "The top entry in the DIRSTACK array in $DIRSTACK."
echo
dirs -v
echo

# Unwind the stack, ending back where we started.
popd
echo "Now back in directory $(pwd)."
echo
popd
echo "Now back in original working directory $(pwd)."
echo

exit 0
| true |
b71a10dd8be7f50c5947a59dfc803aff34e80a86 | Shell | tkonduri/CICD_Pipeline | /Packer/scripts/ubuntu/cleanup.sh | UTF-8 | 238 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Packer provisioner: final cleanup before baking the Ubuntu image.
set -e
set -x
# Never prompt during apt operations inside the unattended build.
export DEBIAN_FRONTEND=noninteractive
echo "Delete unneeded files."
sudo rm -f /home/vagrant/*.sh
echo "Cleaning up dhcp leases"
# Drop leases so clones request fresh addresses on first boot.
sudo rm /var/lib/dhcp/*
sudo apt-get -y autoremove
sudo apt-get -y clean
# NOTE(review): "| true" pipes sync's (empty) stdout into `true`; it looks
# like an artifact and has no effect on sync itself — confirm intent.
sync | true
c0e4e3d541ae44579afabaf813691d9065f78407 | Shell | msgparser/rawmessage | /smallset/POST.sh | UTF-8 | 371 | 2.921875 | 3 | [] | no_license | # Change following HOST to test the service from localhost, EC2 and EKS
# Service Unit Test
export SERVICE_HOST="localhost"

# POST every .msg file in the current directory to the rawmsg endpoint.
for msg_file in *.msg
do
    # Skip the literal pattern when no .msg files exist.
    [ -e "$msg_file" ] || continue
    echo "======[ $msg_file ]===================="
    # BUG FIX: the original wrapped the payload in literal single quotes
    # (DATA="'$(cat $M)'"), so the quotes were sent as part of the body,
    # and $(cat ...) stripped trailing newlines.  --data-binary @file
    # posts the raw file bytes unchanged.
    curl --request POST \
        --header 'Content-Type: text/plain' \
        "http://${SERVICE_HOST}:4000/rawmsg" \
        --data-binary @"$msg_file"
    echo
    echo
done
| true |
dcfa987a3d091147e760cd2ca1dbfd90da170257 | Shell | Excoriate/simple-aka-47 | /run.sh | UTF-8 | 526 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env bash
# Launch helper: free the configured port, then start the app in the mode
# selected by NODE_ENV (sourced from the environment / .env via printenv).
printenv | grep "port" --color
#NPM_INSTALL=$(npm install);
#RUN=$(npm run dev);
# Port to serve on, taken from the PORT_EXPOSE environment variable.
PORT=${PORT_EXPOSE}
# If something is already listening on $PORT, kill it before starting.
if lsof -Pi :${PORT} -sTCP:LISTEN -t >/dev/null ; then
    echo "port already in use"
    kill $(lsof -t -i:${PORT})
    echo "port has been killed"
fi
# NOTE(review): nothing runs when NODE_ENV is neither value — confirm
# whether a fallback/exit is wanted.
if [[ "${NODE_ENV}" = "development" ]] ;then
    echo "Attempting to run application in DEV mode"
    npm run run:dev:debug
fi
if [[ "${NODE_ENV}" = "production" ]] ;then
    echo "Attempting to run application in PROD mode"
    npm run run:prod
fi | true
e18b27e523b8dcabfc209794d218b83774997766 | Shell | alexandersan/small-bash-tools | /ssh-vpn | UTF-8 | 2,743 | 3.859375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Print usage and exit; $1 is the exit status (1 also prints an error line).
f_help () {
[ $1 -eq 1 ] && echo "Error: bad command"
cat << eof
Usage: ssh-vpn help|--help -- This message
Usage: ssh-vpn start|--start -- Creates VPN tunnel over ssh (you must have root privileges on both machines)
 -h|--host Remote host public IP. Default "128.199.133.88"
 -p|--port Remote port. Default "22"
 -l|--left Left IP for VPN channel. Default "10.100.0.200"
 -r|--right Right IP for VPN channel. Default "10.100.0.100"
 -t|--tun Tunnel interface number for BOTH sides. Default "0"
 -s|--sleep Timeout before client side connection setup. Default 5
 -n|--net Private subnet behind VPN gatevay. Default "192.168.2.0/24"
 -k|--key Path and filename of private SSH key for 'root' user on remote server. Default use ssh-agent.
Usage: ssh-vpn stop|--stop|status|--status -- Show PID or stop VPN tunnel
 -t|--tun Tunnel interface number for BOTH sides. Default "0"
eof
exit $1
}

# First word must be a verb: help/start/stop/status (long forms accepted).
[ ! -n $1 ] && f_help 1
case $1 in
  help|--help) f_help 0 ;;
  start|stop|status|--start|--stop|--status) COMMAND=$1 ;;
  *) f_help 1 ;;
esac
shift

# Walk the remaining args as option/value pairs using ${!i} indirect
# expansion (i indexes the option, i+1 its value).
for i in $(seq 1 2 $#); do
  key=${!i}
  echo $key
  j=$((i+1))
  [ $# -ge $j ] && value=${!j} && echo $value
  case $key in
    -h|--host) HOST=$value ;;
    -p|--port) PORT=$value ;;
    -l|--left) L_IP=$value ;;
    -r|--right) R_IP=$value ;;
    -t|--tun) TUN=$value ;;
    -s|--sleep) SLEEP=$value ;;
    -n|--net) NET=$value ;;
    -k|--key) SSH_KEY="-i $value" ;;
  esac
done

# Dispatch on the verb captured above; unset options fall back to defaults.
case $COMMAND in
  start|--start)
# By default start SSH VPN sesion on two machines with tun0 an simple routing
    HOST=${HOST:-128.199.133.88} # $2
    PORT=${PORT:-22} # $3
    L_IP=${L_IP:-10.100.0.200} # $4
    R_IP=${R_IP:-10.100.0.100} # $5
    TUN=${TUN:-0} # $6
    SLEEP=${SLEEP:-5} # $7
    NET=${NET:-192.168.2.0/24} # $8
    [ -z "$SSH_KEY" ] && SSH_KEY=""
    # Open the ssh tunnel (-w tun:tun), configure both tunnel endpoints,
    # then record the background ssh PID for later stop/status.
    sudo ssh -p $PORT $SSH_KEY -Cf -w $TUN:$TUN $HOST \
    "sleep $SLEEP; ip link set tun$TUN up; ip addr add $R_IP/32 peer $L_IP dev tun$TUN" && \
    sudo bash -c "sleep $SLEEP; ip link set tun$TUN up; ip addr add $L_IP/32 peer $R_IP dev tun$TUN" && \
    sudo echo "$(pgrep -u root -f 'ssh -p.* -Cf')" > ~/.ssh/vpn$TUN.pid
    # Route the remote private subnet through the tunnel.
    sudo ip r a $NET via $R_IP dev tun$TUN src $L_IP
    set +x
    ;;
  stop|--stop)
    TUN=${TUN:-0}
    set -x
    # Terminate the recorded ssh process (SIGTERM) and remove the pidfile.
    ps -fp $(cat ~/.ssh/vpn$TUN.pid) | grep 'ssh -p.* -Cf' > /dev/null && sudo kill -15 $(cat ~/.ssh/vpn$TUN.pid) || echo "ERROR: wrong PID"
    rm -f ~/.ssh/vpn$TUN.pid
    set +x
    ;;
  status|--status)
    TUN=${TUN:-0}
    set -x
    # Report the recorded PID if the ssh tunnel process is still alive.
    ps -fp $(cat ~/.ssh/vpn$TUN.pid) | grep 'ssh -p.* -Cf' > /dev/null && echo "vpn$TUN.pid: $(cat ~/.ssh/vpn$TUN.pid)" || echo "ssh VPN is not running"
    set +x
    ;;
esac
| true |
037e2cd8d1bf194f21e34a957467bb8c62631e15 | Shell | brownman/do_for_others_first_old | /.CODE/REGULAR/.old/old_cfg/LISTS/BANK/test/cfg/test.sh | UTF-8 | 192 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/bin/bash -e
# Run the unit test for this config directory from the script's own location.
# NOTE: the shebang carries `-e`, but shebang options are lost when invoked
# as `bash test.sh`, so enable errexit explicitly as well.
set -e
path=$(dirname "$0")
pushd "$path" > /dev/null
dir_units=$path/units
file_test=$dir_units/test_error.sh
# Fail early (via errexit) if the test script is missing.
ls "$file_test"
cmd="$file_test"
eval "$cmd"
popd > /dev/null
| true |
4dec3b8bb593b45bf1525c026848f9fe5f3271a7 | Shell | wladitrujillo/beerV1 | /deploy.sh | UTF-8 | 605 | 3.140625 | 3 | [] | no_license | #!/bin/sh
# Deploy the exploded Beer-v1 webapp into a local Tomcat installation.
DEPLOY_SERVER_PATH=~/apache-tomcat-9.0.39
APP_NAME=Beer-v1
echo "APP NAME: ${APP_NAME}"

# Target directory inside Tomcat's webapps folder.
webapp_dir="${DEPLOY_SERVER_PATH}/webapps/${APP_NAME}"

# Remove any previous deployment before laying down the new one.
if [ -d "${webapp_dir}" ]
then
    echo "Directory ${APP_NAME} exists."
    rm -rf "${webapp_dir}"
fi

mkdir "${webapp_dir}"
mkdir "${webapp_dir}/WEB-INF"

echo "Copy classes"
cp -avr classes/ "${webapp_dir}/WEB-INF"
echo "Copy Files"
cp etc/web.xml "${webapp_dir}/WEB-INF/"
cp web/*.* "${webapp_dir}/"
echo "Deploy Success..." | true |
e38f5b8b544e02d410ee7cfbfb63c08360156b52 | Shell | sshalhou/FinalStateAnalysis | /recipe/recipe_common.sh | UTF-8 | 2,857 | 2.609375 | 3 | [] | no_license | #!/bin/bash
set -o errexit
set -o nounset
# All checkouts happen inside the CMSSW source area.
pushd $CMSSW_BASE/src
#for standalone version of svfit
cvs co -r V00-01-04s TauAnalysis/CandidateTools
# for some reason patTuple creation fails due to lack of plugin PFCandIsolatorFromDeposits
# to fix
cvs co -r V00-03-13 CommonTools/ParticleFlow
# Tags that work in any release
# To install lumiCalc.py
if [ "$LUMI" = "1" ]
then
    cvs co -r V04-01-06 RecoLuminosity/LumiDB
fi
# Add and patch to way speed up trigger matching
# Don't crash if patch already applied.
# (patch -N exits non-zero for an already-applied patch, hence errexit off.)
# NOTE(review): the matching `set -o errexit` below is commented out, so
# error checking stays disabled for the remainder of the script — confirm
# that this is intentional.
set +o errexit
echo "Applying pat trigger matching speedup"
patch -N -p0 < FinalStateAnalysis/recipe/patches/V06-04-16_DataFormats_PatCandidates_PassStrByRef.patch
#echo "Adding 2D expression histogram feature"
#addpkg -z CommonTools/Utils
#patch -N -p0 < FinalStateAnalysis/recipe/patches/V00-04-02_CommonTools_Utils_Add2DHistoFeature.patch
#set -o errexit
# Only checkout PAT tuple production dependencies if requested.
if [ "$PATPROD" = "1" ]
then
    # Set the compile time flag which enables PAT modules that have external
    # dependencies.
    cat > $CMSSW_BASE/src/FinalStateAnalysis/PatTools/interface/PATProductionFlag.h << EOF
#define ENABLE_PAT_PROD
EOF
    # Add support for PU Jet ID
    # See https://twiki.cern.ch/twiki/bin/view/CMS/PileupJetID
    cvs co -r V00-04-01 CondFormats/EgammaObjects
    cvs co -r V00-02-05 -d CMGTools/External UserCode/CMG/CMGTools/External
    cvs co -r V00-02 -d pharris/MVAMet UserCode/pharris/MVAMet
    # Trim weight files we do not use from the MVA MET checkout.
    rm pharris/MVAMet/data/gbrmet.root
    rm pharris/MVAMet/data/*unityresponse*root
    cvs up -r 1.24 CMGTools/External/src/PileupJetIdAlgo.cc
    ## MOVED TO VERSION SPECIFIC
    #Add Electron ID MVA, Photon and Electron PFIsolation Estimators
    #cvs co -r V00-00-21 -d EGamma/EGammaAnalysisTools UserCode/EGamma/EGammaAnalysisTools
    #individual file tweaks a'la: https://twiki.cern.ch/twiki/bin/view/CMS/HtoZgPhotonID
    #cvs up -r 1.13 EGamma/EGammaAnalysisTools/interface/PFIsolationEstimator.h
    #cvs up -r 1.22 EGamma/EGammaAnalysisTools/src/PFIsolationEstimator.cc
    ## MOVED TO VERSION SPECFIC
    # apply patch so we can configure the passing mask for the PassWP function
    patch -N -p0 < FinalStateAnalysis/recipe/patches/EGammaAnalysisTools_configpatch.patch
    # Add Electron ID MVA
    # (download.url lists one weight-file URL per line.)
    pushd EGamma/EGammaAnalysisTools/data
    cat download.url | xargs wget
    popd
    # Add muon effective area code
    cvs co -r V00-00-10 -d Muon/MuonAnalysisTools UserCode/sixie/Muon/MuonAnalysisTools
    # Remove trainings we don't use
    rm Muon/MuonAnalysisTools/data/*xml
else
    # PAT production disabled: write the header with the flag commented out.
    cat > $CMSSW_BASE/src/FinalStateAnalysis/PatTools/interface/PATProductionFlag.h << EOF
//#define ENABLE_PAT_PROD
EOF
fi
# Get the VBF MVA weight files
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2012#VBF_selection_Matthew
cvs co -r 1.2 UserCode/MitHtt/data/VBFMVA/MuTau/VBFMVA_BDTG.weights.xml
popd
| true |
d1ed809d0246e716fea28a88832246c56ba9f27d | Shell | Revenni/restic | /templates/restic-run.j2 | UTF-8 | 556 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Restic backup script
# (Jinja2 template: {{ ... }} placeholders are filled in by Ansible.)

# set environment variables for restic run (repository, credentials)
source {{ restic_path }}/.restic-env

# backup root filesystem (--one-file-system keeps other mounts out)
{{ restic_bin }} backup --verbose --one-file-system /

{% if restic_additional_mounts is defined %}
# backup any additional mounts if specified
# NOTE(review): relies on shell word-splitting of the rendered list, so
# mount paths must not contain whitespace — confirm.
for i in {{ restic_additional_mounts }}
do
  echo Backing up $i
  {{ restic_bin }} backup --verbose --one-file-system $i
done
{% endif %}

# prune snapshots older than retention period
{{ restic_bin }} forget --verbose --keep-within {{ restic_retention }} --prune
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.