blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
722c9035c589c841b533d04c04b72d0bad3cadfa | Shell | docksal/service-cli | /8.1/healthcheck.sh | UTF-8 | 385 | 2.9375 | 3 | [
"MIT",
"LicenseRef-scancode-free-unknown"
] | permissive | #!/usr/bin/env bash
# Initialization phase in startup.sh is complete
[[ -f /var/run/cli ]] || exit 1
# supervisor services are running
if [[ -f /run/supervisord.pid ]]; then
if [[ "${IDE_ENABLED}" == "1" ]]; then
# IDE mode
ps aux | grep code-server >/dev/null || exit 1
else
# php-fpm/cli mode
[[ -f /run/php-fpm.pid ]] || exit 1
[[ -f /run/sshd.pid ]] || exit 1
fi
fi
| true |
c69448ed34cd9335a34dc9f9491e886a5894a172 | Shell | shwetap29/shellscript | /Programselection ifelif/Arithmeticoperation4.sh | UTF-8 | 1,108 | 3.59375 | 4 | [] | no_license | #!/bin/bash -x
#Taking 3 input from user
read -p 'Enter Value : ' a
read -p 'Enter Value : ' b
read -p 'Enter Value : ' c
#performing arithmetic operation
number1=`echo "scale=2;$a + $b * $c" |bc -1` #bc -1 is used for fractional value
number2=`echo "scale=2;$a % $b + $c" |bc -1`
number3=`echo "scale=2;$c + $a / $b" |bc -1`
number4=`echo "scale=2;$a * $b + $c" |bc -1`
echo "a+b*c = $number1"
echo "a%b+c = $number2"
echo "c+a/b = $number3"
echo "a*b+c = $number4"
#finding maximum value
max=$number1
if ((`echo "$max < $number2" | bc -q` == 1 )) #bc -q it return true & false value and using for compair fractional value
then
max=$number2
fi
if ((`echo "$max < $number3" | bc -q` == 1 ))
then
max=$number3
fi
if ((`echo "$max < $number4" | bc -q` == 1 ))
then
max=$number4
fi
echo "maximum value is : $max"
#finding minimum value
min=$number1
if ((`echo "$min > $number2" | bc -q` == 1 ))
then
min=$number2
fi
if ((`echo "$min > $number3" | bc -q` == 1 ))
then
min=$number3
fi
if ((`echo "$min > $number4" | bc -q` == 1 ))
then
min=$number4
fi
echo "minimum value is : $min"
| true |
b2a9353b2273b818c38f8e9774cad4946a7a2732 | Shell | MigdaliaBrito/catapult-release-management | /provisioners/redhat/modules/cloudflare_dns.sh | UTF-8 | 5,786 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | source "/catapult/provisioners/redhat/modules/catapult.sh"
domains=()
domain=$(catapult websites.apache.$5.domain)
domains+=("${domain}")
domain_tld_override=$(catapult websites.apache.$5.domain_tld_override)
if [ ! -z "${domain_tld_override}" ]; then
domains+=("${domain}.${domain_tld_override}")
fi
for domain in "${domains[@]}"; do
# create array from domain
IFS=. read -a domain_levels <<< "${domain}"
# determine if cloudflare zone exists
cloudflare_zone=$(curl --silent --show-error --connect-timeout 30 --max-time 60 --write-out "HTTPSTATUS:%{http_code}" --request GET "https://api.cloudflare.com/client/v4/zones?name=${domain_levels[-2]}.${domain_levels[-1]}" \
--header "X-Auth-Email: $(catapult company.cloudflare_email)" \
--header "X-Auth-Key: $(catapult company.cloudflare_api_key)" \
--header "Content-Type: application/json")
cloudflare_zone_status=$(echo "${cloudflare_zone}" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
cloudflare_zone=$(echo "${cloudflare_zone}" | sed -e 's/HTTPSTATUS\:.*//g')
# check for a curl error
if [ $cloudflare_zone_status == 000 ]; then
echo "there was a problem with the cloudflare api request - please visit https://www.cloudflarestatus.com to see if there is a problem"
elif [ "$(echo "${cloudflare_zone}" | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["result"]')" == "[]" ]; then
echo "[${domain}] cloudflare zone does not exist"
else
# create an array of dns records
domain_dns_records=()
if [ "${1}" == "production" ]; then
domain_dns_records+=("${domain}")
domain_dns_records+=("www.${domain}")
else
domain_dns_records+=("${1}.${domain}")
domain_dns_records+=("www.${1}.${domain}")
fi
for domain_dns_record in "${domain_dns_records[@]}"; do
# get the cloudflare zone id
cloudflare_zone_id=$(echo "${cloudflare_zone}" | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["result"][0]["id"]')
# determine if dns a record exists
dns_record=$(curl --silent --show-error --connect-timeout 30 --max-time 60 --write-out "HTTPSTATUS:%{http_code}" --request GET "https://api.cloudflare.com/client/v4/zones/${cloudflare_zone_id}/dns_records?type=A&name=${domain_dns_record}" \
--header "X-Auth-Email: $(catapult company.cloudflare_email)" \
--header "X-Auth-Key: $(catapult company.cloudflare_api_key)" \
--header "Content-Type: application/json")
dns_record_status=$(echo "${dns_record}" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
dns_record=$(echo "${dns_record}" | sed -e 's/HTTPSTATUS\:.*//g')
# check for a curl error
if [ $dns_record_status == 000 ]; then
echo "there was a problem with the cloudflare api request - please visit https://www.cloudflarestatus.com to see if there is a problem"
# create dns a record
elif [ "$(echo "${dns_record}" | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["result"]')" == "[]" ]; then
dns_record=$(curl --silent --show-error --connect-timeout 30 --max-time 60 --write-out "HTTPSTATUS:%{http_code}" --request POST "https://api.cloudflare.com/client/v4/zones/${cloudflare_zone_id}/dns_records" \
--header "X-Auth-Email: $(catapult company.cloudflare_email)" \
--header "X-Auth-Key: $(catapult company.cloudflare_api_key)" \
--header "Content-Type: application/json" \
--data "{\"type\":\"A\",\"name\":\"${domain_dns_record}\",\"content\":\"$(catapult environments.$1.servers.redhat.ip)\",\"ttl\":1,\"proxied\":true}")
dns_record_status=$(echo "${dns_record}" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
dns_record=$(echo "${dns_record}" | sed -e 's/HTTPSTATUS\:.*//g')
# update dns a record
else
dns_record_id=$(echo "${dns_record}" | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["result"][0]["id"]')
dns_record=$(curl --silent --show-error --connect-timeout 30 --max-time 60 --write-out "HTTPSTATUS:%{http_code}" --request PUT "https://api.cloudflare.com/client/v4/zones/${cloudflare_zone_id}/dns_records/${dns_record_id}" \
--header "X-Auth-Email: $(catapult company.cloudflare_email)" \
--header "X-Auth-Key: $(catapult company.cloudflare_api_key)" \
--header "Content-Type: application/json" \
--data "{\"id\":\"${dns_record_id}\",\"type\":\"A\",\"name\":\"${domain_dns_record}\",\"content\":\"$(catapult environments.$1.servers.redhat.ip)\",\"ttl\":1,\"proxied\":true}")
dns_record_status=$(echo "${dns_record}" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
dns_record=$(echo "${dns_record}" | sed -e 's/HTTPSTATUS\:.*//g')
fi
# output the result
if [ $dns_record_status == 000 ]; then
echo "there was a problem with the cloudflare api request - please visit https://www.cloudflarestatus.com to see if there is a problem"
elif [ "$(echo "${dns_record}" | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["success"]')" == "False" ]; then
echo "[${domain_dns_record}] $(echo ${dns_record} | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["errors"][0]["message"]')"
else
echo "[${domain_dns_record}] successfully set dns a record"
fi
done
fi
done
touch "/catapult/provisioners/redhat/logs/cloudflare_dns.$(catapult websites.apache.$5.domain).complete"
| true |
0bb42ee34c5f2ef36858ee3042439c3b9456456c | Shell | petronny/aur3-mirror | /qtwifimon/PKGBUILD | UTF-8 | 945 | 2.828125 | 3 | [] | no_license | # Contributor: Your Name <youremail@domain.com>
pkgname=qtwifimon
pkgver=0.5
pkgrel=1
pkgdesc="Qt monitor for your wireless net"
arch=(any)
url="http://github.com/bielern/qtwifimon"
license=('GPL')
depends=('python' 'pyqt' 'wireless_tools')
makedepends=('git')
provides=()
conflicts=()
replaces=()
backup=()
options=(!emptydirs)
install=
_gitroot="git://github.com/bielern/qtwifimon.git"
_gitname="qtwifimon"
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
if [ -d $_gitname ] ; then
cd $_gitname && git pull origin
msg "The local files are updated."
else
git clone $_gitroot $_gitname
fi
msg "GIT checkout done or server timeout"
}
package() {
#cd "$srcdir/$pkgname-$pkgver"
cd "$srcdir/$_gitname"
python setup.py install --root="$pkgdir/" --optimize=1
# Remember to install licenses if the license is not a common license!
# install -D -m644 "$srcdir/LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true |
6ce7153b1808a16f41a9d23d0e1defbf2f71f381 | Shell | DigGe/tools | /wpa_listap | UTF-8 | 321 | 3.21875 | 3 | [] | no_license | #!/bin/bash
if [ -z "$1" ]
then
echo "`basename $0` <device_id>"
exit -1
fi
for p in `dbus-send --system --print-reply --dest=fi.epitest.hostap.WPASupplicant /fi/epitest/hostap/WPASupplicant/Interfaces/$1 fi.epitest.hostap.WPASupplicant.Interface.scanResults | grep object | cut -d\" -f2` ; do wpa_getssid $p ; done
| true |
ddd343a192180a3abce95250d6705a46782e1789 | Shell | songfuture/asv-subtools | /recipe/voxceleb/gather_results_from_epochs.sh | UTF-8 | 12,198 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright xmuspeech (Author: Snowdar 2020-02-27 2019-12-22)
prefix=mfcc_23_pitch
epochs="21"
positions="far near"
vectordir=exp/standard_xv_baseline_warmR_voxceleb1_adam
score=plda
trainset=voxceleb1_train_aug
enrollset=voxceleb1_enroll
testset=voxceleb1_test
score_norm=false # Use as-norm.
top_n=300
cohort_set= # If not NULL, use provided set
cohort_method="sub" # "sub" | "mean"
cohort_set_from=voxceleb1_train # Should be a subset of $trainset if use cohort_set_method.
sub_option="" # Could be --per-spk
sub_num=2000
prenorm=false
lda_norm=false
lda=true
clda=256
submean=false
default=true
string=
force=false
. subtools/parse_options.sh
. subtools/path.sh
trials=data/$prefix/$testset/trials
lda_process="submean-trainlda"
plda_process="submean-lda-norm-trainplda"
test_process="submean-lda-norm"
lda_data_config="$trainset[$trainset $enrollset $testset]"
submean_data_config="$trainset[$trainset $enrollset $testset]"
if [[ "$default" == "true" && "$lda" == "true" ]];then
[ "$score" == "cosine" ] && prenorm=false && lda_norm=false && clda=128
[ "$score" == "plda" ] && prenorm=false && lda_norm=false && clda=256
fi
[ "$lda" == "true" ] && lda_string="_lda$clda"
[ "$submean" == "true" ] && submean_string="_submean"
[ "$lda_norm" == "true" ] && lda_process="norm-"$lda_process
if [ "$prenorm" == "true" ];then
prenorm_string="_norm"
test_process="norm-"$test_process
plda_process="norm-"$plda_process
fi
extra_name="$trainset"
[[ "$score" == "cosine" && "$lda" == "false" && "$submean" == "false" ]] && extra_name=""
name="$testset/score/${score}_${enrollset}_${testset}${prenorm_string}${submean_string}${lda_string}_norm${extra_name:+_$extra_name}"
results="\n[ $score ] [ lda=$lda clda=$clda submean=$submean trainset=$trainset]"
for position in $positions;do
results="$results\n\n--- ${position} ---\nepoch\teer%"
[ "$score_norm" == "true" ] && results="${results}\tasnorm($top_n)-eer%"
for epoch in $epochs;do
obj_dir=$vectordir/${position}_epoch_${epoch}
# Prepare task for scoring. Here it is only needed to extract voxceleb1_test/voxceleb xvectors and then it will split subsets.
# voxcleb1_test -> voxceleb1_enroll
# voxceleb -> voxceleb1-O/E/H[-clean]_enroll/test
if [[ "$testset" == "voxceleb1_test" && "$enrollset" == "voxceleb1_enroll" ]];then
[ "$force" == "true" ] && rm -rf data/$prefix/voxceleb1_test/enroll.list data/$prefix/voxceleb1_enroll \
$obj_dir/voxceleb1_enroll
if [ ! -f $trials ];then
[ ! -f data/$prefix/voxceleb1_test/voxceleb1-O.trials ] && \
echo "[exit] Expected data/$prefix/voxceleb1_test/voxceleb1-O.trials to exist." && exit 1
cp data/$prefix/voxceleb1_test/voxceleb1-O.trials data/$prefix/voxceleb1_test/trials
fi
[ ! -f data/$prefix/voxceleb1_test/enroll.list ] && awk '{print $1}' $trials | sort -u > \
data/$prefix/voxceleb1_test/enroll.list
[[ ! -d data/$prefix/voxceleb1_enroll ]] && subtools/filterDataDir.sh data/$prefix/voxceleb1_test \
data/$prefix/voxceleb1_test/enroll.list data/$prefix/voxceleb1_enroll
[[ ! -d $obj_dir/voxceleb1_enroll ]] && subtools/filterVectorDir.sh $obj_dir/voxceleb1_test \
data/$prefix/voxceleb1_test/enroll.list $obj_dir/voxceleb1_enroll
elif [[ "$testset" == "voxceleb1_O_test" && "$enrollset" == "voxceleb1_O_enroll" ]];then
subtools/recipe/voxcelebSRC/prepare_task_for_scoring.sh --force $force --prefix $prefix --tasks voxceleb1-O --vectordir $obj_dir || exit 1
elif [[ "$testset" == "voxceleb1_E_test" && "$enrollset" == "voxceleb1_E_enroll" ]];then
subtools/recipe/voxcelebSRC/prepare_task_for_scoring.sh --force $force --prefix $prefix --tasks voxceleb1-E --vectordir $obj_dir || exit 1
elif [[ "$testset" == "voxceleb1_H_test" && "$enrollset" == "voxceleb1_H_enroll" ]];then
subtools/recipe/voxcelebSRC/prepare_task_for_scoring.sh --force $force --prefix $prefix --tasks voxceleb1-H --vectordir $obj_dir || exit 1
elif [[ "$testset" == "voxceleb1_O_clean_test" && "$enrollset" == "voxceleb1_O_clean_enroll" ]];then
subtools/recipe/voxcelebSRC/prepare_task_for_scoring.sh --force $force --prefix $prefix --tasks voxceleb1-O-clean --vectordir $obj_dir || exit 1
elif [[ "$testset" == "voxceleb1_E_clean_test" && "$enrollset" == "voxceleb1_E_clean_enroll" ]];then
subtools/recipe/voxcelebSRC/prepare_task_for_scoring.sh --force $force --prefix $prefix --tasks voxceleb1-E-clean --vectordir $obj_dir || exit 1
elif [[ "$testset" == "voxceleb1_H_clean_test" && "$enrollset" == "voxceleb1_H_clean_enroll" ]];then
subtools/recipe/voxcelebSRC/prepare_task_for_scoring.sh --force $force --prefix $prefix --tasks voxceleb1-H-clean --vectordir $obj_dir || exit 1
fi
[[ "$force" == "true" || ! -f $obj_dir/$name.eer ]] && \
subtools/scoreSets.sh --prefix $prefix --score $score --vectordir $obj_dir --enrollset $enrollset --testset $testset \
--lda $lda --clda $clda --submean $submean --lda-process $lda_process --trials $trials --extra-name "$extra_name" \
--enroll-process $test_process --test-process $test_process --plda-process $plda_process \
--lda-data-config "$lda_data_config" --submean-data-config "$submean_data_config" --plda-trainset $trainset
if [[ "$score_norm" == "true" && -f $obj_dir/$name.score ]];then
if [ "$cohort_set" == "" ];then
if [ "$cohort_method" == "sub" ];then
cohort_set=${cohort_set_from}_cohort_sub_${sub_num}$sub_option
[[ "$force" == "true" ]] && rm -rf data/$prefix/$cohort_set
[ ! -d data/$prefix/$cohort_set ] && subtools/kaldi/utils/subset_data_dir.sh $sub_option \
data/$prefix/$cohort_set_from $sub_num data/$prefix/$cohort_set
elif [ "$cohort_method" == "mean" ];then
cohort_set=${cohort_set_from}_cohort_mean
[[ "$force" == "true" ]] && rm -rf data/$prefix/$cohort_set
[ ! -d data/$prefix/$cohort_set ] && mkdir -p data/$prefix/$cohort_set && \
awk '{print $1,$1}' data/$prefix/$cohort_set_from/spk2utt > data/$prefix/$cohort_set/spk2utt && \
awk '{print $1,$1}' data/$prefix/$cohort_set_from/spk2utt > data/$prefix/$cohort_set/utt2spk
fi
if [ "$cohort_method" == "sub" ];then
[[ "$force" == "true" ]] && rm -rf $obj_dir/$cohort_set
[[ ! -d $obj_dir/$cohort_set ]] && subtools/filterVectorDir.sh $obj_dir/$cohort_set_from \
data/$prefix/$cohort_set/utt2spk $obj_dir/$cohort_set
elif [ "$cohort_method" == "mean" ];then
[[ "$force" == "true" ]] && rm -rf $obj_dir/$cohort_set
[[ ! -d $obj_dir/$cohort_set ]] && mkdir -p $obj_dir/$cohort_set && ivector-mean ark:data/$prefix/$cohort_set_from/spk2utt \
scp:$obj_dir/$trainset/xvector.scp ark,scp:$obj_dir/$cohort_set/xvector.ark,$obj_dir/$cohort_set/xvector.scp
fi
else
[[ "$force" == "true" ]] && rm -rf $obj_dir/$cohort_set
[[ ! -d $obj_dir/$cohort_set ]] && subtools/filterVectorDir.sh $obj_dir/$cohort_set_from \
data/$prefix/$cohort_set/utt2spk $obj_dir/$cohort_set
fi
[ ! -f data/$prefix/$cohort_set/utt2spk ] && echo "Expected cohort_set to exist." && exit 1
[ "$force" == "true" ] && rm -rf data/$prefix/$cohort_set/$enrollset.list data/$prefix/$cohort_set/$testset.list \
data/$prefix/$cohort_set/$enrollset.cohort.trials data/$prefix/$cohort_set/$testset.cohort.trials
[ ! -f data/$prefix/$cohort_set/$enrollset.list ] && awk '{print $1}' $trials | sort -u > data/$prefix/$cohort_set/$enrollset.list
[ ! -f data/$prefix/$cohort_set/$testset.list ] && awk '{print $2}' $trials | sort -u > data/$prefix/$cohort_set/$testset.list
[ ! -f data/$prefix/$cohort_set/$enrollset.cohort.trials ] && sh subtools/getTrials.sh 3 data/$prefix/$cohort_set/$enrollset.list \
data/$prefix/$cohort_set/utt2spk data/$prefix/$cohort_set/$enrollset.cohort.trials
[ ! -f data/$prefix/$cohort_set/$testset.cohort.trials ] && sh subtools/getTrials.sh 3 data/$prefix/$cohort_set/$testset.list \
data/$prefix/$cohort_set/utt2spk data/$prefix/$cohort_set/$testset.cohort.trials
enroll_cohort_name="$cohort_set/score/${score}_${enrollset}_${cohort_set}${prenorm_string}${submean_string}${lda_string}_norm${extra_name:+_$extra_name}"
test_cohort_name="$cohort_set/score/${score}_${testset}_${cohort_set}${prenorm_string}${submean_string}${lda_string}_norm${extra_name:+_$extra_name}"
output_name="${name}_asnorm${top_n}_$cohort_set"
[[ "$force" == "true" ]] && rm -rf $obj_dir/$enroll_cohort_name.score $obj_dir/$test_cohort_name.score \
$obj_dir/$output_name.score $obj_dir/$output_name.eer
lda_data_config="$trainset[$trainset $enrollset $cohort_set]"
submean_data_config="$trainset[$trainset $enrollset $cohort_set]"
[ ! -f "$obj_dir/$enroll_cohort_name.score" ] && \
subtools/scoreSets.sh --prefix $prefix --eval true --score $score --vectordir $obj_dir \
--lda $lda --clda $clda --submean $submean --lda-process $lda_process --extra-name "$extra_name" \
--enroll-process $test_process --test-process $test_process --plda-process $plda_process \
--lda-data-config "$lda_data_config" --submean-data-config "$submean_data_config" --plda-trainset $trainset \
--enrollset $enrollset --testset $cohort_set \
--trials data/$prefix/$cohort_set/$enrollset.cohort.trials $string
lda_data_config="$trainset[$trainset $testset $cohort_set]"
submean_data_config="$trainset[$trainset $testset $cohort_set]"
[ ! -f "$obj_dir/$test_cohort_name.score" ] && \
subtools/scoreSets.sh --prefix $prefix --eval true --score $score --vectordir $obj_dir \
--lda $lda --clda $clda --submean $submean --lda-process $lda_process --extra-name "$extra_name" \
--enroll-process $test_process --test-process $test_process --plda-process $plda_process \
--lda-data-config "$lda_data_config" --submean-data-config "$submean_data_config" --plda-trainset $trainset \
--enrollset $testset --testset $cohort_set \
--trials data/$prefix/$cohort_set/$testset.cohort.trials $string
[ ! -f "$obj_dir/$output_name.score" ] && \
python3 subtools/score/ScoreNormalization.py --top-n=$top_n --method="asnorm" $obj_dir/$name.score \
$obj_dir/$enroll_cohort_name.score $obj_dir/$test_cohort_name.score \
$obj_dir/$output_name.score
[ ! -f "$obj_dir/$output_name.eer" ] && \
subtools/computeEER.sh --write-file $obj_dir/$output_name.eer $trials $obj_dir/$output_name.score
eer=""
[ -f "$obj_dir/$output_name.eer" ] && eer=`cat $obj_dir/$output_name.eer`
results="$results\n$epoch\t`cat $obj_dir/$name.eer`\t$eer"
else
eer=""
[ -f "$obj_dir/$name.eer" ] && eer=`cat $obj_dir/$name.eer`
results="$results\n$epoch\t$eer"
fi
done
done
echo -e $results > $vectordir/${score}_${testset}${lda_string}${submean_string}.results
echo -e $results
| true |
3dd4efe16db365f1e20ed66e9a178e0d8f07fd23 | Shell | cyralinc/os-eBPF | /sockredir/load.sh | UTF-8 | 1,311 | 3.328125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# enable debug output for each executed command
# to disable: set +x
set -x
# exit if any command fails
set -e
# Mount the bpf filesystem
sudo mount -t bpf bpf /sys/fs/bpf/
# Compile the bpf_sockops_v4 program
clang -O2 -g -target bpf -I/usr/include/linux/ -I/usr/src/linux-headers-5.0.0-23/include/ -c bpf_sockops_v4.c -o bpf_sockops_v4.o
# Load and attach the bpf_sockops_v4 program
sudo bpftool prog load bpf_sockops_v4.o "/sys/fs/bpf/bpf_sockops"
sudo bpftool cgroup attach "/sys/fs/cgroup/unified/" sock_ops pinned "/sys/fs/bpf/bpf_sockops"
# Extract the id of the sockhash map used by the bpf_sockops_v4 program
# This map is then pinned to the bpf virtual file system
MAP_ID=$(sudo bpftool prog show pinned "/sys/fs/bpf/bpf_sockops" | grep -o -E 'map_ids [0-9]+' | cut -d ' ' -f2-)
sudo bpftool map pin id $MAP_ID "/sys/fs/bpf/sock_ops_map"
# Load and attach the bpf_tcpip_bypass program to the sock_ops_map
clang -O2 -g -Wall -target bpf -I/usr/include/linux/ -I/usr/src/linux-headers-5.0.0-23/include/ -c bpf_tcpip_bypass.c -o bpf_tcpip_bypass.o
sudo bpftool prog load bpf_tcpip_bypass.o "/sys/fs/bpf/bpf_tcpip_bypass" map name sock_ops_map pinned "/sys/fs/bpf/sock_ops_map"
sudo bpftool prog attach pinned "/sys/fs/bpf/bpf_tcpip_bypass" msg_verdict pinned "/sys/fs/bpf/sock_ops_map"
| true |
b4d2c27be3ed82c1b43402312fe4b2fd912f2121 | Shell | rotemad/TheElephant | /modules/EC2-Prometheus-Grafana/userdata/prom_grafana.sh | UTF-8 | 7,058 | 2.59375 | 3 | [] | no_license | #!/bin/bash
#Docker install
apt-get install apt-transport-https ca-certificates curl gnupg-agent software-properties-common -y
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
apt-get install docker-ce docker-ce-cli containerd.io -y
usermod -aG docker ubuntu
systemctl enable docker
systemctl start docker
#Consul agent install
curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
apt-get update
apt-get install consul dnsmasq -y
cat << EOF >/etc/dnsmasq.d/10-consul
# Enable forward lookup of the 'consul' domain:
server=/consul/127.0.0.1#8600
EOF
systemctl restart dnsmasq
cat << EOF >/etc/systemd/resolved.conf
[Resolve]
DNS=127.0.0.1
Domains=~consul
EOF
systemctl restart systemd-resolved.service
useradd consul
mkdir --parents /etc/consul.d
chown --recursive consul:consul /etc/consul.d
IP=$(curl http://169.254.169.254/latest/meta-data/local-ipv4)
cat << EOF > /etc/consul.d/agent_config.json
{
"advertise_addr": "$IP",
"client_addr": "0.0.0.0",
"data_dir": "/opt/consul",
"datacenter": "opsschool",
"encrypt": "jrlBUPF89ipG6nVorTPL5zYy92/jGn4jWpSeX3zcAy8=",
"disable_remote_exec": true,
"disable_update_check": true,
"leave_on_terminate": true,
"retry_join": ["provider=aws tag_key=consul-server tag_value=true"],
"enable_script_checks": true,
"server": false
}
EOF
chown --recursive consul:consul /etc/consul.d/agent_config.json
chmod 640 /etc/consul.d/agent_config.json
touch /usr/lib/systemd/system/consul.service
cat << EOF > /usr/lib/systemd/system/consul.service
[Unit]
Description="HashiCorp Consul - A service mesh solution"
Documentation=https://www.consul.io/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/consul.d/consul.hcl
[Service]
Type=notify
User=consul
Group=consul
ExecStart=/usr/bin/consul agent -config-dir=/etc/consul.d/
ExecReload=/bin/kill --signal HUP $MAINPID
KillMode=process
KillSignal=SIGTERM
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
cat << EOF >/etc/consul.d/prom-grafana.json
{
"service": {
"name": "prom-grafana",
"tags": [
"monitoring"
],
"checks": [
{
"id": "Grafana",
"name": "Grafana Status",
"tcp": "localhost:3000",
"interval": "10s",
"timeout": "1s"
},
{
"id": "Prometheus",
"name": "Prometheus Status",
"tcp": "localhost:9090",
"interval": "10s",
"timeout": "1s"
}
]
}
}
EOF
systemctl daemon-reload
systemctl enable consul
systemctl start consul
#Prom conf
IP=$(curl http://169.254.169.254/latest/meta-data/local-ipv4)
mkdir /etc/prometheus
cat << EOF >/etc/prometheus/prometheus.yml
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'prometheus'
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ['localhost:9090']
# - job_name: 'node_exporter'
# scrape_interval: 15s
# static_configs:
# - targets:
# - '172.17.0.1:9100'
- job_name: 'Consul_service_exporters'
consul_sd_configs:
- server: '$IP:8500'
relabel_configs:
- source_labels: ['__address__']
target_label: '__address__'
regex: '(.*):.*'
separator: ':'
replacement: '\$1:9100'
- source_labels: [__meta_consul_node]
target_label: instance
- job_name: 'Consul-server'
metrics_path: '/v1/agent/metrics'
consul_sd_configs:
- server: '$IP:8500'
services:
- consul
relabel_configs:
- source_labels: ['__address__']
target_label: '__address__'
regex: '(.*):.*'
separator: ':'
replacement: '\$1:8500'
EOF
#Grafana install
apt-get install -y apt-transport-https
apt-get install -y software-properties-common wget
wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -
echo "deb https://packages.grafana.com/oss/deb stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list
apt-get update
apt-get install grafana -y
cat << EOF >/etc/grafana/provisioning/datasources/default.yaml
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://localhost:9090
- name: Prometheus-k8s
type: prometheus
url: http://my-prometheus-server-default.service.opsschool.consul:9090
EOF
sed -i '/# enable anonymous access/a enabled = true' /etc/grafana/grafana.ini
sed -i '/;default_home_dashboard_path/a default_home_dashboard_path = /etc/grafana/provisioning/dashboards/dashboard.json' /etc/grafana/grafana.ini
wget https://grafana.com/api/dashboards/1860/revisions/22/download -O /etc/grafana/provisioning/dashboards/dashboard.json
cat << EOF >/etc/grafana/provisioning/dashboards/default.yaml
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
folderUid: ''
type: file
options:
path: /etc/grafana/provisioning/dashboards
EOF
systemctl enable grafana-server.service
systemctl start grafana-server.service
docker run -d --name=prometheus --restart=unless-stopped -p 9090:9090 -v /etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus
#docker run -d --name=grafana --restart=unless-stopped -p 3000:3000 grafana/grafana
# filebeat
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-oss-7.11.0-amd64.deb
dpkg -i filebeat-*.deb
sudo mv /etc/filebeat/filebeat.yml /etc/filebeat/filebeat.yml.BCK
cat <<\EOF > /etc/filebeat/filebeat.yml
filebeat.modules:
- module: system
syslog:
enabled: true
auth:
enabled: false
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
setup.dashboards.enabled: false
setup.template.name: "filebeat"
setup.template.pattern: "filebeat-*"
setup.template.settings:
index.number_of_shards: 1
processors:
- add_host_metadata:
when.not.contains.tags: forwarded
- add_cloud_metadata: ~
output.elasticsearch:
hosts: [ "elk.service.opsschool.consul:9200" ]
index: "filebeat-%{[agent.version]}-%{+yyyy.MM.dd}"
## OR
#output.logstash:
# hosts: [ "127.0.0.1:5044" ]
EOF
systemctl enable filebeat.service
systemctl start filebeat.service
| true |
0f0ab639dfcf6e00a6f367e8fc6bb5121605d808 | Shell | dru18/track | /App/install.sh | UTF-8 | 1,013 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Print installing track
echo "Installing Track"
# Make required directories for track
sudo mkdir -v -p /opt/track/water/
sudo mkdir -v -p /var/opt/track/water/
sudo mkdir -v -p /var/log/track/water/
# Give privilleges to local user for required directories
sudo chown -vR $USER:$USER /opt/track/
sudo chown -vR $USER:$USER /var/opt/track/
sudo chown -vR $USER:$USER /var/log/track/
sudo chown -v $USER:$USER /usr/local/bin/track
# Copy source file to system
sudo cp -v track.py /opt/track/water/track.py
# Make required log file
sudo touch -v /var/log/track/water/water.log
# Make required variables
sudo touch -v /var/opt/track/water/target
sudo touch -v /var/opt/track/water/count
# Initialise variables
sudo echo 0 > /var/opt/track/water/target
sudo echo 0 > /var/opt/track/water/count
# Initialise log
sudo echo -v "" > /log/track/water/water.log
# Make command for track
sudo ln -v -s /var/opt/track/water/track /usr/local/bin/track
# Print installation done
echo "Installation done"
| true |
6b70bff965c5743cf9ec8369f92ed8d115c33db7 | Shell | robcsi/dotfiles | /local_bin/.local/bin/ubuntu-maintenance.sh | UTF-8 | 324 | 2.578125 | 3 | [] | no_license | #!/bin/bash
echo Check systemd failed services:
systemctl --failed
echo
echo Check Journal entries:
sudo journalctl -p 3 -xb
echo
echo Update packages:
sudo apt update && sudo apt upgrade
echo
echo Remove unused packages:
sudo apt autoremove
echo
# Clean the journal:
# sudo journalctl vacuum-time=2weeks (in my case)
| true |
2139cb8e52501d2b2c17d8d2db64884e8caf9e30 | Shell | leifcr/rails5-kubernetes | /docker-entrypoint.sh | UTF-8 | 922 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Note: due to gitlab autodevops not setting args to any value, fallback to bundle exec rails server as default
set -e
echo "Rails entrypoint running: $1"
case "$1" in
bundle*)
command="$1";;
/bin/bash|/bin/sh|bash|sh)
command="$1";;
annotate|cap|capify|cucumber|foodcritic|guard|irb|jekyll|kitchen|knife)
command="bundle exec $@";;
middleman|nanoc|pry|puma|rackup|rainbows|rails|rake|rspec|shotgun|sidekiq|spec)
command="bundle exec $@";;
spork|spring|strainer|tailor|taps|thin|thor|unicorn|unicorn_rails|webpacker|yarn)
command="bundle exec $@";;
./bin/webpack-dev-server|bin/webpack-dev-server|./bin/webpack|bin/webpack)
command="bundle exec $@";;
"")
command="bundle exec rails server";;
*)
command="bundle exec rails server";;
esac
echo "Full command $command"
exec ${command}
| true |
d833a1085b802ae7dea291bd5a95eeedd79fabd6 | Shell | alisw/alidist | /lzma.sh | UTF-8 | 1,534 | 2.984375 | 3 | [] | no_license | package: lzma
version: "%(tag_basename)s"
tag: "v5.2.3"
source: https://github.com/alisw/liblzma
build_requires:
- "autotools:(slc6|slc7)"
- "GCC-Toolchain:(?!osx)"
- rsync
prefer_system: "(?!slc5)"
prefer_system_check: |
printf "#include <lzma.h>\n" | c++ -xc++ - -c -M 2>&1
---
rsync -a --delete --exclude '**/.git' --delete-excluded $SOURCEDIR/ ./
./autogen.sh
./configure CFLAGS="$CFLAGS -fPIC -Ofast" \
--prefix="$INSTALLROOT" \
--disable-shared \
--enable-static \
--disable-nls \
--disable-rpath \
--disable-dependency-tracking \
--disable-doc
make ${JOBS+-j $JOBS} install
rm -f "$INSTALLROOT"/lib/*.la
# Modulefile
MODULEDIR="$INSTALLROOT/etc/modulefiles"
MODULEFILE="$MODULEDIR/$PKGNAME"
mkdir -p "$MODULEDIR"
cat > "$MODULEFILE" <<EoF
#%Module1.0
proc ModulesHelp { } {
global version
puts stderr "ALICE Modulefile for $PKGNAME $PKGVERSION-@@PKGREVISION@$PKGHASH@@"
}
set version $PKGVERSION-@@PKGREVISION@$PKGHASH@@
module-whatis "ALICE Modulefile for $PKGNAME $PKGVERSION-@@PKGREVISION@$PKGHASH@@"
# Dependencies
module load BASE/1.0 ${GCC_TOOLCHAIN_ROOT:+GCC-Toolchain/$GCC_TOOLCHAIN_VERSION-$GCC_TOOLCHAIN_REVISION}
# Our environment
set LZMA_ROOT \$::env(BASEDIR)/$PKGNAME/\$version
setenv LZMA_ROOT \$LZMA_ROOT
set BASEDIR \$::env(BASEDIR)
prepend-path LD_LIBRARY_PATH \$BASEDIR/$PKGNAME/\$version/lib
prepend-path PATH \$BASEDIR/$PKGNAME/\$version/bin
EoF
| true |
b1739f16650091f61d90fbb67352025fc5fb1fed | Shell | Menziess/New | /Makefile.sh | UTF-8 | 1,026 | 3.25 | 3 | [] | no_license |
help:
@echo "Tasks in \033[1;32m${APP_NAME}\033[0m:"
@echo " init"
@echo " Install and activate project environment."
@echo " clean"
@echo " Remove build artifacts."
@echo " lint"
@echo " Check style with mypy."
@echo " test"
@echo " Run pytest."
@echo " build"
@echo " Run pybuilder build command."
@echo " dev"
@echo " Build ${APP_NAME} and run development docker container."
@echo ""
@echo "By github/menziess"
init:
pipenv shell
lint:
mypy . --ignore-missing-imports
test:
pytest
clean:
pyb clean -c
find . \( -name __pycache__ -o -name "*.pyc" -o -name __init__.py \) -delete
rm -rf .pytest_cache/ .mypy_cache/
build:
pyb -c
docker build --rm -f "Dockerfile" -t new:latest .
clean-build:
make clean
make build
dev:
docker run --rm -it \
-e FLASK_ENV=development \
-p 3000:3000/tcp \
-p 80:80 \
-v $$(pwd):/app \
new:latest
run:
docker run --rm -it \
-e FLASK_ENV=production \
-p 80:80 \
-v $$(pwd):/app \
new:latest
| true |
5c4f2861e899ff4a026a169770ac478bc7e85bc3 | Shell | tyrannicaltoucan/dotfiles | /zsh/config/options.zsh | UTF-8 | 1,235 | 2.515625 | 3 | [] | no_license | autoload -Uz compinit && compinit -i
setopt no_case_glob # use case insensitive globbing
setopt correct # enable shell correction
setopt always_to_end # move the cursor to the end of a completed word
setopt auto_menu # display menu after successive tab press
setopt prompt_subst # enable prompt subsitution syntax
setopt share_history # share history between sessions
setopt append_history # append to history file
setopt inc_append_history # add commands to history as they are typed, instead of at shell exit
setopt hist_expire_dups_first # delete duplicates first
setopt hist_ignore_dups # don't store duplicates
setopt hist_reduce_blanks # remove blank lines from history
unsetopt menu_complete # don't auto-select the first entry
unsetopt flowcontrol # disable start and stop characters
unsetopt beep # disable terminal bells
# History
HISTFILE="$HOME/.zhistory"
HISTSIZE=2000
SAVEHIST=2000
# Completion styling
zstyle ":completion:*" menu select
zstyle ":completion:*" list-dirs-first true
zstyle ":completion:*" matcher-list "" "m:{[:lower:][:upper:]}={[:upper:][:lower:]}" "+l:|=* r:|=*"
| true |
dec2029d5350466dbd16f9187522c7e1af049c0c | Shell | etclabscore/emerald-wallet | /.circleci/deploy.sh | UTF-8 | 350 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# CI deploy step: upload emerald-wallet desktop artifacts to the versioned
# builds bucket using the `janus` release tool.
set -e
# Major.minor release series for the current build, e.g. "v0.9.x".
VERSION_BASE=$(janus version -format='v%M.%m.x')
echo "Deploy to http://builds.etcdevteam.com/emerald-wallet/$VERSION_BASE/"
# Stage the desktop build artifacts for upload.
mkdir deploy
mv dist/*.dmg dist/*.zip deploy/
# NOTE(review): -key points at the *encrypted* GCloud key file — presumably
# janus decrypts it itself; confirm against the janus docs/CI setup.
janus deploy -to="builds.etcdevteam.com/emerald-wallet/$VERSION_BASE/" -files="deploy/*" -key=".circleci/gcloud-circleci.json.enc"
echo "Deployed"
085e7d098f5bbd54836ef94bfc193fc03ad254a7 | Shell | kyletravis/fah | /fah_restart.sh | UTF-8 | 412 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Restart the Folding@home client when its log goes stale.
# If /var/lib/fahclient/log.txt has not been written for more than $limit
# seconds the client is assumed hung: stop it, kill leftovers, restart.

tsfile=$(stat -c %Y /var/lib/fahclient/log.txt)   # last log write (epoch secs)
now=$(date +%s)
# Log age in seconds — shell arithmetic, no need to spawn bc for integers.
diff=$(( now - tsfile ))
limit=300

if [ "$diff" -gt "$limit" ] ; then
    echo "$(date) diff of $diff is greater than $limit seconds - restarting"
    /etc/init.d/FAHClient stop
    killall -9 FAHClient
    sleep 15
    /etc/init.d/FAHClient start
else
    echo "$(date) diff of $diff is less than $limit seconds - doing nothing"
fi
exit 0
| true |
50da5816bb18155edaef883793b04aca1c71be59 | Shell | sukhchander/dotfiles | /.bash_profile | UTF-8 | 2,390 | 3.03125 | 3 | [] | no_license | export DISPLAY=:0.0
export EDITOR=/usr/local/bin/vim
export PATH="$(brew --prefix coreutils)/libexec/gnubin:/usr/local/bin:$PATH"
`keychain ~/.ssh/sukhchander@gmail`
. ~/.keychain/$HOSTNAME-sh
test -e ~/.dircolors && eval `dircolors -b ~/.dircolors`
export RBENV_ROOT=/usr/local/var/rbenv
if which rbenv > /dev/null; then eval "$(rbenv init -)"; fi
if [ -f $(brew --prefix)/etc/bash_completion ]; then
source $(brew --prefix)/etc/bash_completion
fi
source $(brew --prefix)/etc/bash_completion.d/git-prompt.sh
export GREP_COLOR=31
alias grep="grep --color=auto"
alias ll="ls -lha --color=auto"
alias l="ls -lh --color=auto"
alias b="cd .."
alias vi="vim"
alias mysql="`which mysql` -u root"
alias listen="netstat -atn | grep LISTEN"
# http://henrik.nyh.se/2008/12/git-dirty-prompt
# http://www.simplisticcomplexity.com/2008/03/13/show-your-git-branch-name-in-your-prompt/
# username@Machine ~/dev/dir[master]$ # clean working directory
# username@Machine ~/dev/dir[master*]$ # dirty working directory
gitStatus() {
st=$(git status 2>/dev/null | tail -n 1)
if [[ $st == "" ]]; then
echo ''
elif [[ $st == "nothing to commit (working directory clean)" ]]; then
echo ''
elif [[ $st == 'nothing added to commit but untracked files present (use "git add" to track)' ]]; then
echo '?'
else
echo '*'
fi
}
gitBranch() {
git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e "s/* \(.*\)/[\1$(gitStatus)]/" ;
}
function prompt {
local RED="\[\033[0;31m\]"
local LIGHT_RED="\[\033[1;31m\]"
export PS1='\[\e[01;30m\]\t`if [ $? = 0 ]; then echo "\[\e[32m\] ✔ "; else echo "\[\e[31m\] ✘ "; fi`\[\e[00;37m\]\u\[\e[01;37m\]:`[[ $(git status 2> /dev/null | head -n2 | tail -n1) != "# Changes to be committed:" ]] && echo "\[\e[31m\]" || echo "\[\e[33m\]"``[[ $(git status 2> /dev/null | tail -n1) != "nothing to commit (working directory clean)" ]] || echo "\[\e[32m\]"`$(__git_ps1 "(%s)\[\e[00m\]")\[\e[01;34m\]\w\[\e[00m\]\$ '
#export PS1="\n\[\033[35m\]\$(/bin/date)\n\[\033[32m\]\w\n\[\033[1;31m\]\u@\h: \[\033[1;34m\]\$(/usr/bin/tty | /bin/sed -e 's:/dev/::'): \[\033[1;36m\]\$(/bin/ls -1 | /usr/bin/wc -l | /bin/sed 's: ::g') files \[\033[1;33m\]\$(/bin/ls -lah | /bin/grep -m 1 total | /bin/sed 's/total //')b\[\033[0m\] -> \[\033[0m\]"
}
prompt
alias pg="pg_ctl -D /usr/local/var/postgres -l /usr/local/var/postgres/server.log"
| true |
a115f447b0aa08120b326ace61806543c51b3774 | Shell | jakirkham-feedstocks/libcugraph-feedstock | /recipe/build.sh | UTF-8 | 4,573 | 3 | 3 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Derived from cugraph build script, as seen here:
# https://github.com/rapidsai/cugraph/blob/db20b485cfc5399214afcff604b38493f38e83bf/build.sh#L137
# NOTE: it is assumed the RMM header-only sources have been downloaded from the
# multi-source "source:" section in the meta.yaml file.
# cmake must be able to find the RMM headers using find_path(). The RMM_ROOT env
# var is set so RMM_ROOT/include results in a valid dir for cmake to search.
# Set env var DUMP_LOGS_ON_ERROR=0 to suppress
DUMP_LOGS_ON_ERROR=${DUMP_LOGS_ON_ERROR:=1}
export CUGRAPH_SRC_DIR="${SRC_DIR}/cugraph"
export RMM_ROOT="${SRC_DIR}/rmm"
export LIBCUGRAPH_BUILD_DIR=${LIBCUGRAPH_BUILD_DIR:=${CUGRAPH_SRC_DIR}/cpp/build}
export GPU_ARCHS=ALL
export INSTALL_PREFIX=${PREFIX:=${CONDA_PREFIX}}
export BUILD_DISABLE_DEPRECATION_WARNING=ON
export BUILD_TYPE=Release
export BUILD_CPP_TESTS=OFF
export BUILD_CPP_MG_TESTS=OFF
export BUILD_STATIC_FAISS=OFF
export PARALLEL_LEVEL=${CPU_COUNT}
export INSTALL_TARGET=install
export VERBOSE_FLAG="-v"
# Use the nvcc wrapper installed with cudatoolkit, assumed to be first in PATH.
# This ensures nvcc calls the compiler in the conda env.
export CUDA_NVCC_EXECUTABLE=$(which nvcc)
# Manually specify the location of the cudatoolkit libs since cmake is not
# adding this lib dir to the link options.
CUDATK_LIB_DIR=$PREFIX/lib
export CXXFLAGS="${CXXFLAGS} -L${CUDATK_LIB_DIR}"
mkdir -p ${LIBCUGRAPH_BUILD_DIR}
cd ${LIBCUGRAPH_BUILD_DIR}
cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DGPU_ARCHS=${GPU_ARCHS} \
-DDISABLE_DEPRECATION_WARNING=${BUILD_DISABLE_DEPRECATION_WARNING} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DBUILD_STATIC_FAISS=${BUILD_STATIC_FAISS} \
-DBUILD_TESTS=${BUILD_CPP_TESTS} \
-DBUILD_CUGRAPH_MG_TESTS=${BUILD_CPP_MG_TESTS} \
"${CUGRAPH_SRC_DIR}/cpp"
ERRCODE=$?
if (( ${ERRCODE} != 0 )); then
if (( ${DUMP_LOGS_ON_ERROR} == 1 )); then
echo "********************************************************************************"
echo "* START OF: ${CUGRAPH_SRC_DIR}/cpp/build/CMakeFiles/CMakeOutput.log"
cat ${CUGRAPH_SRC_DIR}/cpp/build/CMakeFiles/CMakeOutput.log
echo "* END OF: ${CUGRAPH_SRC_DIR}/cpp/build/CMakeFiles/CMakeOutput.log"
echo "********************************************************************************"
echo "* START OF: ${CUGRAPH_SRC_DIR}/cpp/build/CMakeFiles/CMakeError.log"
cat ${CUGRAPH_SRC_DIR}/cpp/build/CMakeFiles/CMakeError.log
echo "* END OF: ${CUGRAPH_SRC_DIR}/cpp/build/CMakeFiles/CMakeError.log"
echo "********************************************************************************"
fi
exit ${ERRCODE}
fi
cmake --build "${LIBCUGRAPH_BUILD_DIR}" -j${PARALLEL_LEVEL} --target ${INSTALL_TARGET} ${VERBOSE_FLAG}
# FIXME: The v0.19.0a cugraph sources in the tarfile used on 2021-04-16 do not
# appear to have the update to generate the version_config.hpp file, so generate
# it here. If the final release of the v0.19 cugraph sources does have the
# generated file, this should still not cause harm. This should be removed for a
# 0.20+ build.
VERSION_CONFIG_IN='/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#define CUGRAPH_VERSION_MAJOR @CUGRAPH_VERSION_MAJOR@
#define CUGRAPH_VERSION_MINOR @CUGRAPH_VERSION_MINOR@
#define CUGRAPH_VERSION_PATCH @CUGRAPH_VERSION_PATCH@
'
VERSION_CONFIG_OUT=${INSTALL_PREFIX}/include/cugraph/version_config.hpp
CMAKELISTS_TXT=${CUGRAPH_SRC_DIR}/cpp/CMakeLists.txt
CUGRAPH_VER_STRING=$(grep "project(CUGRAPH" ${CMAKELISTS_TXT}|awk '{print $3}')
MAJOR=$(echo "${CUGRAPH_VER_STRING}"|cut -d'.' -f1)
MINOR=$(echo "${CUGRAPH_VER_STRING}"|cut -d'.' -f2)
PATCH=$(echo "${CUGRAPH_VER_STRING}"|cut -d'.' -f3)
echo "${VERSION_CONFIG_IN}" > ${VERSION_CONFIG_OUT}
sed -i "s/@CUGRAPH_VERSION_MAJOR@/${MAJOR}/g" ${VERSION_CONFIG_OUT}
sed -i "s/@CUGRAPH_VERSION_MINOR@/${MINOR}/g" ${VERSION_CONFIG_OUT}
sed -i "s/@CUGRAPH_VERSION_PATCH@/${PATCH}/g" ${VERSION_CONFIG_OUT}
| true |
b22ab8747e5b589a6bbafb05ab63161ced71e75a | Shell | honginpyo1/tf_storage_edu | /script | UTF-8 | 954 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env bash
# FIX: a shell assignment must not have spaces around '=' — the original
# `PROGRAM_USER = $(whoami)` tried to execute a command named PROGRAM_USER.
PROGRAM_USER=$(whoami)

# slack_message MESSAGE IS_GOOD
#   $1 : message text (currently not used in the payload below)
#   $2 : "true" selects the good (green) colour, anything else danger (red)
# Posts a server-credentials notification to the Slack incoming webhook.
function slack_message(){
  COLOR="danger"
  if $2 ; then
    COLOR="good"
  fi
  # FIXES in the payload:
  #  - "color" now uses $COLOR (it was computed but never used),
  #  - $PROGRAM_USER is expanded instead of sent literally (it sat inside
  #    the single-quoted payload string),
  #  - $HOSTNAME is wrapped in JSON quotes (the value was bare → invalid JSON).
  curl -s -d 'payload={"attachments": [
    {
      "fallback": "Required plain-text summary of the attachment.",
      "color": "'"$COLOR"'",
      "pretext": "서버 계정 정보",
      "author_name": "'"$HOSTNAME"'",
      "title": "Server Credential",
      "text": "User : '"$PROGRAM_USER"'\n Password : ",
      "fields": [
        {
          "title": "Priority",
          "value": "High",
          "short": false
        }
      ],
      "footer": "Slack API",
      "footer_icon": "https://platform.slack-edge.com/img/default_application_icon.png"
    }
  ]}' https://hooks.slack.com/services/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
}
cef1a380964acd67d921acf04151bd5a80fdba67 | Shell | f/yarn | /scripts/bootstrap-env-ubuntu.sh | UTF-8 | 392 | 2.703125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Bootstraps a Yarn development environment on Ubuntu.
# Installs the Yarn APT repository plus the packaging tools (rpm, lintian,
# fpm) needed to build Yarn's own distribution packages.
set -ex
# Add Yarn package repo - We require Yarn to build Yarn itself :D
# NOTE(review): apt-key is deprecated on modern Ubuntu; consider a signed-by
# keyring under /etc/apt/keyrings when this script is next updated.
sudo apt-key adv --fetch-keys http://dl.yarnpkg.com/debian/pubkey.gpg
echo "deb http://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
sudo apt-get update -qq
sudo apt-get install -y rpm lintian yarn
# fpm (Ruby gem) builds the .deb/.rpm packages.
gem install fpm
| true |
a41cfee760636fb6c119704666bdcbf2f5a3fa47 | Shell | comsci-uwc-isak/process-journal-LingyeWU | /BASH/Class8_Sep.18th/fileCreator.sh | UTF-8 | 259 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# This program creates 100 numbered text files inside a "test" folder.

echo "Creating the folder"
mkdir -p test   # -p: do not fail if the folder already exists

# Enter the folder; abort if that fails so files are not written elsewhere.
cd test || exit 1

# create 100 files
for (( f=0; f<100; f++ ))
do
    echo "creating file $f"
    echo "Message #$f" > "file_$f.txt"
done
echo "Task completed"
| true |
ca05b4070cb15e97cb73db3c54c6e951372dfeea | Shell | pryorda/ubuntuPackerImage | /build.sh | UTF-8 | 2,338 | 3.234375 | 3 | [] | no_license | #!/bin/bash -e
# On the esx host configure firewall rules for vnc: https://platform9.com/support/enable-vnc-on-vmware-deployments/
PACKER_ESXI_VNC_PROBE_TIMEOUT=2s
set -x
export VMWARE_SSH_USER=${VMWARE_SSH_USER:-"root"}
export VMWARE_DATASTORE=${VMWARE_DATASTORE:-"tools"}
export VIRTUAL_MACHINE_NAME=${VIRTUAL_MACHINE_NAME:-"Ubuntu-18.04"}
export VIRTUAL_MACHINE_VERSION=${VIRTUAL_MACHINE_VERSION:-"v1"}
export VIRTUAL_MACHINE_NAME="${VIRTUAL_MACHINE_NAME}-${VIRTUAL_MACHINE_VERSION}"
export VCENTER_HOST=${VCENTER_HOST:-"vcenter.pryorda.dok"}
export SSH_KEY_FILE=${SSH_KEY_FILE:-"${HOME}/.ssh/id_rsa"}
export OUTPUT_DIR="${OUTPUT_DIR:-"packer-output-${VIRTUAL_MACHINE_NAME}"}"
export REMOTE_BUILD_HOST=${REMOTE_BUILD_HOST:-"vmware-hypervisor1.pryorda.dok"}
export VMWARE_NETWORK=${VMWARE_NETWORK:-"VM Network"}
set +x
# Enable Logging:
# export PACKER_LOG=1
if [ -z ${PACKER_UBUNTU_PASSWORD} ]; then
echo "PACKER_UBUNTU_PASSWORD not defined. Please export this variable to set required ubuntu user password"
exit 1
fi
if [ -z ${VMWARE_SSH_PASSWORD} ]; then
echo "VMWARE_SSH_PASSWORD not defined. Please export this variable to set required password"
exit 1
fi
if [ -z ${VMWARE_SSH_USER} ]; then
echo "VMWARE_SSH_USER not defined. Please export this variable to set required user"
exit 1
fi
if [ -z ${PACKER_UBUNTU_AUTHORIZED_KEY} ] || [ ! -f ${PACKER_UBUNTU_AUTHORIZED_KEY} ]; then
echo "PACKER_UBUNTU_AUTHORIZED_KEY not defined or set to a non-existant file."
echo "Please export this variable to point to a public key file that will be "
echo "placed in the ubuntu users authorized keys."
echo "${PACKER_UBUNTU_AUTHORIZED_KEY}"
exit 1
fi
export UBUNTU_AUTHORIZED_KEY="$(cat ${PACKER_UBUNTU_AUTHORIZED_KEY})"
echo "UBUNTU_AUTHORIZED_KEY=${UBUNTU_AUTHORIZED_KEY}"
echo "==> Building ESX (vmware-iso) VM ${VIRTUAL_MACHINE_NAME}"
packer build --only=vmware-iso ubuntu.json
echo '==> Copy shrinkdisk.sh to host'
sshpass -e scp -i ${SSH_KEY_FILE} -o 'StrictHostKeyChecking=no' shrinkdisk.sh ${VMWARE_SSH_USER}@${REMOTE_BUILD_HOST}:/tmp/shrinkdisk.sh
echo '==> Shrink disk'
sshpass -e ssh -i ${SSH_KEY_FILE} -o 'StrictHostKeyChecking=no' ${VMWARE_SSH_USER}@${REMOTE_BUILD_HOST} "export VMWARE_DATASTORE=${VMWARE_DATASTORE} && export OUTPUT_DIR=${OUTPUT_DIR} && sh /tmp/shrinkdisk.sh"
| true |
724d1be462fd6e8cffe55a785691066d58990ff2 | Shell | dbarobin/github | /linux/linux_run.sh | UTF-8 | 509 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Refresh the GitHub records in /etc/hosts.
# github_hosts.py (next to this script) regenerates github_hosts.txt; old
# entries for the domains in github_domain.txt are removed first.

BASE_PATH=$(dirname "$(readlink -f "$0")")

# Generate the fresh host list; abort on failure.
if ! python "${BASE_PATH}/github_hosts.py"; then
    echo "get host name error" >&2
    exit 1
fi

# Clear /etc/hosts first: markers, then every listed domain.
sed -i "/GitHub Start/d" /etc/hosts
sed -i "/GitHub End/d" /etc/hosts
while IFS= read -r domain; do
    # The domain is interpolated into a sed address, i.e. treated as a
    # regex (dots match any character) — same behaviour as before.
    sed -i "/$domain/d" /etc/hosts
done < "${BASE_PATH}/github_domain.txt"

# Add dns record at /etc/hosts, line by line.
# (read -r per line replaces the original IFS=$'\n' word-splitting loop.)
while IFS= read -r record; do
    echo "$record" >> /etc/hosts
done < "${BASE_PATH}/github_hosts.txt"
| true |
7d712f73b4b8522fa5363898236e4d85d9715434 | Shell | mlab-upenn/arch-apex | /APEX-S/Libraries/drake-v0.9.11-mac/install_prereqs.sh | UTF-8 | 681 | 3.671875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
case $1 in
("homebrew")
brew install cmake pkg-config gtk+ ;;
("macports")
port install cmake gtk2 ;;
("ubuntu")
apt-get install cmake openjdk-6-jdk build-essential ;;
("cygwin")
cygwin-setup -q -P make pkg-config ;;
(*)
echo "Usage: ./install_prereqs.sh package_manager"
echo "where package_manager is one of the following: "
echo " homebrew"
echo " macports"
echo " ubuntu"
echo " cygwin"
exit 1 ;;
esac
SUBDIRS="drake externals"
for subdir in $SUBDIRS; do
if [ -f $subdir/install_prereqs.sh ]; then
echo "installing prereqs for $subdir"
( cd $subdir; ./install_prereqs.sh $1 || true )
fi
done
| true |
8c31d932aaa5f5c931ff0c6bcdd37b33b46a1fb4 | Shell | fk2000/bash-scriptlog | /ss.sh | UTF-8 | 214 | 2.921875 | 3 | [
"MIT"
] | permissive | #! /bin/bash
# Record a terminal session with script(1) into a per-day log directory
# (logs/YYYY/MM/DD under ROOT_DIR), using a timestamped file name.
directory=${ROOT_DIR}/logs/$(date +%Y)/$(date +%m)/$(date +%d)
filename=script_$(date +%H%M%S).txt
mkdir -p "${directory}"
# FIX: use the prepared ${filename} — it was defined but never used; the
# original invoked a bogus `$(unknown)` command here, leaving the target
# path as a bare directory.
script "${directory}/${filename}"
echo 'Script End! time='$(date +%H:%M:%S)
| true |
8a9af2d0c7e18239f7196c744576ecdb68213c7c | Shell | som-snytt/jenkins-scripts | /job/pr-scala-test | UTF-8 | 418 | 2.59375 | 3 | [] | no_license | #!/bin/bash -e
# Jenkins job: run the Scala test suite against a prebuilt PR distpack.
# Expects $WORKSPACE (Jenkins) containing a scala/ checkout and build.tgz.
scriptsDir="$( cd "$( dirname "$0" )/.." && pwd )"
# Shared helper definitions for these jenkins-scripts jobs.
. $scriptsDir/common
cd $WORKSPACE/scala
# put build folder from upstream pr-scala-distpack in place
# -m sets modification to now, so that we don't rebuild (sources will have older modification time)
tar -mxz -f $WORKSPACE/build.tgz
# Script name differs between branches; try both variants.
./pull-binary-libs.sh || ./pull-binary-libs
ant test-opt # TODO: add ant target that doesn't run stability test
| true |
04b95012244c81725f3bb050ae8ba907bd8f3ac2 | Shell | barnsnake351/dotfiles | /go/path.zsh | UTF-8 | 155 | 2.90625 | 3 | [
"MIT"
] | permissive | # Configure paths for Go
# Export GOPATH and extend PATH only when the go toolchain is installed and
# the workspace bin directory exists.
# `command -v` replaces the external `which` (builtin, portable, and the
# exit status is a reliable installed/not-installed signal).
if command -v go >/dev/null && [[ -d "${PROJECTS}/go/bin" ]]
then
  export GOPATH="$PROJECTS/go"
  export PATH="$GOPATH/bin:$PATH"
fi
| true |
7f5e04e9a1c1a2992c55e5846e0d2e28a7fe8812 | Shell | guillaumeisabelleevaluating/gia-RetinexNet | /dockers/build_all.sh | UTF-8 | 135 | 3.078125 | 3 | [
"MIT"
] | permissive | cdir=$(pwd)
# Run build.sh inside every subdirectory of the current directory, returning
# to the start directory ($cdir, captured above) after each build.
# Iterate with a glob instead of parsing `ls` output (safe for odd names).
for d in *; do
    if [ -d "$d" ] ; then
        echo "--------$d-----------"
        # Only source build.sh if we actually entered the directory.
        cd "$d" && . build.sh
        cd "$cdir"
    fi
done
| true |
6fd7f2b935421a6c41c10e74a3bba5ae323a10cb | Shell | iosphere/docker-tools | /.docker/docker-entrypoint.sh | UTF-8 | 490 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Docker entrypoint: when MACHINE_NAME is set, configure the Docker client
# for that docker-machine endpoint, then exec the container command (CMD).

if [ -n "${MACHINE_NAME+x}" ]; then
    # Configure Docker endpoint
    echo "setting up environment for '$MACHINE_NAME':"
    eval "$(docker-machine env --shell bash "$MACHINE_NAME")"

    # FIX: restrict the machine SSH key's permissions. The original ran
    # `chown 600 <key>`, which changes the file's *owner* to UID 600 and
    # leaves the mode untouched; `chmod 600` is what was intended.
    chmod 600 "/root/.docker/machine/machines/${MACHINE_NAME}/id_rsa"

    # Show info
    docker info
fi

# Execute CMD
exec "$@"
| true |
eccabee7e2dd74c2758046a5141f84e1f4f4712c | Shell | phenixid/ansible-pas | /examples/digo/files/start-PhenixID.sh | UTF-8 | 5,172 | 2.546875 | 3 | [] | no_license | #!/bin/bash
#
# Start Phenix Server
#
PHENIX_HOME="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
echo "INFO: Using PHENIX_HOME: ${PHENIX_HOME}"
PHENIX_VERSION=$( ${PHENIX_HOME}/bin/version.sh )
echo "INFO: Using PHENIX_VERSION: ${PHENIX_VERSION}"
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
# Java settings
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
JAVA="$PHENIX_HOME/jre/bin/java"
JAVA_HEAP_SIZE=1536M
JAVA_OPTS="-server -d64"
# Memory heap size settings (use same value for min and max)
JAVA_OPTS="${JAVA_OPTS} -Xms${JAVA_HEAP_SIZE} -Xmx${JAVA_HEAP_SIZE}"
# Assertions
#JAVA_OPTS="${JAVA_OPTS} -ea"
# GC
JAVA_OPTS="${JAVA_OPTS} -XX:+HeapDumpOnOutOfMemoryError"
JAVA_OPTS="${JAVA_OPTS} -XX:+UseParNewGC"
JAVA_OPTS="${JAVA_OPTS} -XX:+UseConcMarkSweepGC"
JAVA_OPTS="${JAVA_OPTS} -XX:+CMSParallelRemarkEnabled"
JAVA_OPTS="${JAVA_OPTS} -XX:SurvivorRatio=8"
JAVA_OPTS="${JAVA_OPTS} -XX:MaxTenuringThreshold=1"
JAVA_OPTS="${JAVA_OPTS} -XX:CMSInitiatingOccupancyFraction=75"
JAVA_OPTS="${JAVA_OPTS} -XX:+UseCMSInitiatingOccupancyOnly"
JAVA_OPTS="${JAVA_OPTS} -XX:MaxDirectMemorySize=512G"
# JAVA_OPTS="${JAVA_OPTS} -XX:+UnlockCommercialFeatures"
# JAVA_OPTS="${JAVA_OPTS} -XX:+FlightRecorder"
# JMX/management
#JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote.port=7199"
#JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote.ssl=false"
#JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote.authenticate=false"
#
# Entropy gathering device
#
# Enable when running i virtualized environments
#
#JAVA_OPTS="${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom"
# Debug
#JAVA_OPTS="${JAVA_OPTS} -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005"
#Adding from command line#
for var in "$@"
do
echo ${var}
if [[ "$var" == "-D"* ]]
then
JAVA_OPTS="${JAVA_OPTS} ${var}"
fi
done
# Misc
JAVA_OPTS="${JAVA_OPTS} -Djava.awt.headless=true -Dfile.encoding=UTF-8"
JAVA_OPTS="${JAVA_OPTS} -Dhazelcast.phone.home.enabled=false"
JAVA_OPTS="${JAVA_OPTS} -Dstorage.useWAL=true"
#Set timeout until WS client connection is established
JAVA_OPTS="${JAVA_OPTS} -Djavax.xml.ws.client.connectionTimeout=6000"
#Set timeout until the WS client receives a response
JAVA_OPTS="${JAVA_OPTS} -Djavax.xml.ws.client.receiveTimeout=6000"
JAVA_OPTS="${JAVA_OPTS} -Dorg.terracotta.quartz.skipUpdateCheck=true"
JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.xml.internal.ws.request.timeout=6000"
JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.xml.internal.ws.connect.timeout=6000"
##################Proxy settings,To use proxy uncomment and change parameters to fit your environment#######
#http.proxyHost (default: <none>)
#http.proxyPort (default: 80)
#https.proxyHost(default: <none>)
#https.proxyPort (default: 443)
#JAVA_OPTS="${JAVA_OPTS} -Dhttp.proxyHost=127.0.0.1 -Dhttp.proxyPort=8080 -Dhttps.proxyHost=127.0.0.1 -Dhttps.proxyPort=8433"
#############################################
# Logging
JAVA_OPTS="${JAVA_OPTS} -Dlog4j.configurationFile=${PHENIX_HOME}/config/log4j2.xml"
JAVA_OPTS="${JAVA_OPTS} -Djava.util.logging.config.file=${PHENIX_HOME}/config/logging.properties"
JAVA_OPTS="${JAVA_OPTS} -Dorg.vertx.logger-delegate-factory-class-name=com.phenixidentity.server.Log4j2LogDelegateFactory"
JAVA_OPTS="${JAVA_OPTS} -Dhazelcast.logging.type=log4j2"
# Vertx
JAVA_OPTS="${JAVA_OPTS} -Dvertx.home=${PHENIX_HOME}"
JAVA_OPTS="${JAVA_OPTS} -Dvertx.clusterManagerFactory=org.vertx.java.spi.cluster.impl.hazelcast.HazelcastClusterManagerFactory"
JAVA_OPTS="${JAVA_OPTS} -Dvertx.mods=${PHENIX_HOME}/mods"
JAVA_OPTS="${JAVA_OPTS} -Dvertx.pool.worker.size=20"
# Message Gateway Client
JAVA_OPTS="${JAVA_OPTS} -Dmsggw.connection.timeout=4"
JAVA_OPTS="${JAVA_OPTS} -Dmsggw.socket.timeout=4"
# Application
JAVA_OPTS="${JAVA_OPTS} -Dcom.phenixidentity.globals.asyncStoreRequestTimeout=10000"
JAVA_OPTS="${JAVA_OPTS} -Dcom.phenixidentity.globals.http.port=8443"
JAVA_OPTS="${JAVA_OPTS} -Dapplication.name=PhenixServer"
# Override java user.home to enable loading modules from bundled repo in installation (isof m2 repo in user home)
JAVA_OPTS="${JAVA_OPTS} -Duser.home=${PHENIX_HOME}"
# Is running on PhenixID appliance/default
JAVA_OPTS="${JAVA_OPTS} -Dcom.phenixidentity.operatingPlattform=default"
# Classpath
JAVA_CP="-cp ${PHENIX_HOME}/classes"
for file in ${PHENIX_HOME}/lib/*.jar
do
if [[ -f $file ]]; then
JAVA_CP="${JAVA_CP}:$file"
fi
done
SERVER_OPTS=$@
# Run in foreground
RUN_IN_FOREGROUND=$(echo "$SERVER_OPTS" | grep -- "--fg")
SERVER_OPTS=$(echo ${SERVER_OPTS} | sed 's/--fg//g')
JAVA_CMD="${JAVA} ${JAVA_OPTS} ${JAVA_CP} com.phenixidentity.server.Server runmod com.phenixidentity~phenix-node~${PHENIX_VERSION} -conf ${PHENIX_HOME}/config/boot.json ${SERVER_OPTS}"
echo "INFO: Start command: ${JAVA_CMD}"
if [[ ! -z ${RUN_IN_FOREGROUND} ]]; then
( cd ${PHENIX_HOME}; nohup ${JAVA_CMD} 2>&1 | tee ${PHENIX_HOME}/logs/nohup.out )
else
( cd ${PHENIX_HOME}; nohup ${JAVA_CMD} > ${PHENIX_HOME}/logs/nohup.out 2>&1 & )
fi
| true |
648646fc55036b3d7fea5e672d175f46afba4bd1 | Shell | jonx/sn_authenticator_mobile | /get_release_description.sh | UTF-8 | 1,275 | 3.046875 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | read -r -d '' release_description << 'EOF'
The Safe Authenticator acts as a gateway to the [Safe Network](https://safenetwork.tech/) by enabling users to create an account & authenticate themselves onto the Safe Network.
It helps users ensure they have full control over the permissions they grant to Safe apps.
## Changelog
CHANGELOG_CONTENT
## SHA-256 checksums:
```
APK checksum
APK_CHECKSUM
IPA checksum
IPA_CHECKSUM
```
## Related Links
* Safe Browser - [Desktop](https://github.com/maidsafe/sn_browser/releases/) | [Mobile](https://github.com/maidsafe/sn_mobile_browser/)
* [Safe CLI](https://github.com/maidsafe/sn_api/tree/master/sn_cli)
* [Safe Network Node](https://github.com/maidsafe/sn_node/releases/latest/)
* [sn_csharp](https://github.com/maidsafe/sn_csharp/)
EOF
# SHA-256 of each mobile artifact (first field of sha256sum output).
# Paths are relative to this script's working directory.
apk_checksum=$(sha256sum "../net.maidsafe.SafeAuthenticator.apk" | awk '{ print $1 }')
ipa_checksum=$(sha256sum "../SafeAuthenticatoriOS.ipa" | awk '{ print $1 }')
# Latest changelog section: drop everything up to the first ']' line, then
# stop at the next '##' heading in CHANGELOG.MD.
changelog_content=$(sed '1,/]/d;/##/,$d' ../CHANGELOG.MD)
# Fill the placeholders in the release-notes template read above
# (the quoted 'EOF' delimiter kept the template literal until now).
release_description=$(sed "s/APK_CHECKSUM/$apk_checksum/g" <<< "$release_description")
release_description=$(sed "s/IPA_CHECKSUM/$ipa_checksum/g" <<< "$release_description")
echo "${release_description/CHANGELOG_CONTENT/$changelog_content}" > release_description.txt
| true |
ea43a5efc0ee9589123374a2cf04d4e5def412c3 | Shell | kiransharma755/utilities | /utilities/maven_install | UTF-8 | 735 | 3.078125 | 3 | [] | no_license | #!/bin/bash
#!/bin/bash
# Download Apache Maven into /opt and register M2_HOME/PATH system-wide
# via /etc/profile.d/maven.sh.
set -x

download_folder=/opt
maven_version=3.5.4

# Release URL layout: .../maven/maven-<major>/<version>/binaries/
maven_major=${maven_version%%.*}
maven_url="http://archive.apache.org/dist/maven/maven-${maven_major}/${maven_version}/binaries/"
maven_binary_file="apache-maven-${maven_version}-bin.tar.gz"
download_url=${maven_url}/${maven_binary_file}

wget -q -O "${download_folder}/${maven_binary_file}" "${download_url}"
tar -zxf "${download_folder}/${maven_binary_file}" -C "${download_folder}"

echo "export M2_HOME=${download_folder}/apache-maven-${maven_version}" >>/etc/profile.d/maven.sh
# FIX: single-quote so $PATH and $M2_HOME are expanded at *login* time.
# The original double-quoted line expanded them immediately, baking this
# script's PATH into the profile and appending an empty M2_HOME (":/bin").
echo 'export PATH=$PATH:$M2_HOME/bin' >>/etc/profile.d/maven.sh
| true |
c869e9e18e5adc9ff7e68b12ac598246ba2d7d61 | Shell | RENCI-NRIG/chaos-jungle | /vagrant/xdp-fedora25-xo/exogeni-image-create.sh | UTF-8 | 2,986 | 3.0625 | 3 | [] | no_license | #/bin/bash -x
dnf -y update
#dnf -y install yum-utils epel-release
#package-cleanup --oldkernels --count=1
dnf install -y python python-devel python-boto python-daemon python-ipaddr python-netaddr
dnf install -y wget vim git
dnf install -y dracut-config-generic net-tools cloud-init cloud-utils-growpart acpid
dnf install -y iscsi-initiator-utils iscsi-initiator-utils-iscsiuio
sed -i s/"disable_root: 1"/"disable_root: 0"/g /etc/cloud/cloud.cfg
sed -i s/SELINUX=enforcing/SELINUX=disabled/g /etc/selinux/config
sed -r -i 's/^#*(PermitRootLogin).*/\1 without-password/g' /etc/ssh/sshd_config
cat << EOF >> /etc/hosts.deny
rpcbind: ALL EXCEPT 172.16.0.0/255.240.0.0 10.0.0.0/255.0.0.0 192.168.0.0/255.255.0.0
EOF
FEDORA=`cat /etc/fedora-release | grep -oE '[0-9]+'`
cat << EOF > /etc/motd
########################################################
# ExoGENI VM Instance - Fedora ${FEDORA} #
# #
# /etc/hosts.deny file is customized for rpcbind. #
# If you are using rpcbind daemon, please #
# check /etc/hosts.deny for connectivity on dataplane. #
# #
# More information about security: #
# www.exogeni.net/2016/10/securing-your-slice-part-1/ #
########################################################
EOF
cat << EOF > /etc/resolv.conf
nameserver 8.8.8.8
EOF
cat << EOF > /etc/sysconfig/network
NETWORKING=yes
NOZEROCONF=yes
EOF
cat << EOF > /etc/default/grub
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL="serial console"
GRUB_SERIAL_COMMAND="serial --speed=115200"
GRUB_CMDLINE_LINUX=" vconsole.keymap=us console=tty0 vconsole.font=latarcyrheb-sun16 console=ttyS0,115200"
GRUB_DISABLE_RECOVERY="true"
EOF
###cat << EOF > /usr/lib/dracut/dracut.conf.d/02-generic-image.conf
###hostonly="no"
###EOF
git clone https://github.com/RENCI-NRIG/neuca-guest-tools.git
cd ./neuca-guest-tools/neuca-py
python setup.py install
cp neucad.service /usr/lib/systemd/system/neucad.service
ln -s /usr/lib/systemd/system/neucad.service /etc/systemd/system/multi-user.target.wants/neucad.service
systemctl enable neucad
systemctl enable iscsid
#systemctl stop firewalld
#systemctl disable firewalld
IMG_NAME="fedora25-v1.0.3"
IMG_URL="http://geni-images.renci.org/images/standard/fedora/${IMG_NAME}"
DEST="/mnt/target"
SIZE="8G"
if [ ! -d ${DEST} ]; then
mkdir -p ${DEST}
fi
cd /tmp
wget http://geni-images.renci.org/images/tools/imgcapture.sh
chmod +x imgcapture.sh
./imgcapture.sh -o -n ${IMG_NAME} -s ${SIZE} -u ${IMG_URL} -d ${DEST}
cd ${DEST}
# Re-create initramfs with hostonly=no option in /usr/lib/dracut/dracut.conf.d/02-generic-image.conf
rm -f initramfs*.img
dracut -f ${DEST}/initramfs-$(uname -r).img $(uname -r)
chmod 644 *.tgz *.xml *.img
echo Please recompute initramfs checksum as it has been rebuilt...
| true |
5fb5baba8d5b4862c35d71da982c73e0ad6b4af0 | Shell | cristi-d/git-workspace-scripts | /scripts/checkout-branch.sh | UTF-8 | 123 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# Expects
#   $1 = branch name
#   $2 = project directory
# Check out the branch inside the project directory, then restore the
# caller's working directory.
# FIX: run in a subshell — the original ended with `cd ..`, which only goes
# one level up and is wrong whenever $2 has multiple path components; the
# unchecked `cd $2` could also run git in the wrong directory.
(
  cd "$2" || exit 1
  echo "+ $2"
  git checkout "$1"
  echo ""
)
| true |
72b0a086cef6aab1152ec6b391d32f7802545e41 | Shell | esac-lab/visual-robot | /install/install_ORB_SLAM2.sh | UTF-8 | 805 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env sh
# Install Pangolin
#
# Install dependencies
sudo apt-get install libglew-dev -y
sudo apt-get install cmake -y
# Build Pangolin from source and install it
curr_dir=`pwd`
cd /tmp
git clone https://github.com/gaunthan/Pangolin
cd Pangolin
mkdir build
cd build
cmake ..
make
sudo make install
cd "$curr_dir"
# Install OpenCV python binding
sudo apt install python-opencv python3-opencv -y
# Install Eigen3
curr_dir=`pwd`
cd /tmp
wget http://bitbucket.org/eigen/eigen/get/3.3.4.tar.gz
tar xzvf 3.3.4.tar.gz
cd eigen-eigen-*
mkdir build
cd build
cmake ..
sudo make install
cd "$curr_dir"
# Install ORB_SLAM2
cd ./src
if [ ! -d ORB_SLAM2 ]; then
git clone https://github.com/gaunthan/ORB_SLAM2
else
git pull
fi
cd ORB_SLAM2
chmod +x build.sh build_ros.sh
./build.sh
./build_ros.sh
| true |
c320aff6029d352d65c5be772636e7f2ab0a867c | Shell | richard512/CrowdsourcingPhotoshop | /Tools/storyboard/download-storyboard.sh | UTF-8 | 942 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Download the storyboard image for every URL (one per line) listed in
# ./url-storyboard-list, saving each as <vid>/<filename> where <vid> is the
# 2nd and <filename> the 4th path segment of the URL.
# (The unused $regex variable and the commented-out regex experiment from
# the original were removed.)

n=0
while IFS= read -r line; do      # read -r keeps backslashes literal
    n=$((n + 1))
    echo "$n $line"

    # Extract the URL's path component (requires perl's URI module).
    path=$(echo "$line" | perl -MURI -le 'chomp($line = <>); print URI->new($line)->path')

    # Split the path on '/'; empty fields are dropped by word splitting,
    # so segment 2 is the video id and segment 4 the file name.
    # Reset both each iteration so a short path cannot reuse stale values.
    vid=
    filename=
    i=1
    for seg in $(echo "$path" | tr "/" "\n"); do
        if [[ $i -eq 2 ]]; then
            vid=$seg
        fi
        if [[ $i -eq 4 ]]; then
            filename=$seg
        fi
        i=$((i + 1))
    done
    echo "$vid/$filename"

    curl "$line" --create-dir -o "$vid/$filename"
done < "url-storyboard-list"

echo "Final line count is: $n"
45204f2a47613fb38384b871ee0164c6c1581bb9 | Shell | jensites/kuwi-admin-tools | /etc-rc6.d-reboot-notify | UTF-8 | 269 | 2.828125 | 3 | [] | no_license | #! /bin/sh
# save this file as /etc/rc6.d/notify-reboot
FROM="user@host"
TO="user1@host1, user2@host2"
cat <<EOT | sendmail -oi -t -f "$FROM"
From: $FROM
To: $TO
Subject: Server `uname -n` going down for reboot
Server `uname -n` going down for reboot at `date`
EOT
| true |
f28c2f1f21fc129ff30dbc12e9709647184245e6 | Shell | smo921/skel | /bin/multissh | UTF-8 | 467 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Open a tmux window with one synchronized ssh pane per host argument.
# Usage: multissh host1 [host2 ...]; exits 255 when no hosts are given.
# Relies on `podsymbol` being defined in ~/.bash_functions.
. ~/.bash_functions
[ "${#}" -eq 0 ] && exit 255
# Window title prefix defaults to "multi" unless TITLE is already set.
if [ -z "$TITLE" ]; then
  TITLE="multi"
fi
# Hostname prefix before the first digit, e.g. "web" from "web01.pod3.example".
first=`echo $1 | awk -F[0-9] '{print $1}'`
# Second dot-separated field of the first host is its pod name.
pod=`echo $1 | cut -d\. -f 2`
symbol=`podsymbol $pod`
# First host opens the window; the remaining hosts become tiled split panes.
tmux new-window -n "$TITLE{$first$symbol" "/usr/bin/ssh $1"; shift
for host in ${*}
do
  tmux split-window -h "/usr/bin/ssh $host"
  tmux select-layout tiled > /dev/null
  done
tmux select-pane -t 0
# Mirror keystrokes to every pane in the window.
tmux set-window-option synchronize-panes on > /dev/null
| true |
46847b7f54659d078ee237062e3e5b433492c53b | Shell | alviezhang/dotfiles | /oh-my-zsh/preload.zsh | UTF-8 | 304 | 2.984375 | 3 | [] | no_license | #!/bin/zsh
# oh-my-zsh plugin preload: common plugins plus platform-specific extras.
_plugins=(git golang httpie python ripgrep)

# Platform specific configurations
UNAME="$(uname)"
case "$UNAME" in
  Linux)
    export OS=linux
    os_plugins=()
    ;;
  Darwin)
    export OS=macOS
    os_plugins=(macos macports)
    ;;
esac

plugins=(${_plugins} ${os_plugins})
| true |
9f831b54ca9d99b1e8ff43051bd833de497aea24 | Shell | mchadder/unit-tester | /unittests.sh | UTF-8 | 3,456 | 4 | 4 | [] | no_license | #!/bin/bash
# Script identity shown in the banner printed by script_head.
SCRIPT_NAME="Unit-Tester"
SCRIPT_VERSION="0.7"
COPYRIGHT_NOTICE="(c) Chadders 2018"
# Optional KEY=VALUE configuration file loaded by read_config.
CONFIG_FILE="unittests.cfg"
# Print the given message only when debug tracing is enabled via
# UNITTESTS_DEBUG=Y; stay silent (and return 0) otherwise.
dbg() {
  case "$UNITTESTS_DEBUG" in
    Y) echo "$1" ;;
  esac
}
# Print the standard "name version - copyright" banner, followed by an
# optional extra message passed as $1.
script_head() {
  printf '%s %s - %s\n' "$SCRIPT_NAME" "$SCRIPT_VERSION" "$COPYRIGHT_NOTICE"
  if [ -n "$1" ]; then
    printf '%s\n' "$1"
  fi
}
# Load KEY=VALUE pairs from ${CONFIG_FILE} (if it exists) and export each
# one into the environment. Blank lines and lines whose first character
# is '#' are skipped.
#
# NOTE: values pass through 'eval', so the config file is trusted input --
# a malicious line can execute arbitrary commands.
function read_config() {
  if [ -f "${CONFIG_FILE}" ]
  then
    # BUG FIX: 'read' without -r interpreted backslashes in values;
    # -r keeps them literal. (Default IFS trimming is kept on purpose so
    # indented comment lines are still recognised.)
    while read -r p; do
      if [[ "$p" != "" && "${p:0:1}" != "#" ]]
      then
        eval export "$p"
      fi
    done <"${CONFIG_FILE}"
  fi
}
# When CHECK_CURL_INSTALLED=Y, verify that curl is available on PATH and
# abort the whole run otherwise (curl_test cannot work without it).
check_dependencies() {
  if [ "$CHECK_CURL_INSTALLED" != "Y" ]; then
    return 0
  fi
  if ! command -v curl >/dev/null 2>&1; then
    script_head "Cannot execute. curl is not available"
    exit 1
  fi
}
# Compare actual ($1) against expected ($2) output and, depending on the
# SHOW_PASSES / SHOW_FAILURES switches, emit one CSV result row:
#   "file","PASS|FAIL","actual","expected","test name","elapsed","comment"
# $3 = test name, $4 = elapsed seconds, $5 = free-form comment.
check_output() {
  local verdict=""
  if [[ "$1" == "$2" ]]; then
    if [[ "$SHOW_PASSES" == "Y" ]]; then
      verdict="PASS"
    fi
  elif [[ "$SHOW_FAILURES" == "Y" ]]; then
    verdict="FAIL"
  fi
  # Only emit a row when the relevant SHOW_* switch asked for it.
  if [[ -n "$verdict" ]]; then
    printf '"%s","%s","%s","%s","%s","%s","%s"\n' \
      "${FILENAME}" "$verdict" "$1" "$2" "$3" "$4" "$5"
  fi
}
# Example call:
#   curl_test "${TEST_RESOURCE}/1" "415" "http_code" "application/chadders" "Invalid Accept Header"
# Run one curl-based test case and report the outcome via check_output.
#   $1 - URL suffix appended to ${URL_PREFIX}/ (may contain brace
#        expansions, expanded through 'eval echo' below)
#   $2 - expected value of the captured write-out variable (default "200")
#   $3 - curl --write-out variable to capture (default "http_code")
#   $4 - optional Accept header to send
#   $5 - free-form comment appended to the CSV result row
# Globals read: URL_PREFIX, TEST_CURL_CONFIG (curl config set by the test).
function curl_test() {
  # TEST_CURL_CONFIG is set by a given test
  local TEST_URL_PATTERN="${URL_PREFIX}/$1"
  local TEST_CURL_RESPONSE="$2"
  local TEST_CURL_WRITEOUT="$3"
  local TEST_ACCEPT_HEADER="$4"
  local TEST_COMMENT="$5"
  # Default the expectation to HTTP 200.
  if [ "${TEST_CURL_RESPONSE}" == "" ]
  then
    TEST_CURL_RESPONSE="200"
  fi
  # Default the captured metric to the HTTP status code.
  if [ "${TEST_CURL_WRITEOUT}" == "" ]
  then
    TEST_CURL_WRITEOUT="http_code"
  fi
  # Loop around each field (via brace expansion)
  # NOTE(review): 'eval echo' executes anything embedded in $1, so test
  # definitions must be trusted input.
  for TEST_OUTPUT_URL in $(eval echo "${TEST_URL_PATTERN}");
  do
    local TEST_START=`date +%s`
    if [ "${TEST_ACCEPT_HEADER}" == "" ]
    then
      TEST_CURL_OUTPUT=`curl -K "${TEST_CURL_CONFIG}" -w "%{${TEST_CURL_WRITEOUT}}" "${TEST_OUTPUT_URL}" -o /dev/null`
    else
      TEST_CURL_OUTPUT=`curl -K "${TEST_CURL_CONFIG}" -H "Accept: ${TEST_ACCEPT_HEADER}" -w "%{${TEST_CURL_WRITEOUT}}" "${TEST_OUTPUT_URL}" -o /dev/null`
    fi
    local TEST_END=`date +%s`
    local TEST_RUNTIME=$((TEST_END-TEST_START))
    check_output "${TEST_CURL_OUTPUT}" "${TEST_CURL_RESPONSE}" "${TEST_OUTPUT_URL}" \
      "${TEST_RUNTIME}" "Test on ${TEST_CURL_WRITEOUT} returns ${TEST_CURL_RESPONSE} ${TEST_COMMENT}"
  done
}
dbg "Start Script"
read_config
dbg "Read Config"
check_dependencies
dbg "Checked Dependencies"
# Make the helpers visible to the test scripts, which run as child bash
# processes.
export -f check_output
export -f curl_test
# The tester can run a specific test script via $1
# Need to check if the script exists first, error otherwise
dbg "RUNNING: $1"
if [ "$1" != "" ]
then
  FULL_FILENAME="${TESTS_FOLDER}/$1"
  if [ -f "${FULL_FILENAME}" ]
  then
    export FILENAME="$1"
    # Run the command in a subshell to ensure that it does not need to know
    # the folder context (e.g. for curl config files etc)
    dbg "Subshelling: ${FULL_FILENAME}"
    (cd "$TESTS_FOLDER" && ./"${FILENAME}")
  else
    script_head "${FULL_FILENAME} does not exist!"
    exit 1
  fi
else
  # No argument: run every *.sh under the tests folder.
  for filename in "${TESTS_FOLDER}"/*.sh; do
    # BUG FIX: this used to call the nonexistent command 'unknown'
    # (`basename $(unknown)`); the exported name is derived from the loop
    # variable instead.
    export FILENAME=$(basename "$filename")
    dbg "Subshelling $filename"
    (cd "$TESTS_FOLDER" && ./"${FILENAME}")
  done
fi
exit 0
| true |
6a520a100e15751e95b83f2f9f253925c2e9d675 | Shell | pittsdave/IBM7000 | /s709/runibsys2 | UTF-8 | 811 | 3.046875 | 3 | [] | no_license | #!/bin/sh
# Prefer tools in the current directory (txt2bcd, bcd2cbn, s709, ...).
PATH=.:$PATH
#
# Run IBSYS Job using tape as input, using 2 channels.
#
# Usage check: $1 = program deck, $2 = listing output file.
if [ "$1" = "" -o "$2" = "" ];then
   echo "usage: runibsys2 program listing [s709options]"
   exit 1
fi
# Clean up artifacts from any previous run.
rm -f punch.* print.* sysou* sys*.bin $2
#
# Make sysin file
#
# Build the SYSIN deck: date card, user program, EOF card, IBSYS control.
gendate >sysin.txt
#cat ibsysc.ctl >>sysin.txt
cat $1 >>sysin.txt
cat eof.dat >>sysin.txt
cat ibsys.ctl >>sysin.txt
txt2bcd sysin
#
# Card-reader deck: control cards converted to column-binary format.
txt2bcd ibsys.ctl reader.bcd 80 80
bcd2cbn reader
#
# Run IBSYS
#
# Emulate an IBM 7094 with tape units mapped onto the listed files;
# extra simulator options may be forwarded via $3.
s709 $3 -cibsys2.cmd -m7094 r=reader p=print u=punch \
    a1r=ASYS12.BIN a2r=ASYS8.BIN a3=sysin.bcd a4=syslb4.bin \
    a5=sysut1.bin a6=sysut3.bin a7=sysck2.bin \
    b1=sysou1.bcd b2=sysou2.bcd b3=syspp1.bin b4=syspp2.bin \
    b5=sysut2.bin b6=sysut4.bin
#
# Convert printed output
#
bcd2txt -p sysou1.bcd $2
# Drop intermediate decks, keeping the converted listing ($2).
rm -f reader.* sysck*.bin print.bcd sysou1.* sysin.*
| true |
4d939ccb7ee85831002fcf282c9149ca7b8368bb | Shell | JoseaScripts/pendientes | /yavuelvo | UTF-8 | 900 | 3.125 | 3 | [] | no_license | #!/bin/bash
# /home/pi/bin/yavuelvo
# First toggles the GPIO pin state, then tracks an IP address: once it
# goes offline, keep probing; when it comes back online, switch the
# light (and, eventually, other notifications) back on.
## PENDING ##
# Known device addresses (alias -> IP).
jose='192.168.1.123'
fatima='192.168.1.125'
ip=$1
# Map a friendly alias to its address.
# NOTE(review): $ip is unquoted, so '[' errors when the script is run
# without an argument -- presumably always invoked with jose/fatima.
[ $ip = 'jose' ] && ip="$jose"
[ $ip = 'fatima' ] && ip="$fatima"
# Seconds between consecutive ping probes.
ciclo=3
# Probe cycles to wait while the person has not left yet.
# NOTE(review): 'espera' is never used below -- confirm it is still needed.
espera=30
# Grace period (seconds) once the signal is first lost.
trans=300
# Toggle the light via the GPIO helper (pin arguments 3 2 -- TODO confirm).
./rpin 3 2
# Phase 1: the device is still reachable -> the person has not left yet.
while ping -c1 $ip
do
  printf "%s todavía no se ha marchado. >> %s\n" $1 `date +"%H:%M:%S"`
  sleep $ciclo
done
# At this point the person has left; wait for the device to return.
# Switch the light off.
./rpin 3 2
sleep $trans
# Phase 2: probe until the device is reachable again.
until ping -c1 $ip
do
  printf "No está\n"
  sleep $ciclo
done
printf "ip localizada\n"
sleep $ciclo
# Switch the light back on.
./rpin 3 2
| true |
2958f2083cd70d49732233356f60b5408e034da3 | Shell | Biogitte/chat_bot | /exec/fetch_data.sh | UTF-8 | 239 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Download a Kaggle dataset via the python helper and report the result.
# Required environment variables:
#   KAGGLE_DATASET  - Kaggle dataset identifier to fetch
#   KAGGLE_SCRIPT   - path to the python downloader invoked below
#   TRAINING_DATA   - destination directory for the download
#   KAGGLE_NEW_NAME - filename given to the downloaded dataset
echo 'Downloading Kaggle dataset:' "$KAGGLE_DATASET"
python3 "$KAGGLE_SCRIPT" "$KAGGLE_DATASET" "$TRAINING_DATA" "$KAGGLE_NEW_NAME"
echo 'Download completed. The file can be found here:' "$TRAINING_DATA"'/'"$KAGGLE_NEW_NAME"
3bc2d6d2bb104b6859a234328586e3cca4dfbe7d | Shell | pmpardeshi/bash_scripts | /file_handeling/read3.sh | UTF-8 | 329 | 3 | 3 | [] | no_license | #!/usr/bin/env bash
#read 3 lines from file my solution
# Print the first three lines of the file named by $1, each prefixed with
# a running line number, then stop reading.
COUNT=1
# IFS='' keeps leading/trailing whitespace; -r keeps backslashes literal.
while IFS='' read -r LINE
do
  # BUG FIX: the expansions were unquoted, which collapsed each line's
  # internal whitespace; quoting preserves the line verbatim.
  echo "$COUNT $LINE"
  if [ "$COUNT" -eq 3 ]
  then
    break
  fi
  ((COUNT++))
# BUG FIX: the input redirection was unquoted (<$1) and broke on file
# names containing spaces.
done <"$1"
exit 0

# output
# :~/pypractice/bash/shell scripting$ ./read3.sh names.txt
# 1 pramod
# 2 shubham
# 3 navin
# :~/pypractice/bash/shell scripting$
| true |
350d1d6f8d209de2f4287a4e1f98fad5ff24ca10 | Shell | BangHyunjoon/temp | /temp/NNPAgent/SMSAgent/bin/shell/AIX/OSInfoAIX.sh | UTF-8 | 391 | 3.09375 | 3 | [] | no_license | #!/bin/sh
# Make the standard tool locations available regardless of caller PATH.
PATH=$PATH:/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin:/usr/local/sbin
export PATH
# Force English tool output so downstream parsing is stable.
LANG=en
export LANG
# Size in bytes of the cached OS-level report, parsed from ls output;
# empty when the file does not exist.
# (The former FILE_CNT variable was computed but never used and has been
# removed.)
FILE_SIZE=`ls -al ../aproc/shell/OSInfoAIX.dat 2> /dev/null | awk '{print $5}'`
# Regenerate the cache when it is missing, empty, or holds at most one
# byte. (The redundant "-eq 0" clause was folded into "-le 1"; the -z
# guard keeps the numeric test from erroring on a missing file.)
if [ -z "$FILE_SIZE" ] || [ "$FILE_SIZE" -le 1 ] ; then
echo $FILE_SIZE
oslevel > ../aproc/shell/OSInfoAIX.dat
fi
| true |
0521f8fded44b9987a0d4cbffcb94c7f14455e04 | Shell | srapisarda/stypes | /scripts/ndl2sql/ndl2sql.sh.bkp | UTF-8 | 856 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
NDL=$1
STYPES_JAR=/home/hduser/development/stypes/target/scala-2.12/stypes_2.12-1.1.1.jar
STYPES_FLINK_JAR=/home/hduser/development/stypes-flink/target/scala-2.12/stypes-flink_2.12-1.0.jar
for PREDICATE in "p3"
do
for PAR in 10
do
for TTL in 5
do
NDL_FLATTEN_FILE="$1_rew.dlp"
# shellcheck disable=SC2091
NDL_FLATTEN=$(java -cp $STYPES_JAR uk.ac.bbk.dcs.stypes.utils.NdlSubstitution "$NDL" $PREDICATE)
echo "$NDL_FLATTEN" > "$NDL_FLATTEN_FILE"
SQL=$(java -cp $STYPES_JAR uk.ac.bbk.dcs.stypes.sql.SqlUtils "$NDL_FLATTEN_FILE" "$2")
#echo ${SQL}
java -cp $STYPES_JAR uk.ac.bbk.dcs.stypes.utils.UtilRunner "$SQL" abc
/opt/flink/bin/flink run \
-c uk.ac.bbk.dcs.stypes.flink.FlinkRewritingSql \
-p $PAR \
$STSTYPES_FLINK_JAR $TTL sql_$PAR_$TTL true $SQL
#echo "submitte q45 ttl: $ttl, par: $par"
#sleep 30
done
done
done
| true |
83910ec36952f5340db6357224d395a739ca8175 | Shell | wzinke/myfia | /fsl_mad | UTF-8 | 1,539 | 3.96875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Print the fsl_mad help text on stdout and terminate with exit status 1.
Usage() {
cat << EOF
Usage: fsl_mad <infile> [options]

    calculates the median absolute deviation for a 4D-datafile

OPTIONS:
        -out    basename for the data output
        -mask   use file for masking region
        -keep   do not delete temporary files

###########################################################
##   (c) wolf zinke (2008) - part of the MaFIA toolbox   ##
##        > MAcaque Functional Image Analysis <          ##
##           Released under the MIT license              ##
###########################################################
EOF
exit 1
}
# First argument is the 4D input file; everything after it is options.
if [ $# -lt 1 ]; then
    Usage
else
    flnm=$1;
    shift
fi

outnm=${flnm}_mad   # output basename (-out overrides)
keep_tmp=0          # keep intermediate files when 1 (-keep)
do_mask=0           # restrict computation to a mask when 1 (-mask)

# Parse the optional arguments.
while [ $# -gt 0 ] ;
do
    case $1 in
        -out)   outnm=$2
                shift 2
                ;;
        -mask)  maskfile=$2
                do_mask=1
                shift 2
                ;;
        -keep)  keep_tmp=1
                # BUG FIX: '-keep' was never consumed, so the while loop
                # spun forever whenever -keep was passed.
                shift
                ;;
        -*)     echo "Wrong option: <$1>"
                echo ""
                Usage
                ;;
        *)      break
                ;;
    esac
done

# BUG FIX: the old tmpnam+rm+mkdir sequence was racy and 'tmpnam' is not
# available on modern systems; mktemp -d creates the directory atomically.
tmpdir=$(mktemp -d) || exit 1

# Temporal median image (optionally restricted to the mask region).
if [ $do_mask -eq 1 ]
then
    fslmaths $flnm -mas $maskfile -Tmedian $tmpdir/tmp_median -odt float
else
    fslmaths $flnm -Tmedian $tmpdir/tmp_median -odt float
fi

# MAD = median over time of |data - median(data)|.
fslmaths $flnm -sub $tmpdir/tmp_median -abs $tmpdir/tmp_residuals -odt float
fslmaths $tmpdir/tmp_residuals -Tmedian $outnm

if [ $keep_tmp -eq 0 ]
then
    rm -r $tmpdir
fi
8278d155e3a585ae23c81b3f39720276396fb468 | Shell | wangsqly0407/easypack | /maven/easypack_mvn.sh | UTF-8 | 3,296 | 3.796875 | 4 | [] | no_license | #!/bin/sh
###############################################################################
#
#VARS INIT
#
###############################################################################
# Installs Oracle JDK 8u172 and Apache Maven 3.5.3 under /usr/local and
# registers JAVA_HOME / M2_HOME in /etc/profile. Must run as root.
# NOTE(review): the Oracle download URL is version-pinned and cookie-gated;
# Oracle regularly retires these links, so JDK_URL/JDK_TAR_GZ/DIR_NEW_JDK
# have to be bumped together.
JAVA_HOME=/usr/local/java
MVN_HOME=/usr/local/maven
DIR_DOWNLOAD=/tmp/download.$$       # per-run scratch dir (PID-suffixed)
DIR_NEW_MVN=apache-maven-3.5.3
DIR_NEW_JDK=jdk1.8.0_172
JDK_TAR_GZ=jdk-8u172-linux-x64.tar.gz
JDK_URL="http://download.oracle.com/otn-pub/java/jdk/8u172-b11/a58eab1ec242421181065cdc37240b08/${JDK_TAR_GZ}"
MAVEN_VERSION=3.5.3
MAVEN_TAR_GZ=apache-maven-${MAVEN_VERSION}-bin.tar.gz
MAVEN_URL="http://www-us.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/${MAVEN_TAR_GZ}"
PROFILES=/etc/profile

###############################################################################
#
#DOWNLOAD JDK & MAVEN
#
###############################################################################
mkdir -p "${DIR_DOWNLOAD}"
# BUG FIX: 'cd' was unchecked; on failure the script would download into
# (and later delete from) the caller's current directory.
cd "${DIR_DOWNLOAD}" || exit 1

date
echo "## Download begins : JDK : ${JDK_TAR_GZ}"
# Idiom fix: test the command directly instead of inspecting $? afterwards.
# The cookie header acknowledges Oracle's license gate.
if ! wget --header "Cookie: oraclelicense=accept-securebackup-cookie" "${JDK_URL}"; then
  echo "failed to download JDK"
  exit 1
fi
echo "## Download ends   : JDK : ${JDK_TAR_GZ}"
echo

date
echo "## Download begins : MAVEN: ${MAVEN_TAR_GZ}"
if ! wget "${MAVEN_URL}"; then
  echo "failed to download maven"
  exit 1
fi
echo "## Download ends   : MAVEN: ${MAVEN_TAR_GZ}"
echo

echo "## Check download"
ls -l "${JDK_TAR_GZ}" "${MAVEN_TAR_GZ}"

###############################################################################
#
#INSTALL JDK & MAVEN
#
###############################################################################
#create directories
mkdir -p "${JAVA_HOME}" "${MVN_HOME}"

date
echo "## Install begins : JDK : ${JAVA_HOME}"
cd "${JAVA_HOME}" || exit 1
gunzip -c "${DIR_DOWNLOAD}/${JDK_TAR_GZ}" | tar xv
echo "## Install ends   : JDK : ${JAVA_HOME}"
echo

date
echo "## Install begins : MAVEN : ${MVN_HOME}"
cd "${MVN_HOME}" || exit 1
gunzip -c "${DIR_DOWNLOAD}/${MAVEN_TAR_GZ}" | tar xv
echo "## Install ends   : MAVEN : ${MVN_HOME}"

###############################################################################
#
#ENVIRONMENT VARS
#
###############################################################################
# NOTE(review): these lines are appended on every run, so /etc/profile
# accumulates duplicates when the script is executed more than once.
echo "## Env setting : JDK : JAVA_HOME + PATH"
echo "" >>"${PROFILES}"
echo "#JDK Setting" >>"${PROFILES}"
echo "export JAVA_HOME=${JAVA_HOME}/${DIR_NEW_JDK}" >>"${PROFILES}"
echo "export PATH=\${JAVA_HOME}/bin:\$PATH" >>"${PROFILES}"
# Also export for the remainder of this script (version checks below).
export JAVA_HOME=${JAVA_HOME}/${DIR_NEW_JDK}
export PATH=${JAVA_HOME}/bin:$PATH

echo "## Env setting : M2_HOME : + PATH"
echo "" >>"${PROFILES}"
echo "#Maven Setting" >>"${PROFILES}"
echo "export M2_HOME=${MVN_HOME}/${DIR_NEW_MVN}" >>"${PROFILES}"
echo "export PATH=\${M2_HOME}/bin:\$PATH" >>"${PROFILES}"
export M2_HOME=${MVN_HOME}/${DIR_NEW_MVN}
export PATH=${M2_HOME}/bin:$PATH

###############################################################################
#
#CONFIRM VERSION
#
###############################################################################
echo "## Check Java version"
java -version
echo

echo "## Check Maven version"
mvn --version
echo

###############################################################################
#
#REMOVE DOWNLOAD FILES
#
###############################################################################
echo "## Delete Download files"
rm -rf "${DIR_DOWNLOAD}"
| true |
62b01bc1366be12af379a71e0362a3b0d27735de | Shell | MauricioTorres111/Practicas | /Practica3/practica3.sh | UTF-8 | 6,306 | 3.9375 | 4 | [] | no_license | #!/bin/bash
# Positional arguments:
#   $1 -> opt1: object kind (1 = folder, 2 = file); -i coerces to integer
#   $2 -> opt2: operation (1 create, 2 rename, 3 move, 4 delete)
#   $3 -> name: target name; $4 -> new name or destination directory
declare -i opt1=$1
declare -i opt2=$2
name=$3
new_name=$4
new_dir=$4
############################################################################
# Carpetas
createFolder(){
	# Create directory $1 in the current working directory, refusing to
	# clobber an existing entry. Status messages go to stderr.
	# BUG FIX: the existence check used the stale global $name (set from
	# $3) *before* reassigning it from $1, and word-split 'ls' output,
	# which broke on names with spaces and missed dotfiles; a direct
	# -e test covers all of that.
	name=$1
	if [ -e "$name" ]; then
		echo "[!] Esta carpeta ya existe." > /dev/stderr
	else
		echo "[*] La carpeta se creo satisfactoriamente." > /dev/stderr
		mkdir "$name"
	fi
}
renameFolder(){
	# Rename directory $1 to $2. Refuses when $1 does not exist or when
	# an entry named $2 already exists. Status messages go to stderr.
	# BUG FIX: existence was previously detected by word-splitting 'ls'
	# output, which broke on names with spaces and missed dotfiles.
	name="$1"
	new_name="$2"
	if [ -e "$name" ]; then
		if [ -e "$new_name" ]; then
			echo "[!] Ya existe un archivo o carpeta con el nombre '$new_name'" > /dev/stderr
		else
			echo "[*] El nombre de la carpeta se cambio satisfactoriamente." > /dev/stderr
			mv "$name" "$new_name"
		fi
	else
		echo "[!] La carpeta '$name' no existe." > /dev/stderr
	fi
}
moveFolder(){
	# Move directory $1 to path $2 (into $2 when it is a directory).
	# BUG FIX: existence was detected by word-splitting 'ls'/'dir'
	# output (broken for names with spaces); arguments are now quoted.
	name=$1
	new_dir=$2
	if [ -e "$name" ]; then
		echo "[*] La carpeta se movio satisfactoriamente." > /dev/stderr
		mv "$name" "$new_dir"
	else
		echo "[!] No se pudo encontrar la carpeta '$name'" > /dev/stderr
	fi
}
removeFolder(){
	# Recursively delete directory $1; complain when it does not exist.
	# BUG FIX: existence was detected by word-splitting 'dir' output
	# (broken for names with spaces); arguments are now quoted.
	name=$1
	if [ -e "$name" ]; then
		echo "[*] La carpeta '$name' se elimino satisfactoriamente." > /dev/stderr
		rm -r "$name"
	else
		echo "[!] La carpeta '$name' no existe." > /dev/stderr
	fi
}
#####################################################################################
# Archivos
createFile(){
	# Create empty file $1 in the current working directory, refusing to
	# clobber an existing entry. Status messages go to stderr.
	# BUG FIX: like createFolder, the existence check used the stale
	# global $name before reassigning it from $1 and word-split 'ls'
	# output; a direct -e test covers both problems.
	name=$1
	if [ -e "$name" ]; then
		echo "[!] Este archivo ya existe." > /dev/stderr
	else
		echo "[*] El archivo se creo satisfactoriamente." > /dev/stderr
		touch "$name"
	fi
}
renameFile(){
	# Rename file $1 to $2. Refuses when $1 does not exist or when an
	# entry named $2 already exists. Status messages go to stderr.
	# BUG FIX: existence was detected by word-splitting 'ls' output.
	name="$1"
	new_name="$2"
	if [ -e "$name" ]; then
		if [ -e "$new_name" ]; then
			echo "[!] Ya existe un archivo o carpeta con el nombre '$new_name'" > /dev/stderr
		else
			echo "[*] El nombre del archivo se cambio satisfactoriamente." > /dev/stderr
			mv "$name" "$new_name"
		fi
	else
		# BUG FIX: the message said "La carpeta" (folder) inside this
		# file-handling function -- a copy/paste slip.
		echo "[!] El archivo '$name' no existe." > /dev/stderr
	fi
}
moveFile(){
	# Move file $1 to path $2 after normalising the destination.
	detect="False"
	files=$(dir)
	name=$1
	new_dir=$2
	# Destination normalisation:
	#  - paths containing ".." are used as-is;
	#  - paths already containing a backslash are used as-is;
	#  - otherwise a backslash is PREPENDED to the destination.
	# NOTE(review): prepending "\" produces a literal backslash-prefixed
	# path on Linux; this looks like an attempt at Windows-style paths or
	# a misunderstood "/" -- confirm the intended behaviour before use.
	if [[ $new_dir == *".."* ]]; then
		new_dir=$new_dir
	else
		if [[ $new_dir == *"\\"* ]]; then
			new_dir=$new_dir
		else
			new_dir="\\$new_dir"
		fi
	fi
	# Look for the source entry among the (word-split) directory listing.
	for file in $files; do
		# If this condition holds, the entry to move does exist.
		if [[ $file == $name ]]; then
			detect="True"
			echo "[*] La carpeta se movio satisfactoriamente." > /dev/stderr
			mv $name $new_dir
			break
		else
			detect="False"
		fi
	done
	if [[ $detect == "False" ]]; then
		echo "[!] No se pudo encontrar la carpeta '$name'" > /dev/stderr
	fi
}
removeFile(){
	# Delete file $1; complain when it does not exist. Messages on stderr.
	# BUG FIX: existence was detected by word-splitting 'dir' output
	# (broken for names with spaces); arguments are now quoted.
	name=$1
	if [ -e "$name" ]; then
		echo "[*] El archivo '$name' se elimino satisfactoriamente." > /dev/stderr
		rm "$name"
	else
		echo "[!] El archivo '$name' no existe." > /dev/stderr
	fi
}
########################################################################
# Aplicacion
# $opt1 selects the object kind (1 = folder, 2 = file) and $opt2 the
# operation (1 create, 2 rename, 3 move, 4 delete).
# BUG FIX: each helper used to be piped into echo ("createFolder $name |
# echo ..."), which ran the helper in a subshell and fed its stdout into
# a command that ignores stdin; the progress message is now printed first
# and the helper is called normally with quoted arguments.
# NOTE(review): the file-branch progress messages still say "carpeta"
# (folder); kept verbatim as they are user-facing strings.
if [[ ($opt1 == 1 || $opt1 == 2) && $opt2 -ge 1 && $opt2 -le 4 ]]; then
	# Carpetas
	if [[ $opt1 == 1 ]]; then
		if [[ $opt2 == 1 ]]; then
			# Crear Carpeta
			echo "[*] Creando carpeta..."
			createFolder "$name"
		elif [[ $opt2 == 2 ]]; then
			# Renombrar Carpeta
			echo "[*] Renombrando carpeta..."
			renameFolder "$name" "$new_name"
		elif [[ $opt2 == 3 ]]; then
			# Mover Carpeta
			echo "[*] Moviendo carpeta..."
			moveFolder "$name" "$new_dir"
		elif [[ $opt2 == 4 ]]; then
			# Eliminar Carpeta
			echo "[*] Eliminando carpeta..."
			removeFolder "$name"
		fi
	# Archivos
	elif [[ $opt1 == 2 ]]; then
		if [[ $opt2 == 1 ]]; then
			# Crear Archivo
			echo "[*] Creando carpeta..."
			createFile "$name"
		elif [[ $opt2 == 2 ]]; then
			# Renombrar Archivo
			echo "[*] Renombrando carpeta..."
			renameFile "$name" "$new_name"
		elif [[ $opt2 == 3 ]]; then
			# Mover Archivo
			echo "[*] Moviendo carpeta..."
			moveFile "$name" "$new_dir"
		elif [[ $opt2 == 4 ]]; then
			# Eliminar Archivo
			echo "[*] Eliminando carpeta..."
			removeFile "$name"
		fi
	fi
else
	echo "[!] Esta opcion no existe."
	echo "::: Lea el archivo README.txt para mas informacion."
fi
978602b69e4e9dfedf31e085aac49cfa9b74d100 | Shell | Mizzrym/shellscripts | /logstash_reindexer/reindex_logsta.sh | UTF-8 | 1,198 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# This thing reads all logstash indices, reindexes them and deletes the old ones
# It's not a very beautiful or sophisticated script, but it get's the job done
#
set -o pipefail
set -o nounset

# connectionstring to the elasticsearch server
readonly cstr="localhost:9200"
# don't delete indices if warnings/errors occured
readonly safemode=0

# Fetch every logstash index name (column 3 of _cat/indices).
indices=$(curl -s -X GET "${cstr}/_cat/indices?v" | grep 'logstash' | awk '{ print $3 }')

for index in $indices ; do
	# Skip indices produced by a previous run.
	# (Idiom fix: bash pattern matching instead of echo | grep | wc -l.)
	if [[ "$index" == *reindexed* ]]; then
		continue
	fi
	echo "reindexing $index"
	# _reindex request body: copy <index> into <index>.reindexed.
	json="
	{
	  \"source\": {
	    \"index\": \"${index}\"
	  },
	  \"dest\": {
	    \"index\": \"${index}.reindexed\"
	  }
	}
	"
	out=$(curl -s -X POST "${cstr}/_reindex?pretty" -H 'Content-Type: application/json' -d"${json}")
	# An empty "failures" array in the pretty-printed response means the
	# copy succeeded. NOTE(review): grepping JSON is fragile; jq would be
	# the robust choice if it may be added as a dependency.
	if [[ "$out" == *'"failures" : [ ]'* ]]; then
		echo "success, deleting old index"
		curl -XDELETE "${cstr}/${index}?pretty"
	else
		if [ "$safemode" -eq 0 ] ; then
			echo "failed, deleting nontheless"
			echo $out
			curl -XDELETE "${cstr}/${index}?pretty"
		else
			echo "failed"
			echo $out
		fi
	fi
	echo
done
| true |
204a35d63e02e75765fe4b5ebfe34bcbde67e159 | Shell | Inist-CNRS/inist-tools | /tools/service-restart.sh | UTF-8 | 2,800 | 3.6875 | 4 | [] | no_license | #!/usr/bin/env bash
################################################################################
#
# inist-tools / tools / service-restart.sh
#
# Wrapper that restarts a service in the background.
#
# @author : INIST-CNRS/DPI
#
################################################################################
# ------------------------------------------------------------------------------
# Resources (shared helper libraries: _it_std_consoleMessage, $FALSE, ...)
# ------------------------------------------------------------------------------
source "/opt/inist-tools/libs/std.rc"
source "/opt/inist-tools/libs/ansicolors.rc"
# ------------------------------------------------------------------------------
# Global variables
# ------------------------------------------------------------------------------
# USER=$(logname)
LOCALUSER=$(who am i | awk '{print $1}' | head -1)
ID=$(which id)
# GROUP=$($ID -g -n "$LOCALUSER")
ENV_DIR="/opt/inist-tools/env"
CONF_DIR="/opt/inist-tools/conf"
SERVICE=$(which service)
# ------------------------------------------------------------------------------
# Local variables
# ------------------------------------------------------------------------------
# $1 = name of the service to restart (mandatory).
ServiceName="$1"
if [ -z "$ServiceName" ]; then
  _it_std_consoleMessage "ERROR" "Nom du service manquant"
  exit $FALSE
fi
# ------------------------------------------------------------------------------
# Restart the service -- strategy depends on the detected distribution:
# pre-systemd Ubuntu releases use 'service', everything else systemctl.
# HOST_SYSTEM/HOST_SYSTEM_VERSION presumably come from the sourced std.rc.
# ------------------------------------------------------------------------------
case "$HOST_SYSTEM" in
  ubuntu)
    case "$HOST_SYSTEM_VERSION" in
      "12.04" | "12.10" | "13.04" | "13.10" | "14.04" | "14.10" )
        $SERVICE $ServiceName restart > /dev/null 2>&1
        ;;
      *)
        systemctl daemon-reload > /dev/null 2>&1
        sleep 1
        systemctl restart "$ServiceName" > /dev/null 2>&1
        ;;
    esac
    ;;
  debian)
    systemctl daemon-reload > /dev/null 2>&1
    sleep 1
    systemctl restart "$ServiceName" > /dev/null 2>&1
    ;;
  *)
    _it_std_consoleMessage "ERROR" "Je ne sais pas comment redémarrer « $ServiceName » sur ce système !"
    exit $FALSE
    ;;
esac
# ------------------------------------------------------------------------------
# Wait until the service shows up in the process list before declaring it
# started. NOTE(review): this is a tight busy-wait without any sleep or
# timeout -- it spins at full CPU and never returns if the service fails.
# ------------------------------------------------------------------------------
while :
do
  ServicePIDs=$(pgrep "$ServiceName")
  if [ -z "$ServicePIDs" ]; then
    false
  else
    break
  fi
done
notify-send --expire-time=1500 --icon="/opt/inist-tools/libs/gfx/cnrs_64px.png" --urgency=low "INFORMATION" "Le service « $ServiceName » est redémarré"
# ------------------------------------------------------------------------------
# Done
# ------------------------------------------------------------------------------
exit 0
| true |
5566a4f5af8e2e685d5ea72e90dda5b568745520 | Shell | nisarpro/shell-random | /live/sh/scripts/examples/trapping-control-c.sh | UTF-8 | 371 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env sh
# Tested 2017-10-24 on Devuan 1.0.0-jessie-i386-DVD with dash 0.5.7-4+b1
# Demo: install a SIGINT handler and count down while the user may press
# Ctrl-C. The leading backslashes bypass any alias/function shadowing of
# echo/seq/sleep.
# INT is ^C
trap control_c INT
# Handler invoked on every Ctrl-C; the script continues running after it.
control_c()
{
  \echo 'control-c detected.'
  # You can also exit this entire script with:
  #exit 0
}
# Count down 3..1, one second per step, giving time to test the trap.
for i in $( \seq 3 -1 1 ); do
  \echo "Exiting in $i.."
  \sleep 1
done
\echo
\echo 'Exiting trap example."'
| true |
42b2364ede4e467ddc1e0c8cdb7b9f82434d109b | Shell | vtumuluri/cockroach | /build/teamcity-sqllogictest.sh | UTF-8 | 1,177 | 2.8125 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-bsl-1.0",
"MIT",
"LicenseRef-scancode-cockroach"
] | permissive | #!/usr/bin/env bash
# CI entry point: runs CockroachDB's SQLLite logic-test suites inside the
# builder container, emitting JSON test events for TeamCity.
set -euxo pipefail
# Shared helpers (run_json_test, ...) live next to this script.
source "$(dirname "${0}")/teamcity-support.sh"
mkdir -p artifacts
# TestSqlLiteLogic needs the sqllogictest repo from the host's GOPATH, so we
# can't hide it like we do in the other teamcity build scripts.
# TODO(jordan) improve builder.sh to allow partial GOPATH hiding rather than
# the all-on/all-off strategy BUILDER_HIDE_GOPATH_SRC gives us.
export BUILDER_HIDE_GOPATH_SRC=0
# Run SqlLite tests.
# Need to specify the flex-types flag in order to skip past variations that have
# numeric typing differences.
# TODO(yuzefovich): remove crdb_test_off tag once sqllite tests have been
# adjusted to run in reasonable time with batch size randomizations.
run_json_test build/builder.sh \
  stdbuf -oL -eL \
  make test GOTESTFLAGS=-json TESTFLAGS="-v -bigtest -flex-types" TESTTIMEOUT='24h' PKG='./pkg/sql/logictest' TESTS='^TestSqlLiteLogic$$' TAGS=crdb_test_off
# Run the tests with a multitenant configuration.
run_json_test build/builder.sh \
  stdbuf -oL -eL \
  make test GOTESTFLAGS=-json TESTFLAGS="-v -bigtest -flex-types" TESTTIMEOUT='24h' PKG='./pkg/ccl/logictestccl' TESTS='^TestTenantSQLLiteLogic$$' TAGS=crdb_test_off
| true |
fa3fe1ff3272918e73b938dbc8576c07554bcad4 | Shell | PiDelport/twilio-oai-rust | /fetch-specs.sh | UTF-8 | 436 | 3.3125 | 3 | [] | no_license | #!/bin/sh -ex
# Fetch specs for twilio-oai $version
#
# Usage: version=… ./fetch-specs.sh
#
# Specs: https://github.com/twilio/twilio-oai
: "${version:?}"
if test -d specs; then rm -r specs; fi
mkdir specs
curl \
--fail \
--location \
"https://github.com/twilio/twilio-oai/archive/refs/tags/${version}.tar.gz" \
|
tar -xz \
--directory 'specs' \
--strip-components=3 \
"twilio-oai-${version}/spec/yaml"
| true |
b1a39f43d5a0d82e6a5ccd25c1e5c9ed14a5920c | Shell | bu6hunt3r/nix-dotfiles | /users/bin/mpcprevious.sh | UTF-8 | 226 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env sh
# Previous-track behaviour for mpc/mpd, mimicking common players: within
# the first 5 seconds of a track jump to the previous track, otherwise
# restart the current track.
# Elapsed time of the current track ("M:SS") from mpc's status line.
time=$(mpc | sed -n 2p | awk '{print $3}' | sed 's/\/.*//')
min=$(echo $time | sed 's/\:.*//' )
# BUG FIX (latent): the seconds extraction used 's/\.*://', which only
# removed the colon and left the minutes digits glued onto the seconds
# ("1:23" -> "123"); 's/.*://' strips everything up to the colon. The old
# value was only ever compared when min was 0, so behaviour is unchanged.
sec=$(echo $time | sed 's/.*://' )
if [ $min -eq 0 ] && [ $sec -lt 6 ]
then mpc -q prev
else mpc -q seek 0
fi
| true |
364aaec84814de066499776e5e72535ede792d77 | Shell | brownman/magnify_the_small_2 | /BANK/GISTS/BANK/add_snippet/.old1/fu2.sh | UTF-8 | 793 | 2.921875 | 3 | [] | no_license | #http://www.commandlinefu.com/commands/view/10275/search-commandlinefu-and-view-syntax-highlighted-results-in-vim#comment
#
#http://vim.wikia.com/wiki/Folding
#manual: set fdm=marker
#update .gvimrc:
#augroup vimrc
#  au BufReadPre * setlocal foldmethod=marker
#  au BufWinEnter * if &fdm == 'indent' | setlocal foldmethod=manual | endif
#augroup END
# Fetch commandlinefu snippets for every keyword argument, build a
# fold-marker annotated file under /tmp, and copy a ready-to-run vim
# command line to the clipboard.
str=$1
t=/tmp/snippet
file_new=${t}_${str}
# One iteration per keyword argument; shift consumes them.
until [[ -z $1 ]]; do
  # Level-1 fold header for this keyword.
  echo -e "\n# $1 {{{1" >> $t;
  # Query commandlinefu (keyword must be supplied base64-encoded in the
  # URL), drop the 2-line header, turn every comment line into a level-2
  # fold marker, and keep a copy for counting.
  curl -L -s "commandlinefu.com/commands/matching/$1/`echo -n $1|base64`/plaintext" | sed '1,2d;s/^#.*/& {{{2/g' | tee -a $t > $t.c;
  # Patch the keyword header with the number of snippets fetched.
  sed -i "s/^# $1 {/# $1 - `grep -c '^#' $t.c` {/" $t;
  shift;
done;
cp $t $file_new
# Put a viewer command on the X clipboard (folding enabled, read-only).
echo "vim -u /dev/null -c \"set ft=sh fdm=marker fdl=1 noswf\" -M $file_new" | xsel --clipboard;
#rm $t $t.c
| true |
cb664182fab81bdaf4e7e68b961769366a958de1 | Shell | dc4cities/dc4es | /datasets/italy/data/windprediction/scripts/predictedHourAggregation.sh | UTF-8 | 1,075 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#ZONA,ORA01,ORA02,ORA03,ORA04,ORA05,ORA06,ORA07,ORA08,ORA09,ORA10,ORA11,ORA12,ORA13,ORA14,ORA15,ORA16,ORA17,ORA18,ORA19,ORA20,ORA21,ORA22,ORA23,ORA24
# Aggregate Italian per-zone hourly wind-prediction CSV (header above)
# into one total per hour, timestamped from the date embedded in the
# input file name.
SAVEIFS=$IFS;
IFS=$(echo -en "\n\b");
# Require an input file argument.
if [ A$1 == "A" ]; then
  echo "Need an input file"
  exit 0;
fi
MIX_ENERGY_DATA_FILE=$1
# Extract the YYYYMMDD date from the file name: it starts at the first
# occurrence of "20" (expr index is 1-based, hence the -1 offset).
# NOTE(review): this breaks for file names containing "20" before the
# date -- confirm the naming convention.
filename=`basename $1`;
index=`expr index "$filename" '20'`;
dat=${filename:index-1:8};
y=${dat:0:4}
m=${dat:4:2}
d=${dat:6:2}
#Filtering regions
awk -F, '{if(match($1,/CNOR|CSUD|NORD|SARD|SICI|SUD/)) print;}' $MIX_ENERGY_DATA_FILE > filtered_regions_to_match_actual_regions.csv;
#then Aggregate all regions electricity prediction by hour
# Sum column (hour+1) across all regions for each of the 24 hours and
# print "YYYY-MM-DDTHH:00:00 , total" per hour via mktime/strftime.
awk -v y=$y -v m=$m -v d=$d -F, 'BEGIN{for(i=1;i<=24;i++) data_hours[i] = 0;} {for(i=1;i<=24;i++) data_hours[i] +=$(i+1);}END{for(i in data_hours) {ddate=mktime(y" "m" "d" "i" "00" "00); dd=strftime("%Y-%m-%dT%H:00:00", ddate); print dd, ",", data_hours[i];}}' filtered_regions_to_match_actual_regions.csv; # > output_hourly_prediction.csv;
exit 0
#$1:date, $2: generation
#dattt=`expr match $filename '20[:digit:]+'`;
#echo "date $dattt";
| true |
bd3896be117727c1c7ee089f76f8b98a50e1850d | Shell | Tanami/VagrantLamp | /provision.sh | UTF-8 | 2,054 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# $1 = project name; drives the vhost file name, the server name and the
# document root used throughout provisioning.
PROJECT=$1;
apache_vhost_file="/etc/apache2/sites-available/$PROJECT.conf"
apache_server_name="www.$PROJECT.com"
# Provision the whole LAMP stack in order: apt refresh, Apache, PHP, MySQL.
main() {
    echo Provisioning vagrant instance
    update
    apache
    php
    mysql
}
# Refresh the apt package index and upgrade everything already installed.
update() {
    echo Making sure everything is up to date
    sudo apt-get update > /dev/null
    sudo apt-get -y upgrade > /dev/null
}
# Install Apache2 and generate the project's virtual host, then enable it.
apache() {
    echo Installing and Setting up Apache2
    sudo apt-get -y install apache2 > /dev/null

    # BUG FIX (x2):
    #  * 'sudo cat << EOF > file' performed the redirection as the
    #    invoking (unprivileged) user, so writing under /etc/apache2
    #    failed; 'sudo tee' makes the write itself run as root.
    #  * ${APACHE_LOG_DIR} was expanded by the shell at provision time
    #    (to an empty string, yielding "ErrorLog /error.log"); it is an
    #    Apache runtime variable from envvars and must land literally in
    #    the config, hence the backslash escapes.
    sudo tee ${apache_vhost_file} > /dev/null << EOF
<VirtualHost *:80>
    ServerName ${apache_server_name}
    ServerAdmin webmaster@localhost
    DocumentRoot /var/www/$PROJECT
    <Directory /var/www/$PROJECT>
        Options Indexes FollowSymLinks MultiViews
        AllowOverride All
        Order allow,deny
        allow from all
    </Directory>
    ErrorLog \${APACHE_LOG_DIR}/error.log
    CustomLog \${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
EOF
    sudo a2dissite 000-default > /dev/null
    sudo a2ensite $PROJECT > /dev/null
    sudo a2enmod rewrite > /dev/null
    sudo service apache2 restart > /dev/null
}
# Install PHP 5 with the curl and mysql extensions.
php(){
    echo Installing PHP
    sudo apt-get -y install php5 php5-curl php5-mysql > /dev/null
}
# Install MySQL non-interactively and open it up for external access.
# NOTE(review): the root password is hard-coded as 'root' and root login
# is granted from any host ('%') -- acceptable only for a throwaway
# Vagrant box, never for anything network-reachable.
mysql(){
    echo Installing Mysql
    # Pre-seed debconf so the server install does not prompt for input.
    echo "mysql-server mysql-server/root_password password root" | debconf-set-selections
    echo "mysql-server mysql-server/root_password_again password root" | debconf-set-selections
    sudo apt-get -y install mysql-client mysql-server > /dev/null
    echo "Updating mysql configs in /etc/mysql/my.cnf."
    # Listen on all interfaces instead of localhost only.
    sudo sed -i "s/.*bind-address.*/bind-address = 0.0.0.0/" /etc/mysql/my.cnf
    echo "Updated mysql bind address in /etc/mysql/my.cnf to 0.0.0.0 to allow external connections."
    echo "Assigning mysql user root access on %."
    sudo mysql -u root --password=root --execute "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'root' with GRANT OPTION; FLUSH PRIVILEGES;"
    echo "Assigned mysql root access on all hosts."
    sudo service mysql restart > /dev/null
}
# Script entry point.
main
exit 0
| true |
f46c8855ead59eb7e9546b99465162ad91279752 | Shell | menkhus/falco | /test/version | UTF-8 | 781 | 2.875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Path of the CVE database consumed by falco.
db='/usr/local/falco/db/cvedb'
# vtest <package> <version> <count>
# Runs falco against the CVE db and counts its output lines; when the
# count EQUALS $3 the case is reported as failed and the suite aborts.
# NOTE(review): "equal means failure" looks inverted at first glance --
# presumably a count of 1 corresponds to "no findings", which these
# known-vulnerable versions must never produce. Confirm against falco's
# output format.
function vtest {
	if [ "`../falco -d $db -n $1 -v $2 | wc -l`" -eq $3 ]
	then
		echo "version: package $1, version $2 failed"
		exit 1
	fi
}
echo "test for version 8"
vtest postgresql 8 1
echo "test for version 8.0"
#../falco -d ../vfeed.db -n postgresql -v 8.0
vtest postgresql 8.0 1
echo "test for version 8.1"
vtest postgresql 8.1 1
echo "test for version 8.2"
vtest postgresql 8.2 1
echo "test for version 8.3"
vtest postgresql 8.3 1
echo "test for version 8.5:"
vtest postgresql 8.5: 1
# using CPE knowledge, to override & test broken versioning
echo "test for version 8.4"
vtest postgresql:8.4 '' 1
echo "test for version 8.5:"
vtest postgresql:8.5: '' 1
echo "test for version 8.5:alpha2"
vtest postgresql 8.5:alpha2 1
4f44cd348d1643b46559e27b3f97f7ded7bb3e11 | Shell | dimmells/init | /scripting/04 | UTF-8 | 212 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Watchdog for /etc/crontab:
#  * first run (no ./old snapshot yet): install a crontab entry that
#    re-runs this script, then take the snapshot below;
#  * later runs: mail root whenever /etc/crontab differs from ./old.
if [ -f ./old ];
then
# Compare the snapshot with the live crontab.
if ! diff ./old /etc/crontab >/dev/null ;
then
# NOTE(review): "ALLERT!" is presumably a typo for "ALERT!"; left
# untouched here since it is a runtime string.
echo "File has been changed" | mail -s "ALLERT!" root
fi
else
# NOTE(review): 'crontab -' REPLACES the invoking user's entire crontab
# with this single line, and the relative "./04" path only resolves from
# this directory -- confirm both are intended.
echo "* 0 * * * ./04" | crontab -
fi
# Refresh the snapshot for the next comparison.
cat /etc/crontab > ./old
| true |
8509bf80952393de2297a9e0a74e2fc665f03fb4 | Shell | rportelaw/hbs | /rebuild.sh | UTF-8 | 4,340 | 3.546875 | 4 | [] | no_license | #!/bin/sh
# -------------------------------------------------------
# HBS Platform Manager Script
# author: trsimoes
# -------------------------------------------------------
# -------------------------------------------------------
# Backup
# -------------------------------------------------------
# Archive the whole current /opt/hbs install into a timestamped tarball
# under /backup/hbs before a new deployment overwrites it.
backup_current_application() {
    echo
    echo ---------------------------
    echo Backup old HBS version
    echo ---------------------------
    mkdir -p /backup/hbs
    tar cvf - /opt/hbs/* | gzip > /backup/hbs/$(date +%Y%m%d%H%M%S).tar.gz
    echo +++ OK
}
# -------------------------------------------------------
# STOP APPLICATION
# -------------------------------------------------------
# Gracefully stop one HBS service via its Spring Boot actuator endpoint.
#   $1 - service name (logging only), $2 - HTTP port of the service
stop_hbs_generic() {
    SERVER_NAME=$1
    SERVER_PORT=$2
    echo
    echo ---------------------------
    echo Stopping $SERVER_NAME
    echo ---------------------------
    #ps -ef | grep $SERVER_NAME | grep -v grep | awk '{print $2}' | xargs sudo kill -9
    curl -X POST http://localhost:$SERVER_PORT/actuator/shutdown
    echo +++ OK
}
stop_hbs_server() {
    stop_hbs_generic hbs-server 9090
}
stop_hbs_web() {
    stop_hbs_generic hbs-web 8090
}
# Stop backend first, then the web frontend.
stop_all() {
    stop_hbs_server
    stop_hbs_web
}
# -------------------------------------------------------
# DEPLOY APPLICATION
# -------------------------------------------------------
# Replace the deployed artifacts for service $1 (e.g. "hbs-server") with
# the ones staged under /tmp/hbs/_staging_: the service jar, its
# run-<name>.sh launcher, and a symlink routing the service log to /tmp.
deploy_hbs_generic() {
SERVER_NAME=$1
echo
echo ---------------------------
echo Deploying HBS $SERVER_NAME
echo ---------------------------
# Remove every previously deployed file matching the service prefix.
rm -rf /opt/hbs/$SERVER_NAME*
cp /tmp/hbs/_staging_/$SERVER_NAME*.jar /opt/hbs/
cp /tmp/hbs/_staging_/run-$SERVER_NAME.sh /opt/hbs/
# NOTE(review): mode 766 drops the execute bit for group/other (including
# directory traversal); 755/775 was probably intended -- confirm.
chmod -R 766 /opt/hbs
# Logs live on /tmp; /opt/hbs/<name>.log is just a convenience symlink.
ln -sf /tmp/$SERVER_NAME.log /opt/hbs/$SERVER_NAME.log
echo +++ OK
}
# Per-service wrappers.
deploy_hbs_server() {
deploy_hbs_generic hbs-server
}
deploy_hbs_web() {
deploy_hbs_generic hbs-web
}
# Deploy both services.
deploy_all() {
deploy_hbs_server
deploy_hbs_web
}
# -------------------------------------------------------
# START APPLICATION
# -------------------------------------------------------
# Launch service $1 by running its staged launcher from /opt/hbs in the
# background. Note the cd is not undone, so the script's working
# directory is /opt/hbs afterwards.
start_hbs_generic() {
SERVER_NAME=$1
echo
echo ---------------------------
echo Starting HBS $SERVER_NAME
echo ---------------------------
echo
cd /opt/hbs/
# Background the launcher so this script does not block on the service.
./run-$SERVER_NAME.sh &
echo +++ OK
}
# Per-service wrappers.
start_hbs_server() {
start_hbs_generic hbs-server
}
start_hbs_web() {
start_hbs_generic hbs-web
}
# Start both services.
start_all() {
start_hbs_server
start_hbs_web
}
# -------------------------------------------------------
# VALIDATE PRE CONDITIONS
# -------------------------------------------------------
# Verify that the staging area contains the artifacts needed to deploy
# service $1 (e.g. "hbs-server"): at least one "<name>*.jar" plus the
# matching "run-<name>.sh" launcher. Prints a status line per artifact
# and exits the whole script with status 1 when anything is missing.
validate_generic() {
SERVER_NAME=$1
echo
echo ---------------------------------
echo Validating HBS $SERVER_NAME files
echo ---------------------------------
echo
# The previous test "[ ! -f .../$SERVER_NAME*.jar ]" broke with a
# "too many arguments"-style error when several versioned jars were
# staged. Expand the glob into the function's positional parameters and
# test only the first match; if the glob matches nothing, $1 keeps the
# literal pattern, -f fails, and we still report NOT OK as intended.
set -- /tmp/hbs/_staging_/$SERVER_NAME*.jar
if [ ! -f "$1" ]; then
echo "$SERVER_NAME JAR > NOT OK"
exit 1
else
echo "$SERVER_NAME JAR > OK"
fi
if [ ! -f "/tmp/hbs/_staging_/run-$SERVER_NAME.sh" ]; then
echo "$SERVER_NAME SH > NOT OK"
exit 1
else
echo "$SERVER_NAME SH > OK"
fi
}
validate_server() {
validate_generic hbs-server
}
validate_web() {
validate_generic hbs-web
}
# Validate the staging artifacts for both services.
validate_all() {
validate_generic hbs-server
validate_generic hbs-web
}
# -------------------------------------------------------
# DEPLOY APPLICATION
# -------------------------------------------------------
# High-level update flows: validate staged artifacts, stop the running
# service(s), back up the current deployment, then deploy the new files.
# The start step is commented out in all three flows, so services are
# apparently started separately.
update_all() {
validate_all
stop_all
backup_current_application
deploy_all
#start_all
}
update_hbs_server() {
validate_server
stop_hbs_server
backup_current_application
deploy_hbs_server
#start_hbs_server
}
update_hbs_web() {
validate_web
stop_hbs_web
backup_current_application
deploy_hbs_web
#start_hbs_web
}
# -------------------------------------------------------
# HELP
# -------------------------------------------------------
# Print a short usage hint for the supported update targets.
usage() {
echo "usage: ./rebuild.sh [all|server|web]"
}

## Main script starts here: dispatch on the single command-line argument.
case "$1" in
server)
update_hbs_server
;;
web)
update_hbs_web
;;
all)
update_all
;;
*)
# No argument or an unknown target: show usage and fail.
usage
exit 1
;;
esac
| true |
0dd5d6c20689f28df0e57799a48107e0ce92f930 | Shell | tomswartz07/linux-configs | /backups/backup-weekly.sh | UTF-8 | 728 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# shellcheck disable=SC2086
# Weekly backup cron job: mirror selected directories to the NAS share
# with rsync. RSYNC_OPTIONS is deliberately unquoted at the call sites so
# the option string word-splits into individual flags (hence SC2086).
RSYNC_BIN=/usr/bin/rsync
RSYNC_OPTIONS="-v --recursive -a --delete --times --perms --owner --group --update --checksum --links --compress --protect-args"
BACKUP_DIR=/mnt/newnas/home/magrathea

# Sync only when the NAS mount point is actually present.
if [ -d "${BACKUP_DIR}" ]; then
  # Working documents and project directories.
  for src in /home/tom/Arduino /home/tom/Documents /home/tom/KiCAD /home/tom/wxtoimg; do
    ${RSYNC_BIN} ${RSYNC_OPTIONS} "${src}" "${BACKUP_DIR}"
  done
  # Archived database dumps go into their own subdirectory.
  ${RSYNC_BIN} ${RSYNC_OPTIONS} /mnt/Storage/Backup/pgdumps "${BACKUP_DIR}/pgdumps"
fi
| true |
2e68363748120a15982171f86f6dd352bfe4c71b | Shell | vducuy/ceph-test | /iperf-test.sh | UTF-8 | 1,744 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Usage:
# ./iperf_test.sh <server host> <window size>
# Runs a series of iperf3 throughput tests against <server host>, varying
# first the TCP MSS and then the TCP window size, logs each run under
# result/iperf/<timestamp>/, and finally tars up the log directory.
SERVER_NODE=$1
WSIZE=$2
MTU=1500
NBYTES=2G # Number of bytes
OSEC=5 #Omit 5 first sconds
# Smallest advertised MSS allowed by the kernel (3rd field of the sysctl output).
MIN_MSS=$(sysctl net.ipv4.route.min_adv_mss | awk '{print $3}')
echo "MIN_MSS=${MIN_MSS}"
# NOTE(review): MSS defaults to the MTU here but is never referenced again
# in this script -- looks like dead code, confirm.
if [ "$MSS" = "" ] ; then
MSS=$MTU
fi
if [ "$WSIZE" = "" ] ; then
WSIZE=4M
fi
# Start each session from a clean result tree.
rm -rf result/iperf
# Two assignments on one line: SESSION_LOG picks up the freshly set NOW.
NOW=`date +%y%m%d-%H%M%S` SESSION_LOG="${NOW}"
LOG_DIR=result/iperf/${SESSION_LOG}
mkdir -p ${LOG_DIR}
#Get iperf Default setting
log_file=${LOG_DIR}/iperf-${SERVER_NODE}-Mdefault-wdefault-n${NBYTES}.log
echo $log_file
iperf3 -c ${SERVER_NODE} -f k -n ${NBYTES} > ${log_file}
# Get data by MSS
# (-O skips the first ${OSEC} seconds of each run in the statistics)
for mss in ${MIN_MSS} 1460 2960 8960
do
#echo "iperf3 -c ${SERVER_NODE} -f m -M $mss -w ${WSIZE} -n ${NBYTES}"
log_file=${LOG_DIR}/iperf-${SERVER_NODE}-M${mss}-w${WSIZE}-n${NBYTES}.log
echo $log_file
iperf3 -c ${SERVER_NODE} -f k -O ${OSEC} -M $mss -w ${WSIZE} -n ${NBYTES} > ${log_file}
sleep 5
done
# Get data by Windows Size
for wsize in 512 1M 2M 4M
do
#echo "iperf3 -c ${SERVER_NODE} -f m -w $wsize -n ${NBYTES}"
log_file=${LOG_DIR}/iperf-${SERVER_NODE}-Mdefault-w$wsize-n${NBYTES}.log
echo $log_file
iperf3 -c ${SERVER_NODE} -f k -O ${OSEC} -w $wsize -n ${NBYTES} > ${log_file}
sleep 5
done
# Get data by
#for mss in ${MIN_MSS} 1460 2960 8960
#do
# echo "iperf3 -c ${SERVER_NODE} -f m -M $mss -w ${WSIZE} -n ${NBYTES}"
# iperf3 -c ${SERVER_NODE} -f m -M $mss -w ${WSIZE} -n ${NBYTES} > ${LOG_DIR}/iperf-${SERVER_NODE}-${mss}-w${WSIZE}-n${NBYTES}.log
# sleep 5
#done
#-------------------------------
# Bundle this session's logs for collection.
tar cvzf iperf-${SERVER_NODE}-${SESSION_LOG}.tar.gz ${LOG_DIR}
| true |
19df5dd6f9775fb724375baedaf38cc113da8f6f | Shell | petronny/aur3-mirror | /tremfusion/PKGBUILD | UTF-8 | 2,363 | 2.703125 | 3 | [] | no_license | # Maintainer: Jan Keith Darunday <jkcdarunday@uplb.edu.ph>
# Contributor: Thomas Dziedzic <gostrc@gmail.com>
# Contributor: Dario `dax` Vilardi <dax@deelab.org>
# Contributor: Frozen Fox <frozenfoxz@gmail.com>
# Package metadata consumed by makepkg.
pkgname=tremfusion
pkgver=0.99r3
pkgrel=14
pkgdesc='Community updated Tremulous client.'
url='http://tremfusion.net'
arch=('i686' 'x86_64')
license=('GPL')
depends=('tremulous' 'curl' 'directfb' 'freetype2' 'libogg' 'ncurses' 'sdl')
optdepends=("openal: OpenAL audio support")
install=tremfusion.install
conflicts=('tremfusion-hg')
# Upstream binary tarball plus local launcher scripts and desktop entry.
source=("http://dl.tremfusion.net/files/0.99/r3/Tremfusion-0.99r3-linux32.tar.bz2"
'tremfusion.png'
'tremfusion.desktop'
'tremfusion.sh'
'tremfusion-tty.sh'
'tremfusionded.sh')
md5sums=('3d6df33585ad406d7ebde8ef23c1826a'
'1dd34741fd50422f8103da142afddcc7'
'f49d5326d4fdfde01730dbdf0338e360'
'd6d6147de0a6ef7d1c994f3d109b98af'
'd6d6147de0a6ef7d1c994f3d109b98af'
'd6d6147de0a6ef7d1c994f3d109b98af')
# On x86_64 swap in the 64-bit tarball and its checksum; the trailing
# backslashes keep the "[ ] && ... && ..." chain on one logical line.
[ "$CARCH" = "x86_64" ] && \
source[0]="http://dl.tremfusion.net/files/0.99/r3/Tremfusion-0.99r3-linux64.tar.bz2" && \
md5sums[0]="c58c97bb9f1b99e683e1c35d6c1b1a51"
# NOTE(review): this function installs straight into $pkgdir; with current
# makepkg that logic belongs in package() -- renaming would change the
# makepkg contract, so it is only flagged here, not changed.
build() {
# Install the .desktop and icon files
install -Dm644 "$srcdir/tremfusion.desktop" "$pkgdir/usr/share/applications/tremfusion.desktop"
install -Dm644 "$srcdir/tremfusion.png" "$pkgdir/usr/share/pixmaps/tremfusion.png"
# Copy the executable files
# Note: The binaries are compiled against /usr/local/bin
# So, this is the simplest and cleanest workaround
install -Dm755 "$srcdir/tremulous" "$pkgdir/opt/tremulous/tremfusion/tremfusion"
install -Dm755 "$srcdir/tremulous-tty" "$pkgdir/opt/tremulous/tremfusion/tremfusion-tty"
install -Dm755 "$srcdir/tremded" "$pkgdir/opt/tremulous/tremfusion/tremfusionded"
install -Dm755 "$srcdir/tremfusion.sh" "$pkgdir/usr/bin/tremfusion"
install -Dm755 "$srcdir/tremfusion-tty.sh" "$pkgdir/usr/bin/tremfusion-tty"
install -Dm755 "$srcdir/tremfusionded.sh" "$pkgdir/usr/bin/tremfusionded"
# Copy game data files
install -Dm644 "$srcdir/z-tremfusion-menu-0.99r3.pk3" "$pkgdir/opt/tremulous/tremfusion/z-tremfusion-menu-0.99r3.pk3"
# Ship the game library matching the build architecture.
if [ $CARCH = "x86_64" ] ; then
install -Dm644 "$srcdir/gamex86_64.so" "$pkgdir/opt/tremulous/tremfusion/gamex86_64.so"
else
install -Dm644 "$srcdir/gamex86.so" "$pkgdir/opt/tremulous/tremfusion/gamex86.so"
fi
}
| true |
3ef46904b99726b53c22055a9ac45f157b0ef6cc | Shell | epiphyllum/docker-mirror | /assets/start | UTF-8 | 578 | 2.71875 | 3 | [] | no_license | #!/bin/bash
#############################################
# Prepare the mirror directory tree
#############################################
# /data/apt-mirror
# /www
# /epel
# /repoforge
# /centos
#
mkdir -p /data/{apt-mirror,www/{epel,repoforge,centos}};
# Hand the mirror data and the apache log tree to the apache user.
chown -R www-data:www-data /data
chown -R www-data:www-data /var/log/apache2
# Runtime settings apache2 reads when started directly.
export APACHE_RUN_USER=www-data
export APACHE_RUN_GROUP=www-data
export APACHE_LOCK_DIR=/var/lock/apache2
export APACHE_PID_FILE=/var/run/apache2.pid
export APACHE_LOG_DIR=/var/log/apache2
# Start apache in the foreground; exec replaces this shell so apache
# becomes the main process (presumably the container entrypoint -- confirm).
exec apache2 -D FOREGROUND
| true |
770a0b714daef506872bbe772d7fb417acf209d3 | Shell | hjanime/ChIP-exo_mm9 | /bin/20-MACS2.keepDup.sh | UTF-8 | 241 | 2.5625 | 3 | [] | no_license | function run_macs2_keepDup {
# Positional args: $1 = output prefix, $2 = IP BAM file path relative to
# $project_dir. Relies on globals set by the caller: $macs2 (path to the
# MACS2 executable, presumably -- confirm) and $project_dir (project root).
out_prefix=$1
IP_bam_file=$2
out_dir=MACS2/results/$out_prefix
mkdir -p $out_dir
cd $out_dir
# Peak calling on the IP BAM; -g mm selects MACS2's mouse genome size and
# --keep-dup all disables duplicate-read filtering.
${macs2} callpeak -t $project_dir/$IP_bam_file -f BAM -g mm -n $out_prefix --keep-dup all
# Return to the project root.
cd $project_dir
}
| true |
9cd980d4b18f2ca53c27fcb93687fb2c8479ec82 | Shell | lllovej/coevolve_test | /ref_proteomes/rbh_blast/bin/blast_forward.sh | UTF-8 | 1,151 | 2.75 | 3 | [] | no_license | #!/bin/bash
#SBATCH -A SNIC2019-3-319
#SBATCH -c 5
#SBATCH -t 24:00:00
#SBATCH --array=1-565
#SBATCH --error=/home/j/juliezhu/pfs/data/ref_proteomes/rbh_blast/error/%A_%a.error
#SBATCH --output=/home/j/juliezhu/pfs/data/ref_proteomes/rbh_blast/out/%A_%a.out
#insert modules
ml GCC/7.3.0-2.30 CUDA/9.2.88 OpenMPI/3.1.1 pandas-plink/1.2.30-Python-2.7.15
##get the argument from the input: an optional offset added to the SLURM
##array index so list entries beyond the 1-565 array window can be
##processed by re-running the job with an offset.
if [ -z "${1:-}" ]
then
offset=0
else
offset=$1
fi
# Line number in the proteome list for this array task. Shell arithmetic
# replaces the external `expr` call (and treats an unset task id as 0
# instead of producing an empty idx).
idx=$((offset + SLURM_ARRAY_TASK_ID))
proteome_list=/home/j/juliezhu/pfs/data/ref_proteomes/rbh_blast/proteome_list
# Proteome name on line $idx of the list.
pname=$(sed -n "${idx}p" "$proteome_list")
ecoli_file=/home/j/juliezhu/pfs/data/ref_proteomes/ecoli_data/after_rem.fasta
# Forward PSI-BLAST: the E. coli query set against this species' database,
# tabular output (outfmt 6) with the listed columns.
psiblast -query "$ecoli_file" -db "../alldata_db/$pname" -out "../blast_out/$pname" -num_iterations 3 -evalue 0.01 -outfmt "6 qseqid sseqid qlen slen length qcovs pident qstart qend sstart send evalue"
cd ../blast_out/
##1step: remove blank lines and lines with "Search has converged!" OBS: i use '-i' means change inplace which I could only run once.
#sed -i '/^Search.*/d;/^$/d' $pname
#run the forward filter
python ../bin/blast_mainfilter.py "$pname" forward
| true |
fccc82a7358f3927dfb3713ac68b4577ef8ce602 | Shell | osp/osp.residency.masereel | /landscape-plotter.sh | UTF-8 | 915 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Summarise the contents of directory $1: entry count, date range, total
# size, the set of file extensions, and the fonts used by any PDFs
# (via fontforge), printed as a single ';'-separated record.
CURDIR=$1
# Number of directory entries (word count of the unquoted ls output).
COUNT1=$(ls $CURDIR | wc -w )
# NOTE(review): field 6 of "ls -l" is part of the modification date, and
# parsing ls is locale/format dependent -- confirm the intent here.
MYARRAY1=($(ls -l $CURDIR| cut -f6 -d" " | sort -n))
COUNT2=0
# NOTE(review): "$MYARRAY1" expands to element 0 only, so this loop counts
# the words of the first element, not the array length -- probably meant
# "${MYARRAY1[@]}" (or simply ${#MYARRAY1[@]}).
for i in $MYARRAY1
do
COUNT2=$(( $COUNT2 + 1 ))
done
START=future
END=future
if [[ $COUNT2 -gt 0 ]]
then
START=${MYARRAY1[0]}
END=${MYARRAY1[-1]}
fi
# Sizes (field 5) of the non-directory entries.
SIZES=$(ls -l $CURDIR | sed -r 's/ +/ /g' | grep -Ev '^d' | cut -f5 -d" ")
TOTALSIZE=0
for i in $SIZES
do
TOTALSIZE=$(( $TOTALSIZE + ${i} ))
done
# Build a ':'-separated list of the extensions of the plain files.
MYSEP=''
EXTS=''
for i in $(find $CURDIR -maxdepth 1 -type f)
do
EXTS=${EXTS}${MYSEP}$(basename "$i" | sed -r 's/.+\.(.̉*)/\1/')
MYSEP=':'
done
# NOTE(review): "SCRIPT=<<EOF" assigns an EMPTY string -- the heredoc is
# consumed by the bare assignment and discarded, and SCRIPT is never used
# afterwards (the fontforge call below inlines the same logic). Dead code.
SCRIPT=<<EOF
fn=$argv[1]
names=FontsInFile(fn)
Print(StrJoin(names, "⁇"))
EOF
# For each PDF, append "‽<path>‼<font list>" using fontforge scripting.
PDFFONTS=''
for i in $(find $CURDIR -maxdepth 1 -type f -iname *.pdf)
do
PDFFONTS=${PDFFONTS}‽$i‼$(fontforge -lang=ff -c "Print(StrJoin(FontsInFile('$i'), '⁇'))")
done
echo $CURDIR ';' $COUNT1 ';' $TOTALSIZE ';' $START ';' $END ';' $EXTS ';' $PDFFONTS
| true |
333c2c22906f642befff77c0fc777565a5835147 | Shell | daisuke0115/study | /shell_nyumon_06.sh | UTF-8 | 222 | 3.5625 | 4 | [] | no_license | #!/bin/bash -x
# Check whether the path given as $1 exists / matches (the :? message is
# Japanese for "please specify a file path"). On success, print the
# listing and exit 0; otherwise print "file not found" (Japanese) and
# exit 1.
file_path=${1:?ファイルパスを指定してください}
# Run ls just for its exit status; the output is discarded here.
# NOTE(review): ${file_path} is deliberately(?) unquoted, so a glob
# pattern may be passed as the argument -- confirm.
ls ${file_path} >/dev/null 2>&1 ;RC=$?
if [ ${RC} = 0 ]; then
# Re-run ls; echo $(...) collapses the listing onto one whitespace-joined line.
echo $(ls ${file_path})
exit 0
else
echo "ファイルなし"
exit 1
fi
| true |
c4e49bcc97cf6c8a2b173318f025cc8b113c2607 | Shell | gbsf/dbscripts | /testing2x | UTF-8 | 2,108 | 3.84375 | 4 | [] | no_license | #!/bin/bash
. "$(dirname $0)/config"
. "$(dirname $0)/db-functions"
if [ $# -lt 1 ]; then
msg "usage: ${0##*/} <pkgname> ..."
exit 1
fi
# Lock everything to reduce the possibility of interfering task between the different repo-updates
script_lock
for repo in ${TESTING_REPO} ${STABLE_REPOS[@]}; do
for pkgarch in ${ARCHES[@]}; do
repo_lock ${repo} ${pkgarch} || exit 1
repo_lock ${repo}-${DEBUGSUFFIX} ${pkgarch} || exit 1
done
done
declare -a removed_pkgs
for pkgname in "$@"; do
msg "Moving $pkgname"
for pkgarch in ${ARCHES[@]}; do
repo_from=""
repo_to=""
found_source=false
found_target=false
path="${HISTORYREPO}/${TESTING_REPO}/${pkgarch}/${pkgname}"
if [ -e "${path}" ]; then
found_source=true
repo_from="${TESTING_REPO}"
fi
${found_source} || continue
repo_to=$(find_repo_for_package ${pkgname} ${pkgarch}) && found_target=true
if ! ${found_target}; then
warning "${pkgname} not found in any of these repos: ${STABLE_REPOS[@]}. skipping"
continue
fi
removed_pkgs+=($(find_removed_split_packages ${repo_from} ${repo_to} ${pkgarch} ${pkgname}))
msg2 "$pkgarch $repo_from -> $repo_to"
arch_db_move "$repo_from" "$repo_to" "$pkgarch" "$pkgname"
if [[ -f "$HISTORYREPO/$repo_to-$DEBUGSUFFIX/$pkgarch/$pkgname-$DEBUGSUFFIX" ]]; then
arch_db_move "$repo_from-$DEBUGSUFFIX" "$repo_to-$DEBUGSUFFIX" "$pkgarch" "$pkgname-$DEBUGSUFFIX"
fi
done
${found_source} || warning "${pkgname} not found in [${TESTING_REPO}]. skipping"
done
for pkg in "$(dedup_array "${removed_pkgs[@]}")"; do
msg "Removing $pkg"
for pkgarch in "${ARCHES[@]}"; do
repo=$(find_repo_for_package $pkg $pkgarch) || continue
arch_db_remove "$repo" "$pkgarch" "$pkg"
if [[ -f "$HISTORYREPO/$repo-$DEBUGSUFFIX/$pkgarch/$pkg-$DEBUGSUFFIX" ]]; then
arch_db_remove "$repo-$DEBUGSUFFIX" "$pkgarch" "$pkg-$DEBUGSUFFIX"
fi
done
done
arch_history_commit "testing2x: $*"
for repo in ${TESTING_REPO} ${STABLE_REPOS[@]}; do
for pkgarch in ${ARCHES[@]}; do
repo_unlock ${repo} ${pkgarch}
repo_unlock ${repo}-${DEBUGSUFFIX} ${pkgarch}
done
done
script_unlock
| true |
1335440a4b17ac472cb6c2820dd2f65b587ca863 | Shell | xamgore/au-conspectus | /script/build.sh | UTF-8 | 2,253 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Travis CI deploy script: render the site from markdown sources and
# force-push the result to the gh-pages branch, then notify Telegram.
# $1 = "owner/repo" slug; $2 and $3 are forwarded to the generators
# (site term and chat id, presumably -- confirm).
# skip if build is triggered by pull request
if [ "$TRAVIS_PULL_REQUEST" == "true" ]; then
echo "this is PR, exiting"
exit 0
fi
# enable error reporting to the console
set -e
# cleanup "_site"
rm -rf _site
mkdir _site
echo "${1}"
# clone remote repo to "_site"
git clone "https://${GH_TOKEN}@github.com/${1}.git" --branch gh-pages --depth=1 ./_site
# clean repo from all files
( cd ./_site && git rm -rf --ignore-unmatch ./* )
( cd ./_site && rm -rf ./* )
# persuade github not to use jekyll
touch ./_site/.nojekyll 2>/dev/null || :
# make the template accessible from current dir
ln -s ./ast/template.html 2>/dev/null || :
# using the template, convert source markdown to html + json
mkdir ./input 2>/dev/null || :
find ./source -name '*.md' -print0 | xargs -n1 --null -t -I {} -- node ./ast/index.js {}
# generate the contents, move images & htmls the root folder
python ./terms/generate_html.py "${1}" "${2}" ./source ./_site
cp ./source/*.jpg ./source/*.png ./source/*.svg ./_site 2> /dev/null || :
mkdir -p ./_site/assets
cp ./res/*.css ./res/*.js ./_site/assets 2>/dev/null || :
# push generated htmls back to repository
# (only when a GitHub token is available; the single gh-pages commit is
# amended and force-pushed so the branch history stays one commit deep)
if [ ${GH_TOKEN} ]
then
cd _site
git config user.email "no-reply@github.com"
git config user.name "Travis Bot"
git add --all
git commit --amend -m "Travis #$TRAVIS_BUILD_NUMBER"
git push --force origin gh-pages
cd ..
fi
# ssh
# echo 'Send gh-pages to mmcs server...'
# ping users.mmcs.sfedu.ru -c1
# echo sshpass -p "$USERS_PASSWD" ssh xamgore@users.mmcs.sfedu.ru '{rm -rf ./public_html; mkdir public_html;}'
# sshpass -p "$USERS_PASSWD" ssh xamgore@users.mmcs.sfedu.ru '{rm -rf ./public_html; mkdir public_html;}'
# echo sshpass -p "$USERS_PASSWD" scp -r ._site/ xamgore@users.mmcs.sfedu.ru:/home/xamgore/public_html
# sshpass -p "$USERS_PASSWD" scp -r ._site/ xamgore@users.mmcs.sfedu.ru:/home/xamgore/public_html
# sshpass -p "$USERS_PASSWD" ssh xamgore@users.mmcs.sfedu.ru '{ rm -rf ./public_html; git clone "https://github.com/xamgore/au-conspectus.git" --branch gh-pages ./public_html; }'
# send notification to telegram chat
git show --name-status --oneline | tail -n +2
# NOTE(review): CHAT='${3}' below is single-quoted, so the literal string
# ${3} (not the third argument) reaches the notifier -- confirm.
message=$(git show --name-status --oneline | tail -n +2 | python ./telegram/message_generator.py "${2}")
[[ -z "$TM_TOKEN" ]] || TM_TOKEN="$TM_TOKEN" CHAT='${3}' MSG="$message" node ./telegram/index
| true |
54ac0a9d2a8ac94741c99f3388521bbed867a499 | Shell | sudar/dotfiles | /bash/bash_aliases | UTF-8 | 3,196 | 3.921875 | 4 | [
"Beerware"
] | permissive | ##############################################################################
# This is the list of aliases that I use.
# Feel free to use the entire file or parts of it.
#
# Author: Sudar (http://sudarmuthu.com)
# Repo: https://github.com/sudar/dotfiles
#
##############################################################################
# enable color support of ls and also add handy aliases
# (GNU dircolors only; ~/.dircolors overrides the defaults when readable)
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
alias dir='dir --color=auto'
alias vdir='vdir --color=auto'
# NOTE(review): --color=always (unlike auto) also colours piped/captured
# output, which can leak escape codes into scripts -- confirm intended.
alias grep='grep --color=always'
alias fgrep='fgrep --color=always'
alias egrep='egrep --color=always'
fi
# color svn diff. Make sure colordiff is installed
alias svnd="svn diff | colordiff | less -R"
function svndiff () { svn diff $@ | colordiff | less -R; }
# some more ls aliases
alias ll='ls -alFh'
alias la='ls -A'
alias l='ls -CF'
#------------------------------------------------------------------------------
# exclude .svn and .git
#------------------------------------------------------------------------------
alias grepe='grep --exclude-dir=".svn" --exclude-dir=".git"'
# exclude .svn or .git files while doing diff
alias diff="diff --exclude='.svn' --exclude='.git'"
# exclude .svn or .git files while doing zip
alias zip="zip -x '*/.git/*' -x '*/.svn/*'"
# delete .svn/* from a folder
# (recursive rm -rf: destructive, run only inside a working copy)
alias delsvn='find . -name ".svn" -type d -exec rm -rf {} \;'
#------------------------------------------------------------------------------
# Enhance built-in commands
#------------------------------------------------------------------------------
# add more to cat command
# NOTE(review): overriding cat also affects non-interactive uses in this
# shell (pipelines, command substitutions) -- confirm that is intended.
cat()
{
/bin/cat "$@" | more
}
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# Use vi as the editor for svn
export SVN_EDITOR=vi
#------------------------------------------------------------------------------
# Make the output more human readable
#------------------------------------------------------------------------------
# Default to megabytes / human-readable units.
alias free="free -m"
alias df="df -h"
alias du="du -h"
#------------------------------------------------------------------------------
# File & strings related functions:
#------------------------------------------------------------------------------
# ff PATTERN... : list files under . whose name contains PATTERN (case-insensitive).
ff() { find . -type f -iname "*$**" -ls ; }
# fe PATTERN [CMD] : run CMD (default: file) on each file whose name contains PATTERN.
fe() { find . -type f -iname "*${1:-}*" -exec ${2:-file} {} \; ; }
#-------------------------------------------------------------
# useful functions:
#-------------------------------------------------------------
# NOTE(review): relies on the deprecated ifconfig and the fixed interface
# name "eth0"; also /inet/ matches "inet6" lines too, so MY_IP may hold
# several addresses -- confirm.
function my_ip() # Get IP address on ethernet.
{
MY_IP=$(/sbin/ifconfig eth0 | awk '/inet/ { print $2 } ' |
sed -e s/addr://)
# Falls back to "Not connected" when nothing was parsed.
echo ${MY_IP:-"Not connected"}
}
#-------------------------------------------------------------
# Set the title of the terminal. This works only in Mac
#-------------------------------------------------------------
# Wrap ssh so the terminal title shows the remote host during the session.
# Uses the OSC "\e]1;...\a" title escape (works e.g. in macOS Terminal).
ssh()
{
    # Title = the arguments with any leading "user@" prefix stripped.
    # The value goes through %s rather than the format string, so hosts
    # containing printf metacharacters cannot break the escape sequence.
    printf "\e]1;%s\a" "$(printf '%s' "$*" | sed -Ee 's/^.+\@//')"
    # "$@" (not the unquoted $*) keeps arguments containing spaces intact.
    command ssh "$@"
    # Restore a generic title once the session ends.
    printf "\e]1;bash\a"
}
| true |
19de5e5254b4f2cbdde51ed2daf3f7e7c8d576a4 | Shell | g-harel/SOEN387 | /connect | UTF-8 | 427 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# load variables written in secret .config
# (provides the DB_USERNAME, DB_PASSWORD, DB_HOST, DB_PORT used below)
source .config
# Run a local MySQL container in the foreground, seeded with the
# credentials from .config.
docker run \
--name SOEN387 \
-p 3306:3306 \
-e MYSQL_ROOT_PASSWORD=$DB_PASSWORD \
-e MYSQL_DATABASE=$DB_USERNAME \
-e MYSQL_USER=$DB_USERNAME \
-e MYSQL_PASSWORD=$DB_PASSWORD \
mysql
# Remove the stopped container once the foreground run ends.
docker rm SOEN387
exit 0
# NOTE(review): everything below is unreachable because of the exit 0
# above -- the SSH tunnel to the remote DB is effectively disabled.
# open persistent tunnel to remote
export SSHPASS="$DB_PASSWORD"
sshpass -e ssh -L $DB_PORT:$DB_HOST:$DB_PORT $DB_USERNAME@dumbledore.encs.concordia.ca
| true |
3d3b03e1c8be877f4d34264eea4335f3c2b95725 | Shell | jbcodeforce/BetterToDoBackEnd | /scripts/deployPostgresql.sh | UTF-8 | 199 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Create the postgresql deployment only when its credentials secret does
# not exist yet (oc prints nothing when the secret is missing).
if [[ -z "$(oc get secret postgresql-creds 2> /dev/null)" ]]; then
  echo "#################"
  echo "Deploy postgresql"
  echo "#################"
  oc apply -k kustomize/env/postgres
fi
| true |
771aeedf626c5fdf945aeefd13706ab4d6fb1dd3 | Shell | PhilippvK/giantboard-tools | /patches/rootfs/usbgadget-serial | UTF-8 | 1,330 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Configure a USB serial (CDC ACM) gadget "g1" through configfs and start
# a login getty on the resulting ttyGS0 port.
gadgetfs="/sys/kernel/config/usb_gadget"
gadget_dir="${gadgetfs}/g1"
# 0x1d6b = Linux Foundation vendor id; 0x0104 = multifunction composite gadget.
id_vendor="0x1d6b"
id_product="0x0104"
serialnumber="0000"
manufacturer="Groboards"
product="Giant Board"
# Load configfs gadget support if the gadget tree is not present yet.
if [ ! -d "${gadgetfs}" ]; then
modprobe libcomposite
fi
# Gadget already configured (e.g. by an earlier run): nothing to do.
if [ -d "${gadgetfs}/g1" ]; then
exit 0
fi
sleep 3
mkdir -p "${gadget_dir}"
if [ ! -d "${gadget_dir}" ]; then
exit 1
fi
echo "${id_vendor}" > "${gadget_dir}/idVendor"
echo "${id_product}" > "${gadget_dir}/idProduct"
# Windows Device descriptors for composite device
echo "0x0200" > "${gadget_dir}/bcdUSB"
echo "0xEF" > "${gadget_dir}/bDeviceClass"
echo "0x02" > "${gadget_dir}/bDeviceSubClass"
echo "0x01" > "${gadget_dir}/bDeviceProtocol"
echo "0x3066" > "${gadget_dir}/bcdDevice"
# English (0x409) string descriptors.
mkdir -p "${gadget_dir}/strings/0x409"
echo "${serialnumber}" > "${gadget_dir}/strings/0x409/serialnumber"
echo "${manufacturer}" > "${gadget_dir}/strings/0x409/manufacturer"
echo "${product}" > "${gadget_dir}/strings/0x409/product"
# One configuration (c.1) exposing a single ACM (serial) function.
mkdir -p "${gadget_dir}/configs/c.1"
mkdir -p "${gadget_dir}/configs/c.1/strings/0x409"
echo "ACM" > "${gadget_dir}/configs/c.1/strings/0x409/configuration"
mkdir -p "${gadget_dir}/functions/acm.GS0"
ln -s "${gadget_dir}/functions/acm.GS0" "${gadget_dir}/configs/c.1"
# Bind the gadget to the UDC named 300000.gadget to bring it live.
echo "300000.gadget" > "${gadget_dir}/UDC"
sleep 1
systemctl start getty@ttyGS0
| true |
8c2aa3fe32031cce66d056e143fccc52c5c2e3ba | Shell | razee-io/Razeedash-api | /local-dev/api/containerApiTest.sh | UTF-8 | 640 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Smoke-test the containers applyRBAC endpoint with a dummy cluster id.
# Optional env: CONTAINER_URL (endpoint override) and RAZEE_USER_TOKEN
# (bearer token; a junk placeholder header is sent when it is absent).
CONTAINER_URL=${CONTAINER_URL:-https://containers.test.cloud.ibm.com/global/v2/applyRBAC}
# NOTE(review): RAZEE_QUERY/RAZEE_VARIABLES capture $1/$2 but are never
# used below -- confirm whether they were meant to go into --data.
RAZEE_QUERY=${1}
RAZEE_VARIABLES=${2}
[ ! -z "${RAZEE_USER_TOKEN}" ] && AUTH_HEADER="Authorization: Bearer ${RAZEE_USER_TOKEN}" || AUTH_HEADER="no-auth-available: asdf"
echo
# echo "AUTH_HEADER: ${AUTH_HEADER}"
echo "CONTAINER_URL: ${CONTAINER_URL}"
echo
# add '-v' for verbose
# -H "Origin: razeetest.com" \
# --ipv4 \
curl \
-X POST \
-H "Accept: application/json" \
-H "Content-Type: application/json" \
-H "${AUTH_HEADER}" \
-w "\nHTTP: %{http_code}\n" \
--data '{ "cluster": "'"DUMMYCLUSTERID"'" }' \
${CONTAINER_URL}
exit $?
| true |
e95b87e3aa72e3e590c3a46944a08fa2151bb47d | Shell | egesu/islandora-ubuntu-setup | /setup.sh | UTF-8 | 523 | 2.5625 | 3 | [] | no_license | #!/bin/bash
echo "Running 001"
./001-environment.sh >> log/001.log
echo "Running 002"
./002-os-upgrade.sh >> log/002.log
echo "Running 003"
./003-os-packages.sh >> log/003.log
echo "Running 004 dependencies"
echo "Running 004-d1"
./004-d1-ghostscript.sh &> log/004-d1.log
echo "Running 004-d2"
# ./004-d2-ffmpeg.sh &> log/004-d2.log
echo "Running 004-d3"
# ./004-d3-ffmpeg2theora.sh &> log/004-d3.log
echo "Running 004-d4"
./004-d4-fits.sh &> log/004-d4.log
echo "Running 004-d5"
./004-d5-drush.sh &> log/004-d5.log
| true |
72a929f79d124edcc7fb7b2bee8b4d0ca6471978 | Shell | Emiya-lcs/fate | /shell/zq.sh | UTF-8 | 2,344 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Interactive helper: create /etc/rc.d/init.d/qd001.sh, a boot script
# that starts Nginx + PHP-FPM + Redis (+ memcached), and then register
# or unregister it with chkconfig.
qd_dir=/etc/rc.d/init.d
qd001_dir=/etc/rc.d/init.d/qd001.sh
#Start PHP+Nginx+redis at boot
echo "------------------------------------------------------"
echo "|------------1、使用春生的LNMP脚本请选1--------------|"
echo "|-------------------2、不是请选2---------------------|"
echo "------------------------------------------------------"
# Choice 1: fixed paths from the LNMP install; choice 2: locate the
# binaries with find (slow: scans the whole filesystem).
while true
do
echo -n "请选择:"
read xz
case $xz in
1)
shift
#Binary paths
php_path=/usr/local/php-7.1.0/sbin/php-fpm
nginx_path=/usr/local/nginx-1.8.0/sbin/nginx
#redis_path=echo -e "\n" | /usr/local/redis-2.8.17/bin/redis-server /etc/redis-2.8.17 &
#memcached_path=/usr/local/memcached-1.4.15/bin/memcached -u root -d
shift
;;
2)
shift
php_path=`find / -path */sbin/php-fpm`
nginx_path=`find / -path */sbin/nginx`
redis_path=`find / -path */bin/redis-server`
memcached_path=`find / -path */bin/memcached`
shift
;;
esac
break
done
#Check whether the startup script has already been created
if [ -f $qd001_dir ];then
echo '已创建:'$qd001_dir
else
#Create the startup script file
function qidong1
{
touch $qd_dir/qd001.sh
chmod +x $qd_dir/qd001.sh
}
qidong1
#Write the startup commands into the script. The "$a ..." lines below are
#sed append payload that ends up verbatim inside qd001.sh, so they must
#stay exactly as-is (including the Chinese comments they write).
function qidong2
{
echo '#!/bin/bash' > $qd_dir/qd001.sh
sed -i '$a #chkconfig:2345 80 90
$a #description:auto_run
$a
$a #nginx启动
$a '$nginx_path'
$a
$a #php启动
$a '$php_path'
$a
$a #redis启动
$a echo -e "\n" | '$redis_path' /etc/redis-2.8.17 &
$a
$a #memcached启动
$a '$memcached_path' -u root -d' $qd_dir/qd001.sh
echo "参数已写入脚本"
}
qidong2
fi
echo "-------------------------------------------"
echo "|------------a、设置开机自启--------------|"
echo "|------------b、取消开机自启--------------|"
echo "-------------------------------------------"
# a = enable at boot via chkconfig, b = disable.
while true
do
echo -n "请做出你的选择:"
read xuanze
echo $xuanze
case $xuanze in
a)
shift
#Register and enable the script with chkconfig
function qidong3
{
cd $qd_dir
chkconfig --add qd001.sh
chkconfig qd001.sh on
}
qidong3
echo "设置自启成功"
shift
;;
b)
shift
#Unregister and disable the script
function qidong4
{
cd $qd_dir
chkconfig --del qd001.sh
chkconfig qd001.sh off
}
qidong4
echo "取消开机自启"
shift
;;
esac
break
done
| true |
6a0a869f74ccd25a7f4d3d98f9d7a6707e04280c | Shell | tychota/Portus | /startup.sh | UTF-8 | 4,370 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Start portus
# Generates the nginx TLS virtual host for Portus, creates a self-signed
# certificate when one is missing, prepares the Rails database, then
# starts the crono scheduler and the puma application server.
if [ "$PORTUS_KEY_PATH" != "" ]; then
NAME=`basename $PORTUS_KEY_PATH .key`
else
NAME="registry"
fi
if [ "$PORTUS_PORT" = "" ]; then
PORTUS_PORT=443
fi
if [ "$PORTUS_MACHINE_FQDN" = "" ]; then
PORTUS_MACHINE_FQDN=`hostname`
fi
mkdir -p /etc/nginx/conf.d
# The heredoc is intentionally unquoted so $NAME, $PORTUS_MACHINE_FQDN and
# $PORTUS_PORT are substituted. nginx's own runtime variables must be
# escaped (\$host, \$request_uri); previously they were unescaped, so the
# shell expanded them to empty strings and the generated HTTP->HTTPS
# redirect target was broken.
cat >/etc/nginx/conf.d/portus.conf <<_END_
server {
listen 80 default_server;
listen [::]:80 default_server;
# Redirect all HTTP requests to HTTPS with a 301 Moved Permanently response.
return 301 https://\$host\$request_uri;
}
server {
listen 443 default_server ssl http2;
ssl_certificate certs/$NAME.crt;
ssl_certificate_key certs/$NAME.key;
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
# Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits
ssl_dhparam certs/dhparam.pem;
# modern configuration. tweak to your needs.
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS';
ssl_prefer_server_ciphers on;
# HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
add_header Strict-Transport-Security max-age=15768000;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
# OCSP Stapling ---
# fetch OCSP records from URL in ssl_certificate and cache them
ssl_stapling on;
ssl_stapling_verify on;
## verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate certs/chain.pem;
resolver 8.8.8.8 8.8.4.4 valid=300s;
location / {
proxy_set_header Host $PORTUS_MACHINE_FQDN;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Forwarded-Host $PORTUS_MACHINE_FQDN:$PORTUS_PORT;
proxy_pass http://portus:3000/;
proxy_http_version 1.1;
proxy_set_header Connection "upgrade";
proxy_read_timeout 900s;
}
}
_END_
cd /portus
# Generate a self-signed certificate for the FQDN when a key path is
# configured but the key file does not exist yet.
if [ "$PORTUS_KEY_PATH" != "" -a "$PORTUS_MACHINE_FQDN" != "" -a ! -f "$PORTUS_KEY_PATH" ];then
# create self-signed certificates
echo Creating Certificate
PORTUS_CRT_PATH=`echo $PORTUS_KEY_PATH|sed 's/\.key$/.crt/'`
export ALTNAME=`hostname`
export IPADDR=`ip addr list eth0 |grep "inet " |cut -d' ' -f6|cut -d/ -f1|tail -1`
openssl req -x509 -newkey rsa:2048 -keyout "$PORTUS_KEY_PATH" -out "$PORTUS_CRT_PATH" -days 3650 -nodes -subj "/CN=$PORTUS_MACHINE_FQDN" -extensions SAN -config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=DNS:registry,DNS:$PORTUS_MACHINE_FQDN,DNS:$ALTNAME,IP:$IPADDR,DNS:portus"))
fi
if [ "$PORTUS_MACHINE_FQDN" != "" ];then
echo config FQDN into rails
sed -i"" -e "s/portus.test.lan/$PORTUS_MACHINE_FQDN/" config/config.yml
fi
echo Making sure database is ready
rake db:create && rake db:migrate && rake db:seed
echo Creating API account if required
rake portus:create_api_account
if [ "$PORTUS_PASSWORD" != "" ]; then
echo Creating rancher password
# NOTE(review): the guard tests PORTUS_PASSWORD but the value passed below
# is PORTUS_RANCHER_PASSWORD -- confirm which variable is intended.
rake "portus:create_user[rancher,rancher@rancher.io,$PORTUS_RANCHER_PASSWORD,false]"
fi
if [ "$REGISTRY_HOSTNAME" != "" -a "$REGISTRY_PORT" != "" -a "$REGISTRY_SSL_ENABLED" != "" ]; then
echo Checking registry definition for $REGISTRY_HOSTNAME:$REGISTRY_PORT
rake sshipway:registry"[Registry,$REGISTRY_HOSTNAME:$REGISTRY_PORT,$REGISTRY_SSL_ENABLED]"
fi
echo Starting chrono
bundle exec crono &
echo Starting Portus
/usr/bin/env /usr/local/bin/ruby /usr/local/bundle/bin/puma $*
| true |
8c87f23ce687c98d9c89d6e7fa466398d2e26962 | Shell | vsujeesh/tex-fonts | /code2000/create-package.sh | UTF-8 | 843 | 3.609375 | 4 | [] | no_license | # Set variables
# Abort on the first failing command.
set -e
# Font/package identifiers and download locations (Wayback Machine copy
# of the defunct code2000.net site).
NAME=code2000
ARCHIVE=CODE2000.ZIP
DOC=UNICREF.ZIP
BASE=https://web.archive.org/web/20101122142710/http://code2000.net
FONTS=$BASE/$ARCHIVE
SAMPLE=$BASE/$DOC
# Create new download and package directories
if [ -e download ]
then
rm -rf download
fi
mkdir download
if [ -e $NAME ]
then
rm -rf $NAME
fi
mkdir $NAME
# Download archive in download and copy essential files to package directory
cd download
# Reuse a previously downloaded archive when one is already present.
if [ ! -e $ARCHIVE ]
then
wget $FONTS
fi
unzip $ARCHIVE
cp --preserve=timestamps CODE2000.TTF ../$NAME/CODE2000.ttf
wget $SAMPLE
unzip $DOC
# Render the sample GIF to an A4 PDF for the package documentation.
convert SSEE.GIF -page a4 ../$NAME/$NAME.pdf
cd ..
cp --preserve=timestamps README.md $NAME/README
# Create archive for uploading to CTAN
rm -f $NAME.tar.gz $NAME.tar
tar cf $NAME.tar $NAME
gzip $NAME.tar
# Remove download and package directory
rm -rf download $NAME
| true |
3bb3d48f98cd5776739bda6b0d6950e62fea3216 | Shell | fredzannarbor/pagekicker-community | /scripts/bin/pdf2x1a.sh | UTF-8 | 707 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# converts pdf in command line to pdfx1a
# Option parsing: accepts --filepath <path> or --filepath=<path>; unknown
# options are warned about and skipped; "--" ends option processing.
while :
do
case $1 in
--help | -\?)
#
exit 0 # This is not an error, the user requested help, so do not exit status 1.
;;
--filepath)
# NOTE(review): $2 is consumed without checking it exists; a trailing
# bare --filepath makes "shift 2" fail -- confirm the desired behaviour.
filepath=$2
shift 2
;;
--filepath=*)
filepath=${1#*=}
shift
;;
--) # End of all options
shift
break
;;
-*)
echo "WARN: Unknown option (ignored): $1" >&2
shift
;;
*) # no more options. Stop while loop
break
;;
esac
done
echo "$filepath"
# Convert to PDF/X-1a with PStill: CMYK images, spot colours, overprint,
# 500 dpi, SWOP ICC profile; output is written to "<input>.x1a".
/home/fred/sfb/sfb-latest/trunk/scripts/lib/pstill_dist/pstill -M defaultall -m XimgAsCMYK -m Xspot -m Xoverprint -d 500 -m XPDFX=INTENTNAME -m XPDFXVERSION=1A -m XICCPROFILE=USWebCoatedSWOP.icc -o "$filepath".x1a "$filepath"
exit 0
| true |
3d7750f58b2a383d2c713fb7e9091521068addb5 | Shell | cqgd/ibsgwas | /conditional_analysis/cojo_meta.sh | UTF-8 | 1,230 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#SBATCH --job-name=cojo_meta
#SBATCH --output=/gfs/work/ceijsbouts/ibs/jobs/stream/job_%A_%a.stdout
#SBATCH --partition=compute
#SBATCH --ntasks=1
#SBATCH --time=1-0
#SBATCH --mem-per-cpu=0
#SBATCH --cpus-per-task=4
#SBATCH --array=1-5
# GCTA-COJO conditional analysis: each SLURM array task processes one
# locus extracted for the meta-analysis trait below.
TRAIT="metal_ICD_ROME_EURUSA_Qonly_Qnon"
extract_dir="/gfs/work/ceijsbouts/ibs/clump/extract"
# One SNP-list file per locus (named chr<NN>...).
loci_files=($extract_dir/$TRAIT/chr*)
echo ${loci_files[@]}
# The (1-based) SLURM task id selects the locus file.
locus=${SLURM_ARRAY_TASK_ID}
locus_path=${loci_files[$locus-1]}
locus_name=$(basename "$locus_path")
# Chromosome number parsed out of the file name.
CHR=$(echo $locus_path | sed 's/.*chr\([0-9]*\).*/\1/')
printf "Working with $TRAIT locus $locus \n from $locus_path \n on chr$CHR"
out_dir="/gfs/work/ceijsbouts/ibs/clump/cojo/$TRAIT"
# NB: mkdir without -p only warns when the directory already exists.
mkdir ${out_dir}
out_file=${out_dir}/${locus_name}
module load bio/gcta/1.92.0b
# Stepwise model selection (--cojo-slct) on the locus SNPs against the
# UK Biobank reference genotypes, genome-wide threshold 5e-8.
gcta64 \
--bfile /gfs/archive/jostins/ukbb/v3/imputation_bfile_infomaf_filtered/ukb_imp_chr${CHR}_v3 \
--keep /gfs/work/ceijsbouts/ibs/clump/keep_unrelated/10k_passing_sqc.txt \
--extract $locus_path \
--cojo-file /gfs/work/ceijsbouts/ibs/clump/sumstats_meta_cojo/${TRAIT}/chr${CHR}.sumstats \
--cojo-slct \
--cojo-p 5e-8 \
--thread-num 4 \
--prevalence 0.15 \
--out ${out_file}
| true |
e6ca030a9228a92e1e2d517c75d9c2fe5ace3d48 | Shell | lamocUFF/SIMOP2 | /omega.sh | UTF-8 | 1,198 | 2.71875 | 3 | [] | no_license | #/bin/bash
#------------------------------------------------------------------------
#
#
# Script to fetch the 10-day Eta forecasts from CPTEC and
# compute accumulated rainfall per SIN basin.
#
# VERSION 2.0
#
#
# by regis reginaldo.venturadesa@gmail.com
# usage:
#   adquire [00/12]
#
# ----------------------------------------------------------------------
# Requires a file containing information about the basins.
# (see how to document that here)
#
#-------------------------------------------------------------------------
# This version is run from the regisgrundig account, not the lAMOC one.
#--------------------------------------------------------------------------
MODDEBUG=1   # debug flag consumed by the child acquisition scripts
#
# The model runs twice a day: 00Z and 12Z.  If nothing is given on the
# command line, 00Z is assumed.
#
export LANG=en_us_8859_1
#
# Detect the operating system.  On Linux the GrADS environment variables
# must be set explicitly; on Cygwin (Windows) a proper install is expected
# to work without them.
# Fixes vs. the original: $(...) instead of backticks, `uname -s` instead
# of the fragile `uname -a | cut -c1-5`, and the variable is quoted so the
# test cannot break on unexpected output.
#
MACH=$(uname -s)
if [ "$MACH" = "Linux" ]; then
export PATH=$PATH:/usr/local/grads
export GADDIR=/usr/local/grads
export GADLIB=/usr/local/grads
export GASCRP=/usr/local/grads
fi
# Fetch the three data sources in sequence.
./adquire_eta40.sh
./adquire_obs.sh
./adquire_gfs.sh
| true |
5d056c6c0e91c07cc1ce6b10f4838cf3f4acca78 | Shell | rickpe2019/Thiago-ProgScript | /Atv6-thiago/7.sh | UTF-8 | 125 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Print the name of the most recently modified entry in the current
# directory.
# Fixes vs. the original: iterating over $(ls) word-splits names that
# contain whitespace and mangles special characters; a glob handles every
# filename safely, and all expansions are quoted.
novo=
for i in *; do
    [ -e "$i" ] || continue          # empty directory: glob stays literal, skip
    # Seed with the first entry, then keep whichever is newer (-nt = mtime).
    if [ -z "$novo" ] || [ "$i" -nt "$novo" ]; then
        novo=$i
    fi
done
echo "$novo"
| true |
632e98e3c4bda0e4ecdcb59b22fabec711ae09d4 | Shell | benwilhelm/circleci-hello-world | /scripts/deploy.sh | UTF-8 | 367 | 2.953125 | 3 | [] | no_license | #! /bin/bash
# Commit the generated docs and push them to origin/master.
# CI environments often lack a git identity, so a default user.name /
# user.email is installed before committing (git refuses to commit
# without one).
# Fix vs. the original: `git config user.name = "CI"` passed THREE
# arguments (the literal "=" plus "CI"), which git rejects -- the identity
# was never set and the commit failed.  `git config <name> <value>` takes
# the value directly, with no "=".
git add docs

gitName=$(git config user.name)
gitEmail=$(git config user.email)

if [ -z "$gitName" ]
then
  echo "setting git user.name to 'CI'"
  git config user.name "CI"
fi

if [ -z "$gitEmail" ]
then
  echo "setting git user.email to 'ben+ci@doublebeamdesign.com'"
  git config user.email "ben+ci@doublebeamdesign.com"
fi

git commit -m "Automated Deploy"
git push -u origin master
| true |
ce862e8d7a4d7d7c766143804e8dc6016f797af0 | Shell | timcurzon/docker-apparatus | /arm-hf/archive/php-fpm/build | UTF-8 | 438 | 4 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Interactive build helper for the php-fpm image.
# Prompts for a variant (alpine is the only one implemented); an empty
# answer selects the default.  Unknown variants are rejected.
IMAGENAME="php-fpm"
DEFAULTVARIANT="alpine"
VARIANT=""

printf "\nEnter a variant - only alpine available currently (blank for default [alpine]): "
read VARIANT

# Blank input falls back to the default variant.
VARIANT=${VARIANT:-$DEFAULTVARIANT}

case "$VARIANT" in
alpine)
printf "Building $VARIANT...\n\n"
docker build -t $IMAGENAME:$VARIANT -f $VARIANT/Dockerfile $VARIANT
printf "\nDone!\n\n"
;;
*)
printf "Invalid variant!\n\n"
;;
esac
| true |
0d611c52306901c324dfe99d03e8f65f62a90b29 | Shell | snagoor/sysadmin | /generate-ca.sh | UTF-8 | 4,138 | 4.0625 | 4 | [] | no_license | #! /bin/bash
# Interactively collect every certificate subject field into the CA_*
# globals, echo them back for review, and on confirmation write the
# OpenSSL config file.  Declining the confirmation aborts the script.
function read_cert_details() {
echo ""
while [ -z "$CA_COUNTRY_INFO" ]; do read -r -p "Provide Country Code (2 Letters) [IN] : " CA_COUNTRY_INFO; done
while [ -z "$CA_STATE_INFO" ]; do read -r -p "Provide State Information : " CA_STATE_INFO; done
while [ -z "$CA_CITY_INFO" ]; do read -r -p "Provide City Information : " CA_CITY_INFO; done
while [ -z "$CA_ORG_INFO" ]; do read -r -p "Provide Organization Name : " CA_ORG_INFO; done
while [ -z "$CA_DEPT_INFO" ]; do read -r -p "Provide Department Information : " CA_DEPT_INFO; done
while [ -z "$CA_EMAIL_INFO" ]; do read -r -p "Provide Email Address (root@example.com) : " CA_EMAIL_INFO; done
while [ -z "$CA_CN_INFO" ]; do read -r -p "Provide Common Name [Host FQDN (host.example.com)] : " CA_CN_INFO; done
# Loops until validate_days leaves a non-empty (positive) value behind.
while [ -z $CA_CERT_DAYS ]; do read -r -p "How many days would you like the CA Cert to be valid for? [DAYS] : " CA_CERT_DAYS && validate_days; done
show_cert_details
echo ""
read -r -p "Would you like to proceed with the above selection [Y/N] " READ_INPUT
if [ "$READ_INPUT" == "Y" ] || [ "$READ_INPUT" == "y" ]; then
write_conf_file
else
echo -e "\nBased on input receieved, exiting now\n"
exit
fi
}
# Coerce CA_CERT_DAYS to a positive integer.  Non-numeric input is run
# through bc (garbage yields 0); values below 1 trigger a re-prompt and a
# recursive re-check.
# NOTE(review): input that is already all-digits skips the lower-bound
# check entirely, so "0" is accepted -- confirm whether that is intended.
function validate_days() {
if ! [[ $CA_CERT_DAYS =~ ^[0-9]+$ ]]; then
CA_CERT_DAYS=$(echo $CA_CERT_DAYS | bc)
if [[ $CA_CERT_DAYS -lt 1 ]]; then
read -r -p "How many days would you like the CA Cert to be valid for? [DAYS] : " CA_CERT_DAYS
CA_CERT_DAYS=$(echo $CA_CERT_DAYS | bc)
validate_days
fi
fi
}
# Emit the OpenSSL request configuration (ca-setup.conf) from the
# collected CA_* globals.  The heredoc is unquoted so the variables expand
# now; its body must stay at column 0 because it becomes file content.
function write_conf_file() {
cat > "$PWD/certs/ca_certs/ca-setup.conf" << EOF
[req]
default_bits = 2048
prompt = no
default_md = sha256
distinguished_name = dn
[dn]
C=$CA_COUNTRY_INFO
ST=$CA_STATE_INFO
L=$CA_CITY_INFO
O=$CA_ORG_INFO
OU=$CA_DEPT_INFO
emailAddress=$CA_EMAIL_INFO
CN=$CA_CN_INFO
EOF
}
# Pretty-print the collected certificate details for user confirmation.
# Reads the CA_* globals; the tab/newline layout reproduces the original
# echo -e output exactly, via printf format strings.
function show_cert_details() {
printf '\nPer your selection below are the details entered\n\n'
printf '\t\tCountry Code\t\t\t: %s\n' "$CA_COUNTRY_INFO"
printf '\t\tState Information\t\t: %s\n' "$CA_STATE_INFO"
printf '\t\tCity\t\t\t\t: %s\n' "$CA_CITY_INFO"
printf '\t\tOrganization\t\t\t: %s\n' "$CA_ORG_INFO"
printf '\t\tDepartment\t\t\t: %s\n' "$CA_DEPT_INFO"
printf '\t\tCommon Name\t\t\t: %s\n' "$CA_CN_INFO"
printf '\t\tEmail Address\t\t\t: %s\n' "$CA_EMAIL_INFO"
printf '\t\tCertificate Validity\t\t: %s days\n' "$CA_CERT_DAYS"
}
# Abort the script if the previous command failed.
# Must be called immediately after the command being checked: it captures
# $? on entry, prints the supplied message to stderr, and exits with the
# failing command's status.
# Fix vs. the original: a bare `exit` exited with the status of the
# preceding echo (always 0), so every failure looked like success; the
# message also went to stdout instead of stderr.
function status_check() {
LAST_STATUS="$?"
if [ "$LAST_STATUS" -ne 0 ]; then
echo -e "$1\n" >&2
exit "$LAST_STATUS"
fi
}
# Guard clause: terminate with status 1 unless the effective user is root.
# The warning goes to stderr so it survives stdout redirection.
function root_check() {
[ "$(id -u)" = "0" ] && return 0
echo -e "\nThis script must be run as root. Exiting for now.\n" 1>&2
exit 1
}
# Orchestrate CA creation: ensure $PWD/certs/ca_certs exists (offering to
# wipe stale contents), collect the subject interactively, then generate
# the private key and the self-signed CA certificate with openssl.
# Fixes vs. the original:
#  * `rm -rf "$PWD/certs/ca_certs/*"` quoted the `*`, so the glob never
#    expanded and nothing was ever deleted; the glob now sits outside the
#    quotes.
#  * the WARNING echo was missing -e, so literal "\n" sequences were
#    printed instead of blank lines (every other message uses -e).
function main() {
if [ ! -d "$PWD/certs/ca_certs" ]; then
mkdir -p "$PWD/certs/ca_certs/"
status_check "Unable to create $PWD/certs/ca_certs directory, check Permissions or path"
else
read -r -p "$PWD/certs/ca_certs directory contains old data would you like to clear its contents? [Y/N] : " CA_DIR_INPUT
if [ "$CA_DIR_INPUT" == "Y" ] || [ "$CA_DIR_INPUT" == "y" ]; then
rm -rf "$PWD/certs/ca_certs/"*
status_check "Unable to delete contents of $PWD/certs/ca_certs, check Permissions or path"
else
echo -e "\nWARNING: $PWD/certs/ca_certs directory exists, there might be unknown issues while generating CA certs.\n"
fi
fi
read_cert_details
echo -e "\nGenerating new CA Private key\n"
openssl genrsa -out "$PWD/certs/ca_certs/ca-private.key" 2048 >/dev/null 2>&1
status_check "Something went wrong while executing command 'openssl genrsa -out $PWD/certs/ca_certs/ca-private.key 2048'"
echo -e "Generating new CA Cert for $CA_CERT_DAYS days\n"
openssl req -new -x509 -days "$CA_CERT_DAYS" -nodes -key "$PWD/certs/ca_certs/ca-private.key" -sha256 -out "$PWD/certs/ca_certs/ca-cert.pem" -config "$PWD/certs/ca_certs/ca-setup.conf"
status_check "Something went wrong while executing command 'openssl req -new -x509 -days $CA_CERT_DAYS -nodes -key $PWD/certs/ca_certs/ca-private.key -sha256 -out $PWD/certs/ca_certs/ca-cert.pem -config $PWD/certs/ca_certs/ca-setup.conf'"
echo -e "Successfully generated CA certs, please find the below details\n"
echo -e "CA Cert \t\t: $PWD/certs/ca_certs/ca-cert.pem"
echo -e "CA Private Key \t\t: $PWD/certs/ca_certs/ca-private.key\n"
}
# Entry point: must run as root (writes key material under $PWD/certs).
root_check
main
| true |
defd16aa4aa58a99efbddca918a096ff609ac947 | Shell | ziel/dots | /bin/snvol | UTF-8 | 603 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
# snvol is Simple Notifying Volume Control
# intended for use with xbindkeys and pamixer
# (c)ziel
# I'll stick a foss license on this silliness at some point
# right now I just need volume control ;-)
# Show the current volume as a desktop notification.
# Reads the level from pamixer; a muted sink is displayed as 0.
notify()
{
vol=$(pamixer --get-vol)
if [ "$(pamixer --get-mute)" = "true" ];
then
vol=0
fi
# -h int:value:N lets notification daemons render a progress bar.
notify-send -h 'int:value:'${vol} Vol
}
# Dispatch on the first argument: increase/decrease the volume by $2
# percent, or toggle mute; each successful action re-notifies.
main()
{
case $1 in
increase)
pamixer --increase $2 && notify
;;
decrease)
pamixer --decrease $2 && notify
;;
toggle-mute)
pamixer --toggle-mute && notify
;;
esac
}
# Forward all script arguments to main (unquoted $@ is tolerated here:
# the arguments are simple keywords and percentages from xbindkeys).
main $@;
| true |
3c759ac840f74495b1209ca636337dc9d5e2177d | Shell | ClickHouse/ClickHouse | /tests/queries/0_stateless/00956_http_prepared_statements.sh | UTF-8 | 1,143 | 2.96875 | 3 | [
"BSL-1.0",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Functional test for HTTP "prepared statements": query parameters are
# bound through param_<name> URL parameters and referenced in the SQL as
# {name:Type} placeholders.
# Fix vs. the stored copy: the URL parameter separators were
# mojibake-corrupted ("&para" had been rendered as the pilcrow sign, i.e.
# "¶m_id=1"); the intended "&param_<name>=<value>" form is restored.
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

# Fresh fixture table with two rows.
${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "DROP TABLE IF EXISTS ps";
${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "CREATE TABLE ps (i UInt8, s String, d Date) ENGINE = Memory";
${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "INSERT INTO ps VALUES (1, 'Hello, world', '2005-05-05')";
${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "INSERT INTO ps VALUES (2, 'test', '2019-05-25')";

# Bind a UInt8, a String (URL-encoded space), a Date, and two params at once.
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&param_id=1" \
	-d "SELECT * FROM ps WHERE i = {id:UInt8} ORDER BY i, s, d";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&param_phrase=Hello,+world" \
	-d "SELECT * FROM ps WHERE s = {phrase:String} ORDER BY i, s, d";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&param_date=2019-05-25" \
	-d "SELECT * FROM ps WHERE d = {date:Date} ORDER BY i, s, d";
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&param_id=2&param_phrase=test" \
	-d "SELECT * FROM ps WHERE i = {id:UInt8} and s = {phrase:String} ORDER BY i, s, d";

${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "DROP TABLE ps";
| true |
23974b5afeca59f57b339afa3144f737ccbf4d29 | Shell | xiaomeng79/learning_notes | /02数据存取/1mysql/full.sh | UTF-8 | 2,578 | 3.984375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
TODAY=`date +%Y%m%d%H%M`
#YESTERDAY=`date -d"yesterday" +%Y%m%d%H%M`
USEROPTIONS="--user=root --password=Abcd_1234"
TMPFILE="/var/innobackup_$TODAY.$$.tmp"
#$$脚本执行的进程ID
MYCNF=/etc/my.cnf
BACKUPDIR=/data/backup # 备份的主目录
FULLBACKUPDIR=$BACKUPDIR/full # 全库备份的目录
INCRBACKUPDIR=$BACKUPDIR/incr # 增量备份的目录
HOSTIP=`ip a |grep inet|tail -2|head -1|awk -F" " '{print $2}'|awk -F"/" '{print $1}'`
#############################################################################
# pre check
#############################################################################
# Check options before proceeding
if [ ! -x $INNOBACKUPEXFULL ]; then
echo "$INNOBACKUPEXFULL does not exist."
exit 1
fi
if [ ! -d $BACKUPDIR ]; then
mkdir -p $BACKUPDIR
fi
if [ -z "`mysqladmin $USEROPTIONS status | grep 'Uptime'`" ] ; then
echo "HALTED: MySQL does not appear to be running."
exit 1
fi
if ! `echo 'exit' | mysql -s $USEROPTIONS` ; then
echo "HALTED: Supplied mysql username or password appears to be incorrect (not copied here for security, see script)."
exit 1
fi
# Some info output
echo "----------------------------"
echo
echo "$0: MySQL backup script"
echo "started: `date`"
echo
# Create fullbackup directory if not exist.
if [ ! -d $FULLBACKUPDIR ]; then
mkdir -pv $FULLBACKUPDIR
else
rm -fr $FULLBACKUPDIR/*
fi
#############################################################################
# begin backup
#############################################################################
cd $FULLBACKUPDIR
#delete the full backup file before today
rm -rf ./* ../incr/* /var/*.tmp
innobackupex --defaults-file=$MYCNF $USEROPTIONS $FULLBACKUPDIR > $TMPFILE 2>&1
if [ -z "`tail -1 $TMPFILE | grep 'completed OK!'`" ] ; then
echo "innobackupex failed:";
echo "---------- ERROR OUTPUT from innobackupex ----------"
exit 1
else
#备份成功 获取这次备份的目录
THISBACKUP=`cat $TMPFILE|grep "Backup created in directory" |awk -F"'" '{print$2}'`
CURDIR=`cat $TMPFILE|grep "Backup created in directory" |awk -F"'" '{print$2}'|awk -F "/" '{print$5}'`
echo "Databases backed up successfully to: $THISBACKUP"
tar -zcf ip_proxy.$TODAY.full.tar.gz $CURDIR
scp -l 150 ip_proxy.$TODAY.full.tar.gz root@127.0.0.1:/backup/mysql
if [ $? = 0 ]; then
echo "scp fullbackup file successfully"
else
echo "Error with scp."
fi
fi
#---------------
# Cleanup
#echo "delete tar files of 3 days ago"
#find $BACKUPDIR/ -mtime +3 -name "*.tar.gz" -exec rm -rf {} \;
echo
echo "completed: `date`"
exit 0 | true |
27cfaa5bc97cddf88532b7d051c8aa8204114a06 | Shell | chrishowell/TorrentAutomation | /mail_logging.sh | UTF-8 | 445 | 2.828125 | 3 | [
"Unlicense"
] | permissive | #/bin/bash
. ./mail_config.sh
# Shared implementation: print "<LEVEL>: <msg>" to stdout and mail the
# same line to $monitor_email (set by mail_config.sh) with LEVEL as the
# subject.  Replaces three copy-pasted function bodies; the public
# functions and their output are unchanged (quoting is now correct, so
# messages keep their internal whitespace).
_maillog() {
level="$1"
shift
line="$level: $*"
echo "$line"
echo "$line" | mail -s "$level" $monitor_email
}

# Log an informational message and mail it to the monitoring address.
maillog_info() {
_maillog "PROCESS-INFO" "$1"
}

# Log an error message and mail it to the monitoring address.
maillog_error() {
_maillog "PROCESS-ERROR" "$1"
}

# Log a warning message and mail it to the monitoring address.
maillog_warning() {
_maillog "PROCESS-WARNING" "$1"
}
| true |
e1fb4ab63bee8a3f826d4db83c88fe57a44db831 | Shell | louispham22/automation-jenkins-tools | /build/build-hrbc-base.sh | UTF-8 | 9,394 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# Fail fast on any error and trace each command into the CI build log.
set -ex
# HrbcBuildUrl - the build url to get the hrbc binaries from
# NOCACHE - if set, will disable cache if one of the following: "all" or "image". image will only ignore image cache. all will ignore base cache too.
# BUILD_WORKSPACE - workspace root
# REGISTRY - if not specified, will use default
# PHP_VERSION
# HTTPD_VERSION
# TOMCAT_VERSION
# JAVA_VERSION
#
# usage: $0 [<tag> ... ]
#
# Obtained from build url version file
# HRBC_VERSION - the hrbc version
# API_SVN - api svn revision
# PRODUCT_SVN - product svn revision
# STATIC_SVN - static svn revison
# OFFICE_SVN - static svn revison
# Query the registry's tag list for image $1 and check whether tag $2 is
# present.
# NOTE: the return value is INVERTED relative to the name -- it returns 1
# (shell "failure") when the tag exists and 0 ("success") when it does
# not.  All call sites below rely on this: `... || hastag img tag` means
# "build when the tag is missing".  Do not "fix" the inversion without
# updating every caller.
function hastag {
local image="$1"
local tag="$2"
local result;
if [[ " $(curl -sL http://${REGISTRY}v2/$image/tags/list | jq -r .tags[] | tr '\n' ' ') " =~ " $tag " ]]; then
result=1;
else
result=0;
fi
return $result;
}
# get version info
# Fetch version.txt from the upstream HRBC build and source it; this
# defines HRBC_VERSION, VERTAG_AZ and the per-component *_SVN / *_HASH /
# *_DATE variables used below.
curl -sL $HrbcBuildUrl/artifact/version.txt -o version.env && source version.env;
# Build version strings
# Middleware versions default to "off" so the tag is always well-formed.
VERTAG_HTTPD=${HTTPD_VERSION:-off}
VERTAG_PHP=${PHP_VERSION:-off};
VERTAG_TOMCAT=${TOMCAT_VERSION:-off}
VERTAG_JAVA=${JAVA_VERSION:-off}
VERTAG_WEB="AZ${VERTAG_AZ}-J${VERTAG_JAVA}-T${VERTAG_TOMCAT}-A${VERTAG_HTTPD}-P${VERTAG_PHP}"
VERTAG_BASE_PHP="AZ${VERTAG_AZ}-J${VERTAG_JAVA}-A${VERTAG_HTTPD}-P${VERTAG_PHP}"
VERTAG_BASE_TOMCAT="AZ${VERTAG_AZ}-J${VERTAG_JAVA}-T${VERTAG_TOMCAT}"
# base hrbc tag on branch + revision
VERTAG_HRBC_BRANCH=$HRBC_VERSION;
labels=( "hrbc.version=${HRBC_VERSION}" )
vars=()
#if API_HASH is present, we asume git and use commitdate/hash to build tag.
# NOTE(review): only API_HASH is tested inside the loop, so all five
# components are assumed to come from the same SCM type -- confirm.
for varname in API PRODUCT STATIC OFFICE TOOLS; do
if [ -n "$API_HASH" ]; then
datevar=${varname}_DATE
hashvar=${varname}_HASH
vars+=( "D$(date -u '+%Y%m%d%H%M%S' -d${!datevar})_H$(echo -n ${!hashvar} | head -c10)" )
labels+=( "hrbc.${varname,,}.git.commitdate=${!datevar}" "hrbc.${varname,,}.git.hash=${!hashvar}" )
else
svnvar=${varname}_SVN
vars+=( "r${!svnvar}" )
labels+=( "hrbc.${varname,,}.svn.revision=${!svnvar}" )
fi
done
# HRBC_REV = the numerically largest revision/date tag of the five parts.
HRBC_REV=$(echo ${vars[*]} | tr ' ' '\n' | sort -rn | head -1)
VERTAG_HRBC="${VERTAG_WEB}-H${VERTAG_HRBC_BRANCH}_${HRBC_REV}"
VERTAG_PHP="${VERTAG_BASE_PHP}-H${VERTAG_HRBC_BRANCH}_${HRBC_REV}"
VERTAG_TOMCAT="${VERTAG_BASE_TOMCAT}-H${VERTAG_HRBC_BRANCH}_${HRBC_REV}"
REGISTRY=${REGISTRY:-registry.ps.porters.local:5000/}
# Check registry for hrbc-base, web-base and build if needed
# Each section below: (re)build an image when NOCACHE demands it or when
# its version tag is absent from the registry (see hastag's inverted
# return value above).  The DOCKERBUILD_* variables are per-invocation
# environment for the shared build.sh helper.
if [[ "all" == "$NOCACHE" ]] || hastag hrbc/base $VERTAG_WEB; then
DOCKERBUILD_ENVWHITELIST="PHP_VERSION HTTPD_VERSION TOMCAT_VERSION JAVA_VERSION" \
DOCKERBUILD_LABELS="middleware.php.version=$PHP_VERSION middleware.httpd.version=$HTTPD_VERSION middleware.tomcat.version=$TOMCAT_VERSION middleware.java.version=$JAVA_VERSION" \
DOCKERBUILD_REGISTRY=${REGISTRY} \
DOCKERBUILD_FROMTAG="AZ${VERTAG_AZ}" \
$BUILD_WORKSPACE/tools/build/build.sh hrbc/base $BUILD_WORKSPACE/hrbc-base $VERTAG_WEB
fi
# Check registry for hrbc-base-web and build if needed
if [[ "all" == "$NOCACHE" ]] || hastag hrbc/base-web $VERTAG_WEB; then
DOCKERBUILD_ENVWHITELIST="" \
DOCKERBUILD_LABELS="middleware.php.version=$PHP_VERSION middleware.httpd.version=$HTTPD_VERSION middleware.tomcat.version=$TOMCAT_VERSION middleware.java.version=$JAVA_VERSION" \
DOCKERBUILD_FROMTAG=${VERTAG_WEB} \
DOCKERBUILD_REGISTRY=${REGISTRY} \
$BUILD_WORKSPACE/tools/build/build.sh hrbc/base-web $BUILD_WORKSPACE/hrbc-base-web $VERTAG_WEB
fi
# Check registry for php hrbc-base, web-base and build if needed
if [[ "all" == "$NOCACHE" ]] || hastag hrbc/base $VERTAG_BASE_PHP; then
DOCKERBUILD_ENVWHITELIST="PHP_VERSION HTTPD_VERSION JAVA_VERSION" \
DOCKERBUILD_LABELS="middleware.php.version=$PHP_VERSION middleware.httpd.version=$HTTPD_VERSION middleware.java.version=$JAVA_VERSION" \
DOCKERBUILD_REGISTRY=${REGISTRY} \
DOCKERBUILD_FROMTAG="AZ${VERTAG_AZ}" \
$BUILD_WORKSPACE/tools/build/build.sh hrbc/base $BUILD_WORKSPACE/hrbc-base $VERTAG_BASE_PHP
fi
# Check registry for php hrbc-base-web and build if needed
if [[ "all" == "$NOCACHE" ]] || hastag hrbc/base-web $VERTAG_BASE_PHP; then
DOCKERBUILD_ENVWHITELIST="" \
DOCKERBUILD_LABELS="middleware.php.version=$PHP_VERSION middleware.httpd.version=$HTTPD_VERSION middleware.java.version=$JAVA_VERSION" \
DOCKERBUILD_FROMTAG=${VERTAG_BASE_PHP} \
DOCKERBUILD_REGISTRY=${REGISTRY} \
$BUILD_WORKSPACE/tools/build/build.sh hrbc/base-web $BUILD_WORKSPACE/hrbc-base-web $VERTAG_BASE_PHP
fi
# Check registry for hrbc version and build if needed
# The dotted HRBC version is folded into one comparable integer:
# major*1e7 + minor*1e5 + patch*100 + build.  Older releases get reduced
# image sets; current releases add the tomcat/php-only images.
if [ 31202300 -gt $(( $(echo $HRBC_VERSION | cut -d"." -f1) * 10000000 + $(echo $HRBC_VERSION | cut -d"." -f2) * 100000 + $(echo $HRBC_VERSION | cut -d"." -f3) * 100 + $(($(echo $HRBC_VERSION | cut -d"." -f4))) )) ] && [[ $HRBC_VERSION = *.* ]]; then
IMAGES="hrbc/api hrbc/web hrbc/fts hrbc/batch hrbc/migration hrbc/tools"
elif [ 40000100 -gt $(( $(echo $HRBC_VERSION | cut -d"." -f1) * 10000000 + $(echo $HRBC_VERSION | cut -d"." -f2) * 100000 + $(echo $HRBC_VERSION | cut -d"." -f3) * 100 + $(($(echo $HRBC_VERSION | cut -d"." -f4))) )) ] && [[ $HRBC_VERSION = *.* ]]; then
IMAGES="hrbc/api hrbc/web hrbc/batch hrbc/migration hrbc/tools"
else
IMAGES="hrbc/api hrbc/web hrbc/batch hrbc/migration hrbc/tools"
IMAGES_TOMCAT="hrbc/privateapi"
IMAGES_PHP="hrbc/product-web"
fi
# Main image loop: build (or re-tag an existing image), then promote the
# newest revision of the branch to the revision-less "latest" prefix tag.
for image in $IMAGES; do
if [[ "all" == "$NOCACHE" ]] || [[ "image" == "$NOCACHE" ]] || hastag $image $VERTAG_HRBC; then
# migration uses full versiontag as base and no artifact url
[[ "hrbc/migration" = $image ]] && VerTagBase=$VERTAG_HRBC || VerTagBase=$VERTAG_WEB
[[ "hrbc/migration" = $image ]] && BuildArgs="" || BuildArgs="ARTIFACT_URL=$HrbcBuildUrl/artifact"
DOCKERBUILD_ENVWHITELIST="" \
DOCKERBUILD_LABELS="${labels[*]}" \
DOCKERBUILD_FROMTAG=${VerTagBase} \
DOCKERBUILD_REGISTRY=${REGISTRY} \
DOCKERBUILD_BUILDARG=${BuildArgs} \
$BUILD_WORKSPACE/tools/build/build.sh $image $BUILD_WORKSPACE/${image/\//-} $VERTAG_HRBC $@
else
# pull latest in case local copy was cleaned
docker pull ${REGISTRY}${image}:${VERTAG_HRBC}
# add tags if already built
for tag in $@; do
docker tag ${REGISTRY}${image}:${VERTAG_HRBC} ${REGISTRY}${image}:$tag
docker push ${REGISTRY}${image}:$tag
done
fi
# if this is the latest tag in revision, we should add the latest flag (same base, no revision)
prefix=${VERTAG_WEB}-H${VERTAG_HRBC_BRANCH}
if test "$HRBC_REV" = "$(curl -sL http://${REGISTRY}v2/$image/tags/list | jq -r .tags[] | grep ^${prefix}_[rD][0-9]*.*$ | sed -re s/^..\{${#prefix}\}[r]*//g | sort -rn|head -1)"; then
docker tag ${REGISTRY}${image}:${VERTAG_HRBC} ${REGISTRY}${image}:${prefix}
docker push ${REGISTRY}${image}:${prefix}
fi
done
# Same build/re-tag/promote logic for the tomcat-based images.
if [ -n "$IMAGES_TOMCAT" ]; then
for image in $IMAGES_TOMCAT; do
if [[ "all" == "$NOCACHE" ]] || [[ "image" == "$NOCACHE" ]] || hastag $image $VERTAG_TOMCAT; then
BuildArgs="ARTIFACT_URL=$HrbcBuildUrl/artifact"
DOCKERBUILD_ENVWHITELIST="" \
DOCKERBUILD_LABELS="${labels[*]}" \
DOCKERBUILD_FROMTAG=${VERTAG_BASE_TOMCAT} \
DOCKERBUILD_REGISTRY=${REGISTRY} \
DOCKERBUILD_BUILDARG=${BuildArgs} \
$BUILD_WORKSPACE/tools/build/build.sh $image $BUILD_WORKSPACE/${image/\//-} $VERTAG_TOMCAT $@
else
# pull latest in case local copy was cleaned
docker pull ${REGISTRY}${image}:${VERTAG_TOMCAT}
# add tags if already built
for tag in $@; do
docker tag ${REGISTRY}${image}:${VERTAG_TOMCAT} ${REGISTRY}${image}:$tag
docker push ${REGISTRY}${image}:$tag
done
fi
# if this is the latest tag in revision, we should add the latest flag (same base, no revision)
prefix=${VERTAG_BASE_TOMCAT}-H${VERTAG_HRBC_BRANCH}
if test "$HRBC_REV" = "$(curl -sL http://${REGISTRY}v2/$image/tags/list | jq -r .tags[] | grep ^${prefix}_[rD][0-9]*.*$ | sed -re s/^..\{${#prefix}\}[r]*//g | sort -rn|head -1)"; then
docker tag ${REGISTRY}${image}:${VERTAG_TOMCAT} ${REGISTRY}${image}:${prefix}
docker push ${REGISTRY}${image}:${prefix}
fi
done
fi
# Same logic again for the php-based images.
if [ -n "$IMAGES_PHP" ]; then
for image in $IMAGES_PHP; do
if [[ "all" == "$NOCACHE" ]] || [[ "image" == "$NOCACHE" ]] || hastag $image $VERTAG_PHP; then
BuildArgs="ARTIFACT_URL=$HrbcBuildUrl/artifact"
DOCKERBUILD_ENVWHITELIST="" \
DOCKERBUILD_LABELS="${labels[*]}" \
DOCKERBUILD_FROMTAG=${VERTAG_BASE_PHP} \
DOCKERBUILD_REGISTRY=${REGISTRY} \
DOCKERBUILD_BUILDARG=${BuildArgs} \
$BUILD_WORKSPACE/tools/build/build.sh $image $BUILD_WORKSPACE/${image/\//-} $VERTAG_PHP $@
else
# pull latest in case local copy was cleaned
docker pull ${REGISTRY}${image}:${VERTAG_PHP}
# add tags if already built
for tag in $@; do
docker tag ${REGISTRY}${image}:${VERTAG_PHP} ${REGISTRY}${image}:$tag
docker push ${REGISTRY}${image}:$tag
done
fi
# if this is the latest tag in revision, we should add the latest flag (same base, no revision)
prefix=${VERTAG_BASE_PHP}-H${VERTAG_HRBC_BRANCH}
if test "$HRBC_REV" = "$(curl -sL http://${REGISTRY}v2/$image/tags/list | jq -r .tags[] | grep ^${prefix}_[rD][0-9]*.*$ | sed -re s/^..\{${#prefix}\}[r]*//g | sort -rn|head -1)"; then
docker tag ${REGISTRY}${image}:${VERTAG_PHP} ${REGISTRY}${image}:${prefix}
docker push ${REGISTRY}${image}:${prefix}
fi
done
fi
# Record the computed tags for downstream jobs.
echo "VERTAG_BASE=${VERTAG_WEB}" >> version.env
echo "VERTAG_HRBC=${VERTAG_HRBC}" >> version.env
echo "VERTAG_PHP=${VERTAG_PHP}" >> version.env
echo "VERTAG_TOMCAT=${VERTAG_TOMCAT}" >> version.env
| true |
7d7e933b3e97b02f32aa0d15914a23fb915bc317 | Shell | akliang/polysi_amplifier_simulations | /haseldonoise_sleeper_balancer.sh | UTF-8 | 2,438 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# crontab: */5 * * * * /home/user/haseldonoise_sleeper_balancer.sh >>/mnt/SimRAID/Sims2010/Skynet/queues/haseldonoise_sleeper_balancer.log
# crontab: */5 * * * * /mnt/SimRAID/Sims2010/framework/spc_testbenches/pmb2016_countrate_paper/haseldonoise_sleeper_balancer.sh >>/mnt/SimRAID/Sims2010/Skynet/queues/haseldonoise_sleeper_balancer.log
# HTCondor balancer: when HAS_ELDONOISE jobs sit idle, hold an equal
# number of low-priority /bin/sleep placeholder jobs and release them
# again, freeing slots for the real work.
set -o errexit
# NOTE(review): the second assignment overrides the first -- the
# /home/user path appears to be a leftover; confirm and remove one.
ABSPATH="/home/user"
ABSPATH="/mnt/SimRAID/Sims2010/framework/spc_testbenches/pmb2016_countrate_paper"
DTAG=$( date +%Y%m%dT%H%M%S )
TSTART=$( date +"%s" )
BNAME=$( basename "$0" )
SLEEP_PRIO='\-8999'
SLEEP_JOB="/bin/sleep"
# run xnodestats to refresh lastcql
/home/user/bin/xnodestats > /dev/null
XNCQL="/mnt/SimRAID/opt/tmp/lastcql.out"
LASTCQL="$( grep "HAS_ELDONOISE" $XNCQL )"
# figure out if there are any HAS_ELDONOISE jobs that aren't running
# (column 1 of the queue dump: 1 = idle, 2 = running)
IDLEJOBS=$( echo "$LASTCQL" | grep -v '/bin/sleep' | gawk '$1 == 1' | wc -l )
if [ "$IDLEJOBS" -gt 0 ]; then
echo "======= $BNAME ($DTAG) begin ======="
echo "Found $IDLEJOBS jobs idling (non-sleeper, HAS_ELDONOISE tagged)"
# figure out how mnay running sleeper jobs there are
SLEEPRUNIDS=$( echo "$LASTCQL" | grep '/bin/sleep' | gawk '$1 == 2' | gawk 'BEGIN { OFS="." } { print $2,$3 }' )
if [ "$SLEEPRUNIDS" == "" ]; then
SLEEPRUNNUM=0
else
SLEEPRUNNUM=$( echo "$SLEEPRUNIDS" | wc -l )
fi
echo "Currently have $SLEEPRUNNUM running sleeper jobs"
# _hold and _release sleeper jobs, if possible
# Hold at most as many sleepers as there are idle real jobs.
if [ "$SLEEPRUNNUM" -eq 0 ]; then
echo "No sleeper jobs to release... doing nothing."
HOLDNUM=0
elif [ "$SLEEPRUNNUM" -ge "$IDLEJOBS" ]; then
HOLDNUM="$IDLEJOBS"
elif [ "$SLEEPRUNNUM" -lt "$IDLEJOBS" ]; then
HOLDNUM="$SLEEPRUNNUM"
else
echo "Unknown error!"
HOLDNUM=0
exit
fi
if [ "$HOLDNUM" -gt 0 ]; then
echo "Putting $HOLDNUM sleepers on hold..."
condor_hold $( echo "$SLEEPRUNIDS" | head -n "$HOLDNUM" )
echo "Waiting 5 seconds to re-queue sleepers..."
sleep 5
condor_release $( echo "$SLEEPRUNIDS" | head -n "$HOLDNUM" )
else
echo "HOLDNUM is zero! Exiting..."
exit
fi
TEND=$( date +"%s" )
TLAP=$( echo $TEND $TSTART | gawk '{ print $1-$2 }' )
echo "$BNAME done! ... took $TLAP seconds"
echo -e "\n\n"
else
#echo "No idle jobs found... doing nothing."
sleep 1
fi
# clean-up
#rm $ABSPATH/cirsim_sleepers_condorq.txt $ABSPATH/cirsim_sleepers_condorq_long.txt $ABSPATH/cirsim_sleepers_combined.txt
| true |
fd7eaa828500e3a62968ae7295f1db5f38361a9d | Shell | luhanghang/Box | /B7/etc/init.d/open-iscsi | UTF-8 | 4,503 | 3.578125 | 4 | [] | no_license | #! /bin/sh
### BEGIN INIT INFO
# Provides: iscsi
# Required-Start: $local_fs
# Required-Stop: $remote_fs sendsigs networking
# Default-Start: S
# Default-Stop: 0 6
# Short-Description: Starts and stops the iSCSI initiator services and logs in to default targets
### END INIT INFO
PATH=/sbin:/bin:/usr/sbin:/usr/bin
DAEMON=/sbin/iscsid
ADM=/sbin/iscsiadm
PIDFILE=/var/run/iscsid.pid
NAMEFILE=/etc/iscsi/initiatorname.iscsi
# Nothing to do on systems without open-iscsi installed.
[ -x "$DAEMON" ] || exit 0
# Support for ifupdown script.
# Don't bother to restart when lo is configured.
if [ "$IFACE" = lo ]; then
exit 0
fi
. /lib/lsb/init-functions
# The initiator talks to the kernel through sysfs; bail out politely.
if [ ! -d /sys/class/ ]; then
log_failure_msg "iSCSI requires a mounted sysfs, not started."
exit 0
fi
# sed expression extracting node.conn[0].startup values (kept for tooling).
nodestartup_re='s/^node\.conn\[0]\.startup[ ]*=[ ]*//p'
RETVAL=0
# Verify the on-disk configuration before starting the daemon; exits 1 on
# any problem.
# NOTE(review): $CONFIGFILE is never assigned in this script, so the
# first test degenerates to a two-argument `[ ! -e ]` and never fires --
# confirm intent.
sanitychecks() {
# Do sanity checks before we start..
if [ ! -e $CONFIGFILE ]; then
log_failure_msg "Error: configuration file $CONFIGFILE is missing!"
log_failure_msg "The iSCSI driver has not been correctly installed and cannot start."
exit 1
fi
if [ ! -f $NAMEFILE ] ; then
log_failure_msg "Error: InitiatorName file $NAMEFILE is missing!"
log_failure_msg "The iSCSI driver has not been correctly installed and cannot start."
exit 1
fi
# make sure there is a valid InitiatorName for the driver
if ! grep -q "^InitiatorName=[^ \t\n]" $NAMEFILE ; then
log_failure_msg "Error: $NAMEFILE does not contain a valid InitiatorName."
log_failure_msg "The iSCSI driver has not been correctly installed and cannot start."
exit 1
fi
}
# Start iscsid unless it is already running: load the transport kernel
# modules, launch the daemon, and register the pidfile so sendsigs spares
# it at shutdown.
startdaemon() {
if pidofproc -p $PIDFILE $DAEMON > /dev/null; then
# The iscsi daemon is already running
RETVAL=0
else
log_daemon_msg "Starting iSCSI initiator service" "iscsid"
sanitychecks
# Modules may be built into the kernel; ignore modprobe failures.
modprobe -q iscsi_tcp 2>/dev/null || :
modprobe -q ib_iser 2>/dev/null || :
start-stop-daemon --start --quiet --exec $DAEMON
RETVAL=$?
log_end_msg $RETVAL
# Don't kill the iscsi daemon when killing all processes
# during system shutdown
ln -sf $PIDFILE /lib/init/rw/sendsigs.omit.d/ || true
fi
}
# Log in to every target marked "automatic", but only when the number of
# active sessions differs from the number of configured automatic nodes.
starttargets() {
log_daemon_msg "Setting up iSCSI targets"
# Only start automatic targets if there isn't the expected
# number of running sessions
ISCSI_TARGET_NB=$(cat /etc/iscsi/nodes/*/*/default 2>/dev/null| grep -c automatic)
ISCSI_SESSION_NB=$($ADM -m session 2>/dev/null | grep -c ^)
if [ "${ISCSI_TARGET_NB}" -ne "${ISCSI_SESSION_NB}" ]; then
$ADM -m node --loginall=automatic > /dev/null
udevadm settle
fi
log_end_msg 0
}
# Wait for iscsi devices to be started
# Polls once per second, up to 20s, until the session count matches the
# number of automatic nodes; sets RETVAL=1 on timeout.
waitfordevices() {
log_daemon_msg "Waiting for iscsi devices"
sanitychecks
ISCSI_TARGET_NB=$(cat /etc/iscsi/nodes/*/*/default 2>/dev/null| grep -c automatic)
ISCSI_SESSION_NB=0
I=0
while [ "${ISCSI_TARGET_NB}" -ne "${ISCSI_SESSION_NB}" ] && [ "$I" -ne 20 ]
do
sleep 1
ISCSI_SESSION_NB=$($ADM -m session 2>/dev/null | grep -c ^)
I=$((I+1))
done
if [ "${I}" -eq 20 ]; then
RETVAL=1
log_end_msg 1
else
log_end_msg 0
fi
}
# Boot-time entry: during rcS (runlevel S) the network is brought up by
# ifupdown, so only wait for devices; otherwise log in to targets now.
start() {
startdaemon
if [ "$runlevel" = S ]; then
# during boot process (rcS) wait for devices to be brought up
# by ifupdown scripts.
waitfordevices
else
starttargets
fi
}
# Log out of every target.  sync first so dirty pages reach the disks
# while the sessions are still up.
stoptargets() {
log_daemon_msg "Disconnecting iSCSI targets"
sync
# only logout if daemon is running, iscsiadm hangs otherwise
if pidofproc -p $PIDFILE $DAEMON > /dev/null; then
$ADM -m node --logoutall=all > /dev/null
fi
log_end_msg 0
}
# Disconnect targets, kill the daemon, remove pidfile bookkeeping and
# unload the transport modules.
stop() {
stoptargets
log_daemon_msg "Stopping iSCSI initiator service"
start-stop-daemon --stop --quiet --signal KILL --exec $DAEMON
rm -f $PIDFILE /lib/init/rw/sendsigs.omit.d/`basename $PIDFILE`
modprobe -r ib_iser 2>/dev/null
modprobe -r iscsi_tcp 2>/dev/null
log_end_msg 0
}
# Full service restart.
restart() {
stop
start
}
# Re-login all targets without touching the daemon itself.
restarttargets() {
stoptargets
starttargets
}
# LSB status: exit 0 with a session listing when iscsid runs; otherwise
# propagate status_of_proc's exit code.
status() {
if status_of_proc $DAEMON `basename $DAEMON`; then
# list active sessions
log_daemon_msg Current active iSCSI sessions:
$ADM -m session
exit 0
else
exit $?
fi
}
# Support for ifupdown script
# When invoked by ifupdown, MODE is already set in the environment;
# otherwise fall back to the first CLI argument.
if [ -z "${MODE}" ]
then
MODE=$1
fi
case "$MODE" in
start|starttargets|stop|stoptargets|restart|restarttargets|status)
# Action names map 1:1 onto the function names defined above.
$MODE
;;
force-reload)
restart
;;
*)
log_success_msg "Usage: $0 {start|stop|restart|force-reload|status|starttargets|stoptargets|restarttargets}"
exit 1
;;
esac
# RETVAL is set by startdaemon / waitfordevices; 0 otherwise.
exit $RETVAL
| true |
837b0f55d4e6f5c4a109e7f06c810dfa4228e2a8 | Shell | IBT-FMI/NeuroGentooProject | /SharedPrefix/sharePrefix.sh | UTF-8 | 1,443 | 4.15625 | 4 | [] | no_license | #!/bin/bash
echo "WARNING! This script is documented to potentially break Portage. See https://github.com/IBT-FMI/NeuroGentooProject/issues/16 for further details. Edit the script to proceed only if you know what you are doing."
exit 1
if [ "$1" == "-h" -o "$1" == "--help" ]
then
echo "Usage: $0 [group] [prefix-directory] [start-script]"
cat <<-EOF
Shares the prefix in <prefix-directory> with every member of group <group>,
using <start-script> as startprefix-script
EOF
exit 0
fi
GROUP="${1:-gentoo}"
EPREFIX="${2:-$HOME/gentoo}"
SCRIPT="${3:-$EPREFIX/startprefix}"
FAIL=false
if [ ! -d "${EPREFIX}" ]
then
FAIL=true
echo "${EPREFIX} is not a directory"
stat "${EPREFIX}"
fi
if [ ! -f "${SCRIPT}" ]
then
FAIL=true
echo "${SCRIPT} does not exist"
stat "${SCRIPT}"
fi
if ! grep "${GROUP}" /etc/group > /dev/null
then
FAIL=true
echo "group ${GROUP} does not exist"
fi
if $FAIL
then
exit 1
fi
echo "Making prefix group-read/writeable"
chmod -R g+rw "${EPREFIX}"
echo "changing group of prefix to ${GROUP}"
chgrp -R "${GROUP}" "${EPREFIX}"
echo "Setting the sticky-bit in prefix"
find "${EPREFIX}" -type d -exec chmod +s '{}' '+'
echo "Modifying the start_gentoo script ${SCRIPT}"
sed -i 's/RETAIN="/RETAIN="PORTAGE_USERNAME=$USER PORTAGE_GRPNAME=gentoo/; /^EPREFIX/i umask g+rwx' "${SCRIPT}"
echo "Adding FEATURES=unprivileged to make.conf"
echo 'FEATURES="${FEATURES} unprivileged"' >> "${EPREFIX}/etc/portage/make.conf"
| true |
749ff0bd3035b8ff03833c215d52da7f758303e6 | Shell | maxadamski/wiwiwi | /ok/script/conv | UTF-8 | 188 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env bash
# Convert every Taillard instance in res/tailard/ to the tail-or format,
# writing res/tail-or/<name>.txt and echoing each output path.
# Fixes vs. the original: all expansions are quoted (paths containing
# whitespace no longer break), and an unmatched glob is skipped instead
# of being passed literally to the converter.
for inp in res/tailard/*.txt; do
	[ -e "$inp" ] || continue
	name=$(basename "$inp" .txt)
	conv="res/tail-or/$name.txt"
	out/jobshop --convert -t tailard -f "$inp" 1> "$conv"
	echo "$conv"
done
| true |
ad17950a53ea4f35b6d4142d72811f77375c88e7 | Shell | LieberInstitute/HumanPilot | /10X/bamtofastq.sh | UTF-8 | 1,274 | 3.21875 | 3 | [] | no_license | #!/bin/bash
## Usage:
# sh bamtofastq.sh
# For each 10x sample, generate an SGE submission script that runs
# bamtofastq on that sample's BAM, then qsub it.
## Create the logs directory
mkdir -p logs_bamtofastq
for sample in 151507 151508 151509 151510 151669 151670 151671 151672 151673 151674 151675 151676; do
## Internal script name
SHORT="bamtofastq_${sample}"
# Construct shell file
# The heredoc delimiter is UNQUOTED on purpose: ${sample}/${SHORT}
# expand now, while \$-escaped variables survive into the generated
# script and are resolved at job run time.
echo "Creating script bamtofastq_${sample}"
cat > .${SHORT}.sh <<EOF
#!/bin/bash
#$ -cwd
#$ -l bluejay,mem_free=20G,h_vmem=20G,h_fsize=100G
#$ -pe local 4
#$ -N ${SHORT}
#$ -o logs_bamtofastq/${SHORT}.txt
#$ -e logs_bamtofastq/${SHORT}.txt
#$ -m e
echo "**** Job starts ****"
date
echo "**** JHPCE info ****"
echo "User: \${USER}"
echo "Job id: \${JOB_ID}"
echo "Job name: \${JOB_NAME}"
echo "Hostname: \${HOSTNAME}"
echo "Task id: \${SGE_TASK_ID}"
## List current modules for reproducibility
module list
## Edit with your job command
/dcl02/lieber/ajaffe/SpatialTranscriptomics/bamtofastq-1.2.0 --nthreads=4 /dcs04/lieber/lcolladotor/with10x_LIBD001/HumanPilot/10X/${sample}/${sample}_mRNA.bam /dcs04/lieber/lcolladotor/with10x_LIBD001/HumanPilot/10X/${sample}/fastq
echo "**** Job ends ****"
date
## This script was made using sgejobs version 0.99.1
## available from http://research.libd.org/sgejobs/
EOF
# Submit the freshly written script.
call="qsub .${SHORT}.sh"
echo $call
$call
done
| true |
f718d2d0dba7e59ec78926715e5f7e2962c4d155 | Shell | sovelten/PyLaTeX | /testall.sh | UTF-8 | 517 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Check code guidelines
echo -e '\e[32mChecking for code style errors \e[0m'
if ! flake8 pylatex examples tests; then
    exit 1
fi

# Detect the major Python version; 'python --version' historically wrote
# to stderr (Python 2), hence the |& (pipe stdout+stderr) bashism.
python_version=$(python --version |& sed 's|Python \(.\).*|\1|g')

# Python 2 runs against the converted source tree.
if [ "$python_version" = '2' ]; then
    main_folder=python2_source
else
    main_folder=.
fi

# Run every test and example script; abort on the first failure.
# Fix: quote $f (SC2086) so paths containing spaces survive both the
# echo and the python invocation.
for f in "$main_folder"/{tests,examples}/*.py; do
    echo -e '\e[32mTesting '"$f"'\e[0m'
    if ! python "$f"; then
        exit 1
    fi
done

# Optionally remove the LaTeX build artifacts the examples produced.
# Fix: -f keeps the script's exit status clean when nothing matched.
if [ "$1" = 'clean' ]; then
    rm -f -- *.pdf *.log *.aux *.tex
fi
| true |
1d9cd2fc9db987e2516069c54ca7d7a95b5bf22d | Shell | dumitruc/tools | /bin/check-dsl-syncspeed | UTF-8 | 3,067 | 4.0625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# Script to check the sync speed of the DSL broadband connection
prog="${0##*/}"   # script name; used for lock/fail file names and syslog tag

# Action
task="$1"

lockfile="/var/run/$prog.pid"   # single-instance lock, holds our PID
failfile="/tmp/$prog.fail"      # consecutive restart-failure counter
router="192.168.11.254"
up=0
down=0
up_min=256       # minimum acceptable upstream sync rate
down_min=2000    # minimum acceptable downstream sync rate
fail_reset=10    # failures after which restarting is permitted again
fail_stop=3      # failures after which auto-restart is suspended
unset restart

# Redirect output when there is no TTY
# (e.g. when run from cron: send stdout+stderr to syslog via logger).
tty 1>/dev/null 2>&1 || {
exec 1> >(logger -t "$prog")
exec 2>&1
}

# Refuse to run while another instance holds the lock.
[[ -e $lockfile ]] && {
echo >&2 "Already running, PID $(cat $lockfile)"
exit 1
}
# noclobber makes the '>' redirection fail when the lock file already
# exists, so lock creation is a single atomic step inside the subshell.
( set -o noclobber ; echo $$ >$lockfile ) || {
exit 1
}
trap "rm -f $lockfile" EXIT

# Scrape the router's DSL overview page for the "Bandwidth" row and
# split it into the global variables up/down (commas stripped).
# Returns non-zero when no such row is found (line down / page changed).
# NOTE(review): 'GET' is presumably lwp-request's alias — confirm.
function get_syncspeed() {
read up down < <(GET "http://${router}/cgi/b/dsl/ov/?be=0&l0=1&l1=0" | grep Bandwidth | sed 's/.*>\([0-9,]\+\) \/ \([0-9,]\+\)<.*/\1 \2/g; s/,//g;')
[[ -n $up && -n $down ]]
}
# Bail out when the router reports no DSL sync at all.
get_syncspeed || {
	echo >&2 "DSL connection is down"
	exit 1
}

# Flag a restart when either direction syncs below its minimum rate.
[[ $up -lt $up_min ]] && {
	restart=1
}
[[ $down -lt $down_min ]] && {
	restart=1
}

case $task in
(REPORT)
	echo "REPORT: UP:$up / DOWN:$down"
	;;
esac

if [[ -n $restart ]] ; then
	echo >&2 "DSL sync speed is inadequate, restart required: Up / Down ($up / $down)"
	case $task in
	(RESTART)
		# Load the consecutive-failure counter left by earlier runs.
		[ -f "$failfile" ] && {
			fail=$(cat "$failfile")
		}
		fail=${fail:-0}
		# After fail_reset skipped attempts, allow restarting again.
		[ "$fail" -gt "$fail_reset" ] && {
			echo "Resetting resync failures, permitting restart again" >&2
			fail=0
		}
		# Too many consecutive failures: record this attempt and refuse.
		[ "$fail" -gt "$fail_stop" ] && {
			echo "$((fail + 1))" >"$failfile"
			[ "$fail" -gt "$((fail_stop + 1))" ] && {
				echo "Restart disabled, ignoring request. Will permit in $((fail_reset - fail)) attempts" >&2
				exit 1
			}
			echo "Failed to restart at a better rate on more than three consecutive attempts, aborting restart" >&2
			exit 1
		}

		echo "Sending restart request to DSL router: $router..."
		# Form fields expected by the router's restart CGI endpoint.
		fields=( \
			"0=17" \
			"1=" \
			"2=Yes" \
			"3=No" \
		)
		echo "$(printf "%s&" "${fields[@]}" )EOL=1" | \
			POST "http://${router}/cgi/b/info/restart/?be=0&l0=0&l1=0&tid=RESTART" \
			1>/dev/null 2>&1

		# Wait for it to go down
		# NOTE(review): on Linux iputils ping, -t is TTL, not a timeout
		# (-W/-w) — confirm which ping implementation this targets.
		echo -n "Stopping..."
		while ping -t 2 -c 1 "$router" 1>/dev/null 2>&1 ; do
			echo -n .
			sleep 5
		done
		echo DONE

		# Wait for it to come back up
		echo -n "Starting..."
		while ! ping -t 2 -c 1 "$router" 1>/dev/null 2>&1 ; do
			echo -n .
			sleep 5
		done
		echo DONE

		# Poll until the status page reports a sync speed again.
		echo -n "Getting new sync speed..."
		while ! get_syncspeed ; do
			echo -n .
			sleep 5
		done
		echo
		echo "DSL sync speed is now: Up / Down ($up / $down)"

		# Is the new sync speed okay?
		if [ "$down" -lt "$down_min" ] ; then
			echo "$((fail + 1))" >"$failfile"
		else
			echo 0 >"$failfile"
		fi
		;;
	esac

	exit 1
else
	# Sync is healthy: clear the failure counter.
	# BUG FIX: this line was 'rm -f >$failfile', which ran rm with no
	# operands and merely truncated the counter file via redirection —
	# the stale file was never actually removed.
	[ -f "$failfile" ] && rm -f -- "$failfile"
fi

exit 0
| true |
069606ad1ad01d5096b4e20b90c55943163e0c4d | Shell | SergeTarasov/PyQt5-things | /IPA/conwav.sh | UTF-8 | 218 | 3.046875 | 3 | [] | no_license | sdir='sounds/vowels/'
# Convert every sound file in $sdir to a WAV next to the original,
# echoing "<name>, <length>, <stem>" and the stem for each file.
# Fixes: iterate the glob directly instead of parsing `ls` output
# (SC2045), quote all expansions so odd file names survive (SC2086),
# and strip the real extension instead of assuming it is exactly
# 3 characters long (the old code cut a fixed 4 chars off the end).
for path in "$sdir"*; do
	[ -e "$path" ] || continue          # unmatched glob: nothing to do
	entity=${path##*/}                  # bare file name, as `ls` printed it
	lengh=${#entity}
	output=${entity%.*}                 # name without its extension
	echo "$entity, $lengh, $output"
	echo "$output"
	ffmpeg -i "$sdir$entity" "$sdir$output.wav"
done
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.